/*
 * linux/arch/arm/lib/uaccess_with_memcpy.c
 *
 * Written by: Lennert Buytenhek and Nicolas Pitre
 * Copyright (C) 2009 Marvell Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <asm/current.h>
#include <asm/page.h>

static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
	unsigned long addr = (unsigned long)_addr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	pud_t *pud;
	spinlock_t *ptl;

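	/*
	 * Walk the page tables by hand (pgd -> pud -> pmd -> pte), bailing
	 * out at the first level that is absent or bad; the caller then
	 * faults the page in through the normal path and retries.
	 */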
	pgd = pgd_offset(current->mm, addr);
	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
		return 0;

	pud = pud_offset(pgd, addr);
	if (unlikely(pud_none(*pud) || pud_bad(*pud)))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (unlikely(pmd_none(*pmd)))
		return 0;

	/*
	 * A pmd can be bad if it refers to a HugeTLB or THP page.
	 *
	 * Both THP and HugeTLB pages have the same pmd layout
	 * and should not be manipulated by the pte functions.
	 *
	 * Lock the page table for the destination and check that it
	 * is still huge, that a write would not fault, and that the
	 * THP is not in the middle of being split.
	 */
	if (unlikely(pmd_thp_or_huge(*pmd))) {
		ptl = &current->mm->page_table_lock;
		spin_lock(ptl);
		if (unlikely(!pmd_thp_or_huge(*pmd)
			|| pmd_hugewillfault(*pmd)
			|| pmd_trans_splitting(*pmd))) {
			spin_unlock(ptl);
			return 0;
		}

		*ptep = NULL;
		*ptlp = ptl;
		return 1;
	}

	if (unlikely(pmd_bad(*pmd)))
		return 0;

	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
	    !pte_write(*pte) || !pte_dirty(*pte))) {
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

	*ptep = pte;
	*ptlp = ptl;

	return 1;
}

static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
	unsigned long ua_flags;
	int atomic;

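	/*
	 * With the address limit set to KERNEL_DS, the "user" pointer
	 * is really a kernel address, so a plain memcpy() is safe.
	 */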
	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memcpy((void *)to, from, n);
		return 0;
	}

	/* the mmap semaphore is taken only if not in an atomic context */
	atomic = faulthandler_disabled();

	if (!atomic)
		down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

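		/*
		 * If the page cannot be pinned (not present, not
		 * writable, or not yet dirty/young), drop the locks and
		 * write a zero byte through the slow path: the fault
		 * brings the page in with a writable, dirty pte, after
		 * which pinning is retried.
		 */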
		while (!pin_page_for_write(to, &pte, &ptl)) {
			if (!atomic)
				up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)to))
				goto out;
			if (!atomic)
				down_read(&current->mm->mmap_sem);
		}

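		/*
		 * Only one page is pinned at a time, so split the copy
		 * at page boundaries: this is the number of bytes from
		 * 'to' up to the end of its page.
		 */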
		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		ua_flags = uaccess_save_and_enable();
		memcpy((void *)to, from, tocopy);
		uaccess_restore(ua_flags);
		to += tocopy;
		from += tocopy;
		n -= tocopy;

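		/*
		 * A NULL pte means pin_page_for_write() took the huge
		 * page path and returned mm->page_table_lock instead of
		 * a pte lock.
		 */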
		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	if (!atomic)
		up_read(&current->mm->mmap_sem);

out:
	return n;
}

unsigned long
arm_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/*
	 * This test is factored out of the main function above to keep
	 * the overhead of small copies low by avoiding a large register
	 * dump on the stack just to reload the registers right away.
	 * With the frame pointer disabled, tail call optimization kicks
	 * in as well, making this test almost invisible.
	 */
	if (n < 64) {
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __copy_to_user_std(to, from, n);
		uaccess_restore(ua_flags);
	} else {
		n = __copy_to_user_memcpy(to, from, n);
	}
	return n;
}

static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
	unsigned long ua_flags;

	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memset((void *)addr, 0, n);
		return 0;
	}

	down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

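		/* Fault the page in as in __copy_to_user_memcpy() above. */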
		while (!pin_page_for_write(addr, &pte, &ptl)) {
			up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)addr))
				goto out;
			down_read(&current->mm->mmap_sem);
		}

		tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		ua_flags = uaccess_save_and_enable();
		memset((void *)addr, 0, tocopy);
		uaccess_restore(ua_flags);
		addr += tocopy;
		n -= tocopy;

		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	up_read(&current->mm->mmap_sem);

out:
	return n;
}

unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
	/* See the rationale for this in arm_copy_to_user() above. */
	if (n < 64) {
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __clear_user_std(addr, n);
		uaccess_restore(ua_flags);
	} else {
		n = __clear_user_memset(addr, n);
	}
	return n;
}

#if 0

/*
 * This code is disabled by default, but kept around in case the chosen
 * thresholds need to be revalidated. A runtime-determined threshold
 * would imply some (small but nonzero) overhead, and so far measurements
 * on the targets of concern have not shown a worthwhile variation.
 *
 * Note that a fairly precise sched_clock() implementation is needed
 * for the results to make sense.
 */

#include <linux/vmalloc.h>

static int __init test_size_threshold(void)
{
	struct page *src_page, *dst_page;
	void *user_ptr, *kernel_ptr;
	unsigned long long t0, t1, t2;
	int size, ret;

	ret = -ENOMEM;
	src_page = alloc_page(GFP_KERNEL);
	if (!src_page)
		goto no_src;
	dst_page = alloc_page(GFP_KERNEL);
	if (!dst_page)
		goto no_dst;
	kernel_ptr = page_address(src_page);
	user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
	if (!user_ptr)
		goto no_vmap;

	/* warm up the src page dcache */
	ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);

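	/*
	 * Time the memcpy-based and standard variants back to back for
	 * power-of-two sizes from PAGE_SIZE down to 4 bytes.
	 */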
	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
		t1 = sched_clock();
		ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
		t2 = sched_clock();
		printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __clear_user_memset(user_ptr, size);
		t1 = sched_clock();
		ret |= __clear_user_std(user_ptr, size);
		t2 = sched_clock();
		printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	if (ret)
		ret = -EFAULT;

	vunmap(user_ptr);
no_vmap:
	put_page(dst_page);
no_dst:
	put_page(src_page);
no_src:
	return ret;
}

subsys_initcall(test_size_threshold);

#endif