| old | new | line |
|---|---|---|
| .. | .. | |
| | 1 | +// SPDX-License-Identifier: GPL-2.0-only |
| 1 | 2 | /* |
| 2 | 3 | * User address space access functions. |
| 3 | 4 | * |
| .. | .. | |

| old | new | line |
|---|---|---|
| 38 | 39 | "3: lea 0(%[size1],%[size8],8),%[size8]\n" |
| 39 | 40 | " jmp 2b\n" |
| 40 | 41 | ".previous\n" |
| 41 | | - _ASM_EXTABLE(0b,3b) |
| 42 | | - _ASM_EXTABLE(1b,2b) |
| | 42 | + _ASM_EXTABLE_UA(0b, 3b) |
| | 43 | + _ASM_EXTABLE_UA(1b, 2b) |
| 43 | 44 | : [size8] "=&c"(size), [dst] "=&D" (__d0) |
| 44 | 45 | : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr)); |
| 45 | 46 | clac(); |
| .. | .. | |

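For context on the `_ASM_EXTABLE(from, to)` to `_ASM_EXTABLE_UA(from, to)` switch in the hunk above: both macros record a (faulting instruction, fixup target) pair in the exception table, and the `_UA` variant additionally marks the entry as a user-access fixup so a fault on the user address is turned into an error return rather than an oops. A minimal, hedged sketch of the usual shape of such an entry follows; the function, labels, and constraints are illustrative assumptions, not code from this file:

```c
/*
 * Illustrative only (assumed kernel context, not part of this patch):
 * a user load at label 1 that faults is redirected by the exception
 * table entry to the fixup at label 3, which reports -EFAULT and
 * zeroes the destination instead of letting the fault kill the kernel.
 * The access_ok() range check is omitted for brevity.
 */
static int read_user_int(const int __user *uptr, int *val)
{
	int err = 0;

	__uaccess_begin();			/* STAC */
	asm volatile("1:	movl %2, %1\n"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:	movl %3, %0\n"
		     "	xorl %1, %1\n"
		     "	jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE_UA(1b, 3b)
		     : "+r" (err), "=r" (*val)
		     : "m" (*uptr), "i" (-EFAULT));
	__uaccess_end();			/* CLAC */

	return err;
}
```
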
| old | new | line |
|---|---|---|
| 49 | 50 | |
| 50 | 51 | unsigned long clear_user(void __user *to, unsigned long n) |
| 51 | 52 | { |
| 52 | | - if (access_ok(VERIFY_WRITE, to, n)) |
| | 53 | + if (access_ok(to, n)) |
| 53 | 54 | return __clear_user(to, n); |
| 54 | 55 | return n; |
| 55 | 56 | } |
| 56 | 57 | EXPORT_SYMBOL(clear_user); |
| 57 | | - |
| 58 | | -/* |
| 59 | | - * Try to copy last bytes and clear the rest if needed. |
| 60 | | - * Since protection fault in copy_from/to_user is not a normal situation, |
| 61 | | - * it is not necessary to optimize tail handling. |
| 62 | | - */ |
| 63 | | -__visible unsigned long |
| 64 | | -copy_user_handle_tail(char *to, char *from, unsigned len) |
| 65 | | -{ |
| 66 | | - for (; len; --len, to++) { |
| 67 | | - char c; |
| 68 | | - |
| 69 | | - if (__get_user_nocheck(c, from++, sizeof(char))) |
| 70 | | - break; |
| 71 | | - if (__put_user_nocheck(c, to, sizeof(char))) |
| 72 | | - break; |
| 73 | | - } |
| 74 | | - clac(); |
| 75 | | - return len; |
| 76 | | -} |
| 77 | | - |
| 78 | | -/* |
| 79 | | - * Similar to copy_user_handle_tail, probe for the write fault point, |
| 80 | | - * but reuse __memcpy_mcsafe in case a new read error is encountered. |
| 81 | | - * clac() is handled in _copy_to_iter_mcsafe(). |
| 82 | | - */ |
| 83 | | -__visible unsigned long |
| 84 | | -mcsafe_handle_tail(char *to, char *from, unsigned len) |
| 85 | | -{ |
| 86 | | - for (; len; --len, to++, from++) { |
| 87 | | - /* |
| 88 | | - * Call the assembly routine back directly since |
| 89 | | - * memcpy_mcsafe() may silently fallback to memcpy. |
| 90 | | - */ |
| 91 | | - unsigned long rem = __memcpy_mcsafe(to, from, 1); |
| 92 | | - |
| 93 | | - if (rem) |
| 94 | | - break; |
| 95 | | - } |
| 96 | | - return len; |
| 97 | | -} |
| 98 | 58 | |
| 99 | 59 | #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE |
| 100 | 60 | /** |
| .. | .. | |

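The `access_ok(VERIFY_WRITE, to, n)` to `access_ok(to, n)` change in the hunk above reflects the two-argument `access_ok()`, which drops the unused VERIFY_READ/VERIFY_WRITE argument and only range-checks the user pointer. A hedged usage sketch follows; the caller is hypothetical and not part of this patch:

```c
/*
 * Hypothetical caller, for illustration only: zero a user buffer.
 * clear_user() already performs the access_ok() check shown above;
 * __clear_user() is the unchecked variant for callers that have
 * validated the range themselves.
 */
static int zero_user_area(void __user *ubuf, unsigned long len)
{
	if (!access_ok(ubuf, len))
		return -EFAULT;

	/* __clear_user() returns the number of bytes left uncleared. */
	if (__clear_user(ubuf, len))
		return -EFAULT;

	return 0;
}
```
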
| old | new | line |
|---|---|---|
| 154 | 114 | return rc; |
| 155 | 115 | } |
| 156 | 116 | |
| 157 | | -void memcpy_flushcache(void *_dst, const void *_src, size_t size) |
| | 117 | +void __memcpy_flushcache(void *_dst, const void *_src, size_t size) |
| 158 | 118 | { |
| 159 | 119 | unsigned long dest = (unsigned long) _dst; |
| 160 | 120 | unsigned long source = (unsigned long) _src; |
| 161 | 121 | |
| 162 | 122 | /* cache copy and flush to align dest */ |
| 163 | 123 | if (!IS_ALIGNED(dest, 8)) { |
| 164 | | - unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest); |
| | 124 | + size_t len = min_t(size_t, size, ALIGN(dest, 8) - dest); |
| 165 | 125 | |
| 166 | 126 | memcpy((void *) dest, (void *) source, len); |
| 167 | 127 | clean_cache_range((void *) dest, len); |
| .. | .. | |

| old | new | line |
|---|---|---|
| 217 | 177 | clean_cache_range((void *) dest, size); |
| 218 | 178 | } |
| 219 | 179 | } |
| 220 | | -EXPORT_SYMBOL_GPL(memcpy_flushcache); |
| | 180 | +EXPORT_SYMBOL_GPL(__memcpy_flushcache); |
| 221 | 181 | |
| 222 | 182 | void memcpy_page_flushcache(char *to, struct page *page, size_t offset, |
| 223 | 183 | size_t len) |

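The rename to `__memcpy_flushcache()` (in this hunk and the one above) leaves the exported symbol as the out-of-line path. Presumably it is paired with an inline `memcpy_flushcache()` wrapper in a header that handles small constant-size copies with non-temporal stores and falls back to this routine otherwise; a hedged sketch of what such a wrapper can look like follows (the wrapper itself is an assumption, it is not shown in this diff):

```c
/*
 * Hedged sketch of an assumed header-side wrapper (not part of this hunk):
 * tiny constant-size copies are done with movnti directly, everything
 * else goes through the out-of-line, exported __memcpy_flushcache().
 */
static __always_inline void memcpy_flushcache(void *dst, const void *src,
					      size_t cnt)
{
	if (__builtin_constant_p(cnt)) {
		switch (cnt) {
		case 4:
			asm ("movntil %1, %0"
			     : "=m" (*(u32 *)dst) : "r" (*(u32 *)src));
			return;
		case 8:
			asm ("movntiq %1, %0"
			     : "=m" (*(u64 *)dst) : "r" (*(u64 *)src));
			return;
		}
	}
	__memcpy_flushcache(dst, src, cnt);
}
```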