.. | .. |
---|
8 | 8 | #include <asm/extable.h> |
---|
9 | 9 | #include <asm/kup.h> |
---|
10 | 10 | |
---|
/*
 * TASK_SIZE_MAX is the highest virtual address a user-space pointer may
 * reference; all user access range checks below compare against it.
 * (The old get_fs()/set_fs() mm_segment machinery has been removed.)
 */
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define TASK_SIZE_MAX	TASK_SIZE_USER64
#else
#define TASK_SIZE_MAX	TASK_SIZE
#endif
---|
31 | 17 | |
---|
32 | | -#define get_ds() (KERNEL_DS) |
---|
33 | | -#define get_fs() (current->thread.addr_limit) |
---|
34 | | - |
---|
35 | | -static inline void set_fs(mm_segment_t fs) |
---|
| 18 | +static inline bool __access_ok(unsigned long addr, unsigned long size) |
---|
36 | 19 | { |
---|
37 | | - current->thread.addr_limit = fs; |
---|
38 | | - /* On user-mode return check addr_limit (fs) is correct */ |
---|
39 | | - set_thread_flag(TIF_FSCHECK); |
---|
| 20 | + return addr < TASK_SIZE_MAX && size <= TASK_SIZE_MAX - addr; |
---|
40 | 21 | } |
---|
41 | 22 | |
---|
/*
 * access_ok() - may the current task touch [addr, addr + size) in user
 * space?  __chk_user_ptr() is a sparse-only annotation check; the real
 * range test is done by __access_ok() against TASK_SIZE_MAX.
 */
#define access_ok(addr, size)		\
	(__chk_user_ptr(addr), \
	 __access_ok((unsigned long)(addr), (size)))
---|
69 | 26 | |
---|
70 | 27 | /* |
---|
71 | 28 | * These are the main single-value transfer routines. They automatically |
---|
.. | .. |
---|
/* Single-value transfers without an access_ok() check (caller has checked). */
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
/* goto variant: jumps to 'label' on a fault instead of returning -EFAULT. */
#define __put_user_goto(x, ptr, label) \
	__put_user_nocheck_goto((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

/* 'allowed': the user-access window is presumably already open (see kup.h). */
#define __get_user_allowed(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)

/* 'inatomic': for contexts that must not sleep (no might_fault()). */
#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
---|
108 | 65 | |
---|
#ifdef CONFIG_PPC64

/*
 * Fetch one (possibly prefixed) powerpc instruction from user memory.
 * gu_op is the word-sized accessor to use (get_user, __get_user, ...).
 * If the first 32-bit word has major opcode OP_PREFIX, the following
 * suffix word is fetched as well and both halves are combined with
 * ppc_inst_prefix(); otherwise the single word becomes a ppc_inst.
 * 'dest' is written only when every fetch succeeded.  Evaluates to 0
 * on success or the accessor's error code.
 */
#define ___get_user_instr(gu_op, dest, ptr)				\
({									\
	long __gui_ret = 0;						\
	unsigned long __gui_ptr = (unsigned long)ptr;			\
	struct ppc_inst __gui_inst;					\
	unsigned int __prefix, __suffix;				\
	__gui_ret = gu_op(__prefix, (unsigned int __user *)__gui_ptr);	\
	if (__gui_ret == 0) {						\
		if ((__prefix >> 26) == OP_PREFIX) {			\
			__gui_ret = gu_op(__suffix,			\
				(unsigned int __user *)__gui_ptr + 1);	\
			__gui_inst = ppc_inst_prefix(__prefix,		\
						     __suffix);		\
		} else {						\
			__gui_inst = ppc_inst(__prefix);		\
		}							\
		if (__gui_ret == 0)					\
			(dest) = __gui_inst;				\
	}								\
	__gui_ret;							\
})

#define get_user_instr(x, ptr) \
	___get_user_instr(get_user, x, ptr)

#define __get_user_instr(x, ptr) \
	___get_user_instr(__get_user, x, ptr)

#define __get_user_instr_inatomic(x, ptr) \
	___get_user_instr(__get_user_inatomic, x, ptr)

#else /* !CONFIG_PPC64 */
/* 32-bit has no prefixed instructions: a single 32-bit load suffices. */
#define get_user_instr(x, ptr) \
	get_user((x).val, (u32 __user *)(ptr))

#define __get_user_instr(x, ptr) \
	__get_user_nocheck((x).val, (u32 __user *)(ptr), sizeof(u32), true)

#define __get_user_instr_inatomic(x, ptr) \
	__get_user_nosleep((x).val, (u32 __user *)(ptr), sizeof(u32))

#endif /* CONFIG_PPC64 */
---|
| 110 | + |
---|
/* Deliberately undefined: referencing it turns a bad size into a link error. */
extern long __put_user_bad(void);

/*
 * Store to user memory with the access window already open, yielding an
 * error code instead of a branch.  Built on the goto-based primitive:
 * on a fault the exception table transfers control to the local
 * __pu_failed label, which sets retval = -EFAULT; on success the
 * 'break' leaves the do/while before reaching the failure path.
 */
#define __put_user_size_allowed(x, ptr, size, retval)		\
do {								\
	__label__ __pu_failed;					\
								\
	retval = 0;						\
	__put_user_size_goto(x, ptr, size, __pu_failed);	\
	break;							\
								\
__pu_failed:							\
	retval = -EFAULT;					\
} while (0)
---|
158 | 124 | |
---|
159 | 125 | #define __put_user_size(x, ptr, size, retval) \ |
---|
.. | .. |
---|
163 | 129 | prevent_write_to_user(ptr, size); \ |
---|
164 | 130 | } while (0) |
---|
165 | 131 | |
---|
166 | | -#define __put_user_nocheck(x, ptr, size, do_allow) \ |
---|
| 132 | +#define __put_user_nocheck(x, ptr, size) \ |
---|
167 | 133 | ({ \ |
---|
168 | 134 | long __pu_err; \ |
---|
169 | 135 | __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ |
---|
.. | .. |
---|
173 | 139 | if (!is_kernel_addr((unsigned long)__pu_addr)) \ |
---|
174 | 140 | might_fault(); \ |
---|
175 | 141 | __chk_user_ptr(__pu_addr); \ |
---|
176 | | - if (do_allow) \ |
---|
177 | | - __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \ |
---|
178 | | - else \ |
---|
179 | | - __put_user_size_allowed(__pu_val, __pu_addr, __pu_size, __pu_err); \ |
---|
| 142 | + __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \ |
---|
180 | 143 | \ |
---|
181 | 144 | __pu_err; \ |
---|
182 | 145 | }) |
---|
.. | .. |
---|
189 | 152 | __typeof__(size) __pu_size = (size); \ |
---|
190 | 153 | \ |
---|
191 | 154 | might_fault(); \ |
---|
192 | | - if (access_ok(VERIFY_WRITE, __pu_addr, __pu_size)) \ |
---|
| 155 | + if (access_ok(__pu_addr, __pu_size)) \ |
---|
193 | 156 | __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \ |
---|
194 | 157 | \ |
---|
195 | 158 | __pu_err; \ |
---|
.. | .. |
---|
209 | 172 | }) |
---|
210 | 173 | |
---|
211 | 174 | |
---|
/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 *
 * asm goto form: a fault is redirected by the exception table entry
 * straight to 'label', so no error register is needed.  %U1/%X1 are
 * powerpc operand modifiers letting gcc pick update/indexed addressing
 * for the "m" operand; UPD_CONSTR tweaks the constraint accordingly
 * (NOTE(review): modifier semantics per GCC rs6000 docs — confirm).
 */
#define __put_user_asm_goto(x, addr, label, op)			\
	asm_volatile_goto(					\
		"1: " op "%U1%X1 %0,%1	# put_user\n"		\
		EX_TABLE(1b, %l2)				\
		:						\
		: "r" (x), "m"UPD_CONSTR (*addr)		\
		:						\
		: label)

#ifdef __powerpc64__
#define __put_user_asm2_goto(x, ptr, label)			\
	__put_user_asm_goto(x, ptr, label, "std")
#else /* __powerpc64__ */
/* 32-bit: a 64-bit store is two stw's; either half may fault. */
#define __put_user_asm2_goto(x, addr, label)			\
	asm_volatile_goto(					\
		"1: stw%X1 %0, %1\n"				\
		"2: stw%X1 %L0, %L1\n"				\
		EX_TABLE(1b, %l2)				\
		EX_TABLE(2b, %l2)				\
		:						\
		: "r" (x), "m" (*addr)				\
		:						\
		: label)
#endif /* __powerpc64__ */
---|
| 204 | + |
---|
/*
 * Dispatch a user store of the given constant size to the matching
 * asm goto primitive; control transfers to 'label' on a fault.  An
 * unsupported size becomes a link error via __put_user_bad().
 */
#define __put_user_size_goto(x, ptr, size, label)		\
do {								\
	switch (size) {						\
	case 1: __put_user_asm_goto(x, ptr, label, "stb"); break;	\
	case 2: __put_user_asm_goto(x, ptr, label, "sth"); break;	\
	case 4: __put_user_asm_goto(x, ptr, label, "stw"); break;	\
	case 8: __put_user_asm2_goto(x, ptr, label); break;	\
	default: __put_user_bad();				\
	}							\
} while (0)

/* No access_ok() check; __chk_user_ptr() keeps sparse's __user checking. */
#define __put_user_nocheck_goto(x, ptr, size, label)		\
do {								\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	__put_user_size_goto((x), __pu_addr, (size), label);	\
} while (0)

---|
| 222 | + |
---|
| 223 | + |
---|
212 | 224 | extern long __get_user_bad(void); |
---|
213 | 225 | |
---|
214 | 226 | /* |
---|
.. | .. |
---|
217 | 229 | */ |
---|
218 | 230 | #define __get_user_atomic_128_aligned(kaddr, uaddr, err) \ |
---|
219 | 231 | __asm__ __volatile__( \ |
---|
| 232 | + ".machine push\n" \ |
---|
| 233 | + ".machine altivec\n" \ |
---|
220 | 234 | "1: lvx 0,0,%1 # get user\n" \ |
---|
221 | 235 | " stvx 0,0,%2 # put kernel\n" \ |
---|
| 236 | + ".machine pop\n" \ |
---|
222 | 237 | "2:\n" \ |
---|
223 | 238 | ".section .fixup,\"ax\"\n" \ |
---|
224 | 239 | "3: li %0,%3\n" \ |
---|
.. | .. |
---|
230 | 245 | |
---|
231 | 246 | #define __get_user_asm(x, addr, err, op) \ |
---|
232 | 247 | __asm__ __volatile__( \ |
---|
233 | | - "1: "op" %1,0(%2) # get_user\n" \ |
---|
| 248 | + "1: "op"%U2%X2 %1, %2 # get_user\n" \ |
---|
234 | 249 | "2:\n" \ |
---|
235 | 250 | ".section .fixup,\"ax\"\n" \ |
---|
236 | 251 | "3: li %0,%3\n" \ |
---|
.. | .. |
---|
239 | 254 | ".previous\n" \ |
---|
240 | 255 | EX_TABLE(1b, 3b) \ |
---|
241 | 256 | : "=r" (err), "=r" (x) \ |
---|
242 | | - : "b" (addr), "i" (-EFAULT), "0" (err)) |
---|
| 257 | + : "m"UPD_CONSTR (*addr), "i" (-EFAULT), "0" (err)) |
---|
243 | 258 | |
---|
244 | 259 | #ifdef __powerpc64__ |
---|
245 | 260 | #define __get_user_asm2(x, addr, err) \ |
---|
.. | .. |
---|
247 | 262 | #else /* __powerpc64__ */ |
---|
248 | 263 | #define __get_user_asm2(x, addr, err) \ |
---|
249 | 264 | __asm__ __volatile__( \ |
---|
250 | | - "1: lwz %1,0(%2)\n" \ |
---|
251 | | - "2: lwz %1+1,4(%2)\n" \ |
---|
| 265 | + "1: lwz%X2 %1, %2\n" \ |
---|
| 266 | + "2: lwz%X2 %L1, %L2\n" \ |
---|
252 | 267 | "3:\n" \ |
---|
253 | 268 | ".section .fixup,\"ax\"\n" \ |
---|
254 | 269 | "4: li %0,%3\n" \ |
---|
.. | .. |
---|
259 | 274 | EX_TABLE(1b, 4b) \ |
---|
260 | 275 | EX_TABLE(2b, 4b) \ |
---|
261 | 276 | : "=r" (err), "=&r" (x) \ |
---|
262 | | - : "b" (addr), "i" (-EFAULT), "0" (err)) |
---|
| 277 | + : "m" (*addr), "i" (-EFAULT), "0" (err)) |
---|
263 | 278 | #endif /* __powerpc64__ */ |
---|
264 | 279 | |
---|
265 | 280 | #define __get_user_size_allowed(x, ptr, size, retval) \ |
---|
.. | .. |
---|
269 | 284 | if (size > sizeof(x)) \ |
---|
270 | 285 | (x) = __get_user_bad(); \ |
---|
271 | 286 | switch (size) { \ |
---|
272 | | - case 1: __get_user_asm(x, ptr, retval, "lbz"); break; \ |
---|
273 | | - case 2: __get_user_asm(x, ptr, retval, "lhz"); break; \ |
---|
274 | | - case 4: __get_user_asm(x, ptr, retval, "lwz"); break; \ |
---|
275 | | - case 8: __get_user_asm2(x, ptr, retval); break; \ |
---|
| 287 | + case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break; \ |
---|
| 288 | + case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break; \ |
---|
| 289 | + case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break; \ |
---|
| 290 | + case 8: __get_user_asm2(x, (u64 __user *)ptr, retval); break; \ |
---|
276 | 291 | default: (x) = __get_user_bad(); \ |
---|
277 | 292 | } \ |
---|
278 | 293 | } while (0) |
---|
.. | .. |
---|
299 | 314 | __typeof__(size) __gu_size = (size); \ |
---|
300 | 315 | \ |
---|
301 | 316 | __chk_user_ptr(__gu_addr); \ |
---|
302 | | - if (!is_kernel_addr((unsigned long)__gu_addr)) \ |
---|
| 317 | + if (do_allow && !is_kernel_addr((unsigned long)__gu_addr)) \ |
---|
303 | 318 | might_fault(); \ |
---|
304 | 319 | barrier_nospec(); \ |
---|
305 | 320 | if (do_allow) \ |
---|
.. | .. |
---|
319 | 334 | __typeof__(size) __gu_size = (size); \ |
---|
320 | 335 | \ |
---|
321 | 336 | might_fault(); \ |
---|
322 | | - if (access_ok(VERIFY_READ, __gu_addr, __gu_size)) { \ |
---|
| 337 | + if (access_ok(__gu_addr, __gu_size)) { \ |
---|
323 | 338 | barrier_nospec(); \ |
---|
324 | 339 | __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \ |
---|
325 | 340 | } \ |
---|
.. | .. |
---|
/* Low-level copy primitive implemented in assembly (copy_tofrom_user.S). */
extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifdef CONFIG_ARCH_HAS_COPY_MC
/*
 * Machine-check-safe copy primitive; presumably returns the number of
 * bytes NOT copied, matching the copy_to_user() convention — confirm
 * against the copy_mc_generic() implementation.
 */
unsigned long __must_check
copy_mc_generic(void *to, const void *from, unsigned long size);

/* Kernel-to-kernel machine-check-safe copy: thin wrapper. */
static inline unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned long size)
{
	return copy_mc_generic(to, from, size);
}
#define copy_mc_to_kernel copy_mc_to_kernel

/*
 * Machine-check-safe copy to user space.  Opens the user write window
 * around the copy; if the size check or access_ok() fails, nothing is
 * copied and the full length n is returned.
 * NOTE(review): prevent_write_to_user() is called with the updated n
 * (bytes remaining), not the original length — verify this is intended.
 */
static inline unsigned long __must_check
copy_mc_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true))) {
		if (access_ok(to, n)) {
			allow_write_to_user(to, n);
			n = copy_mc_generic((void *)to, from, n);
			prevent_write_to_user(to, n);
		}
	}

	return n;
}
#endif
---|
| 392 | + |
---|
352 | 393 | #ifdef __powerpc64__ |
---|
353 | 394 | static inline unsigned long |
---|
354 | 395 | raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) |
---|
.. | .. |
---|
356 | 397 | unsigned long ret; |
---|
357 | 398 | |
---|
358 | 399 | barrier_nospec(); |
---|
359 | | - allow_user_access(to, from, n); |
---|
| 400 | + allow_read_write_user(to, from, n); |
---|
360 | 401 | ret = __copy_tofrom_user(to, from, n); |
---|
361 | | - prevent_user_access(to, from, n); |
---|
| 402 | + prevent_read_write_user(to, from, n); |
---|
362 | 403 | return ret; |
---|
363 | 404 | } |
---|
364 | 405 | #endif /* __powerpc64__ */ |
---|
.. | .. |
---|
443 | 484 | { |
---|
444 | 485 | unsigned long ret = size; |
---|
445 | 486 | might_fault(); |
---|
446 | | - if (likely(access_ok(VERIFY_WRITE, addr, size))) { |
---|
| 487 | + if (likely(access_ok(addr, size))) { |
---|
447 | 488 | allow_write_to_user(addr, size); |
---|
448 | 489 | ret = __arch_clear_user(addr, size); |
---|
449 | 490 | prevent_write_to_user(addr, size); |
---|
.. | .. |
---|
464 | 505 | extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset, |
---|
465 | 506 | size_t len); |
---|
466 | 507 | |
---|
/*
 * user_access_begin()/user_access_end(): open/close a combined
 * read+write user access window (allow/prevent helpers from asm/kup.h).
 * Returns false — and opens nothing — if access_ok() rejects the range,
 * so callers must pair user_access_end() only with a true return.
 */
static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;

	might_fault();

	allow_read_write_user((void __user *)ptr, ptr, len);
	return true;
}
#define user_access_begin user_access_begin
#define user_access_end prevent_current_access_user
/* Save/restore the current user-access state around nested sections. */
#define user_access_save prevent_user_access_return
#define user_access_restore restore_user_access
---|
| 522 | + |
---|
/*
 * Open a read-only user access window; pair with user_read_access_end()
 * only when this returns true (nothing is opened on failure).
 */
static __must_check inline bool
user_read_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;

	might_fault();

	allow_read_from_user(ptr, len);
	return true;
}
#define user_read_access_begin user_read_access_begin
#define user_read_access_end prevent_current_read_from_user
---|
| 536 | + |
---|
/*
 * Open a write-only user access window; pair with user_write_access_end()
 * only when this returns true (nothing is opened on failure).
 */
static __must_check inline bool
user_write_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;

	might_fault();

	allow_write_to_user((void __user *)ptr, len);
	return true;
}
#define user_write_access_begin user_write_access_begin
#define user_write_access_end prevent_current_write_to_user
---|
469 | 550 | |
---|
470 | 551 | #define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0) |
---|
471 | 552 | #define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e) |
---|
472 | | -#define unsafe_put_user(x, p, e) unsafe_op_wrap(__put_user_allowed(x, p), e) |
---|
| 553 | +#define unsafe_put_user(x, p, e) __put_user_goto(x, p, e) |
---|
| 554 | + |
---|
473 | 555 | #define unsafe_copy_to_user(d, s, l, e) \ |
---|
474 | | - unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e) |
---|
| 556 | +do { \ |
---|
| 557 | + u8 __user *_dst = (u8 __user *)(d); \ |
---|
| 558 | + const u8 *_src = (const u8 *)(s); \ |
---|
| 559 | + size_t _len = (l); \ |
---|
| 560 | + int _i; \ |
---|
| 561 | + \ |
---|
| 562 | + for (_i = 0; _i < (_len & ~(sizeof(long) - 1)); _i += sizeof(long)) \ |
---|
| 563 | + __put_user_goto(*(long*)(_src + _i), (long __user *)(_dst + _i), e);\ |
---|
| 564 | + if (IS_ENABLED(CONFIG_PPC64) && (_len & 4)) { \ |
---|
| 565 | + __put_user_goto(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e); \ |
---|
| 566 | + _i += 4; \ |
---|
| 567 | + } \ |
---|
| 568 | + if (_len & 2) { \ |
---|
| 569 | + __put_user_goto(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e); \ |
---|
| 570 | + _i += 2; \ |
---|
| 571 | + } \ |
---|
| 572 | + if (_len & 1) \ |
---|
| 573 | + __put_user_goto(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e);\ |
---|
| 574 | +} while (0) |
---|
| 575 | + |
---|
/* Advertise non-faulting kernel accessors built on the uaccess machinery. */
#define HAVE_GET_KERNEL_NOFAULT

/*
 * Read a 'type' from kernel address 'src' into 'dst'; branch to
 * err_label if the access faults instead of oopsing.  The __force
 * cast reuses the user-access asm (exception table) on a kernel
 * pointer.
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __kr_err;							\
									\
	__get_user_size_allowed(*((type *)(dst)), (__force type __user *)(src),\
			sizeof(type), __kr_err);			\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)

/* Write counterpart: store *src to kernel address dst, goto on fault. */
#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_size_goto(*((type *)(src)),				\
			(__force type __user *)(dst), sizeof(type), err_label)
---|
475 | 591 | |
---|
476 | 592 | #endif /* _ARCH_POWERPC_UACCESS_H */ |
---|