@@ -13 +13 @@
 #include <asm/extable.h>

 /*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- */
-
-#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
-
-#define KERNEL_DS	MAKE_MM_SEG(-1UL)
-#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)
-
-#define get_ds()	(KERNEL_DS)
-#define get_fs()	(current->thread.addr_limit)
-static inline void set_fs(mm_segment_t fs)
-{
-	current->thread.addr_limit = fs;
-	/* On user-mode return, check fs is correct */
-	set_thread_flag(TIF_FSCHECK);
-}
-
-#define segment_eq(a, b)	((a).seg == (b).seg)
-
-#define user_addr_max()	(current->thread.addr_limit.seg)
-#define __addr_ok(addr) \
-	((unsigned long __force)(addr) < user_addr_max())
-
-/*
  * Test whether a block of memory is a valid user space address.
  * Returns 0 if the range is valid, nonzero otherwise.
  */
@@ -70 +42 @@
 })

 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-# define WARN_ON_IN_IRQ()	WARN_ON_ONCE(!in_task())
+static inline bool pagefault_disabled(void);
+# define WARN_ON_IN_IRQ() \
+	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
 #endif
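The relaxed WARN_ON_IN_IRQ() means access_ok() is now tolerated outside task context, provided page faults are disabled. A minimal sketch of a caller that this change legitimizes (the function name is hypothetical):

/* Hypothetical caller, e.g. a tracing hook that may run in interrupt
 * context. With pagefault_disable() in force, a bad access fails with
 * -EFAULT instead of sleeping to fault pages in, which is why the
 * access_ok() warning is unnecessary here.
 */
static bool trace_user_range_ok(const void __user *ptr, size_t len)
{
	bool ok;

	pagefault_disable();
	ok = access_ok(ptr, len);	/* no WARN even if !in_task() */
	pagefault_enable();
	return ok;
}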

 /**
- * access_ok: - Checks if a user space pointer is valid
- * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
- *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
- *        to write to a block, it is always safe to read from it.
+ * access_ok - Checks if a user space pointer is valid
  * @addr: User space pointer to start of block to check
  * @size: Size of block to check
  *
@@ -88 +59 @@
  *
  * Checks if a pointer to a block of memory in user space is valid.
  *
- * Returns true (nonzero) if the memory block may be valid, false (zero)
- * if it is definitely invalid.
- *
  * Note that, depending on architecture, this function probably just
  * checks that the pointer is in the user space range - after calling
  * this function, memory access functions may still return -EFAULT.
+ *
+ * Return: true (nonzero) if the memory block may be valid, false (zero)
+ * if it is definitely invalid.
  */
-#define access_ok(type, addr, size) \
+#define access_ok(addr, size) \
 ({ \
 	WARN_ON_IN_IRQ(); \
-	likely(!__range_not_ok(addr, size, user_addr_max())); \
+	likely(!__range_not_ok(addr, size, TASK_SIZE_MAX)); \
 })
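For reference, the new two-argument form is used like this; a minimal sketch (the ioctl handler and struct are hypothetical):

/* Hypothetical driver snippet: validate the whole user buffer once,
 * then use the cheaper __get_user()/__put_user() accessors on it.
 */
struct my_args {
	u32 in;
	u32 out;
};

static long my_ioctl(struct my_args __user *uarg)
{
	u32 in;

	if (!access_ok(uarg, sizeof(*uarg)))	/* no more VERIFY_* type */
		return -EFAULT;
	if (__get_user(in, &uarg->in))
		return -EFAULT;
	return __put_user(in + 1, &uarg->out);
}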
-
-/*
- * These are the main single-value transfer routines. They automatically
- * use the right size if we just have the right pointer type.
- *
- * This gets kind of ugly. We want to return _two_ values in "get_user()"
- * and yet we don't want to do any pointers, because that is too much
- * of a performance impact. Thus we have a few rather ugly macros here,
- * and hide all the ugliness from the user.
- *
- * The "__xxx" versions of the user access functions are versions that
- * do not verify the address space, that must have been done previously
- * with a separate "access_ok()" call (this is used when we do multiple
- * accesses to the same area of user memory).
- */

 extern int __get_user_1(void);
 extern int __get_user_2(void);
 extern int __get_user_4(void);
 extern int __get_user_8(void);
+extern int __get_user_nocheck_1(void);
+extern int __get_user_nocheck_2(void);
+extern int __get_user_nocheck_4(void);
+extern int __get_user_nocheck_8(void);
 extern int __get_user_bad(void);

 #define __uaccess_begin() stac()
@@ -131 +91 @@
 })

 /*
- * This is a type: either unsigned long, if the argument fits into
- * that type, or otherwise unsigned long long.
+ * This is the smallest unsigned integer type that can fit a value
+ * (up to 'long long')
  */
-#define __inttype(x) \
-__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+#define __inttype(x) __typeof__( \
+	__typefits(x,char, \
+	  __typefits(x,short, \
+	    __typefits(x,int, \
+	      __typefits(x,long,0ULL)))))

-/**
- * get_user: - Get a simple variable from user space.
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and the result of
- * dereferencing @ptr must be assignable to @x without a cast.
- *
- * Returns zero on success, or -EFAULT on error.
- * On error, the variable @x is set to zero.
- */
+#define __typefits(x,type,not) \
+	__builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)
+
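The effect of the new __inttype() is easiest to see with a few compile-time assertions; a standalone sketch (outside the kernel, GCC/Clang builtins):

/* Standalone illustration of the __typefits()/__inttype() selection.
 * Each argument maps to the smallest unsigned type that can hold it,
 * instead of the old unsigned long / unsigned long long split.
 */
#define __typefits(x,type,not) \
	__builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)
#define __inttype(x) __typeof__( \
	__typefits(x,char, __typefits(x,short, \
	__typefits(x,int, __typefits(x,long,0ULL)))))

_Static_assert(sizeof(__inttype((char)0)) == sizeof(unsigned char), "");
_Static_assert(sizeof(__inttype((int)0)) == sizeof(unsigned int), "");
_Static_assert(sizeof(__inttype((long long)0)) == sizeof(unsigned long long), "");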
 /*
+ * This is used for both get_user() and __get_user() to expand to
+ * the proper special function call that has odd calling conventions
+ * due to returning both a value and an error, and that depends on
+ * the size of the pointer passed in.
+ *
  * Careful: we have to cast the result to the type of the pointer
  * for sign reasons.
  *
@@ -168 +121 @@
  * Clang/LLVM cares about the size of the register, but still wants
  * the base register for something that ends up being a pair.
  */
-#define get_user(x, ptr) \
+#define do_get_user_call(fn,x,ptr) \
 ({ \
 	int __ret_gu; \
 	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
 	__chk_user_ptr(ptr); \
-	might_fault(); \
-	asm volatile("call __get_user_%P4" \
+	asm volatile("call __" #fn "_%P4" \
 		     : "=a" (__ret_gu), "=r" (__val_gu), \
 		       ASM_CALL_CONSTRAINT \
 		     : "0" (ptr), "i" (sizeof(*(ptr)))); \
@@ -182 +134 @@
 	__builtin_expect(__ret_gu, 0); \
 })
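A concrete reading of what do_get_user_call() emits, derived from the constraints above: for an int __user *p, the "%P4" operand prints sizeof(*(p)) == 4, so the checked and unchecked paths differ only in the symbol called. The pointer goes in via operand 0 ("=a"), the error comes back in %eax, and the value comes back in %edx/%rdx (the register pinned by asm("%"_ASM_DX)):

int __user *p;
int x, err;

err = get_user(x, p);		/* might_fault(); then "call __get_user_4" */
err = __get_user(x, p);		/* "call __get_user_nocheck_4" */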

-#define __put_user_x(size, x, ptr, __ret_pu) \
-	asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
-		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
-
-
-
-#ifdef CONFIG_X86_32
-#define __put_user_asm_u64(x, addr, err, errret) \
-	asm volatile("\n" \
-		     "1: movl %%eax,0(%2)\n" \
-		     "2: movl %%edx,4(%2)\n" \
-		     "3:" \
-		     ".section .fixup,\"ax\"\n" \
-		     "4: movl %3,%0\n" \
-		     " jmp 3b\n" \
-		     ".previous\n" \
-		     _ASM_EXTABLE(1b, 4b) \
-		     _ASM_EXTABLE(2b, 4b) \
-		     : "=r" (err) \
-		     : "A" (x), "r" (addr), "i" (errret), "0" (err))
-
-#define __put_user_asm_ex_u64(x, addr) \
-	asm volatile("\n" \
-		     "1: movl %%eax,0(%1)\n" \
-		     "2: movl %%edx,4(%1)\n" \
-		     "3:" \
-		     _ASM_EXTABLE_EX(1b, 2b) \
-		     _ASM_EXTABLE_EX(2b, 3b) \
-		     : : "A" (x), "r" (addr))
-
-#define __put_user_x8(x, ptr, __ret_pu) \
-	asm volatile("call __put_user_8" : "=a" (__ret_pu) \
-		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
-#else
-#define __put_user_asm_u64(x, ptr, retval, errret) \
-	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
-#define __put_user_asm_ex_u64(x, addr) \
-	__put_user_asm_ex(x, addr, "q", "", "er")
-#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
-#endif
-
-extern void __put_user_bad(void);
-
-/*
- * Strange magic calling convention: pointer in %ecx,
- * value in %eax(:%edx), return value in %eax. clobbers %rbx
- */
-extern void __put_user_1(void);
-extern void __put_user_2(void);
-extern void __put_user_4(void);
-extern void __put_user_8(void);
-
 /**
- * put_user: - Write a simple value into user space.
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
+ * get_user - Get a simple variable from user space.
+ * @x: Variable to store result.
+ * @ptr: Source address, in user space.
  *
  * Context: User context only. This function may sleep if pagefaults are
  * enabled.
  *
- * This macro copies a single simple value from kernel space to user
+ * This macro copies a single simple variable from user space to kernel
  * space. It supports simple types like char and int, but not larger
  * data types like structures or arrays.
  *
- * @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
  *
- * Returns zero on success, or -EFAULT on error.
+ * Return: zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
  */
-#define put_user(x, ptr) \
-({ \
-	int __ret_pu; \
-	__typeof__(*(ptr)) __pu_val; \
-	__chk_user_ptr(ptr); \
-	might_fault(); \
-	__pu_val = x; \
-	switch (sizeof(*(ptr))) { \
-	case 1: \
-		__put_user_x(1, __pu_val, ptr, __ret_pu); \
-		break; \
-	case 2: \
-		__put_user_x(2, __pu_val, ptr, __ret_pu); \
-		break; \
-	case 4: \
-		__put_user_x(4, __pu_val, ptr, __ret_pu); \
-		break; \
-	case 8: \
-		__put_user_x8(__pu_val, ptr, __ret_pu); \
-		break; \
-	default: \
-		__put_user_x(X, __pu_val, ptr, __ret_pu); \
-		break; \
-	} \
-	__builtin_expect(__ret_pu, 0); \
-})
-
-#define __put_user_size(x, ptr, size, retval, errret) \
-do { \
-	retval = 0; \
-	__chk_user_ptr(ptr); \
-	switch (size) { \
-	case 1: \
-		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
-		break; \
-	case 2: \
-		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
-		break; \
-	case 4: \
-		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
-		break; \
-	case 8: \
-		__put_user_asm_u64(x, ptr, retval, errret); \
-		break; \
-	default: \
-		__put_user_bad(); \
-	} \
-} while (0)
-
-/*
- * This doesn't do __uaccess_begin/end - the exception handling
- * around it must do that.
- */
-#define __put_user_size_ex(x, ptr, size) \
-do { \
-	__chk_user_ptr(ptr); \
-	switch (size) { \
-	case 1: \
-		__put_user_asm_ex(x, ptr, "b", "b", "iq"); \
-		break; \
-	case 2: \
-		__put_user_asm_ex(x, ptr, "w", "w", "ir"); \
-		break; \
-	case 4: \
-		__put_user_asm_ex(x, ptr, "l", "k", "ir"); \
-		break; \
-	case 8: \
-		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \
-		break; \
-	default: \
-		__put_user_bad(); \
-	} \
-} while (0)
-
-#ifdef CONFIG_X86_32
-#define __get_user_asm_u64(x, ptr, retval, errret) \
-({ \
-	__typeof__(ptr) __ptr = (ptr); \
-	asm volatile("\n" \
-		     "1: movl %2,%%eax\n" \
-		     "2: movl %3,%%edx\n" \
-		     "3:\n" \
-		     ".section .fixup,\"ax\"\n" \
-		     "4: mov %4,%0\n" \
-		     " xorl %%eax,%%eax\n" \
-		     " xorl %%edx,%%edx\n" \
-		     " jmp 3b\n" \
-		     ".previous\n" \
-		     _ASM_EXTABLE(1b, 4b) \
-		     _ASM_EXTABLE(2b, 4b) \
-		     : "=r" (retval), "=&A"(x) \
-		     : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \
-		       "i" (errret), "0" (retval)); \
-})
-
-#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
-#else
-#define __get_user_asm_u64(x, ptr, retval, errret) \
-	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
-#define __get_user_asm_ex_u64(x, ptr) \
-	__get_user_asm_ex(x, ptr, "q", "", "=r")
-#endif
-
-#define __get_user_size(x, ptr, size, retval, errret) \
-do { \
-	retval = 0; \
-	__chk_user_ptr(ptr); \
-	switch (size) { \
-	case 1: \
-		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
-		break; \
-	case 2: \
-		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
-		break; \
-	case 4: \
-		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
-		break; \
-	case 8: \
-		__get_user_asm_u64(x, ptr, retval, errret); \
-		break; \
-	default: \
-		(x) = __get_user_bad(); \
-	} \
-} while (0)
-
-#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
-	asm volatile("\n" \
-		     "1: mov"itype" %2,%"rtype"1\n" \
-		     "2:\n" \
-		     ".section .fixup,\"ax\"\n" \
-		     "3: mov %3,%0\n" \
-		     " xor"itype" %"rtype"1,%"rtype"1\n" \
-		     " jmp 2b\n" \
-		     ".previous\n" \
-		     _ASM_EXTABLE(1b, 3b) \
-		     : "=r" (err), ltype(x) \
-		     : "m" (__m(addr)), "i" (errret), "0" (err))
-
-#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret) \
-	asm volatile("\n" \
-		     "1: mov"itype" %2,%"rtype"1\n" \
-		     "2:\n" \
-		     ".section .fixup,\"ax\"\n" \
-		     "3: mov %3,%0\n" \
-		     " jmp 2b\n" \
-		     ".previous\n" \
-		     _ASM_EXTABLE(1b, 3b) \
-		     : "=r" (err), ltype(x) \
-		     : "m" (__m(addr)), "i" (errret), "0" (err))
-
-/*
- * This doesn't do __uaccess_begin/end - the exception handling
- * around it must do that.
- */
-#define __get_user_size_ex(x, ptr, size) \
-do { \
-	__chk_user_ptr(ptr); \
-	switch (size) { \
-	case 1: \
-		__get_user_asm_ex(x, ptr, "b", "b", "=q"); \
-		break; \
-	case 2: \
-		__get_user_asm_ex(x, ptr, "w", "w", "=r"); \
-		break; \
-	case 4: \
-		__get_user_asm_ex(x, ptr, "l", "k", "=r"); \
-		break; \
-	case 8: \
-		__get_user_asm_ex_u64(x, ptr); \
-		break; \
-	default: \
-		(x) = __get_user_bad(); \
-	} \
-} while (0)
-
-#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
-	asm volatile("1: mov"itype" %1,%"rtype"0\n" \
-		     "2:\n" \
-		     ".section .fixup,\"ax\"\n" \
-		     "3:xor"itype" %"rtype"0,%"rtype"0\n" \
-		     " jmp 2b\n" \
-		     ".previous\n" \
-		     _ASM_EXTABLE_EX(1b, 3b) \
-		     : ltype(x) : "m" (__m(addr)))
-
-#define __put_user_nocheck(x, ptr, size) \
-({ \
-	int __pu_err; \
-	__typeof__(*(ptr)) __pu_val; \
-	__pu_val = x; \
-	__uaccess_begin(); \
-	__put_user_size(__pu_val, (ptr), (size), __pu_err, -EFAULT); \
-	__uaccess_end(); \
-	__builtin_expect(__pu_err, 0); \
-})
-
-#define __get_user_nocheck(x, ptr, size) \
-({ \
-	int __gu_err; \
-	__inttype(*(ptr)) __gu_val; \
-	__typeof__(ptr) __gu_ptr = (ptr); \
-	__typeof__(size) __gu_size = (size); \
-	__uaccess_begin_nospec(); \
-	__get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \
-	__uaccess_end(); \
-	(x) = (__force __typeof__(*(ptr)))__gu_val; \
-	__builtin_expect(__gu_err, 0); \
-})
-
-/* FIXME: this hack is definitely wrong -AK */
-struct __large_struct { unsigned long buf[100]; };
-#define __m(x) (*(struct __large_struct __user *)(x))
-
-/*
- * Tell gcc we read from memory instead of writing: this is because
- * we do not write to any memory gcc knows about, so there are no
- * aliasing issues.
- */
-#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
-	asm volatile("\n" \
-		     "1: mov"itype" %"rtype"1,%2\n" \
-		     "2:\n" \
-		     ".section .fixup,\"ax\"\n" \
-		     "3: mov %3,%0\n" \
-		     " jmp 2b\n" \
-		     ".previous\n" \
-		     _ASM_EXTABLE(1b, 3b) \
-		     : "=r"(err) \
-		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
-
-#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
-	asm volatile("1: mov"itype" %"rtype"0,%1\n" \
-		     "2:\n" \
-		     _ASM_EXTABLE_EX(1b, 2b) \
-		     : : ltype(x), "m" (__m(addr)))
-
-/*
- * uaccess_try and catch
- */
-#define uaccess_try do { \
-	current->thread.uaccess_err = 0; \
-	__uaccess_begin(); \
-	barrier();
-
-#define uaccess_try_nospec do { \
-	current->thread.uaccess_err = 0; \
-	__uaccess_begin_nospec(); \
-
-#define uaccess_catch(err) \
-	__uaccess_end(); \
-	(err) |= (current->thread.uaccess_err ? -EFAULT : 0); \
-} while (0)
+#define get_user(x,ptr) ({ might_fault(); do_get_user_call(get_user,x,ptr); })
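Usage is unchanged by the rework; a minimal sketch of a typical caller (the function is hypothetical):

/* Hypothetical example: read one int from userspace with full checking. */
static long read_user_flag(const int __user *uflag)
{
	int flag;

	if (get_user(flag, uflag))	/* checks the address, may fault */
		return -EFAULT;
	return flag != 0;
}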

 /**
- * __get_user: - Get a simple variable from user space, with less checking.
+ * __get_user - Get a simple variable from user space, with less checking.
  * @x: Variable to store result.
  * @ptr: Source address, in user space.
  *
@@ -522 +172 @@
  * Caller must check the pointer with access_ok() before calling this
  * function.
  *
- * Returns zero on success, or -EFAULT on error.
+ * Return: zero on success, or -EFAULT on error.
  * On error, the variable @x is set to zero.
  */
+#define __get_user(x,ptr) do_get_user_call(get_user_nocheck,x,ptr)

-#define __get_user(x, ptr) \
-	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
+#ifdef CONFIG_X86_32
+#define __put_user_goto_u64(x, addr, label) \
+	asm_volatile_goto("\n" \
+		     "1: movl %%eax,0(%1)\n" \
+		     "2: movl %%edx,4(%1)\n" \
+		     _ASM_EXTABLE_UA(1b, %l2) \
+		     _ASM_EXTABLE_UA(2b, %l2) \
+		     : : "A" (x), "r" (addr) \
+		     : : label)
+
+#else
+#define __put_user_goto_u64(x, ptr, label) \
+	__put_user_goto(x, ptr, "q", "er", label)
+#endif
+
+extern void __put_user_bad(void);
+
+/*
+ * Strange magic calling convention: pointer in %ecx,
+ * value in %eax(:%edx), return value in %ecx. clobbers %rbx
+ */
+extern void __put_user_1(void);
+extern void __put_user_2(void);
+extern void __put_user_4(void);
+extern void __put_user_8(void);
+extern void __put_user_nocheck_1(void);
+extern void __put_user_nocheck_2(void);
+extern void __put_user_nocheck_4(void);
+extern void __put_user_nocheck_8(void);
+
+/*
+ * ptr must be evaluated and assigned to the temporary __ptr_pu before
+ * the assignment of x to __val_pu, to avoid any function calls
+ * involved in the ptr expression (possibly implicitly generated due
+ * to KASAN) from clobbering %ax.
+ */
+#define do_put_user_call(fn,x,ptr) \
+({ \
+	int __ret_pu; \
+	void __user *__ptr_pu; \
+	register __typeof__(*(ptr)) __val_pu asm("%"_ASM_AX); \
+	__chk_user_ptr(ptr); \
+	__ptr_pu = (ptr); \
+	__val_pu = (x); \
+	asm volatile("call __" #fn "_%P[size]" \
+		     : "=c" (__ret_pu), \
+		       ASM_CALL_CONSTRAINT \
+		     : "0" (__ptr_pu), \
+		       "r" (__val_pu), \
+		       [size] "i" (sizeof(*(ptr))) \
+		     :"ebx"); \
+	__builtin_expect(__ret_pu, 0); \
+})
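The evaluation-order comment above is worth a concrete illustration; the field access and instrumentation call named here are hypothetical:

/* Hypothetical KASAN-instrumented caller:
 *
 *	__put_user(val, &uarg->field);
 *
 * Evaluating "&uarg->field" may emit a compiler-generated call such
 * as __asan_load8() in an instrumented build. If that call ran after
 * __val_pu was already pinned to %eax, it would clobber the value
 * being stored; assigning ptr to __ptr_pu first keeps any such call
 * ahead of the register-pinned assignment.
 */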

 /**
- * __put_user: - Write a simple value into user space, with less checking.
+ * put_user - Write a simple value into user space.
+ * @x: Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Return: zero on success, or -EFAULT on error.
+ */
+#define put_user(x, ptr) ({ might_fault(); do_put_user_call(put_user,x,ptr); })
+
+/**
+ * __put_user - Write a simple value into user space, with less checking.
  * @x: Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
@@ -547 +269 @@
  * Caller must check the pointer with access_ok() before calling this
  * function.
  *
- * Returns zero on success, or -EFAULT on error.
+ * Return: zero on success, or -EFAULT on error.
  */
+#define __put_user(x, ptr) do_put_user_call(put_user_nocheck,x,ptr)
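A minimal sketch of the checked vs. unchecked pair (the function and struct are hypothetical):

/* Hypothetical example: write two fields back to a validated buffer. */
static int fill_result(struct my_result __user *ures, u32 a, u32 b)
{
	if (!access_ok(ures, sizeof(*ures)))
		return -EFAULT;
	if (__put_user(a, &ures->a))	/* skips the range check */
		return -EFAULT;
	return __put_user(b, &ures->b);
}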

-#define __put_user(x, ptr) \
-	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
-
-/*
- * {get|put}_user_try and catch
- *
- * get_user_try {
- *	get_user_ex(...);
- * } get_user_catch(err)
- */
-#define get_user_try		uaccess_try_nospec
-#define get_user_catch(err)	uaccess_catch(err)
-
-#define get_user_ex(x, ptr)	do { \
-	unsigned long __gue_val; \
-	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
-	(x) = (__force __typeof__(*(ptr)))__gue_val; \
+#define __put_user_size(x, ptr, size, label) \
+do { \
+	__chk_user_ptr(ptr); \
+	switch (size) { \
+	case 1: \
+		__put_user_goto(x, ptr, "b", "iq", label); \
+		break; \
+	case 2: \
+		__put_user_goto(x, ptr, "w", "ir", label); \
+		break; \
+	case 4: \
+		__put_user_goto(x, ptr, "l", "ir", label); \
+		break; \
+	case 8: \
+		__put_user_goto_u64(x, ptr, label); \
+		break; \
+	default: \
+		__put_user_bad(); \
+	} \
 } while (0)

-#define put_user_try		uaccess_try
-#define put_user_catch(err)	uaccess_catch(err)
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

-#define put_user_ex(x, ptr) \
-	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+#ifdef CONFIG_X86_32
+#define __get_user_asm_u64(x, ptr, label) do { \
+	unsigned int __gu_low, __gu_high; \
+	const unsigned int __user *__gu_ptr; \
+	__gu_ptr = (const void __user *)(ptr); \
+	__get_user_asm(__gu_low, __gu_ptr, "l", "=r", label); \
+	__get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label); \
+	(x) = ((unsigned long long)__gu_high << 32) | __gu_low; \
+} while (0)
+#else
+#define __get_user_asm_u64(x, ptr, label) \
+	__get_user_asm(x, ptr, "q", "=r", label)
+#endif
+
+#define __get_user_size(x, ptr, size, label) \
+do { \
+	__chk_user_ptr(ptr); \
+	switch (size) { \
+	case 1: { \
+		unsigned char x_u8__; \
+		__get_user_asm(x_u8__, ptr, "b", "=q", label); \
+		(x) = x_u8__; \
+		break; \
+	} \
+	case 2: \
+		__get_user_asm(x, ptr, "w", "=r", label); \
+		break; \
+	case 4: \
+		__get_user_asm(x, ptr, "l", "=r", label); \
+		break; \
+	case 8: \
+		__get_user_asm_u64(x, ptr, label); \
+		break; \
+	default: \
+		(x) = __get_user_bad(); \
+	} \
+} while (0)
+
+#define __get_user_asm(x, addr, itype, ltype, label) \
+	asm_volatile_goto("\n" \
+		     "1: mov"itype" %[umem],%[output]\n" \
+		     _ASM_EXTABLE_UA(1b, %l2) \
+		     : [output] ltype(x) \
+		     : [umem] "m" (__m(addr)) \
+		     : : label)
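The "[output]"-plus-label form relies on the compiler's asm-goto-with-outputs support, hence the CONFIG_CC_HAS_ASM_GOTO_OUTPUT guard. A standalone sketch of the same pattern, assuming a compiler new enough to accept outputs in asm goto (GCC 11+ or recent Clang):

/* Standalone illustration: the asm both produces a value and names a
 * C label it may branch to. In this toy the fault label is never
 * actually taken (userspace has no exception table); in the kernel
 * the fixup machinery rewires a faulting mov to branch there.
 */
static int load_or_fail(const int *p, int *out)
{
	int val;

	asm goto("mov (%[src]), %[val]"
		 : [val] "=r" (val)	/* output despite being asm goto */
		 : [src] "r" (p)
		 : /* no clobbers */
		 : fault);
	*out = val;
	return 0;
fault:
	return -1;
}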
+
+#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+
+#ifdef CONFIG_X86_32
+#define __get_user_asm_u64(x, ptr, retval) \
+({ \
+	__typeof__(ptr) __ptr = (ptr); \
+	asm volatile("\n" \
+		     "1: movl %[lowbits],%%eax\n" \
+		     "2: movl %[highbits],%%edx\n" \
+		     "3:\n" \
+		     ".section .fixup,\"ax\"\n" \
+		     "4: mov %[efault],%[errout]\n" \
+		     " xorl %%eax,%%eax\n" \
+		     " xorl %%edx,%%edx\n" \
+		     " jmp 3b\n" \
+		     ".previous\n" \
+		     _ASM_EXTABLE_UA(1b, 4b) \
+		     _ASM_EXTABLE_UA(2b, 4b) \
+		     : [errout] "=r" (retval), \
+		       [output] "=&A"(x) \
+		     : [lowbits] "m" (__m(__ptr)), \
+		       [highbits] "m" __m(((u32 __user *)(__ptr)) + 1), \
+		       [efault] "i" (-EFAULT), "0" (retval)); \
+})
+
+#else
+#define __get_user_asm_u64(x, ptr, retval) \
+	__get_user_asm(x, ptr, retval, "q", "=r")
+#endif
+
+#define __get_user_size(x, ptr, size, retval) \
+do { \
+	unsigned char x_u8__; \
+	\
+	retval = 0; \
+	__chk_user_ptr(ptr); \
+	switch (size) { \
+	case 1: \
+		__get_user_asm(x_u8__, ptr, retval, "b", "=q"); \
+		(x) = x_u8__; \
+		break; \
+	case 2: \
+		__get_user_asm(x, ptr, retval, "w", "=r"); \
+		break; \
+	case 4: \
+		__get_user_asm(x, ptr, retval, "l", "=r"); \
+		break; \
+	case 8: \
+		__get_user_asm_u64(x, ptr, retval); \
+		break; \
+	default: \
+		(x) = __get_user_bad(); \
+	} \
+} while (0)
+
+#define __get_user_asm(x, addr, err, itype, ltype) \
+	asm volatile("\n" \
+		     "1: mov"itype" %[umem],%[output]\n" \
+		     "2:\n" \
+		     ".section .fixup,\"ax\"\n" \
+		     "3: mov %[efault],%[errout]\n" \
+		     " xorl %k[output],%k[output]\n" \
+		     " jmp 2b\n" \
+		     ".previous\n" \
+		     _ASM_EXTABLE_UA(1b, 3b) \
+		     : [errout] "=r" (err), \
+		       [output] ltype(x) \
+		     : [umem] "m" (__m(addr)), \
+		       [efault] "i" (-EFAULT), "0" (err))
+
+#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+
+/* FIXME: this hack is definitely wrong -AK */
+struct __large_struct { unsigned long buf[100]; };
+#define __m(x) (*(struct __large_struct __user *)(x))
+
+/*
+ * Tell gcc we read from memory instead of writing: this is because
+ * we do not write to any memory gcc knows about, so there are no
+ * aliasing issues.
+ */
+#define __put_user_goto(x, addr, itype, ltype, label) \
+	asm_volatile_goto("\n" \
+		     "1: mov"itype" %0,%1\n" \
+		     _ASM_EXTABLE_UA(1b, %l2) \
+		     : : ltype(x), "m" (__m(addr)) \
+		     : : label)

 extern unsigned long
 copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
@@ -585 +440 @@
 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
 unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

-extern void __cmpxchg_wrong_size(void)
-	__compiletime_error("Bad argument size for cmpxchg");
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+unsigned long __must_check
+copy_mc_to_kernel(void *to, const void *from, unsigned len);
+#define copy_mc_to_kernel copy_mc_to_kernel

-#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \
-({ \
-	int __ret = 0; \
-	__typeof__(ptr) __uval = (uval); \
-	__typeof__(*(ptr)) __old = (old); \
-	__typeof__(*(ptr)) __new = (new); \
-	__uaccess_begin_nospec(); \
-	switch (size) { \
-	case 1: \
-	{ \
-		asm volatile("\n" \
-			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
-			"2:\n" \
-			"\t.section .fixup, \"ax\"\n" \
-			"3:\tmov %3, %0\n" \
-			"\tjmp 2b\n" \
-			"\t.previous\n" \
-			_ASM_EXTABLE(1b, 3b) \
-			: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
-			: "i" (-EFAULT), "q" (__new), "1" (__old) \
-			: "memory" \
-		); \
-		break; \
-	} \
-	case 2: \
-	{ \
-		asm volatile("\n" \
-			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
-			"2:\n" \
-			"\t.section .fixup, \"ax\"\n" \
-			"3:\tmov %3, %0\n" \
-			"\tjmp 2b\n" \
-			"\t.previous\n" \
-			_ASM_EXTABLE(1b, 3b) \
-			: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
-			: "i" (-EFAULT), "r" (__new), "1" (__old) \
-			: "memory" \
-		); \
-		break; \
-	} \
-	case 4: \
-	{ \
-		asm volatile("\n" \
-			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
-			"2:\n" \
-			"\t.section .fixup, \"ax\"\n" \
-			"3:\tmov %3, %0\n" \
-			"\tjmp 2b\n" \
-			"\t.previous\n" \
-			_ASM_EXTABLE(1b, 3b) \
-			: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
-			: "i" (-EFAULT), "r" (__new), "1" (__old) \
-			: "memory" \
-		); \
-		break; \
-	} \
-	case 8: \
-	{ \
-		if (!IS_ENABLED(CONFIG_X86_64)) \
-			__cmpxchg_wrong_size(); \
-		\
-		asm volatile("\n" \
-			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
-			"2:\n" \
-			"\t.section .fixup, \"ax\"\n" \
-			"3:\tmov %3, %0\n" \
-			"\tjmp 2b\n" \
-			"\t.previous\n" \
-			_ASM_EXTABLE(1b, 3b) \
-			: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
-			: "i" (-EFAULT), "r" (__new), "1" (__old) \
-			: "memory" \
-		); \
-		break; \
-	} \
-	default: \
-		__cmpxchg_wrong_size(); \
-	} \
-	__uaccess_end(); \
-	*__uval = __old; \
-	__ret; \
-})
-
-#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \
-({ \
-	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \
-		__user_atomic_cmpxchg_inatomic((uval), (ptr), \
-				(old), (new), sizeof(*(ptr))) : \
-		-EFAULT; \
-})
+unsigned long __must_check
+copy_mc_to_user(void *to, const void *from, unsigned len);
+#endif
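For context, copy_mc_to_kernel() is the machine-check-aware copy. A hedged sketch of a consumer, assuming the copy_from_user()-style return convention (bytes not copied, 0 on full success); the pmem-flavored function is hypothetical:

/* Hypothetical consumer: copy from persistent memory that may contain
 * poisoned (machine-check) cachelines without taking down the kernel.
 */
static int read_pmem_buf(void *dst, const void *pmem_src, unsigned len)
{
	unsigned long rem = copy_mc_to_kernel(dst, pmem_src, len);

	return rem ? -EIO : 0;
}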

 /*
  * movsl can be slow when source and dest are not both 8-byte aligned
@@ -697 +467 @@
 #endif

 /*
- * We rely on the nested NMI work to allow atomic faults from the NMI path; the
- * nested NMI paths are careful to preserve CR2.
- *
- * Caller must use pagefault_enable/disable, or run in interrupt context,
- * and also do a uaccess_ok() check
- */
-#define __copy_from_user_nmi __copy_from_user_inatomic
-
-/*
  * The "unsafe" user accesses aren't really "unsafe", but the naming
  * is a big fat warning: you have to not only do the access_ok()
  * checking before using them, but you have to surround them with the
  * user_access_begin/end() pair.
  */
-static __must_check inline bool user_access_begin(int type,
-						  const void __user *ptr,
-						  size_t len)
+static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
 {
-	if (unlikely(!access_ok(type, ptr, len)))
+	if (unlikely(!access_ok(ptr,len)))
 		return 0;
 	__uaccess_begin_nospec();
 	return 1;
 }
-
-#define user_access_begin(a, b, c)	user_access_begin(a, b, c)
+#define user_access_begin(a,b)	user_access_begin(a,b)
 #define user_access_end()	__uaccess_end()

 #define user_access_save()	smap_save()
 #define user_access_restore(x)	smap_restore(x)

-#define unsafe_put_user(x, ptr, err_label) \
-do { \
-	int __pu_err; \
-	__typeof__(*(ptr)) __pu_val = (x); \
-	__put_user_size(__pu_val, (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \
-	if (unlikely(__pu_err)) goto err_label; \
-} while (0)
+#define unsafe_put_user(x, ptr, label) \
+	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+#define unsafe_get_user(x, ptr, err_label) \
+do { \
+	__inttype(*(ptr)) __gu_val; \
+	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), err_label); \
+	(x) = (__force __typeof__(*(ptr)))__gu_val; \
+} while (0)
+#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
 #define unsafe_get_user(x, ptr, err_label) \
 do { \
 	int __gu_err; \
 	__inttype(*(ptr)) __gu_val; \
-	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \
+	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err); \
 	(x) = (__force __typeof__(*(ptr)))__gu_val; \
 	if (unlikely(__gu_err)) goto err_label; \
 } while (0)
+#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
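The intended calling pattern for the unsafe accessors, sketched (the counter function is hypothetical); note the error label must still close the access window:

/* Hypothetical example: batch two accesses inside one STAC/CLAC window. */
static int update_counter(u64 __user *ucnt)
{
	u64 val;

	if (!user_access_begin(ucnt, sizeof(*ucnt)))	/* does access_ok() */
		return -EFAULT;
	unsafe_get_user(val, ucnt, efault);
	unsafe_put_user(val + 1, ucnt, efault);
	user_access_end();
	return 0;
efault:
	user_access_end();
	return -EFAULT;
}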
+
+/*
+ * We want the unsafe accessors to always be inlined and use
+ * the error labels - thus the macro games.
+ */
+#define unsafe_copy_loop(dst, src, len, type, label) \
+	while (len >= sizeof(type)) { \
+		unsafe_put_user(*(type *)(src),(type __user *)(dst),label); \
+		dst += sizeof(type); \
+		src += sizeof(type); \
+		len -= sizeof(type); \
+	}
+
+#define unsafe_copy_to_user(_dst,_src,_len,label) \
+do { \
+	char __user *__ucu_dst = (_dst); \
+	const char *__ucu_src = (_src); \
+	size_t __ucu_len = (_len); \
+	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label); \
+	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label); \
+	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label); \
+	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label); \
+} while (0)
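The four unsafe_copy_loop() passes peel the copy down through descending power-of-two widths, so an arbitrary length is covered without a byte-at-a-time loop. Sketched usage (the struct and function are hypothetical):

/* Hypothetical example: copy a small kernel struct out in one window. */
static int export_stats(void __user *ubuf, const struct my_stats *s)
{
	if (!user_access_begin(ubuf, sizeof(*s)))
		return -EFAULT;
	unsafe_copy_to_user(ubuf, s, sizeof(*s), efault);
	user_access_end();
	return 0;
efault:
	user_access_end();
	return -EFAULT;
}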
+
+#define HAVE_GET_KERNEL_NOFAULT
+
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+#define __get_kernel_nofault(dst, src, type, err_label) \
+	__get_user_size(*((type *)(dst)), (__force type __user *)(src), \
+			sizeof(type), err_label)
+#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+	int __kr_err; \
+	\
+	__get_user_size(*((type *)(dst)), (__force type __user *)(src), \
+			sizeof(type), __kr_err); \
+	if (unlikely(__kr_err)) \
+		goto err_label; \
+} while (0)
+#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+	__put_user_size(*((type *)(src)), (__force type __user *)(dst), \
+			sizeof(type), err_label)
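These arch hooks back the generic get_kernel_nofault()/copy_from_kernel_nofault() helpers in linux/uaccess.h; a hedged sketch of a consumer (the wrapper function is hypothetical):

/* Hypothetical example: probe a possibly-bogus kernel pointer without
 * risking an oops, e.g. from a debugger or an oops printer. On x86
 * the per-type accessor above performs the fault-safe load.
 */
static bool try_read_kernel_long(const void *addr, unsigned long *out)
{
	return get_kernel_nofault(*out, (const unsigned long *)addr) == 0;
}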

 #endif /* _ASM_X86_UACCESS_H */