2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/arch/powerpc/include/asm/uaccess.h
@@ -8,64 +8,21 @@
 #include <asm/extable.h>
 #include <asm/kup.h>
 
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not.  If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- *
- * The fs/ds values are now the highest legal address in the "segment".
- * This simplifies the checking in the routines below.
- */
-
-#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
-
-#define KERNEL_DS	MAKE_MM_SEG(~0UL)
 #ifdef __powerpc64__
 /* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
-#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
+#define TASK_SIZE_MAX		TASK_SIZE_USER64
 #else
-#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
+#define TASK_SIZE_MAX		TASK_SIZE
 #endif
 
-#define get_ds()	(KERNEL_DS)
-#define get_fs()	(current->thread.addr_limit)
-
-static inline void set_fs(mm_segment_t fs)
+static inline bool __access_ok(unsigned long addr, unsigned long size)
 {
-	current->thread.addr_limit = fs;
-	/* On user-mode return check addr_limit (fs) is correct */
-	set_thread_flag(TIF_FSCHECK);
+	return addr < TASK_SIZE_MAX && size <= TASK_SIZE_MAX - addr;
 }
 
-#define segment_eq(a, b)	((a).seg == (b).seg)
-
-#define user_addr_max()	(get_fs().seg)
-
-#ifdef __powerpc64__
-/*
- * This check is sufficient because there is a large enough
- * gap between user addresses and the kernel addresses
- */
-#define __access_ok(addr, size, segment)	\
-	(((addr) <= (segment).seg) && ((size) <= (segment).seg))
-
-#else
-
-static inline int __access_ok(unsigned long addr, unsigned long size,
-			mm_segment_t seg)
-{
-	if (addr > seg.seg)
-		return 0;
-	return (size == 0 || size - 1 <= seg.seg - addr);
-}
-
-#endif
-
-#define access_ok(type, addr, size)		\
-	(__chk_user_ptr(addr), (void)(type),	\
-	 __access_ok((__force unsigned long)(addr), (size), get_fs()))
+#define access_ok(addr, size)		\
+	(__chk_user_ptr(addr),		\
+	 __access_ok((unsigned long)(addr), (size)))
 
 /*
  * These are the main single-value transfer routines. They automatically
@@ -94,66 +51,75 @@
 #define __get_user(x, ptr) \
 	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
 #define __put_user(x, ptr) \
-	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), true)
+	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+#define __put_user_goto(x, ptr, label) \
+	__put_user_nocheck_goto((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)
 
 #define __get_user_allowed(x, ptr) \
 	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)
-#define __put_user_allowed(x, ptr) \
-	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), false)
 
 #define __get_user_inatomic(x, ptr) \
 	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
 #define __put_user_inatomic(x, ptr) \
 	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
 
+#ifdef CONFIG_PPC64
+
+#define ___get_user_instr(gu_op, dest, ptr)				\
+({									\
+	long __gui_ret = 0;						\
+	unsigned long __gui_ptr = (unsigned long)ptr;			\
+	struct ppc_inst __gui_inst;					\
+	unsigned int __prefix, __suffix;				\
+	__gui_ret = gu_op(__prefix, (unsigned int __user *)__gui_ptr);	\
+	if (__gui_ret == 0) {						\
+		if ((__prefix >> 26) == OP_PREFIX) {			\
+			__gui_ret = gu_op(__suffix,			\
+				(unsigned int __user *)__gui_ptr + 1);	\
+			__gui_inst = ppc_inst_prefix(__prefix,		\
+						     __suffix);		\
+		} else {						\
+			__gui_inst = ppc_inst(__prefix);		\
+		}							\
+		if (__gui_ret == 0)					\
+			(dest) = __gui_inst;				\
+	}								\
+	__gui_ret;							\
+})
+
+#define get_user_instr(x, ptr) \
+	___get_user_instr(get_user, x, ptr)
+
+#define __get_user_instr(x, ptr) \
+	___get_user_instr(__get_user, x, ptr)
+
+#define __get_user_instr_inatomic(x, ptr) \
+	___get_user_instr(__get_user_inatomic, x, ptr)
+
+#else /* !CONFIG_PPC64 */
+#define get_user_instr(x, ptr) \
+	get_user((x).val, (u32 __user *)(ptr))
+
+#define __get_user_instr(x, ptr) \
+	__get_user_nocheck((x).val, (u32 __user *)(ptr), sizeof(u32), true)
+
+#define __get_user_instr_inatomic(x, ptr) \
+	__get_user_nosleep((x).val, (u32 __user *)(ptr), sizeof(u32))
+
+#endif /* CONFIG_PPC64 */
+
 extern long __put_user_bad(void);
 
-/*
- * We don't tell gcc that we are accessing memory, but this is OK
- * because we do not write to any memory gcc knows about, so there
- * are no aliasing issues.
- */
-#define __put_user_asm(x, addr, err, op)			\
-	__asm__ __volatile__(					\
-		"1:	" op " %1,0(%2)	# put_user\n"		\
-		"2:\n"						\
-		".section .fixup,\"ax\"\n"			\
-		"3:	li %0,%3\n"				\
-		"	b 2b\n"					\
-		".previous\n"					\
-		EX_TABLE(1b, 3b)				\
-		: "=r" (err)					\
-		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
-
-#ifdef __powerpc64__
-#define __put_user_asm2(x, ptr, retval)				\
-	__put_user_asm(x, ptr, retval, "std")
-#else /* __powerpc64__ */
-#define __put_user_asm2(x, addr, err)				\
-	__asm__ __volatile__(					\
-		"1:	stw %1,0(%2)\n"				\
-		"2:	stw %1+1,4(%2)\n"			\
-		"3:\n"						\
-		".section .fixup,\"ax\"\n"			\
-		"4:	li %0,%3\n"				\
-		"	b 3b\n"					\
-		".previous\n"					\
-		EX_TABLE(1b, 4b)				\
-		EX_TABLE(2b, 4b)				\
-		: "=r" (err)					\
-		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
-#endif /* __powerpc64__ */
-
 #define __put_user_size_allowed(x, ptr, size, retval)		\
 do {								\
+	__label__ __pu_failed;					\
+								\
 	retval = 0;						\
-	switch (size) {						\
-	case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
-	case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
-	case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
-	case 8: __put_user_asm2(x, ptr, retval); break;		\
-	default: __put_user_bad();				\
-	}							\
+	__put_user_size_goto(x, ptr, size, __pu_failed);	\
+	break;							\
+								\
+__pu_failed:							\
+	retval = -EFAULT;					\
 } while (0)
 
 #define __put_user_size(x, ptr, size, retval) \
@@ -163,7 +129,7 @@
 	prevent_write_to_user(ptr, size);			\
 } while (0)
 
-#define __put_user_nocheck(x, ptr, size, do_allow)		\
+#define __put_user_nocheck(x, ptr, size)			\
 ({								\
 	long __pu_err;						\
 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
@@ -173,10 +139,7 @@
 	if (!is_kernel_addr((unsigned long)__pu_addr))		\
 		might_fault();					\
 	__chk_user_ptr(__pu_addr);				\
-	if (do_allow)								\
-		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);	\
-	else									\
-		__put_user_size_allowed(__pu_val, __pu_addr, __pu_size, __pu_err); \
+	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);	\
 								\
 	__pu_err;						\
 })
@@ -189,7 +152,7 @@
 	__typeof__(size) __pu_size = (size);			\
 								\
 	might_fault();						\
-	if (access_ok(VERIFY_WRITE, __pu_addr, __pu_size))	\
+	if (access_ok(__pu_addr, __pu_size))			\
 		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
 								\
 	__pu_err;						\
@@ -209,6 +172,55 @@
 })
 
 
+/*
+ * We don't tell gcc that we are accessing memory, but this is OK
+ * because we do not write to any memory gcc knows about, so there
+ * are no aliasing issues.
+ */
+#define __put_user_asm_goto(x, addr, label, op)			\
+	asm_volatile_goto(					\
+		"1:	" op "%U1%X1 %0,%1	# put_user\n"	\
+		EX_TABLE(1b, %l2)				\
+		:						\
+		: "r" (x), "m"UPD_CONSTR (*addr)		\
+		:						\
+		: label)
+
+#ifdef __powerpc64__
+#define __put_user_asm2_goto(x, ptr, label)			\
+	__put_user_asm_goto(x, ptr, label, "std")
+#else /* __powerpc64__ */
+#define __put_user_asm2_goto(x, addr, label)			\
+	asm_volatile_goto(					\
+		"1:	stw%X1 %0, %1\n"			\
+		"2:	stw%X1 %L0, %L1\n"			\
+		EX_TABLE(1b, %l2)				\
+		EX_TABLE(2b, %l2)				\
+		:						\
+		: "r" (x), "m" (*addr)				\
+		:						\
+		: label)
+#endif /* __powerpc64__ */
+
+#define __put_user_size_goto(x, ptr, size, label)		\
+do {								\
+	switch (size) {						\
+	case 1: __put_user_asm_goto(x, ptr, label, "stb"); break;	\
+	case 2: __put_user_asm_goto(x, ptr, label, "sth"); break;	\
+	case 4: __put_user_asm_goto(x, ptr, label, "stw"); break;	\
+	case 8: __put_user_asm2_goto(x, ptr, label); break;	\
+	default: __put_user_bad();				\
+	}							\
+} while (0)
+
+#define __put_user_nocheck_goto(x, ptr, size, label)		\
+do {								\
+	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
+	__chk_user_ptr(ptr);					\
+	__put_user_size_goto((x), __pu_addr, (size), label);	\
+} while (0)
+
+
 extern long __get_user_bad(void);
 
 /*
@@ -217,8 +229,11 @@
  */
 #define __get_user_atomic_128_aligned(kaddr, uaddr, err)	\
 	__asm__ __volatile__(				\
+		".machine push\n"			\
+		".machine altivec\n"			\
 		"1:	lvx  0,0,%1	# get user\n"	\
 		" 	stvx 0,0,%2	# put kernel\n"	\
+		".machine pop\n"			\
 		"2:\n"					\
 		".section .fixup,\"ax\"\n"		\
 		"3:	li %0,%3\n"			\
@@ -230,7 +245,7 @@
 
 #define __get_user_asm(x, addr, err, op)		\
 	__asm__ __volatile__(				\
-		"1:	"op" %1,0(%2)	# get_user\n"	\
+		"1:	"op"%U2%X2 %1, %2	# get_user\n"	\
 		"2:\n"					\
 		".section .fixup,\"ax\"\n"		\
 		"3:	li %0,%3\n"			\
@@ -239,7 +254,7 @@
 		".previous\n"				\
 		EX_TABLE(1b, 3b)			\
 		: "=r" (err), "=r" (x)			\
-		: "b" (addr), "i" (-EFAULT), "0" (err))
+		: "m"UPD_CONSTR (*addr), "i" (-EFAULT), "0" (err))
 
 #ifdef __powerpc64__
 #define __get_user_asm2(x, addr, err)			\
@@ -247,8 +262,8 @@
 #else /* __powerpc64__ */
 #define __get_user_asm2(x, addr, err)			\
 	__asm__ __volatile__(				\
-		"1:	lwz %1,0(%2)\n"			\
-		"2:	lwz %1+1,4(%2)\n"		\
+		"1:	lwz%X2 %1, %2\n"		\
+		"2:	lwz%X2 %L1, %L2\n"		\
 		"3:\n"					\
 		".section .fixup,\"ax\"\n"		\
 		"4:	li %0,%3\n"			\
@@ -259,7 +274,7 @@
 		EX_TABLE(1b, 4b)			\
 		EX_TABLE(2b, 4b)			\
 		: "=r" (err), "=&r" (x)			\
-		: "b" (addr), "i" (-EFAULT), "0" (err))
+		: "m" (*addr), "i" (-EFAULT), "0" (err))
 #endif /* __powerpc64__ */
 
 #define __get_user_size_allowed(x, ptr, size, retval)		\
@@ -269,10 +284,10 @@
 	if (size > sizeof(x))					\
 		(x) = __get_user_bad();				\
 	switch (size) {						\
-	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
-	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
-	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
-	case 8: __get_user_asm2(x, ptr, retval); break;		\
+	case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break;	\
+	case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break;	\
+	case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break;	\
+	case 8: __get_user_asm2(x, (u64 __user *)ptr, retval); break;		\
 	default: (x) = __get_user_bad();			\
 	}							\
 } while (0)
@@ -299,7 +314,7 @@
 	__typeof__(size) __gu_size = (size);			\
 								\
 	__chk_user_ptr(__gu_addr);				\
-	if (!is_kernel_addr((unsigned long)__gu_addr))		\
+	if (do_allow && !is_kernel_addr((unsigned long)__gu_addr))	\
 		might_fault();					\
 	barrier_nospec();					\
 	if (do_allow)						\
@@ -319,7 +334,7 @@
 	__typeof__(size) __gu_size = (size);			\
 								\
 	might_fault();						\
-	if (access_ok(VERIFY_READ, __gu_addr, __gu_size)) {	\
+	if (access_ok(__gu_addr, __gu_size)) {			\
 		barrier_nospec();				\
 		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
 	}							\
@@ -349,6 +364,32 @@
 extern unsigned long __copy_tofrom_user(void __user *to,
 		const void __user *from, unsigned long size);
 
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+unsigned long __must_check
+copy_mc_generic(void *to, const void *from, unsigned long size);
+
+static inline unsigned long __must_check
+copy_mc_to_kernel(void *to, const void *from, unsigned long size)
+{
+	return copy_mc_generic(to, from, size);
+}
+#define copy_mc_to_kernel copy_mc_to_kernel
+
+static inline unsigned long __must_check
+copy_mc_to_user(void __user *to, const void *from, unsigned long n)
+{
+	if (likely(check_copy_size(from, n, true))) {
+		if (access_ok(to, n)) {
+			allow_write_to_user(to, n);
+			n = copy_mc_generic((void *)to, from, n);
+			prevent_write_to_user(to, n);
+		}
+	}
+
+	return n;
+}
+#endif
+
 #ifdef __powerpc64__
 static inline unsigned long
 raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
@@ -356,9 +397,9 @@
 	unsigned long ret;
 
 	barrier_nospec();
-	allow_user_access(to, from, n);
+	allow_read_write_user(to, from, n);
 	ret = __copy_tofrom_user(to, from, n);
-	prevent_user_access(to, from, n);
+	prevent_read_write_user(to, from, n);
 	return ret;
 }
 #endif /* __powerpc64__ */
@@ -443,7 +484,7 @@
 {
 	unsigned long ret = size;
 	might_fault();
-	if (likely(access_ok(VERIFY_WRITE, addr, size))) {
+	if (likely(access_ok(addr, size))) {
 		allow_write_to_user(addr, size);
 		ret = __arch_clear_user(addr, size);
 		prevent_write_to_user(addr, size);
@@ -464,13 +505,88 @@
 extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
 	size_t len);
 
-#define user_access_begin(type, ptr, len) access_ok(type, ptr, len)
-#define user_access_end()		  prevent_user_access(NULL, NULL, ~0ul)
+static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
+{
+	if (unlikely(!access_ok(ptr, len)))
+		return false;
+
+	might_fault();
+
+	allow_read_write_user((void __user *)ptr, ptr, len);
+	return true;
+}
+#define user_access_begin	user_access_begin
+#define user_access_end		prevent_current_access_user
+#define user_access_save	prevent_user_access_return
+#define user_access_restore	restore_user_access
+
+static __must_check inline bool
+user_read_access_begin(const void __user *ptr, size_t len)
+{
+	if (unlikely(!access_ok(ptr, len)))
+		return false;
+
+	might_fault();
+
+	allow_read_from_user(ptr, len);
+	return true;
+}
+#define user_read_access_begin	user_read_access_begin
+#define user_read_access_end	prevent_current_read_from_user
+
+static __must_check inline bool
+user_write_access_begin(const void __user *ptr, size_t len)
+{
+	if (unlikely(!access_ok(ptr, len)))
+		return false;
+
+	might_fault();
+
+	allow_write_to_user((void __user *)ptr, len);
+	return true;
+}
+#define user_write_access_begin	user_write_access_begin
+#define user_write_access_end	prevent_current_write_to_user
 
 #define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
 #define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
-#define unsafe_put_user(x, p, e) unsafe_op_wrap(__put_user_allowed(x, p), e)
+#define unsafe_put_user(x, p, e) __put_user_goto(x, p, e)
+
 #define unsafe_copy_to_user(d, s, l, e) \
-	unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e)
+do {									\
+	u8 __user *_dst = (u8 __user *)(d);				\
+	const u8 *_src = (const u8 *)(s);				\
+	size_t _len = (l);						\
+	int _i;								\
+									\
+	for (_i = 0; _i < (_len & ~(sizeof(long) - 1)); _i += sizeof(long)) \
+		__put_user_goto(*(long*)(_src + _i), (long __user *)(_dst + _i), e); \
+	if (IS_ENABLED(CONFIG_PPC64) && (_len & 4)) {			\
+		__put_user_goto(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e); \
+		_i += 4;						\
+	}								\
+	if (_len & 2) {							\
+		__put_user_goto(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e); \
+		_i += 2;						\
+	}								\
+	if (_len & 1)							\
+		__put_user_goto(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e); \
+} while (0)
+
+#define HAVE_GET_KERNEL_NOFAULT
+
+#define __get_kernel_nofault(dst, src, type, err_label)			\
+do {									\
+	int __kr_err;							\
+									\
+	__get_user_size_allowed(*((type *)(dst)), (__force type __user *)(src),\
+				sizeof(type), __kr_err);		\
+	if (unlikely(__kr_err))						\
+		goto err_label;						\
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label)			\
+	__put_user_size_goto(*((type *)(src)),				\
+		(__force type __user *)(dst), sizeof(type), err_label)
 
 #endif /* _ARCH_POWERPC_UACCESS_H */
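
Editor's note (not part of the patch): the replacement __access_ok() in the first hunk is written so the range check cannot wrap. A naive `addr + size <= TASK_SIZE_MAX` comparison overflows for very large sizes and wrongly succeeds; testing `size` against the remaining room `TASK_SIZE_MAX - addr` cannot wrap, because `addr < TASK_SIZE_MAX` has already been established. A standalone C sketch of the same arithmetic, where DEMO_TASK_SIZE_MAX is an arbitrary stand-in for the real per-arch limit:

#include <stdbool.h>
#include <stdio.h>

/* Arbitrary stand-in for the kernel's per-arch user address limit. */
#define DEMO_TASK_SIZE_MAX	0x0010000000000000UL

/* Same shape as the patched __access_ok(): overflow-safe range check. */
static bool demo_access_ok(unsigned long addr, unsigned long size)
{
	return addr < DEMO_TASK_SIZE_MAX &&
	       size <= DEMO_TASK_SIZE_MAX - addr;
}

int main(void)
{
	/* A naive addr + size comparison would wrap and accept this. */
	printf("%d\n", demo_access_ok(0x1000, ~0UL));	/* prints 0 */
	printf("%d\n", demo_access_ok(0x1000, 0x1000));	/* prints 1 */
	return 0;
}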
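
A second sketch showing how the user_write_access_begin()/unsafe_put_user()/user_write_access_end() interface added above is typically consumed by callers. The function demo_put_u32() is hypothetical; only the begin/end helpers and unsafe_put_user() come from the patch. Note that the access window must be closed on both the success path and the fault path:

#include <linux/uaccess.h>	/* user_write_access_begin() and friends */

/* Hypothetical caller: store one u32 to user space with KUAP open/close. */
static int demo_put_u32(u32 __user *p, u32 val)
{
	/* Checks access_ok() and opens the user write window (KUAP). */
	if (!user_write_access_begin(p, sizeof(*p)))
		return -EFAULT;

	/* A faulting store branches straight to the label via asm goto. */
	unsafe_put_user(val, p, efault);

	user_write_access_end();	/* close the window: success path */
	return 0;

efault:
	user_write_access_end();	/* close the window: fault path */
	return -EFAULT;
}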