2024-05-14 bedbef8ad3e75a304af6361af235302bcc61d06b
kernel/include/linux/uaccess.h
@@ -2,16 +2,53 @@
 #ifndef __LINUX_UACCESS_H__
 #define __LINUX_UACCESS_H__
 
+#include <linux/fault-inject-usercopy.h>
+#include <linux/instrumented.h>
+#include <linux/minmax.h>
 #include <linux/sched.h>
 #include <linux/thread_info.h>
-#include <linux/kasan-checks.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
 
 #include <asm/uaccess.h>
+
+#ifdef CONFIG_SET_FS
+/*
+ * Force the uaccess routines to be wired up for actual userspace access,
+ * overriding any possible set_fs(KERNEL_DS) still lingering around. Undone
+ * using force_uaccess_end below.
+ */
+static inline mm_segment_t force_uaccess_begin(void)
+{
+	mm_segment_t fs = get_fs();
+
+	set_fs(USER_DS);
+	return fs;
+}
+
+static inline void force_uaccess_end(mm_segment_t oldfs)
+{
+	set_fs(oldfs);
+}
+#else /* CONFIG_SET_FS */
+typedef struct {
+	/* empty dummy */
+} mm_segment_t;
+
+#ifndef TASK_SIZE_MAX
+#define TASK_SIZE_MAX			TASK_SIZE
+#endif
+
+#define uaccess_kernel()		(false)
+#define user_addr_max()			(TASK_SIZE_MAX)
+
+static inline mm_segment_t force_uaccess_begin(void)
+{
+	return (mm_segment_t) { };
+}
+
+static inline void force_uaccess_end(mm_segment_t oldfs)
+{
+}
+#endif /* CONFIG_SET_FS */
 
 /*
  * Architectures should provide two primitives (raw_copy_{to,from}_user())
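
Note: force_uaccess_begin()/force_uaccess_end() are meant to bracket code that
must touch real user addresses regardless of a lingering set_fs(KERNEL_DS). A
minimal caller sketch under that assumption (the function name is hypothetical,
and any mm attachment needed by a kernel thread is assumed to be done already):

	static int example_read_user(const void __user *uptr, void *buf, size_t len)
	{
		mm_segment_t old_fs;
		int ret = 0;

		old_fs = force_uaccess_begin();	/* forces USER_DS on CONFIG_SET_FS */
		if (copy_from_user(buf, uptr, len))
			ret = -EFAULT;
		force_uaccess_end(old_fs);	/* restore the previous segment */
		return ret;
	}
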
@@ -58,19 +95,21 @@
  * as usual) and both source and destination can trigger faults.
  */
 
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
-	kasan_check_write(to, n);
+	instrument_copy_from_user(to, from, n);
 	check_object_size(to, n, false);
 	return raw_copy_from_user(to, from, n);
 }
 
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	might_fault();
-	kasan_check_write(to, n);
+	if (should_fail_usercopy())
+		return n;
+	instrument_copy_from_user(to, from, n);
 	check_object_size(to, n, false);
 	return raw_copy_from_user(to, from, n);
 }
@@ -88,31 +127,35 @@
  * The caller should also make sure he pins the user space address
  * so that we don't result in page fault and sleep.
  */
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
-	kasan_check_read(from, n);
+	if (should_fail_usercopy())
+		return n;
+	instrument_copy_to_user(to, from, n);
 	check_object_size(from, n, true);
 	return raw_copy_to_user(to, from, n);
 }
 
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	might_fault();
-	kasan_check_read(from, n);
+	if (should_fail_usercopy())
+		return n;
+	instrument_copy_to_user(to, from, n);
 	check_object_size(from, n, true);
 	return raw_copy_to_user(to, from, n);
 }
 
 #ifdef INLINE_COPY_FROM_USER
-static inline unsigned long
+static inline __must_check unsigned long
 _copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	unsigned long res = n;
 	might_fault();
-	if (likely(access_ok(VERIFY_READ, from, n))) {
-		kasan_check_write(to, n);
+	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
+		instrument_copy_from_user(to, from, n);
 		res = raw_copy_from_user(to, from, n);
 	}
 	if (unlikely(res))
@@ -120,23 +163,25 @@
 	return res;
 }
 #else
-extern unsigned long
+extern __must_check unsigned long
 _copy_from_user(void *, const void __user *, unsigned long);
 #endif
 
 #ifdef INLINE_COPY_TO_USER
-static inline unsigned long
+static inline __must_check unsigned long
 _copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	might_fault();
-	if (access_ok(VERIFY_WRITE, to, n)) {
-		kasan_check_read(from, n);
+	if (should_fail_usercopy())
+		return n;
+	if (access_ok(to, n)) {
+		instrument_copy_to_user(to, from, n);
 		n = raw_copy_to_user(to, from, n);
 	}
 	return n;
 }
 #else
-extern unsigned long
+extern __must_check unsigned long
 _copy_to_user(void __user *, const void *, unsigned long);
 #endif
 
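
Note: with __must_check on _copy_from_user()/_copy_to_user(), callers now have
to consume the "bytes not copied" return value. The conventional pattern, as a
sketch (struct foo_args and the function name are hypothetical):

	static long example_handler(const struct foo_args __user *uarg)
	{
		struct foo_args args;

		/* A nonzero return means that many bytes were left uncopied
		 * (including failures injected by should_fail_usercopy()). */
		if (copy_from_user(&args, uarg, sizeof(args)))
			return -EFAULT;
		/* ... operate on the kernel-space copy ... */
		return 0;
	}
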
@@ -160,9 +205,22 @@
 copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
 	might_fault();
-	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
+	if (access_ok(to, n) && access_ok(from, n))
 		n = raw_copy_in_user(to, from, n);
 	return n;
+}
+#endif
+
+#ifndef copy_mc_to_kernel
+/*
+ * Without arch opt-in this generic copy_mc_to_kernel() will not handle
+ * #MC (or arch equivalent) during source read.
+ */
+static inline unsigned long __must_check
+copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
+{
+	memcpy(dst, src, cnt);
+	return 0;
+}
 }
 #endif
 
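
Note: copy_mc_to_kernel() follows the same "bytes not copied" convention; only
architectures that override it actually recover from machine checks, and the
generic fallback above always returns 0. A hedged caller sketch (the function
name and error code choice are illustrative):

	static int example_copy_pmem(void *dst, const void *src, size_t len)
	{
		unsigned long rem = copy_mc_to_kernel(dst, src, len);

		if (rem)
			return -EIO;	/* len - rem bytes arrived before the fault */
		return 0;
	}
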
@@ -185,7 +243,6 @@
  */
 static inline void pagefault_disable(void)
 {
-	migrate_disable();
 	pagefault_disabled_inc();
 	/*
 	 * make sure to have issued the store before a pagefault
@@ -202,13 +259,15 @@
 	 */
 	barrier();
 	pagefault_disabled_dec();
-	migrate_enable();
 }
 
 /*
  * Is the pagefault handler disabled? If so, user access methods will not sleep.
  */
-#define pagefault_disabled() (current->pagefault_disabled != 0)
+static inline bool pagefault_disabled(void)
+{
+	return current->pagefault_disabled != 0;
+}
 
 /*
  * The pagefault handler is in general disabled by pagefault_disable() or
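
Note: pagefault_disable()/pagefault_enable() are what make the *_inatomic()
helpers safe in atomic context: a fault returns an error instead of sleeping.
A minimal sketch (the function is hypothetical; src is assumed to have already
passed access_ok()):

	static unsigned long example_peek_user(void *dst, const void __user *src,
					       size_t len)
	{
		unsigned long left;

		pagefault_disable();
		left = __copy_from_user_inatomic(dst, src, len);	/* never sleeps */
		pagefault_enable();
		return left;	/* nonzero: a fault was suppressed */
	}
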
@@ -224,84 +283,135 @@
 
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
-static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
-				const void __user *from, unsigned long n)
+static inline __must_check unsigned long
+__copy_from_user_inatomic_nocache(void *to, const void __user *from,
+				  unsigned long n)
 {
 	return __copy_from_user_inatomic(to, from, n);
 }
 
 #endif /* ARCH_HAS_NOCACHE_UACCESS */
 
-/*
- * probe_kernel_read(): safely attempt to read from a location
- * @dst: pointer to the buffer that shall take the data
- * @src: address to read from
- * @size: size of the data chunk
- *
- * Safely read from address @src to the buffer at @dst. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
-extern long probe_kernel_read(void *dst, const void *src, size_t size);
-extern long __probe_kernel_read(void *dst, const void *src, size_t size);
-
-/*
- * probe_user_read(): safely attempt to read from a location in user space
- * @dst: pointer to the buffer that shall take the data
- * @src: address to read from
- * @size: size of the data chunk
- *
- * Safely read from address @src to the buffer at @dst. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
-extern long probe_user_read(void *dst, const void __user *src, size_t size);
-
-/*
- * probe_kernel_write(): safely attempt to write to a location
- * @dst: address to write to
- * @src: pointer to the data that shall be written
- * @size: size of the data chunk
- *
- * Safely write to address @dst from the buffer at @src. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
-extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
-extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
-
-/*
- * probe_user_write(): safely attempt to write to a location in user space
- * @dst: address to write to
- * @src: pointer to the data that shall be written
- * @size: size of the data chunk
- *
- * Safely write to address @dst from the buffer at @src. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
-extern long notrace probe_user_write(void __user *dst, const void *src, size_t size);
-extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size);
-
-extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
-extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
-				     long count);
-extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
+extern __must_check int check_zeroed_user(const void __user *from, size_t size);
 
 /**
- * probe_kernel_address(): safely attempt to read from a location
- * @addr: address to read from
- * @retval: read into this variable
+ * copy_struct_from_user: copy a struct from userspace
+ * @dst:   Destination address, in kernel space. This buffer must be @ksize
+ *         bytes long.
+ * @ksize: Size of @dst struct.
+ * @src:   Source address, in userspace.
+ * @usize: (Alleged) size of @src struct.
+ *
+ * Copies a struct from userspace to kernel space, in a way that guarantees
+ * backwards-compatibility for struct syscall arguments (as long as future
+ * struct extensions are made such that all new fields are *appended* to the
+ * old struct, and zeroed-out new fields have the same meaning as the old
+ * struct).
+ *
+ * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
+ * The recommended usage is something like the following:
+ *
+ *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
+ *   {
+ *      int err;
+ *      struct foo karg = {};
+ *
+ *      if (usize > PAGE_SIZE)
+ *        return -E2BIG;
+ *      if (usize < FOO_SIZE_VER0)
+ *        return -EINVAL;
+ *
+ *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
+ *      if (err)
+ *        return err;
+ *
+ *      // ...
+ *   }
+ *
+ * There are three cases to consider:
+ *  * If @usize == @ksize, then it's copied verbatim.
+ *  * If @usize < @ksize, then the userspace has passed an old struct to a
+ *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
+ *    are to be zero-filled.
+ *  * If @usize > @ksize, then the userspace has passed a new struct to an
+ *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
+ *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
+ *
+ * Returns (in all cases, some data may have been copied):
+ *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
+ *  * -EFAULT: access to userspace failed.
+ */
+static __always_inline __must_check int
+copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
+		      size_t usize)
+{
+	size_t size = min(ksize, usize);
+	size_t rest = max(ksize, usize) - size;
+
+	/* Double check if ksize is larger than a known object size. */
+	if (WARN_ON_ONCE(ksize > __builtin_object_size(dst, 1)))
+		return -E2BIG;
+
+	/* Deal with trailing bytes. */
+	if (usize < ksize) {
+		memset(dst + size, 0, rest);
+	} else if (usize > ksize) {
+		int ret = check_zeroed_user(src + size, rest);
+		if (ret <= 0)
+			return ret ?: -E2BIG;
+	}
+	/* Copy the interoperable parts of the struct. */
+	if (copy_from_user(dst, src, size))
+		return -EFAULT;
+	return 0;
+}
+
+bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);
+
+long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
+long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);
+
+long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
+long notrace copy_to_user_nofault(void __user *dst, const void *src,
+		size_t size);
+
+long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
+		long count);
+
+long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
+		long count);
+long strnlen_user_nofault(const void __user *unsafe_addr, long count);
+
+/**
+ * get_kernel_nofault(): safely attempt to read from a location
+ * @val: read into this variable
+ * @ptr: address to read from
  *
  * Returns 0 on success, or -EFAULT.
  */
-#define probe_kernel_address(addr, retval) \
-	probe_kernel_read(&retval, addr, sizeof(retval))
+#define get_kernel_nofault(val, ptr) ({				\
+	const typeof(val) *__gk_ptr = (ptr);			\
+	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
+})
 
 #ifndef user_access_begin
-#define user_access_begin(type, ptr, len) access_ok(type, ptr, len)
+#define user_access_begin(ptr,len) access_ok(ptr, len)
 #define user_access_end() do { } while (0)
-#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
-#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
+#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
+#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
+#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
+#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
 static inline unsigned long user_access_save(void) { return 0UL; }
 static inline void user_access_restore(unsigned long flags) { }
 #endif
+#ifndef user_write_access_begin
+#define user_write_access_begin user_access_begin
+#define user_write_access_end user_access_end
+#endif
+#ifndef user_read_access_begin
+#define user_read_access_begin user_access_begin
+#define user_read_access_end user_access_end
+#endif
 
 #ifdef CONFIG_HARDENED_USERCOPY
 void usercopy_warn(const char *name, const char *detail, bool to_user,
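
Note: the unsafe_*() accessors introduced above are only valid inside a
user_access_begin()/user_access_end() window, with faults handled via the goto
label. The canonical pattern, sketched with a hypothetical function:

	static int example_put_flags(u32 __user *uptr, u32 flags)
	{
		if (!user_access_begin(uptr, sizeof(*uptr)))
			return -EFAULT;
		unsafe_put_user(flags, uptr, efault);	/* jumps to efault on fault */
		user_access_end();
		return 0;

	efault:
		user_access_end();
		return -EFAULT;
	}
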