--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -2,16 +2,53 @@
 #ifndef __LINUX_UACCESS_H__
 #define __LINUX_UACCESS_H__
 
+#include <linux/fault-inject-usercopy.h>
+#include <linux/instrumented.h>
+#include <linux/minmax.h>
 #include <linux/sched.h>
 #include <linux/thread_info.h>
-#include <linux/kasan-checks.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
 
 #include <asm/uaccess.h>
+
+#ifdef CONFIG_SET_FS
+/*
+ * Force the uaccess routines to be wired up for actual userspace access,
+ * overriding any possible set_fs(KERNEL_DS) still lingering around. Undone
+ * using force_uaccess_end below.
+ */
+static inline mm_segment_t force_uaccess_begin(void)
+{
+        mm_segment_t fs = get_fs();
+
+        set_fs(USER_DS);
+        return fs;
+}
+
+static inline void force_uaccess_end(mm_segment_t oldfs)
+{
+        set_fs(oldfs);
+}
+#else /* CONFIG_SET_FS */
+typedef struct {
+        /* empty dummy */
+} mm_segment_t;
+
+#ifndef TASK_SIZE_MAX
+#define TASK_SIZE_MAX           TASK_SIZE
+#endif
+
+#define uaccess_kernel()        (false)
+#define user_addr_max()         (TASK_SIZE_MAX)
+
+static inline mm_segment_t force_uaccess_begin(void)
+{
+        return (mm_segment_t) { };
+}
+
+static inline void force_uaccess_end(mm_segment_t oldfs)
+{
+}
+#endif /* CONFIG_SET_FS */
 
 /*
  * Architectures should provide two primitives (raw_copy_{to,from}_user())
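
The force_uaccess_begin()/force_uaccess_end() pair added above follows a plain
save/override/restore pattern. A minimal sketch of a caller, assuming a
hypothetical do_user_access() helper that is not part of this patch:

    static int demo_access_user(void __user *uptr)
    {
            mm_segment_t old_fs;
            int ret;

            /* Override any lingering set_fs(KERNEL_DS) for the duration. */
            old_fs = force_uaccess_begin();
            ret = do_user_access(uptr);     /* hypothetical helper */
            force_uaccess_end(old_fs);      /* restore the saved segment */
            return ret;
    }

Note that on !CONFIG_SET_FS kernels mm_segment_t is an empty struct and both
calls compile away, so callers can use the pair unconditionally.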
@@ -58,19 +95,21 @@
  * as usual) and both source and destination can trigger faults.
  */
 
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
-        kasan_check_write(to, n);
+        instrument_copy_from_user(to, from, n);
         check_object_size(to, n, false);
         return raw_copy_from_user(to, from, n);
 }
 
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
         might_fault();
-        kasan_check_write(to, n);
+        if (should_fail_usercopy())
+                return n;
+        instrument_copy_from_user(to, from, n);
         check_object_size(to, n, false);
         return raw_copy_from_user(to, from, n);
 }
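
These helpers share one convention worth keeping in mind while reading the
rest of the diff: the return value is the number of bytes that could *not* be
copied, and 0 means success (an injected should_fail_usercopy() failure
reports the full length as uncopied). A sketch of the usual caller-side
translation into -EFAULT, using a hypothetical struct demo:

    struct demo { int a, b; };      /* hypothetical */

    static int demo_fetch(struct demo *dst, const struct demo __user *src)
    {
            /* copy_from_user() returns the number of uncopied bytes. */
            if (copy_from_user(dst, src, sizeof(*dst)))
                    return -EFAULT;
            return 0;
    }

The same convention applies to the __copy_to_user*() variants in the next
hunk.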
@@ -88,31 +127,35 @@
  * The caller should also make sure he pins the user space address
  * so that we don't result in page fault and sleep.
  */
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
-        kasan_check_read(from, n);
+        if (should_fail_usercopy())
+                return n;
+        instrument_copy_to_user(to, from, n);
         check_object_size(from, n, true);
         return raw_copy_to_user(to, from, n);
 }
 
-static __always_inline unsigned long
+static __always_inline __must_check unsigned long
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
         might_fault();
-        kasan_check_read(from, n);
+        if (should_fail_usercopy())
+                return n;
+        instrument_copy_to_user(to, from, n);
         check_object_size(from, n, true);
         return raw_copy_to_user(to, from, n);
 }
 
 #ifdef INLINE_COPY_FROM_USER
-static inline unsigned long
+static inline __must_check unsigned long
 _copy_from_user(void *to, const void __user *from, unsigned long n)
 {
         unsigned long res = n;
         might_fault();
-        if (likely(access_ok(VERIFY_READ, from, n))) {
-                kasan_check_write(to, n);
+        if (!should_fail_usercopy() && likely(access_ok(from, n))) {
+                instrument_copy_from_user(to, from, n);
                 res = raw_copy_from_user(to, from, n);
         }
         if (unlikely(res))
@@ -120,23 +163,25 @@
         return res;
 }
 #else
-extern unsigned long
+extern __must_check unsigned long
 _copy_from_user(void *, const void __user *, unsigned long);
 #endif
 
 #ifdef INLINE_COPY_TO_USER
-static inline unsigned long
+static inline __must_check unsigned long
 _copy_to_user(void __user *to, const void *from, unsigned long n)
 {
         might_fault();
-        if (access_ok(VERIFY_WRITE, to, n)) {
-                kasan_check_read(from, n);
+        if (should_fail_usercopy())
+                return n;
+        if (access_ok(to, n)) {
+                instrument_copy_to_user(to, from, n);
                 n = raw_copy_to_user(to, from, n);
         }
         return n;
 }
 #else
-extern unsigned long
+extern __must_check unsigned long
 _copy_to_user(void __user *, const void *, unsigned long);
 #endif
 
@@ -160,9 +205,22 @@
 copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
         might_fault();
-        if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
+        if (access_ok(to, n) && access_ok(from, n))
                 n = raw_copy_in_user(to, from, n);
         return n;
+}
+#endif
+
+#ifndef copy_mc_to_kernel
+/*
+ * Without arch opt-in this generic copy_mc_to_kernel() will not handle
+ * #MC (or arch equivalent) during source read.
+ */
+static inline unsigned long __must_check
+copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
+{
+        memcpy(dst, src, cnt);
+        return 0;
 }
 #endif
 
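
copy_mc_to_kernel() uses the same bytes-not-copied return convention;
architectures that opt in recover from a machine check (#MC) during the
source read instead of crashing, while the generic fallback above is a plain
memcpy(). A sketch of a consumer, with an illustrative (not mandated) -EIO
policy for poisoned data:

    static int demo_read_possibly_poisoned(void *dst, const void *src,
                                           size_t len)
    {
            unsigned long rem = copy_mc_to_kernel(dst, src, len);

            if (rem)
                    return -EIO;    /* illustrative: surface poisoned data */
            return 0;
    }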
@@ -206,7 +264,10 @@
 /*
  * Is the pagefault handler disabled? If so, user access methods will not sleep.
  */
-#define pagefault_disabled() (current->pagefault_disabled != 0)
+static inline bool pagefault_disabled(void)
+{
+        return current->pagefault_disabled != 0;
+}
 
 /*
  * The pagefault handler is in general disabled by pagefault_disable() or
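
pagefault_disabled() reports the state toggled by pagefault_disable() and
pagefault_enable(), which is what makes the *_inatomic copy variants safe in
atomic context: faults fail fast instead of sleeping. The canonical pairing,
sketched with illustrative error handling:

    static int demo_peek_user(void *dst, const void __user *src, size_t len)
    {
            unsigned long rem;

            pagefault_disable();    /* faults now fail instead of sleeping */
            rem = __copy_from_user_inatomic(dst, src, len);
            pagefault_enable();

            return rem ? -EFAULT : 0;
    }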
@@ -222,84 +283,135 @@
 
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
-static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
-                                const void __user *from, unsigned long n)
+static inline __must_check unsigned long
+__copy_from_user_inatomic_nocache(void *to, const void __user *from,
+                                  unsigned long n)
 {
         return __copy_from_user_inatomic(to, from, n);
 }
 
 #endif /* ARCH_HAS_NOCACHE_UACCESS */
 
-/*
- * probe_kernel_read(): safely attempt to read from a location
- * @dst: pointer to the buffer that shall take the data
- * @src: address to read from
- * @size: size of the data chunk
- *
- * Safely read from address @src to the buffer at @dst. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
-extern long probe_kernel_read(void *dst, const void *src, size_t size);
-extern long __probe_kernel_read(void *dst, const void *src, size_t size);
-
-/*
- * probe_user_read(): safely attempt to read from a location in user space
- * @dst: pointer to the buffer that shall take the data
- * @src: address to read from
- * @size: size of the data chunk
- *
- * Safely read from address @src to the buffer at @dst. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
-extern long probe_user_read(void *dst, const void __user *src, size_t size);
-
-/*
- * probe_kernel_write(): safely attempt to write to a location
- * @dst: address to write to
- * @src: pointer to the data that shall be written
- * @size: size of the data chunk
- *
- * Safely write to address @dst from the buffer at @src. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
-extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
-extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
-
-/*
- * probe_user_write(): safely attempt to write to a location in user space
- * @dst: address to write to
- * @src: pointer to the data that shall be written
- * @size: size of the data chunk
- *
- * Safely write to address @dst from the buffer at @src. If a kernel fault
- * happens, handle that and return -EFAULT.
- */
-extern long notrace probe_user_write(void __user *dst, const void *src, size_t size);
-extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size);
-
-extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
-extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
-                                     long count);
-extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
+extern __must_check int check_zeroed_user(const void __user *from, size_t size);
 
 /**
- * probe_kernel_address(): safely attempt to read from a location
- * @addr: address to read from
- * @retval: read into this variable
+ * copy_struct_from_user: copy a struct from userspace
+ * @dst:   Destination address, in kernel space. This buffer must be @ksize
+ *         bytes long.
+ * @ksize: Size of @dst struct.
+ * @src:   Source address, in userspace.
+ * @usize: (Alleged) size of @src struct.
+ *
+ * Copies a struct from userspace to kernel space, in a way that guarantees
+ * backwards-compatibility for struct syscall arguments (as long as future
+ * struct extensions are made such that all new fields are *appended* to the
+ * old struct, and zeroed-out new fields have the same meaning as the old
+ * struct).
+ *
+ * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
+ * The recommended usage is something like the following:
+ *
+ *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
+ *   {
+ *      int err;
+ *      struct foo karg = {};
+ *
+ *      if (usize > PAGE_SIZE)
+ *        return -E2BIG;
+ *      if (usize < FOO_SIZE_VER0)
+ *        return -EINVAL;
+ *
+ *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
+ *      if (err)
+ *        return err;
+ *
+ *      // ...
+ *   }
+ *
+ * There are three cases to consider:
+ *  * If @usize == @ksize, then it's copied verbatim.
+ *  * If @usize < @ksize, then the userspace has passed an old struct to a
+ *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
+ *    are to be zero-filled.
+ *  * If @usize > @ksize, then the userspace has passed a new struct to an
+ *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
+ *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
+ *
+ * Returns (in all cases, some data may have been copied):
+ *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
+ *  * -EFAULT: access to userspace failed.
+ */
+static __always_inline __must_check int
+copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
+                      size_t usize)
+{
+        size_t size = min(ksize, usize);
+        size_t rest = max(ksize, usize) - size;
+
+        /* Double check if ksize is larger than a known object size. */
+        if (WARN_ON_ONCE(ksize > __builtin_object_size(dst, 1)))
+                return -E2BIG;
+
+        /* Deal with trailing bytes. */
+        if (usize < ksize) {
+                memset(dst + size, 0, rest);
+        } else if (usize > ksize) {
+                int ret = check_zeroed_user(src + size, rest);
+                if (ret <= 0)
+                        return ret ?: -E2BIG;
+        }
+        /* Copy the interoperable parts of the struct. */
+        if (copy_from_user(dst, src, size))
+                return -EFAULT;
+        return 0;
+}
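
To make the append-only extension rule in the comment concrete, here is a
hypothetical struct evolution in the style this helper is designed for
(struct foo and FOO_SIZE_VER0 are stand-ins, not defined by this patch):

    struct foo {
            __u64 flags;            /* v0 field; FOO_SIZE_VER0 ends here */
            __u64 new_feature;      /* appended later; zero = old behaviour */
    };
    #define FOO_SIZE_VER0   8       /* sizeof(struct foo) before extension */

An old userspace passing usize == FOO_SIZE_VER0 gets new_feature zero-filled;
a new userspace on an old kernel is accepted only if the bytes the kernel
does not understand are all zero.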
+
+bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);
+
+long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
+long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);
+
+long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
+long notrace copy_to_user_nofault(void __user *dst, const void *src,
+                size_t size);
+
+long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
+                long count);
+
+long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
+                long count);
+long strnlen_user_nofault(const void __user *unsafe_addr, long count);
+
+/**
+ * get_kernel_nofault(): safely attempt to read from a location
+ * @val: read into this variable
+ * @ptr: address to read from
  *
  * Returns 0 on success, or -EFAULT.
  */
-#define probe_kernel_address(addr, retval)              \
-        probe_kernel_read(&retval, addr, sizeof(retval))
+#define get_kernel_nofault(val, ptr) ({                         \
+        const typeof(val) *__gk_ptr = (ptr);                    \
+        copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
+})
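
A sketch of get_kernel_nofault() in the kind of debugging or unwinding path
it is meant for, where the pointer may be bogus (demo_peek_kernel() is
hypothetical):

    static int demo_peek_kernel(const unsigned long *addr, unsigned long *out)
    {
            unsigned long val;

            /* Returns -EFAULT instead of oopsing if addr is unmapped. */
            if (get_kernel_nofault(val, addr))
                    return -EFAULT;
            *out = val;
            return 0;
    }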
 
 #ifndef user_access_begin
-#define user_access_begin(type, ptr, len) access_ok(type, ptr, len)
+#define user_access_begin(ptr,len) access_ok(ptr, len)
 #define user_access_end() do { } while (0)
-#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
-#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
+#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
+#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
+#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
+#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
 static inline unsigned long user_access_save(void) { return 0UL; }
 static inline void user_access_restore(unsigned long flags) { }
 #endif
+#ifndef user_write_access_begin
+#define user_write_access_begin user_access_begin
+#define user_write_access_end user_access_end
+#endif
+#ifndef user_read_access_begin
+#define user_read_access_begin user_access_begin
+#define user_read_access_end user_access_end
+#endif
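
The unsafe_*() accessors are only valid between user_access_begin() and
user_access_end(), and the end call is required on the fault path as well. A
minimal sketch of the canonical pattern (demo_put_value() is hypothetical):

    static int demo_put_value(u32 __user *uptr, u32 val)
    {
            if (!user_access_begin(uptr, sizeof(*uptr)))
                    return -EFAULT;
            unsafe_put_user(val, uptr, efault);
            user_access_end();
            return 0;

    efault:
            user_access_end();      /* must also run on the fault path */
            return -EFAULT;
    }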
 
 #ifdef CONFIG_HARDENED_USERCOPY
 void usercopy_warn(const char *name, const char *detail, bool to_user,
---|