2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/arch/arm64/include/asm/uaccess.h
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Based on arch/arm/include/asm/uaccess.h
  *
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 #ifndef __ASM_UACCESS_H
 #define __ASM_UACCESS_H
@@ -30,12 +19,14 @@
 #include <linux/string.h>
 
 #include <asm/cpufeature.h>
+#include <asm/mmu.h>
+#include <asm/mte.h>
 #include <asm/ptrace.h>
 #include <asm/memory.h>
-#include <asm/compiler.h>
 #include <asm/extable.h>
 
-#define get_ds()	(KERNEL_DS)
+#define HAVE_GET_KERNEL_NOFAULT
+
 #define get_fs()	(current_thread_info()->addr_limit)
 
 static inline void set_fs(mm_segment_t fs)
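
Note: defining HAVE_GET_KERNEL_NOFAULT advertises that this header supplies __get_kernel_nofault()/__put_kernel_nofault() (added further down), so the generic linux/uaccess.h can build get_kernel_nofault() and copy_from_kernel_nofault() on top of them rather than falling back to set_fs(KERNEL_DS). A minimal caller-side sketch; the function name and pointer are hypothetical:

#include <linux/uaccess.h>

/* Hypothetical helper: read a word from a possibly-unmapped kernel address. */
static int peek_kernel_word(const unsigned long *maybe_bad, unsigned long *out)
{
	/* Returns 0 on success, -EFAULT instead of faulting. */
	return copy_from_kernel_nofault(out, maybe_bad, sizeof(*out));
}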
@@ -46,8 +37,7 @@
 	 * Prevent a mispredicted conditional call to set_fs from forwarding
 	 * the wrong address limit to access_ok under speculation.
 	 */
-	dsb(nsh);
-	isb();
+	spec_bar();
 
 	/* On user-mode return, check fs is correct */
 	set_thread_flag(TIF_FSCHECK);
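
Note: spec_bar() collapses the open-coded dsb(nsh)+isb() pair into a single speculation-barrier helper, which can be patched at runtime to the cheaper ARMv8.5 SB instruction where available. Its definition in asm/barrier.h is roughly the following (quoted from memory, illustrative rather than authoritative):

#define spec_bar()	asm volatile(ALTERNATIVE("dsb nsh\nisb\n",		\
						 SB_BARRIER_INSN"nop\n",	\
						 ARM64_HAS_SB))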
@@ -63,7 +53,7 @@
 			CONFIG_ARM64_UAO));
 }
 
-#define segment_eq(a, b)	((a) == (b))
+#define uaccess_kernel()	(get_fs() == KERNEL_DS)
 
 /*
  * Test whether a block of memory is a valid user space address.
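
Note: segment_eq(a, b) existed only so generic code could implement uaccess_kernel(); the patch defines uaccess_kernel() directly instead. A hypothetical sketch of the kind of check callers make (names are illustrative):

/* Hypothetical: reject a path that must only operate on user pointers. */
static int user_only_clear(void __user *buf, size_t len)
{
	if (WARN_ON_ONCE(uaccess_kernel()))	/* set_fs(KERNEL_DS) active? */
		return -EINVAL;
	return clear_user(buf, len) ? -EFAULT : 0;
}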
@@ -106,7 +96,7 @@
 	return ret;
 }
 
-#define access_ok(type, addr, size)	__range_ok(addr, size)
+#define access_ok(addr, size)	__range_ok(addr, size)
 #define user_addr_max			get_fs
 
 #define _ASM_EXTABLE(from, to)						\
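
Note: this matches the v5.0 tree-wide change that dropped access_ok()'s unused first argument; VERIFY_READ/VERIFY_WRITE no longer exist. Call sites change as in this sketch (function name hypothetical):

static int check_user_range(void __user *buf, size_t len)
{
	/* v4.x spelling was: access_ok(VERIFY_WRITE, buf, len) */
	if (!access_ok(buf, len))
		return -EFAULT;
	return 0;
}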
@@ -126,8 +116,8 @@
 	local_irq_save(flags);
 	ttbr = read_sysreg(ttbr1_el1);
 	ttbr &= ~TTBR_ASID_MASK;
-	/* reserved_ttbr0 placed before swapper_pg_dir */
-	write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
+	/* reserved_pg_dir placed before swapper_pg_dir */
+	write_sysreg(ttbr - PAGE_SIZE, ttbr0_el1);
 	isb();
 	/* Set reserved ASID */
 	write_sysreg(ttbr, ttbr1_el1);
@@ -213,13 +203,63 @@
 			CONFIG_ARM64_PAN));				\
 } while (0)
 
-static inline void uaccess_disable(void)
+/*
+ * The Tag Check Flag (TCF) mode for MTE is per EL, hence TCF0
+ * affects EL0 and TCF affects EL1 irrespective of which TTBR is
+ * used.
+ * The kernel accesses TTBR0 usually with LDTR/STTR instructions
+ * when UAO is available, so these would act as EL0 accesses using
+ * TCF0.
+ * However futex.h code uses exclusives which would be executed as
+ * EL1, this can potentially cause a tag check fault even if the
+ * user disables TCF0.
+ *
+ * To address the problem we set the PSTATE.TCO bit in uaccess_enable()
+ * and reset it in uaccess_disable().
+ *
+ * The Tag check override (TCO) bit disables temporarily the tag checking
+ * preventing the issue.
+ */
+static inline void __uaccess_disable_tco(void)
 {
+	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
+				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+}
+
+static inline void __uaccess_enable_tco(void)
+{
+	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
+				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+}
+
+/*
+ * These functions disable tag checking only if in MTE async mode
+ * since the sync mode generates exceptions synchronously and the
+ * nofault or load_unaligned_zeropad can handle them.
+ */
+static inline void __uaccess_disable_tco_async(void)
+{
+	if (system_uses_mte_async_mode())
+		__uaccess_disable_tco();
+}
+
+static inline void __uaccess_enable_tco_async(void)
+{
+	if (system_uses_mte_async_mode())
+		__uaccess_enable_tco();
+}
+
+static inline void uaccess_disable_privileged(void)
+{
+	__uaccess_disable_tco();
+
 	__uaccess_disable(ARM64_HAS_PAN);
 }
 
-static inline void uaccess_enable(void)
+static inline void uaccess_enable_privileged(void)
 {
+	__uaccess_enable_tco();
+
 	__uaccess_enable(ARM64_HAS_PAN);
 }
 
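Note: uaccess_enable()/uaccess_disable() become uaccess_enable_privileged()/uaccess_disable_privileged(). These brackets are for code such as futex.h that accesses user memory with ordinary EL1 instructions (LDXR/STXR exclusives rather than LDTR/STTR), so they must both clear PAN and set PSTATE.TCO. A hedged sketch of the pattern, simplified from the futex code and omitting the fault fixup (_ASM_EXTABLE) a real version needs:

/* Sketch only: exchange a value with user memory via EL1 exclusives. */
static int user_xchg_sketch(u32 __user *uaddr, u32 newval, u32 *oldp)
{
	u32 old, tmp;

	uaccess_enable_privileged();	/* PAN off, PSTATE.TCO set */
	asm volatile(
	"1:	ldxr	%w0, [%2]\n"
	"	stxr	%w1, %w3, [%2]\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (old), "=&r" (tmp)
	: "r" (uaddr), "r" (newval)
	: "memory");
	uaccess_disable_privileged();	/* PAN on, PSTATE.TCO clear */

	*oldp = old;
	return 0;
}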
@@ -266,10 +306,9 @@
  * The "__xxx_error" versions set the third argument to -EFAULT if an error
  * occurs, and leave it unchanged on success.
  */
-#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
+#define __get_mem_asm(load, reg, x, addr, err)				\
 asm volatile(								\
-	"1:"ALTERNATIVE(instr "	" reg "1, [%2]\n",			\
-			alt_instr " " reg "1, [%2]\n", feature)		\
+	"1: " load "	" reg "1, [%2]\n"				\
 	"2:\n"								\
 	"	.section .fixup, \"ax\"\n"				\
 	"	.align	2\n"						\
@@ -281,66 +320,90 @@
 	: "+r" (err), "=&r" (x)						\
 	: "r" (addr), "i" (-EFAULT))
 
-#define __get_user_err(x, ptr, err)					\
+#define __raw_get_mem(ldr, x, ptr, err)					\
 do {									\
 	unsigned long __gu_val;						\
-	__chk_user_ptr(ptr);						\
-	uaccess_enable_not_uao();					\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
-		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),	\
-			       (err), ARM64_HAS_UAO);			\
+		__get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err));	\
 		break;							\
 	case 2:								\
-		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),	\
-			       (err), ARM64_HAS_UAO);			\
+		__get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err));	\
 		break;							\
 	case 4:								\
-		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),	\
-			       (err), ARM64_HAS_UAO);			\
+		__get_mem_asm(ldr, "%w", __gu_val, (ptr), (err));	\
 		break;							\
 	case 8:								\
-		__get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr),	\
-			       (err), ARM64_HAS_UAO);			\
+		__get_mem_asm(ldr, "%x", __gu_val, (ptr), (err));	\
 		break;							\
 	default:							\
 		BUILD_BUG();						\
 	}								\
-	uaccess_disable_not_uao();					\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
 } while (0)
 
-#define __get_user_check(x, ptr, err)					\
-({									\
-	__typeof__(*(ptr)) __user *__p = (ptr);				\
-	might_fault();							\
-	if (access_ok(VERIFY_READ, __p, sizeof(*__p))) {		\
-		__p = uaccess_mask_ptr(__p);				\
-		__get_user_err((x), __p, (err));			\
-	} else {							\
-		(x) = 0; (err) = -EFAULT;				\
-	}								\
-})
+/*
+ * We must not call into the scheduler between uaccess_enable_not_uao() and
+ * uaccess_disable_not_uao(). As `x` and `ptr` could contain blocking functions,
+ * we must evaluate these outside of the critical section.
+ */
+#define __raw_get_user(x, ptr, err)					\
+do {									\
+	__typeof__(*(ptr)) __user *__rgu_ptr = (ptr);			\
+	__typeof__(x) __rgu_val;					\
+	__chk_user_ptr(ptr);						\
+									\
+	uaccess_enable_not_uao();					\
+	__raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err);		\
+	uaccess_disable_not_uao();					\
+									\
+	(x) = __rgu_val;						\
+} while (0)
 
 #define __get_user_error(x, ptr, err)					\
-({									\
-	__get_user_check((x), (ptr), (err));				\
-	(void)0;							\
-})
+do {									\
+	__typeof__(*(ptr)) __user *__p = (ptr);				\
+	might_fault();							\
+	if (access_ok(__p, sizeof(*__p))) {				\
+		__p = uaccess_mask_ptr(__p);				\
+		__raw_get_user((x), __p, (err));			\
+	} else {							\
+		(x) = (__force __typeof__(x))0; (err) = -EFAULT;	\
+	}								\
+} while (0)
 
 #define __get_user(x, ptr)						\
 ({									\
 	int __gu_err = 0;						\
-	__get_user_check((x), (ptr), __gu_err);				\
+	__get_user_error((x), (ptr), __gu_err);				\
 	__gu_err;							\
 })
 
 #define get_user	__get_user
 
-#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
+/*
+ * We must not call into the scheduler between __uaccess_enable_tco_async() and
+ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * functions, we must evaluate these outside of the critical section.
+ */
+#define __get_kernel_nofault(dst, src, type, err_label)			\
+do {									\
+	__typeof__(dst) __gkn_dst = (dst);				\
+	__typeof__(src) __gkn_src = (src);				\
+	int __gkn_err = 0;						\
+									\
+	__uaccess_enable_tco_async();					\
+	__raw_get_mem("ldr", *((type *)(__gkn_dst)),			\
+		      (__force type *)(__gkn_src), __gkn_err);		\
+	__uaccess_disable_tco_async();					\
+									\
+	if (unlikely(__gkn_err))					\
+		goto err_label;						\
+} while (0)
+
+#define __put_mem_asm(store, reg, x, addr, err)				\
 asm volatile(								\
-	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n",			\
-			alt_instr " " reg "1, [%2]\n", feature)		\
+	"1: " store "	" reg "1, [%2]\n"				\
 	"2:\n"								\
 	"	.section .fixup,\"ax\"\n"				\
 	"	.align	2\n"						\
@@ -351,78 +414,115 @@
 	: "+r" (err)							\
 	: "r" (x), "r" (addr), "i" (-EFAULT))
 
-#define __put_user_err(x, ptr, err)					\
+#define __raw_put_mem(str, x, ptr, err)					\
 do {									\
 	__typeof__(*(ptr)) __pu_val = (x);				\
-	__chk_user_ptr(ptr);						\
-	uaccess_enable_not_uao();					\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
-		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
-			       (err), ARM64_HAS_UAO);			\
+		__put_mem_asm(str "b", "%w", __pu_val, (ptr), (err));	\
 		break;							\
 	case 2:								\
-		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),	\
-			       (err), ARM64_HAS_UAO);			\
+		__put_mem_asm(str "h", "%w", __pu_val, (ptr), (err));	\
 		break;							\
 	case 4:								\
-		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr),	\
-			       (err), ARM64_HAS_UAO);			\
+		__put_mem_asm(str, "%w", __pu_val, (ptr), (err));	\
 		break;							\
 	case 8:								\
-		__put_user_asm("str", "sttr", "%x", __pu_val, (ptr),	\
-			       (err), ARM64_HAS_UAO);			\
+		__put_mem_asm(str, "%x", __pu_val, (ptr), (err));	\
 		break;							\
 	default:							\
 		BUILD_BUG();						\
 	}								\
+} while (0)
+
+/*
+ * We must not call into the scheduler between uaccess_enable_not_uao() and
+ * uaccess_disable_not_uao(). As `x` and `ptr` could contain blocking functions,
+ * we must evaluate these outside of the critical section.
+ */
+#define __raw_put_user(x, ptr, err)					\
+do {									\
+	__typeof__(*(ptr)) __user *__rpu_ptr = (ptr);			\
+	__typeof__(*(ptr)) __rpu_val = (x);				\
+	__chk_user_ptr(__rpu_ptr);					\
+									\
+	uaccess_enable_not_uao();					\
+	__raw_put_mem("sttr", __rpu_val, __rpu_ptr, err);		\
 	uaccess_disable_not_uao();					\
 } while (0)
 
-#define __put_user_check(x, ptr, err)					\
-({									\
+#define __put_user_error(x, ptr, err)					\
+do {									\
 	__typeof__(*(ptr)) __user *__p = (ptr);				\
 	might_fault();							\
-	if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) {		\
+	if (access_ok(__p, sizeof(*__p))) {				\
 		__p = uaccess_mask_ptr(__p);				\
-		__put_user_err((x), __p, (err));			\
+		__raw_put_user((x), __p, (err));			\
 	} else {							\
 		(err) = -EFAULT;					\
 	}								\
-})
-
-#define __put_user_error(x, ptr, err)					\
-({									\
-	__put_user_check((x), (ptr), (err));				\
-	(void)0;							\
-})
+} while (0)
 
 #define __put_user(x, ptr)						\
 ({									\
 	int __pu_err = 0;						\
-	__put_user_check((x), (ptr), __pu_err);				\
+	__put_user_error((x), (ptr), __pu_err);				\
 	__pu_err;							\
 })
 
 #define put_user	__put_user
 
+/*
+ * We must not call into the scheduler between __uaccess_enable_tco_async() and
+ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * functions, we must evaluate these outside of the critical section.
+ */
+#define __put_kernel_nofault(dst, src, type, err_label)			\
+do {									\
+	__typeof__(dst) __pkn_dst = (dst);				\
+	__typeof__(src) __pkn_src = (src);				\
+	int __pkn_err = 0;						\
+									\
+	__uaccess_enable_tco_async();					\
+	__raw_put_mem("str", *((type *)(__pkn_src)),			\
+		      (__force type *)(__pkn_dst), __pkn_err);		\
+	__uaccess_disable_tco_async();					\
+									\
+	if (unlikely(__pkn_err))					\
+		goto err_label;						\
+} while(0)
+
 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
 #define raw_copy_from_user(to, from, n)					\
 ({									\
-	__arch_copy_from_user((to), __uaccess_mask_ptr(from), (n));	\
+	unsigned long __acfu_ret;					\
+	uaccess_enable_not_uao();					\
+	__acfu_ret = __arch_copy_from_user((to),			\
+				      __uaccess_mask_ptr(from), (n));	\
+	uaccess_disable_not_uao();					\
+	__acfu_ret;							\
 })
 
 extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
 #define raw_copy_to_user(to, from, n)					\
 ({									\
-	__arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n));	\
+	unsigned long __actu_ret;					\
+	uaccess_enable_not_uao();					\
+	__actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),	\
+				      (from), (n));			\
+	uaccess_disable_not_uao();					\
+	__actu_ret;							\
 })
 
 extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
 #define raw_copy_in_user(to, from, n)					\
 ({									\
-	__arch_copy_in_user(__uaccess_mask_ptr(to),			\
-			    __uaccess_mask_ptr(from), (n));		\
+	unsigned long __aciu_ret;					\
+	uaccess_enable_not_uao();					\
+	__aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to),	\
+				      __uaccess_mask_ptr(from), (n));	\
+	uaccess_disable_not_uao();					\
+	__aciu_ret;							\
 })
 
 #define INLINE_COPY_TO_USER
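
Note: raw_copy_{from,to,in}_user() now do the uaccess_enable_not_uao()/uaccess_disable_not_uao() bracketing themselves, since the out-of-line __arch_copy_* routines no longer toggle PAN/UAO internally. Drivers never call these directly; they go through the generic copy_from_user()/copy_to_user(), as in this hypothetical write() method:

/* Call chain: copy_from_user() -> raw_copy_from_user() -> __arch_copy_from_user(). */
static ssize_t example_write(struct file *filp, const char __user *ubuf,
			     size_t len, loff_t *ppos)
{
	char kbuf[64];

	if (len > sizeof(kbuf))
		len = sizeof(kbuf);
	if (copy_from_user(kbuf, ubuf, len))	/* returns bytes NOT copied */
		return -EFAULT;
	return len;
}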
@@ -431,8 +531,11 @@
 extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
 static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
 {
-	if (access_ok(VERIFY_WRITE, to, n))
+	if (access_ok(to, n)) {
+		uaccess_enable_not_uao();
 		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
+		uaccess_disable_not_uao();
+	}
 	return n;
 }
 #define clear_user	__clear_user
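
Note: __clear_user() gains the same bracketing and now only calls __arch_clear_user() once access_ok() has passed. Typical use is zero-filling the unwritten tail of a user buffer, as in this hypothetical sketch:

/* Hypothetical: pad the rest of a user buffer with zeroes after a short read. */
static ssize_t pad_user_buf(char __user *ubuf, size_t filled, size_t len)
{
	if (filled < len && clear_user(ubuf + filled, len - filled))
		return -EFAULT;	/* clear_user() returns bytes not cleared */
	return len;
}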