hc
2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
kernel/arch/arm64/include/asm/assembler.h
@@ -1,20 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
 #ifndef __ASSEMBLY__
 #error "Only include this from assembly code"
@@ -26,12 +15,23 @@
 #include <asm-generic/export.h>
 
 #include <asm/asm-offsets.h>
+#include <asm/asm-bug.h>
+#include <asm/alternative.h>
 #include <asm/cpufeature.h>
+#include <asm/cputype.h>
 #include <asm/debug-monitors.h>
 #include <asm/page.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
+
+ /*
+ * Provide a wxN alias for each wN register so what we can paste a xN
+ * reference after a 'w' to obtain the 32-bit version.
+ */
+ .irp n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
+ wx\n .req w\n
+ .endr
 
 .macro save_and_disable_daif, flags
 mrs \flags, daif
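
Note: the wxN aliases added above let a macro form the 32-bit view of a register whose xN name arrives as a parameter. A minimal sketch, not part of the patch (add_one_32bit is a hypothetical macro):

	.macro	add_one_32bit, gpr		// \gpr is expected to be an xN name, e.g. x5
	add	w\gpr, w\gpr, #1		// "w" + "x5" -> wx5, which .req-aliases to w5
	.endm

	add_one_32bit	x5			// increments w5
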
@@ -50,28 +50,14 @@
 msr daif, \flags
 .endm
 
- /* Only on aarch64 pstate, PSR_D_BIT is different for aarch32 */
- .macro inherit_daif, pstate:req, tmp:req
- and \tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
- msr daif, \tmp
- .endm
-
 /* IRQ is the lowest priority flag, unconditionally unmask the rest. */
 .macro enable_da_f
 msr daifclr, #(8 | 4 | 1)
 .endm
 
 /*
- * Enable and disable interrupts.
+ * Save/restore interrupts.
 */
- .macro disable_irq
- msr daifset, #2
- .endm
-
- .macro enable_irq
- msr daifclr, #2
- .endm
-
 .macro save_and_disable_irq, flags
 mrs \flags, daif
 msr daifset, #2
@@ -79,18 +65,6 @@
 
 .macro restore_irq, flags
 msr daif, \flags
- .endm
-
-/*
- * Save/disable and restore interrupts.
- */
- .macro save_and_disable_irqs, olddaif
- mrs \olddaif, daif
- disable_irq
- .endm
-
- .macro restore_irqs, olddaif
- msr daif, \olddaif
 .endm
 
 .macro enable_dbg
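
For reference, the save/restore helpers kept by this cleanup are used in pairs, roughly like this illustrative sketch:

	save_and_disable_irq	x3		// stash DAIF and mask IRQs
	/* ... critical section ... */
	restore_irq	x3			// restore the saved DAIF value
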
@@ -116,13 +90,6 @@
 .endm
 
 /*
- * SMP data memory barrier
- */
- .macro smp_dmb, opt
- dmb \opt
- .endm
-
-/*
 * RAS Error Synchronization barrier
 */
 .macro esb
@@ -141,22 +108,23 @@
 .endm
 
 /*
- * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
- * of bounds.
+ * Clear Branch History instruction
 */
- .macro mask_nospec64, idx, limit, tmp
- sub \tmp, \idx, \limit
- bic \tmp, \tmp, \idx
- and \idx, \idx, \tmp, asr #63
- csdb
+ .macro clearbhb
+ hint #22
 .endm
 
 /*
 * Speculation barrier
 */
 .macro sb
+alternative_if_not ARM64_HAS_SB
 dsb nsh
 isb
+alternative_else
+ SB_BARRIER_INSN
+ nop
+alternative_endif
 .endm
 
 /*
@@ -268,6 +236,23 @@
 .endm
 
 /*
+ * @dst: destination register
+ */
+#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
+ .macro this_cpu_offset, dst
+ mrs \dst, tpidr_el2
+ .endm
+#else
+ .macro this_cpu_offset, dst
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+ mrs \dst, tpidr_el1
+alternative_else
+ mrs \dst, tpidr_el2
+alternative_endif
+ .endm
+#endif
+
+ /*
 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
 * @sym: The name of the per-cpu variable
 * @tmp: scratch register
@@ -275,11 +260,7 @@
 .macro adr_this_cpu, dst, sym, tmp
 adrp \tmp, \sym
 add \dst, \tmp, #:lo12:\sym
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
- mrs \tmp, tpidr_el1
-alternative_else
- mrs \tmp, tpidr_el2
-alternative_endif
+ this_cpu_offset \tmp
 add \dst, \dst, \tmp
 .endm
 
@@ -290,11 +271,7 @@
 */
 .macro ldr_this_cpu dst, sym, tmp
 adr_l \dst, \sym
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
- mrs \tmp, tpidr_el1
-alternative_else
- mrs \tmp, tpidr_el2
-alternative_endif
+ this_cpu_offset \tmp
 ldr \dst, [\dst, \tmp]
 .endm
 
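
With this_cpu_offset factored out, the per-CPU helpers read the same from the caller's side. A usage sketch, assuming a hypothetical per-cpu variable my_percpu_counter:

	adr_this_cpu	x0, my_percpu_counter, x1	// x0 = this CPU's &my_percpu_counter
	ldr	x2, [x0]
	add	x2, x2, #1
	str	x2, [x0]

	ldr_this_cpu	x3, my_percpu_counter, x1	// or load the value in one go
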
@@ -306,23 +283,28 @@
 .endm
 
 /*
- * mmid - get context id from mm pointer (mm->context.id)
- */
- .macro mmid, rd, rn
- ldr \rd, [\rn, #MM_CONTEXT_ID]
- .endm
-/*
- * read_ctr - read CTR_EL0. If the system has mismatched
- * cache line sizes, provide the system wide safe value
- * from arm64_ftr_reg_ctrel0.sys_val
+ * read_ctr - read CTR_EL0. If the system has mismatched register fields,
+ * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
 */
 .macro read_ctr, reg
-alternative_if_not ARM64_MISMATCHED_CACHE_LINE_SIZE
+#ifndef __KVM_NVHE_HYPERVISOR__
+alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
 mrs \reg, ctr_el0 // read CTR
 nop
 alternative_else
 ldr_l \reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
 alternative_endif
+#else
+alternative_if_not ARM64_KVM_PROTECTED_MODE
+ ASM_BUG()
+alternative_else_nop_endif
+alternative_cb kvm_compute_final_ctr_el0
+ movz \reg, #0
+ movk \reg, #0, lsl #16
+ movk \reg, #0, lsl #32
+ movk \reg, #0, lsl #48
+alternative_cb_end
+#endif
 .endm
 
 
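
Callers of read_ctr typically extract a cache-geometry field next; a minimal sketch, assuming the existing CTR_DMINLINE_SHIFT define:

	read_ctr	x4
	ubfx	x4, x4, #CTR_DMINLINE_SHIFT, #4	// log2 of D-cache line size in words
	mov	x5, #4
	lsl	x5, x5, x4			// line size in bytes
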
@@ -369,11 +351,17 @@
 .endm
 
 /*
- * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
+ * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
 */
- .macro tcr_set_idmap_t0sz, valreg, tmpreg
- ldr_l \tmpreg, idmap_t0sz
- bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
+ .macro tcr_set_t0sz, valreg, t0sz
+ bfi \valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
+ .endm
+
+/*
+ * tcr_set_t1sz - update TCR.T1SZ
+ */
+ .macro tcr_set_t1sz, valreg, t1sz
+ bfi \valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
 .endm
 
 /*
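
A caller assembling a TCR value might use the new helpers as in this hypothetical sketch (x10 holds the TCR being built, x6 is scratch):

	mov	x6, #(64 - 48)			// T0SZ for a 48-bit TTBR0 VA space
	tcr_set_t0sz	x10, x6
	mov	x6, #(64 - 48)			// T1SZ for a 48-bit TTBR1 VA space
	tcr_set_t1sz	x10, x6
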
@@ -427,7 +415,11 @@
 .ifc \op, cvap
 sys 3, c7, c12, 1, \kaddr // dc cvap
 .else
+ .ifc \op, cvadp
+ sys 3, c7, c13, 1, \kaddr // dc cvadp
+ .else
 dc \op, \kaddr
+ .endif
 .endif
 .endif
 .endif
@@ -462,14 +454,24 @@
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
 .macro reset_pmuserenr_el0, tmpreg
- mrs \tmpreg, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
- sbfx \tmpreg, \tmpreg, #8, #4
+ mrs \tmpreg, id_aa64dfr0_el1
+ sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4
 cmp \tmpreg, #1 // Skip if no PMU present
 b.lt 9000f
 msr pmuserenr_el0, xzr // Disable PMU access from EL0
 9000:
 .endm
 
+/*
+ * reset_amuserenr_el0 - reset AMUSERENR_EL0 if AMUv1 present
+ */
+ .macro reset_amuserenr_el0, tmpreg
+ mrs \tmpreg, id_aa64pfr0_el1 // Check ID_AA64PFR0_EL1
+ ubfx \tmpreg, \tmpreg, #ID_AA64PFR0_AMU_SHIFT, #4
+ cbz \tmpreg, .Lskip_\@ // Skip if no AMU present
+ msr_s SYS_AMUSERENR_EL0, xzr // Disable AMU access from EL0
+.Lskip_\@:
+ .endm
 /*
 * copy_page - copy src to dest using temp registers t1-t8
 */
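
Both reset helpers are intended to run back to back on the CPU bring-up/resume path, for example (illustrative only):

	reset_pmuserenr_el0	x0		// lock EL0 out of the PMU if one is present
	reset_amuserenr_el0	x0		// likewise for the activity monitors
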
@@ -489,17 +491,6 @@
 .endm
 
 /*
- * Annotate a function as position independent, i.e., safe to be called before
- * the kernel virtual mapping is activated.
- */
-#define ENDPIPROC(x) \
- .globl __pi_##x; \
- .type __pi_##x, %function; \
- .set __pi_##x, x; \
- .size __pi_##x, . - x; \
- ENDPROC(x)
-
-/*
 * Annotate a function as being unsuitable for kprobes.
 */
 #ifdef CONFIG_KPROBES
@@ -511,7 +502,7 @@
 #define NOKPROBE(x)
 #endif
 
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 #define EXPORT_SYMBOL_NOKASAN(name)
 #else
 #define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name)
@@ -549,10 +540,37 @@
 .endm
 
 /*
- * Return the current thread_info.
+ * Return the current task_struct.
 */
- .macro get_thread_info, rd
+ .macro get_current_task, rd
 mrs \rd, sp_el0
+ .endm
+
+/*
+ * Offset ttbr1 to allow for 48-bit kernel VAs set with 52-bit PTRS_PER_PGD.
+ * orr is used as it can cover the immediate value (and is idempotent).
+ * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
+ * ttbr: Value of ttbr to set, modified.
+ */
+ .macro offset_ttbr1, ttbr, tmp
+#ifdef CONFIG_ARM64_VA_BITS_52
+ mrs_s \tmp, SYS_ID_AA64MMFR2_EL1
+ and \tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
+ cbnz \tmp, .Lskipoffs_\@
+ orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
+.Lskipoffs_\@ :
+#endif
+ .endm
+
+/*
+ * Perform the reverse of offset_ttbr1.
+ * bic is used as it can cover the immediate value and, in future, won't need
+ * to be nop'ed out when dealing with 52-bit kernel VAs.
+ */
+ .macro restore_ttbr1, ttbr
+#ifdef CONFIG_ARM64_VA_BITS_52
+ bic \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
+#endif
 .endm
 
 /*
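
offset_ttbr1/restore_ttbr1 bracket writes of TTBR1_EL1; a hypothetical sketch, assuming x1 already holds the physical address of the kernel page tables:

	offset_ttbr1	x1, x2			// apply TTBR1_BADDR_4852_OFFSET when required
	msr	ttbr1_el1, x1
	isb
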
@@ -592,6 +610,25 @@
 #else
 and \phys, \pte, #PTE_ADDR_MASK
 #endif
+ .endm
+
+/*
+ * tcr_clear_errata_bits - Clear TCR bits that trigger an errata on this CPU.
+ */
+ .macro tcr_clear_errata_bits, tcr, tmp1, tmp2
+#ifdef CONFIG_FUJITSU_ERRATUM_010001
+ mrs \tmp1, midr_el1
+
+ mov_q \tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK
+ and \tmp1, \tmp1, \tmp2
+ mov_q \tmp2, MIDR_FUJITSU_ERRATUM_010001
+ cmp \tmp1, \tmp2
+ b.ne 10f
+
+ mov_q \tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001
+ bic \tcr, \tcr, \tmp2
+10:
+#endif /* CONFIG_FUJITSU_ERRATUM_010001 */
 .endm
 
 /**
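
tcr_clear_errata_bits is meant to run just before TCR_EL1 is programmed, roughly as in this illustrative sketch (x10 holds the assembled TCR value, x9/x11 are scratch):

	tcr_clear_errata_bits	x10, x9, x11
	msr	tcr_el1, x10
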
@@ -668,76 +705,153 @@
 .endm
 
 /*
- * Check whether to yield to another runnable task from kernel mode NEON code
- * (which runs with preemption disabled).
- *
- * if_will_cond_yield_neon
- * // pre-yield patchup code
- * do_cond_yield_neon
- * // post-yield patchup code
- * endif_yield_neon <label>
- *
- * where <label> is optional, and marks the point where execution will resume
- * after a yield has been performed. If omitted, execution resumes right after
- * the endif_yield_neon invocation. Note that the entire sequence, including
- * the provided patchup code, will be omitted from the image if CONFIG_PREEMPT
- * is not defined.
- *
- * As a convenience, in the case where no patchup code is required, the above
- * sequence may be abbreviated to
- *
- * cond_yield_neon <label>
- *
- * Note that the patchup code does not support assembler directives that change
- * the output section, any use of such directives is undefined.
- *
- * The yield itself consists of the following:
- * - Check whether the preempt count is exactly 1, in which case disabling
- * preemption once will make the task preemptible. If this is not the case,
- * yielding is pointless.
- * - Check whether TIF_NEED_RESCHED is set, and if so, disable and re-enable
- * kernel mode NEON (which will trigger a reschedule), and branch to the
- * yield fixup code.
- *
- * This macro sequence may clobber all CPU state that is not guaranteed by the
- * AAPCS to be preserved across an ordinary function call.
+ * Set SCTLR_ELx to the @reg value, and invalidate the local icache
+ * in the process. This is called when setting the MMU on.
+ */
+.macro set_sctlr, sreg, reg
+ msr \sreg, \reg
+ isb
+ /*
+ * Invalidate the local I-cache so that any instructions fetched
+ * speculatively from the PoC are discarded, since they may have
+ * been dynamically patched at the PoU.
+ */
+ ic iallu
+ dsb nsh
+ isb
+.endm
+
+.macro set_sctlr_el1, reg
+ set_sctlr sctlr_el1, \reg
+.endm
+
+.macro set_sctlr_el2, reg
+ set_sctlr sctlr_el2, \reg
+.endm
+
+ /*
+ * Check whether preempt/bh-disabled asm code should yield as soon as
+ * it is able. This is the case if we are currently running in task
+ * context, and either a softirq is pending, or the TIF_NEED_RESCHED
+ * flag is set and re-enabling preemption a single time would result in
+ * a preempt count of zero. (Note that the TIF_NEED_RESCHED flag is
+ * stored negated in the top word of the thread_info::preempt_count
+ * field)
+ */
+ .macro cond_yield, lbl:req, tmp:req, tmp2:req
+ get_current_task \tmp
+ ldr \tmp, [\tmp, #TSK_TI_PREEMPT]
+ /*
+ * If we are serving a softirq, there is no point in yielding: the
+ * softirq will not be preempted no matter what we do, so we should
+ * run to completion as quickly as we can.
+ */
+ tbnz \tmp, #SOFTIRQ_SHIFT, .Lnoyield_\@
+#ifdef CONFIG_PREEMPTION
+ sub \tmp, \tmp, #PREEMPT_DISABLE_OFFSET
+ cbz \tmp, \lbl
+#endif
+ adr_l \tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
+ this_cpu_offset \tmp2
+ ldr w\tmp, [\tmp, \tmp2]
+ cbnz w\tmp, \lbl // yield on pending softirq in task context
+.Lnoyield_\@:
+ .endm
+
+/*
+ * This macro emits a program property note section identifying
+ * architecture features which require special handling, mainly for
+ * use in assembly files included in the VDSO.
 */
 
- .macro cond_yield_neon, lbl
- if_will_cond_yield_neon
- do_cond_yield_neon
- endif_yield_neon \lbl
- .endm
+#define NT_GNU_PROPERTY_TYPE_0 5
+#define GNU_PROPERTY_AARCH64_FEATURE_1_AND 0xc0000000
 
- .macro if_will_cond_yield_neon
-#ifdef CONFIG_PREEMPT
- get_thread_info x0
- ldr w1, [x0, #TSK_TI_PREEMPT]
- ldr x0, [x0, #TSK_TI_FLAGS]
- cmp w1, #PREEMPT_DISABLE_OFFSET
- csel x0, x0, xzr, eq
- tbnz x0, #TIF_NEED_RESCHED, .Lyield_\@ // needs rescheduling?
- /* fall through to endif_yield_neon */
- .subsection 1
-.Lyield_\@ :
-#else
- .section ".discard.cond_yield_neon", "ax"
+#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI (1U << 0)
+#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC (1U << 1)
+
+#ifdef CONFIG_ARM64_BTI_KERNEL
+#define GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT \
+ ((GNU_PROPERTY_AARCH64_FEATURE_1_BTI | \
+ GNU_PROPERTY_AARCH64_FEATURE_1_PAC))
 #endif
+
+#ifdef GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
+.macro emit_aarch64_feature_1_and, feat=GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
+ .pushsection .note.gnu.property, "a"
+ .align 3
+ .long 2f - 1f
+ .long 6f - 3f
+ .long NT_GNU_PROPERTY_TYPE_0
+1: .string "GNU"
+2:
+ .align 3
+3: .long GNU_PROPERTY_AARCH64_FEATURE_1_AND
+ .long 5f - 4f
+4:
+ /*
+ * This is described with an array of char in the Linux API
+ * spec but the text and all other usage (including binutils,
+ * clang and GCC) treat this as a 32 bit value so no swizzling
+ * is required for big endian.
+ */
+ .long \feat
+5:
+ .align 3
+6:
+ .popsection
+.endm
+
+#else
+.macro emit_aarch64_feature_1_and, feat=0
+.endm
+
+#endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */
+
+ .macro __mitigate_spectre_bhb_loop tmp
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+alternative_cb spectre_bhb_patch_loop_iter
+ mov \tmp, #32 // Patched to correct the immediate
+alternative_cb_end
+.Lspectre_bhb_loop\@:
+ b . + 4
+ subs \tmp, \tmp, #1
+ b.ne .Lspectre_bhb_loop\@
+ sb
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
 .endm
 
- .macro do_cond_yield_neon
- bl kernel_neon_end
- bl kernel_neon_begin
+ .macro mitigate_spectre_bhb_loop tmp
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+alternative_cb spectre_bhb_patch_loop_mitigation_enable
+ b .L_spectre_bhb_loop_done\@ // Patched to NOP
+alternative_cb_end
+ __mitigate_spectre_bhb_loop \tmp
+.L_spectre_bhb_loop_done\@:
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
 .endm
 
- .macro endif_yield_neon, lbl
- .ifnb \lbl
- b \lbl
- .else
- b .Lyield_out_\@
- .endif
- .previous
-.Lyield_out_\@ :
+ /* Save/restores x0-x3 to the stack */
+ .macro __mitigate_spectre_bhb_fw
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+ stp x0, x1, [sp, #-16]!
+ stp x2, x3, [sp, #-16]!
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3
+alternative_cb smccc_patch_fw_mitigation_conduit
+ nop // Patched to SMC/HVC #0
+alternative_cb_end
+ ldp x2, x3, [sp], #16
+ ldp x0, x1, [sp], #16
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
 .endm
 
+ .macro mitigate_spectre_bhb_clear_insn
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+alternative_cb spectre_bhb_patch_clearbhb
+ /* Patched to NOP when not supported */
+ clearbhb
+ isb
+alternative_cb_end
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+ .endm
 #endif /* __ASM_ASSEMBLER_H */
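
As a usage sketch for the new yield and property-note macros (illustrative only; poly_hash_update and its labels are hypothetical), a preempt/bh-disabled assembly loop replaces the old cond_yield_neon pattern with cond_yield, and an object built for the vDSO can be tagged via emit_aarch64_feature_1_and:

SYM_FUNC_START(poly_hash_update)
0:	/* ... process one block ... */
	subs	x2, x2, #1		// x2: remaining block count
	b.eq	2f
	cond_yield	1f, x8, x9	// x8/x9: scratch; 1f: bail-out label
	b	0b
1:	/* save partial state so the C caller can reschedule and call back in */
2:	ret
SYM_FUNC_END(poly_hash_update)

	emit_aarch64_feature_1_and	// emit the .note.gnu.property section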