2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/arch/powerpc/include/asm/book3s/64/kup-radix.h
@@ -2,21 +2,204 @@
 #ifndef _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
 #define _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
 
+#include <linux/const.h>
+#include <asm/reg.h>
+
+#define AMR_KUAP_BLOCK_READ	UL(0x4000000000000000)
+#define AMR_KUAP_BLOCK_WRITE	UL(0x8000000000000000)
+#define AMR_KUAP_BLOCKED	(AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
+#define AMR_KUAP_SHIFT		62
+
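
Aside: the two masks above occupy the top two bits of the AMR, so the whole
AMR_KUAP_BLOCKED value shifted right by AMR_KUAP_SHIFT is just 0x3, small
enough for li's 16-bit signed immediate; the assembly macros below rebuild the
mask with an li/sldi pair instead of loading a 64-bit constant. A standalone
sanity check of that arithmetic (user-space C, not part of the patch):

/* Verifies the li/sldi constant-building idiom used by the macros below. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint64_t block_read  = UINT64_C(0x4000000000000000);
	const uint64_t block_write = UINT64_C(0x8000000000000000);
	const uint64_t blocked     = block_read | block_write;

	assert((blocked >> 62) == 0x3);			/* fits li's immediate */
	assert((UINT64_C(0x3) << 62) == blocked);	/* sldi restores the mask */
	return 0;
}
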
+#ifdef __ASSEMBLY__
+
+.macro kuap_restore_amr	gpr1, gpr2
+#ifdef CONFIG_PPC_KUAP
+	BEGIN_MMU_FTR_SECTION_NESTED(67)
+	mfspr	\gpr1, SPRN_AMR
+	ld	\gpr2, STACK_REGS_KUAP(r1)
+	cmpd	\gpr1, \gpr2
+	beq	998f
+	isync
+	mtspr	SPRN_AMR, \gpr2
+	/* No isync required, see kuap_restore_amr() */
+998:
+	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
+#endif
+.endm
+
+#ifdef CONFIG_PPC_KUAP
+.macro kuap_check_amr gpr1, gpr2
+#ifdef CONFIG_PPC_KUAP_DEBUG
+	BEGIN_MMU_FTR_SECTION_NESTED(67)
+	mfspr	\gpr1, SPRN_AMR
+	li	\gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
+	sldi	\gpr2, \gpr2, AMR_KUAP_SHIFT
+999:	tdne	\gpr1, \gpr2
+	EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
+	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
+#endif
+.endm
+#endif
+
+.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
+#ifdef CONFIG_PPC_KUAP
+	BEGIN_MMU_FTR_SECTION_NESTED(67)
+	.ifnb \msr_pr_cr
+	bne	\msr_pr_cr, 99f
+	.endif
+	mfspr	\gpr1, SPRN_AMR
+	std	\gpr1, STACK_REGS_KUAP(r1)
+	li	\gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
+	sldi	\gpr2, \gpr2, AMR_KUAP_SHIFT
+	cmpd	\use_cr, \gpr1, \gpr2
+	beq	\use_cr, 99f
+	// We don't isync here because we very recently entered via rfid
+	mtspr	SPRN_AMR, \gpr2
+	isync
+99:
+	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
+#endif
+.endm
+
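
Taken together, the three macros above implement a save/lock/restore protocol:
entry code saves the live AMR into the stack frame and locks user access, and
exit code restores the saved value only when the register actually changed,
avoiding a redundant mtspr. A toy user-space model of that flow (invented
names, illustrative only, not kernel code):

/* Toy model of kuap_save_amr_and_lock / kuap_restore_amr. */
#include <assert.h>
#include <stdint.h>

#define AMR_BLOCKED	(UINT64_C(0x3) << 62)

static uint64_t amr;				/* stands in for SPRN_AMR */

static uint64_t entry_save_and_lock(void)	/* ~kuap_save_amr_and_lock */
{
	uint64_t saved = amr;			/* std to STACK_REGS_KUAP(r1) */
	if (amr != AMR_BLOCKED)			/* skip mtspr when already locked */
		amr = AMR_BLOCKED;
	return saved;
}

static void exit_restore(uint64_t saved)	/* ~kuap_restore_amr */
{
	if (amr != saved)			/* only write the SPR on change */
		amr = saved;
}

int main(void)
{
	amr = 0;				/* user access was open */
	uint64_t saved = entry_save_and_lock();
	assert(amr == AMR_BLOCKED);		/* locked while in the kernel */
	exit_restore(saved);
	assert(amr == 0);			/* interrupted state put back */
	return 0;
}
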
+#else /* !__ASSEMBLY__ */
+
+#include <linux/jump_label.h>
+
 DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
 
-/* Prototype for function defined in exceptions-64s.S */
-void do_uaccess_flush(void);
+#ifdef CONFIG_PPC_KUAP
+
+#include <asm/mmu.h>
+#include <asm/ptrace.h>
+
+static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr)
+{
+	if (mmu_has_feature(MMU_FTR_RADIX_KUAP) && unlikely(regs->kuap != amr)) {
+		isync();
+		mtspr(SPRN_AMR, regs->kuap);
+		/*
+		 * No isync required here because we are about to RFI back to
+		 * previous context before any user accesses would be made,
+		 * which is a CSI.
+		 */
+	}
+}
+
+static inline unsigned long kuap_get_and_check_amr(void)
+{
+	if (mmu_has_feature(MMU_FTR_RADIX_KUAP)) {
+		unsigned long amr = mfspr(SPRN_AMR);
+		if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
+			WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
+		return amr;
+	}
+	return 0;
+}
+
+static inline void kuap_check_amr(void)
+{
+	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_RADIX_KUAP))
+		WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
+}
+
+/*
+ * We support individually allowing read or write, but we don't support nesting
+ * because that would require an expensive read/modify/write of the AMR.
+ */
+
+static inline unsigned long get_kuap(void)
+{
+	/*
+	 * We return AMR_KUAP_BLOCKED when we don't support KUAP because
+	 * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
+	 * cause restore_user_access to do a flush.
+	 *
+	 * This has no effect in terms of actually blocking things on hash,
+	 * so it doesn't break anything.
+	 */
+	if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
+		return AMR_KUAP_BLOCKED;
+
+	return mfspr(SPRN_AMR);
+}
+
+static inline void set_kuap(unsigned long value)
+{
+	if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
+		return;
+
+	/*
+	 * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
+	 * before and after the move to AMR. See table 6 on page 1134.
+	 */
+	isync();
+	mtspr(SPRN_AMR, value);
+	isync();
+}
+
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+	return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
+		    (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
+		    "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
+}
+#else /* CONFIG_PPC_KUAP */
+static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr) { }
+
+static inline unsigned long kuap_get_and_check_amr(void)
+{
+	return 0UL;
+}
+
+static inline unsigned long get_kuap(void)
+{
+	return AMR_KUAP_BLOCKED;
+}
+
+static inline void set_kuap(unsigned long value) { }
+#endif /* !CONFIG_PPC_KUAP */
 
 static __always_inline void allow_user_access(void __user *to, const void __user *from,
-					      unsigned long size)
+					      unsigned long size, unsigned long dir)
 {
+	// This is written so we can resolve to a single case at build time
+	BUILD_BUG_ON(!__builtin_constant_p(dir));
+	if (dir == KUAP_READ)
+		set_kuap(AMR_KUAP_BLOCK_WRITE);
+	else if (dir == KUAP_WRITE)
+		set_kuap(AMR_KUAP_BLOCK_READ);
+	else if (dir == KUAP_READ_WRITE)
+		set_kuap(0);
+	else
+		BUILD_BUG();
 }
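
Note the inversion in allow_user_access(): to allow reads it writes
AMR_KUAP_BLOCK_WRITE (blocking only writes), and vice versa, so a single AMR
write opens exactly the permission asked for. KUAP_READ, KUAP_WRITE and
KUAP_READ_WRITE are defined elsewhere (asm/kup.h in mainline kernels). A
user-space model of the mapping (invented names, illustrative only):

/* Models the dir -> AMR mapping of allow_user_access(). */
#include <assert.h>
#include <stdint.h>

#define BLOCK_READ	(UINT64_C(1) << 62)
#define BLOCK_WRITE	(UINT64_C(1) << 63)
#define BLOCKED		(BLOCK_READ | BLOCK_WRITE)

enum dir { DIR_READ = 1, DIR_WRITE = 2, DIR_READ_WRITE = 3 };

static uint64_t amr = BLOCKED;

static void allow(enum dir d)		/* ~allow_user_access() */
{
	if (d == DIR_READ)
		amr = BLOCK_WRITE;	/* reads open, writes still blocked */
	else if (d == DIR_WRITE)
		amr = BLOCK_READ;	/* writes open, reads still blocked */
	else
		amr = 0;		/* both open */
}

int main(void)
{
	allow(DIR_READ);
	assert(!(amr & BLOCK_READ) && (amr & BLOCK_WRITE));
	allow(DIR_READ_WRITE);
	assert(amr == 0);
	return 0;
}
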
 
 static inline void prevent_user_access(void __user *to, const void __user *from,
-				       unsigned long size)
+				       unsigned long size, unsigned long dir)
 {
+	set_kuap(AMR_KUAP_BLOCKED);
 	if (static_branch_unlikely(&uaccess_flush_key))
 		do_uaccess_flush();
 }
 
+static inline unsigned long prevent_user_access_return(void)
+{
+	unsigned long flags = get_kuap();
+
+	set_kuap(AMR_KUAP_BLOCKED);
+	if (static_branch_unlikely(&uaccess_flush_key))
+		do_uaccess_flush();
+
+	return flags;
+}
+
+static inline void restore_user_access(unsigned long flags)
+{
+	set_kuap(flags);
+	if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
+		do_uaccess_flush();
+}
+#endif /* __ASSEMBLY__ */
+
 #endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */
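
The prevent_user_access_return()/restore_user_access() pair added at the end
lets a region that must run with user access blocked nest inside an open
window: it returns the previous AMR value and restores it afterwards, and it
runs the uaccess flush (when uaccess_flush_key is enabled) whenever user
access is being closed off. A user-space model of that logic (invented names,
illustrative only):

/* Models the flush-on-close behaviour of the _return/restore pair. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define AMR_BLOCKED	(UINT64_C(0x3) << 62)

static uint64_t amr;
static bool flush_enabled = true;	/* models uaccess_flush_key */
static int flushes;			/* counts do_uaccess_flush() calls */

static uint64_t prevent_return(void)	/* ~prevent_user_access_return() */
{
	uint64_t flags = amr;
	amr = AMR_BLOCKED;
	if (flush_enabled)
		flushes++;
	return flags;
}

static void restore(uint64_t flags)	/* ~restore_user_access() */
{
	amr = flags;
	if (flush_enabled && flags == AMR_BLOCKED)	/* closing the window */
		flushes++;
}

int main(void)
{
	amr = 0;			/* user access open */
	uint64_t f = prevent_return();	/* close it, flush */
	assert(amr == AMR_BLOCKED && flushes == 1);
	restore(f);			/* reopen, no flush needed */
	assert(amr == 0 && flushes == 1);
	return 0;
}
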