forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
kernel/arch/arm64/kvm/va_layout.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2017 ARM Ltd.
  * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */

 #include <linux/kvm_host.h>
@@ -22,53 +11,101 @@
 #include <asm/debug-monitors.h>
 #include <asm/insn.h>
 #include <asm/kvm_mmu.h>
+#include <asm/memory.h>

 /*
- * The LSB of the random hyp VA tag or 0 if no randomization is used.
+ * The LSB of the HYP VA tag
  */
 static u8 tag_lsb;
 /*
- * The random hyp VA tag value with the region bit if hyp randomization is used
+ * The HYP VA tag value with the region bit
  */
 static u64 tag_val;
 static u64 va_mask;

-static void compute_layout(void)
+/*
+ * Compute HYP VA by using the same computation as kern_hyp_va().
+ */
+static u64 __early_kern_hyp_va(u64 addr)
+{
+	addr &= va_mask;
+	addr |= tag_val << tag_lsb;
+	return addr;
+}
+
+/*
+ * Store a hyp VA <-> PA offset into an EL2-owned variable.
+ */
+static void init_hyp_physvirt_offset(void)
+{
+	u64 kern_va, hyp_va;
+
+	/* Compute the offset from the hyp VA and PA of a random symbol. */
+	kern_va = (u64)lm_alias(__hyp_text_start);
+	hyp_va = __early_kern_hyp_va(kern_va);
+	hyp_physvirt_offset = (s64)__pa(kern_va) - (s64)hyp_va;
+}
+
+/*
+ * We want to generate a hyp VA with the following format (with V ==
+ * vabits_actual):
+ *
+ *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
+ *  ---------------------------------------------------------
+ * | 0000000 | hyp_va_msb |   random tag   |  kern linear VA |
+ * |--------- tag_val -----------|----- va_mask ---|
+ *
+ * which does not conflict with the idmap regions.
+ */
+__init void kvm_compute_layout(void)
 {
 	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
 	u64 hyp_va_msb;
-	int kva_msb;

 	/* Where is my RAM region? */
-	hyp_va_msb = idmap_addr & BIT(VA_BITS - 1);
-	hyp_va_msb ^= BIT(VA_BITS - 1);
+	hyp_va_msb = idmap_addr & BIT(vabits_actual - 1);
+	hyp_va_msb ^= BIT(vabits_actual - 1);

-	kva_msb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
+	tag_lsb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
 			(u64)(high_memory - 1));

-	if (kva_msb == (VA_BITS - 1)) {
+	va_mask = GENMASK_ULL(tag_lsb - 1, 0);
+	tag_val = hyp_va_msb;
+
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (vabits_actual - 1)) {
+		/* We have some free bits to insert a random tag. */
+		tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
+	}
+	tag_val >>= tag_lsb;
+
+	init_hyp_physvirt_offset();
+}
+
+/*
+ * The .hyp.reloc ELF section contains a list of kimg positions that
+ * contain kimg VAs but will be accessed only in hyp execution context.
+ * Convert them to hyp VAs. See gen-hyprel.c for more details.
+ */
+__init void kvm_apply_hyp_relocations(void)
+{
+	int32_t *rel;
+	int32_t *begin = (int32_t *)__hyp_reloc_begin;
+	int32_t *end = (int32_t *)__hyp_reloc_end;
+
+	for (rel = begin; rel < end; ++rel) {
+		uintptr_t *ptr, kimg_va;
+
 		/*
-		 * No space in the address, let's compute the mask so
-		 * that it covers (VA_BITS - 1) bits, and the region
-		 * bit. The tag stays set to zero.
+		 * Each entry contains a 32-bit relative offset from itself
+		 * to a kimg VA position.
 		 */
-		va_mask = BIT(VA_BITS - 1) - 1;
-		va_mask |= hyp_va_msb;
-	} else {
-		/*
-		 * We do have some free bits to insert a random tag.
-		 * Hyp VAs are now created from kernel linear map VAs
-		 * using the following formula (with V == VA_BITS):
-		 *
-		 *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
-		 *  ---------------------------------------------------------
-		 * | 0000000 | hyp_va_msb |   random tag   |  kern linear VA |
-		 */
-		tag_lsb = kva_msb;
-		va_mask = GENMASK_ULL(tag_lsb - 1, 0);
-		tag_val = get_random_long() & GENMASK_ULL(VA_BITS - 2, tag_lsb);
-		tag_val |= hyp_va_msb;
-		tag_val >>= tag_lsb;
+		ptr = (uintptr_t *)lm_alias((char *)rel + *rel);
+
+		/* Read the kimg VA value at the relocation address. */
+		kimg_va = *ptr;
+
+		/* Convert to hyp VA and store back to the relocation address. */
+		*ptr = __early_kern_hyp_va((uintptr_t)lm_alias(kimg_va));
 	}
 }

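
To see the new layout arithmetic concretely: below is a minimal userspace sketch of the __early_kern_hyp_va() computation above, with invented values (tag_lsb = 38, vabits_actual = 48, and a fixed stand-in for get_random_long()); at boot the kernel derives these from the DRAM layout instead.

/* Standalone sketch; all values are hypothetical stand-ins. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

static unsigned int tag_lsb = 38;	/* first bit that can differ across the linear map */
static uint64_t va_mask;
static uint64_t tag_val;

static uint64_t early_kern_hyp_va(uint64_t addr)
{
	addr &= va_mask;		/* keep the low linear-map bits */
	addr |= tag_val << tag_lsb;	/* stamp the region bit + tag on top */
	return addr;
}

int main(void)
{
	uint64_t vabits_actual = 48;			/* hypothetical */
	uint64_t hyp_va_msb = 1ULL << (vabits_actual - 1);
	uint64_t fake_random = 0x5a0000000000ULL;	/* stand-in for get_random_long() */

	/* Same steps as kvm_compute_layout() above. */
	va_mask = GENMASK_ULL(tag_lsb - 1, 0);
	tag_val = hyp_va_msb;
	tag_val |= fake_random & GENMASK_ULL(vabits_actual - 2, tag_lsb);
	tag_val >>= tag_lsb;

	uint64_t kva = 0x0000001122334455ULL & va_mask;	/* pretend linear-map VA bits */
	printf("hyp VA: 0x%016llx\n", (unsigned long long)early_kern_hyp_va(kva));

	/* The low tag_lsb bits survive the translation unchanged. */
	assert((early_kern_hyp_va(kva) & va_mask) == (kva & va_mask));
	return 0;
}
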
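Likewise, a toy model of the .hyp.reloc walk: each 32-bit entry stores an offset relative to its own address, so entry + *entry locates the word to rewrite in place. The buffer layout and the tag-flip stand-in for __early_kern_hyp_va() are invented for illustration.

#include <stdint.h>
#include <stdio.h>

static uintptr_t to_hyp_va(uintptr_t va)
{
	return va ^ 0x4000000000000000ULL;	/* invented tag flip */
}

/* One object, so the self-relative offsets are well-defined. */
static struct {
	int32_t rels[2];	/* the ".hyp.reloc" entries */
	uintptr_t slots[2];	/* the "kimg VA" words they point at */
} blob = { .slots = { 0x1000, 0x2000 } };

int main(void)
{
	/* Emit self-relative entries, as a build-time tool would. */
	for (int i = 0; i < 2; i++)
		blob.rels[i] = (int32_t)((char *)&blob.slots[i] - (char *)&blob.rels[i]);

	/* Apply them, mirroring the loop in kvm_apply_hyp_relocations(). */
	for (int32_t *rel = blob.rels; rel < blob.rels + 2; ++rel) {
		uintptr_t *ptr = (uintptr_t *)((char *)rel + *rel);
		*ptr = to_hyp_va(*ptr);
	}

	printf("0x%lx 0x%lx\n", (unsigned long)blob.slots[0], (unsigned long)blob.slots[1]);
	return 0;
}
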
@@ -121,9 +158,6 @@

 	BUG_ON(nr_inst != 5);

-	if (!has_vhe() && !va_mask)
-		compute_layout();
-
 	for (i = 0; i < nr_inst; i++) {
 		u32 rd, rn, insn, oinsn;

@@ -131,11 +165,11 @@
 		 * VHE doesn't need any address translation, let's NOP
 		 * everything.
 		 *
-		 * Alternatively, if we don't have any spare bits in
-		 * the address, NOP everything after masking that
-		 * kernel VA.
+		 * Alternatively, if the tag is zero (because the layout
+		 * dictates it and we don't have any spare bits in the
+		 * address), NOP everything after masking the kernel VA.
 		 */
-		if (has_vhe() || (!tag_lsb && i > 0)) {
+		if (has_vhe() || (!tag_val && i > 0)) {
 			updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
 			continue;
 		}
@@ -151,50 +185,30 @@
 	}
 }

-void *__kvm_bp_vect_base;
-int __kvm_harden_el2_vector_slot;
-
 void kvm_patch_vector_branch(struct alt_instr *alt,
 			     __le32 *origptr, __le32 *updptr, int nr_inst)
 {
 	u64 addr;
 	u32 insn;

-	BUG_ON(nr_inst != 5);
+	BUG_ON(nr_inst != 4);

-	if (has_vhe() || !cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
-		WARN_ON_ONCE(cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS));
+	if (!cpus_have_const_cap(ARM64_SPECTRE_V3A) || WARN_ON_ONCE(has_vhe()))
 		return;
-	}
-
-	if (!va_mask)
-		compute_layout();

 	/*
 	 * Compute HYP VA by using the same computation as kern_hyp_va()
 	 */
-	addr = (uintptr_t)kvm_ksym_ref(__kvm_hyp_vector);
-	addr &= va_mask;
-	addr |= tag_val << tag_lsb;
+	addr = __early_kern_hyp_va((u64)kvm_ksym_ref(__kvm_hyp_vector));

 	/* Use PC[10:7] to branch to the same vector in KVM */
 	addr |= ((u64)origptr & GENMASK_ULL(10, 7));

 	/*
-	 * Branch to the second instruction in the vectors in order to
-	 * avoid the initial store on the stack (which we already
-	 * perform in the hardening vectors).
+	 * Branch over the preamble in order to avoid the initial store on
+	 * the stack (which we already perform in the hardening vectors).
 	 */
-	addr += AARCH64_INSN_SIZE;
-
-	/* stp x0, x1, [sp, #-16]! */
-	insn = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_0,
-						AARCH64_INSN_REG_1,
-						AARCH64_INSN_REG_SP,
-						-16,
-						AARCH64_INSN_VARIANT_64BIT,
-						AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
-	*updptr++ = cpu_to_le32(insn);
+	addr += KVM_VECTOR_PREAMBLE;

 	/* movz x0, #(addr & 0xffff) */
 	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
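
A quick arithmetic check of the PC[10:7] trick above: the 16 vectors are 0x80 bytes apart, so bits [10:7] of the faulting slot's address select the matching slot in the hyp copy. The addresses below are invented, and the 8-byte preamble size is only an assumption (the real one comes from KVM_VECTOR_PREAMBLE in the kernel headers).

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l)	((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define VECTOR_PREAMBLE_BYTES	8	/* hypothetical: 2 instructions */

int main(void)
{
	uint64_t hyp_vector_base = 0x40000000ULL;	/* invented hyp VA, 2KB aligned */
	uint64_t origptr = 0x7fff2480ULL;		/* invented alternative site */
	uint64_t addr = hyp_vector_base;

	addr |= origptr & GENMASK_ULL(10, 7);	/* same vector slot: here 0x480 */
	addr += VECTOR_PREAMBLE_BYTES;		/* branch past the preamble stores */

	printf("branch target: 0x%llx\n", (unsigned long long)addr);
	return 0;
}
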
@@ -225,3 +239,59 @@
 					 AARCH64_INSN_BRANCH_NOLINK);
 	*updptr++ = cpu_to_le32(insn);
 }
+
+static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+	u32 insn, oinsn, rd;
+
+	BUG_ON(nr_inst != 4);
+
+	/* Compute target register */
+	oinsn = le32_to_cpu(*origptr);
+	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
+
+	/* movz rd, #(val & 0xffff) */
+	insn = aarch64_insn_gen_movewide(rd,
+					 (u16)val,
+					 0,
+					 AARCH64_INSN_VARIANT_64BIT,
+					 AARCH64_INSN_MOVEWIDE_ZERO);
+	*updptr++ = cpu_to_le32(insn);
+
+	/* movk rd, #((val >> 16) & 0xffff), lsl #16 */
+	insn = aarch64_insn_gen_movewide(rd,
+					 (u16)(val >> 16),
+					 16,
+					 AARCH64_INSN_VARIANT_64BIT,
+					 AARCH64_INSN_MOVEWIDE_KEEP);
+	*updptr++ = cpu_to_le32(insn);
+
+	/* movk rd, #((val >> 32) & 0xffff), lsl #32 */
+	insn = aarch64_insn_gen_movewide(rd,
+					 (u16)(val >> 32),
+					 32,
+					 AARCH64_INSN_VARIANT_64BIT,
+					 AARCH64_INSN_MOVEWIDE_KEEP);
+	*updptr++ = cpu_to_le32(insn);
+
+	/* movk rd, #((val >> 48) & 0xffff), lsl #48 */
+	insn = aarch64_insn_gen_movewide(rd,
+					 (u16)(val >> 48),
+					 48,
+					 AARCH64_INSN_VARIANT_64BIT,
+					 AARCH64_INSN_MOVEWIDE_KEEP);
+	*updptr++ = cpu_to_le32(insn);
+}
+
+void kvm_get_kimage_voffset(struct alt_instr *alt,
+			    __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+	generate_mov_q(kimage_voffset, origptr, updptr, nr_inst);
+}
+
+void kvm_compute_final_ctr_el0(struct alt_instr *alt,
+			       __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+	generate_mov_q(read_sanitised_ftr_reg(SYS_CTR_EL0),
+		       origptr, updptr, nr_inst);
+}
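
For reference, a host-side model of what the four patched movz/movk instructions compute: each 16-bit chunk of the constant, shifted back into position, reassembles the original 64-bit value. The sample constant is arbitrary.

#include <assert.h>
#include <stdint.h>

static uint64_t mov_q_model(uint64_t val)
{
	uint64_t rd;

	rd  = (uint16_t)val;				/* movz rd, #(val & 0xffff) */
	rd |= (uint64_t)(uint16_t)(val >> 16) << 16;	/* movk rd, #..., lsl #16 */
	rd |= (uint64_t)(uint16_t)(val >> 32) << 32;	/* movk rd, #..., lsl #32 */
	rd |= (uint64_t)(uint16_t)(val >> 48) << 48;	/* movk rd, #..., lsl #48 */
	return rd;
}

int main(void)
{
	assert(mov_q_model(0xdeadbeefcafef00dULL) == 0xdeadbeefcafef00dULL);
	return 0;
}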