forked from ~ljy/RK356X_SDK_RELEASE

Author: hc
Date:   2024-05-10
Commit: 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
File:   kernel/arch/arm64/kvm/handle_exit.c
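Summary (drawn from the diff below, which tracks the corresponding upstream Linux rework of these interfaces): the GPL license boilerplate is replaced by an SPDX tag; exit handlers drop their struct kvm_run * parameter and reach it through vcpu->run; the "hsr" naming becomes "esr" (kvm_vcpu_get_hsr() -> kvm_vcpu_get_esr()); instruction skipping goes through kvm_incr_pc() instead of kvm_skip_instr(); the single-step fixups via kvm_arm_handle_step_debug() leave handle_exit(); and handling is added for ptrauth traps (ESR_ELx_EC_PAC -> kvm_handle_ptrauth()) and illegal exception returns (ARM_EXCEPTION_IL).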
--- a/kernel/arch/arm64/kvm/handle_exit.c
+++ b/kernel/arch/arm64/kvm/handle_exit.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2012,2013 - ARM Ltd
  * Author: Marc Zyngier <marc.zyngier@arm.com>
@@ -5,38 +6,25 @@
  * Derived from arch/arm/kvm/handle_exit.c:
  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 
-#include <kvm/arm_psci.h>
-
 #include <asm/esr.h>
 #include <asm/exception.h>
 #include <asm/kvm_asm.h>
-#include <asm/kvm_coproc.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
 #include <asm/debug-monitors.h>
 #include <asm/traps.h>
 
-#define CREATE_TRACE_POINTS
-#include "trace.h"
+#include <kvm/arm_hypercalls.h>
 
-typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
+#define CREATE_TRACE_POINTS
+#include "trace_handle_exit.h"
+
+typedef int (*exit_handle_fn)(struct kvm_vcpu *);
 
 static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
 {
@@ -44,7 +32,7 @@
         kvm_inject_vabt(vcpu);
 }
 
-static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_hvc(struct kvm_vcpu *vcpu)
 {
         int ret;
 
@@ -61,7 +49,7 @@
         return ret;
 }
 
-static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_smc(struct kvm_vcpu *vcpu)
 {
         /*
          * "If an SMC instruction executed at Non-secure EL1 is
@@ -72,7 +60,7 @@
          * otherwise return to the same address...
          */
         vcpu_set_reg(vcpu, 0, ~0UL);
-        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+        kvm_incr_pc(vcpu);
         return 1;
 }
 
@@ -80,7 +68,7 @@
  * Guest access to FP/ASIMD registers are routed to this handler only
  * when the system doesn't support FP/ASIMD.
  */
-static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
 {
         kvm_inject_undefined(vcpu);
         return 1;
@@ -98,9 +86,9 @@
  * world-switches and schedule other host processes until there is an
  * incoming IRQ or FIQ to the VM.
  */
-static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
 {
-        if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
+        if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
                 trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
                 vcpu->stat.wfe_exit_stat++;
                 kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
@@ -111,7 +99,7 @@
                 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
         }
 
-        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+        kvm_incr_pc(vcpu);
 
         return 1;
 }
@@ -120,34 +108,34 @@
  * kvm_handle_guest_debug - handle a debug exception instruction
  *
  * @vcpu:       the vcpu pointer
- * @run:        access to the kvm_run structure for results
  *
  * We route all debug exceptions through the same handler. If both the
  * guest and host are using the same debug facilities it will be up to
  * userspace to re-inject the correct exception for guest delivery.
  *
- * @return: 0 (while setting run->exit_reason), -1 for error
+ * @return: 0 (while setting vcpu->run->exit_reason), -1 for error
  */
-static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
 {
-        u32 hsr = kvm_vcpu_get_hsr(vcpu);
+        struct kvm_run *run = vcpu->run;
+        u32 esr = kvm_vcpu_get_esr(vcpu);
         int ret = 0;
 
         run->exit_reason = KVM_EXIT_DEBUG;
-        run->debug.arch.hsr = hsr;
+        run->debug.arch.hsr = esr;
 
-        switch (ESR_ELx_EC(hsr)) {
+        switch (ESR_ELx_EC(esr)) {
         case ESR_ELx_EC_WATCHPT_LOW:
                 run->debug.arch.far = vcpu->arch.fault.far_el2;
-                /* fall through */
+                fallthrough;
         case ESR_ELx_EC_SOFTSTP_LOW:
         case ESR_ELx_EC_BREAKPT_LOW:
         case ESR_ELx_EC_BKPT32:
         case ESR_ELx_EC_BRK64:
                 break;
         default:
-                kvm_err("%s: un-handled case hsr: %#08x\n",
-                        __func__, (unsigned int) hsr);
+                kvm_err("%s: un-handled case esr: %#08x\n",
+                        __func__, (unsigned int) esr);
                 ret = -1;
                 break;
         }
@@ -155,20 +143,31 @@
         return ret;
 }
 
-static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
 {
-        u32 hsr = kvm_vcpu_get_hsr(vcpu);
+        u32 esr = kvm_vcpu_get_esr(vcpu);
 
-        kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
-                      hsr, esr_get_class_string(hsr));
+        kvm_pr_unimpl("Unknown exception class: esr: %#08x -- %s\n",
+                      esr, esr_get_class_string(esr));
 
         kvm_inject_undefined(vcpu);
         return 1;
 }
 
-static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_sve(struct kvm_vcpu *vcpu)
 {
         /* Until SVE is supported for guests: */
+        kvm_inject_undefined(vcpu);
+        return 1;
+}
+
+/*
+ * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
+ * a NOP). If we get here, it is that we didn't fixup ptrauth on exit, and all
+ * that we can do is give the guest an UNDEF.
+ */
+static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
+{
         kvm_inject_undefined(vcpu);
         return 1;
 }
@@ -195,14 +194,15 @@
         [ESR_ELx_EC_BKPT32]     = kvm_handle_guest_debug,
         [ESR_ELx_EC_BRK64]      = kvm_handle_guest_debug,
         [ESR_ELx_EC_FP_ASIMD]   = handle_no_fpsimd,
+        [ESR_ELx_EC_PAC]        = kvm_handle_ptrauth,
 };
 
 static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 {
-        u32 hsr = kvm_vcpu_get_hsr(vcpu);
-        u8 hsr_ec = ESR_ELx_EC(hsr);
+        u32 esr = kvm_vcpu_get_esr(vcpu);
+        u8 esr_ec = ESR_ELx_EC(esr);
 
-        return arm_exit_handlers[hsr_ec];
+        return arm_exit_handlers[esr_ec];
 }
 
 /*
@@ -211,7 +211,7 @@
  * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
  * emulation first.
  */
-static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_trap_exceptions(struct kvm_vcpu *vcpu)
 {
         int handled;
 
@@ -220,21 +220,14 @@
          * that fail their condition code check"
          */
         if (!kvm_condition_valid(vcpu)) {
-                kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+                kvm_incr_pc(vcpu);
                 handled = 1;
         } else {
                 exit_handle_fn exit_handler;
 
                 exit_handler = kvm_get_exit_handler(vcpu);
-                handled = exit_handler(vcpu, run);
+                handled = exit_handler(vcpu);
         }
-
-        /*
-         * kvm_arm_handle_step_debug() sets the exit_reason on the kvm_run
-         * structure if we need to return to userspace.
-         */
-        if (handled > 0 && kvm_arm_handle_step_debug(vcpu, run))
-                handled = 0;
 
         return handled;
 }
@@ -243,23 +236,15 @@
  * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
  * proper exit to userspace.
  */
-int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
-                int exception_index)
+int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
 {
+        struct kvm_run *run = vcpu->run;
+
         if (ARM_SERROR_PENDING(exception_index)) {
-                u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
-
                 /*
-                 * HVC/SMC already have an adjusted PC, which we need
-                 * to correct in order to return to after having
-                 * injected the SError.
+                 * The SError is handled by handle_exit_early(). If the guest
+                 * survives it will re-execute the original instruction.
                  */
-                if (hsr_ec == ESR_ELx_EC_HVC32 || hsr_ec == ESR_ELx_EC_HVC64 ||
-                    hsr_ec == ESR_ELx_EC_SMC32 || hsr_ec == ESR_ELx_EC_SMC64) {
-                        u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
-                        *vcpu_pc(vcpu) -= adj;
-                }
-
                 return 1;
         }
 
@@ -269,14 +254,9 @@
         case ARM_EXCEPTION_IRQ:
                 return 1;
         case ARM_EXCEPTION_EL1_SERROR:
-                /* We may still need to return for single-step */
-                if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS)
-                    && kvm_arm_handle_step_debug(vcpu, run))
-                        return 0;
-                else
-                        return 1;
+                return 1;
         case ARM_EXCEPTION_TRAP:
-                return handle_trap_exceptions(vcpu, run);
+                return handle_trap_exceptions(vcpu);
         case ARM_EXCEPTION_HYP_GONE:
                 /*
                  * EL2 has been reset to the hyp-stub. This happens when a guest
@@ -284,6 +264,13 @@
                  */
                 run->exit_reason = KVM_EXIT_FAIL_ENTRY;
                 return 0;
+        case ARM_EXCEPTION_IL:
+                /*
+                 * We attempted an illegal exception return. Guest state must
+                 * have been corrupted somehow. Give up.
+                 */
+                run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+                return -EINVAL;
         default:
                 kvm_pr_unimpl("Unsupported exception type: %d",
                               exception_index);
@@ -293,8 +280,7 @@
 }
 
 /* For exit types that need handling before we can be preempted */
-void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
-                       int exception_index)
+void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
 {
         if (ARM_SERROR_PENDING(exception_index)) {
                 if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
@@ -311,5 +297,5 @@
         exception_index = ARM_EXCEPTION_CODE(exception_index);
 
         if (exception_index == ARM_EXCEPTION_EL1_SERROR)
-                kvm_handle_guest_serror(vcpu, kvm_vcpu_get_hsr(vcpu));
+                kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
 }
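
Porting note: any downstream code in this SDK that still defines exit handlers with the old two-argument signature needs the same mechanical conversion. A minimal sketch follows; the handler name handle_example_trap and its trap condition are hypothetical, and only accessors that appear in the diff above are relied on:

/* before: static int handle_example_trap(struct kvm_vcpu *vcpu, struct kvm_run *run) */
static int handle_example_trap(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;        /* kvm_run now comes from the vcpu */
        u32 esr = kvm_vcpu_get_esr(vcpu);       /* was kvm_vcpu_get_hsr(vcpu) */

        if (ESR_ELx_EC(esr) == ESR_ELx_EC_UNKNOWN) {
                run->exit_reason = KVM_EXIT_FAIL_ENTRY;
                return 0;                       /* 0: exit to userspace */
        }

        kvm_incr_pc(vcpu);                      /* was kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)) */
        return 1;                               /* >0: return to the guest */
}

Such a handler is then wired into arm_exit_handlers[] keyed by its ESR exception class, exactly as the diff does for ESR_ELx_EC_PAC.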