From ea08eeccae9297f7aabd2ef7f0c2517ac4549acc Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 20 Feb 2024 01:18:26 +0000
Subject: [PATCH] write in 30M
---
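Note (not part of the commit): the large hunk below stops deciding in real
mode whether the machine check was handled. It now releases the event from
the realmode queue and stashes it in vcpu->arch.mce_evt so the primary
thread can deal with it after switching to virtual mode. A rough sketch of
the consumer side this flow assumes is given here for orientation only; the
helper names are placeholders for illustration, not the actual book3s_hv
exit code.

	/* Hypothetical virtual-mode consumer of the stashed event. */
	static void handle_stashed_mce(struct kvm_vcpu *vcpu)
	{
		struct machine_check_event *evt = &vcpu->arch.mce_evt;

		/* Safe to log here: we run in virtual mode on the primary thread. */
		log_mce_on_host_console(evt);			/* placeholder helper */

		if (vcpu->kvm->arch.fwnmi_enabled)
			/* FWNMI-capable guests see the error as a KVM_EXIT_NMI. */
			report_nmi_exit_to_userspace(vcpu);	/* placeholder helper */
		else if (evt->disposition != MCE_DISPOSITION_RECOVERED)
			/* Otherwise deliver a machine check interrupt to the guest. */
			deliver_mce_to_guest(vcpu);		/* placeholder helper */
	}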
kernel/arch/powerpc/kvm/book3s_hv_ras.c | 75 +++++++++++++------------------------
1 file changed, 27 insertions(+), 48 deletions(-)
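Also in this patch: the last hunk clears kvm_vcore->tb_offset_applied after
an HMI timebase resync. A minimal sketch of the guest-exit adjustment this
guards against is shown below; the shape is an assumption for illustration,
not code taken from this patch.

	/* On guest exit, any timebase offset applied on entry is undone: */
	if (vc->tb_offset_applied) {
		u64 new_tb = mftb() - vc->tb_offset_applied;	/* assumed shape */
		mtspr(SPRN_TBU40, new_tb);
		vc->tb_offset_applied = 0;
	}

After wait_for_tb_resync() the timebase already holds the host value, so
subtracting the stale guest offset on the way out would corrupt it; zeroing
tb_offset_applied in the HMI handler turns that adjustment into a no-op.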
diff --git a/kernel/arch/powerpc/kvm/book3s_hv_ras.c b/kernel/arch/powerpc/kvm/book3s_hv_ras.c
index b11043b..7c693b3 100644
--- a/kernel/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/kernel/arch/powerpc/kvm/book3s_hv_ras.c
@@ -1,7 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
*
* Copyright 2012 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*/
@@ -11,6 +9,7 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
+#include <asm/lppaca.h>
#include <asm/opal.h>
#include <asm/mce.h>
#include <asm/machdep.h>
@@ -66,10 +65,8 @@
/*
* On POWER7, see if we can handle a machine check that occurred inside
* the guest in real mode, without switching to the host partition.
- *
- * Returns: 0 => exit guest, 1 => deliver machine check to guest
*/
-static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
+static void kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
{
unsigned long srr1 = vcpu->arch.shregs.msr;
struct machine_check_event mce_evt;
@@ -111,52 +108,24 @@
}
/*
- * See if we have already handled the condition in the linux host.
- * We assume that if the condition is recovered then linux host
- * will have generated an error log event that we will pick
- * up and log later.
- * Don't release mce event now. We will queue up the event so that
- * we can log the MCE event info on host console.
+ * Now get the event and stash it in the vcpu struct so it can
+ * be handled by the primary thread in virtual mode. We can't
+ * call machine_check_queue_event() here if we are running on
+ * an offline secondary thread.
*/
- if (!get_mce_event(&mce_evt, MCE_EVENT_DONTRELEASE))
- goto out;
+ if (get_mce_event(&mce_evt, MCE_EVENT_RELEASE)) {
+ if (handled && mce_evt.version == MCE_V1)
+ mce_evt.disposition = MCE_DISPOSITION_RECOVERED;
+ } else {
+ memset(&mce_evt, 0, sizeof(mce_evt));
+ }
- if (mce_evt.version == MCE_V1 &&
- (mce_evt.severity == MCE_SEV_NO_ERROR ||
- mce_evt.disposition == MCE_DISPOSITION_RECOVERED))
- handled = 1;
-
-out:
- /*
- * For guest that supports FWNMI capability, hook the MCE event into
- * vcpu structure. We are going to exit the guest with KVM_EXIT_NMI
- * exit reason. On our way to exit we will pull this event from vcpu
- * structure and print it from thread 0 of the core/subcore.
- *
- * For guest that does not support FWNMI capability (old QEMU):
- * We are now going enter guest either through machine check
- * interrupt (for unhandled errors) or will continue from
- * current HSRR0 (for handled errors) in guest. Hence
- * queue up the event so that we can log it from host console later.
- */
- if (vcpu->kvm->arch.fwnmi_enabled) {
- /*
- * Hook up the mce event on to vcpu structure.
- * First clear the old event.
- */
- memset(&vcpu->arch.mce_evt, 0, sizeof(vcpu->arch.mce_evt));
- if (get_mce_event(&mce_evt, MCE_EVENT_RELEASE)) {
- vcpu->arch.mce_evt = mce_evt;
- }
- } else
- machine_check_queue_event();
-
- return handled;
+ vcpu->arch.mce_evt = mce_evt;
}
-long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
+void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
{
- return kvmppc_realmode_mc_power7(vcpu);
+ kvmppc_realmode_mc_power7(vcpu);
}
/* Check if dynamic split is in force and return subcore size accordingly. */
@@ -177,6 +146,7 @@
local_paca->sibling_subcore_state->in_guest[subcore_id] = 1;
}
+EXPORT_SYMBOL_GPL(kvmppc_subcore_enter_guest);
void kvmppc_subcore_exit_guest(void)
{
@@ -187,6 +157,7 @@
local_paca->sibling_subcore_state->in_guest[subcore_id] = 0;
}
+EXPORT_SYMBOL_GPL(kvmppc_subcore_exit_guest);
static bool kvmppc_tb_resync_required(void)
{
@@ -274,7 +245,7 @@
{
bool resync_req;
- __this_cpu_inc(irq_stat.hmi_exceptions);
+ local_paca->hmi_irqs++;
if (hmi_handle_debugtrig(NULL) >= 0)
return 1;
@@ -331,5 +302,13 @@
} else {
wait_for_tb_resync();
}
+
+ /*
+ * Reset tb_offset_applied so the guest exit code won't try
+ * to subtract the previous timebase offset from the timebase.
+ */
+ if (local_paca->kvm_hstate.kvm_vcore)
+ local_paca->kvm_hstate.kvm_vcore->tb_offset_applied = 0;
+
return 0;
}
--
Gitblit v1.6.2