| Old | New | Code |
|---|---|---|
| .. | .. | |
| | 1 | +// SPDX-License-Identifier: GPL-2.0-only |
| 1 | 2 | /* |
| 2 | | - * This program is free software; you can redistribute it and/or modify |
| 3 | | - * it under the terms of the GNU General Public License, version 2, as |
| 4 | | - * published by the Free Software Foundation. |
| 5 | 3 | * |
| 6 | 4 | * Copyright 2012 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> |
| 7 | 5 | */ |
| .. | .. | |
| 11 | 9 | #include <linux/kvm.h> |
| 12 | 10 | #include <linux/kvm_host.h> |
| 13 | 11 | #include <linux/kernel.h> |
| | 12 | +#include <asm/lppaca.h> |
| 14 | 13 | #include <asm/opal.h> |
| 15 | 14 | #include <asm/mce.h> |
| 16 | 15 | #include <asm/machdep.h> |
| .. | .. | |
| 66 | 65 | /* |
| 67 | 66 | * On POWER7, see if we can handle a machine check that occurred inside |
| 68 | 67 | * the guest in real mode, without switching to the host partition. |
| 69 | | - * |
| 70 | | - * Returns: 0 => exit guest, 1 => deliver machine check to guest |
| 71 | 68 | */ |
| 72 | | -static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu) |
| | 69 | +static void kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu) |
| 73 | 70 | { |
| 74 | 71 | unsigned long srr1 = vcpu->arch.shregs.msr; |
| 75 | 72 | struct machine_check_event mce_evt; |
| .. | .. | |
| 111 | 108 | } |
| 112 | 109 | |
| 113 | 110 | /* |
| 114 | | - * See if we have already handled the condition in the linux host. |
| 115 | | - * We assume that if the condition is recovered then linux host |
| 116 | | - * will have generated an error log event that we will pick |
| 117 | | - * up and log later. |
| 118 | | - * Don't release mce event now. We will queue up the event so that |
| 119 | | - * we can log the MCE event info on host console. |
| | 111 | + * Now get the event and stash it in the vcpu struct so it can |
| | 112 | + * be handled by the primary thread in virtual mode. We can't |
| | 113 | + * call machine_check_queue_event() here if we are running on |
| | 114 | + * an offline secondary thread. |
| 120 | 115 | */ |
| 121 | | - if (!get_mce_event(&mce_evt, MCE_EVENT_DONTRELEASE)) |
| 122 | | - goto out; |
| | 116 | + if (get_mce_event(&mce_evt, MCE_EVENT_RELEASE)) { |
| | 117 | + if (handled && mce_evt.version == MCE_V1) |
| | 118 | + mce_evt.disposition = MCE_DISPOSITION_RECOVERED; |
| | 119 | + } else { |
| | 120 | + memset(&mce_evt, 0, sizeof(mce_evt)); |
| | 121 | + } |
| 123 | 122 | |
| 124 | | - if (mce_evt.version == MCE_V1 && |
| 125 | | - (mce_evt.severity == MCE_SEV_NO_ERROR || |
| 126 | | - mce_evt.disposition == MCE_DISPOSITION_RECOVERED)) |
| 127 | | - handled = 1; |
| 128 | | - |
| 129 | | -out: |
| 130 | | - /* |
| 131 | | - * For guest that supports FWNMI capability, hook the MCE event into |
| 132 | | - * vcpu structure. We are going to exit the guest with KVM_EXIT_NMI |
| 133 | | - * exit reason. On our way to exit we will pull this event from vcpu |
| 134 | | - * structure and print it from thread 0 of the core/subcore. |
| 135 | | - * |
| 136 | | - * For guest that does not support FWNMI capability (old QEMU): |
| 137 | | - * We are now going enter guest either through machine check |
| 138 | | - * interrupt (for unhandled errors) or will continue from |
| 139 | | - * current HSRR0 (for handled errors) in guest. Hence |
| 140 | | - * queue up the event so that we can log it from host console later. |
| 141 | | - */ |
| 142 | | - if (vcpu->kvm->arch.fwnmi_enabled) { |
| 143 | | - /* |
| 144 | | - * Hook up the mce event on to vcpu structure. |
| 145 | | - * First clear the old event. |
| 146 | | - */ |
| 147 | | - memset(&vcpu->arch.mce_evt, 0, sizeof(vcpu->arch.mce_evt)); |
| 148 | | - if (get_mce_event(&mce_evt, MCE_EVENT_RELEASE)) { |
| 149 | | - vcpu->arch.mce_evt = mce_evt; |
| 150 | | - } |
| 151 | | - } else |
| 152 | | - machine_check_queue_event(); |
| 153 | | - |
| 154 | | - return handled; |
| | 123 | + vcpu->arch.mce_evt = mce_evt; |
| 155 | 124 | } |
| 156 | 125 | |
| 157 | | -long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu) |
| | 126 | +void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu) |
| 158 | 127 | { |
| 159 | | - return kvmppc_realmode_mc_power7(vcpu); |
| | 128 | + kvmppc_realmode_mc_power7(vcpu); |
| 160 | 129 | } |
| 161 | 130 | |
| 162 | 131 | /* Check if dynamic split is in force and return subcore size accordingly. */ |
| .. | .. | |
| 177 | 146 | |
| 178 | 147 | local_paca->sibling_subcore_state->in_guest[subcore_id] = 1; |
| 179 | 148 | } |
| | 149 | +EXPORT_SYMBOL_GPL(kvmppc_subcore_enter_guest); |
| 180 | 150 | |
| 181 | 151 | void kvmppc_subcore_exit_guest(void) |
| 182 | 152 | { |
| .. | .. | |
| 187 | 157 | |
| 188 | 158 | local_paca->sibling_subcore_state->in_guest[subcore_id] = 0; |
| 189 | 159 | } |
| | 160 | +EXPORT_SYMBOL_GPL(kvmppc_subcore_exit_guest); |
| 190 | 161 | |
| 191 | 162 | static bool kvmppc_tb_resync_required(void) |
| 192 | 163 | { |
| .. | .. | |
| 274 | 245 | { |
| 275 | 246 | bool resync_req; |
| 276 | 247 | |
| 277 | | - __this_cpu_inc(irq_stat.hmi_exceptions); |
| | 248 | + local_paca->hmi_irqs++; |
| 278 | 249 | |
| 279 | 250 | if (hmi_handle_debugtrig(NULL) >= 0) |
| 280 | 251 | return 1; |
| .. | .. | |
| 331 | 302 | } else { |
| 332 | 303 | wait_for_tb_resync(); |
| 333 | 304 | } |
| | 305 | + |
| | 306 | + /* |
| | 307 | + * Reset tb_offset_applied so the guest exit code won't try |
| | 308 | + * to subtract the previous timebase offset from the timebase. |
| | 309 | + */ |
| | 310 | + if (local_paca->kvm_hstate.kvm_vcore) |
| | 311 | + local_paca->kvm_hstate.kvm_vcore->tb_offset_applied = 0; |
| | 312 | + |
| 334 | 313 | return 0; |
| 335 | 314 | } |
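
With this change the real-mode handler no longer decides between queueing the event for host logging and hooking it into the vcpu for an FWNMI-capable guest; it only stashes the event in `vcpu->arch.mce_evt` and leaves the rest to the primary thread once it is back in virtual mode. The sketch below is a hedged illustration of what that virtual-mode consumer could look like; the helper `example_virtual_mode_mce_exit()` and its control flow are assumptions for illustration, while `MCE_V1`, `MCE_DISPOSITION_RECOVERED`, `KVM_EXIT_NMI`, `fwnmi_enabled`, and `vcpu->arch.mce_evt` are taken from the diff above. It is not the kernel's actual exit path.

```c
#include <linux/kvm_host.h>
#include <asm/mce.h>

/*
 * Hypothetical helper (not part of this patch): run on the primary
 * thread in virtual mode, after kvmppc_realmode_mc_power7() has stashed
 * the machine check event in vcpu->arch.mce_evt.
 *
 * Returns 1 to resume the guest, 0 to exit to the host/userspace.
 */
static int example_virtual_mode_mce_exit(struct kvm_vcpu *vcpu)
{
	struct machine_check_event *evt = &vcpu->arch.mce_evt;

	/* A recovered error lets the guest resume where it was interrupted. */
	if (evt->version == MCE_V1 &&
	    evt->disposition == MCE_DISPOSITION_RECOVERED)
		return 1;

	/*
	 * Unrecovered error: for an FWNMI-capable guest, exit to userspace
	 * with KVM_EXIT_NMI so QEMU can build an error log and inject the
	 * machine check into the guest.
	 */
	if (vcpu->kvm->arch.fwnmi_enabled)
		vcpu->run->exit_reason = KVM_EXIT_NMI;

	return 0;
}
```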