```diff
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Intel SMP support routines.
  *
@@ -6,9 +7,6 @@
  * (c) 2002,2003 Andi Kleen, SuSE Labs.
  *
  * i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com>
- *
- * This code is released under the GNU General Public License version 2 or
- * later.
  */
 
 #include <linux/init.h>
@@ -29,11 +27,12 @@
 #include <asm/mmu_context.h>
 #include <asm/proto.h>
 #include <asm/apic.h>
+#include <asm/idtentry.h>
 #include <asm/nmi.h>
 #include <asm/mce.h>
 #include <asm/trace/irq_vectors.h>
 #include <asm/kexec.h>
-#include <asm/virtext.h>
+#include <asm/reboot.h>
 
 /*
  * Some notes on x86 processor bugs affecting SMP operation:
@@ -117,53 +116,13 @@
 static atomic_t stopping_cpu = ATOMIC_INIT(-1);
 static bool smp_no_nmi_ipi = false;
 
-/*
- * this function sends a 'reschedule' IPI to another CPU.
- * it goes straight through and wastes no time serializing
- * anything. Worst case is that we lose a reschedule ...
- */
-static void native_smp_send_reschedule(int cpu)
-{
-	if (unlikely(cpu_is_offline(cpu))) {
-		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
-		return;
-	}
-	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
-}
-
-void native_send_call_func_single_ipi(int cpu)
-{
-	apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
-}
-
-void native_send_call_func_ipi(const struct cpumask *mask)
-{
-	cpumask_var_t allbutself;
-
-	if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
-		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
-		return;
-	}
-
-	cpumask_copy(allbutself, cpu_online_mask);
-	cpumask_clear_cpu(smp_processor_id(), allbutself);
-
-	if (cpumask_equal(mask, allbutself) &&
-	    cpumask_equal(cpu_online_mask, cpu_callout_mask))
-		apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-	else
-		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
-
-	free_cpumask_var(allbutself);
-}
-
 static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
 {
 	/* We are registered on stopping cpu too, avoid spurious NMI */
 	if (raw_smp_processor_id() == atomic_read(&stopping_cpu))
 		return NMI_HANDLED;
 
-	cpu_emergency_vmxoff();
+	cpu_emergency_disable_virtualization();
 	stop_this_cpu(NULL);
 
 	return NMI_HANDLED;
```
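Two independent cleanups meet in this hunk. First, the reschedule and call-function IPI senders are not deleted outright: in mainline they were moved out of this file into arch/x86/kernel/apic/ipi.c along with the rest of the IPI plumbing. Second, cpu_emergency_vmxoff(), which only takes Intel CPUs out of VMX root mode, is replaced by cpu_emergency_disable_virtualization(), declared in asm/reboot.h (hence the include swap above), which also disables AMD's SVM, so a CPU stopped via NMI can no longer be left with virtualization enabled across a crash or kexec. A minimal sketch of what the new helper amounts to, assuming the virtext.h-era per-vendor primitives; the real implementation lives in arch/x86/kernel/reboot.c and has grown additional crash handling since:

```c
/*
 * Sketch only, not the kernel's exact code: disable whichever
 * virtualization extension this CPU might have enabled.
 */
static void cpu_emergency_disable_virtualization(void)
{
	cpu_emergency_vmxoff();		/* leave VMX root mode (Intel) */
	cpu_emergency_svm_disable();	/* clear EFER.SVME (AMD) */
}
```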
```diff
@@ -172,13 +131,11 @@
 /*
  * this function calls the 'stop' function on all other CPUs in the system.
  */
-
-asmlinkage __visible void smp_reboot_interrupt(void)
+DEFINE_IDTENTRY_SYSVEC(sysvec_reboot)
 {
-	ipi_entering_ack_irq();
-	cpu_emergency_vmxoff();
+	ack_APIC_irq();
+	cpu_emergency_disable_virtualization();
 	stop_this_cpu(NULL);
-	irq_exit();
 }
 
 static int register_stop_handler(void)
```
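DEFINE_IDTENTRY_SYSVEC() is why the open-coded ipi_entering_ack_irq() and irq_exit() calls vanish: the macro generates the entry/exit bookkeeping around the handler body, leaving only the vector-specific ack_APIC_irq() inside it. An approximate expansion, simplified from arch/x86/include/asm/idtentry.h (the real macro also routes the body onto the irq stack):

```c
/* Approximate expansion of DEFINE_IDTENTRY_SYSVEC(sysvec_reboot). */
static void __sysvec_reboot(struct pt_regs *regs);

__visible noinstr void sysvec_reboot(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);	/* replaces ipi_entering_ack_irq() */

	instrumentation_begin();
	irq_enter_rcu();
	__sysvec_reboot(regs);
	irq_exit_rcu();					/* replaces irq_exit() */
	instrumentation_end();
	irqentry_exit(regs, state);
}

/* The braces written after the macro invocation become this body: */
static void __sysvec_reboot(struct pt_regs *regs)
{
	ack_APIC_irq();
	cpu_emergency_disable_virtualization();
	stop_this_cpu(NULL);
}
```

The call-function handlers converted further down follow the same pattern.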
```diff
@@ -217,7 +174,7 @@
 	/* sync above data before sending IRQ */
 	wmb();
 
-	apic->send_IPI_allbutself(REBOOT_VECTOR);
+	apic_send_IPI_allbutself(REBOOT_VECTOR);
 
 	/*
 	 * Don't wait longer than a second for IPI completion. The
@@ -243,7 +200,7 @@
 
 		pr_emerg("Shutting down cpus with NMI\n");
 
-		apic->send_IPI_allbutself(NMI_VECTOR);
+		apic_send_IPI_allbutself(NMI_VECTOR);
 	}
 	/*
 	 * Don't wait longer than 10 ms if the caller didn't
```
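Both stop paths now go through apic_send_IPI_allbutself() instead of invoking the APIC driver's shorthand method directly. The wrapper centralizes the decision of whether the hardware "all but self" shorthand is safe to use, falling back to an explicit online-CPU mask otherwise; the static branch is only enabled when shorthands are known to be safe, for example it stays off while some present CPUs are offline. Roughly, the wrapper in arch/x86/kernel/apic/ipi.c does this:

```c
/* Roughly the wrapper in arch/x86/kernel/apic/ipi.c, not a verbatim copy. */
void apic_send_IPI_allbutself(unsigned int vector)
{
	if (num_online_cpus() < 2)
		return;					/* nobody else to interrupt */

	if (static_branch_likely(&apic_use_ipi_shorthand))
		apic->send_IPI_allbutself(vector);	/* hardware shorthand */
	else
		apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
}
```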
```diff
@@ -263,47 +220,33 @@
 
 /*
  * Reschedule call back. KVM uses this interrupt to force a cpu out of
- * guest mode
+ * guest mode.
  */
-__visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_reschedule_ipi)
 {
 	ack_APIC_irq();
+	trace_reschedule_entry(RESCHEDULE_VECTOR);
 	inc_irq_stat(irq_resched_count);
-	kvm_set_cpu_l1tf_flush_l1d();
-
-	if (trace_resched_ipi_enabled()) {
-		/*
-		 * scheduler_ipi() might call irq_enter() as well, but
-		 * nested calls are fine.
-		 */
-		irq_enter();
-		trace_reschedule_entry(RESCHEDULE_VECTOR);
-		scheduler_ipi();
-		trace_reschedule_exit(RESCHEDULE_VECTOR);
-		irq_exit();
-		return;
-	}
 	scheduler_ipi();
+	trace_reschedule_exit(RESCHEDULE_VECTOR);
 }
 
-__visible void __irq_entry smp_call_function_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_call_function)
 {
-	ipi_entering_ack_irq();
+	ack_APIC_irq();
 	trace_call_function_entry(CALL_FUNCTION_VECTOR);
 	inc_irq_stat(irq_call_count);
 	generic_smp_call_function_interrupt();
 	trace_call_function_exit(CALL_FUNCTION_VECTOR);
-	exiting_irq();
 }
 
-__visible void __irq_entry smp_call_function_single_interrupt(struct pt_regs *r)
+DEFINE_IDTENTRY_SYSVEC(sysvec_call_function_single)
 {
-	ipi_entering_ack_irq();
+	ack_APIC_irq();
 	trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
 	inc_irq_stat(irq_call_count);
 	generic_smp_call_function_single_interrupt();
 	trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR);
-	exiting_irq();
 }
 
 static int __init nonmi_ipi_setup(char *str)
```
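The reschedule IPI uses the _SIMPLE variant of the macro because scheduler_ipi() is cheap and needs neither the irq stack switch nor the full irq_enter_rcu()/irq_exit_rcu() path; that in turn lets the tracepoints run unconditionally, replacing the old trace_resched_ipi_enabled() dance with its nested irq_enter(). The removed kvm_set_cpu_l1tf_flush_l1d() call is not lost either: the idtentry glue issues it. An approximate expansion, again simplified from arch/x86/include/asm/idtentry.h:

```c
/*
 * Approximate expansion of DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_reschedule_ipi).
 * Unlike the full variant, it stays on the current stack and uses only raw
 * irq accounting.
 */
__visible noinstr void sysvec_reschedule_ipi(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	instrumentation_begin();
	__irq_enter_raw();
	kvm_set_cpu_l1tf_flush_l1d();	/* was open-coded in the old handler */
	__sysvec_reschedule_ipi(regs);	/* the handler body from the hunk above */
	__irq_exit_raw();
	instrumentation_end();
	irqentry_exit(regs, state);
}
```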
|---|