2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/arch/x86/kernel/smp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Intel SMP support routines.
  *
@@ -6,9 +7,6 @@
  * (c) 2002,2003 Andi Kleen, SuSE Labs.
  *
  * i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com>
- *
- * This code is released under the GNU General Public License version 2 or
- * later.
  */
 
 #include <linux/init.h>
@@ -29,11 +27,12 @@
 #include <asm/mmu_context.h>
 #include <asm/proto.h>
 #include <asm/apic.h>
+#include <asm/idtentry.h>
 #include <asm/nmi.h>
 #include <asm/mce.h>
 #include <asm/trace/irq_vectors.h>
 #include <asm/kexec.h>
-#include <asm/virtext.h>
+#include <asm/reboot.h>
 
 /*
  * Some notes on x86 processor bugs affecting SMP operation:
@@ -117,53 +116,13 @@
 static atomic_t stopping_cpu = ATOMIC_INIT(-1);
 static bool smp_no_nmi_ipi = false;
 
-/*
- * this function sends a 'reschedule' IPI to another CPU.
- * it goes straight through and wastes no time serializing
- * anything. Worst case is that we lose a reschedule ...
- */
-static void native_smp_send_reschedule(int cpu)
-{
-	if (unlikely(cpu_is_offline(cpu))) {
-		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
-		return;
-	}
-	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
-}
-
-void native_send_call_func_single_ipi(int cpu)
-{
-	apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
-}
-
-void native_send_call_func_ipi(const struct cpumask *mask)
-{
-	cpumask_var_t allbutself;
-
-	if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
-		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
-		return;
-	}
-
-	cpumask_copy(allbutself, cpu_online_mask);
-	cpumask_clear_cpu(smp_processor_id(), allbutself);
-
-	if (cpumask_equal(mask, allbutself) &&
-	    cpumask_equal(cpu_online_mask, cpu_callout_mask))
-		apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-	else
-		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
-
-	free_cpumask_var(allbutself);
-}
-
 static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
 {
 	/* We are registered on stopping cpu too, avoid spurious NMI */
 	if (raw_smp_processor_id() == atomic_read(&stopping_cpu))
 		return NMI_HANDLED;
 
-	cpu_emergency_vmxoff();
+	cpu_emergency_disable_virtualization();
 	stop_this_cpu(NULL);
 
 	return NMI_HANDLED;
@@ -172,13 +131,11 @@
 /*
  * this function calls the 'stop' function on all other CPUs in the system.
  */
-
-asmlinkage __visible void smp_reboot_interrupt(void)
+DEFINE_IDTENTRY_SYSVEC(sysvec_reboot)
 {
-	ipi_entering_ack_irq();
-	cpu_emergency_vmxoff();
+	ack_APIC_irq();
+	cpu_emergency_disable_virtualization();
 	stop_this_cpu(NULL);
-	irq_exit();
 }
 
 static int register_stop_handler(void)
@@ -217,7 +174,7 @@
 	/* sync above data before sending IRQ */
 	wmb();
 
-	apic->send_IPI_allbutself(REBOOT_VECTOR);
+	apic_send_IPI_allbutself(REBOOT_VECTOR);
 
 	/*
 	 * Don't wait longer than a second for IPI completion. The
@@ -243,7 +200,7 @@
 
 			pr_emerg("Shutting down cpus with NMI\n");
 
-			apic->send_IPI_allbutself(NMI_VECTOR);
+			apic_send_IPI_allbutself(NMI_VECTOR);
 		}
 		/*
 		 * Don't wait longer than 10 ms if the caller didn't
@@ -263,47 +220,33 @@
 
 /*
  * Reschedule call back. KVM uses this interrupt to force a cpu out of
- * guest mode
+ * guest mode.
  */
-__visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_reschedule_ipi)
 {
 	ack_APIC_irq();
+	trace_reschedule_entry(RESCHEDULE_VECTOR);
 	inc_irq_stat(irq_resched_count);
-	kvm_set_cpu_l1tf_flush_l1d();
-
-	if (trace_resched_ipi_enabled()) {
-		/*
-		 * scheduler_ipi() might call irq_enter() as well, but
-		 * nested calls are fine.
-		 */
-		irq_enter();
-		trace_reschedule_entry(RESCHEDULE_VECTOR);
-		scheduler_ipi();
-		trace_reschedule_exit(RESCHEDULE_VECTOR);
-		irq_exit();
-		return;
-	}
 	scheduler_ipi();
+	trace_reschedule_exit(RESCHEDULE_VECTOR);
 }
 
-__visible void __irq_entry smp_call_function_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_call_function)
 {
-	ipi_entering_ack_irq();
+	ack_APIC_irq();
	trace_call_function_entry(CALL_FUNCTION_VECTOR);
 	inc_irq_stat(irq_call_count);
 	generic_smp_call_function_interrupt();
 	trace_call_function_exit(CALL_FUNCTION_VECTOR);
-	exiting_irq();
 }
 
-__visible void __irq_entry smp_call_function_single_interrupt(struct pt_regs *r)
+DEFINE_IDTENTRY_SYSVEC(sysvec_call_function_single)
 {
-	ipi_entering_ack_irq();
+	ack_APIC_irq();
 	trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
 	inc_irq_stat(irq_call_count);
 	generic_smp_call_function_single_interrupt();
 	trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR);
-	exiting_irq();
 }
 
 static int __init nonmi_ipi_setup(char *str)
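
Note: a minimal consolidated sketch of the converted handler shape, reconstructed from the + lines above (indentation approximate, not part of the patch itself):

DEFINE_IDTENTRY_SYSVEC(sysvec_reboot)
{
	/*
	 * Only the APIC ack remains open-coded; the irq_enter()/irq_exit()
	 * bookkeeping that ipi_entering_ack_irq()/irq_exit() used to provide
	 * is handled by the DEFINE_IDTENTRY_SYSVEC entry code.
	 */
	ack_APIC_irq();
	cpu_emergency_disable_virtualization();
	stop_this_cpu(NULL);
}

The same pattern is applied to sysvec_call_function, sysvec_call_function_single and sysvec_reschedule_ipi, and the direct apic->send_IPI_allbutself() calls are switched to the apic_send_IPI_allbutself() helper.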