2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/arch/x86/kernel/irq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Common interrupt code for 32 and 64 bit
  */
@@ -12,21 +13,20 @@
 #include <linux/export.h>
 #include <linux/irq.h>
 
+#include <asm/irq_stack.h>
 #include <asm/apic.h>
 #include <asm/io_apic.h>
 #include <asm/irq.h>
 #include <asm/mce.h>
 #include <asm/hw_irq.h>
 #include <asm/desc.h>
+#include <asm/traps.h>
 
 #define CREATE_TRACE_POINTS
 #include <asm/trace/irq_vectors.h>
 
 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 EXPORT_PER_CPU_SYMBOL(irq_stat);
-
-DEFINE_PER_CPU(struct pt_regs *, irq_regs);
-EXPORT_PER_CPU_SYMBOL(irq_regs);
 
 atomic_t irq_err_count;
 
@@ -134,7 +134,7 @@
 		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
 	seq_puts(p, "  Machine check polls\n");
 #endif
-#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
+#ifdef CONFIG_X86_HV_CALLBACK_VECTOR
 	if (test_bit(HYPERVISOR_CALLBACK_VECTOR, system_vectors)) {
 		seq_printf(p, "%*s: ", prec, "HYP");
 		for_each_online_cpu(j)
@@ -223,31 +223,35 @@
 	return sum;
 }
 
+static __always_inline void handle_irq(struct irq_desc *desc,
+				       struct pt_regs *regs)
+{
+	if (IS_ENABLED(CONFIG_X86_64))
+		run_irq_on_irqstack_cond(desc->handle_irq, desc, regs);
+	else
+		__handle_irq(desc, regs);
+}
 
 /*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
+ * common_interrupt() handles all normal device IRQ's (the special SMP
+ * cross-CPU interrupts have their own entry points).
  */
-__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
+DEFINE_IDTENTRY_IRQ(common_interrupt)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
-	struct irq_desc * desc;
-	/* high bit used in ret_from_ code */
-	unsigned vector = ~regs->orig_ax;
+	struct irq_desc *desc;
 
-	entering_irq();
-
-	/* entering_irq() tells RCU that we're not quiescent. Check it. */
+	/* entry code tells RCU that we're not quiescent. Check it. */
 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");
 
 	desc = __this_cpu_read(vector_irq[vector]);
-
-	if (!handle_irq(desc, regs)) {
+	if (likely(!IS_ERR_OR_NULL(desc))) {
+		handle_irq(desc, regs);
+	} else {
 		ack_APIC_irq();
 
-		if (desc != VECTOR_RETRIGGERED && desc != VECTOR_SHUTDOWN) {
-			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
+		if (desc == VECTOR_UNUSED) {
+			pr_emerg_ratelimited("%s: %d.%u No irq handler for vector\n",
					     __func__, smp_processor_id(),
					     vector);
 		} else {
@@ -255,10 +259,7 @@
 		}
 	}
 
-	exiting_irq();
-
 	set_irq_regs(old_regs);
-	return 1;
 }
 
 #ifdef CONFIG_X86_LOCAL_APIC
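
Note (not part of the patch): the bookkeeping that do_IRQ() used to open-code, namely entering_irq()/exiting_irq() and recovering the vector from ~regs->orig_ax, now comes from the DEFINE_IDTENTRY_IRQ() wrapper in <asm/idtentry.h>, which passes the vector into the generated __common_interrupt() body as a parameter. A simplified sketch of roughly what that wrapper supplies is shown below; it is paraphrased, not the literal macro expansion, and the exact contents differ between kernel versions.

__visible noinstr void common_interrupt(struct pt_regs *regs,
					unsigned long error_code)
{
	/* Simplified sketch only -- see arch/x86/include/asm/idtentry.h. */
	irqentry_state_t state = irqentry_enter(regs);	/* RCU/lockdep/context tracking */
	u8 vector = (u8)error_code;			/* vector pushed by the IDT stub */

	instrumentation_begin();
	irq_enter_rcu();				/* roughly what entering_irq() did */
	kvm_set_cpu_l1tf_flush_l1d();
	__common_interrupt(regs, vector);		/* the body defined in the hunk above */
	irq_exit_rcu();					/* roughly what exiting_irq() did */
	instrumentation_end();
	irqentry_exit(regs, state);
}
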
@@ -267,17 +268,16 @@
 /*
  * Handler for X86_PLATFORM_IPI_VECTOR.
  */
-__visible void __irq_entry smp_x86_platform_ipi(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_x86_platform_ipi)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
-	entering_ack_irq();
+	ack_APIC_irq();
 	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
 	inc_irq_stat(x86_platform_ipis);
 	if (x86_platform_ipi_callback)
 		x86_platform_ipi_callback();
 	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
-	exiting_irq();
 	set_irq_regs(old_regs);
 }
 #endif
@@ -300,41 +300,29 @@
 /*
  * Handler for POSTED_INTERRUPT_VECTOR.
  */
-__visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_ipi)
 {
-	struct pt_regs *old_regs = set_irq_regs(regs);
-
-	entering_ack_irq();
+	ack_APIC_irq();
 	inc_irq_stat(kvm_posted_intr_ipis);
-	exiting_irq();
-	set_irq_regs(old_regs);
 }
 
 /*
  * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
  */
-__visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_posted_intr_wakeup_ipi)
 {
-	struct pt_regs *old_regs = set_irq_regs(regs);
-
-	entering_ack_irq();
+	ack_APIC_irq();
 	inc_irq_stat(kvm_posted_intr_wakeup_ipis);
 	kvm_posted_intr_wakeup_handler();
-	exiting_irq();
-	set_irq_regs(old_regs);
 }
 
 /*
  * Handler for POSTED_INTERRUPT_NESTED_VECTOR.
  */
-__visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_nested_ipi)
 {
-	struct pt_regs *old_regs = set_irq_regs(regs);
-
-	entering_ack_irq();
+	ack_APIC_irq();
 	inc_irq_stat(kvm_posted_intr_nested_ipis);
-	exiting_irq();
-	set_irq_regs(old_regs);
 }
 #endif
 
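
Note (not part of the patch): the same pattern applies to the system vectors above. entering_ack_irq() used to combine irq entry accounting with ack_APIC_irq(); since the entry accounting now lives in the IDTENTRY wrappers, the APIC acknowledgement is the only piece left to spell out in the handler bodies. DEFINE_IDTENTRY_SYSVEC() and its _SIMPLE variant differ roughly as sketched below, paraphrased from <asm/idtentry.h>: the _SIMPLE form skips the irq-stack switch and full irq-time accounting for trivial handlers such as the KVM posted-interrupt IPIs. Exact details vary by kernel version.

/* Simplified sketches, not literal macro expansions. */
__visible noinstr void sysvec_x86_platform_ipi(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	instrumentation_begin();
	irq_enter_rcu();
	kvm_set_cpu_l1tf_flush_l1d();
	run_sysvec_on_irqstack_cond(__sysvec_x86_platform_ipi, regs);
	irq_exit_rcu();
	instrumentation_end();
	irqentry_exit(regs, state);
}

__visible noinstr void sysvec_kvm_posted_intr_ipi(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	instrumentation_begin();
	__irq_enter_raw();			/* minimal accounting, no irq-stack switch */
	kvm_set_cpu_l1tf_flush_l1d();
	__sysvec_kvm_posted_intr_ipi(regs);	/* the one-line body defined above */
	__irq_exit_raw();
	instrumentation_end();
	irqentry_exit(regs, state);
}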