@@ -31,6 +31,7 @@
 #include <linux/i8253.h>
 #include <linux/dmar.h>
 #include <linux/init.h>
+#include <linux/irq.h>
 #include <linux/cpu.h>
 #include <linux/dmi.h>
 #include <linux/smp.h>
@@ -272,10 +273,10 @@
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
 	apic_write(APIC_ICR, low);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 u64 native_apic_icr_read(void)
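
The hunk above trades local_irq_save() for hard_local_irq_save() around the two-step ICR write. Below is a minimal sketch of that pattern, assuming (as on IRQ-pipeline trees) that local_irq_save() only virtually stalls the in-band stage while hard_local_irq_save() clears the real CPU interrupt flag; the helper name, device and register offsets are hypothetical, not from the patch.

```c
/*
 * Hedged sketch, hypothetical device: hard_local_irq_save() is assumed
 * to really disable CPU interrupts, so neither in-band nor out-of-band
 * handlers can run between the two writes the device latches as one
 * command -- the same property the patch needs for APIC_ICR2/APIC_ICR.
 */
#include <linux/io.h>
#include <linux/irqflags.h>	/* assumed home of the hard_* helpers on a pipelined tree */

static void dev_write_cmd_pair(void __iomem *base, u32 hi, u32 lo)
{
	unsigned long flags;

	flags = hard_local_irq_save();	/* hardware IRQs off, not just virtually masked */
	writel(hi, base + 0x04);	/* hypothetical "high word" register */
	writel(lo, base + 0x00);	/* writing offset 0x00 triggers the command */
	hard_local_irq_restore(flags);
}
```

On a pipelined kernel, plain local_irq_save() would still let an out-of-band IRQ run between the two writes; that is the window the patch closes for the ICR pair.
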
@@ -331,6 +332,9 @@
 static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
 {
 	unsigned int lvtt_value, tmp_value;
+	unsigned long flags;
+
+	flags = hard_cond_local_irq_save();
 
 	lvtt_value = LOCAL_TIMER_VECTOR;
 	if (!oneshot)
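
__setup_APIC_LVTT() uses the conditional variant. The sketch below assumes the semantics these helpers have on I-pipe/Dovetail style trees: hard_cond_local_irq_save() expands to hard_local_irq_save() when CONFIG_IRQ_PIPELINE is enabled and to a no-op otherwise, since on a non-pipelined build the caller's ordinary IRQ masking is already real masking. The helper and register layout are hypothetical.

```c
/*
 * Hedged sketch, hypothetical timer block: the read-modify-write must
 * not be interleaved with an out-of-band IRQ touching the same
 * register, while on a !CONFIG_IRQ_PIPELINE build the cond_ helpers
 * are assumed to compile away to nothing.
 */
#include <linux/io.h>

static void timer_set_divider(void __iomem *base, u32 div)
{
	unsigned long flags;
	u32 cfg;

	flags = hard_cond_local_irq_save();	/* hard off only on pipelined kernels */

	cfg = readl(base + 0x10);		/* hypothetical divider register */
	cfg = (cfg & ~0x7) | (div & 0x7);
	writel(cfg, base + 0x10);

	hard_cond_local_irq_restore(flags);
}
```
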
@@ -353,6 +357,8 @@
 		 * According to Intel, MFENCE can do the serialization here.
 		 */
 		asm volatile("mfence" : : : "memory");
+		hard_cond_local_irq_restore(flags);
+		printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
 		return;
 	}
 
@@ -366,6 +372,8 @@
 
 	if (!oneshot)
 		apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
+
+	hard_cond_local_irq_restore(flags);
 }
 
 /*
@@ -471,28 +479,34 @@
 static int lapic_next_deadline(unsigned long delta,
 			       struct clock_event_device *evt)
 {
+	unsigned long flags;
 	u64 tsc;
 
 	/* This MSR is special and need a special fence: */
 	weak_wrmsr_fence();
 
+	flags = hard_local_irq_save();
 	tsc = rdtsc();
 	wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
+	hard_local_irq_restore(flags);
 	return 0;
 }
 
 static int lapic_timer_shutdown(struct clock_event_device *evt)
 {
+	unsigned long flags;
 	unsigned int v;
 
 	/* Lapic used as dummy for broadcast ? */
 	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
 		return 0;
 
+	flags = hard_local_irq_save();
 	v = apic_read(APIC_LVTT);
 	v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
 	apic_write(APIC_LVTT, v);
 	apic_write(APIC_TMICT, 0);
+	hard_local_irq_restore(flags);
 	return 0;
 }
 
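
For lapic_next_deadline() the bracket closes a concrete race: with only virtual masking in effect, an out-of-band interrupt could run between rdtsc() and the MSR write, and a long enough delay would program a deadline that is already in the past, losing the event. A sketch of that reasoning, using the same MSR accessors as the file but a hypothetical wrapper name:

```c
/*
 * Hedged sketch: identical pattern to the hunk above, with comments
 * marking the window hard_local_irq_save() closes.
 */
#include <asm/msr.h>

static int program_tsc_deadline(u64 delta_tsc)
{
	unsigned long flags;
	u64 tsc;

	flags = hard_local_irq_save();
	tsc = rdtsc();			/* if an oob IRQ could run here ...        */
	wrmsrl(MSR_IA32_TSC_DEADLINE,	/* ... this value might already be stale, */
	       tsc + delta_tsc);	/* i.e. a deadline in the past             */
	hard_local_irq_restore(flags);

	return 0;			/* clockevents expects 0 on success */
}
```
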
@@ -527,6 +541,32 @@
 #endif
 }
 
+static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+#define LAPIC_TIMER_IRQ apicm_vector_irq(LOCAL_TIMER_VECTOR)
+
+static irqreturn_t lapic_oob_handler(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = this_cpu_ptr(&lapic_events);
+
+	trace_local_timer_entry(LOCAL_TIMER_VECTOR);
+	clockevents_handle_event(evt);
+	trace_local_timer_exit(LOCAL_TIMER_VECTOR);
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction lapic_oob_action = {
+	.handler = lapic_oob_handler,
+	.name = "Out-of-band LAPIC timer interrupt",
+	.flags = IRQF_TIMER | IRQF_PERCPU,
+};
+
+#else
+#define LAPIC_TIMER_IRQ -1
+#endif
 
 /*
  * The local apic timer can be used for any function which is CPU local.
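
This block adds the out-of-band delivery path for the local timer: apicm_vector_irq() presumably maps LOCAL_TIMER_VECTOR to the IRQ number the pipeline assigns to APIC-managed vectors, and lapic_oob_handler() ticks the per-CPU clock event device through clockevents_handle_event(), which is assumed to invoke the device's event_handler. The sketch below rewires the same three pieces for a hypothetical per-CPU tick source; only struct irqaction, setup_percpu_irq() and the flags already used in the hunk are relied upon.

```c
/*
 * Hedged sketch, hypothetical vector/handler names: a per-CPU
 * out-of-band tick source follows the same recipe as the LAPIC timer:
 *   1. map the vector to its pipeline IRQ number,
 *   2. run the per-CPU clock event device from an irqreturn_t handler,
 *   3. register one shared irqaction with setup_percpu_irq().
 */
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

#define MY_TICK_VECTOR	0xe0				/* hypothetical, illustration only */
#define MY_TICK_IRQ	apicm_vector_irq(MY_TICK_VECTOR)

static DEFINE_PER_CPU(struct clock_event_device, my_tick_events);

static irqreturn_t my_tick_oob_handler(int irq, void *dev_id)
{
	/* Runs on the out-of-band stage: keep it short and lock-free. */
	clockevents_handle_event(this_cpu_ptr(&my_tick_events));
	return IRQ_HANDLED;
}

static struct irqaction my_tick_action = {
	.handler	= my_tick_oob_handler,
	.name		= "Out-of-band tick",
	.flags		= IRQF_TIMER | IRQF_PERCPU,
};

static int __init my_tick_setup(void)
{
	return setup_percpu_irq(MY_TICK_IRQ, &my_tick_action);
}
```

A static struct irqaction plus setup_percpu_irq() avoids allocating an action on the boot path, which is presumably why the patch prefers it over request_percpu_irq().
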
@@ -534,8 +574,8 @@
 static struct clock_event_device lapic_clockevent = {
 	.name				= "lapic",
 	.features			= CLOCK_EVT_FEAT_PERIODIC |
-					  CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP
-					  | CLOCK_EVT_FEAT_DUMMY,
+					  CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP |
+					  CLOCK_EVT_FEAT_PIPELINE | CLOCK_EVT_FEAT_DUMMY,
 	.shift				= 32,
 	.set_state_shutdown		= lapic_timer_shutdown,
 	.set_state_periodic		= lapic_timer_set_periodic,
@@ -544,9 +584,8 @@
 	.set_next_event			= lapic_next_event,
 	.broadcast			= lapic_timer_broadcast,
 	.rating				= 100,
-	.irq				= -1,
+	.irq				= LAPIC_TIMER_IRQ,
 };
-static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
 
 static const struct x86_cpu_id deadline_match[] __initconst = {
 	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x2, 0x2), 0x3a), /* EP */
@@ -1042,6 +1081,9 @@
 	/* Setup the lapic or request the broadcast */
 	setup_APIC_timer();
 	amd_e400_c1e_apic_setup();
+#ifdef CONFIG_IRQ_PIPELINE
+	setup_percpu_irq(LAPIC_TIMER_IRQ, &lapic_oob_action);
+#endif
 }
 
 void setup_secondary_APIC_clock(void)
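
setup_percpu_irq() returns an int, and the hunk above discards it. A hedged variant that at least reports a failure, assuming nothing beyond the mainline prototype:

```c
#ifdef CONFIG_IRQ_PIPELINE
	/* Sketch: same registration as above, but don't ignore the result. */
	if (setup_percpu_irq(LAPIC_TIMER_IRQ, &lapic_oob_action))
		pr_warn("APIC: failed to register the out-of-band timer action\n");
#endif
```
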
@@ -1092,7 +1134,8 @@
  * [ if a single-CPU system runs an SMP kernel then we call the local
  *   interrupt as well. Thus we cannot inline the local irq ... ]
  */
-DEFINE_IDTENTRY_SYSVEC(sysvec_apic_timer_interrupt)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(LOCAL_TIMER_VECTOR,
+				 sysvec_apic_timer_interrupt)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
@@ -1513,6 +1556,6 @@
 	 * per set bit.
 	 */
 	for_each_set_bit(bit, isr->map, APIC_IR_BITS)
-		ack_APIC_irq();
+		__ack_APIC_irq();
 	return true;
 }
 
@@ -2131,7 +2174,7 @@
  *
  * Also called from sysvec_spurious_apic_interrupt().
  */
-DEFINE_IDTENTRY_IRQ(spurious_interrupt)
+DEFINE_IDTENTRY_IRQ_PIPELINED(spurious_interrupt)
 {
 	u32 v;
 
@@ -2157,7 +2200,7 @@
 	if (v & (1 << (vector & 0x1f))) {
 		pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Acked\n",
 			vector, smp_processor_id());
-		ack_APIC_irq();
+		__ack_APIC_irq();
 	} else {
 		pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Not pending!\n",
 			vector, smp_processor_id());
@@ -2166,13 +2209,18 @@
 	trace_spurious_apic_exit(vector);
 }
 
-DEFINE_IDTENTRY_SYSVEC(sysvec_spurious_apic_interrupt)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(SPURIOUS_APIC_VECTOR,
+				 sysvec_spurious_apic_interrupt)
 {
 	__spurious_interrupt(regs, SPURIOUS_APIC_VECTOR);
 }
 
 /*
  * This interrupt should never happen with our APIC/SMP architecture
+ *
+ * irq_pipeline: same as spurious_interrupt, would run directly out of
+ * the IDT, no deferral via the interrupt log which means that only
+ * the hardware IRQ state is considered for masking.
  */
 DEFINE_IDTENTRY_SYSVEC(sysvec_error_interrupt)
 {