forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/arch/x86/kernel/apic/apic.c
....@@ -1,3 +1,4 @@
1
+// SPDX-License-Identifier: GPL-2.0-only
12 /*
23 * Local APIC handling, local APIC timers
34 *
....@@ -20,7 +21,7 @@
2021 #include <linux/acpi_pmtmr.h>
2122 #include <linux/clockchips.h>
2223 #include <linux/interrupt.h>
23
-#include <linux/bootmem.h>
24
+#include <linux/memblock.h>
2425 #include <linux/ftrace.h>
2526 #include <linux/ioport.h>
2627 #include <linux/export.h>
....@@ -39,13 +40,14 @@
3940 #include <asm/irq_remapping.h>
4041 #include <asm/perf_event.h>
4142 #include <asm/x86_init.h>
42
-#include <asm/pgalloc.h>
4343 #include <linux/atomic.h>
4444 #include <asm/barrier.h>
4545 #include <asm/mpspec.h>
4646 #include <asm/i8259.h>
4747 #include <asm/proto.h>
48
+#include <asm/traps.h>
4849 #include <asm/apic.h>
50
+#include <asm/acpi.h>
4951 #include <asm/io_apic.h>
5052 #include <asm/desc.h>
5153 #include <asm/hpet.h>
....@@ -64,10 +66,10 @@
6466 unsigned disabled_cpus;
6567
6668 /* Processor that is doing the boot up */
67
-unsigned int boot_cpu_physical_apicid = -1U;
69
+unsigned int boot_cpu_physical_apicid __ro_after_init = -1U;
6870 EXPORT_SYMBOL_GPL(boot_cpu_physical_apicid);
6971
70
-u8 boot_cpu_apic_version;
72
+u8 boot_cpu_apic_version __ro_after_init;
7173
7274 /*
7375 * The highest APIC ID seen during enumeration.
....@@ -84,13 +86,13 @@
8486 * disable_cpu_apicid=<int>, mostly used for the kdump 2nd kernel to
8587 * avoid undefined behaviour caused by sending INIT from AP to BSP.
8688 */
87
-static unsigned int disabled_cpu_apicid __read_mostly = BAD_APICID;
89
+static unsigned int disabled_cpu_apicid __ro_after_init = BAD_APICID;
8890
8991 /*
9092 * This variable controls which CPUs receive external NMIs. By default,
9193 * external NMIs are delivered only to the BSP.
9294 */
93
-static int apic_extnmi = APIC_EXTNMI_BSP;
95
+static int apic_extnmi __ro_after_init = APIC_EXTNMI_BSP;
9496
9597 /*
9698 * Map cpu index to physical APIC ID
....@@ -113,7 +115,7 @@
113115 DEFINE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid, BAD_APICID);
114116
115117 /* Local APIC was disabled by the BIOS and enabled by the kernel */
116
-static int enabled_via_apicbase;
118
+static int enabled_via_apicbase __ro_after_init;
117119
118120 /*
119121 * Handle interrupt mode configuration register (IMCR).
....@@ -166,39 +168,39 @@
166168 {
167169 apic_calibrate_pmtmr = 1;
168170 notsc_setup(NULL);
169
- return 0;
171
+ return 1;
170172 }
171173 __setup("apicpmtimer", setup_apicpmtimer);
172174 #endif
173175
174
-unsigned long mp_lapic_addr;
175
-int disable_apic;
176
+unsigned long mp_lapic_addr __ro_after_init;
177
+int disable_apic __ro_after_init;
176178 /* Disable local APIC timer from the kernel commandline or via dmi quirk */
177179 static int disable_apic_timer __initdata;
178180 /* Local APIC timer works in C2 */
179
-int local_apic_timer_c2_ok;
181
+int local_apic_timer_c2_ok __ro_after_init;
180182 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
181183
182184 /*
183185 * Debug level, exported for io_apic.c
184186 */
185
-int apic_verbosity;
187
+int apic_verbosity __ro_after_init;
186188
187
-int pic_mode;
189
+int pic_mode __ro_after_init;
188190
189191 /* Have we found an MP table */
190
-int smp_found_config;
192
+int smp_found_config __ro_after_init;
191193
192194 static struct resource lapic_resource = {
193195 .name = "Local APIC",
194196 .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
195197 };
196198
197
-unsigned int lapic_timer_frequency = 0;
199
+unsigned int lapic_timer_period = 0;
198200
199201 static void apic_pm_activate(void);
200202
201
-static unsigned long apic_phys;
203
+static unsigned long apic_phys __ro_after_init;
202204
203205 /*
204206 * Get the LAPIC version
....@@ -225,6 +227,11 @@
225227 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
226228 boot_cpu_data.x86 >= 0xf)
227229 return 1;
230
+
231
+ /* Hygon systems use modern APIC */
232
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
233
+ return 1;
234
+
228235 return lapic_get_version() >= 0x14;
229236 }
230237
....@@ -496,7 +503,7 @@
496503 if (evt->features & CLOCK_EVT_FEAT_DUMMY)
497504 return 0;
498505
499
- __setup_APIC_LVTT(lapic_timer_frequency, oneshot, 1);
506
+ __setup_APIC_LVTT(lapic_timer_period, oneshot, 1);
500507 return 0;
501508 }
502509
....@@ -541,65 +548,33 @@
541548 };
542549 static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
543550
544
-#define DEADLINE_MODEL_MATCH_FUNC(model, func) \
545
- { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&func }
546
-
547
-#define DEADLINE_MODEL_MATCH_REV(model, rev) \
548
- { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)rev }
549
-
550
-static __init u32 hsx_deadline_rev(void)
551
-{
552
- switch (boot_cpu_data.x86_stepping) {
553
- case 0x02: return 0x3a; /* EP */
554
- case 0x04: return 0x0f; /* EX */
555
- }
556
-
557
- return ~0U;
558
-}
559
-
560
-static __init u32 bdx_deadline_rev(void)
561
-{
562
- switch (boot_cpu_data.x86_stepping) {
563
- case 0x02: return 0x00000011;
564
- case 0x03: return 0x0700000e;
565
- case 0x04: return 0x0f00000c;
566
- case 0x05: return 0x0e000003;
567
- }
568
-
569
- return ~0U;
570
-}
571
-
572
-static __init u32 skx_deadline_rev(void)
573
-{
574
- switch (boot_cpu_data.x86_stepping) {
575
- case 0x03: return 0x01000136;
576
- case 0x04: return 0x02000014;
577
- }
578
-
579
- if (boot_cpu_data.x86_stepping > 4)
580
- return 0;
581
-
582
- return ~0U;
583
-}
584
-
585551 static const struct x86_cpu_id deadline_match[] __initconst = {
586
- DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X, hsx_deadline_rev),
587
- DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X, 0x0b000020),
588
- DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev),
589
- DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_SKYLAKE_X, skx_deadline_rev),
552
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x2, 0x2), 0x3a), /* EP */
553
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x4, 0x4), 0x0f), /* EX */
590554
591
- DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_CORE, 0x22),
592
- DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_ULT, 0x20),
593
- DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_GT3E, 0x17),
555
+ X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_X, 0x0b000020),
594556
595
- DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_CORE, 0x25),
596
- DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_GT3E, 0x17),
557
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x2, 0x2), 0x00000011),
558
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x3, 0x3), 0x0700000e),
559
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x4, 0x4), 0x0f00000c),
560
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x5, 0x5), 0x0e000003),
597561
598
- DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_MOBILE, 0xb2),
599
- DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_DESKTOP, 0xb2),
562
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x3, 0x3), 0x01000136),
563
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x4, 0x4), 0x02000014),
564
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x5, 0xf), 0),
600565
601
- DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_KABYLAKE_MOBILE, 0x52),
602
- DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_KABYLAKE_DESKTOP, 0x52),
566
+ X86_MATCH_INTEL_FAM6_MODEL( HASWELL, 0x22),
567
+ X86_MATCH_INTEL_FAM6_MODEL( HASWELL_L, 0x20),
568
+ X86_MATCH_INTEL_FAM6_MODEL( HASWELL_G, 0x17),
569
+
570
+ X86_MATCH_INTEL_FAM6_MODEL( BROADWELL, 0x25),
571
+ X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_G, 0x17),
572
+
573
+ X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE_L, 0xb2),
574
+ X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE, 0xb2),
575
+
576
+ X86_MATCH_INTEL_FAM6_MODEL( KABYLAKE_L, 0x52),
577
+ X86_MATCH_INTEL_FAM6_MODEL( KABYLAKE, 0x52),
603578
604579 {},
605580 };
....@@ -618,14 +593,7 @@
618593 if (!m)
619594 return true;
620595
621
- /*
622
- * Function pointers will have the MSB set due to address layout,
623
- * immediate revisions will not.
624
- */
625
- if ((long)m->driver_data < 0)
626
- rev = ((u32 (*)(void))(m->driver_data))();
627
- else
628
- rev = (u32)m->driver_data;
596
+ rev = (u32)m->driver_data;
629597
630598 if (boot_cpu_data.microcode >= rev)
631599 return true;
....@@ -777,8 +745,8 @@
777745
778746 res = (((u64)deltapm) * mult) >> 22;
779747 do_div(res, 1000000);
780
- pr_warning("APIC calibration not consistent "
781
- "with PM-Timer: %ldms instead of 100ms\n",(long)res);
748
+ pr_warn("APIC calibration not consistent "
749
+ "with PM-Timer: %ldms instead of 100ms\n", (long)res);
782750
783751 /* Correct the lapic counter value */
784752 res = (((u64)(*delta)) * pm_100ms);
....@@ -800,6 +768,64 @@
800768 return 0;
801769 }
802770
771
+static int __init lapic_init_clockevent(void)
772
+{
773
+ if (!lapic_timer_period)
774
+ return -1;
775
+
776
+ /* Calculate the scaled math multiplication factor */
777
+ lapic_clockevent.mult = div_sc(lapic_timer_period/APIC_DIVISOR,
778
+ TICK_NSEC, lapic_clockevent.shift);
779
+ lapic_clockevent.max_delta_ns =
780
+ clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
781
+ lapic_clockevent.max_delta_ticks = 0x7FFFFFFF;
782
+ lapic_clockevent.min_delta_ns =
783
+ clockevent_delta2ns(0xF, &lapic_clockevent);
784
+ lapic_clockevent.min_delta_ticks = 0xF;
785
+
786
+ return 0;
787
+}
788
+
789
+bool __init apic_needs_pit(void)
790
+{
791
+ /*
792
+ * If the frequencies are not known, PIT is required for both TSC
793
+ * and apic timer calibration.
794
+ */
795
+ if (!tsc_khz || !cpu_khz)
796
+ return true;
797
+
798
+ /* Is there an APIC at all or is it disabled? */
799
+ if (!boot_cpu_has(X86_FEATURE_APIC) || disable_apic)
800
+ return true;
801
+
802
+ /*
803
+ * If interrupt delivery mode is legacy PIC or virtual wire without
804
+ * configuration, the local APIC timer won't be set up. Make sure
805
+ * that the PIT is initialized.
806
+ */
807
+ if (apic_intr_mode == APIC_PIC ||
808
+ apic_intr_mode == APIC_VIRTUAL_WIRE_NO_CONFIG)
809
+ return true;
810
+
811
+ /* Virt guests may lack ARAT, but still have DEADLINE */
812
+ if (!boot_cpu_has(X86_FEATURE_ARAT))
813
+ return true;
814
+
815
+ /* Deadline timer is based on TSC so no further PIT action required */
816
+ if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
817
+ return false;
818
+
819
+ /* APIC timer disabled? */
820
+ if (disable_apic_timer)
821
+ return true;
822
+ /*
823
+ * The APIC timer frequency is known already, no PIT calibration
824
+ * required. If unknown, let the PIT be initialized.
825
+ */
826
+ return lapic_timer_period == 0;
827
+}
828
+
803829 static int __init calibrate_APIC_clock(void)
804830 {
805831 struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
....@@ -809,25 +835,21 @@
809835 long delta, deltatsc;
810836 int pm_referenced = 0;
811837
812
- /**
813
- * check if lapic timer has already been calibrated by platform
814
- * specific routine, such as tsc calibration code. if so, we just fill
838
+ if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
839
+ return 0;
840
+
841
+ /*
842
+ * Check if lapic timer has already been calibrated by platform
843
+ * specific routine, such as tsc calibration code. If so just fill
815844 * in the clockevent structure and return.
816845 */
817
-
818
- if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
819
- return 0;
820
- } else if (lapic_timer_frequency) {
846
+ if (!lapic_init_clockevent()) {
821847 apic_printk(APIC_VERBOSE, "lapic timer already calibrated %d\n",
822
- lapic_timer_frequency);
823
- lapic_clockevent.mult = div_sc(lapic_timer_frequency/APIC_DIVISOR,
824
- TICK_NSEC, lapic_clockevent.shift);
825
- lapic_clockevent.max_delta_ns =
826
- clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
827
- lapic_clockevent.max_delta_ticks = 0x7FFFFF;
828
- lapic_clockevent.min_delta_ns =
829
- clockevent_delta2ns(0xF, &lapic_clockevent);
830
- lapic_clockevent.min_delta_ticks = 0xF;
848
+ lapic_timer_period);
849
+ /*
850
+ * Direct calibration methods must have an always running
851
+ * local APIC timer, no need for broadcast timer.
852
+ */
831853 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
832854 return 0;
833855 }
....@@ -904,22 +926,13 @@
904926 pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
905927 &delta, &deltatsc);
906928
907
- /* Calculate the scaled math multiplication factor */
908
- lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
909
- lapic_clockevent.shift);
910
- lapic_clockevent.max_delta_ns =
911
- clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
912
- lapic_clockevent.max_delta_ticks = 0x7FFFFFFF;
913
- lapic_clockevent.min_delta_ns =
914
- clockevent_delta2ns(0xF, &lapic_clockevent);
915
- lapic_clockevent.min_delta_ticks = 0xF;
916
-
917
- lapic_timer_frequency = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
929
+ lapic_timer_period = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
930
+ lapic_init_clockevent();
918931
919932 apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
920933 apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult);
921934 apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
922
- lapic_timer_frequency);
935
+ lapic_timer_period);
923936
924937 if (boot_cpu_has(X86_FEATURE_TSC)) {
925938 apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
....@@ -930,15 +943,15 @@
930943
931944 apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
932945 "%u.%04u MHz.\n",
933
- lapic_timer_frequency / (1000000 / HZ),
934
- lapic_timer_frequency % (1000000 / HZ));
946
+ lapic_timer_period / (1000000 / HZ),
947
+ lapic_timer_period % (1000000 / HZ));
935948
936949 /*
937950 * Do a sanity check on the APIC calibration result
938951 */
939
- if (lapic_timer_frequency < (1000000 / HZ)) {
952
+ if (lapic_timer_period < (1000000 / HZ)) {
940953 local_irq_enable();
941
- pr_warning("APIC frequency too slow, disabling apic timer\n");
954
+ pr_warn("APIC frequency too slow, disabling apic timer\n");
942955 return -1;
943956 }
944957
....@@ -982,7 +995,7 @@
982995 local_irq_enable();
983996
984997 if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
985
- pr_warning("APIC timer disabled due to verification failure\n");
998
+ pr_warn("APIC timer disabled due to verification failure\n");
986999 return -1;
9871000 }
9881001
....@@ -1056,8 +1069,8 @@
10561069 * spurious.
10571070 */
10581071 if (!evt->event_handler) {
1059
- pr_warning("Spurious LAPIC timer interrupt on cpu %d\n",
1060
- smp_processor_id());
1072
+ pr_warn("Spurious LAPIC timer interrupt on cpu %d\n",
1073
+ smp_processor_id());
10611074 /* Switch it off */
10621075 lapic_timer_shutdown(evt);
10631076 return;
....@@ -1079,23 +1092,14 @@
10791092 * [ if a single-CPU system runs an SMP kernel then we call the local
10801093 * interrupt as well. Thus we cannot inline the local irq ... ]
10811094 */
1082
-__visible void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
1095
+DEFINE_IDTENTRY_SYSVEC(sysvec_apic_timer_interrupt)
10831096 {
10841097 struct pt_regs *old_regs = set_irq_regs(regs);
10851098
1086
- /*
1087
- * NOTE! We'd better ACK the irq immediately,
1088
- * because timer handling can be slow.
1089
- *
1090
- * update_process_times() expects us to have done irq_enter().
1091
- * Besides, if we don't timer interrupts ignore the global
1092
- * interrupt lock, which is the WrongThing (tm) to do.
1093
- */
1094
- entering_ack_irq();
1099
+ ack_APIC_irq();
10951100 trace_local_timer_entry(LOCAL_TIMER_VECTOR);
10961101 local_apic_timer_interrupt();
10971102 trace_local_timer_exit(LOCAL_TIMER_VECTOR);
1098
- exiting_irq();
10991103
11001104 set_irq_regs(old_regs);
11011105 }
....@@ -1185,25 +1189,38 @@
11851189 }
11861190
11871191 /**
1192
+ * apic_soft_disable - Clears and software disables the local APIC on hotplug
1193
+ *
1194
+ * Contrary to disable_local_APIC() this does not touch the enable bit in
1195
+ * MSR_IA32_APICBASE. Clearing that bit on systems based on the 3 wire APIC
1196
+ * bus would require a hardware reset as the APIC would lose track of bus
1197
+ * arbitration. On systems with FSB delivery APICBASE could be disabled,
1198
+ * but it has to be guaranteed that no interrupt is sent to the APIC while
1199
+ * in that state and it's not clear from the SDM whether it still responds
1200
+ * to INIT/SIPI messages. Stay on the safe side and use software disable.
1201
+ */
1202
+void apic_soft_disable(void)
1203
+{
1204
+ u32 value;
1205
+
1206
+ clear_local_APIC();
1207
+
1208
+ /* Soft disable APIC (implies clearing of registers for 82489DX!). */
1209
+ value = apic_read(APIC_SPIV);
1210
+ value &= ~APIC_SPIV_APIC_ENABLED;
1211
+ apic_write(APIC_SPIV, value);
1212
+}
1213
+
1214
+/**
11881215 * disable_local_APIC - clear and disable the local APIC
11891216 */
11901217 void disable_local_APIC(void)
11911218 {
1192
- unsigned int value;
1193
-
11941219 /* APIC hasn't been mapped yet */
11951220 if (!x2apic_mode && !apic_phys)
11961221 return;
11971222
1198
- clear_local_APIC();
1199
-
1200
- /*
1201
- * Disable APIC (implies clearing of registers
1202
- * for 82489DX!).
1203
- */
1204
- value = apic_read(APIC_SPIV);
1205
- value &= ~APIC_SPIV_APIC_ENABLED;
1206
- apic_write(APIC_SPIV, value);
1223
+ apic_soft_disable();
12071224
12081225 #ifdef CONFIG_X86_32
12091226 /*
....@@ -1268,9 +1285,9 @@
12681285 APIC_INT_LEVELTRIG | APIC_DM_INIT);
12691286 }
12701287
1271
-enum apic_intr_mode_id apic_intr_mode;
1288
+enum apic_intr_mode_id apic_intr_mode __ro_after_init;
12721289
1273
-static int __init apic_intr_mode_select(void)
1290
+static int __init __apic_intr_mode_select(void)
12741291 {
12751292 /* Check kernel option */
12761293 if (disable_apic) {
....@@ -1332,6 +1349,12 @@
13321349 return APIC_SYMMETRIC_IO;
13331350 }
13341351
1352
+/* Select the interrupt delivery mode for the BSP */
1353
+void __init apic_intr_mode_select(void)
1354
+{
1355
+ apic_intr_mode = __apic_intr_mode_select();
1356
+}
1357
+
13351358 /*
13361359 * An initial setup of the virtual wire mode.
13371360 */
....@@ -1381,12 +1404,12 @@
13811404 apic_write(APIC_LVT1, value);
13821405 }
13831406
1407
+static void __init apic_bsp_setup(bool upmode);
1408
+
13841409 /* Init the interrupt delivery mode for the BSP */
13851410 void __init apic_intr_mode_init(void)
13861411 {
13871412 bool upmode = IS_ENABLED(CONFIG_UP_LATE_INIT);
1388
-
1389
- apic_intr_mode = apic_intr_mode_select();
13901413
13911414 switch (apic_intr_mode) {
13921415 case APIC_PIC:
....@@ -1409,6 +1432,9 @@
14091432 pr_info("APIC: Switch to symmetric I/O mode setup in no SMP routine\n");
14101433 break;
14111434 }
1435
+
1436
+ if (x86_platform.apic_post_init)
1437
+ x86_platform.apic_post_init();
14121438
14131439 apic_bsp_setup(upmode);
14141440 }
....@@ -1533,7 +1559,6 @@
15331559 int cpu = smp_processor_id();
15341560 unsigned int value;
15351561
1536
-
15371562 if (disable_apic) {
15381563 disable_ioapic_support();
15391564 return;
....@@ -1556,8 +1581,6 @@
15561581 apic_write(APIC_ESR, 0);
15571582 }
15581583 #endif
1559
- perf_events_lapic_init();
1560
-
15611584 /*
15621585 * Double-check whether this APIC is really registered.
15631586 * This is meaningless in clustered apic mode, so we skip it.
....@@ -1590,11 +1613,14 @@
15901613 #endif
15911614
15921615 /*
1593
- * Set Task Priority to 'accept all'. We never change this
1594
- * later on.
1616
+ * Set Task Priority to 'accept all except vectors 0-31'. An APIC
1617
+ * vector in the 16-31 range could be delivered if TPR == 0, but we
1618
+ * would think it's an exception and terrible things will happen. We
1619
+ * never change this later on.
15951620 */
15961621 value = apic_read(APIC_TASKPRI);
15971622 value &= ~APIC_TPRI_MASK;
1623
+ value |= 0x10;
15981624 apic_write(APIC_TASKPRI, value);
15991625
16001626 /* Clear eventually stale ISR/IRR bits */
....@@ -1643,6 +1669,8 @@
16431669 */
16441670 value |= SPURIOUS_APIC_VECTOR;
16451671 apic_write(APIC_SPIV, value);
1672
+
1673
+ perf_events_lapic_init();
16461674
16471675 /*
16481676 * Set up LVT0, LVT1:
....@@ -1755,11 +1783,11 @@
17551783 int apicid = native_apic_msr_read(APIC_ID);
17561784
17571785 if (apicid >= 255) {
1758
- pr_warning("Apicid: %08x, cannot enforce nox2apic\n",
1759
- apicid);
1786
+ pr_warn("Apicid: %08x, cannot enforce nox2apic\n",
1787
+ apicid);
17601788 return 0;
17611789 }
1762
- pr_warning("x2apic already enabled.\n");
1790
+ pr_warn("x2apic already enabled.\n");
17631791 __x2apic_disable();
17641792 }
17651793 setup_clear_cpu_cap(X86_FEATURE_X2APIC);
....@@ -1929,7 +1957,7 @@
19291957 */
19301958 features = cpuid_edx(1);
19311959 if (!(features & (1 << X86_FEATURE_APIC))) {
1932
- pr_warning("Could not enable APIC!\n");
1960
+ pr_warn("Could not enable APIC!\n");
19331961 return -1;
19341962 }
19351963 set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
....@@ -1986,6 +2014,8 @@
19862014 (boot_cpu_data.x86 >= 15))
19872015 break;
19882016 goto no_apic;
2017
+ case X86_VENDOR_HYGON:
2018
+ break;
19892019 case X86_VENDOR_INTEL:
19902020 if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
19912021 (boot_cpu_data.x86 == 5 && boot_cpu_has(X86_FEATURE_APIC)))
....@@ -2090,15 +2120,21 @@
20902120 * Local APIC interrupts
20912121 */
20922122
2093
-/*
2094
- * This interrupt should _never_ happen with our APIC/SMP architecture
2123
+/**
2124
+ * spurious_interrupt - Catch all for interrupts raised on unused vectors
2125
+ * @regs: Pointer to pt_regs on stack
2126
+ * @vector: The vector number
2127
+ *
2128
+ * This is invoked from ASM entry code to catch all interrupts which
2129
+ * trigger on an entry which is routed to the common_spurious idtentry
2130
+ * point.
2131
+ *
2132
+ * Also called from sysvec_spurious_apic_interrupt().
20952133 */
2096
-__visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs)
2134
+DEFINE_IDTENTRY_IRQ(spurious_interrupt)
20972135 {
2098
- u8 vector = ~regs->orig_ax;
20992136 u32 v;
21002137
2101
- entering_irq();
21022138 trace_spurious_apic_entry(vector);
21032139
21042140 inc_irq_stat(irq_spurious_count);
....@@ -2128,13 +2164,17 @@
21282164 }
21292165 out:
21302166 trace_spurious_apic_exit(vector);
2131
- exiting_irq();
2167
+}
2168
+
2169
+DEFINE_IDTENTRY_SYSVEC(sysvec_spurious_apic_interrupt)
2170
+{
2171
+ __spurious_interrupt(regs, SPURIOUS_APIC_VECTOR);
21322172 }
21332173
21342174 /*
21352175 * This interrupt should never happen with our APIC/SMP architecture
21362176 */
2137
-__visible void __irq_entry smp_error_interrupt(struct pt_regs *regs)
2177
+DEFINE_IDTENTRY_SYSVEC(sysvec_error_interrupt)
21382178 {
21392179 static const char * const error_interrupt_reason[] = {
21402180 "Send CS error", /* APIC Error Bit 0 */
....@@ -2148,7 +2188,6 @@
21482188 };
21492189 u32 v, i = 0;
21502190
2151
- entering_irq();
21522191 trace_error_apic_entry(ERROR_APIC_VECTOR);
21532192
21542193 /* First tickle the hardware, only then report what went on. -- REW */
....@@ -2172,7 +2211,6 @@
21722211 apic_printk(APIC_DEBUG, KERN_CONT "\n");
21732212
21742213 trace_error_apic_exit(ERROR_APIC_VECTOR);
2175
- exiting_irq();
21762214 }
21772215
21782216 /**
....@@ -2287,7 +2325,7 @@
22872325 #ifdef CONFIG_SMP
22882326 /**
22892327 * apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread
2290
- * @id: APIC ID to check
2328
+ * @apicid: APIC ID to check
22912329 */
22922330 bool apic_id_is_primary_thread(unsigned int apicid)
22932331 {
....@@ -2360,9 +2398,8 @@
23602398 disabled_cpu_apicid == apicid) {
23612399 int thiscpu = num_processors + disabled_cpus;
23622400
2363
- pr_warning("APIC: Disabling requested cpu."
2364
- " Processor %d/0x%x ignored.\n",
2365
- thiscpu, apicid);
2401
+ pr_warn("APIC: Disabling requested cpu."
2402
+ " Processor %d/0x%x ignored.\n", thiscpu, apicid);
23662403
23672404 disabled_cpus++;
23682405 return -ENODEV;
....@@ -2376,8 +2413,7 @@
23762413 apicid != boot_cpu_physical_apicid) {
23772414 int thiscpu = max + disabled_cpus - 1;
23782415
2379
- pr_warning(
2380
- "APIC: NR_CPUS/possible_cpus limit of %i almost"
2416
+ pr_warn("APIC: NR_CPUS/possible_cpus limit of %i almost"
23812417 " reached. Keeping one slot for boot cpu."
23822418 " Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
23832419
....@@ -2388,9 +2424,8 @@
23882424 if (num_processors >= nr_cpu_ids) {
23892425 int thiscpu = max + disabled_cpus;
23902426
2391
- pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
2392
- "reached. Processor %d/0x%x ignored.\n",
2393
- max, thiscpu, apicid);
2427
+ pr_warn("APIC: NR_CPUS/possible_cpus limit of %i reached. "
2428
+ "Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
23942429
23952430 disabled_cpus++;
23962431 return -EINVAL;
....@@ -2420,13 +2455,13 @@
24202455 * Validate version
24212456 */
24222457 if (version == 0x0) {
2423
- pr_warning("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n",
2424
- cpu, apicid);
2458
+ pr_warn("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n",
2459
+ cpu, apicid);
24252460 version = 0x10;
24262461 }
24272462
24282463 if (version != boot_cpu_apic_version) {
2429
- pr_warning("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n",
2464
+ pr_warn("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n",
24302465 boot_cpu_apic_version, cpu, version);
24312466 }
24322467
....@@ -2492,11 +2527,8 @@
24922527 /**
24932528 * apic_bsp_setup - Setup function for local apic and io-apic
24942529 * @upmode: Force UP mode (for APIC_init_uniprocessor)
2495
- *
2496
- * Returns:
2497
- * apic_id of BSP APIC
24982530 */
2499
-void __init apic_bsp_setup(bool upmode)
2531
+static void __init apic_bsp_setup(bool upmode)
25002532 {
25012533 connect_bsp_APIC();
25022534 if (upmode)
....@@ -2583,6 +2615,13 @@
25832615 #endif
25842616
25852617 local_irq_save(flags);
2618
+
2619
+ /*
2620
+ * Mask IOAPIC before disabling the local APIC to prevent stale IRR
2621
+ * entries on some implementations.
2622
+ */
2623
+ mask_ioapic_entries();
2624
+
25862625 disable_local_APIC();
25872626
25882627 irq_remapping_disable();
....@@ -2799,7 +2838,7 @@
27992838 apic_verbosity = APIC_VERBOSE;
28002839 #ifdef CONFIG_X86_64
28012840 else {
2802
- pr_warning("APIC Verbosity level %s not recognised"
2841
+ pr_warn("APIC Verbosity level %s not recognised"
28032842 " use apic=verbose or apic=debug\n", arg);
28042843 return -EINVAL;
28052844 }