2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/arch/x86/kernel/quirks.c
@@ -7,6 +7,8 @@
 #include <linux/irq.h>
 
 #include <asm/hpet.h>
+#include <asm/setup.h>
+#include <asm/mce.h>
 
 #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
 
@@ -89,14 +91,12 @@
 		BUG();
 	else
 		printk(KERN_DEBUG "Force enabled HPET at resume\n");
-
-	return;
 }
 
 static void ich_force_enable_hpet(struct pci_dev *dev)
 {
 	u32 val;
-	u32 uninitialized_var(rcba);
+	u32 rcba;
 	int err = 0;
 
 	if (hpet_address || force_hpet_address)
@@ -111,7 +111,7 @@
 	}
 
 	/* use bits 31:14, 16 kB aligned */
-	rcba_base = ioremap_nocache(rcba, 0x4000);
+	rcba_base = ioremap(rcba, 0x4000);
 	if (rcba_base == NULL) {
 		dev_printk(KERN_DEBUG, &dev->dev, "ioremap failed; "
 			"cannot force enable HPET\n");
@@ -186,7 +186,7 @@
 static void old_ich_force_hpet_resume(void)
 {
 	u32 val;
-	u32 uninitialized_var(gen_cntl);
+	u32 gen_cntl;
 
 	if (!force_hpet_address || !cached_dev)
 		return;
@@ -208,7 +208,7 @@
 static void old_ich_force_enable_hpet(struct pci_dev *dev)
 {
 	u32 val;
-	u32 uninitialized_var(gen_cntl);
+	u32 gen_cntl;
 
 	if (hpet_address || force_hpet_address)
 		return;
@@ -299,7 +299,7 @@
 
 static void vt8237_force_enable_hpet(struct pci_dev *dev)
 {
-	u32 uninitialized_var(val);
+	u32 val;
 
 	if (hpet_address || force_hpet_address)
 		return;
@@ -430,7 +430,7 @@
 
 static void nvidia_force_enable_hpet(struct pci_dev *dev)
 {
-	u32 uninitialized_var(val);
+	u32 val;
 
 	if (hpet_address || force_hpet_address)
 		return;
@@ -447,7 +447,6 @@
 	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
 		force_hpet_address);
 	cached_dev = dev;
-	return;
 }
 
 /* ISA Bridges */
@@ -512,7 +511,6 @@
 	force_hpet_resume_type = NONE_FORCE_HPET_RESUME;
 	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
 		"0x%lx\n", force_hpet_address);
-	return;
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E6XX_CU,
 			 e6xx_force_enable_hpet);
@@ -627,10 +625,6 @@
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
 			amd_disable_seq_and_redirect_scrub);
 
-#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
-#include <linux/jump_label.h>
-#include <asm/string_64.h>
-
 /* Ivy Bridge, Haswell, Broadwell */
 static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
 {
@@ -639,7 +633,7 @@
 	pci_read_config_dword(pdev, 0x84, &capid0);
 
 	if (capid0 & 0x10)
-		static_branch_inc(&mcsafe_key);
+		enable_copy_mc_fragile();
 }
 
 /* Skylake */
@@ -656,14 +650,13 @@
 	 * enabled, so memory machine check recovery is also enabled.
 	 */
 	if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
-		static_branch_inc(&mcsafe_key);
+		enable_copy_mc_fragile();
 
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
-#endif
 #endif
 
 bool x86_apple_machine;