2024-10-12 a5969cabbb4660eab42b6ef0412cbbd1200cf14d
kernel/arch/x86/include/asm/processor.h
@@ -7,6 +7,7 @@
 /* Forward declaration, a strange C thing */
 struct task_struct;
 struct mm_struct;
+struct io_bitmap;
 struct vm86;
 
 #include <asm/math_emu.h>
@@ -24,6 +25,7 @@
 #include <asm/special_insns.h>
 #include <asm/fpu/types.h>
 #include <asm/unwind_hints.h>
+#include <asm/vmxfeatures.h>
 #include <asm/vdso/processor.h>
 
 #include <linux/personality.h>
@@ -43,18 +45,6 @@
 #define NET_IP_ALIGN	0
 
 #define HBP_NUM 4
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-static inline void *current_text_addr(void)
-{
-	void *pc;
-
-	asm volatile("mov $1f, %0; 1:":"=r" (pc));
-
-	return pc;
-}
 
 /*
  * These alignment constraints are for performance in the vSMP case,
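
The hunk above drops current_text_addr(), which captured the current instruction pointer by taking the address of a local asm label. A minimal user-space sketch of the same trick (hypothetical, not part of the patch; it uses a RIP-relative lea instead of the removed code's absolute mov so it links as an ordinary x86-64 PIE binary):

#include <stdio.h>

static inline void *current_text_addr(void)
{
	void *pc;

	/* Load the address of the local label "1:" placed right after
	 * the lea, i.e. the address of the next instruction. */
	asm volatile("lea 1f(%%rip), %0; 1:" : "=r" (pc));
	return pc;
}

int main(void)
{
	printf("pc = %p\n", current_text_addr());
	return 0;
}
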
@@ -97,6 +87,9 @@
 	/* Number of 4K pages in DTLB/ITLB combined(in pages): */
 	int			x86_tlbsize;
 #endif
+#ifdef CONFIG_X86_VMX_FEATURE_NAMES
+	__u32			vmx_capability[NVMXINTS];
+#endif
 	__u8			x86_virt_bits;
 	__u8			x86_phys_bits;
 	/* CPUID returned core id bits: */
@@ -106,19 +99,28 @@
 	__u32			extended_cpuid_level;
 	/* Maximum supported CPUID level, -1=no CPUID: */
 	int			cpuid_level;
-	__u32			x86_capability[NCAPINTS + NBUGINTS];
+	/*
+	 * Align to size of unsigned long because the x86_capability array
+	 * is passed to bitops which require the alignment. Use unnamed
+	 * union to enforce the array is aligned to size of unsigned long.
+	 */
+	union {
+		__u32		x86_capability[NCAPINTS + NBUGINTS];
+		unsigned long	x86_capability_alignment;
+	};
 	char			x86_vendor_id[16];
 	char			x86_model_id[64];
 	/* in KB - valid for CPUS which support this call: */
 	unsigned int		x86_cache_size;
 	int			x86_cache_alignment;	/* In bytes */
-	/* Cache QoS architectural values: */
+	/* Cache QoS architectural values, valid only on the BSP: */
 	int			x86_cache_max_rmid;	/* max index */
 	int			x86_cache_occ_scale;	/* scale to bytes */
+	int			x86_cache_mbm_width_offset;
 	int			x86_power;
 	unsigned long		loops_per_jiffy;
 	/* cpuid returned max cores value: */
-	u16			x86_max_cores;
+	u16			x86_max_cores;
 	u16			apicid;
 	u16			initial_apicid;
 	u16			x86_clflush_size;
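
The unnamed-union change above deserves a standalone illustration. A minimal sketch (the constants are illustrative stand-ins, not necessarily the kernel's values) of how placing a __u32 array in an anonymous union with an unsigned long forces the alignment that the long-based bitops require:

#include <stdint.h>

#define NCAPINTS 19	/* illustrative stand-in */
#define NBUGINTS 1	/* illustrative stand-in */

struct cpu_caps {
	union {
		uint32_t	x86_capability[NCAPINTS + NBUGINTS];
		unsigned long	x86_capability_alignment;
	};
};

/* Without the union the array would only be 4-byte aligned; bitops such
 * as set_bit() operate on unsigned long and need long alignment. */
_Static_assert(_Alignof(struct cpu_caps) == _Alignof(unsigned long),
	       "x86_capability must be long-aligned");
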
@@ -130,6 +132,8 @@
 	u16			logical_proc_id;
 	/* Core id: */
 	u16			cpu_core_id;
+	u16			cpu_die_id;
+	u16			logical_die_id;
 	/* Index into per_cpu list: */
 	u16			cpu_index;
 	u32			microcode;
@@ -156,7 +160,9 @@
 #define X86_VENDOR_CENTAUR	5
 #define X86_VENDOR_TRANSMETA	7
 #define X86_VENDOR_NSC		8
-#define X86_VENDOR_NUM		9
+#define X86_VENDOR_HYGON	9
+#define X86_VENDOR_ZHAOXIN	10
+#define X86_VENDOR_NUM		11
 
 #define X86_VENDOR_UNKNOWN	0xff
 
@@ -166,7 +172,6 @@
 extern struct cpuinfo_x86	boot_cpu_data;
 extern struct cpuinfo_x86	new_cpu_data;
 
-extern struct x86_hw_tss	doublefault_tss;
 extern __u32			cpu_caps_cleared[NCAPINTS + NBUGINTS];
 extern __u32			cpu_caps_set[NCAPINTS + NBUGINTS];
 
@@ -316,7 +321,13 @@
 	 */
 	u64			sp1;
 
+	/*
+	 * Since Linux does not use ring 2, the 'sp2' slot is unused by
+	 * hardware. entry_SYSCALL_64 uses it as scratch space to stash
+	 * the user RSP value.
+	 */
 	u64			sp2;
+
 	u64			reserved2;
 	u64			ist[7];
 	u32			reserved3;
@@ -331,10 +342,32 @@
  * IO-bitmap sizes:
  */
 #define IO_BITMAP_BITS			65536
-#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
-#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
-#define IO_BITMAP_OFFSET		(offsetof(struct tss_struct, io_bitmap) - offsetof(struct tss_struct, x86_tss))
-#define INVALID_IO_BITMAP_OFFSET	0x8000
+#define IO_BITMAP_BYTES			(IO_BITMAP_BITS / BITS_PER_BYTE)
+#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES / sizeof(long))
+
+#define IO_BITMAP_OFFSET_VALID_MAP				\
+	(offsetof(struct tss_struct, io_bitmap.bitmap) -	\
+	 offsetof(struct tss_struct, x86_tss))
+
+#define IO_BITMAP_OFFSET_VALID_ALL				\
+	(offsetof(struct tss_struct, io_bitmap.mapall) -	\
+	 offsetof(struct tss_struct, x86_tss))
+
+#ifdef CONFIG_X86_IOPL_IOPERM
+/*
+ * sizeof(unsigned long) coming from an extra "long" at the end of the
+ * iobitmap. The limit is inclusive, i.e. the last valid byte.
+ */
+# define __KERNEL_TSS_LIMIT	\
+	(IO_BITMAP_OFFSET_VALID_ALL + IO_BITMAP_BYTES + \
+	 sizeof(unsigned long) - 1)
+#else
+# define __KERNEL_TSS_LIMIT	\
+	(offsetof(struct tss_struct, x86_tss) + sizeof(struct x86_hw_tss) - 1)
+#endif
+
+/* Base offset outside of TSS_LIMIT so unprivileged IO causes #GP */
+#define IO_BITMAP_OFFSET_INVALID	(__KERNEL_TSS_LIMIT + 1)
 
 struct entry_stack {
 	char	stack[PAGE_SIZE];
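
To sanity-check the new size macros: 65536 ports at one bit each is 8 KiB, i.e. 1024 longs on a 64-bit build. A small stand-alone check (assumes an LP64 target):

#include <stdio.h>

#define BITS_PER_BYTE	8
#define IO_BITMAP_BITS	65536
#define IO_BITMAP_BYTES	(IO_BITMAP_BITS / BITS_PER_BYTE)
#define IO_BITMAP_LONGS	(IO_BITMAP_BYTES / sizeof(long))

int main(void)
{
	/* Expected on LP64: bytes = 8192, longs = 1024. */
	printf("bytes = %d, longs = %zu\n", IO_BITMAP_BYTES, IO_BITMAP_LONGS);
	return 0;
}
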
@@ -344,6 +377,37 @@
 	struct entry_stack stack;
 } __aligned(PAGE_SIZE);
 
+/*
+ * All IO bitmap related data stored in the TSS:
+ */
+struct x86_io_bitmap {
+	/* The sequence number of the last active bitmap. */
+	u64			prev_sequence;
+
+	/*
+	 * Store the dirty size of the last io bitmap offender. The next
+	 * one will have to do the cleanup as the switch out to a non io
+	 * bitmap user will just set x86_tss.io_bitmap_base to a value
+	 * outside of the TSS limit. So for sane tasks there is no need to
+	 * actually touch the io_bitmap at all.
+	 */
+	unsigned int		prev_max;
+
+	/*
+	 * The extra 1 is there because the CPU will access an
+	 * additional byte beyond the end of the IO permission
+	 * bitmap. The extra byte must be all 1 bits, and must
+	 * be within the limit.
+	 */
+	unsigned long		bitmap[IO_BITMAP_LONGS + 1];
+
+	/*
+	 * Special I/O bitmap to emulate IOPL(3). All bytes zero,
+	 * except the additional byte at the end.
+	 */
+	unsigned long		mapall[IO_BITMAP_LONGS + 1];
+};
+
 struct tss_struct {
 	/*
 	 * The fixed hardware portion. This must not cross a page boundary
@@ -352,26 +416,17 @@
 	 */
 	struct x86_hw_tss	x86_tss;
 
-	/*
-	 * The extra 1 is there because the CPU will access an
-	 * additional byte beyond the end of the IO permission
-	 * bitmap. The extra byte must be all 1 bits, and must
-	 * be within the limit.
-	 */
-	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];
+	struct x86_io_bitmap	io_bitmap;
 } __aligned(PAGE_SIZE);
 
 DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);
 
-/*
- * sizeof(unsigned long) coming from an extra "long" at the end
- * of the iobitmap.
- *
- * -1? seg base+limit should be pointing to the address of the
- * last valid byte
- */
-#define __KERNEL_TSS_LIMIT	\
-	(IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1)
+/* Per CPU interrupt stacks */
+struct irq_stack {
+	char		stack[IRQ_STACK_SIZE];
+} __aligned(IRQ_STACK_SIZE);
+
+DECLARE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
 
 #ifdef CONFIG_X86_32
 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
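
Putting the two hunks above together: x86_tss.io_bitmap_base is switched between IO_BITMAP_OFFSET_VALID_MAP (CPU consults the per-task bitmap), IO_BITMAP_OFFSET_VALID_ALL (CPU consults the all-zero mapall, the IOPL(3) emulation), and IO_BITMAP_OFFSET_INVALID (past the TSS limit, so every port access raises #GP). A toy model of the offset arithmetic, with illustrative sizes rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

struct hw_tss { unsigned long regs[13]; };	/* stand-in for x86_hw_tss */

struct io_bitmap_data {
	unsigned long bitmap[9];	/* per-task map + terminator long */
	unsigned long mapall[9];	/* all-zero map for IOPL(3) emulation */
};

struct toy_tss {
	struct hw_tss		x86_tss;
	struct io_bitmap_data	io_bitmap;
};

int main(void)
{
	size_t valid_map = offsetof(struct toy_tss, io_bitmap.bitmap) -
			   offsetof(struct toy_tss, x86_tss);
	size_t valid_all = offsetof(struct toy_tss, io_bitmap.mapall) -
			   offsetof(struct toy_tss, x86_tss);

	printf("VALID_MAP=%zu VALID_ALL=%zu\n", valid_map, valid_all);
	return 0;
}
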
@@ -380,45 +435,30 @@
 #define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
 #endif
 
-/*
- * Save the original ist values for checking stack pointers during debugging
- */
-struct orig_ist {
-	unsigned long		ist[7];
-};
-
 #ifdef CONFIG_X86_64
-DECLARE_PER_CPU(struct orig_ist, orig_ist);
-
-union irq_stack_union {
-	char irq_stack[IRQ_STACK_SIZE];
+struct fixed_percpu_data {
 	/*
 	 * GCC hardcodes the stack canary as %gs:40. Since the
 	 * irq_stack is the object at %gs:0, we reserve the bottom
 	 * 48 bytes of the irq stack for the canary.
 	 */
-	struct {
-		char gs_base[40];
-		unsigned long stack_canary;
-	};
+	char		gs_base[40];
+	unsigned long	stack_canary;
 };
 
-DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
-DECLARE_INIT_PER_CPU(irq_stack_union);
+DECLARE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __visible;
+DECLARE_INIT_PER_CPU(fixed_percpu_data);
 
 static inline unsigned long cpu_kernelmode_gs_base(int cpu)
 {
-	return (unsigned long)per_cpu(irq_stack_union.gs_base, cpu);
+	return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
 }
 
-DECLARE_PER_CPU(char *, irq_stack_ptr);
 DECLARE_PER_CPU(unsigned int, irq_count);
 extern asmlinkage void ignore_sysret(void);
 
-#if IS_ENABLED(CONFIG_KVM)
 /* Save actual FS/GS selectors and bases to current->thread */
-void save_fsgs_for_kvm(void);
-#endif
+void current_save_fsgs(void);
 #else /* X86_64 */
 #ifdef CONFIG_STACKPROTECTOR
 /*
433473 };
434474 DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
435475 #endif
436
-/*
437
- * per-CPU IRQ handling stacks
438
- */
439
-struct irq_stack {
440
- u32 stack[THREAD_SIZE/sizeof(u32)];
441
-} __aligned(THREAD_SIZE);
442
-
443
-DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
444
-DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
476
+/* Per CPU softirq stack pointer */
477
+DECLARE_PER_CPU(struct irq_stack *, softirq_stack_ptr);
445478 #endif /* X86_64 */
446479
447480 extern unsigned int fpu_kernel_xstate_size;
448481 extern unsigned int fpu_user_xstate_size;
449482
450483 struct perf_event;
451
-
452
-typedef struct {
453
- unsigned long seg;
454
-} mm_segment_t;
455484
456485 struct thread_struct {
457486 /* Cached TLS descriptors: */
....@@ -484,7 +513,7 @@
484513 /* Save middle states of ptrace breakpoints */
485514 struct perf_event *ptrace_bps[HBP_NUM];
486515 /* Debug status used for traps, single steps, etc... */
487
- unsigned long debugreg6;
516
+ unsigned long virtual_dr6;
488517 /* Keep track of the exact dr7 value set by the user */
489518 unsigned long ptrace_dr7;
490519 /* Fault info: */
@@ -496,15 +525,17 @@
 	struct vm86		*vm86;
 #endif
 	/* IO permissions: */
-	unsigned long		*io_bitmap_ptr;
-	unsigned long		iopl;
-	/* Max allowed port in the bitmap, in bytes: */
-	unsigned		io_bitmap_max;
+	struct io_bitmap	*io_bitmap;
 
-	mm_segment_t		addr_limit;
+	/*
+	 * IOPL. Privilege level dependent I/O permission which is
+	 * emulated via the I/O bitmap to prevent user space from disabling
+	 * interrupts.
+	 */
+	unsigned long		iopl_emul;
 
+	unsigned int		iopl_warn:1;
 	unsigned int		sig_on_uaccess_err:1;
-	unsigned int		uaccess_err:1;	/* uaccess failed */
 
 	/* Floating point and extended processor state */
 	struct fpu		fpu;
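
On the bitmap semantics behind iopl_emul: in the x86 I/O permission bitmap a clear bit permits the port, so the all-zero mapall grants every port, which is how IOPL(3) is emulated without letting user space toggle EFLAGS.IF. A hypothetical helper showing the bit test the hardware performs:

#include <stdbool.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

/* A 0 bit in the I/O permission bitmap allows the port; a 1 bit traps (#GP). */
static bool ioport_allowed(const unsigned long *bitmap, unsigned int port)
{
	return !((bitmap[port / BITS_PER_LONG] >>
		  (port % BITS_PER_LONG)) & 1UL);
}
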
@@ -522,32 +553,13 @@
 	*size = fpu_kernel_xstate_size;
 }
 
-/*
- * Set IOPL bits in EFLAGS from given mask
- */
-static inline void native_set_iopl_mask(unsigned mask)
-{
-#ifdef CONFIG_X86_32
-	unsigned int reg;
-
-	asm volatile ("pushfl;"
-		      "popl %0;"
-		      "andl %1, %0;"
-		      "orl %2, %0;"
-		      "pushl %0;"
-		      "popfl"
-		      : "=&r" (reg)
-		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
-#endif
-}
-
 static inline void
 native_load_sp0(unsigned long sp0)
 {
 	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
 }
 
-static inline void native_swapgs(void)
+static __always_inline void native_swapgs(void)
 {
 #ifdef CONFIG_X86_64
 	asm volatile("swapgs" ::: "memory");
@@ -570,7 +582,7 @@
 		      current_stack_pointer) < THREAD_SIZE;
 }
 
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
 #include <asm/paravirt.h>
 #else
 #define __cpuid			native_cpuid
580592 native_load_sp0(sp0);
581593 }
582594
583
-#define set_iopl_mask native_set_iopl_mask
584
-#endif /* CONFIG_PARAVIRT */
595
+#endif /* CONFIG_PARAVIRT_XXL */
585596
586597 /* Free all resources held by a thread. */
587598 extern void release_thread(struct task_struct *);
@@ -651,72 +662,6 @@
 	return edx;
 }
 
-/*
- * This function forces the icache and prefetched instruction stream to
- * catch up with reality in two very specific cases:
- *
- * a) Text was modified using one virtual address and is about to be executed
- *    from the same physical page at a different virtual address.
- *
- * b) Text was modified on a different CPU, may subsequently be
- *    executed on this CPU, and you want to make sure the new version
- *    gets executed. This generally means you're calling this in an IPI.
- *
- * If you're calling this for a different reason, you're probably doing
- * it wrong.
- */
-static inline void sync_core(void)
-{
-	/*
-	 * There are quite a few ways to do this. IRET-to-self is nice
-	 * because it works on every CPU, at any CPL (so it's compatible
-	 * with paravirtualization), and it never exits to a hypervisor.
-	 * The only down sides are that it's a bit slow (it seems to be
-	 * a bit more than 2x slower than the fastest options) and that
-	 * it unmasks NMIs. The "push %cs" is needed because, in
-	 * paravirtual environments, __KERNEL_CS may not be a valid CS
-	 * value when we do IRET directly.
-	 *
-	 * In case NMI unmasking or performance ever becomes a problem,
-	 * the next best option appears to be MOV-to-CR2 and an
-	 * unconditional jump. That sequence also works on all CPUs,
-	 * but it will fault at CPL3 (i.e. Xen PV).
-	 *
-	 * CPUID is the conventional way, but it's nasty: it doesn't
-	 * exist on some 486-like CPUs, and it usually exits to a
-	 * hypervisor.
-	 *
-	 * Like all of Linux's memory ordering operations, this is a
-	 * compiler barrier as well.
-	 */
-#ifdef CONFIG_X86_32
-	asm volatile (
-		"pushfl\n\t"
-		"pushl %%cs\n\t"
-		"pushl $1f\n\t"
-		"iret\n\t"
-		"1:"
-		: ASM_CALL_CONSTRAINT : : "memory");
-#else
-	unsigned int tmp;
-
-	asm volatile (
-		UNWIND_HINT_SAVE
-		"mov %%ss, %0\n\t"
-		"pushq %q0\n\t"
-		"pushq %%rsp\n\t"
-		"addq $8, (%%rsp)\n\t"
-		"pushfq\n\t"
-		"mov %%cs, %0\n\t"
-		"pushq %q0\n\t"
-		"pushq $1f\n\t"
-		"iretq\n\t"
-		UNWIND_HINT_RESTORE
-		"1:"
-		: "=&r" (tmp), ASM_CALL_CONSTRAINT : : "cc", "memory");
-#endif
-}
-
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
 extern void amd_e400_c1e_apic_setup(void);
 
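
sync_core() leaves this header in the hunk above (in mainline it was relocated to <asm/sync_core.h>, though this dump only shows the deletion). The removed comment names CPUID as the conventional serializing instruction; a hypothetical user-space equivalent in x86 GNU C:

/* CPUID is architecturally serializing; the memory clobber also makes
 * this a compiler barrier, like the kernel's memory ordering ops. */
static inline void serialize_with_cpuid(void)
{
	unsigned int eax = 0, ebx, ecx = 0, edx;

	asm volatile("cpuid"
		     : "+a" (eax), "=b" (ebx), "+c" (ecx), "=d" (edx)
		     : : "memory");
}
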
@@ -728,7 +673,6 @@
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
-void early_trap_pf_init(void);
 
 /* Defined in head.S */
 extern struct desc_ptr early_gdt_descr;
@@ -738,6 +682,9 @@
 extern void load_fixmap_gdt(int);
 extern void load_percpu_segment(int);
 extern void cpu_init(void);
+extern void cpu_init_secondary(void);
+extern void cpu_init_exception_handling(void);
+extern void cr4_init(void);
 
 static inline unsigned long get_debugctlmsr(void)
 {
@@ -798,7 +745,7 @@
  * Useful for spinlocks to avoid one state transition in the
  * cache coherency protocol:
  */
-static inline void prefetchw(const void *x)
+static __always_inline void prefetchw(const void *x)
 {
 	alternative_input(BASE_PREFETCH, "prefetchw %P1",
 			  X86_FEATURE_3DNOWPREFETCH,
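
Only the inline-ness of prefetchw() changes above, but the same effect is available in portable GNU C: __builtin_prefetch with rw=1 requests a write-intent prefetch and compiles to PREFETCHW where the target supports it. A hypothetical analogue:

/* rw=1 asks for write intent, locality 3 means high temporal locality. */
static inline void prefetchw_sketch(const void *x)
{
	__builtin_prefetch(x, 1, 3);
}
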
@@ -823,68 +770,15 @@
 })
 
 #ifdef CONFIG_X86_32
-/*
- * User space process size: 3GB (default).
- */
-#define IA32_PAGE_OFFSET	PAGE_OFFSET
-#define TASK_SIZE		PAGE_OFFSET
-#define TASK_SIZE_LOW		TASK_SIZE
-#define TASK_SIZE_MAX		TASK_SIZE
-#define DEFAULT_MAP_WINDOW	TASK_SIZE
-#define STACK_TOP		TASK_SIZE
-#define STACK_TOP_MAX		STACK_TOP
-
 #define INIT_THREAD  {							\
 	.sp0			= TOP_OF_INIT_STACK,			\
 	.sysenter_cs		= __KERNEL_CS,				\
-	.io_bitmap_ptr		= NULL,					\
-	.addr_limit		= KERNEL_DS,				\
 }
 
 #define KSTK_ESP(task)		(task_pt_regs(task)->sp)
 
 #else
-/*
- * User space process size. This is the first address outside the user range.
- * There are a few constraints that determine this:
- *
- * On Intel CPUs, if a SYSCALL instruction is at the highest canonical
- * address, then that syscall will enter the kernel with a
- * non-canonical return address, and SYSRET will explode dangerously.
- * We avoid this particular problem by preventing anything executable
- * from being mapped at the maximum canonical address.
- *
- * On AMD CPUs in the Ryzen family, there's a nasty bug in which the
- * CPUs malfunction if they execute code from the highest canonical page.
- * They'll speculate right off the end of the canonical space, and
- * bad things happen. This is worked around in the same way as the
- * Intel problem.
- *
- * With page table isolation enabled, we map the LDT in ... [stay tuned]
- */
-#define TASK_SIZE_MAX		((1UL << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE)
-
-#define DEFAULT_MAP_WINDOW	((1UL << 47) - PAGE_SIZE)
-
-/* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
-					0xc0000000 : 0xFFFFe000)
-
-#define TASK_SIZE_LOW		(test_thread_flag(TIF_ADDR32) ? \
-					IA32_PAGE_OFFSET : DEFAULT_MAP_WINDOW)
-#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
-					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
-#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
-					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
-
-#define STACK_TOP		TASK_SIZE_LOW
-#define STACK_TOP_MAX		TASK_SIZE_MAX
-
-#define INIT_THREAD  {						\
-	.addr_limit		= KERNEL_DS,			\
-}
+#define INIT_THREAD { }
 
 extern unsigned long KSTK_ESP(struct task_struct *task);
 
@@ -911,30 +805,16 @@
 
 DECLARE_PER_CPU(u64, msr_misc_features_shadow);
 
-/* Register/unregister a process' MPX related resource */
-#define MPX_ENABLE_MANAGEMENT()	mpx_enable_management()
-#define MPX_DISABLE_MANAGEMENT()	mpx_disable_management()
-
-#ifdef CONFIG_X86_INTEL_MPX
-extern int mpx_enable_management(void);
-extern int mpx_disable_management(void);
-#else
-static inline int mpx_enable_management(void)
-{
-	return -EINVAL;
-}
-static inline int mpx_disable_management(void)
-{
-	return -EINVAL;
-}
-#endif /* CONFIG_X86_INTEL_MPX */
-
 #ifdef CONFIG_CPU_SUP_AMD
 extern u16 amd_get_nb_id(int cpu);
 extern u32 amd_get_nodes_per_socket(void);
+extern bool cpu_has_ibpb_brtype_microcode(void);
+extern void amd_clear_divider(void);
 #else
 static inline u16 amd_get_nb_id(int cpu)		{ return 0; }
 static inline u32 amd_get_nodes_per_socket(void)	{ return 0; }
+static inline bool cpu_has_ibpb_brtype_microcode(void)	{ return false; }
+static inline void amd_clear_divider(void)		{ }
 #endif
 
 static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
@@ -953,8 +833,8 @@
 }
 
 extern unsigned long arch_align_stack(unsigned long sp);
-extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
-extern void free_kernel_image_pages(void *begin, void *end);
+void free_init_pages(const char *what, unsigned long begin, unsigned long end);
+extern void free_kernel_image_pages(const char *what, void *begin, void *end);
 
 void default_idle(void);
 #ifdef CONFIG_XEN
@@ -963,9 +843,9 @@
 #define xen_set_default_idle 0
 #endif
 
-void stop_this_cpu(void *dummy);
-void df_debug(struct pt_regs *regs, long error_code);
-void microcode_check(void);
+void __noreturn stop_this_cpu(void *dummy);
+void microcode_check(struct cpuinfo_x86 *prev_info);
+void store_cpu_caps(struct cpuinfo_x86 *info);
 
 enum l1tf_mitigations {
 	L1TF_MITIGATION_OFF,
@@ -984,11 +864,6 @@
 	MDS_MITIGATION_VMWERV,
 };
 
-enum taa_mitigations {
-	TAA_MITIGATION_OFF,
-	TAA_MITIGATION_UCODE_NEEDED,
-	TAA_MITIGATION_VERW,
-	TAA_MITIGATION_TSX_DISABLED,
-};
+extern bool gds_ucode_mitigated(void);
 
 #endif /* _ASM_X86_PROCESSOR_H */