--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 
 #ifndef _ASM_X86_CPU_ENTRY_AREA_H
 #define _ASM_X86_CPU_ENTRY_AREA_H
@@ -6,6 +6,78 @@
 #include <linux/percpu-defs.h>
 #include <asm/processor.h>
 #include <asm/intel_ds.h>
+#include <asm/pgtable_areas.h>
+
+#ifdef CONFIG_X86_64
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+#define VC_EXCEPTION_STKSZ	EXCEPTION_STKSZ
+#else
+#define VC_EXCEPTION_STKSZ	0
+#endif
+
+/* Macro to enforce the same ordering and stack sizes */
+#define ESTACKS_MEMBERS(guardsize, optional_stack_size)		\
+	char	DF_stack_guard[guardsize];			\
+	char	DF_stack[EXCEPTION_STKSZ];			\
+	char	NMI_stack_guard[guardsize];			\
+	char	NMI_stack[EXCEPTION_STKSZ];			\
+	char	DB_stack_guard[guardsize];			\
+	char	DB_stack[EXCEPTION_STKSZ];			\
+	char	MCE_stack_guard[guardsize];			\
+	char	MCE_stack[EXCEPTION_STKSZ];			\
+	char	VC_stack_guard[guardsize];			\
+	char	VC_stack[optional_stack_size];			\
+	char	VC2_stack_guard[guardsize];			\
+	char	VC2_stack[optional_stack_size];			\
+	char	IST_top_guard[guardsize];			\
+
+/* The exception stacks' physical storage. No guard pages required */
+struct exception_stacks {
+	ESTACKS_MEMBERS(0, VC_EXCEPTION_STKSZ)
+};
+
+/* The effective cpu entry area mapping with guard pages. */
+struct cea_exception_stacks {
+	ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ)
+};
+
+/*
+ * The exception stack ordering in [cea_]exception_stacks
+ */
+enum exception_stack_ordering {
+	ESTACK_DF,
+	ESTACK_NMI,
+	ESTACK_DB,
+	ESTACK_MCE,
+	ESTACK_VC,
+	ESTACK_VC2,
+	N_EXCEPTION_STACKS
+};
+
+#define CEA_ESTACK_SIZE(st)					\
+	sizeof(((struct cea_exception_stacks *)0)->st## _stack)
+
+#define CEA_ESTACK_BOT(ceastp, st)				\
+	((unsigned long)&(ceastp)->st## _stack)
+
+#define CEA_ESTACK_TOP(ceastp, st)				\
+	(CEA_ESTACK_BOT(ceastp, st) + CEA_ESTACK_SIZE(st))
+
+#define CEA_ESTACK_OFFS(st)					\
+	offsetof(struct cea_exception_stacks, st## _stack)
+
+#define CEA_ESTACK_PAGES					\
+	(sizeof(struct cea_exception_stacks) / PAGE_SIZE)
+
+#endif
+
+#ifdef CONFIG_X86_32
+struct doublefault_stack {
+	unsigned long stack[(PAGE_SIZE - sizeof(struct x86_hw_tss)) / sizeof(unsigned long)];
+	struct x86_hw_tss tss;
+} __aligned(PAGE_SIZE);
+#endif
 
 /*
  * cpu_entry_area is a percpu region that contains things needed by the CPU
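The `guardsize` parameter is the heart of the construction: the same member list is stamped out twice, once with zero-sized guard arrays for the physical backing store and once with page-sized guards for the virtual mapping. Below is a standalone sketch of that idea, trimmed to two stacks, with PAGE_SIZE and EXCEPTION_STKSZ values assumed purely for illustration (the kernel derives the real ones elsewhere; zero-length arrays are the GNU C extension the kernel builds with):

```c
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE	4096		/* assumed for illustration */
#define EXCEPTION_STKSZ	PAGE_SIZE	/* assumed: one page per IST stack */

/* Trimmed to two stacks; the real macro lists DF/NMI/DB/MCE/VC/VC2. */
#define ESTACKS_MEMBERS(guardsize)			\
	char	DF_stack_guard[guardsize];		\
	char	DF_stack[EXCEPTION_STKSZ];		\
	char	NMI_stack_guard[guardsize];		\
	char	NMI_stack[EXCEPTION_STKSZ];		\
	char	IST_top_guard[guardsize];

/* Backing store: guardsize 0 makes the guards vanish (GNU extension). */
struct exception_stacks { ESTACKS_MEMBERS(0) };

/* Effective mapping: every stack sits behind a page-sized guard slot. */
struct cea_exception_stacks { ESTACKS_MEMBERS(PAGE_SIZE) };

int main(void)
{
	printf("backing store : %zu bytes\n", sizeof(struct exception_stacks));
	printf("cea mapping   : %zu bytes\n", sizeof(struct cea_exception_stacks));
	printf("NMI_stack offs: %zu\n",
	       offsetof(struct cea_exception_stacks, NMI_stack));
	return 0;
}
```

With these assumptions the trimmed backing store occupies 8192 bytes while the mapped form occupies 20480. For the full six-stack struct in the patch, CEA_ESTACK_PAGES would come out to 13 (seven guard pages plus six stack pages) when EXCEPTION_STKSZ equals PAGE_SIZE.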
@@ -20,9 +92,18 @@
 
 	/*
 	 * The GDT is just below entry_stack and thus serves (on x86_64) as
-	 * a a read-only guard page.
+	 * a read-only guard page. On 32-bit the GDT must be writeable, so
+	 * it needs an extra guard page.
 	 */
+#ifdef CONFIG_X86_32
+	char guard_entry_stack[PAGE_SIZE];
+#endif
 	struct entry_stack_page entry_stack_page;
+
+#ifdef CONFIG_X86_32
+	char guard_doublefault_stack[PAGE_SIZE];
+	struct doublefault_stack doublefault_stack;
+#endif
 
 	/*
 	 * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
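The 32-bit `doublefault_stack` mapped above is sized so that the stack array plus the hardware TSS fill exactly one page, and `__aligned(PAGE_SIZE)` pins it to a page boundary so `guard_doublefault_stack` guards its low end. A compilable check of that arithmetic, using an assumed 4096-byte page and a 104-byte stand-in for the 32-bit hardware TSS (the real type comes from `<asm/processor.h>`):

```c
#include <assert.h>

#define PAGE_SIZE 4096			/* assumed for illustration */
struct x86_hw_tss { char regs[104]; };	/* stand-in: 32-bit hw TSS size */

struct doublefault_stack {
	unsigned long stack[(PAGE_SIZE - sizeof(struct x86_hw_tss)) /
			    sizeof(unsigned long)];
	struct x86_hw_tss tss;
} __attribute__((aligned(PAGE_SIZE)));

/* (4096 - 104) / 4 = 998 longs = 3992 bytes, and 3992 + 104 = 4096. */
static_assert(sizeof(struct doublefault_stack) == PAGE_SIZE,
	      "stack plus TSS must fill exactly one page");
```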
@@ -30,18 +111,12 @@
 	 */
 	struct tss_struct tss;
 
-	char entry_trampoline[PAGE_SIZE];
-
 #ifdef CONFIG_X86_64
 	/*
-	 * Exception stacks used for IST entries.
-	 *
-	 * In the future, this should have a separate slot for each stack
-	 * with guard pages between them.
+	 * Exception stacks used for IST entries with guard pages.
 	 */
-	char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
+	struct cea_exception_stacks estacks;
 #endif
-#ifdef CONFIG_CPU_SUP_INTEL
 	/*
 	 * Per CPU debug store for Intel performance monitoring. Wastes a
 	 * full page at the moment.
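Embedding `struct cea_exception_stacks` here gives every IST stack in the cpu_entry_area a guard slot that exists only in the virtual mapping: the backing `struct exception_stacks` declares its guards with size zero, so there is simply no page to map behind them. The resulting region, inferred from the ESTACKS_MEMBERS() ordering above rather than quoted from the patch:

```c
/*
 * estacks, low address to high; every *_guard slot is unmapped:
 *
 *	DF_stack_guard,  DF_stack	#DF IST stack
 *	NMI_stack_guard, NMI_stack	NMI IST stack
 *	DB_stack_guard,  DB_stack	#DB IST stack
 *	MCE_stack_guard, MCE_stack	#MC IST stack
 *	VC_stack_guard,  VC_stack	#VC (storage only with AMD_MEM_ENCRYPT)
 *	VC2_stack_guard, VC2_stack	#VC fallback (likewise)
 *	IST_top_guard			caps the region from above
 *
 * An overflow off the bottom of any stack now faults on an unmapped
 * guard page instead of silently running into the neighbouring stack,
 * which the old flat exception_stacks[] array could not catch.
 */
```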
@@ -52,24 +127,19 @@
 	 * Reserve enough fixmap PTEs.
 	 */
 	struct debug_store_buffers cpu_debug_buffers;
-#endif
 };
 
-#define CPU_ENTRY_AREA_SIZE	(sizeof(struct cpu_entry_area))
-#define CPU_ENTRY_AREA_TOT_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)
+#define CPU_ENTRY_AREA_SIZE		(sizeof(struct cpu_entry_area))
+#define CPU_ENTRY_AREA_ARRAY_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)
+
+/* Total size includes the readonly IDT mapping page as well: */
+#define CPU_ENTRY_AREA_TOTAL_SIZE	(CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)
 
 DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
+DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
 
 extern void setup_cpu_entry_areas(void);
 extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
-
-#define	CPU_ENTRY_AREA_RO_IDT		CPU_ENTRY_AREA_BASE
-#define CPU_ENTRY_AREA_PER_CPU		(CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)
-
-#define CPU_ENTRY_AREA_RO_IDT_VADDR	((void *)CPU_ENTRY_AREA_RO_IDT)
-
-#define CPU_ENTRY_AREA_MAP_SIZE			\
-	(CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE)
 
 extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
 
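The removed CPU_ENTRY_AREA_RO_IDT / CPU_ENTRY_AREA_PER_CPU / CPU_ENTRY_AREA_MAP_SIZE constants have presumably moved into the newly included `<asm/pgtable_areas.h>`. Per the removed lines, the read-only IDT page sits at the base and the per-CPU array starts one page above it, which is exactly why CPU_ENTRY_AREA_TOTAL_SIZE adds one PAGE_SIZE to CPU_ENTRY_AREA_ARRAY_SIZE. A minimal sketch, not the patch's code, of how `get_cpu_entry_area()` can resolve a CPU's slot under that layout:

```c
/* Sketch under the assumption that CPU_ENTRY_AREA_PER_CPU now comes
 * from <asm/pgtable_areas.h>: the entry areas form a contiguous
 * virtual array of fixed-size slots, one per possible CPU. */
struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU +
			   (unsigned long)cpu * CPU_ENTRY_AREA_SIZE;

	return (struct cpu_entry_area *)va;
}
```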
@@ -78,4 +148,10 @@
 	return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
 }
 
+#define __this_cpu_ist_top_va(name)					\
+	CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name)
+
+#define __this_cpu_ist_bottom_va(name)					\
+	CEA_ESTACK_BOT(__this_cpu_read(cea_exception_stacks), name)
+
 #endif
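The two accessors added in the final hunk resolve IST stack bounds for the running CPU by token-pasting the stack name into the per-CPU `cea_exception_stacks` pointer: `__this_cpu_ist_top_va(DF)` expands to `CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), DF)`, which is `CEA_ESTACK_BOT(..., DF) + CEA_ESTACK_SIZE(DF)`, i.e. the first byte above `DF_stack` — the value a downward-growing IST entry wants. A short kernel-context sketch with a hypothetical helper name:

```c
/* Hypothetical helper built only from the accessors above: does
 * @addr lie on this CPU's NMI IST stack? */
static inline bool on_nmi_ist_stack(unsigned long addr)
{
	return addr >= __this_cpu_ist_bottom_va(NMI) &&
	       addr <  __this_cpu_ist_top_va(NMI);
}
```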