| .. | .. |
|---|
| 8 | 8 | |
|---|
| 9 | 9 | DECLARE_PER_CPU(unsigned long, cpu_dr7); |
|---|
| 10 | 10 | |
|---|
| 11 | | -#ifndef CONFIG_PARAVIRT |
|---|
| 11 | +#ifndef CONFIG_PARAVIRT_XXL |
|---|
| 12 | 12 | /* |
|---|
| 13 | 13 | * These special macros can be used to get or set a debugging register |
|---|
| 14 | 14 | */ |
|---|
| .. | .. |
|---|
| 18 | 18 | native_set_debugreg(register, value) |
|---|
| 19 | 19 | #endif |
|---|
| 20 | 20 | |
|---|
| 21 | | -static inline unsigned long native_get_debugreg(int regno) |
|---|
| 21 | +static __always_inline unsigned long native_get_debugreg(int regno) |
|---|
| 22 | 22 | { |
|---|
| 23 | 23 | unsigned long val = 0; /* Damn you, gcc! */ |
|---|
| 24 | 24 | |
|---|
| .. | .. |
|---|
| 39 | 39 | asm("mov %%db6, %0" :"=r" (val)); |
|---|
| 40 | 40 | break; |
|---|
| 41 | 41 | case 7: |
|---|
| 42 | | - asm("mov %%db7, %0" :"=r" (val)); |
|---|
| 42 | + /* |
|---|
| 43 | + * Apply __FORCE_ORDER to DR7 reads to forbid re-ordering them |
|---|
| 44 | + * with other code. |
|---|
| 45 | + * |
|---|
| 46 | + * This is needed because a DR7 access can cause a #VC exception |
|---|
| 47 | + * when running under SEV-ES. Taking a #VC exception is not a |
|---|
| 48 | + * safe thing to do just anywhere in the entry code and |
|---|
| 49 | + * re-ordering might place the access into an unsafe location. |
|---|
| 50 | + * |
|---|
| 51 | + * This happened in the NMI handler, where the DR7 read was |
|---|
| 52 | + * re-ordered to happen before the call to sev_es_ist_enter(), |
|---|
| 53 | + * causing stack recursion. |
|---|
| 54 | + */ |
|---|
| 55 | + asm volatile("mov %%db7, %0" : "=r" (val) : __FORCE_ORDER); |
|---|
| 43 | 56 | break; |
|---|
| 44 | 57 | default: |
|---|
| 45 | 58 | BUG(); |
|---|
| .. | .. |
|---|
| 47 | 60 | return val; |
|---|
| 48 | 61 | } |
|---|
| 49 | 62 | |
|---|
| 50 | | -static inline void native_set_debugreg(int regno, unsigned long value) |
|---|
| 63 | +static __always_inline void native_set_debugreg(int regno, unsigned long value) |
|---|
| 51 | 64 | { |
|---|
| 52 | 65 | switch (regno) { |
|---|
| 53 | 66 | case 0: |
|---|
| .. | .. |
|---|
| 66 | 79 | asm("mov %0, %%db6" ::"r" (value)); |
|---|
| 67 | 80 | break; |
|---|
| 68 | 81 | case 7: |
|---|
| 69 | | - asm("mov %0, %%db7" ::"r" (value)); |
|---|
| 82 | + /* |
|---|
| 83 | + * Apply __FORCE_ORDER to DR7 writes to forbid re-ordering them |
|---|
| 84 | + * with other code. |
|---|
| 85 | + * |
|---|
| 86 | +	 * While it didn't happen with a DR7 write (see the DR7 read |
|---|
| 87 | + * comment above which explains where it happened), add the |
|---|
| 88 | + * __FORCE_ORDER here too to avoid similar problems in the |
|---|
| 89 | + * future. |
|---|
| 90 | + */ |
|---|
| 91 | + asm volatile("mov %0, %%db7" ::"r" (value), __FORCE_ORDER); |
|---|
| 70 | 92 | break; |
|---|
| 71 | 93 | default: |
|---|
| 72 | 94 | BUG(); |
|---|
| .. | .. |
|---|
| 85 | 107 | set_debugreg(0UL, 3); |
|---|
| 86 | 108 | } |
|---|
| 87 | 109 | |
|---|
| 88 | | -static inline int hw_breakpoint_active(void) |
|---|
| 110 | +static __always_inline bool hw_breakpoint_active(void) |
|---|
| 89 | 111 | { |
|---|
| 90 | 112 | return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK; |
|---|
| 91 | 113 | } |
|---|
| 92 | 114 | |
|---|
| 93 | | -extern void aout_dump_debugregs(struct user *dump); |
|---|
| 94 | | - |
|---|
| 95 | 115 | extern void hw_breakpoint_restore(void); |
|---|
| 96 | 116 | |
|---|
| 97 | | -#ifdef CONFIG_X86_64 |
|---|
| 98 | | -DECLARE_PER_CPU(int, debug_stack_usage); |
|---|
| 99 | | -static inline void debug_stack_usage_inc(void) |
|---|
| 117 | +static __always_inline unsigned long local_db_save(void) |
|---|
| 100 | 118 | { |
|---|
| 101 | | - __this_cpu_inc(debug_stack_usage); |
|---|
| 119 | + unsigned long dr7; |
|---|
| 120 | + |
|---|
| 121 | + if (static_cpu_has(X86_FEATURE_HYPERVISOR) && !hw_breakpoint_active()) |
|---|
| 122 | + return 0; |
|---|
| 123 | + |
|---|
| 124 | + get_debugreg(dr7, 7); |
|---|
| 125 | + dr7 &= ~0x400; /* architecturally set bit */ |
|---|
| 126 | + if (dr7) |
|---|
| 127 | + set_debugreg(0, 7); |
|---|
| 128 | + /* |
|---|
| 129 | + * Ensure the compiler doesn't lower the above statements into |
|---|
| 130 | + * the critical section; disabling breakpoints late would not |
|---|
| 131 | + * be good. |
|---|
| 132 | + */ |
|---|
| 133 | + barrier(); |
|---|
| 134 | + |
|---|
| 135 | + return dr7; |
|---|
| 102 | 136 | } |
|---|
| 103 | | -static inline void debug_stack_usage_dec(void) |
|---|
| 137 | + |
|---|
| 138 | +static __always_inline void local_db_restore(unsigned long dr7) |
|---|
| 104 | 139 | { |
|---|
| 105 | | - __this_cpu_dec(debug_stack_usage); |
|---|
| 140 | + /* |
|---|
| 141 | + * Ensure the compiler doesn't raise this statement into |
|---|
| 142 | + * the critical section; enabling breakpoints early would |
|---|
| 143 | + * not be good. |
|---|
| 144 | + */ |
|---|
| 145 | + barrier(); |
|---|
| 146 | + if (dr7) |
|---|
| 147 | + set_debugreg(dr7, 7); |
|---|
| 106 | 148 | } |
|---|
| 107 | | -int is_debug_stack(unsigned long addr); |
|---|
| 108 | | -void debug_stack_set_zero(void); |
|---|
| 109 | | -void debug_stack_reset(void); |
|---|
| 110 | | -#else /* !X86_64 */ |
|---|
| 111 | | -static inline int is_debug_stack(unsigned long addr) { return 0; } |
|---|
| 112 | | -static inline void debug_stack_set_zero(void) { } |
|---|
| 113 | | -static inline void debug_stack_reset(void) { } |
|---|
| 114 | | -static inline void debug_stack_usage_inc(void) { } |
|---|
| 115 | | -static inline void debug_stack_usage_dec(void) { } |
|---|
| 116 | | -#endif /* X86_64 */ |
|---|
| 117 | 149 | |
|---|
| 118 | 150 | #ifdef CONFIG_CPU_SUP_AMD |
|---|
| 119 | 151 | extern void set_dr_addr_mask(unsigned long mask, int dr); |
|---|