@@ -6,6 +6,8 @@
 #include <asm/percpu.h>
 #include <asm/asm-offsets.h>
 #include <asm/processor-flags.h>
+#include <asm/msr.h>
+#include <asm/nospec-branch.h>
 
 /*
 
@@ -146,27 +148,19 @@
 
 .endm
 
-.macro POP_REGS pop_rdi=1 skip_r11rcx=0
+.macro POP_REGS pop_rdi=1
 	popq %r15
 	popq %r14
 	popq %r13
 	popq %r12
 	popq %rbp
 	popq %rbx
-	.if \skip_r11rcx
-	popq %rsi
-	.else
 	popq %r11
-	.endif
 	popq %r10
 	popq %r9
 	popq %r8
 	popq %rax
-	.if \skip_r11rcx
-	popq %rsi
-	.else
 	popq %rcx
-	.endif
 	popq %rdx
 	popq %rsi
 	.if \pop_rdi
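The removal of the skip_r11rcx special case makes POP_REGS unconditional: %r11 and %rcx are now always reloaded from the stack frame. The likely motivation is that IBRS_EXIT (added below) clobbers %rcx on the way out, so the SYSRET path can no longer pre-load user RIP/RFLAGS into %rcx/%r11 ahead of the register pops. For reference, the pop order simply walks the GPR portion of struct pt_regs from the lowest address up; a minimal illustration (the struct below is hand-copied from the field order in arch/x86/include/asm/ptrace.h for x86-64, not included from it):

#include <stddef.h>
#include <stdio.h>

/* Field order as in the kernel's x86-64 struct pt_regs. */
struct pt_regs_sketch {
	unsigned long r15, r14, r13, r12, bp, bx;
	unsigned long r11, r10, r9, r8, ax, cx, dx, si, di;
	unsigned long orig_ax, ip, cs, flags, sp, ss;
};

int main(void)
{
	/* POP_REGS consumes the frame bottom-up: r15 first, rdi last. */
	printf("r15 at +%zu, rdi at +%zu\n",
	       offsetof(struct pt_regs_sketch, r15),
	       offsetof(struct pt_regs_sketch, di));
	return 0;
}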
@@ -317,3 +311,38 @@
 #endif
 
 /*
+ * IBRS kernel mitigation for Spectre_v2.
+ *
+ * Assumes full context is established (PUSH_REGS, CR3 and GS) and it clobbers
+ * the regs it uses (AX, CX, DX). Must be called before the first RET
+ * instruction (NOTE! UNTRAIN_RET includes a RET instruction)
+ *
+ * The optional argument is used to save/restore the current value,
+ * which is used on the paranoid paths.
+ *
+ * Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set.
+ */
+.macro IBRS_ENTER save_reg
+#ifdef CONFIG_CPU_IBRS_ENTRY
+	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
+	movl	$MSR_IA32_SPEC_CTRL, %ecx
+
+.ifnb \save_reg
+	rdmsr
+	shl	$32, %rdx
+	or	%rdx, %rax
+	mov	%rax, \save_reg
+	test	$SPEC_CTRL_IBRS, %eax
+	jz	.Ldo_wrmsr_\@
+	lfence
+	jmp	.Lend_\@
+.Ldo_wrmsr_\@:
+.endif
+
+	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
+	movl	%edx, %eax
+	shr	$32, %rdx
+	wrmsr
+.Lend_\@:
+#endif
+.endm
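To make the EDX:EAX dance easier to follow, here is a rough C model of IBRS_ENTER's MSR handling. rdmsr64()/wrmsr64() are hypothetical stand-ins for the RDMSR/WRMSR instructions (RDMSR returns the value split across EDX:EAX, which the macro recombines with the shl/or pair), and the constants are the architectural values from the Intel SDM rather than anything defined in this patch:

#include <stdint.h>

#define MSR_IA32_SPEC_CTRL	0x48		/* architectural MSR number */
#define SPEC_CTRL_IBRS		(1ULL << 0)	/* Indirect Branch Restricted Speculation */

/* Hypothetical stand-ins for the RDMSR/WRMSR instructions. */
extern uint64_t rdmsr64(uint32_t msr);
extern void wrmsr64(uint32_t msr, uint64_t val);

/* Kernel-maintained per-CPU copy of the desired SPEC_CTRL value. */
extern uint64_t x86_spec_ctrl_current;

static void ibrs_enter(uint64_t *save)	/* save != NULL on paranoid paths */
{
	if (save) {
		*save = rdmsr64(MSR_IA32_SPEC_CTRL);
		if (*save & SPEC_CTRL_IBRS) {
			/* IBRS already on: skip the expensive WRMSR. The asm
			 * version issues LFENCE here, standing in for the
			 * serializing WRMSR as a speculation barrier. */
			return;
		}
	}
	wrmsr64(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_current);
}

When no save register is given (the common entry paths), the macro writes x86_spec_ctrl_current unconditionally; the ALTERNATIVE at the top patches the whole sequence into a jump past it on CPUs without X86_FEATURE_KERNEL_IBRS.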
@@ -319,0 +349,23 @@
+
+/*
+ * Similar to IBRS_ENTER, requires KERNEL GS,CR3 and clobbers (AX, CX, DX)
+ * regs. Must be called after the last RET.
+ */
+.macro IBRS_EXIT save_reg
+#ifdef CONFIG_CPU_IBRS_ENTRY
+	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
+	movl	$MSR_IA32_SPEC_CTRL, %ecx
+
+.ifnb \save_reg
+	mov	\save_reg, %rdx
+.else
+	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
+	andl	$(~SPEC_CTRL_IBRS), %edx
+.endif
+
+	movl	%edx, %eax
+	shr	$32, %rdx
+	wrmsr
+.Lend_\@:
+#endif
+.endm
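Continuing the model above (same hypothetical helpers and constants as the previous sketch), IBRS_EXIT is the mirror image:

/* Counterpart of ibrs_enter(): restore the saved value on paranoid
 * paths, otherwise write the current value with the IBRS bit cleared
 * for the return to user space. */
static void ibrs_exit(const uint64_t *save)
{
	uint64_t val;

	if (save)
		val = *save;	/* paranoid path: restore what we found */
	else
		val = x86_spec_ctrl_current & ~SPEC_CTRL_IBRS;

	wrmsr64(MSR_IA32_SPEC_CTRL, val);
}

Elsewhere in the series the paranoid entry/exit paths presumably pass a callee-saved scratch register (e.g. save_reg=%r15), so that a nested NMI or MCE restores exactly the SPEC_CTRL value it interrupted rather than unconditionally clearing IBRS.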
| 372 | + |
---|
| 373 | +/* |
---|
320 | 374 | * Mitigate Spectre v1 for conditional swapgs code paths. |
---|
321 | 375 | * |
---|
322 | 376 | * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to |
---|
.. | .. |
---|
@@ -333,18 +387,66 @@
 	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
 .endm
 
-#endif /* CONFIG_X86_64 */
-
-/*
- * This does 'call enter_from_user_mode' unless we can avoid it based on
- * kernel config or using the static jump infrastructure.
- */
-.macro CALL_enter_from_user_mode
-#ifdef CONFIG_CONTEXT_TRACKING
-#ifdef CONFIG_JUMP_LABEL
-	STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
-#endif
-	call enter_from_user_mode
-.Lafter_call_\@:
+.macro STACKLEAK_ERASE_NOCLOBBER
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+	PUSH_AND_CLEAR_REGS
+	call stackleak_erase
+	POP_REGS
 #endif
 .endm
+
+.macro SAVE_AND_SET_GSBASE scratch_reg:req save_reg:req
+	rdgsbase \save_reg
+	GET_PERCPU_BASE \scratch_reg
+	wrgsbase \scratch_reg
+.endm
+
+#else /* CONFIG_X86_64 */
+# undef UNWIND_HINT_IRET_REGS
+# define UNWIND_HINT_IRET_REGS
+#endif /* !CONFIG_X86_64 */
+
+.macro STACKLEAK_ERASE
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+	call stackleak_erase
+#endif
+.endm
+
+#ifdef CONFIG_SMP
+
+/*
+ * CPU/node NR is loaded from the limit (size) field of a special segment
+ * descriptor entry in GDT.
+ */
+.macro LOAD_CPU_AND_NODE_SEG_LIMIT reg:req
+	movq	$__CPUNODE_SEG, \reg
+#ifdef __clang__
+	.long 0xc0030f48
+#else
+	lsl	\reg, \reg
+#endif
+.endm
+
+/*
+ * Fetch the per-CPU GSBASE value for this processor and put it in @reg.
+ * We normally use %gs for accessing per-CPU data, but we are setting up
+ * %gs here and obviously can not use %gs itself to access per-CPU data.
+ *
+ * Do not use RDPID, because KVM loads guest's TSC_AUX on vm-entry and
+ * may not restore the host's value until the CPU returns to userspace.
+ * Thus the kernel would consume a guest's TSC_AUX if an NMI arrives
+ * while running KVM's run loop.
+ */
+.macro GET_PERCPU_BASE reg:req
+	LOAD_CPU_AND_NODE_SEG_LIMIT \reg
+	andq	$VDSO_CPUNODE_MASK, \reg
+	movq	__per_cpu_offset(, \reg, 8), \reg
+.endm
+
+#else
+
+.macro GET_PERCPU_BASE reg:req
+	movq	pcpu_unit_offsets(%rip), \reg
+.endm
+
+#endif /* CONFIG_SMP */
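The LSL trick used by LOAD_CPU_AND_NODE_SEG_LIMIT can be observed from user space, since Linux exposes the same cpu/node segment to the vDSO (the .long 0xc0030f48 in the clang branch appears to be a hand-encoded lsl %rax, %rax for an integrated assembler that rejected the mnemonic form). A runnable sketch for x86-64 Linux; the selector value 0x7b (__CPUNODE_SEG) and the 12-bit CPU field (VDSO_CPUNODE_MASK == 0xfff) are assumptions copied from arch/x86/include/asm/segment.h, not from this patch:

#include <stdio.h>

int main(void)
{
	unsigned long limit;

	/* LSL loads the segment descriptor's limit field without going
	 * through %gs; a production version would also check ZF, which
	 * LSL clears when the selector is invalid. */
	asm volatile("lsl %1, %0" : "=r" (limit) : "r" (0x7bUL));

	printf("cpu=%lu node=%lu\n", limit & 0xfff, limit >> 12);
	return 0;
}

GET_PERCPU_BASE then masks off the node bits and indexes __per_cpu_offset[] by CPU number, exactly like the sketch's limit & 0xfff, which is how the paranoid/NMI path can locate its per-CPU area before %gs can be trusted.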