| .. | .. |
|---|
| 40 | 40 | #include <asm/processor-flags.h> |
|---|
| 41 | 41 | #include <asm/irq_vectors.h> |
|---|
| 42 | 42 | #include <asm/cpufeatures.h> |
|---|
| 43 | | -#include <asm/alternative-asm.h> |
|---|
| 43 | +#include <asm/alternative.h> |
|---|
| 44 | 44 | #include <asm/asm.h> |
|---|
| 45 | 45 | #include <asm/smap.h> |
|---|
| 46 | 46 | #include <asm/frame.h> |
|---|
| 47 | +#include <asm/trapnr.h> |
|---|
| 47 | 48 | #include <asm/nospec-branch.h> |
|---|
| 48 | 49 | |
|---|
| 50 | +#include "calling.h" |
|---|
| 51 | + |
|---|
| 49 | 52 | .section .entry.text, "ax" |
|---|
| 50 | | - |
|---|
| 51 | | -/* |
|---|
| 52 | | - * We use macros for low-level operations which need to be overridden |
|---|
| 53 | | - * for paravirtualization. The following will never clobber any registers: |
|---|
| 54 | | - * INTERRUPT_RETURN (aka. "iret") |
|---|
| 55 | | - * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax") |
|---|
| 56 | | - * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit"). |
|---|
| 57 | | - * |
|---|
| 58 | | - * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must |
|---|
| 59 | | - * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY). |
|---|
| 60 | | - * Allowing a register to be clobbered can shrink the paravirt replacement |
|---|
| 61 | | - * enough to patch inline, increasing performance. |
|---|
| 62 | | - */ |
|---|
| 63 | | - |
|---|
| 64 | | -#ifdef CONFIG_PREEMPT |
|---|
| 65 | | -# define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF |
|---|
| 66 | | -#else |
|---|
| 67 | | -# define preempt_stop(clobbers) |
|---|
| 68 | | -# define resume_kernel restore_all_kernel |
|---|
| 69 | | -#endif |
|---|
| 70 | | - |
|---|
| 71 | | -.macro TRACE_IRQS_IRET |
|---|
| 72 | | -#ifdef CONFIG_TRACE_IRQFLAGS |
|---|
| 73 | | - testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off? |
|---|
| 74 | | - jz 1f |
|---|
| 75 | | - TRACE_IRQS_ON |
|---|
| 76 | | -1: |
|---|
| 77 | | -#endif |
|---|
| 78 | | -.endm |
|---|
| 79 | 53 | |
|---|
| 80 | 54 | #define PTI_SWITCH_MASK (1 << PAGE_SHIFT) |
|---|
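Worth noting for readers: `PTI_SWITCH_MASK` works because on 32-bit PTI the kernel and user PGDs are two adjacent 4K pages, so the entry code can switch address spaces by flipping a single CR3 bit. A minimal sketch of the CR3 arithmetic, assuming `PAGE_SHIFT == 12`; the helper names are illustrative, not kernel API:

```c
#include <stdint.h>

#define PAGE_SHIFT	12
#define PTI_SWITCH_MASK	(1u << PAGE_SHIFT)

/* user PGD lives in the page right after the kernel PGD */
static inline uint32_t to_user_cr3(uint32_t kernel_cr3)
{
	return kernel_cr3 | PTI_SWITCH_MASK;	/* what SWITCH_TO_USER_CR3 does */
}

static inline uint32_t to_kernel_cr3(uint32_t user_cr3)
{
	return user_cr3 & ~PTI_SWITCH_MASK;	/* what SWITCH_TO_KERNEL_CR3 does */
}
```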
| 81 | 55 | |
|---|
| .. | .. |
|---|
| 171 | 145 | ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI |
|---|
| 172 | 146 | .if \no_user_check == 0 |
|---|
| 173 | 147 | /* coming from usermode? */ |
|---|
| 174 | | - testl $SEGMENT_RPL_MASK, PT_CS(%esp) |
|---|
| 148 | + testl $USER_SEGMENT_RPL_MASK, PT_CS(%esp) |
|---|
| 175 | 149 | jz .Lend_\@ |
|---|
| 176 | 150 | .endif |
|---|
| 177 | 151 | /* On user-cr3? */ |
|---|
| .. | .. |
|---|
| 201 | 175 | .Lend_\@: |
|---|
| 202 | 176 | .endm |
|---|
| 203 | 177 | |
|---|
| 204 | | -.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 |
|---|
| 178 | +#define CS_FROM_ENTRY_STACK (1 << 31) |
|---|
| 179 | +#define CS_FROM_USER_CR3 (1 << 30) |
|---|
| 180 | +#define CS_FROM_KERNEL (1 << 29) |
|---|
| 181 | +#define CS_FROM_ESPFIX (1 << 28) |
|---|
| 182 | + |
|---|
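These four flags live in `__csh`, the otherwise-unused high half of the 32-bit CS slot in the exception frame, since a selector is only 16 bits wide. A sketch of the encoding (`pt_regs_cs` and `came_from_kernel` are illustrative helpers, not kernel functions):

```c
#include <stdint.h>

#define CS_FROM_ENTRY_STACK	(1u << 31)
#define CS_FROM_USER_CR3	(1u << 30)
#define CS_FROM_KERNEL		(1u << 29)
#define CS_FROM_ESPFIX		(1u << 28)

static inline uint16_t pt_regs_cs(uint32_t cs_slot)
{
	return cs_slot & 0xffff;		/* the real selector */
}

static inline int came_from_kernel(uint32_t cs_slot)
{
	return !!(cs_slot & CS_FROM_KERNEL);	/* set by FIXUP_FRAME below */
}
```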
| 183 | +.macro FIXUP_FRAME |
|---|
| 184 | + /* |
|---|
| 185 | + * The high bits of the CS dword (__csh) are used for CS_FROM_*. |
|---|
| 186 | + * Clear them in case hardware didn't do this for us. |
|---|
| 187 | + */ |
|---|
| 188 | + andl $0x0000ffff, 4*4(%esp) |
|---|
| 189 | + |
|---|
| 190 | +#ifdef CONFIG_VM86 |
|---|
| 191 | + testl $X86_EFLAGS_VM, 5*4(%esp) |
|---|
| 192 | + jnz .Lfrom_usermode_no_fixup_\@ |
|---|
| 193 | +#endif |
|---|
| 194 | + testl $USER_SEGMENT_RPL_MASK, 4*4(%esp) |
|---|
| 195 | + jnz .Lfrom_usermode_no_fixup_\@ |
|---|
| 196 | + |
|---|
| 197 | + orl $CS_FROM_KERNEL, 4*4(%esp) |
|---|
| 198 | + |
|---|
| 199 | + /* |
|---|
| 200 | + * When we're here from kernel mode, the (exception) stack looks like: |
|---|
| 201 | + * |
|---|
| 202 | + * 6*4(%esp) - <previous context> |
|---|
| 203 | + * 5*4(%esp) - flags |
|---|
| 204 | + * 4*4(%esp) - cs |
|---|
| 205 | + * 3*4(%esp) - ip |
|---|
| 206 | + * 2*4(%esp) - orig_eax |
|---|
| 207 | + * 1*4(%esp) - gs / function |
|---|
| 208 | + * 0*4(%esp) - fs |
|---|
| 209 | + * |
|---|
| 210 | + * Let's build a 5-entry IRET frame after that, such that struct pt_regs |
|---|
| 211 | + * is complete and in particular regs->sp is correct. This gives us |
|---|
| 212 | + * the original 6 entries as gap: |
|---|
| 213 | + * |
|---|
| 214 | + * 14*4(%esp) - <previous context> |
|---|
| 215 | + * 13*4(%esp) - gap / flags |
|---|
| 216 | + * 12*4(%esp) - gap / cs |
|---|
| 217 | + * 11*4(%esp) - gap / ip |
|---|
| 218 | + * 10*4(%esp) - gap / orig_eax |
|---|
| 219 | + * 9*4(%esp) - gap / gs / function |
|---|
| 220 | + * 8*4(%esp) - gap / fs |
|---|
| 221 | + * 7*4(%esp) - ss |
|---|
| 222 | + * 6*4(%esp) - sp |
|---|
| 223 | + * 5*4(%esp) - flags |
|---|
| 224 | + * 4*4(%esp) - cs |
|---|
| 225 | + * 3*4(%esp) - ip |
|---|
| 226 | + * 2*4(%esp) - orig_eax |
|---|
| 227 | + * 1*4(%esp) - gs / function |
|---|
| 228 | + * 0*4(%esp) - fs |
|---|
| 229 | + */ |
|---|
| 230 | + |
|---|
| 231 | + pushl %ss # ss |
|---|
| 232 | + pushl %esp # sp (points at ss) |
|---|
| 233 | + addl $7*4, (%esp) # point sp back at the previous context |
|---|
| 234 | + pushl 7*4(%esp) # flags |
|---|
| 235 | + pushl 7*4(%esp) # cs |
|---|
| 236 | + pushl 7*4(%esp) # ip |
|---|
| 237 | + pushl 7*4(%esp) # orig_eax |
|---|
| 238 | + pushl 7*4(%esp) # gs / function |
|---|
| 239 | + pushl 7*4(%esp) # fs |
|---|
| 240 | +.Lfrom_usermode_no_fixup_\@: |
|---|
| 241 | +.endm |
|---|
| 242 | + |
|---|
| 243 | +.macro IRET_FRAME |
|---|
| 244 | + /* |
|---|
| 245 | + * We're called with %ds, %es, %fs, and %gs from the interrupted |
|---|
| 246 | + * frame, so we shouldn't use them. Also, we may be in ESPFIX |
|---|
| 247 | + * mode and therefore have a nonzero SS base and an offset ESP, |
|---|
| 248 | + * so any attempt to access the stack needs to use SS (except for |
|---|
| 249 | + * accesses through %esp, which automatically use SS). |
|---|
| 250 | + */ |
|---|
| 251 | + testl $CS_FROM_KERNEL, 1*4(%esp) |
|---|
| 252 | + jz .Lfinished_frame_\@ |
|---|
| 253 | + |
|---|
| 254 | + /* |
|---|
| 255 | + * Reconstruct the 3 entry IRET frame right after the (modified) |
|---|
| 256 | + * regs->sp without lowering %esp in between, such that an NMI in the |
|---|
| 257 | + * middle doesn't scribble our stack. |
|---|
| 258 | + */ |
|---|
| 259 | + pushl %eax |
|---|
| 260 | + pushl %ecx |
|---|
| 261 | + movl 5*4(%esp), %eax # (modified) regs->sp |
|---|
| 262 | + |
|---|
| 263 | + movl 4*4(%esp), %ecx # flags |
|---|
| 264 | + movl %ecx, %ss:-1*4(%eax) |
|---|
| 265 | + |
|---|
| 266 | + movl 3*4(%esp), %ecx # cs |
|---|
| 267 | + andl $0x0000ffff, %ecx |
|---|
| 268 | + movl %ecx, %ss:-2*4(%eax) |
|---|
| 269 | + |
|---|
| 270 | + movl 2*4(%esp), %ecx # ip |
|---|
| 271 | + movl %ecx, %ss:-3*4(%eax) |
|---|
| 272 | + |
|---|
| 273 | + movl 1*4(%esp), %ecx # eax |
|---|
| 274 | + movl %ecx, %ss:-4*4(%eax) |
|---|
| 275 | + |
|---|
| 276 | + popl %ecx |
|---|
| 277 | + lea -4*4(%eax), %esp |
|---|
| 278 | + popl %eax |
|---|
| 279 | +.Lfinished_frame_\@: |
|---|
| 280 | +.endm |
|---|
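In C terms, IRET_FRAME performs roughly the following when CS_FROM_KERNEL is set; the struct is a minimal stand-in for pt_regs, and the point of writing below regs->sp before moving %esp is that an NMI hitting mid-sequence cannot scribble over live data:

```c
#include <stdint.h>

struct iret_regs {			/* minimal pt_regs stand-in */
	uint32_t ip, cs, flags, sp, ax;
};

static void iret_frame_sketch(struct iret_regs *regs)
{
	uint32_t *sp = (uint32_t *)(uintptr_t)regs->sp;

	sp[-1] = regs->flags;
	sp[-2] = regs->cs & 0xffff;	/* strip the CS_FROM_* software bits */
	sp[-3] = regs->ip;
	sp[-4] = regs->ax;		/* restored by the final popl %eax */
	/* the asm then does: lea -4*4(%eax), %esp; popl %eax */
}
```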
| 281 | + |
|---|
| 282 | +.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0 |
|---|
| 205 | 283 | cld |
|---|
| 284 | +.if \skip_gs == 0 |
|---|
| 206 | 285 | PUSH_GS |
|---|
| 286 | +.endif |
|---|
| 207 | 287 | pushl %fs |
|---|
| 288 | + |
|---|
| 289 | + pushl %eax |
|---|
| 290 | + movl $(__KERNEL_PERCPU), %eax |
|---|
| 291 | + movl %eax, %fs |
|---|
| 292 | +.if \unwind_espfix > 0 |
|---|
| 293 | + UNWIND_ESPFIX_STACK |
|---|
| 294 | +.endif |
|---|
| 295 | + popl %eax |
|---|
| 296 | + |
|---|
| 297 | + FIXUP_FRAME |
|---|
| 208 | 298 | pushl %es |
|---|
| 209 | 299 | pushl %ds |
|---|
| 210 | 300 | pushl \pt_regs_ax |
|---|
| .. | .. |
|---|
| 217 | 307 | movl $(__USER_DS), %edx |
|---|
| 218 | 308 | movl %edx, %ds |
|---|
| 219 | 309 | movl %edx, %es |
|---|
| 220 | | - movl $(__KERNEL_PERCPU), %edx |
|---|
| 221 | | - movl %edx, %fs |
|---|
| 310 | +.if \skip_gs == 0 |
|---|
| 222 | 311 | SET_KERNEL_GS %edx |
|---|
| 223 | | - |
|---|
| 312 | +.endif |
|---|
| 224 | 313 | /* Switch to kernel stack if necessary */ |
|---|
| 225 | 314 | .if \switch_stacks > 0 |
|---|
| 226 | 315 | SWITCH_TO_KERNEL_STACK |
|---|
| 227 | 316 | .endif |
|---|
| 228 | | - |
|---|
| 229 | 317 | .endm |
|---|
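The pushes above, together with the GP registers saved in the elided hunk, build the 32-bit struct pt_regs bottom-up. For orientation, a sketch of the layout following arch/x86/include/asm/ptrace.h (the `__*sh` fields are the padding high halves of the 16-bit segment slots):

```c
struct pt_regs {			/* 32-bit layout, low address first */
	unsigned long bx, cx, dx, si, di, bp, ax;
	unsigned short ds, __dsh;
	unsigned short es, __esh;
	unsigned short fs, __fsh;
	unsigned short gs, __gsh;	/* doubles as the handler-address slot */
	unsigned long orig_ax;		/* error code / syscall nr */
	unsigned long ip;
	unsigned short cs, __csh;	/* __csh carries the CS_FROM_* flags */
	unsigned long flags;
	unsigned long sp;		/* sp/ss only present after FIXUP_FRAME
					 * or a CPL-changing exception */
	unsigned short ss, __ssh;
};
```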
| 230 | 318 | |
|---|
| 231 | | -.macro SAVE_ALL_NMI cr3_reg:req |
|---|
| 232 | | - SAVE_ALL |
|---|
| 319 | +.macro SAVE_ALL_NMI cr3_reg:req unwind_espfix=0 |
|---|
| 320 | + SAVE_ALL unwind_espfix=\unwind_espfix |
|---|
| 233 | 321 | |
|---|
| 234 | 322 | BUG_IF_WRONG_CR3 |
|---|
| 235 | 323 | |
|---|
| .. | .. |
|---|
| 261 | 349 | 2: popl %es |
|---|
| 262 | 350 | 3: popl %fs |
|---|
| 263 | 351 | POP_GS \pop |
|---|
| 352 | + IRET_FRAME |
|---|
| 264 | 353 | .pushsection .fixup, "ax" |
|---|
| 265 | 354 | 4: movl $0, (%esp) |
|---|
| 266 | 355 | jmp 1b |
|---|
| .. | .. |
|---|
| 299 | 388 | |
|---|
| 300 | 389 | .macro CHECK_AND_APPLY_ESPFIX |
|---|
| 301 | 390 | #ifdef CONFIG_X86_ESPFIX32 |
|---|
| 302 | | -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8) |
|---|
| 391 | +#define GDT_ESPFIX_OFFSET (GDT_ENTRY_ESPFIX_SS * 8) |
|---|
| 392 | +#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + GDT_ESPFIX_OFFSET |
|---|
| 303 | 393 | |
|---|
| 304 | 394 | ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_ESPFIX |
|---|
| 305 | 395 | |
|---|
| .. | .. |
|---|
| 357 | 447 | * switch to it before we do any copying. |
|---|
| 358 | 448 | */ |
|---|
| 359 | 449 | |
|---|
| 360 | | -#define CS_FROM_ENTRY_STACK (1 << 31) |
|---|
| 361 | | -#define CS_FROM_USER_CR3 (1 << 30) |
|---|
| 362 | | - |
|---|
| 363 | 450 | .macro SWITCH_TO_KERNEL_STACK |
|---|
| 364 | | - |
|---|
| 365 | | - ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV |
|---|
| 366 | 451 | |
|---|
| 367 | 452 | BUG_IF_WRONG_CR3 |
|---|
| 368 | 453 | |
|---|
| .. | .. |
|---|
| 372 | 457 | * %eax now contains the entry cr3 and we carry it forward in |
|---|
| 373 | 458 | * that register for the time this macro runs |
|---|
| 374 | 459 | */ |
|---|
| 375 | | - |
|---|
| 376 | | - /* |
|---|
| 377 | | - * The high bits of the CS dword (__csh) are used for |
|---|
| 378 | | - * CS_FROM_ENTRY_STACK and CS_FROM_USER_CR3. Clear them in case |
|---|
| 379 | | - * hardware didn't do this for us. |
|---|
| 380 | | - */ |
|---|
| 381 | | - andl $(0x0000ffff), PT_CS(%esp) |
|---|
| 382 | 460 | |
|---|
| 383 | 461 | /* Are we on the entry stack? Bail out if not! */ |
|---|
| 384 | 462 | movl PER_CPU_VAR(cpu_entry_area), %ecx |
|---|
| .. | .. |
|---|
| 519 | 597 | */ |
|---|
| 520 | 598 | .macro SWITCH_TO_ENTRY_STACK |
|---|
| 521 | 599 | |
|---|
| 522 | | - ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV |
|---|
| 523 | | - |
|---|
| 524 | 600 | /* Bytes to copy */ |
|---|
| 525 | 601 | movl $PTREGS_SIZE, %ecx |
|---|
| 526 | 602 | |
|---|
| .. | .. |
|---|
| 619 | 695 | |
|---|
| 620 | 696 | .Lend_\@: |
|---|
| 621 | 697 | .endm |
|---|
| 698 | + |
|---|
| 699 | +/** |
|---|
| 700 | + * idtentry - Macro to generate entry stubs for simple IDT entries |
|---|
| 701 | + * @vector: Vector number |
|---|
| 702 | + * @asmsym: ASM symbol for the entry point |
|---|
| 703 | + * @cfunc: C function to be called |
|---|
| 704 | + * @has_error_code: Hardware pushed error code on stack |
|---|
| 705 | + */ |
|---|
| 706 | +.macro idtentry vector asmsym cfunc has_error_code:req |
|---|
| 707 | +SYM_CODE_START(\asmsym) |
|---|
| 708 | + ASM_CLAC |
|---|
| 709 | + cld |
|---|
| 710 | + |
|---|
| 711 | + .if \has_error_code == 0 |
|---|
| 712 | + pushl $0 /* Clear the error code */ |
|---|
| 713 | + .endif |
|---|
| 714 | + |
|---|
| 715 | + /* Push the C-function address into the GS slot */ |
|---|
| 716 | + pushl $\cfunc |
|---|
| 717 | + /* Invoke the common exception entry */ |
|---|
| 718 | + jmp handle_exception |
|---|
| 719 | +SYM_CODE_END(\asmsym) |
|---|
| 720 | +.endm |
|---|
| 721 | + |
|---|
| 722 | +.macro idtentry_irq vector cfunc |
|---|
| 723 | + .p2align CONFIG_X86_L1_CACHE_SHIFT |
|---|
| 724 | +SYM_CODE_START_LOCAL(asm_\cfunc) |
|---|
| 725 | + ASM_CLAC |
|---|
| 726 | + SAVE_ALL switch_stacks=1 |
|---|
| 727 | + ENCODE_FRAME_POINTER |
|---|
| 728 | + movl %esp, %eax |
|---|
| 729 | + movl PT_ORIG_EAX(%esp), %edx /* get the vector from stack */ |
|---|
| 730 | + movl $-1, PT_ORIG_EAX(%esp) /* no syscall to restart */ |
|---|
| 731 | + call \cfunc |
|---|
| 732 | + jmp handle_exception_return |
|---|
| 733 | +SYM_CODE_END(asm_\cfunc) |
|---|
| 734 | +.endm |
|---|
| 735 | + |
|---|
| 736 | +.macro idtentry_sysvec vector cfunc |
|---|
| 737 | + idtentry \vector asm_\cfunc \cfunc has_error_code=0 |
|---|
| 738 | +.endm |
|---|
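On the C side, the addresses these stubs stash in the GS slot resolve to handlers generated by the DEFINE_IDTENTRY* macros in <asm/idtentry.h>. handle_exception later passes pt_regs in %eax and PT_ORIG_EAX in %edx, which with -mregparm=3 become the first two C arguments. A sketch of the expected prototypes (the concrete handler names are examples):

```c
struct pt_regs;

/* idtentry ... has_error_code=0 and idtentry_sysvec stubs: */
void exc_divide_error(struct pt_regs *regs);

/* idtentry ... has_error_code=1: hardware error code arrives via %edx: */
void exc_general_protection(struct pt_regs *regs, unsigned long error_code);

/* idtentry_irq: the vector is recovered from PT_ORIG_EAX into %edx: */
void common_interrupt(struct pt_regs *regs, unsigned long vector);
```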
| 739 | + |
|---|
| 740 | +/* |
|---|
| 741 | + * Include the defines which emit the idt entries which are |
|---|
| 742 | + * shared between 32 and 64 bit and emit the __irqentry_text_* markers |
|---|
| 743 | + * so the stacktrace boundary checks work. |
|---|
| 744 | + */ |
|---|
| 745 | + .align 16 |
|---|
| 746 | + .globl __irqentry_text_start |
|---|
| 747 | +__irqentry_text_start: |
|---|
| 748 | + |
|---|
| 749 | +#include <asm/idtentry.h> |
|---|
| 750 | + |
|---|
| 751 | + .align 16 |
|---|
| 752 | + .globl __irqentry_text_end |
|---|
| 753 | +__irqentry_text_end: |
|---|
| 754 | + |
|---|
| 622 | 755 | /* |
|---|
| 623 | 756 | * %eax: prev task |
|---|
| 624 | 757 | * %edx: next task |
|---|
| 625 | 758 | */ |
|---|
| 626 | | -ENTRY(__switch_to_asm) |
|---|
| 759 | +.pushsection .text, "ax" |
|---|
| 760 | +SYM_CODE_START(__switch_to_asm) |
|---|
| 627 | 761 | /* |
|---|
| 628 | 762 | * Save callee-saved registers |
|---|
| 629 | 763 | * This must match the order in struct inactive_task_frame |
|---|
| .. | .. |
|---|
| 632 | 766 | pushl %ebx |
|---|
| 633 | 767 | pushl %edi |
|---|
| 634 | 768 | pushl %esi |
|---|
| 769 | + /* |
|---|
| 770 | + * Flags are saved to prevent AC leakage. This could go |
|---|
| 771 | + * away if objtool had 32bit support to verify |
|---|
| 772 | + * the STAC/CLAC correctness. |
|---|
| 773 | + */ |
|---|
| 635 | 774 | pushfl |
|---|
| 636 | 775 | |
|---|
| 637 | 776 | /* switch stack */ |
|---|
| .. | .. |
|---|
| 643 | 782 | movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset |
|---|
| 644 | 783 | #endif |
|---|
| 645 | 784 | |
|---|
| 646 | | -#ifdef CONFIG_RETPOLINE |
|---|
| 647 | 785 | /* |
|---|
| 648 | 786 | * When switching from a shallower to a deeper call stack |
|---|
| 649 | 787 | * the RSB may either underflow or use entries populated |
|---|
| .. | .. |
|---|
| 652 | 790 | * speculative execution to prevent attack. |
|---|
| 653 | 791 | */ |
|---|
| 654 | 792 | FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW |
|---|
| 655 | | -#endif |
|---|
| 656 | 793 | |
|---|
| 657 | | - /* restore callee-saved registers */ |
|---|
| 794 | + /* Restore flags of the incoming task to restore AC state. */ |
|---|
| 658 | 795 | popfl |
|---|
| 796 | + /* restore callee-saved registers */ |
|---|
| 659 | 797 | popl %esi |
|---|
| 660 | 798 | popl %edi |
|---|
| 661 | 799 | popl %ebx |
|---|
| 662 | 800 | popl %ebp |
|---|
| 663 | 801 | |
|---|
| 664 | 802 | jmp __switch_to |
|---|
| 665 | | -END(__switch_to_asm) |
|---|
| 803 | +SYM_CODE_END(__switch_to_asm) |
|---|
| 804 | +.popsection |
|---|
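The push order in __switch_to_asm must mirror struct inactive_task_frame so that C code and the unwinder can interpret a sleeping task's stack. A sketch of the 32-bit layout, assuming asm/switch_to.h still defines it this way:

```c
struct inactive_task_frame {	/* 32-bit fields, low address first */
	unsigned long flags;	/* pushfl: carries EFLAGS.AC across the switch */
	unsigned long si;
	unsigned long di;
	unsigned long bx;
	unsigned long bp;
	unsigned long ret_addr;	/* pushed by the call into __switch_to_asm */
};
```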
| 666 | 805 | |
|---|
| 667 | 806 | /* |
|---|
| 668 | 807 | * The unwinder expects the last frame on the stack to always be at the same |
|---|
| .. | .. |
|---|
| 671 | 810 | * asmlinkage function so its argument has to be pushed on the stack. This |
|---|
| 672 | 811 | * wrapper creates a proper "end of stack" frame header before the call. |
|---|
| 673 | 812 | */ |
|---|
| 674 | | -ENTRY(schedule_tail_wrapper) |
|---|
| 813 | +.pushsection .text, "ax" |
|---|
| 814 | +SYM_FUNC_START(schedule_tail_wrapper) |
|---|
| 675 | 815 | FRAME_BEGIN |
|---|
| 676 | 816 | |
|---|
| 677 | 817 | pushl %eax |
|---|
| .. | .. |
|---|
| 679 | 819 | popl %eax |
|---|
| 680 | 820 | |
|---|
| 681 | 821 | FRAME_END |
|---|
| 682 | | - ret |
|---|
| 683 | | -ENDPROC(schedule_tail_wrapper) |
|---|
| 822 | + RET |
|---|
| 823 | +SYM_FUNC_END(schedule_tail_wrapper) |
|---|
| 824 | +.popsection |
|---|
| 825 | + |
|---|
| 684 | 826 | /* |
|---|
| 685 | 827 | * A newly forked process directly context switches into this address. |
|---|
| 686 | 828 | * |
|---|
| .. | .. |
|---|
| 688 | 830 | * ebx: kernel thread func (NULL for user thread) |
|---|
| 689 | 831 | * edi: kernel thread arg |
|---|
| 690 | 832 | */ |
|---|
| 691 | | -ENTRY(ret_from_fork) |
|---|
| 833 | +.pushsection .text, "ax" |
|---|
| 834 | +SYM_CODE_START(ret_from_fork) |
|---|
| 692 | 835 | call schedule_tail_wrapper |
|---|
| 693 | 836 | |
|---|
| 694 | 837 | testl %ebx, %ebx |
|---|
| .. | .. |
|---|
| 697 | 840 | 2: |
|---|
| 698 | 841 | /* When we fork, we trace the syscall return in the child, too. */ |
|---|
| 699 | 842 | movl %esp, %eax |
|---|
| 700 | | - call syscall_return_slowpath |
|---|
| 701 | | - jmp restore_all |
|---|
| 843 | + call syscall_exit_to_user_mode |
|---|
| 844 | + jmp .Lsyscall_32_done |
|---|
| 702 | 845 | |
|---|
| 703 | 846 | /* kernel thread */ |
|---|
| 704 | 847 | 1: movl %edi, %eax |
|---|
| 705 | | - CALL_NOSPEC %ebx |
|---|
| 848 | + CALL_NOSPEC ebx |
|---|
| 706 | 849 | /* |
|---|
| 707 | 850 | * A kernel thread is allowed to return here after successfully |
|---|
| 708 | | - * calling do_execve(). Exit to userspace to complete the execve() |
|---|
| 851 | + * calling kernel_execve(). Exit to userspace to complete the execve() |
|---|
| 709 | 852 | * syscall. |
|---|
| 710 | 853 | */ |
|---|
| 711 | 854 | movl $0, PT_EAX(%esp) |
|---|
| 712 | 855 | jmp 2b |
|---|
| 713 | | -END(ret_from_fork) |
|---|
| 856 | +SYM_CODE_END(ret_from_fork) |
|---|
| 857 | +.popsection |
|---|
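A control-flow sketch of ret_from_fork in C, under the labels' apparent semantics (illustrative only; `kthread_fn` and `arg` arrive in %ebx/%edi and the real tail jumps are the asm labels 1:/2:):

```c
struct pt_regs;
extern void syscall_exit_to_user_mode(struct pt_regs *regs);

static void ret_from_fork_sketch(struct pt_regs *regs,
				 int (*kthread_fn)(void *), void *arg)
{
	/* schedule_tail_wrapper has already run */
	if (kthread_fn) {
		kthread_fn(arg); /* only returns after a successful kernel_execve() */
		/* asm then zeroes PT_EAX so the new user task sees execve() == 0 */
	}
	syscall_exit_to_user_mode(regs);	/* the .Lsyscall_32_done path */
}
```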
| 714 | 858 | |
|---|
| 715 | | -/* |
|---|
| 716 | | - * Return to user mode is not as complex as all this looks, |
|---|
| 717 | | - * but we want the default path for a system call return to |
|---|
| 718 | | - * go as quickly as possible which is why some of this is |
|---|
| 719 | | - * less clear than it otherwise should be. |
|---|
| 720 | | - */ |
|---|
| 721 | | - |
|---|
| 722 | | - # userspace resumption stub bypassing syscall exit tracing |
|---|
| 723 | | - ALIGN |
|---|
| 724 | | -ret_from_exception: |
|---|
| 725 | | - preempt_stop(CLBR_ANY) |
|---|
| 726 | | -ret_from_intr: |
|---|
| 727 | | -#ifdef CONFIG_VM86 |
|---|
| 728 | | - movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS |
|---|
| 729 | | - movb PT_CS(%esp), %al |
|---|
| 730 | | - andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax |
|---|
| 731 | | -#else |
|---|
| 732 | | - /* |
|---|
| 733 | | - * We can be coming here from child spawned by kernel_thread(). |
|---|
| 734 | | - */ |
|---|
| 735 | | - movl PT_CS(%esp), %eax |
|---|
| 736 | | - andl $SEGMENT_RPL_MASK, %eax |
|---|
| 737 | | -#endif |
|---|
| 738 | | - cmpl $USER_RPL, %eax |
|---|
| 739 | | - jb resume_kernel # not returning to v8086 or userspace |
|---|
| 740 | | - |
|---|
| 741 | | -ENTRY(resume_userspace) |
|---|
| 742 | | - DISABLE_INTERRUPTS(CLBR_ANY) |
|---|
| 743 | | - TRACE_IRQS_OFF |
|---|
| 744 | | - movl %esp, %eax |
|---|
| 745 | | - call prepare_exit_to_usermode |
|---|
| 746 | | - jmp restore_all |
|---|
| 747 | | -END(ret_from_exception) |
|---|
| 748 | | - |
|---|
| 749 | | -#ifdef CONFIG_PREEMPT |
|---|
| 750 | | -ENTRY(resume_kernel) |
|---|
| 751 | | - DISABLE_INTERRUPTS(CLBR_ANY) |
|---|
| 752 | | -.Lneed_resched: |
|---|
| 753 | | - # preempt count == 0 + NEED_RS set? |
|---|
| 754 | | - cmpl $0, PER_CPU_VAR(__preempt_count) |
|---|
| 755 | | -#ifndef CONFIG_PREEMPT_LAZY |
|---|
| 756 | | - jnz restore_all_kernel |
|---|
| 757 | | -#else |
|---|
| 758 | | - jz test_int_off |
|---|
| 759 | | - |
|---|
| 760 | | - # atleast preempt count == 0 ? |
|---|
| 761 | | - cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count) |
|---|
| 762 | | - jne restore_all_kernel |
|---|
| 763 | | - |
|---|
| 764 | | - movl PER_CPU_VAR(current_task), %ebp |
|---|
| 765 | | - cmpl $0,TASK_TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ? |
|---|
| 766 | | - jnz restore_all_kernel |
|---|
| 767 | | - |
|---|
| 768 | | - testl $_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp) |
|---|
| 769 | | - jz restore_all_kernel |
|---|
| 770 | | -test_int_off: |
|---|
| 771 | | -#endif |
|---|
| 772 | | - testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ? |
|---|
| 773 | | - jz restore_all_kernel |
|---|
| 774 | | - call preempt_schedule_irq |
|---|
| 775 | | - jmp .Lneed_resched |
|---|
| 776 | | -END(resume_kernel) |
|---|
| 777 | | -#endif |
|---|
| 778 | | - |
|---|
| 779 | | -GLOBAL(__begin_SYSENTER_singlestep_region) |
|---|
| 859 | +SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE) |
|---|
| 780 | 860 | /* |
|---|
| 781 | 861 | * All code from here through __end_SYSENTER_singlestep_region is subject |
|---|
| 782 | 862 | * to being single-stepped if a user program sets TF and executes SYSENTER. |
|---|
| .. | .. |
|---|
| 785 | 865 | * possible, we handle TF just like AC and NT, except that our #DB handler |
|---|
| 786 | 866 | * will ignore all of the single-step traps generated in this range. |
|---|
| 787 | 867 | */ |
|---|
| 788 | | - |
|---|
| 789 | | -#ifdef CONFIG_XEN |
|---|
| 790 | | -/* |
|---|
| 791 | | - * Xen doesn't set %esp to be precisely what the normal SYSENTER |
|---|
| 792 | | - * entry point expects, so fix it up before using the normal path. |
|---|
| 793 | | - */ |
|---|
| 794 | | -ENTRY(xen_sysenter_target) |
|---|
| 795 | | - addl $5*4, %esp /* remove xen-provided frame */ |
|---|
| 796 | | - jmp .Lsysenter_past_esp |
|---|
| 797 | | -#endif |
|---|
| 798 | 868 | |
|---|
| 799 | 869 | /* |
|---|
| 800 | 870 | * 32-bit SYSENTER entry. |
|---|
| .. | .. |
|---|
| 828 | 898 | * ebp user stack |
|---|
| 829 | 899 | * 0(%ebp) arg6 |
|---|
| 830 | 900 | */ |
|---|
| 831 | | -ENTRY(entry_SYSENTER_32) |
|---|
| 901 | +SYM_FUNC_START(entry_SYSENTER_32) |
|---|
| 832 | 902 | /* |
|---|
| 833 | 903 | * On entry-stack with all userspace-regs live - save and |
|---|
| 834 | 904 | * restore eflags and %eax to use it as scratch-reg for the cr3 |
|---|
| .. | .. |
|---|
| 846 | 916 | |
|---|
| 847 | 917 | .Lsysenter_past_esp: |
|---|
| 848 | 918 | pushl $__USER_DS /* pt_regs->ss */ |
|---|
| 849 | | - pushl %ebp /* pt_regs->sp (stashed in bp) */ |
|---|
| 919 | + pushl $0 /* pt_regs->sp (placeholder) */ |
|---|
| 850 | 920 | pushfl /* pt_regs->flags (except IF = 0) */ |
|---|
| 851 | | - orl $X86_EFLAGS_IF, (%esp) /* Fix IF */ |
|---|
| 852 | 921 | pushl $__USER_CS /* pt_regs->cs */ |
|---|
| 853 | 922 | pushl $0 /* pt_regs->ip = 0 (placeholder) */ |
|---|
| 854 | 923 | pushl %eax /* pt_regs->orig_ax */ |
|---|
| .. | .. |
|---|
| 877 | 946 | jnz .Lsysenter_fix_flags |
|---|
| 878 | 947 | .Lsysenter_flags_fixed: |
|---|
| 879 | 948 | |
|---|
| 880 | | - /* |
|---|
| 881 | | - * User mode is traced as though IRQs are on, and SYSENTER |
|---|
| 882 | | - * turned them off. |
|---|
| 883 | | - */ |
|---|
| 884 | | - TRACE_IRQS_OFF |
|---|
| 885 | | - |
|---|
| 886 | 949 | movl %esp, %eax |
|---|
| 887 | | - call do_fast_syscall_32 |
|---|
| 888 | | - /* XEN PV guests always use IRET path */ |
|---|
| 889 | | - ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \ |
|---|
| 890 | | - "jmp .Lsyscall_32_done", X86_FEATURE_XENPV |
|---|
| 950 | + call do_SYSENTER_32 |
|---|
| 951 | + testl %eax, %eax |
|---|
| 952 | + jz .Lsyscall_32_done |
|---|
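The `pushl $0` sp placeholder and the removed `orl $X86_EFLAGS_IF` are not lost state: with this rework the fixups happen in C before the syscall is dispatched. A sketch of the assumed shape of do_SYSENTER_32 (cf. arch/x86/entry/common.c; the stand-in struct and names are illustrative):

```c
#include <stdint.h>

#define X86_EFLAGS_IF	(1u << 9)

struct regs_sketch { uint32_t sp, bp, flags; };	/* minimal pt_regs stand-in */

extern long do_fast_syscall_32(struct regs_sketch *regs);	/* assumed */

long do_SYSENTER_32_sketch(struct regs_sketch *regs)
{
	regs->sp = regs->bp;		/* SYSENTER loses %esp; the vDSO stashed it in %ebp */
	regs->flags |= X86_EFLAGS_IF;	/* SYSENTER cleared IF; it was necessarily set */
	return do_fast_syscall_32(regs); /* nonzero: take the SYSEXIT fast path */
}
```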
| 891 | 953 | |
|---|
| 892 | | -/* Opportunistic SYSEXIT */ |
|---|
| 893 | | - TRACE_IRQS_ON /* User mode traces as IRQs on. */ |
|---|
| 954 | + STACKLEAK_ERASE |
|---|
| 955 | + |
|---|
| 956 | + /* Opportunistic SYSEXIT */ |
|---|
| 894 | 957 | |
|---|
| 895 | 958 | /* |
|---|
| 896 | 959 | * Setup entry stack - we keep the pointer in %eax and do the |
|---|
| .. | .. |
|---|
| 953 | 1016 | pushl $X86_EFLAGS_FIXED |
|---|
| 954 | 1017 | popfl |
|---|
| 955 | 1018 | jmp .Lsysenter_flags_fixed |
|---|
| 956 | | -GLOBAL(__end_SYSENTER_singlestep_region) |
|---|
| 957 | | -ENDPROC(entry_SYSENTER_32) |
|---|
| 1019 | +SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE) |
|---|
| 1020 | +SYM_FUNC_END(entry_SYSENTER_32) |
|---|
| 958 | 1021 | |
|---|
| 959 | 1022 | /* |
|---|
| 960 | 1023 | * 32-bit legacy system call entry. |
|---|
| .. | .. |
|---|
| 984 | 1047 | * edi arg5 |
|---|
| 985 | 1048 | * ebp arg6 |
|---|
| 986 | 1049 | */ |
|---|
| 987 | | -ENTRY(entry_INT80_32) |
|---|
| 1050 | +SYM_FUNC_START(entry_INT80_32) |
|---|
| 988 | 1051 | ASM_CLAC |
|---|
| 989 | 1052 | pushl %eax /* pt_regs->orig_ax */ |
|---|
| 990 | 1053 | |
|---|
| 991 | 1054 | SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1 /* save rest */ |
|---|
| 992 | 1055 | |
|---|
| 993 | | - /* |
|---|
| 994 | | - * User mode is traced as though IRQs are on, and the interrupt gate |
|---|
| 995 | | - * turned them off. |
|---|
| 996 | | - */ |
|---|
| 997 | | - TRACE_IRQS_OFF |
|---|
| 998 | | - |
|---|
| 999 | 1056 | movl %esp, %eax |
|---|
| 1000 | 1057 | call do_int80_syscall_32 |
|---|
| 1001 | 1058 | .Lsyscall_32_done: |
|---|
| 1059 | + STACKLEAK_ERASE |
|---|
| 1002 | 1060 | |
|---|
| 1003 | | -restore_all: |
|---|
| 1004 | | - TRACE_IRQS_IRET |
|---|
| 1061 | +restore_all_switch_stack: |
|---|
| 1005 | 1062 | SWITCH_TO_ENTRY_STACK |
|---|
| 1006 | | -.Lrestore_all_notrace: |
|---|
| 1007 | 1063 | CHECK_AND_APPLY_ESPFIX |
|---|
| 1008 | | -.Lrestore_nocheck: |
|---|
| 1064 | + |
|---|
| 1009 | 1065 | /* Switch back to user CR3 */ |
|---|
| 1010 | 1066 | SWITCH_TO_USER_CR3 scratch_reg=%eax |
|---|
| 1011 | 1067 | |
|---|
| .. | .. |
|---|
| 1021 | 1077 | */ |
|---|
| 1022 | 1078 | INTERRUPT_RETURN |
|---|
| 1023 | 1079 | |
|---|
| 1024 | | -restore_all_kernel: |
|---|
| 1025 | | - TRACE_IRQS_IRET |
|---|
| 1026 | | - PARANOID_EXIT_TO_KERNEL_MODE |
|---|
| 1027 | | - BUG_IF_WRONG_CR3 |
|---|
| 1028 | | - RESTORE_REGS 4 |
|---|
| 1029 | | - jmp .Lirq_return |
|---|
| 1030 | | - |
|---|
| 1031 | 1080 | .section .fixup, "ax" |
|---|
| 1032 | | -ENTRY(iret_exc ) |
|---|
| 1081 | +SYM_CODE_START(asm_iret_error) |
|---|
| 1033 | 1082 | pushl $0 # no error code |
|---|
| 1034 | | - pushl $do_iret_error |
|---|
| 1083 | + pushl $iret_error |
|---|
| 1035 | 1084 | |
|---|
| 1036 | 1085 | #ifdef CONFIG_DEBUG_ENTRY |
|---|
| 1037 | 1086 | /* |
|---|
| .. | .. |
|---|
| 1045 | 1094 | popl %eax |
|---|
| 1046 | 1095 | #endif |
|---|
| 1047 | 1096 | |
|---|
| 1048 | | - jmp common_exception |
|---|
| 1097 | + jmp handle_exception |
|---|
| 1098 | +SYM_CODE_END(asm_iret_error) |
|---|
| 1049 | 1099 | .previous |
|---|
| 1050 | | - _ASM_EXTABLE(.Lirq_return, iret_exc) |
|---|
| 1051 | | -ENDPROC(entry_INT80_32) |
|---|
| 1100 | + _ASM_EXTABLE(.Lirq_return, asm_iret_error) |
|---|
| 1101 | +SYM_FUNC_END(entry_INT80_32) |
|---|
| 1052 | 1102 | |
|---|
| 1053 | 1103 | .macro FIXUP_ESPFIX_STACK |
|---|
| 1054 | 1104 | /* |
|---|
| .. | .. |
|---|
| 1057 | 1107 | * We can't call C functions using the ESPFIX stack. This code reads |
|---|
| 1058 | 1108 | * the high word of the segment base from the GDT and switches to the |
|---|
| 1059 | 1109 | * normal stack and adjusts ESP with the matching offset. |
|---|
| 1110 | + * |
|---|
| 1111 | + * We might be on user CR3 here, so percpu data is not mapped and we can't |
|---|
| 1112 | + * access the GDT through the percpu segment. Instead, use SGDT to find |
|---|
| 1113 | + * the cpu_entry_area alias of the GDT. |
|---|
| 1060 | 1114 | */ |
|---|
| 1061 | 1115 | #ifdef CONFIG_X86_ESPFIX32 |
|---|
| 1062 | 1116 | /* fixup the stack */ |
|---|
| 1063 | | - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */ |
|---|
| 1064 | | - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ |
|---|
| 1117 | + pushl %ecx |
|---|
| 1118 | + subl $2*4, %esp |
|---|
| 1119 | + sgdt (%esp) |
|---|
| 1120 | + movl 2(%esp), %ecx /* GDT address */ |
|---|
| 1121 | + /* |
|---|
| 1122 | + * Careful: ECX is a linear pointer, so we need to force base |
|---|
| 1123 | + * zero. %cs is the only known-linear segment we have right now. |
|---|
| 1124 | + */ |
|---|
| 1125 | + mov %cs:GDT_ESPFIX_OFFSET + 4(%ecx), %al /* bits 16..23 */ |
|---|
| 1126 | + mov %cs:GDT_ESPFIX_OFFSET + 7(%ecx), %ah /* bits 24..31 */ |
|---|
| 1065 | 1127 | shl $16, %eax |
|---|
| 1128 | + addl $2*4, %esp |
|---|
| 1129 | + popl %ecx |
|---|
| 1066 | 1130 | addl %esp, %eax /* the adjusted stack pointer */ |
|---|
| 1067 | 1131 | pushl $__KERNEL_DS |
|---|
| 1068 | 1132 | pushl %eax |
|---|
| 1069 | 1133 | lss (%esp), %esp /* switch to the normal stack segment */ |
|---|
| 1070 | 1134 | #endif |
|---|
| 1071 | 1135 | .endm |
|---|
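FIXUP_ESPFIX_STACK only needs bits 16..31 of the espfix segment base, which an 8-byte GDT descriptor keeps in bytes 4 and 7 (per the SDM descriptor layout). A C rendering of the same byte-picking (`espfix_base_high` is an illustrative helper):

```c
#include <stdint.h>

/* Recover base bits 16..31 from an 8-byte GDT entry, exactly the
 * bytes the asm reads at GDT_ESPFIX_OFFSET + 4 and + 7. */
static uint32_t espfix_base_high(const uint8_t *desc)
{
	uint32_t base = 0;

	base |= (uint32_t)desc[4] << 16;	/* base bits 16..23 */
	base |= (uint32_t)desc[7] << 24;	/* base bits 24..31 */
	return base;	/* asm: shl $16, %eax; addl %esp, %eax */
}
```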
| 1136 | + |
|---|
| 1072 | 1137 | .macro UNWIND_ESPFIX_STACK |
|---|
| 1138 | + /* It's safe to clobber %eax, all other regs need to be preserved */ |
|---|
| 1073 | 1139 | #ifdef CONFIG_X86_ESPFIX32 |
|---|
| 1074 | 1140 | movl %ss, %eax |
|---|
| 1075 | 1141 | /* see if on espfix stack */ |
|---|
| 1076 | 1142 | cmpw $__ESPFIX_SS, %ax |
|---|
| 1077 | | - jne 27f |
|---|
| 1078 | | - movl $__KERNEL_DS, %eax |
|---|
| 1079 | | - movl %eax, %ds |
|---|
| 1080 | | - movl %eax, %es |
|---|
| 1143 | + jne .Lno_fixup_\@ |
|---|
| 1081 | 1144 | /* switch to normal stack */ |
|---|
| 1082 | 1145 | FIXUP_ESPFIX_STACK |
|---|
| 1083 | | -27: |
|---|
| 1146 | +.Lno_fixup_\@: |
|---|
| 1084 | 1147 | #endif |
|---|
| 1085 | 1148 | .endm |
|---|
| 1086 | 1149 | |
|---|
| 1087 | | -/* |
|---|
| 1088 | | - * Build the entry stubs with some assembler magic. |
|---|
| 1089 | | - * We pack 1 stub into every 8-byte block. |
|---|
| 1090 | | - */ |
|---|
| 1091 | | - .align 8 |
|---|
| 1092 | | -ENTRY(irq_entries_start) |
|---|
| 1093 | | - vector=FIRST_EXTERNAL_VECTOR |
|---|
| 1094 | | - .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) |
|---|
| 1095 | | - pushl $(~vector+0x80) /* Note: always in signed byte range */ |
|---|
| 1096 | | - vector=vector+1 |
|---|
| 1097 | | - jmp common_interrupt |
|---|
| 1098 | | - .align 8 |
|---|
| 1099 | | - .endr |
|---|
| 1100 | | -END(irq_entries_start) |
|---|
| 1101 | | - |
|---|
| 1102 | | -#ifdef CONFIG_X86_LOCAL_APIC |
|---|
| 1103 | | - .align 8 |
|---|
| 1104 | | -ENTRY(spurious_entries_start) |
|---|
| 1105 | | - vector=FIRST_SYSTEM_VECTOR |
|---|
| 1106 | | - .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR) |
|---|
| 1107 | | - pushl $(~vector+0x80) /* Note: always in signed byte range */ |
|---|
| 1108 | | - vector=vector+1 |
|---|
| 1109 | | - jmp common_spurious |
|---|
| 1110 | | - .align 8 |
|---|
| 1111 | | - .endr |
|---|
| 1112 | | -END(spurious_entries_start) |
|---|
| 1113 | | - |
|---|
| 1114 | | -common_spurious: |
|---|
| 1115 | | - ASM_CLAC |
|---|
| 1116 | | - addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */ |
|---|
| 1117 | | - SAVE_ALL switch_stacks=1 |
|---|
| 1118 | | - ENCODE_FRAME_POINTER |
|---|
| 1119 | | - TRACE_IRQS_OFF |
|---|
| 1120 | | - movl %esp, %eax |
|---|
| 1121 | | - call smp_spurious_interrupt |
|---|
| 1122 | | - jmp ret_from_intr |
|---|
| 1123 | | -ENDPROC(common_spurious) |
|---|
| 1124 | | -#endif |
|---|
| 1125 | | - |
|---|
| 1126 | | -/* |
|---|
| 1127 | | - * the CPU automatically disables interrupts when executing an IRQ vector, |
|---|
| 1128 | | - * so IRQ-flags tracing has to follow that: |
|---|
| 1129 | | - */ |
|---|
| 1130 | | - .p2align CONFIG_X86_L1_CACHE_SHIFT |
|---|
| 1131 | | -common_interrupt: |
|---|
| 1132 | | - ASM_CLAC |
|---|
| 1133 | | - addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */ |
|---|
| 1134 | | - |
|---|
| 1135 | | - SAVE_ALL switch_stacks=1 |
|---|
| 1136 | | - ENCODE_FRAME_POINTER |
|---|
| 1137 | | - TRACE_IRQS_OFF |
|---|
| 1138 | | - movl %esp, %eax |
|---|
| 1139 | | - call do_IRQ |
|---|
| 1140 | | - jmp ret_from_intr |
|---|
| 1141 | | -ENDPROC(common_interrupt) |
|---|
| 1142 | | - |
|---|
| 1143 | | -#define BUILD_INTERRUPT3(name, nr, fn) \ |
|---|
| 1144 | | -ENTRY(name) \ |
|---|
| 1145 | | - ASM_CLAC; \ |
|---|
| 1146 | | - pushl $~(nr); \ |
|---|
| 1147 | | - SAVE_ALL switch_stacks=1; \ |
|---|
| 1148 | | - ENCODE_FRAME_POINTER; \ |
|---|
| 1149 | | - TRACE_IRQS_OFF \ |
|---|
| 1150 | | - movl %esp, %eax; \ |
|---|
| 1151 | | - call fn; \ |
|---|
| 1152 | | - jmp ret_from_intr; \ |
|---|
| 1153 | | -ENDPROC(name) |
|---|
| 1154 | | - |
|---|
| 1155 | | -#define BUILD_INTERRUPT(name, nr) \ |
|---|
| 1156 | | - BUILD_INTERRUPT3(name, nr, smp_##name); \ |
|---|
| 1157 | | - |
|---|
| 1158 | | -/* The include is where all of the SMP etc. interrupts come from */ |
|---|
| 1159 | | -#include <asm/entry_arch.h> |
|---|
| 1160 | | - |
|---|
| 1161 | | -ENTRY(coprocessor_error) |
|---|
| 1162 | | - ASM_CLAC |
|---|
| 1163 | | - pushl $0 |
|---|
| 1164 | | - pushl $do_coprocessor_error |
|---|
| 1165 | | - jmp common_exception |
|---|
| 1166 | | -END(coprocessor_error) |
|---|
| 1167 | | - |
|---|
| 1168 | | -ENTRY(simd_coprocessor_error) |
|---|
| 1169 | | - ASM_CLAC |
|---|
| 1170 | | - pushl $0 |
|---|
| 1171 | | -#ifdef CONFIG_X86_INVD_BUG |
|---|
| 1172 | | - /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */ |
|---|
| 1173 | | - ALTERNATIVE "pushl $do_general_protection", \ |
|---|
| 1174 | | - "pushl $do_simd_coprocessor_error", \ |
|---|
| 1175 | | - X86_FEATURE_XMM |
|---|
| 1176 | | -#else |
|---|
| 1177 | | - pushl $do_simd_coprocessor_error |
|---|
| 1178 | | -#endif |
|---|
| 1179 | | - jmp common_exception |
|---|
| 1180 | | -END(simd_coprocessor_error) |
|---|
| 1181 | | - |
|---|
| 1182 | | -ENTRY(device_not_available) |
|---|
| 1183 | | - ASM_CLAC |
|---|
| 1184 | | - pushl $-1 # mark this as an int |
|---|
| 1185 | | - pushl $do_device_not_available |
|---|
| 1186 | | - jmp common_exception |
|---|
| 1187 | | -END(device_not_available) |
|---|
| 1188 | | - |
|---|
| 1189 | | -#ifdef CONFIG_PARAVIRT |
|---|
| 1190 | | -ENTRY(native_iret) |
|---|
| 1191 | | - iret |
|---|
| 1192 | | - _ASM_EXTABLE(native_iret, iret_exc) |
|---|
| 1193 | | -END(native_iret) |
|---|
| 1194 | | -#endif |
|---|
| 1195 | | - |
|---|
| 1196 | | -ENTRY(overflow) |
|---|
| 1197 | | - ASM_CLAC |
|---|
| 1198 | | - pushl $0 |
|---|
| 1199 | | - pushl $do_overflow |
|---|
| 1200 | | - jmp common_exception |
|---|
| 1201 | | -END(overflow) |
|---|
| 1202 | | - |
|---|
| 1203 | | -ENTRY(bounds) |
|---|
| 1204 | | - ASM_CLAC |
|---|
| 1205 | | - pushl $0 |
|---|
| 1206 | | - pushl $do_bounds |
|---|
| 1207 | | - jmp common_exception |
|---|
| 1208 | | -END(bounds) |
|---|
| 1209 | | - |
|---|
| 1210 | | -ENTRY(invalid_op) |
|---|
| 1211 | | - ASM_CLAC |
|---|
| 1212 | | - pushl $0 |
|---|
| 1213 | | - pushl $do_invalid_op |
|---|
| 1214 | | - jmp common_exception |
|---|
| 1215 | | -END(invalid_op) |
|---|
| 1216 | | - |
|---|
| 1217 | | -ENTRY(coprocessor_segment_overrun) |
|---|
| 1218 | | - ASM_CLAC |
|---|
| 1219 | | - pushl $0 |
|---|
| 1220 | | - pushl $do_coprocessor_segment_overrun |
|---|
| 1221 | | - jmp common_exception |
|---|
| 1222 | | -END(coprocessor_segment_overrun) |
|---|
| 1223 | | - |
|---|
| 1224 | | -ENTRY(invalid_TSS) |
|---|
| 1225 | | - ASM_CLAC |
|---|
| 1226 | | - pushl $do_invalid_TSS |
|---|
| 1227 | | - jmp common_exception |
|---|
| 1228 | | -END(invalid_TSS) |
|---|
| 1229 | | - |
|---|
| 1230 | | -ENTRY(segment_not_present) |
|---|
| 1231 | | - ASM_CLAC |
|---|
| 1232 | | - pushl $do_segment_not_present |
|---|
| 1233 | | - jmp common_exception |
|---|
| 1234 | | -END(segment_not_present) |
|---|
| 1235 | | - |
|---|
| 1236 | | -ENTRY(stack_segment) |
|---|
| 1237 | | - ASM_CLAC |
|---|
| 1238 | | - pushl $do_stack_segment |
|---|
| 1239 | | - jmp common_exception |
|---|
| 1240 | | -END(stack_segment) |
|---|
| 1241 | | - |
|---|
| 1242 | | -ENTRY(alignment_check) |
|---|
| 1243 | | - ASM_CLAC |
|---|
| 1244 | | - pushl $do_alignment_check |
|---|
| 1245 | | - jmp common_exception |
|---|
| 1246 | | -END(alignment_check) |
|---|
| 1247 | | - |
|---|
| 1248 | | -ENTRY(divide_error) |
|---|
| 1249 | | - ASM_CLAC |
|---|
| 1250 | | - pushl $0 # no error code |
|---|
| 1251 | | - pushl $do_divide_error |
|---|
| 1252 | | - jmp common_exception |
|---|
| 1253 | | -END(divide_error) |
|---|
| 1254 | | - |
|---|
| 1255 | | -#ifdef CONFIG_X86_MCE |
|---|
| 1256 | | -ENTRY(machine_check) |
|---|
| 1257 | | - ASM_CLAC |
|---|
| 1258 | | - pushl $0 |
|---|
| 1259 | | - pushl machine_check_vector |
|---|
| 1260 | | - jmp common_exception |
|---|
| 1261 | | -END(machine_check) |
|---|
| 1262 | | -#endif |
|---|
| 1263 | | - |
|---|
| 1264 | | -ENTRY(spurious_interrupt_bug) |
|---|
| 1265 | | - ASM_CLAC |
|---|
| 1266 | | - pushl $0 |
|---|
| 1267 | | - pushl $do_spurious_interrupt_bug |
|---|
| 1268 | | - jmp common_exception |
|---|
| 1269 | | -END(spurious_interrupt_bug) |
|---|
| 1270 | | - |
|---|
| 1271 | | -#ifdef CONFIG_XEN |
|---|
| 1272 | | -ENTRY(xen_hypervisor_callback) |
|---|
| 1273 | | - pushl $-1 /* orig_ax = -1 => not a system call */ |
|---|
| 1274 | | - SAVE_ALL |
|---|
| 1275 | | - ENCODE_FRAME_POINTER |
|---|
| 1276 | | - TRACE_IRQS_OFF |
|---|
| 1277 | | - |
|---|
| 1278 | | - /* |
|---|
| 1279 | | - * Check to see if we got the event in the critical |
|---|
| 1280 | | - * region in xen_iret_direct, after we've reenabled |
|---|
| 1281 | | - * events and checked for pending events. This simulates |
|---|
| 1282 | | - * iret instruction's behaviour where it delivers a |
|---|
| 1283 | | - * pending interrupt when enabling interrupts: |
|---|
| 1284 | | - */ |
|---|
| 1285 | | - movl PT_EIP(%esp), %eax |
|---|
| 1286 | | - cmpl $xen_iret_start_crit, %eax |
|---|
| 1287 | | - jb 1f |
|---|
| 1288 | | - cmpl $xen_iret_end_crit, %eax |
|---|
| 1289 | | - jae 1f |
|---|
| 1290 | | - |
|---|
| 1291 | | - jmp xen_iret_crit_fixup |
|---|
| 1292 | | - |
|---|
| 1293 | | -ENTRY(xen_do_upcall) |
|---|
| 1294 | | -1: mov %esp, %eax |
|---|
| 1295 | | - call xen_evtchn_do_upcall |
|---|
| 1296 | | -#ifndef CONFIG_PREEMPT |
|---|
| 1297 | | - call xen_maybe_preempt_hcall |
|---|
| 1298 | | -#endif |
|---|
| 1299 | | - jmp ret_from_intr |
|---|
| 1300 | | -ENDPROC(xen_hypervisor_callback) |
|---|
| 1301 | | - |
|---|
| 1302 | | -/* |
|---|
| 1303 | | - * Hypervisor uses this for application faults while it executes. |
|---|
| 1304 | | - * We get here for two reasons: |
|---|
| 1305 | | - * 1. Fault while reloading DS, ES, FS or GS |
|---|
| 1306 | | - * 2. Fault while executing IRET |
|---|
| 1307 | | - * Category 1 we fix up by reattempting the load, and zeroing the segment |
|---|
| 1308 | | - * register if the load fails. |
|---|
| 1309 | | - * Category 2 we fix up by jumping to do_iret_error. We cannot use the |
|---|
| 1310 | | - * normal Linux return path in this case because if we use the IRET hypercall |
|---|
| 1311 | | - * to pop the stack frame we end up in an infinite loop of failsafe callbacks. |
|---|
| 1312 | | - * We distinguish between categories by maintaining a status value in EAX. |
|---|
| 1313 | | - */ |
|---|
| 1314 | | -ENTRY(xen_failsafe_callback) |
|---|
| 1315 | | - pushl %eax |
|---|
| 1316 | | - movl $1, %eax |
|---|
| 1317 | | -1: mov 4(%esp), %ds |
|---|
| 1318 | | -2: mov 8(%esp), %es |
|---|
| 1319 | | -3: mov 12(%esp), %fs |
|---|
| 1320 | | -4: mov 16(%esp), %gs |
|---|
| 1321 | | - /* EAX == 0 => Category 1 (Bad segment) |
|---|
| 1322 | | - EAX != 0 => Category 2 (Bad IRET) */ |
|---|
| 1323 | | - testl %eax, %eax |
|---|
| 1324 | | - popl %eax |
|---|
| 1325 | | - lea 16(%esp), %esp |
|---|
| 1326 | | - jz 5f |
|---|
| 1327 | | - jmp iret_exc |
|---|
| 1328 | | -5: pushl $-1 /* orig_ax = -1 => not a system call */ |
|---|
| 1329 | | - SAVE_ALL |
|---|
| 1330 | | - ENCODE_FRAME_POINTER |
|---|
| 1331 | | - jmp ret_from_exception |
|---|
| 1332 | | - |
|---|
| 1333 | | -.section .fixup, "ax" |
|---|
| 1334 | | -6: xorl %eax, %eax |
|---|
| 1335 | | - movl %eax, 4(%esp) |
|---|
| 1336 | | - jmp 1b |
|---|
| 1337 | | -7: xorl %eax, %eax |
|---|
| 1338 | | - movl %eax, 8(%esp) |
|---|
| 1339 | | - jmp 2b |
|---|
| 1340 | | -8: xorl %eax, %eax |
|---|
| 1341 | | - movl %eax, 12(%esp) |
|---|
| 1342 | | - jmp 3b |
|---|
| 1343 | | -9: xorl %eax, %eax |
|---|
| 1344 | | - movl %eax, 16(%esp) |
|---|
| 1345 | | - jmp 4b |
|---|
| 1346 | | -.previous |
|---|
| 1347 | | - _ASM_EXTABLE(1b, 6b) |
|---|
| 1348 | | - _ASM_EXTABLE(2b, 7b) |
|---|
| 1349 | | - _ASM_EXTABLE(3b, 8b) |
|---|
| 1350 | | - _ASM_EXTABLE(4b, 9b) |
|---|
| 1351 | | -ENDPROC(xen_failsafe_callback) |
|---|
| 1352 | | - |
|---|
| 1353 | | -BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR, |
|---|
| 1354 | | - xen_evtchn_do_upcall) |
|---|
| 1355 | | - |
|---|
| 1356 | | -#endif /* CONFIG_XEN */ |
|---|
| 1357 | | - |
|---|
| 1358 | | -#if IS_ENABLED(CONFIG_HYPERV) |
|---|
| 1359 | | - |
|---|
| 1360 | | -BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR, |
|---|
| 1361 | | - hyperv_vector_handler) |
|---|
| 1362 | | - |
|---|
| 1363 | | -BUILD_INTERRUPT3(hyperv_reenlightenment_vector, HYPERV_REENLIGHTENMENT_VECTOR, |
|---|
| 1364 | | - hyperv_reenlightenment_intr) |
|---|
| 1365 | | - |
|---|
| 1366 | | -BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR, |
|---|
| 1367 | | - hv_stimer0_vector_handler) |
|---|
| 1368 | | - |
|---|
| 1369 | | -#endif /* CONFIG_HYPERV */ |
|---|
| 1370 | | - |
|---|
| 1371 | | -ENTRY(page_fault) |
|---|
| 1372 | | - ASM_CLAC |
|---|
| 1373 | | - pushl $do_page_fault |
|---|
| 1374 | | - ALIGN |
|---|
| 1375 | | - jmp common_exception |
|---|
| 1376 | | -END(page_fault) |
|---|
| 1377 | | - |
|---|
| 1378 | | -common_exception: |
|---|
| 1150 | +SYM_CODE_START_LOCAL_NOALIGN(handle_exception) |
|---|
| 1379 | 1151 | /* the function address is in %gs's slot on the stack */ |
|---|
| 1380 | | - pushl %fs |
|---|
| 1381 | | - pushl %es |
|---|
| 1382 | | - pushl %ds |
|---|
| 1383 | | - pushl %eax |
|---|
| 1384 | | - movl $(__USER_DS), %eax |
|---|
| 1385 | | - movl %eax, %ds |
|---|
| 1386 | | - movl %eax, %es |
|---|
| 1387 | | - movl $(__KERNEL_PERCPU), %eax |
|---|
| 1388 | | - movl %eax, %fs |
|---|
| 1389 | | - pushl %ebp |
|---|
| 1390 | | - pushl %edi |
|---|
| 1391 | | - pushl %esi |
|---|
| 1392 | | - pushl %edx |
|---|
| 1393 | | - pushl %ecx |
|---|
| 1394 | | - pushl %ebx |
|---|
| 1395 | | - SWITCH_TO_KERNEL_STACK |
|---|
| 1152 | + SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1 |
|---|
| 1396 | 1153 | ENCODE_FRAME_POINTER |
|---|
| 1397 | | - cld |
|---|
| 1398 | | - UNWIND_ESPFIX_STACK |
|---|
| 1154 | + |
|---|
| 1155 | + /* fixup %gs */ |
|---|
| 1399 | 1156 | GS_TO_REG %ecx |
|---|
| 1400 | 1157 | movl PT_GS(%esp), %edi # get the function address |
|---|
| 1401 | | - movl PT_ORIG_EAX(%esp), %edx # get the error code |
|---|
| 1402 | | - movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart |
|---|
| 1403 | 1158 | REG_TO_PTGS %ecx |
|---|
| 1404 | 1159 | SET_KERNEL_GS %ecx |
|---|
| 1405 | | - TRACE_IRQS_OFF |
|---|
| 1406 | | - movl %esp, %eax # pt_regs pointer |
|---|
| 1407 | | - CALL_NOSPEC %edi |
|---|
| 1408 | | - jmp ret_from_exception |
|---|
| 1409 | | -END(common_exception) |
|---|
| 1410 | 1160 | |
|---|
| 1411 | | -ENTRY(debug) |
|---|
| 1161 | + /* fixup orig %eax */ |
|---|
| 1162 | + movl PT_ORIG_EAX(%esp), %edx # get the error code |
|---|
| 1163 | + movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart |
|---|
| 1164 | + |
|---|
| 1165 | + movl %esp, %eax # pt_regs pointer |
|---|
| 1166 | + CALL_NOSPEC edi |
|---|
| 1167 | + |
|---|
| 1168 | +handle_exception_return: |
|---|
| 1169 | +#ifdef CONFIG_VM86 |
|---|
| 1170 | + movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS |
|---|
| 1171 | + movb PT_CS(%esp), %al |
|---|
| 1172 | + andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax |
|---|
| 1173 | +#else |
|---|
| 1412 | 1174 | /* |
|---|
| 1413 | | - * Entry from sysenter is now handled in common_exception |
|---|
| 1175 | + * We can be coming here from child spawned by kernel_thread(). |
|---|
| 1414 | 1176 | */ |
|---|
| 1415 | | - ASM_CLAC |
|---|
| 1416 | | - pushl $-1 # mark this as an int |
|---|
| 1417 | | - pushl $do_debug |
|---|
| 1418 | | - jmp common_exception |
|---|
| 1419 | | -END(debug) |
|---|
| 1177 | + movl PT_CS(%esp), %eax |
|---|
| 1178 | + andl $SEGMENT_RPL_MASK, %eax |
|---|
| 1179 | +#endif |
|---|
| 1180 | + cmpl $USER_RPL, %eax # returning to v8086 or userspace ? |
|---|
| 1181 | + jnb ret_to_user |
|---|
| 1182 | + |
|---|
| 1183 | + PARANOID_EXIT_TO_KERNEL_MODE |
|---|
| 1184 | + BUG_IF_WRONG_CR3 |
|---|
| 1185 | + RESTORE_REGS 4 |
|---|
| 1186 | + jmp .Lirq_return |
|---|
| 1187 | + |
|---|
| 1188 | +ret_to_user: |
|---|
| 1189 | + movl %esp, %eax |
|---|
| 1190 | + jmp restore_all_switch_stack |
|---|
| 1191 | +SYM_CODE_END(handle_exception) |
|---|
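The VM86/RPL dance in handle_exception_return is the asm twin of the C-side user_mode() check in asm/ptrace.h: v8086 segments don't carry a meaningful RPL, so X86_EFLAGS_VM is folded into the comparison. A self-contained sketch:

```c
#include <stdint.h>

#define X86_EFLAGS_VM		(1u << 17)
#define SEGMENT_RPL_MASK	0x3
#define USER_RPL		0x3

/* Mirrors the asm above: true if the trapped context was user or v8086. */
static int returning_to_user(uint32_t cs, uint32_t eflags)
{
	return ((cs & SEGMENT_RPL_MASK) | (eflags & X86_EFLAGS_VM)) >= USER_RPL;
}
```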
| 1192 | + |
|---|
| 1193 | +SYM_CODE_START(asm_exc_double_fault) |
|---|
| 1194 | +1: |
|---|
| 1195 | + /* |
|---|
| 1196 | + * This is a task gate handler, not an interrupt gate handler. |
|---|
| 1197 | + * The error code is on the stack, but the stack is otherwise |
|---|
| 1198 | + * empty. Interrupts are off. Our state is sane with the following |
|---|
| 1199 | + * exceptions: |
|---|
| 1200 | + * |
|---|
| 1201 | + * - CR0.TS is set. "TS" literally means "task switched". |
|---|
| 1202 | + * - EFLAGS.NT is set because we're a "nested task". |
|---|
| 1203 | + * - The doublefault TSS has back_link set and has been marked busy. |
|---|
| 1204 | + * - TR points to the doublefault TSS and the normal TSS is busy. |
|---|
| 1205 | + * - CR3 is the normal kernel PGD. This would be delightful, except |
|---|
| 1206 | + * that the CPU didn't bother to save the old CR3 anywhere. This |
|---|
| 1207 | + * would make it very awkward to return to the context we came |
|---|
| 1208 | + * from. |
|---|
| 1209 | + * |
|---|
| 1210 | + * The rest of EFLAGS is sanitized for us, so we don't need to |
|---|
| 1211 | + * worry about AC or DF. |
|---|
| 1212 | + * |
|---|
| 1213 | + * Don't even bother popping the error code. It's always zero, |
|---|
| 1214 | + * and ignoring it makes us a bit more robust against buggy |
|---|
| 1215 | + * hypervisor task gate implementations. |
|---|
| 1216 | + * |
|---|
| 1217 | + * We will manually undo the task switch instead of doing a |
|---|
| 1218 | + * task-switching IRET. |
|---|
| 1219 | + */ |
|---|
| 1220 | + |
|---|
| 1221 | + clts /* clear CR0.TS */ |
|---|
| 1222 | + pushl $X86_EFLAGS_FIXED |
|---|
| 1223 | + popfl /* clear EFLAGS.NT */ |
|---|
| 1224 | + |
|---|
| 1225 | + call doublefault_shim |
|---|
| 1226 | + |
|---|
| 1227 | + /* We don't support returning, so we have no IRET here. */ |
|---|
| 1228 | +1: |
|---|
| 1229 | + hlt |
|---|
| 1230 | + jmp 1b |
|---|
| 1231 | +SYM_CODE_END(asm_exc_double_fault) |
|---|
| 1420 | 1232 | |
|---|
| 1421 | 1233 | /* |
|---|
| 1422 | 1234 | * NMI is doubly nasty. It can happen on the first instruction of |
|---|
| .. | .. |
|---|
| 1425 | 1237 | * switched stacks. We handle both conditions by simply checking whether we |
|---|
| 1426 | 1238 | * interrupted kernel code running on the SYSENTER stack. |
|---|
| 1427 | 1239 | */ |
|---|
| 1428 | | -ENTRY(nmi) |
|---|
| 1240 | +SYM_CODE_START(asm_exc_nmi) |
|---|
| 1429 | 1241 | ASM_CLAC |
|---|
| 1430 | 1242 | |
|---|
| 1431 | 1243 | #ifdef CONFIG_X86_ESPFIX32 |
|---|
| 1244 | + /* |
|---|
| 1245 | + * ESPFIX_SS is only ever set on the return to user path |
|---|
| 1246 | + * after we've switched to the entry stack. |
|---|
| 1247 | + */ |
|---|
| 1432 | 1248 | pushl %eax |
|---|
| 1433 | 1249 | movl %ss, %eax |
|---|
| 1434 | 1250 | cmpw $__ESPFIX_SS, %ax |
|---|
| .. | .. |
|---|
| 1450 | 1266 | jb .Lnmi_from_sysenter_stack |
|---|
| 1451 | 1267 | |
|---|
| 1452 | 1268 | /* Not on SYSENTER stack. */ |
|---|
| 1453 | | - call do_nmi |
|---|
| 1269 | + call exc_nmi |
|---|
| 1454 | 1270 | jmp .Lnmi_return |
|---|
| 1455 | 1271 | |
|---|
| 1456 | 1272 | .Lnmi_from_sysenter_stack: |
|---|
| .. | .. |
|---|
| 1460 | 1276 | */ |
|---|
| 1461 | 1277 | movl %esp, %ebx |
|---|
| 1462 | 1278 | movl PER_CPU_VAR(cpu_current_top_of_stack), %esp |
|---|
| 1463 | | - call do_nmi |
|---|
| 1279 | + call exc_nmi |
|---|
| 1464 | 1280 | movl %ebx, %esp |
|---|
| 1465 | 1281 | |
|---|
| 1466 | 1282 | .Lnmi_return: |
|---|
| 1283 | +#ifdef CONFIG_X86_ESPFIX32 |
|---|
| 1284 | + testl $CS_FROM_ESPFIX, PT_CS(%esp) |
|---|
| 1285 | + jnz .Lnmi_from_espfix |
|---|
| 1286 | +#endif |
|---|
| 1287 | + |
|---|
| 1467 | 1288 | CHECK_AND_APPLY_ESPFIX |
|---|
| 1468 | 1289 | RESTORE_ALL_NMI cr3_reg=%edi pop=4 |
|---|
| 1469 | 1290 | jmp .Lirq_return |
|---|
| .. | .. |
|---|
| 1471 | 1292 | #ifdef CONFIG_X86_ESPFIX32 |
|---|
| 1472 | 1293 | .Lnmi_espfix_stack: |
|---|
| 1473 | 1294 | /* |
|---|
| 1474 | | - * create the pointer to lss back |
|---|
| 1295 | + * Create the pointer to LSS back |
|---|
| 1475 | 1296 | */ |
|---|
| 1476 | 1297 | pushl %ss |
|---|
| 1477 | 1298 | pushl %esp |
|---|
| 1478 | 1299 | addl $4, (%esp) |
|---|
| 1479 | | - /* copy the iret frame of 12 bytes */ |
|---|
| 1480 | | - .rept 3 |
|---|
| 1481 | | - pushl 16(%esp) |
|---|
| 1482 | | - .endr |
|---|
| 1483 | | - pushl %eax |
|---|
| 1484 | | - SAVE_ALL_NMI cr3_reg=%edi |
|---|
| 1485 | | - ENCODE_FRAME_POINTER |
|---|
| 1486 | | - FIXUP_ESPFIX_STACK # %eax == %esp |
|---|
| 1487 | | - xorl %edx, %edx # zero error code |
|---|
| 1488 | | - call do_nmi |
|---|
| 1489 | | - RESTORE_ALL_NMI cr3_reg=%edi |
|---|
| 1490 | | - lss 12+4(%esp), %esp # back to espfix stack |
|---|
| 1491 | | - jmp .Lirq_return |
|---|
| 1492 | | -#endif |
|---|
| 1493 | | -END(nmi) |
|---|
| 1494 | 1300 | |
|---|
| 1495 | | -ENTRY(int3) |
|---|
| 1496 | | - ASM_CLAC |
|---|
| 1497 | | - pushl $-1 # mark this as an int |
|---|
| 1301 | + /* Copy the (short) IRET frame */ |
|---|
| 1302 | + pushl 4*4(%esp) # flags |
|---|
| 1303 | + pushl 4*4(%esp) # cs |
|---|
| 1304 | + pushl 4*4(%esp) # ip |
|---|
| 1498 | 1305 | |
|---|
| 1499 | | - SAVE_ALL switch_stacks=1 |
|---|
| 1306 | + pushl %eax # orig_ax |
|---|
| 1307 | + |
|---|
| 1308 | + SAVE_ALL_NMI cr3_reg=%edi unwind_espfix=1 |
|---|
| 1500 | 1309 | ENCODE_FRAME_POINTER |
|---|
| 1501 | | - TRACE_IRQS_OFF |
|---|
| 1310 | + |
|---|
| 1311 | + /* clear CS_FROM_KERNEL, set CS_FROM_ESPFIX */ |
|---|
| 1312 | + xorl $(CS_FROM_ESPFIX | CS_FROM_KERNEL), PT_CS(%esp) |
|---|
| 1313 | + |
|---|
| 1502 | 1314 | xorl %edx, %edx # zero error code |
|---|
| 1503 | 1315 | movl %esp, %eax # pt_regs pointer |
|---|
| 1504 | | - call do_int3 |
|---|
| 1505 | | - jmp ret_from_exception |
|---|
| 1506 | | -END(int3) |
|---|
| 1316 | + jmp .Lnmi_from_sysenter_stack |
|---|
| 1507 | 1317 | |
|---|
| 1508 | | -ENTRY(general_protection) |
|---|
| 1509 | | - ASM_CLAC |
|---|
| 1510 | | - pushl $do_general_protection |
|---|
| 1511 | | - jmp common_exception |
|---|
| 1512 | | -END(general_protection) |
|---|
| 1513 | | - |
|---|
| 1514 | | -#ifdef CONFIG_KVM_GUEST |
|---|
| 1515 | | -ENTRY(async_page_fault) |
|---|
| 1516 | | - ASM_CLAC |
|---|
| 1517 | | - pushl $do_async_page_fault |
|---|
| 1518 | | - jmp common_exception |
|---|
| 1519 | | -END(async_page_fault) |
|---|
| 1318 | +.Lnmi_from_espfix: |
|---|
| 1319 | + RESTORE_ALL_NMI cr3_reg=%edi |
|---|
| 1320 | + /* |
|---|
| 1321 | + * Because we cleared CS_FROM_KERNEL, IRET_FRAME 'forgot' to |
|---|
| 1322 | + * fix up the gap and long frame: |
|---|
| 1323 | + * |
|---|
| 1324 | + * 3 - original frame (exception) |
|---|
| 1325 | + * 2 - ESPFIX block (above) |
|---|
| 1326 | + * 6 - gap (FIXUP_FRAME) |
|---|
| 1327 | + * 5 - long frame (FIXUP_FRAME) |
|---|
| 1328 | + * 1 - orig_ax |
|---|
| 1329 | + */ |
|---|
| 1330 | + lss (1+5+6)*4(%esp), %esp # back to espfix stack |
|---|
| 1331 | + jmp .Lirq_return |
|---|
| 1520 | 1332 | #endif |
|---|
| 1333 | +SYM_CODE_END(asm_exc_nmi) |
|---|
| 1521 | 1334 | |
|---|
| 1522 | | -ENTRY(rewind_stack_do_exit) |
|---|
| 1335 | +.pushsection .text, "ax" |
|---|
| 1336 | +SYM_CODE_START(rewind_stack_do_exit) |
|---|
| 1523 | 1337 | /* Prevent any naive code from trying to unwind to our caller. */ |
|---|
| 1524 | 1338 | xorl %ebp, %ebp |
|---|
| 1525 | 1339 | |
|---|
| .. | .. |
|---|
| 1528 | 1342 | |
|---|
| 1529 | 1343 | call do_exit |
|---|
| 1530 | 1344 | 1: jmp 1b |
|---|
| 1531 | | -END(rewind_stack_do_exit) |
|---|
| 1345 | +SYM_CODE_END(rewind_stack_do_exit) |
|---|
| 1346 | +.popsection |
|---|