```diff
@@ -6,19 +6,28 @@
  * operations here; the indirect forms are better handled in C.
  */
 
+#include <asm/errno.h>
 #include <asm/asm-offsets.h>
 #include <asm/percpu.h>
 #include <asm/processor-flags.h>
+#include <asm/segment.h>
+#include <asm/thread_info.h>
+#include <asm/asm.h>
 #include <asm/frame.h>
+#include <asm/unwind_hints.h>
 
+#include <xen/interface/xen.h>
+
+#include <linux/init.h>
 #include <linux/linkage.h>
+#include <../entry/calling.h>
 
 /*
  * Enable events. This clears the event mask and tests the pending
  * event status with one and operation. If there are pending events,
  * then enter the hypervisor to get them handled.
  */
-ENTRY(xen_irq_enable_direct)
+SYM_FUNC_START(xen_irq_enable_direct)
         FRAME_BEGIN
         /* Unmask events */
         movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
```
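In C terms, the direct stub mirrors the pvops fallback logic: clear the per-vCPU event mask, then check whether anything arrived while events were masked. A minimal runnable sketch, assuming the `vcpu_info` field names from `xen/interface/xen.h` and a stubbed-out hypercall (both are stand-ins for illustration, not kernel API):

```c
#include <stdio.h>

/* Trimmed-down vcpu_info; field names follow xen/interface/xen.h. */
struct vcpu_info {
        unsigned char evtchn_upcall_pending;
        unsigned char evtchn_upcall_mask;
};

/* Stand-in for the check_events / xen_force_evtchn_callback path. */
static void force_evtchn_callback(void)
{
        puts("enter the hypervisor to deliver pending events");
}

static void xen_irq_enable(struct vcpu_info *v)
{
        v->evtchn_upcall_mask = 0;       /* movb $0, ...XEN_vcpu_info_mask */
        if (v->evtchn_upcall_pending)    /* the pending test in the elided lines */
                force_evtchn_callback(); /* call check_events */
}

int main(void)
{
        struct vcpu_info v = { .evtchn_upcall_pending = 1, .evtchn_upcall_mask = 1 };

        xen_irq_enable(&v); /* an event arrived while masked: triggers the callback */
        return 0;
}
```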
```diff
@@ -36,18 +45,18 @@
         call check_events
 1:
         FRAME_END
-        ret
-        ENDPROC(xen_irq_enable_direct)
+        RET
+SYM_FUNC_END(xen_irq_enable_direct)
 
 
 /*
  * Disabling events is simply a matter of making the event mask
  * non-zero.
  */
-ENTRY(xen_irq_disable_direct)
+SYM_FUNC_START(xen_irq_disable_direct)
         movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
-        ret
-ENDPROC(xen_irq_disable_direct)
+        RET
+SYM_FUNC_END(xen_irq_disable_direct)
 
 /*
  * (xen_)save_fl is used to get the current interrupt enable status.
```
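Two mechanical conversions run through the whole patch: `ENTRY`/`ENDPROC` become `SYM_FUNC_START`/`SYM_FUNC_END`, the newer linkage annotations that give objtool and the symbol table correct function types and sizes, and every bare `ret` becomes the `RET` macro, which lets return-speculation mitigations rewrite all return sites in one place. A hedged sketch of the macro, assuming roughly the shape it has in `arch/x86/include/asm/linkage.h` (the exact spelling and config names vary across kernel versions):

```c
/* Approximate shape of RET in arch/x86/include/asm/linkage.h;
 * the config option names differ between kernel versions. */
#ifdef CONFIG_RETHUNK
#define RET     jmp __x86_return_thunk  /* retbleed: returns go via a thunk */
#else
#ifdef CONFIG_SLS
#define RET     ret; int3               /* stop straight-line speculation */
#else
#define RET     ret
#endif
#endif
```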
```diff
@@ -58,12 +67,12 @@
  * undefined. We need to toggle the state of the bit, because Xen and
  * x86 use opposite senses (mask vs enable).
  */
-ENTRY(xen_save_fl_direct)
+SYM_FUNC_START(xen_save_fl_direct)
         testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
         setz %ah
         addb %ah, %ah
-        ret
-        ENDPROC(xen_save_fl_direct)
+        RET
+SYM_FUNC_END(xen_save_fl_direct)
 
 
 /*
```
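The three-instruction body is worth unpacking. `testb` sets ZF when the mask byte is zero (events enabled); `setz %ah` turns that into 0 or 1 in bits 8-15 of the return register; `addb %ah, %ah` doubles it, so "enabled" lands exactly on bit 9, which is `X86_EFLAGS_IF` (0x200). Per the comment, only the IF bit of the result is defined. A runnable model of the same arithmetic:

```c
#include <assert.h>
#include <stdio.h>

#define X86_EFLAGS_IF 0x200UL /* interrupt flag, bit 9 */

/* Models xen_save_fl_direct: mask == 0 means "enabled", and the result
 * must look like EFLAGS with IF reflecting that state. Only the IF bit
 * is meaningful; the real stub leaves the rest of %rax untouched. */
static unsigned long xen_save_fl(unsigned char upcall_mask)
{
        unsigned char ah = (upcall_mask == 0); /* setz %ah: 1 if mask is zero */

        ah = ah + ah;                  /* addb %ah, %ah: 1 -> 2, i.e. bit 1 of %ah */
        return (unsigned long)ah << 8; /* %ah is bits 8-15 of %rax */
}

int main(void)
{
        assert(xen_save_fl(0) == X86_EFLAGS_IF); /* enabled -> IF set */
        assert(xen_save_fl(1) == 0);             /* masked  -> IF clear */
        puts("mask-to-IF mapping checks out");
        return 0;
}
```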
```diff
@@ -73,13 +82,9 @@
  * interrupt mask state, it checks for unmasked pending events and
  * enters the hypervisor to get them delivered if so.
  */
-ENTRY(xen_restore_fl_direct)
+SYM_FUNC_START(xen_restore_fl_direct)
         FRAME_BEGIN
-#ifdef CONFIG_X86_64
         testw $X86_EFLAGS_IF, %di
-#else
-        testb $X86_EFLAGS_IF>>8, %ah
-#endif
         setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
         /*
          * Preempt here doesn't matter because that will deal with any
```
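The deleted `#ifdef CONFIG_X86_64` scaffolding here (and the larger `CONFIG_X86_32` block in `check_events` below) reflects the removal of 32-bit Xen PV guest support: only the 64-bit calling convention remains, with the flags argument arriving in `%rdi` (hence `testw ... %di`). The logic is the mirror image of `save_fl`: IF set means the mask must end up zero, which `setz` writes straight into the per-CPU mask byte. A sketch in C, reusing the `vcpu_info` and `force_evtchn_callback` stand-ins from the sketch after the first hunk:

```c
#define X86_EFLAGS_IF 0x200UL

/* Models xen_restore_fl_direct: Xen's mask has the opposite sense of
 * EFLAGS.IF, so the bit is inverted on the way in. */
static void xen_restore_fl(struct vcpu_info *v, unsigned long flags)
{
        v->evtchn_upcall_mask = !(flags & X86_EFLAGS_IF); /* testw + setz */
        if (v->evtchn_upcall_mask == 0 && v->evtchn_upcall_pending)
                force_evtchn_callback(); /* the check in the elided lines */
}
```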
```diff
@@ -93,25 +98,16 @@
         call check_events
 1:
         FRAME_END
-        ret
-        ENDPROC(xen_restore_fl_direct)
+        RET
+SYM_FUNC_END(xen_restore_fl_direct)
 
 
 /*
  * Force an event check by making a hypercall, but preserve regs
  * before making the call.
  */
-ENTRY(check_events)
+SYM_FUNC_START(check_events)
         FRAME_BEGIN
-#ifdef CONFIG_X86_32
-        push %eax
-        push %ecx
-        push %edx
-        call xen_force_evtchn_callback
-        pop %edx
-        pop %ecx
-        pop %eax
-#else
         push %rax
         push %rcx
         push %rdx
```
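Because `check_events` is reached from code that keeps live values in registers the C ABI treats as clobberable, the stub saves the full caller-saved set around the call; the elided lines push and pop the rest (`%rsi`, `%rdi`, `%r8`-`%r11`) around `xen_force_evtchn_callback`. That helper is deliberately trivial: any cheap hypercall forces a trip into the hypervisor, and pending event upcalls are delivered on the way back to the guest. Roughly, as a kernel-context sketch (not standalone code):

```c
/* Approximately the kernel's helper: the hypercall itself is a no-op
 * version query; the point is the return path, where Xen delivers
 * any pending event upcalls. */
void xen_force_evtchn_callback(void)
{
        (void)HYPERVISOR_xen_version(0, NULL);
}
```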
```diff
@@ -131,7 +127,221 @@
         pop %rdx
         pop %rcx
         pop %rax
-#endif
         FRAME_END
-        ret
-ENDPROC(check_events)
+        RET
+SYM_FUNC_END(check_events)
+
+SYM_FUNC_START(xen_read_cr2)
+        FRAME_BEGIN
+        _ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
+        _ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
+        FRAME_END
+        RET
+SYM_FUNC_END(xen_read_cr2);
+
+SYM_FUNC_START(xen_read_cr2_direct)
+        FRAME_BEGIN
+        _ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
+        FRAME_END
+        RET
+SYM_FUNC_END(xen_read_cr2_direct);
```
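A PV guest cannot read `%cr2` itself: on a page fault, Xen stores the faulting address into the shared `vcpu_info` structure, and these stubs fetch it from there. `xen_read_cr2` chases the per-CPU `xen_vcpu` pointer, while the `_direct` variant reads the per-CPU `xen_vcpu_info` copy. The C implementation this assembly replaces looked roughly like:

```c
/* Kernel-context sketch of what xen_read_cr2 computes; xen_vcpu is
 * the per-CPU pointer to this vCPU's shared vcpu_info area. */
static unsigned long xen_read_cr2(void)
{
        return this_cpu_read(xen_vcpu)->arch.cr2;
}
```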
```diff
+
+.macro xen_pv_trap name
+SYM_CODE_START(xen_\name)
+        UNWIND_HINT_ENTRY
+        pop %rcx
+        pop %r11
+        jmp \name
+SYM_CODE_END(xen_\name)
+_ASM_NOKPROBE(xen_\name)
+.endm
```
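Each `xen_\name` stub exists because Xen delivers exceptions with two extra words, `%rcx` and `%r11`, pushed above the usual hardware frame (the syscall comment further down spells out the same layout). The stub discards them and jumps to the native handler, which then sees a standard exception frame. A hypothetical struct, for illustration only, of the stack at stub entry:

```c
/* Hypothetical layout; the stub works on the raw stack. Fields are
 * listed from the top of the stack (lowest address) downward. */
struct xen_pv_trap_frame {
        unsigned long rcx;    /* pushed by Xen, discarded by pop %rcx */
        unsigned long r11;    /* pushed by Xen, discarded by pop %r11 */
        unsigned long rip;    /* the standard iret frame starts here */
        unsigned long cs;
        unsigned long rflags;
        unsigned long rsp;
        unsigned long ss;
};
```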
```diff
+
+xen_pv_trap asm_exc_divide_error
+xen_pv_trap asm_xenpv_exc_debug
+xen_pv_trap asm_exc_int3
+xen_pv_trap asm_xenpv_exc_nmi
+xen_pv_trap asm_exc_overflow
+xen_pv_trap asm_exc_bounds
+xen_pv_trap asm_exc_invalid_op
+xen_pv_trap asm_exc_device_not_available
+xen_pv_trap asm_exc_double_fault
+xen_pv_trap asm_exc_coproc_segment_overrun
+xen_pv_trap asm_exc_invalid_tss
+xen_pv_trap asm_exc_segment_not_present
+xen_pv_trap asm_exc_stack_segment
+xen_pv_trap asm_exc_general_protection
+xen_pv_trap asm_exc_page_fault
+xen_pv_trap asm_exc_spurious_interrupt_bug
+xen_pv_trap asm_exc_coprocessor_error
+xen_pv_trap asm_exc_alignment_check
+#ifdef CONFIG_X86_MCE
+xen_pv_trap asm_exc_machine_check
+#endif /* CONFIG_X86_MCE */
+xen_pv_trap asm_exc_simd_coprocessor_error
+#ifdef CONFIG_IA32_EMULATION
+xen_pv_trap entry_INT80_compat
+#endif
+xen_pv_trap asm_exc_xen_unknown_trap
+xen_pv_trap asm_exc_xen_hypervisor_callback
+
+        __INIT
+SYM_CODE_START(xen_early_idt_handler_array)
+        i = 0
+        .rept NUM_EXCEPTION_VECTORS
+        UNWIND_HINT_EMPTY
+        pop %rcx
+        pop %r11
+        jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
+        i = i + 1
+        .fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
+        .endr
+SYM_CODE_END(xen_early_idt_handler_array)
+        __FINIT
```
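The early-boot array repeats the same pop/pop/jmp pattern `NUM_EXCEPTION_VECTORS` times at a fixed stride, so boot code can compute each stub's address as `base + vector * XEN_EARLY_IDT_HANDLER_SIZE`. The `.fill ..., 1, 0xcc` pads every stub out to that stride with `int3` bytes, so anything that jumps into the padding traps instead of sliding into the next stub. The arithmetic behind the stride (hedged: the macro's exact value is version-dependent, and grows when ENDBR landing pads are in play): `pop %rcx` is 1 byte, `pop %r11` is 2, and `jmp rel32` is 5, for 8 bytes per stub.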
```diff
+
+hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
+/*
+ * Xen64 iret frame:
+ *
+ *      ss
+ *      rsp
+ *      rflags
+ *      cs
+ *      rip <-- standard iret frame
+ *
+ *      flags
+ *
+ *      rcx             }
+ *      r11             }<-- pushed by hypercall page
+ * rsp->rax             }
+ */
+SYM_CODE_START(xen_iret)
+        UNWIND_HINT_EMPTY
+        pushq $0
+        jmp hypercall_iret
+SYM_CODE_END(xen_iret)
```
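`xen_iret` replaces the native `iretq`. Each stub in the hypercall page is 32 bytes (hence the `* 32`), so `hypercall_iret` indexes directly to the `HYPERVISOR_iret` entry. The `pushq $0` fills the `flags` slot in the layout above; `xen_sysret64` below pushes `$VGCF_in_syscall` there instead, telling Xen the guest is returning from a syscall. A hypothetical struct view of what the hypercall consumes (the rax/r11/rcx triple exists only when entered through the hypercall page, per the comment):

```c
/* Hypothetical, mirroring the layout comment above; top of stack first. */
struct xen_iret_args {
        unsigned long flags;  /* 0 here, VGCF_in_syscall from xen_sysret64 */
        unsigned long rip;    /* the standard iret frame */
        unsigned long cs;
        unsigned long rflags;
        unsigned long rsp;
        unsigned long ss;
};
```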
```diff
+
+SYM_CODE_START(xen_sysret64)
+        UNWIND_HINT_EMPTY
+        /*
+         * We're already on the usermode stack at this point, but
+         * still with the kernel gs, so we can easily switch back.
+         *
+         * tss.sp2 is scratch space.
+         */
+        movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
+        movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+
+        pushq $__USER_DS
+        pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
+        pushq %r11
+        pushq $__USER_CS
+        pushq %rcx
+
+        pushq $VGCF_in_syscall
+        jmp hypercall_iret
+SYM_CODE_END(xen_sysret64)
```
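The five pushes rebuild an iret frame from the SYSRET register convention, in which the user `%rip` lives in `%rcx` and the user `%rflags` in `%r11`: ss = `__USER_DS`, rsp = the user stack pointer parked in `tss.sp2`, rflags = `%r11`, cs = `__USER_CS`, rip = `%rcx`. With `$VGCF_in_syscall` pushed on top, the stack matches exactly the frame `hypercall_iret` expects.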
```diff
+
+/*
+ * XEN pv doesn't use trampoline stack, PER_CPU_VAR(cpu_tss_rw + TSS_sp0) is
+ * also the kernel stack. Reusing swapgs_restore_regs_and_return_to_usermode()
+ * in XEN pv would cause %rsp to move up to the top of the kernel stack and
+ * leave the IRET frame below %rsp, which is dangerous to be corrupted if #NMI
+ * interrupts. And swapgs_restore_regs_and_return_to_usermode() pushing the IRET
+ * frame at the same address is useless.
+ */
+SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
+        UNWIND_HINT_REGS
+        POP_REGS
+
+        /* stackleak_erase() can work safely on the kernel stack. */
+        STACKLEAK_ERASE_NOCLOBBER
+
+        addq $8, %rsp   /* skip regs->orig_ax */
+        jmp xen_iret
+SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)
```
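`POP_REGS` and `STACKLEAK_ERASE_NOCLOBBER` come from `entry/calling.h`, which is what the new `#include <../entry/calling.h>` in the first hunk is for: this routine is a Xen-PV twin of the common exit path, minus the trampoline-stack hop. Because `%rsp` never climbs above the IRET frame here, an NMI arriving mid-exit cannot land on top of it and corrupt it, which is the hazard the comment describes with the shared `swapgs_restore_regs_and_return_to_usermode()` path; keeping everything on the kernel stack also avoids re-pushing an identical frame at the same address.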
```diff
+
+/*
+ * Xen handles syscall callbacks much like ordinary exceptions, which
+ * means we have:
+ * - kernel gs
+ * - kernel rsp
+ * - an iret-like stack frame on the stack (including rcx and r11):
+ *      ss
+ *      rsp
+ *      rflags
+ *      cs
+ *      rip
+ *      r11
+ * rsp->rcx
+ */
+
+/* Normal 64-bit system call target */
+SYM_CODE_START(xen_entry_SYSCALL_64)
+        UNWIND_HINT_ENTRY
+        popq %rcx
+        popq %r11
+
+        /*
+         * Neither Xen nor the kernel really knows what the old SS and
+         * CS were. The kernel expects __USER_DS and __USER_CS, so
+         * report those values even though Xen will guess its own values.
+         */
+        movq $__USER_DS, 4*8(%rsp)
+        movq $__USER_CS, 1*8(%rsp)
+
+        jmp entry_SYSCALL_64_after_hwframe
+SYM_CODE_END(xen_entry_SYSCALL_64)
```
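After the two pops, `%rsp` points at the saved `rip`, so the frame offsets are `0*8` = rip, `1*8` = cs, `2*8` = rflags, `3*8` = rsp, and `4*8` = ss; the two `movq`s therefore overwrite exactly the CS and SS slots before jumping into the native syscall path at the point where the hardware-built frame would already exist.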
```diff
+
+#ifdef CONFIG_IA32_EMULATION
+
+/* 32-bit compat syscall target */
+SYM_CODE_START(xen_entry_SYSCALL_compat)
+        UNWIND_HINT_ENTRY
+        popq %rcx
+        popq %r11
+
+        /*
+         * Neither Xen nor the kernel really knows what the old SS and
+         * CS were. The kernel expects __USER32_DS and __USER32_CS, so
+         * report those values even though Xen will guess its own values.
+         */
+        movq $__USER32_DS, 4*8(%rsp)
+        movq $__USER32_CS, 1*8(%rsp)
+
+        jmp entry_SYSCALL_compat_after_hwframe
+SYM_CODE_END(xen_entry_SYSCALL_compat)
+
+/* 32-bit compat sysenter target */
+SYM_CODE_START(xen_entry_SYSENTER_compat)
+        UNWIND_HINT_ENTRY
+        /*
+         * NB: Xen is polite and clears TF from EFLAGS for us. This means
+         * that we don't need to guard against single step exceptions here.
+         */
+        popq %rcx
+        popq %r11
+
+        /*
+         * Neither Xen nor the kernel really knows what the old SS and
+         * CS were. The kernel expects __USER32_DS and __USER32_CS, so
+         * report those values even though Xen will guess its own values.
+         */
+        movq $__USER32_DS, 4*8(%rsp)
+        movq $__USER32_CS, 1*8(%rsp)
+
+        jmp entry_SYSENTER_compat_after_hwframe
+SYM_CODE_END(xen_entry_SYSENTER_compat)
+
+#else /* !CONFIG_IA32_EMULATION */
+
+SYM_CODE_START(xen_entry_SYSCALL_compat)
+SYM_CODE_START(xen_entry_SYSENTER_compat)
+        UNWIND_HINT_ENTRY
+        lea 16(%rsp), %rsp      /* strip %rcx, %r11 */
+        mov $-ENOSYS, %rax
+        pushq $0
+        jmp hypercall_iret
+SYM_CODE_END(xen_entry_SYSENTER_compat)
+SYM_CODE_END(xen_entry_SYSCALL_compat)
+
+#endif /* CONFIG_IA32_EMULATION */
```
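When IA32 emulation is compiled out, the two compat entry points collapse into one stub carrying both labels: it strips the two Xen-pushed words with `lea` (leaving the bare iret frame), loads `-ENOSYS` as the guest-visible return value (the reason the first hunk adds `#include <asm/errno.h>`), and returns straight to usermode through the iret hypercall with a zero flags word. Any 32-bit syscall attempt thus fails cleanly instead of reaching a handler that does not exist.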