.. | .. |
---|
74 | 74 | |
---|
75 | 75 | static inline void cond_local_irq_enable(struct pt_regs *regs) |
---|
76 | 76 | { |
---|
77 | | - if (regs->flags & X86_EFLAGS_IF) |
---|
78 | | - local_irq_enable(); |
---|
| 77 | + if (regs->flags & X86_EFLAGS_IF) { |
---|
| 78 | + if (running_inband()) |
---|
| 79 | + local_irq_enable_full(); |
---|
| 80 | + else |
---|
| 81 | + hard_local_irq_enable(); |
---|
| 82 | + } |
---|
79 | 83 | } |
---|
80 | 84 | |
---|
81 | 85 | static inline void cond_local_irq_disable(struct pt_regs *regs) |
---|
82 | 86 | { |
---|
83 | | - if (regs->flags & X86_EFLAGS_IF) |
---|
84 | | - local_irq_disable(); |
---|
| 87 | + if (regs->flags & X86_EFLAGS_IF) { |
---|
| 88 | + if (running_inband()) |
---|
| 89 | + local_irq_disable_full(); |
---|
| 90 | + else |
---|
| 91 | + hard_local_irq_disable(); |
---|
| 92 | + } |
---|
85 | 93 | } |
---|
86 | 94 | |
---|
87 | 95 | __always_inline int is_valid_bugaddr(unsigned long addr) |
---|
.. | .. |
---|
148 | 156 | } |
---|
149 | 157 | } |
---|
150 | 158 | |
---|
| 159 | +static __always_inline |
---|
| 160 | +bool mark_trap_entry(int trapnr, struct pt_regs *regs) |
---|
| 161 | +{ |
---|
| 162 | + oob_trap_notify(trapnr, regs); |
---|
| 163 | + |
---|
| 164 | + if (likely(running_inband())) { |
---|
| 165 | + hard_cond_local_irq_enable(); |
---|
| 166 | + return true; |
---|
| 167 | + } |
---|
| 168 | + |
---|
| 169 | + return false; |
---|
| 170 | +} |
---|
| 171 | + |
---|
| 172 | +static __always_inline |
---|
| 173 | +void mark_trap_exit(int trapnr, struct pt_regs *regs) |
---|
| 174 | +{ |
---|
| 175 | + oob_trap_unwind(trapnr, regs); |
---|
| 176 | + hard_cond_local_irq_disable(); |
---|
| 177 | +} |
---|
| 178 | + |
---|
| 179 | +static __always_inline |
---|
| 180 | +bool mark_trap_entry_raw(int trapnr, struct pt_regs *regs) |
---|
| 181 | +{ |
---|
| 182 | + oob_trap_notify(trapnr, regs); |
---|
| 183 | + return running_inband(); |
---|
| 184 | +} |
---|
| 185 | + |
---|
| 186 | +static __always_inline |
---|
| 187 | +void mark_trap_exit_raw(int trapnr, struct pt_regs *regs) |
---|
| 188 | +{ |
---|
| 189 | + oob_trap_unwind(trapnr, regs); |
---|
| 190 | +} |
---|
| 191 | + |
---|
151 | 192 | static void |
---|
152 | 193 | do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, |
---|
153 | 194 | long error_code, int sicode, void __user *addr) |
---|
.. | .. |
---|
171 | 212 | { |
---|
172 | 213 | RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); |
---|
173 | 214 | |
---|
| 215 | + if (!mark_trap_entry(trapnr, regs)) |
---|
| 216 | + return; |
---|
| 217 | + |
---|
174 | 218 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) != |
---|
175 | 219 | NOTIFY_STOP) { |
---|
176 | 220 | cond_local_irq_enable(regs); |
---|
177 | 221 | do_trap(trapnr, signr, str, regs, error_code, sicode, addr); |
---|
178 | 222 | cond_local_irq_disable(regs); |
---|
179 | 223 | } |
---|
| 224 | + |
---|
| 225 | + mark_trap_exit(trapnr, regs); |
---|
180 | 226 | } |
---|
181 | 227 | |
---|
182 | 228 | /* |
---|
.. | .. |
---|
230 | 276 | * Since we're emulating a CALL with exceptions, restore the interrupt |
---|
231 | 277 | * state to what it was at the exception site. |
---|
232 | 278 | */ |
---|
233 | | - if (regs->flags & X86_EFLAGS_IF) |
---|
234 | | - raw_local_irq_enable(); |
---|
| 279 | + if (regs->flags & X86_EFLAGS_IF) { |
---|
| 280 | + if (running_oob()) |
---|
| 281 | + hard_local_irq_enable(); |
---|
| 282 | + else |
---|
| 283 | + local_irq_enable_full(); |
---|
| 284 | + } |
---|
235 | 285 | if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) { |
---|
236 | 286 | regs->ip += LEN_UD2; |
---|
237 | 287 | handled = true; |
---|
238 | 288 | } |
---|
239 | | - if (regs->flags & X86_EFLAGS_IF) |
---|
240 | | - raw_local_irq_disable(); |
---|
| 289 | + if (regs->flags & X86_EFLAGS_IF) { |
---|
| 290 | + if (running_oob()) |
---|
| 291 | + hard_local_irq_disable(); |
---|
| 292 | + else |
---|
| 293 | + local_irq_disable_full(); |
---|
| 294 | + } |
---|
241 | 295 | instrumentation_end(); |
---|
242 | 296 | |
---|
243 | 297 | return handled; |
---|
.. | .. |
---|
251 | 305 | * We use UD2 as a short encoding for 'CALL __WARN', as such |
---|
252 | 306 | * handle it before exception entry to avoid recursive WARN |
---|
253 | 307 | * in case exception entry is the one triggering WARNs. |
---|
| 308 | + * |
---|
| 309 | + * dovetail: handle_bug() may run oob, so we do not downgrade |
---|
| 310 | + * in-band upon a failed __WARN assertion since it might have |
---|
| 311 | + * tripped in a section of code which would not be happy to |
---|
| 312 | + * switch stage. However, anything else should be notified to |
---|
| 313 | + * the core, because the kernel execution might be about to |
---|
| 314 | + * stop, so we'd need to switch in-band to get any output |
---|
| 315 | + * before this happens. |
---|
254 | 316 | */ |
---|
255 | 317 | if (!user_mode(regs) && handle_bug(regs)) |
---|
256 | 318 | return; |
---|
257 | 319 | |
---|
258 | | - state = irqentry_enter(regs); |
---|
259 | | - instrumentation_begin(); |
---|
260 | | - handle_invalid_op(regs); |
---|
261 | | - instrumentation_end(); |
---|
262 | | - irqentry_exit(regs, state); |
---|
| 320 | + if (mark_trap_entry_raw(X86_TRAP_UD, regs)) { |
---|
| 321 | + state = irqentry_enter(regs); |
---|
| 322 | + instrumentation_begin(); |
---|
| 323 | + handle_invalid_op(regs); |
---|
| 324 | + instrumentation_end(); |
---|
| 325 | + irqentry_exit(regs, state); |
---|
| 326 | + mark_trap_exit_raw(X86_TRAP_UD, regs); |
---|
| 327 | + } |
---|
263 | 328 | } |
---|
264 | 329 | |
---|
265 | 330 | DEFINE_IDTENTRY(exc_coproc_segment_overrun) |
---|
.. | .. |
---|
290 | 355 | { |
---|
291 | 356 | char *str = "alignment check"; |
---|
292 | 357 | |
---|
293 | | - if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP) |
---|
| 358 | + if (!mark_trap_entry(X86_TRAP_AC, regs)) |
---|
294 | 359 | return; |
---|
| 360 | + |
---|
| 361 | + if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP) |
---|
| 362 | + goto mark_exit; |
---|
295 | 363 | |
---|
296 | 364 | if (!user_mode(regs)) |
---|
297 | 365 | die("Split lock detected\n", regs, error_code); |
---|
.. | .. |
---|
306 | 374 | |
---|
307 | 375 | out: |
---|
308 | 376 | local_irq_disable(); |
---|
| 377 | + |
---|
| 378 | +mark_exit: |
---|
| 379 | + mark_trap_exit(X86_TRAP_AC, regs); |
---|
309 | 380 | } |
---|
310 | 381 | |
---|
311 | 382 | #ifdef CONFIG_VMAP_STACK |
---|
.. | .. |
---|
341 | 412 | * |
---|
342 | 413 | * The 32bit #DF shim provides CR2 already as an argument. On 64bit it needs |
---|
343 | 414 | * to be read before doing anything else. |
---|
| 415 | + * |
---|
| 416 | + * Dovetail: do not even ask the companion core to try restoring the |
---|
| 417 | + * in-band stage on double-fault; it would be a lost cause. |
---|
344 | 418 | */ |
---|
345 | 419 | DEFINE_IDTENTRY_DF(exc_double_fault) |
---|
346 | 420 | { |
---|
.. | .. |
---|
465 | 539 | |
---|
466 | 540 | DEFINE_IDTENTRY(exc_bounds) |
---|
467 | 541 | { |
---|
| 542 | + if (!mark_trap_entry(X86_TRAP_BR, regs)) |
---|
| 543 | + return; |
---|
| 544 | + |
---|
468 | 545 | if (notify_die(DIE_TRAP, "bounds", regs, 0, |
---|
469 | 546 | X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP) |
---|
470 | | - return; |
---|
| 547 | + goto out; |
---|
471 | 548 | cond_local_irq_enable(regs); |
---|
472 | 549 | |
---|
473 | 550 | if (!user_mode(regs)) |
---|
.. | .. |
---|
476 | 553 | do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, 0, 0, NULL); |
---|
477 | 554 | |
---|
478 | 555 | cond_local_irq_disable(regs); |
---|
| 556 | +out: |
---|
| 557 | + mark_trap_exit(X86_TRAP_BR, regs); |
---|
479 | 558 | } |
---|
480 | 559 | |
---|
481 | 560 | enum kernel_gp_hint { |
---|
.. | .. |
---|
570 | 649 | } |
---|
571 | 650 | |
---|
572 | 651 | if (v8086_mode(regs)) { |
---|
573 | | - local_irq_enable(); |
---|
| 652 | + local_irq_enable_full(); |
---|
574 | 653 | handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code); |
---|
575 | | - local_irq_disable(); |
---|
| 654 | + local_irq_disable_full(); |
---|
576 | 655 | return; |
---|
577 | 656 | } |
---|
578 | 657 | |
---|
.. | .. |
---|
585 | 664 | tsk->thread.error_code = error_code; |
---|
586 | 665 | tsk->thread.trap_nr = X86_TRAP_GP; |
---|
587 | 666 | |
---|
| 667 | + if (!mark_trap_entry(X86_TRAP_GP, regs)) |
---|
| 668 | + goto exit; |
---|
| 669 | + |
---|
588 | 670 | show_signal(tsk, SIGSEGV, "", desc, regs, error_code); |
---|
589 | 671 | force_sig(SIGSEGV); |
---|
590 | | - goto exit; |
---|
| 672 | + goto mark_exit; |
---|
591 | 673 | } |
---|
592 | 674 | |
---|
593 | 675 | if (fixup_exception(regs, X86_TRAP_GP, error_code, 0)) |
---|
.. | .. |
---|
605 | 687 | kprobe_fault_handler(regs, X86_TRAP_GP)) |
---|
606 | 688 | goto exit; |
---|
607 | 689 | |
---|
| 690 | + if (!mark_trap_entry(X86_TRAP_GP, regs)) |
---|
| 691 | + goto exit; |
---|
| 692 | + |
---|
608 | 693 | ret = notify_die(DIE_GPF, desc, regs, error_code, X86_TRAP_GP, SIGSEGV); |
---|
609 | 694 | if (ret == NOTIFY_STOP) |
---|
610 | | - goto exit; |
---|
| 695 | + goto mark_exit; |
---|
611 | 696 | |
---|
612 | 697 | if (error_code) |
---|
613 | 698 | snprintf(desc, sizeof(desc), "segment-related " GPFSTR); |
---|
.. | .. |
---|
629 | 714 | |
---|
630 | 715 | die_addr(desc, regs, error_code, gp_addr); |
---|
631 | 716 | |
---|
| 717 | +mark_exit: |
---|
| 718 | + mark_trap_exit(X86_TRAP_GP, regs); |
---|
632 | 719 | exit: |
---|
633 | 720 | cond_local_irq_disable(regs); |
---|
634 | 721 | } |
---|
.. | .. |
---|
673 | 760 | if (poke_int3_handler(regs)) |
---|
674 | 761 | return; |
---|
675 | 762 | |
---|
| 763 | + if (!mark_trap_entry_raw(X86_TRAP_BP, regs)) |
---|
| 764 | + return; |
---|
| 765 | + |
---|
676 | 766 | /* |
---|
677 | 767 | * irqentry_enter_from_user_mode() uses static_branch_{,un}likely() |
---|
678 | 768 | * and therefore can trigger INT3, hence poke_int3_handler() must |
---|
.. | .. |
---|
695 | 785 | instrumentation_end(); |
---|
696 | 786 | irqentry_nmi_exit(regs, irq_state); |
---|
697 | 787 | } |
---|
| 788 | + |
---|
| 789 | + mark_trap_exit_raw(X86_TRAP_BP, regs); |
---|
698 | 790 | } |
---|
699 | 791 | |
---|
700 | 792 | #ifdef CONFIG_X86_64 |
---|
.. | .. |
---|
999 | 1091 | goto out; |
---|
1000 | 1092 | |
---|
1001 | 1093 | /* It's safe to allow irq's after DR6 has been saved */ |
---|
1002 | | - local_irq_enable(); |
---|
| 1094 | + local_irq_enable_full(); |
---|
1003 | 1095 | |
---|
1004 | 1096 | if (v8086_mode(regs)) { |
---|
1005 | 1097 | handle_vm86_trap((struct kernel_vm86_regs *)regs, 0, X86_TRAP_DB); |
---|
.. | .. |
---|
1012 | 1104 | send_sigtrap(regs, 0, get_si_code(dr6)); |
---|
1013 | 1105 | |
---|
1014 | 1106 | out_irq: |
---|
1015 | | - local_irq_disable(); |
---|
| 1107 | + local_irq_disable_full(); |
---|
1016 | 1108 | out: |
---|
1017 | 1109 | instrumentation_end(); |
---|
1018 | 1110 | irqentry_exit_to_user_mode(regs); |
---|
.. | .. |
---|
1022 | 1114 | /* IST stack entry */ |
---|
1023 | 1115 | DEFINE_IDTENTRY_DEBUG(exc_debug) |
---|
1024 | 1116 | { |
---|
1025 | | - exc_debug_kernel(regs, debug_read_clear_dr6()); |
---|
| 1117 | + if (mark_trap_entry_raw(X86_TRAP_DB, regs)) { |
---|
| 1118 | + exc_debug_kernel(regs, debug_read_clear_dr6()); |
---|
| 1119 | + mark_trap_exit_raw(X86_TRAP_DB, regs); |
---|
| 1120 | + } |
---|
1026 | 1121 | } |
---|
1027 | 1122 | |
---|
1028 | 1123 | /* User entry, runs on regular task stack */ |
---|
1029 | 1124 | DEFINE_IDTENTRY_DEBUG_USER(exc_debug) |
---|
1030 | 1125 | { |
---|
1031 | | - exc_debug_user(regs, debug_read_clear_dr6()); |
---|
| 1126 | + if (mark_trap_entry_raw(X86_TRAP_DB, regs)) { |
---|
| 1127 | + exc_debug_user(regs, debug_read_clear_dr6()); |
---|
| 1128 | + mark_trap_exit_raw(X86_TRAP_DB, regs); |
---|
| 1129 | + } |
---|
1032 | 1130 | } |
---|
1033 | 1131 | #else |
---|
1034 | 1132 | /* 32 bit does not have separate entry points. */ |
---|
.. | .. |
---|
1062 | 1160 | if (fixup_exception(regs, trapnr, 0, 0)) |
---|
1063 | 1161 | goto exit; |
---|
1064 | 1162 | |
---|
| 1163 | + if (!mark_trap_entry(trapnr, regs)) |
---|
| 1164 | + goto exit; |
---|
| 1165 | + |
---|
1065 | 1166 | task->thread.error_code = 0; |
---|
1066 | 1167 | task->thread.trap_nr = trapnr; |
---|
1067 | 1168 | |
---|
1068 | 1169 | if (notify_die(DIE_TRAP, str, regs, 0, trapnr, |
---|
1069 | 1170 | SIGFPE) != NOTIFY_STOP) |
---|
1070 | 1171 | die(str, regs, 0); |
---|
1071 | | - goto exit; |
---|
| 1172 | + goto mark_exit; |
---|
1072 | 1173 | } |
---|
1073 | 1174 | |
---|
1074 | 1175 | /* |
---|
.. | .. |
---|
1084 | 1185 | if (!si_code) |
---|
1085 | 1186 | goto exit; |
---|
1086 | 1187 | |
---|
| 1188 | + if (!mark_trap_entry(trapnr, regs)) |
---|
| 1189 | + goto exit; |
---|
| 1190 | + |
---|
1087 | 1191 | force_sig_fault(SIGFPE, si_code, |
---|
1088 | 1192 | (void __user *)uprobe_get_trap_addr(regs)); |
---|
| 1193 | +mark_exit: |
---|
| 1194 | + mark_trap_exit(trapnr, regs); |
---|
1089 | 1195 | exit: |
---|
1090 | 1196 | cond_local_irq_disable(regs); |
---|
1091 | 1197 | } |
---|
.. | .. |
---|
1158 | 1264 | * to kill the task than getting stuck in a never-ending |
---|
1159 | 1265 | * loop of #NM faults. |
---|
1160 | 1266 | */ |
---|
1161 | | - die("unexpected #NM exception", regs, 0); |
---|
| 1267 | + if (mark_trap_entry(X86_TRAP_NM, regs)) { |
---|
| 1268 | + die("unexpected #NM exception", regs, 0); |
---|
| 1269 | + mark_trap_exit(X86_TRAP_NM, regs); |
---|
| 1270 | + } |
---|
1162 | 1271 | } |
---|
1163 | 1272 | } |
---|
1164 | 1273 | |
---|