@@ -9,6 +9,7 @@
 #include <linux/signal.h>
 #include <linux/mm.h>
 #include <linux/hardirq.h>
+#include <linux/irq_pipeline.h>
 #include <linux/init.h>
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
@@ -21,10 +22,68 @@
 #include <asm/system_misc.h>
 #include <asm/system_info.h>
 #include <asm/tlbflush.h>
+#include <asm/dovetail.h>
+#define CREATE_TRACE_POINTS
+#include <asm/trace/exceptions.h>
 
 #include "fault.h"
 
 #ifdef CONFIG_MMU
+
+#ifdef CONFIG_IRQ_PIPELINE
+/*
+ * We need to synchronize the virtual interrupt state with the hard
+ * interrupt state we received on entry, then turn hardirqs back on to
+ * allow code which does not require strict serialization to be
+ * preempted by an out-of-band activity.
+ */
+static inline
+unsigned long fault_entry(int exception, struct pt_regs *regs)
+{
+	unsigned long flags;
+
+	trace_ARM_trap_entry(exception, regs);
+
+	flags = hard_local_save_flags();
+
+	/*
+	 * The companion core must demote the current context to
+	 * in-band stage if running oob on entry.
+	 */
+	mark_trap_entry(exception, regs);
+
+	if (raw_irqs_disabled_flags(flags)) {
+		stall_inband();
+		trace_hardirqs_off();
+	}
+
+	hard_local_irq_enable();
+
+	return flags;
+}
+
+static inline
+void fault_exit(int exception, struct pt_regs *regs,
+		unsigned long flags)
+{
+	WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled());
+
+	/*
+	 * We expect kentry_exit_pipelined() to clear the stall bit if
+	 * kentry_enter_pipelined() observed it that way.
+	 */
+	mark_trap_exit(exception, regs);
+	trace_ARM_trap_exit(exception, regs);
+	hard_local_irq_restore(flags);
+}
+
+#else /* !CONFIG_IRQ_PIPELINE */
+
+#define fault_entry(__exception, __regs)	({ 0; })
+#define fault_exit(__exception, __regs, __flags)	\
+	do { (void)(__flags); } while (0)
+
+#endif /* !CONFIG_IRQ_PIPELINE */
 
 /*
  * This is useful to dump out the page tables associated with
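The pattern these helpers establish: a handler saves and synchronizes the interrupt state on entry, runs its in-band body with hard irqs enabled, then restores the saved state on exit. A minimal sketch of that call pattern follows; handle_trap() is a hypothetical handler standing in for the real entry points reworked below, and with CONFIG_IRQ_PIPELINE disabled the stub macros above compile the same bracketing away entirely.

static void handle_trap(struct pt_regs *regs)
{
	unsigned long irqflags;

	/* Sync the virtual IRQ state, demote to in-band, hard irqs on. */
	irqflags = fault_entry(ARM_TRAP_ACCESS, regs);

	/*
	 * In-band handling runs here; hard irqs are enabled, so an
	 * out-of-band stage may still preempt non-critical code.
	 */

	/* Restore the hard interrupt state observed on entry. */
	fault_exit(ARM_TRAP_ACCESS, regs, irqflags);
}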
@@ -96,6 +155,15 @@
 	pr_cont("\n");
 }
 #else /* CONFIG_MMU */
+static inline unsigned long fault_entry(int exception, struct pt_regs *regs)
+{
+	return 0;
+}
+
+static inline void fault_exit(int exception, struct pt_regs *regs,
+			      unsigned long flags)
+{ }
+
 void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
 { }
 #endif /* CONFIG_MMU */
@@ -116,6 +184,7 @@
 	/*
 	 * No handler, we'll have to terminate things with extreme prejudice.
 	 */
+	irq_pipeline_oops();
 	bust_spinlocks(1);
 	pr_alert("8<--- cut here ---\n");
 	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
@@ -168,14 +237,22 @@
 {
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->active_mm;
+	unsigned long irqflags;
 
 	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
-	if (user_mode(regs))
+	if (user_mode(regs)) {
+		irqflags = fault_entry(ARM_TRAP_ACCESS, regs);
 		__do_user_fault(addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
-	else
+		fault_exit(ARM_TRAP_ACCESS, regs, irqflags);
+	} else
+		/*
+		 * irq_pipeline: kernel faults are either quickly
+		 * recoverable via fixup, or lethal. In both cases, we
+		 * can skip the interrupt state synchronization.
+		 */
 		__do_kernel_fault(mm, addr, fsr, regs);
 }
 
@@ -244,9 +321,12 @@
 	int sig, code;
 	vm_fault_t fault;
 	unsigned int flags = FAULT_FLAG_DEFAULT;
+	unsigned long irqflags;
+
+	irqflags = fault_entry(ARM_TRAP_ACCESS, regs);
 
 	if (kprobe_page_fault(regs, fsr))
-		return 0;
+		goto out;
 
 	tsk = current;
 	mm = tsk->mm;
@@ -302,7 +382,7 @@
 	if (fault_signal_pending(fault, regs)) {
 		if (!user_mode(regs))
 			goto no_context;
-		return 0;
+		goto out;
 	}
 
 	if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
@@ -318,7 +398,7 @@
 	 * Handle the "normal" case first - VM_FAULT_MAJOR
 	 */
 	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
-		return 0;
+		goto out;
 
 	/*
	 * If we are in kernel mode at this point, we
@@ -334,7 +414,7 @@
 		 * got oom-killed)
 		 */
 		pagefault_out_of_memory();
-		return 0;
+		goto out;
 	}
 
 	if (fault & VM_FAULT_SIGBUS) {
@@ -355,10 +435,13 @@
 	}
 
 	__do_user_fault(addr, fsr, sig, code, regs);
-	return 0;
+	goto out;
 
 no_context:
 	__do_kernel_fault(mm, addr, fsr, regs);
+out:
+	fault_exit(ARM_TRAP_ACCESS, regs, irqflags);
+
 	return 0;
 }
 #else /* CONFIG_MMU */
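The goto rewrite gives do_page_fault() a single-exit shape: fault_entry() now runs before anything else, so every path that used to return directly must funnel through the out label, where fault_exit() restores the saved hard interrupt state exactly once. A hypothetical skeleton of the resulting control flow (fast_path() and slow_path() stand in for the kprobe check, the retry loop and the error handling):

static int do_page_fault_shape(struct pt_regs *regs)
{
	unsigned long irqflags;
	int ret = 0;

	/* Runs first: save and synchronize the interrupt state. */
	irqflags = fault_entry(ARM_TRAP_ACCESS, regs);

	if (fast_path(regs))	/* e.g. kprobe_page_fault() handled it */
		goto out;

	ret = slow_path(regs);	/* the actual VM fault handling */
out:
	/* Single exit point: restore the entry state exactly once. */
	fault_exit(ARM_TRAP_ACCESS, regs, irqflags);
	return ret;
}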
@@ -396,6 +479,8 @@
 	p4d_t *p4d, *p4d_k;
 	pud_t *pud, *pud_k;
 	pmd_t *pmd, *pmd_k;
+
+	WARN_ON_ONCE(irqs_pipelined() && !hard_irqs_disabled());
 
 	if (addr < TASK_SIZE)
 		return do_page_fault(addr, fsr, regs);
@@ -470,7 +555,11 @@
 static int
 do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
+	unsigned long irqflags;
+
+	irqflags = fault_entry(ARM_TRAP_SECTION, regs);
 	do_bad_area(addr, fsr, regs);
+	fault_exit(ARM_TRAP_SECTION, regs, irqflags);
 	return 0;
 }
 #endif /* CONFIG_ARM_LPAE */
@@ -518,10 +607,12 @@
 do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
 	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
+	unsigned long irqflags;
 
 	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
 		return;
 
+	irqflags = fault_entry(ARM_TRAP_DABT, regs);
 	pr_alert("8<--- cut here ---\n");
 	pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
 		 inf->name, fsr, addr);
@@ -529,6 +620,7 @@
 
 	arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
 		       fsr, 0);
+	fault_exit(ARM_TRAP_DABT, regs, irqflags);
 }
 
 void __init
@@ -548,15 +640,18 @@
 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
 {
 	const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
+	unsigned long irqflags;
 
 	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
 		return;
 
+	irqflags = fault_entry(ARM_TRAP_PABT, regs);
 	pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
 		 inf->name, ifsr, addr);
 
 	arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
 		       ifsr, 0);
+	fault_exit(ARM_TRAP_PABT, regs, irqflags);
 }
 
 /*