2024-11-01 2f529f9b558ca1c1bd74be7437a84e4711743404
kernel/arch/arm/mm/fault.c
@@ -9,6 +9,7 @@
 #include <linux/signal.h>
 #include <linux/mm.h>
 #include <linux/hardirq.h>
+#include <linux/irq_pipeline.h>
 #include <linux/init.h>
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
@@ -21,10 +22,68 @@
 #include <asm/system_misc.h>
 #include <asm/system_info.h>
 #include <asm/tlbflush.h>
+#include <asm/dovetail.h>
+#define CREATE_TRACE_POINTS
+#include <asm/trace/exceptions.h>
 
 #include "fault.h"
 
 #ifdef CONFIG_MMU
+
+#ifdef CONFIG_IRQ_PIPELINE
+/*
+ * We need to synchronize the virtual interrupt state with the hard
+ * interrupt state we received on entry, then turn hardirqs back on to
+ * allow code which does not require strict serialization to be
+ * preempted by an out-of-band activity.
+ */
+static inline
+unsigned long fault_entry(int exception, struct pt_regs *regs)
+{
+        unsigned long flags;
+
+        trace_ARM_trap_entry(exception, regs);
+
+        flags = hard_local_save_flags();
+
+        /*
+         * The companion core must demote the current context to
+         * in-band stage if running oob on entry.
+         */
+        mark_trap_entry(exception, regs);
+
+        if (raw_irqs_disabled_flags(flags)) {
+                stall_inband();
+                trace_hardirqs_off();
+        }
+
+        hard_local_irq_enable();
+
+        return flags;
+}
+
+static inline
+void fault_exit(int exception, struct pt_regs *regs,
+                unsigned long flags)
+{
+        WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled());
+
+        /*
+         * We expect kentry_exit_pipelined() to clear the stall bit if
+         * kentry_enter_pipelined() observed it that way.
+         */
+        mark_trap_exit(exception, regs);
+        trace_ARM_trap_exit(exception, regs);
+        hard_local_irq_restore(flags);
+}
+
+#else /* !CONFIG_IRQ_PIPELINE */
+
+#define fault_entry(__exception, __regs)  ({ 0; })
+#define fault_exit(__exception, __regs, __flags)  \
+        do { (void)(__flags); } while (0)
+
+#endif /* !CONFIG_IRQ_PIPELINE */
 
 /*
  * This is useful to dump out the page tables associated with
@@ -96,6 +155,15 @@
         pr_cont("\n");
 }
 #else /* CONFIG_MMU */
+unsigned long fault_entry(int exception, struct pt_regs *regs)
+{
+        return 0;
+}
+
+static inline void fault_exit(int exception, struct pt_regs *regs,
+                              unsigned long combo)
+{ }
+
 void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
 { }
 #endif /* CONFIG_MMU */
@@ -116,6 +184,7 @@
         /*
          * No handler, we'll have to terminate things with extreme prejudice.
          */
+        irq_pipeline_oops();
         bust_spinlocks(1);
         pr_alert("8<--- cut here ---\n");
         pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
@@ -168,14 +237,22 @@
 {
         struct task_struct *tsk = current;
         struct mm_struct *mm = tsk->active_mm;
+        unsigned long irqflags;
 
         /*
          * If we are in kernel mode at this point, we
          * have no context to handle this fault with.
          */
-        if (user_mode(regs))
+        if (user_mode(regs)) {
+                irqflags = fault_entry(ARM_TRAP_ACCESS, regs);
                 __do_user_fault(addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
-        else
+                fault_exit(ARM_TRAP_ACCESS, regs, irqflags);
+        } else
+                /*
+                 * irq_pipeline: kernel faults are either quickly
+                 * recoverable via fixup, or lethal. In both cases, we
+                 * can skip the interrupt state synchronization.
+                 */
                 __do_kernel_fault(mm, addr, fsr, regs);
 }
 
@@ -244,9 +321,12 @@
         int sig, code;
         vm_fault_t fault;
         unsigned int flags = FAULT_FLAG_DEFAULT;
+        unsigned long irqflags;
+
+        irqflags = fault_entry(ARM_TRAP_ACCESS, regs);
 
         if (kprobe_page_fault(regs, fsr))
-                return 0;
+                goto out;
 
         tsk = current;
         mm = tsk->mm;
@@ -302,7 +382,7 @@
         if (fault_signal_pending(fault, regs)) {
                 if (!user_mode(regs))
                         goto no_context;
-                return 0;
+                goto out;
         }
 
         if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
@@ -318,7 +398,7 @@
          * Handle the "normal" case first - VM_FAULT_MAJOR
          */
         if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
-                return 0;
+                goto out;
 
         /*
          * If we are in kernel mode at this point, we
@@ -334,7 +414,7 @@
                  * got oom-killed)
                  */
                 pagefault_out_of_memory();
-                return 0;
+                goto out;
         }
 
         if (fault & VM_FAULT_SIGBUS) {
@@ -355,10 +435,13 @@
         }
 
         __do_user_fault(addr, fsr, sig, code, regs);
-        return 0;
+        goto out;
 
 no_context:
         __do_kernel_fault(mm, addr, fsr, regs);
+out:
+        fault_exit(ARM_TRAP_ACCESS, regs, irqflags);
+
         return 0;
 }
 #else /* CONFIG_MMU */
@@ -396,6 +479,8 @@
         p4d_t *p4d, *p4d_k;
         pud_t *pud, *pud_k;
         pmd_t *pmd, *pmd_k;
+
+        WARN_ON_ONCE(irqs_pipelined() && !hard_irqs_disabled());
 
         if (addr < TASK_SIZE)
                 return do_page_fault(addr, fsr, regs);
@@ -470,7 +555,11 @@
 static int
 do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
+        unsigned long irqflags;
+
+        irqflags = fault_entry(ARM_TRAP_SECTION, regs);
         do_bad_area(addr, fsr, regs);
+        fault_exit(ARM_TRAP_SECTION, regs, irqflags);
         return 0;
 }
 #endif /* CONFIG_ARM_LPAE */
@@ -518,10 +607,12 @@
 do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
         const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
+        unsigned long irqflags;
 
         if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
                 return;
 
+        irqflags = fault_entry(ARM_TRAP_DABT, regs);
         pr_alert("8<--- cut here ---\n");
         pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
                  inf->name, fsr, addr);
@@ -529,6 +620,7 @@
 
         arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
                        fsr, 0);
+        fault_exit(ARM_TRAP_DABT, regs, irqflags);
 }
 
 void __init
@@ -548,15 +640,18 @@
 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
 {
         const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
+        unsigned long irqflags;
 
         if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
                 return;
 
+        irqflags = fault_entry(ARM_TRAP_PABT, regs);
         pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
                  inf->name, ifsr, addr);
 
         arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
                        ifsr, 0);
+        fault_exit(ARM_TRAP_PABT, regs, irqflags);
 }
 
 /*
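
For readers unfamiliar with the pipeline's two interrupt states, the stand-alone sketch below models what the fault_entry()/fault_exit() pair in this patch does with them. It is a plain user-space C model, not kernel code: hard_irqs_off stands in for the real CPU interrupt mask (the CPSR I-bit) and inband_stalled for the in-band stage's virtual stall bit; every name in it is illustrative, and only the save/propagate/re-enable/restore pattern mirrors the patch.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the two interrupt states the pipeline tracks:
 * the real CPU interrupt mask ("hard"), and the virtual stall
 * bit of the in-band stage. */
static bool hard_irqs_off;   /* models the CPSR I-bit */
static bool inband_stalled;  /* models the in-band stall bit */

/* Models fault_entry(): snapshot the hard state, propagate it to
 * the virtual state so in-band code sees a consistent picture,
 * then re-enable hard interrupts so out-of-band activity can
 * preempt the non-critical parts of the fault handling. */
static bool model_fault_entry(void)
{
        bool hard_off_on_entry = hard_irqs_off;

        if (hard_off_on_entry)
                inband_stalled = true;  /* like stall_inband() */

        hard_irqs_off = false;          /* like hard_local_irq_enable() */
        return hard_off_on_entry;
}

/* Models fault_exit(): restore the hard state observed on entry. */
static void model_fault_exit(bool hard_off_on_entry)
{
        hard_irqs_off = hard_off_on_entry; /* like hard_local_irq_restore() */
}

int main(void)
{
        bool saved;

        hard_irqs_off = true;  /* fault taken with hard irqs masked */
        saved = model_fault_entry();
        printf("in handler:  hard_irqs_off=%d inband_stalled=%d\n",
               hard_irqs_off, inband_stalled);
        /* ... the in-band fault handling would run here ... */
        model_fault_exit(saved);
        printf("after exit:  hard_irqs_off=%d inband_stalled=%d\n",
               hard_irqs_off, inband_stalled);
        return 0;
}

Two points the model makes visible. First, the asymmetry the patch's own comments note: fault_entry() stalls the in-band stage itself when it finds hard irqs off, while clearing that stall bit on the way out is left to kentry_exit_pipelined() rather than to fault_exit(). Second, since fault_entry() returns state that fault_exit() must consume, every early return in a bracketed handler has to pass through the exit call; that is exactly why the patch rewrites do_page_fault()'s "return 0" statements as "goto out" jumps to a single fault_exit() site.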