From 2f529f9b558ca1c1bd74be7437a84e4711743404 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 01 Nov 2024 02:11:33 +0000
Subject: [PATCH] ARM: mm/fault: add Dovetail IRQ pipeline support for Xenomai
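
Bracket the ARM fault handlers with fault_entry()/fault_exit() so that,
when CONFIG_IRQ_PIPELINE is enabled, the in-band (virtual) interrupt
state is synchronized with the hard interrupt state received on entry,
and hard IRQs are turned back on so out-of-band activity (the Xenomai
companion core, via Dovetail) can preempt in-band fault handling.
Kernel-mode faults skip the synchronization since they are either
quickly recoverable via fixup or lethal; the unrecoverable kernel fault
path additionally calls irq_pipeline_oops() before dumping state.
trace_ARM_trap_entry/exit trace points are emitted from
fault_entry()/fault_exit(). Without CONFIG_IRQ_PIPELINE, both helpers
compile away to no-ops.

The bracketing applied throughout is the one visible in do_sect_fault()
after this patch:

	static int
	do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
	{
		unsigned long irqflags;

		irqflags = fault_entry(ARM_TRAP_SECTION, regs);
		do_bad_area(addr, fsr, regs);
		fault_exit(ARM_TRAP_SECTION, regs, irqflags);
		return 0;
	}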

---
 kernel/arch/arm/mm/fault.c |  109 +++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 102 insertions(+), 7 deletions(-)

diff --git a/kernel/arch/arm/mm/fault.c b/kernel/arch/arm/mm/fault.c
index efa4020..e23d0ff 100644
--- a/kernel/arch/arm/mm/fault.c
+++ b/kernel/arch/arm/mm/fault.c
@@ -9,6 +9,7 @@
 #include <linux/signal.h>
 #include <linux/mm.h>
 #include <linux/hardirq.h>
+#include <linux/irq_pipeline.h>
 #include <linux/init.h>
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
@@ -21,10 +22,68 @@
 #include <asm/system_misc.h>
 #include <asm/system_info.h>
 #include <asm/tlbflush.h>
+#include <asm/dovetail.h>
+#define CREATE_TRACE_POINTS
+#include <asm/trace/exceptions.h>
 
 #include "fault.h"
 
 #ifdef CONFIG_MMU
+
+#ifdef CONFIG_IRQ_PIPELINE
+/*
+ * We need to synchronize the virtual interrupt state with the hard
+ * interrupt state we received on entry, then turn hardirqs back on to
+ * allow code which does not require strict serialization to be
+ * preempted by an out-of-band activity.
+ */
+static inline
+unsigned long fault_entry(int exception, struct pt_regs *regs)
+{
+	unsigned long flags;
+
+	trace_ARM_trap_entry(exception, regs);
+
+	flags = hard_local_save_flags();
+
+	/*
+	 * The companion core must demote the current context to
+	 * in-band stage if running oob on entry.
+	 */
+	mark_trap_entry(exception, regs);
+
+	if (raw_irqs_disabled_flags(flags)) {
+		stall_inband();
+		trace_hardirqs_off();
+	}
+
+	hard_local_irq_enable();
+
+	return flags;
+}
+
+static inline
+void fault_exit(int exception, struct pt_regs *regs,
+		unsigned long flags)
+{
+	WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled());
+
+	/*
+	 * We expect kentry_exit_pipelined() to clear the stall bit if
+	 * kentry_enter_pipelined() observed it that way.
+	 */
+	mark_trap_exit(exception, regs);
+	trace_ARM_trap_exit(exception, regs);
+	hard_local_irq_restore(flags);
+}
+
+#else	/* !CONFIG_IRQ_PIPELINE */
+
+#define fault_entry(__exception, __regs)  ({ 0; })
+#define fault_exit(__exception, __regs, __flags)  \
+	do { (void)(__flags); } while (0)
+
+#endif	/* !CONFIG_IRQ_PIPELINE */
 
 /*
  * This is useful to dump out the page tables associated with
@@ -96,6 +155,15 @@
 	pr_cont("\n");
 }
 #else					/* CONFIG_MMU */
+static inline unsigned long fault_entry(int exception, struct pt_regs *regs)
+{
+	return 0;
+}
+
+static inline void fault_exit(int exception, struct pt_regs *regs,
+			unsigned long combo)
+{ }
+
 void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
 { }
 #endif					/* CONFIG_MMU */
@@ -116,6 +184,7 @@
 	/*
 	 * No handler, we'll have to terminate things with extreme prejudice.
 	 */
+	irq_pipeline_oops();
 	bust_spinlocks(1);
 	pr_alert("8<--- cut here ---\n");
 	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
@@ -168,14 +237,22 @@
 {
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->active_mm;
+	unsigned long irqflags;
 
 	/*
 	 * If we are in kernel mode at this point, we
 	 * have no context to handle this fault with.
 	 */
-	if (user_mode(regs))
+	if (user_mode(regs)) {
+		irqflags = fault_entry(ARM_TRAP_ACCESS, regs);
 		__do_user_fault(addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
-	else
+		fault_exit(ARM_TRAP_ACCESS, regs, irqflags);
+	} else
+		/*
+		 * irq_pipeline: kernel faults are either quickly
+		 * recoverable via fixup, or lethal. In both cases, we
+		 * can skip the interrupt state synchronization.
+		 */
 		__do_kernel_fault(mm, addr, fsr, regs);
 }
 
@@ -244,9 +321,12 @@
 	int sig, code;
 	vm_fault_t fault;
 	unsigned int flags = FAULT_FLAG_DEFAULT;
+	unsigned long irqflags;
+
+	irqflags = fault_entry(ARM_TRAP_ACCESS, regs);
 
 	if (kprobe_page_fault(regs, fsr))
-		return 0;
+		goto out;
 
 	tsk = current;
 	mm  = tsk->mm;
@@ -302,7 +382,7 @@
 	if (fault_signal_pending(fault, regs)) {
 		if (!user_mode(regs))
 			goto no_context;
-		return 0;
+		goto out;
 	}
 
 	if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
@@ -318,7 +398,7 @@
 	 * Handle the "normal" case first - VM_FAULT_MAJOR
 	 */
 	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
-		return 0;
+		goto out;
 
 	/*
 	 * If we are in kernel mode at this point, we
@@ -334,7 +414,7 @@
 		 * got oom-killed)
 		 */
 		pagefault_out_of_memory();
-		return 0;
+		goto out;
 	}
 
 	if (fault & VM_FAULT_SIGBUS) {
@@ -355,10 +435,13 @@
 	}
 
 	__do_user_fault(addr, fsr, sig, code, regs);
-	return 0;
+	goto out;
 
 no_context:
 	__do_kernel_fault(mm, addr, fsr, regs);
+out:
+	fault_exit(ARM_TRAP_ACCESS, regs, irqflags);
+
 	return 0;
 }
 #else					/* CONFIG_MMU */
@@ -396,6 +479,8 @@
 	p4d_t *p4d, *p4d_k;
 	pud_t *pud, *pud_k;
 	pmd_t *pmd, *pmd_k;
+
+	WARN_ON_ONCE(irqs_pipelined() && !hard_irqs_disabled());
 
 	if (addr < TASK_SIZE)
 		return do_page_fault(addr, fsr, regs);
@@ -470,7 +555,11 @@
 static int
 do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
+	unsigned long irqflags;
+
+	irqflags = fault_entry(ARM_TRAP_SECTION, regs);
 	do_bad_area(addr, fsr, regs);
+	fault_exit(ARM_TRAP_SECTION, regs, irqflags);
 	return 0;
 }
 #endif /* CONFIG_ARM_LPAE */
@@ -518,10 +607,12 @@
 do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
 	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
+	unsigned long irqflags;
 
 	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
 		return;
 
+	irqflags = fault_entry(ARM_TRAP_DABT, regs);
 	pr_alert("8<--- cut here ---\n");
 	pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
 		inf->name, fsr, addr);
@@ -529,6 +620,7 @@
 
 	arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
 		       fsr, 0);
+	fault_exit(ARM_TRAP_DABT, regs, irqflags);
 }
 
 void __init
@@ -548,15 +640,18 @@
 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
 {
 	const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
+	unsigned long irqflags;
 
 	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
 		return;
 
+	irqflags = fault_entry(ARM_TRAP_PABT, regs);
 	pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
 		inf->name, ifsr, addr);
 
 	arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
 		       ifsr, 0);
+	fault_exit(ARM_TRAP_PABT, regs, irqflags);
 }
 
 /*

--
Gitblit v1.6.2