From 2f529f9b558ca1c1bd74be7437a84e4711743404 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 01 Nov 2024 02:11:33 +0000
Subject: [PATCH] ARM: mmu_context: add Xenomai irq pipeline support

Wrap the low-level MMU context switch with protect_inband_mm()/
unprotect_inband_mm() from <linux/irq_pipeline.h>, so an out-of-band
(Xenomai) preemption cannot observe a half-switched mm while the in-band
stage is changing page tables. The former switch_mm() is renamed to
__switch_mm(), switch_mm() becomes a protected wrapper around it, and
switch_oob_mm() is added for callers running on the out-of-band stage.

---
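Reviewer note (not part of the commit message): below is a minimal,
standalone mock of the switching pattern this patch introduces. It is an
illustration under stated assumptions, not kernel code: the real
protect_inband_mm()/unprotect_inband_mm() live in <linux/irq_pipeline.h>
and mask interrupts for the in-band stage, and the real __switch_mm()
programs the MMU via cpu_switch_mm(). The stubs here only demonstrate the
intended call ordering.

/* mock_switch.c - illustrative only; all types and stubs are hypothetical. */
#include <stdio.h>

struct mm_struct { const char *name; };

/* Assumed semantics: keep out-of-band preemption away from a half-switched mm. */
#define protect_inband_mm(flags)   do { (flags) = 1UL; puts("  [inband mm protected]"); } while (0)
#define unprotect_inband_mm(flags) do { (void)(flags); puts("  [inband mm unprotected]"); } while (0)

/* Stands in for the raw cpu_switch_mm()-level work done by __switch_mm(). */
static void __switch_mm(struct mm_struct *prev, struct mm_struct *next)
{
	printf("  switch mm: %s -> %s\n", prev->name, next->name);
}

/* In-band (scheduler) path: the switch runs inside the protected section. */
static void switch_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

	protect_inband_mm(flags);
	__switch_mm(prev, next);
	unprotect_inband_mm(flags);
}

/* Out-of-band path: the caller is assumed to already run with the pipeline
 * arbitrating interrupts, so the raw switch is used directly. */
static void switch_oob_mm(struct mm_struct *prev, struct mm_struct *next)
{
	__switch_mm(prev, next);
}

int main(void)
{
	struct mm_struct a = { "inband_task" }, b = { "oob_task" };

	puts("scheduler-driven switch:");
	switch_mm(&a, &b);
	puts("out-of-band switch:");
	switch_oob_mm(&b, &a);
	return 0;
}

Built with a plain "cc mock_switch.c", this prints the
protect/switch/unprotect ordering for the in-band path and the bare
switch for the out-of-band path, mirroring the hunks below.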
 kernel/arch/arm/include/asm/mmu_context.h |   40 +++++++++++++++++++++++++++++++---------
 1 file changed, 31 insertions(+), 9 deletions(-)

diff --git a/kernel/arch/arm/include/asm/mmu_context.h b/kernel/arch/arm/include/asm/mmu_context.h
index f99ed52..32cded3 100644
--- a/kernel/arch/arm/include/asm/mmu_context.h
+++ b/kernel/arch/arm/include/asm/mmu_context.h
@@ -14,6 +14,7 @@
 #include <linux/sched.h>
 #include <linux/mm_types.h>
 #include <linux/preempt.h>
+#include <linux/irq_pipeline.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
@@ -72,6 +73,7 @@
 static inline void finish_arch_post_lock_switch(void)
 {
 	struct mm_struct *mm = current->mm;
+	unsigned long flags;
 
 	if (mm && mm->context.switch_pending) {
 		/*
@@ -83,7 +85,9 @@
 		preempt_disable();
 		if (mm->context.switch_pending) {
 			mm->context.switch_pending = 0;
+			protect_inband_mm(flags);
 			cpu_switch_mm(mm->pgd, mm);
+			unprotect_inband_mm(flags);
 		}
 		preempt_enable_no_resched();
 	}
@@ -102,7 +106,7 @@
 #endif	/* CONFIG_CPU_HAS_ASID */
 
 #define destroy_context(mm)		do { } while(0)
-#define activate_mm(prev,next)		switch_mm(prev, next, NULL)
+#define activate_mm(prev,next)		__switch_mm(prev, next, NULL)
 
 /*
  * This is called when "tsk" is about to enter lazy TLB mode.
@@ -118,15 +122,9 @@
 {
 }
 
-/*
- * This is the actual mm switch as far as the scheduler
- * is concerned.  No registers are touched.  We avoid
- * calling the CPU specific function when the mm hasn't
- * actually changed.
- */
 static inline void
-switch_mm(struct mm_struct *prev, struct mm_struct *next,
-	  struct task_struct *tsk)
+__switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	    struct task_struct *tsk)
 {
 #ifdef CONFIG_MMU
 	unsigned int cpu = smp_processor_id();
@@ -149,6 +147,30 @@
 #endif
 }
 
+/*
+ * This is the actual mm switch as far as the scheduler
+ * is concerned.  No registers are touched.  We avoid
+ * calling the CPU specific function when the mm hasn't
+ * actually changed.
+ */
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	  struct task_struct *tsk)
+{
+	unsigned long flags;
+
+	protect_inband_mm(flags);
+	__switch_mm(prev, next, tsk);
+	unprotect_inband_mm(flags);
+}
+
 #define deactivate_mm(tsk,mm)	do { } while (0)
 
+static inline void
+switch_oob_mm(struct mm_struct *prev, struct mm_struct *next,
+	      struct task_struct *tsk)
+{
+	__switch_mm(prev, next, tsk);
+}
+
 #endif

--
Gitblit v1.6.2