From 2f529f9b558ca1c1bd74be7437a84e4711743404 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 01 Nov 2024 02:11:33 +0000
Subject: [PATCH] arm64/entry: add Dovetail/IRQ-pipeline hooks for Xenomai

---
 kernel/arch/arm64/kernel/entry.S |   62 ++++++++++++++++++++++++++++++-
 1 file changed, 60 insertions(+), 2 deletions(-)

diff --git a/kernel/arch/arm64/kernel/entry.S b/kernel/arch/arm64/kernel/entry.S
index 9f19e6b..49a7349 100644
--- a/kernel/arch/arm64/kernel/entry.S
+++ b/kernel/arch/arm64/kernel/entry.S
@@ -39,6 +39,12 @@
 #endif
 	.endm
 
+	.macro user_exit_el0_irq
+#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
+	bl	enter_el0_irq
+#endif
+	.endm
+
 	.macro user_enter_irqoff
 #if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
 	bl	exit_to_user_mode
@@ -534,6 +540,21 @@
 	mov	x24, scs_sp		// preserve the original shadow stack
 #endif
 
+#ifdef CONFIG_DOVETAIL
+	/*
+	 * When the pipeline is enabled, context switches over the irq
+	 * stack are allowed (for the co-kernel), and more interrupts
+	 * can be taken over sibling stack contexts. So we need a not so
+	 * subtle way of figuring out whether the irq stack was actually
+	 * exited, which cannot depend on the current task pointer.
+	 */
+	adr_this_cpu x25, irq_nesting, x26
+	ldr	w26, [x25]
+	cmp	w26, #0
+	add	w26, w26, #1
+	str	w26, [x25]
+	b.ne	9998f
+#else
 	/*
 	 * Compare sp with the base of the task stack.
 	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
@@ -543,6 +564,7 @@
 	eor	x25, x25, x19
 	and	x25, x25, #~(THREAD_SIZE - 1)
 	cbnz	x25, 9998f
+#endif
 
 	ldr_this_cpu x25, irq_stack_ptr, x26
 	mov	x26, #IRQ_STACK_SIZE
@@ -563,11 +585,18 @@
 	 * The callee-saved regs (x19-x29) should be preserved between
 	 * irq_stack_entry and irq_stack_exit, but note that kernel_entry
 	 * uses x20-x23 to store data for later use.
+	 * IRQ_PIPELINE: caution, we have to preserve w0.
 	 */
 	.macro	irq_stack_exit
 	mov	sp, x19
 #ifdef CONFIG_SHADOW_CALL_STACK
 	mov	scs_sp, x24
+#endif
+#ifdef CONFIG_DOVETAIL
+	adr_this_cpu x1, irq_nesting, x2
+	ldr	w2, [x1]
+	add	w2, w2, #-1
+	str	w2, [x1]
 #endif
 	.endm
 
@@ -578,7 +607,15 @@
  * Interrupt handling.
  */
 	.macro	irq_handler, handler:req
+#ifdef CONFIG_IRQ_PIPELINE
+#	.if	\handler == handle_arch_irq
+	ldr	x1, =handle_arch_irq_pipelined
+#	.else
+#	.error	"irq_pipeline: cannot handle interrupt"
+#	.endif
+#else
 	ldr_l	x1, \handler
+#endif
 	mov	x0, sp
 	irq_stack_entry
 	blr	x1
@@ -616,6 +653,9 @@
 
 	irq_handler	\handler
 
+#ifdef CONFIG_IRQ_PIPELINE
+	cbz	w0, 66f				// skip epilogue if oob or in-band stalled
+#endif
 #ifdef CONFIG_PREEMPTION
 	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
 alternative_if ARM64_HAS_IRQ_PRIO_MASKING
@@ -630,13 +670,13 @@
 	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
 1:
 #endif
-
+66:
 	mov	x0, sp
 	bl	exit_el1_irq_or_nmi
 	.endm
 
 	.macro el0_interrupt_handler, handler:req
-	user_exit_irqoff
+	user_exit_el0_irq
 	enable_da_f
 
 	tbz	x22, #55, 1f
@@ -815,6 +855,9 @@
 	kernel_entry 0
 el0_irq_naked:
 	el0_interrupt_handler handle_arch_irq
+#ifdef CONFIG_IRQ_PIPELINE
+	cbz	w0, fast_ret_from_el0_irq	// skip epilogue if oob
+#endif
 	b	ret_to_user
 SYM_CODE_END(el0_irq)
 
@@ -846,6 +889,11 @@
 SYM_CODE_START_LOCAL(ret_to_user)
 	disable_daif
 	gic_prio_kentry_setup tmp=x3
+#ifdef CONFIG_IRQ_PIPELINE
+	ldr	x0, [tsk, #TSK_TI_LOCAL_FLAGS]
+	tst	x0, #_TLF_OOB
+	b.ne	fast_ret_to_user
+#endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
 #endif
@@ -854,12 +902,22 @@
 	cbnz	x2, work_pending
 finish_ret_to_user:
 	user_enter_irqoff
+ret_to_user_naked:
 	enable_step_tsk x19, x2
 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
 	bl	stackleak_erase
 #endif
 	kernel_exit 0
 
+#ifdef CONFIG_IRQ_PIPELINE
+fast_ret_from_el0_irq:
+	disable_daif
+	gic_prio_kentry_setup tmp=x3
+fast_ret_to_user:
+	ldr	x19, [tsk, #TSK_TI_FLAGS]
+	b	ret_to_user_naked
+#endif
+
 /*
  * Ok, we need to do extra processing, enter the slow path.
  */

--
Gitblit v1.6.2