From 748e4f3d702def1a4bff191e0cf93b6a05340f01 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 07:41:34 +0000
Subject: [PATCH] xtensa: rework entry.S exception entry and syscall dispatch

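Update arch/xtensa/kernel/entry.S: use the NO_SYSCALL and PS_WOE_MASK
definitions, clear the exclusive access monitor (clrex) on exception
entry and save/restore ATOMCTL8 in _switch_to on XCHAL_HAVE_EXCLUSIVE
cores, test pending work with a single _TIF_SIGPENDING |
_TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL mask, key kernel preemption on
CONFIG_PREEMPTION, add fast_illegal_instruction_user for
CONFIG_USER_ABI_CALL0_PROBE, drop fast_syscall_kernel, switch to the
abi_entry/abi_ret helpers, and rework system_call to bounds-check the
syscall number against __NR_syscalls and to call the syscall trace
hooks only when thread-info work flags are set.

The reworked system_call dispatch behaves roughly like the standalone
host-C model below. It is only a sketch: do_syscall_trace_enter/leave,
sys_call_table, __NR_syscalls and _TIF_WORK_MASK are the names visible
in the diff, while the TOY_* names, the two-argument handler signature
and the printf stand-ins are simplified placeholders, and the case
where the tracer cancels or rewrites the syscall number is omitted.

    #include <errno.h>
    #include <stdio.h>

    #define TOY_NR_SYSCALLS 2              /* stands in for __NR_syscalls */
    #define TOY_TIF_WORK    0x1            /* stands in for _TIF_WORK_MASK */

    typedef long (*syscall_fn)(long, long);

    static long toy_getanswer(long a, long b) { (void)a; (void)b; return 42; }
    static long toy_add(long a, long b)       { return a + b; }

    /* stands in for sys_call_table[] */
    static const syscall_fn toy_call_table[TOY_NR_SYSCALLS] = {
            toy_getanswer,
            toy_add,
    };

    static long toy_system_call(unsigned long nr, long arg0, long arg1,
                                unsigned long ti_flags)
    {
            long ret = -ENOSYS;            /* default for out-of-range numbers */

            if (ti_flags & TOY_TIF_WORK)   /* do_syscall_trace_enter() */
                    printf("trace enter: nr=%lu\n", nr);

            if (nr < TOY_NR_SYSCALLS)      /* bgeu a7, a5, 1f in the diff */
                    ret = toy_call_table[nr](arg0, arg1);

            if (ti_flags & TOY_TIF_WORK)   /* do_syscall_trace_leave() */
                    printf("trace leave: ret=%ld\n", ret);

            return ret;                    /* stored back to regs->areg[2] */
    }

    int main(void)
    {
            printf("%ld\n", toy_system_call(1, 2, 3, 0));            /* 5 */
            printf("%ld\n", toy_system_call(7, 0, 0, TOY_TIF_WORK)); /* -ENOSYS */
            return 0;
    }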
---
 kernel/arch/xtensa/kernel/entry.S |  147 +++++++++++++++++++++++++++++++-----------------
 1 file changed, 94 insertions(+), 53 deletions(-)

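Note on the XCHAL_HAVE_EXCLUSIVE hunks (illustrative background, not part
of the change): clrex on exception entry and the getex/ATOMCTL8 save in
_switch_to keep a stale exclusive monitor, armed by interrupted or
previously scheduled code, from letting an unrelated store-exclusive
succeed later. Atomics on such cores are retry loops around exclusive
loads and stores; the standalone C11 example below shows the kind of
compare-and-swap retry loop that relies on that guarantee. It uses
portable <stdatomic.h> operations rather than Xtensa instructions and is
purely a host-side model.

    #include <stdatomic.h>
    #include <stdio.h>

    /* Atomically increment *v with a compare-and-swap retry loop.  On
     * exclusive-access hardware this kind of loop compiles down to
     * exclusive load/store pairs; the store only succeeds while the
     * CPU's exclusive monitor is still armed for the address, which is
     * the state that clrex/getex manage across exceptions and context
     * switches. */
    static void add_one(atomic_int *v)
    {
            int old = atomic_load_explicit(v, memory_order_relaxed);

            /* Retry until no other writer got in between load and store. */
            while (!atomic_compare_exchange_weak_explicit(
                            v, &old, old + 1,
                            memory_order_seq_cst, memory_order_relaxed))
                    ;
    }

    int main(void)
    {
            atomic_int counter = 0;

            add_one(&counter);
            printf("%d\n", atomic_load(&counter)); /* prints 1 */
            return 0;
    }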
diff --git a/kernel/arch/xtensa/kernel/entry.S b/kernel/arch/xtensa/kernel/entry.S
index 9cbc380..647b162 100644
--- a/kernel/arch/xtensa/kernel/entry.S
+++ b/kernel/arch/xtensa/kernel/entry.S
@@ -13,6 +13,7 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/pgtable.h>
 #include <asm/asm-offsets.h>
 #include <asm/asmmacro.h>
 #include <asm/processor.h>
@@ -22,7 +23,6 @@
 #include <asm/unistd.h>
 #include <asm/ptrace.h>
 #include <asm/current.h>
-#include <asm/pgtable.h>
 #include <asm/page.h>
 #include <asm/signal.h>
 #include <asm/tlbflush.h>
@@ -364,7 +364,7 @@
 	s32i	a2, a1, PT_DEBUGCAUSE
 	s32i	a3, a1, PT_PC
 
-	movi	a2, -1
+	movi	a2, NO_SYSCALL
 	rsr	a3, excvaddr
 	s32i	a2, a1, PT_SYSCALL
 	movi	a2, 0
@@ -372,6 +372,11 @@
 #if XCHAL_HAVE_LOOPS
 	xsr	a2, lcount
 	s32i	a2, a1, PT_LCOUNT
+#endif
+
+#if XCHAL_HAVE_EXCLUSIVE
+	/* Clear exclusive access monitor set by interrupted code */
+	clrex
 #endif
 
 	/* It is now save to restore the EXC_TABLE_FIXUP variable. */
@@ -414,7 +419,7 @@
 	movi	a3, LOCKLEVEL
 
 .Lexception:
-	movi	a0, 1 << PS_WOE_BIT
+	movi	a0, PS_WOE_MASK
 	or	a3, a3, a0
 #else
 	addi	a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT
@@ -422,7 +427,7 @@
 	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
 					# a3 = PS.INTLEVEL
 	moveqz	a3, a0, a2		# a3 = LOCKLEVEL iff interrupt
-	movi	a2, 1 << PS_WOE_BIT
+	movi	a2, PS_WOE_MASK
 	or	a3, a3, a2
 	rsr	a2, exccause
 #endif
@@ -495,8 +500,8 @@
 	 */
 
 	_bbsi.l	a4, TIF_NEED_RESCHED, 3f
-	_bbsi.l	a4, TIF_NOTIFY_RESUME, 2f
-	_bbci.l	a4, TIF_SIGPENDING, 5f
+	movi	a2, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL
+	bnone	a4, a2, 5f
 
 2:	l32i	a4, a1, PT_DEPC
 	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
@@ -520,7 +525,7 @@
 	call4	schedule	# void schedule (void)
 	j	1b
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 6:
 	_bbci.l	a4, TIF_NEED_RESCHED, 4f
 
@@ -529,7 +534,7 @@
 	l32i	a4, a2, TI_PRE_COUNT
 	bnez	a4, 4f
 	call4	preempt_schedule_irq
-	j	1b
+	j	4f
 #endif
 
 #if XTENSA_FAKE_NMI
@@ -922,7 +927,7 @@
 	wsr	a1, windowbase
 	rsync
 
-	movi	a1, (1 << PS_WOE_BIT) | LOCKLEVEL
+	movi	a1, PS_WOE_MASK | LOCKLEVEL
 	wsr	a1, ps
 	rsync
 
@@ -938,6 +943,9 @@
 ENDPROC(unrecoverable_exception)
 
 /* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
+
+	__XTENSA_HANDLER
+	.literal_position
 
 /*
  * Fast-handler for alloca exceptions
@@ -956,14 +964,14 @@
  * of the proper size instead.
  *
  * This algorithm simply backs out the register changes started by the user
- * excpetion handler, makes it appear that we have started a window underflow
+ * exception handler, makes it appear that we have started a window underflow
  * by rotating the window back and then setting the old window base (OWB) in
  * the 'ps' register with the rolled back window base. The 'movsp' instruction
  * will be re-executed and this time since the next window frames is in the
  * active AR registers it won't cause an exception.
  *
  * If the WindowUnderflow code gets a TLB miss the page will get mapped
- * the the partial windeowUnderflow will be handeled in the double exception
+ * the partial WindowUnderflow will be handled in the double exception
  * handler.
  *
  * Entry condition:
@@ -1003,7 +1011,43 @@
 4:	j	_WindowUnderflow4
 ENDPROC(fast_alloca)
 
+#ifdef CONFIG_USER_ABI_CALL0_PROBE
 /*
+ * fast illegal instruction handler.
+ *
+ * This is used to fix up user PS.WOE on the exception caused
+ * by the first opcode related to register window. If PS.WOE is
+ * already set it goes directly to the common user exception handler.
+ *
+ * Entry condition:
+ *
+ *   a0:	trashed, original value saved on stack (PT_AREG0)
+ *   a1:	a1
+ *   a2:	new stack pointer, original in DEPC
+ *   a3:	a3
+ *   depc:	a2, original value saved on stack (PT_DEPC)
+ *   excsave_1:	dispatch table
+ */
+
+ENTRY(fast_illegal_instruction_user)
+
+	rsr	a0, ps
+	bbsi.l	a0, PS_WOE_BIT, 1f
+	s32i	a3, a2, PT_AREG3
+	movi	a3, PS_WOE_MASK
+	or	a0, a0, a3
+	wsr	a0, ps
+	l32i	a3, a2, PT_AREG3
+	l32i	a0, a2, PT_AREG0
+	rsr	a2, depc
+	rfe
+1:
+	call0	user_exception
+
+ENDPROC(fast_illegal_instruction_user)
+#endif
+
+	/*
  * fast system calls.
  *
  * WARNING:  The kernel doesn't save the entire user context before
@@ -1022,25 +1066,6 @@
  *   excsave_1:	dispatch table
  */
 
-ENTRY(fast_syscall_kernel)
-
-	/* Skip syscall. */
-
-	rsr	a0, epc1
-	addi	a0, a0, 3
-	wsr	a0, epc1
-
-	l32i	a0, a2, PT_DEPC
-	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable
-
-	rsr	a0, depc			# get syscall-nr
-	_beqz	a0, fast_syscall_spill_registers
-	_beqi	a0, __NR_xtensa, fast_syscall_xtensa
-
-	j	kernel_exception
-
-ENDPROC(fast_syscall_kernel)
-
 ENTRY(fast_syscall_user)
 
 	/* Skip syscall. */
@@ -1056,7 +1081,7 @@
 	_beqz	a0, fast_syscall_spill_registers
 	_beqi	a0, __NR_xtensa, fast_syscall_xtensa
 
-	j	user_exception
+	call0	user_exception
 
 ENDPROC(fast_syscall_user)
 
@@ -1378,7 +1403,7 @@
 	rsr	a3, excsave1
 	l32i	a1, a3, EXC_TABLE_KSTK
 
-	movi	a4, (1 << PS_WOE_BIT) | LOCKLEVEL
+	movi	a4, PS_WOE_MASK | LOCKLEVEL
 	wsr	a4, ps
 	rsync
 
@@ -1747,8 +1772,8 @@
 
 	rsr	a2, ps
 	bbsi.l	a2, PS_UM_BIT, 1f
-	j	_kernel_exception
-1:	j	_user_exception
+	call0	_kernel_exception
+1:	call0	_user_exception
 
 ENDPROC(fast_second_level_miss)
 
@@ -1844,13 +1869,14 @@
 
 	rsr	a2, ps
 	bbsi.l	a2, PS_UM_BIT, 1f
-	j	_kernel_exception
-1:	j	_user_exception
+	call0	_kernel_exception
+1:	call0	_user_exception
 
 ENDPROC(fast_store_prohibited)
 
 #endif /* CONFIG_MMU */
 
+	.text
 /*
  * System Calls.
  *
@@ -1861,27 +1887,34 @@
 
 ENTRY(system_call)
 
-	entry	a1, 32
+	abi_entry_default
 
 	/* regs->syscall = regs->areg[2] */
 
-	l32i	a3, a2, PT_AREG2
-	mov	a6, a2
-	s32i	a3, a2, PT_SYSCALL
-	call4	do_syscall_trace_enter
-	mov	a3, a6
+	l32i	a7, a2, PT_AREG2
+	s32i	a7, a2, PT_SYSCALL
 
+	GET_THREAD_INFO(a4, a1)
+	l32i	a3, a4, TI_FLAGS
+	movi	a4, _TIF_WORK_MASK
+	and	a3, a3, a4
+	beqz	a3, 1f
+
+	mov	a6, a2
+	call4	do_syscall_trace_enter
+	beqz	a6, .Lsyscall_exit
+	l32i	a7, a2, PT_SYSCALL
+
+1:
 	/* syscall = sys_call_table[syscall_nr] */
 
 	movi	a4, sys_call_table
-	movi	a5, __NR_syscall_count
+	movi	a5, __NR_syscalls
 	movi	a6, -ENOSYS
-	bgeu	a3, a5, 1f
+	bgeu	a7, a5, 1f
 
-	addx4	a4, a3, a4
+	addx4	a4, a7, a4
 	l32i	a4, a4, 0
-	movi	a5, sys_ni_syscall;
-	beq	a4, a5, 1f
 
 	/* Load args: arg0 - arg5 are passed via regs. */
 
@@ -1892,17 +1925,19 @@
 	l32i	a10, a2, PT_AREG8
 	l32i	a11, a2, PT_AREG9
 
-	/* Pass one additional argument to the syscall: pt_regs (on stack) */
-	s32i	a2, a1, 0
-
 	callx4	a4
 
 1:	/* regs->areg[2] = return_value */
 
 	s32i	a6, a2, PT_AREG2
+	bnez	a3, 1f
+.Lsyscall_exit:
+	abi_ret_default
+
+1:
 	mov	a6, a2
 	call4	do_syscall_trace_leave
-	retw
+	abi_ret_default
 
 ENDPROC(system_call)
 
@@ -1953,7 +1988,7 @@
 
 ENTRY(_switch_to)
 
-	entry	a1, 48
+	abi_entry(XTENSA_SPILL_STACK_RESERVE)
 
 	mov	a11, a3			# and 'next' (a3)
 
@@ -1990,6 +2025,12 @@
 	s32i	a3, a4, THREAD_CPENABLE
 #endif
 
+#if XCHAL_HAVE_EXCLUSIVE
+	l32i	a3, a5, THREAD_ATOMCTL8
+	getex	a3
+	s32i	a3, a4, THREAD_ATOMCTL8
+#endif
+
 	/* Flush register file. */
 
 	spill_registers_kernel
@@ -2014,7 +2055,7 @@
 	wsr	a14, ps
 	rsync
 
-	retw
+	abi_ret(XTENSA_SPILL_STACK_RESERVE)
 
 ENDPROC(_switch_to)
 

--
Gitblit v1.6.2