From b22da3d8526a935aa31e086e63f60ff3246cb61c Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 09 Dec 2023 07:24:11 +0000
Subject: [PATCH] MIPS: cmpxchg: use __SYNC()/__SC_BEQZ and add 64-bit cmpxchg64() on 32-bit SMP

Switch the LL/SC sequences in __xchg_asm() and __cmpxchg_asm() from the
local __scbeqz definition to the generic __SC_BEQZ helper, emit the
Loongson3 LL/SC workaround barriers via the __SYNC() infrastructure from
<asm/sync.h>, and skip the separate smp_mb__before_llsc()/smp_llsc_mb()
barriers when the workaround already provides them. Also implement
cmpxchg64() for 32-bit SMP kernels whose CPUs support lld/scd, disabling
interrupts around the sequence so the upper halves of the 64-bit register
values are not clobbered.
---
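Note for reviewers (illustration only, not part of the change): once this
patch is applied, a 32-bit SMP kernel can call cmpxchg64() directly on a
64-bit value. The hypothetical helper below sketches the usual retry loop;
the name counter64_add() and the counter it updates are made up for the
example. A torn READ_ONCE() of the 64-bit value is harmless here, because a
stale snapshot simply makes cmpxchg64() fail and the loop retry.

	/* Illustrative sketch only -- not added by this patch. */
	static void counter64_add(u64 *counter, u64 delta)
	{
		u64 old, new;

		do {
			old = READ_ONCE(*counter);
			new = old + delta;
		} while (cmpxchg64(counter, old, new) != old);
	}
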
 kernel/arch/mips/include/asm/cmpxchg.h |  170 +++++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 141 insertions(+), 29 deletions(-)

diff --git a/kernel/arch/mips/include/asm/cmpxchg.h b/kernel/arch/mips/include/asm/cmpxchg.h
index 520ca16..3e9c41f 100644
--- a/kernel/arch/mips/include/asm/cmpxchg.h
+++ b/kernel/arch/mips/include/asm/cmpxchg.h
@@ -11,18 +11,9 @@
 #include <linux/bug.h>
 #include <linux/irqflags.h>
 #include <asm/compiler.h>
+#include <asm/llsc.h>
+#include <asm/sync.h>
 #include <asm/war.h>
-
-/*
- * Using a branch-likely instruction to check the result of an sc instruction
- * works around a bug present in R10000 CPUs prior to revision 3.0 that could
- * cause ll-sc sequences to execute non-atomically.
- */
-#if R10000_LLSC_WAR
-# define __scbeqz "beqzl"
-#else
-# define __scbeqz "beqz"
-#endif
 
 /*
  * These functions doesn't exist, so if they are called you'll either:
@@ -36,6 +27,8 @@
  */
 extern unsigned long __cmpxchg_called_with_bad_pointer(void)
 	__compiletime_error("Bad argument size for cmpxchg");
+extern unsigned long __cmpxchg64_unsupported(void)
+	__compiletime_error("cmpxchg64 not available; cpu_has_64bits may be false");
 extern unsigned long __xchg_called_with_bad_pointer(void)
 	__compiletime_error("Bad argument size for xchg");
 
@@ -47,17 +40,19 @@
 		__asm__ __volatile__(					\
 		"	.set	push				\n"	\
 		"	.set	noat				\n"	\
+		"	.set	push				\n"	\
 		"	.set	" MIPS_ISA_ARCH_LEVEL "		\n"	\
+		"	" __SYNC(full, loongson3_war) "		\n"	\
 		"1:	" ld "	%0, %2		# __xchg_asm	\n"	\
-		"	.set	mips0				\n"	\
+		"	.set	pop				\n"	\
 		"	move	$1, %z3				\n"	\
 		"	.set	" MIPS_ISA_ARCH_LEVEL "		\n"	\
 		"	" st "	$1, %1				\n"	\
-		"\t" __scbeqz "	$1, 1b				\n"	\
+		"\t" __SC_BEQZ	"$1, 1b				\n"	\
 		"	.set	pop				\n"	\
 		: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)		\
 		: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)			\
-		: "memory");						\
+		: __LLSC_CLOBBER);					\
 	} else {							\
 		unsigned long __flags;					\
 									\
@@ -99,7 +94,13 @@
 ({									\
 	__typeof__(*(ptr)) __res;					\
 									\
-	smp_mb__before_llsc();						\
+	/*								\
+	 * In the Loongson3 workaround case __xchg_asm() already	\
+	 * contains a completion barrier prior to the LL, so we don't	\
+	 * need to emit an extra one here.				\
+	 */								\
+	if (__SYNC_loongson3_war == 0)					\
+		smp_mb__before_llsc();					\
 									\
 	__res = (__typeof__(*(ptr)))					\
 		__xchg((ptr), (unsigned long)(x), sizeof(*(ptr)));	\
@@ -117,19 +118,21 @@
 		__asm__ __volatile__(					\
 		"	.set	push				\n"	\
 		"	.set	noat				\n"	\
+		"	.set	push				\n"	\
 		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"	\
+		"	" __SYNC(full, loongson3_war) "		\n"	\
 		"1:	" ld "	%0, %2		# __cmpxchg_asm \n"	\
 		"	bne	%0, %z3, 2f			\n"	\
-		"	.set	mips0				\n"	\
+		"	.set	pop				\n"	\
 		"	move	$1, %z4				\n"	\
 		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"	\
 		"	" st "	$1, %1				\n"	\
-		"\t" __scbeqz "	$1, 1b				\n"	\
+		"\t" __SC_BEQZ	"$1, 1b				\n"	\
 		"	.set	pop				\n"	\
-		"2:						\n"	\
+		"2:	" __SYNC(full, loongson3_war) "		\n"	\
 		: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)		\
-		: GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new)		\
-		: "memory");						\
+		: GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new)	\
+		: __LLSC_CLOBBER);					\
 	} else {							\
 		unsigned long __flags;					\
 									\
@@ -183,9 +186,23 @@
 ({									\
 	__typeof__(*(ptr)) __res;					\
 									\
-	smp_mb__before_llsc();						\
+	/*								\
+	 * In the Loongson3 workaround case __cmpxchg_asm() already	\
+	 * contains a completion barrier prior to the LL, so we don't	\
+	 * need to emit an extra one here.				\
+	 */								\
+	if (__SYNC_loongson3_war == 0)					\
+		smp_mb__before_llsc();					\
+									\
 	__res = cmpxchg_local((ptr), (old), (new));			\
-	smp_llsc_mb();							\
+									\
+	/*								\
+	 * In the Loongson3 workaround case __cmpxchg_asm() already	\
+	 * contains a completion barrier after the SC, so we don't	\
+	 * need to emit an extra one here.				\
+	 */								\
+	if (__SYNC_loongson3_war == 0)					\
+		smp_llsc_mb();						\
 									\
 	__res;								\
 })
@@ -203,13 +220,108 @@
 	cmpxchg((ptr), (o), (n));					\
   })
 #else
-#include <asm-generic/cmpxchg-local.h>
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-#ifndef CONFIG_SMP
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
-#endif
-#endif
 
-#undef __scbeqz
+# include <asm-generic/cmpxchg-local.h>
+# define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+# ifdef CONFIG_SMP
+
+static inline unsigned long __cmpxchg64(volatile void *ptr,
+					unsigned long long old,
+					unsigned long long new)
+{
+	unsigned long long tmp, ret;
+	unsigned long flags;
+
+	/*
+	 * The assembly below has to combine 32 bit values into a 64 bit
+	 * register, and split 64 bit values from one register into two. If we
+	 * were to take an interrupt in the middle of this we'd only save the
+	 * least significant 32 bits of each register & probably clobber the
+	 * most significant 32 bits of the 64 bit values we're using. In order
+	 * to avoid this we must disable interrupts.
+	 */
+	local_irq_save(flags);
+
+	asm volatile(
+	"	.set	push				\n"
+	"	.set	" MIPS_ISA_ARCH_LEVEL "		\n"
+	/* Load 64 bits from ptr */
+	"	" __SYNC(full, loongson3_war) "		\n"
+	"1:	lld	%L0, %3		# __cmpxchg64	\n"
+	"	.set	pop				\n"
+	/*
+	 * Split the 64 bit value we loaded into the 2 registers that hold the
+	 * ret variable.
+	 */
+	"	dsra	%M0, %L0, 32			\n"
+	"	sll	%L0, %L0, 0			\n"
+	/*
+	 * Compare ret against old, breaking out of the loop if they don't
+	 * match.
+	 */
+	"	bne	%M0, %M4, 2f			\n"
+	"	bne	%L0, %L4, 2f			\n"
+	/*
+	 * Combine the 32 bit halves from the 2 registers that hold the new
+	 * variable into a single 64 bit register.
+	 */
+#  if MIPS_ISA_REV >= 2
+	"	move	%L1, %L5			\n"
+	"	dins	%L1, %M5, 32, 32		\n"
+#  else
+	"	dsll	%L1, %L5, 32			\n"
+	"	dsrl	%L1, %L1, 32			\n"
+	"	.set	noat				\n"
+	"	dsll	$at, %M5, 32			\n"
+	"	or	%L1, %L1, $at			\n"
+	"	.set	at				\n"
+#  endif
+	"	.set	push				\n"
+	"	.set	" MIPS_ISA_ARCH_LEVEL "		\n"
+	/* Attempt to store new at ptr */
+	"	scd	%L1, %2				\n"
+	/* If we failed, loop! */
+	"\t" __SC_BEQZ "%L1, 1b				\n"
+	"2:	" __SYNC(full, loongson3_war) "		\n"
+	"	.set	pop				\n"
+	: "=&r"(ret),
+	  "=&r"(tmp),
+	  "=" GCC_OFF_SMALL_ASM() (*(unsigned long long *)ptr)
+	: GCC_OFF_SMALL_ASM() (*(unsigned long long *)ptr),
+	  "r" (old),
+	  "r" (new)
+	: "memory");
+
+	local_irq_restore(flags);
+	return ret;
+}
+
+#  define cmpxchg64(ptr, o, n) ({					\
+	unsigned long long __old = (__typeof__(*(ptr)))(o);		\
+	unsigned long long __new = (__typeof__(*(ptr)))(n);		\
+	__typeof__(*(ptr)) __res;					\
+									\
+	/*								\
+	 * We can only use cmpxchg64 if we know that the CPU supports	\
+	 * 64-bits, ie. lld & scd. Our call to __cmpxchg64_unsupported	\
+	 * will cause a build error unless cpu_has_64bits is a		\
+	 * compile-time constant 1.					\
+	 */								\
+	if (cpu_has_64bits && kernel_uses_llsc) {			\
+		smp_mb__before_llsc();					\
+		__res = __cmpxchg64((ptr), __old, __new);		\
+		smp_llsc_mb();						\
+	} else {							\
+		__res = __cmpxchg64_unsupported();			\
+	}								\
+									\
+	__res;								\
+})
+
+# else /* !CONFIG_SMP */
+#  define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+# endif /* !CONFIG_SMP */
+#endif /* !CONFIG_64BIT */
 
 #endif /* __ASM_CMPXCHG_H */

--
Gitblit v1.6.2