From 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 07:44:59 +0000
Subject: [PATCH] ARC: spinlocks: drop the extra smp_mb() before lock and after unlock

Per the kernel memory model, ACQUIRE only needs a barrier after taking the
lock and RELEASE only needs one before releasing it, so the smp_mb() on the
other side of each operation is superfluous. Drop those barriers, switch the
unlock stores to WRITE_ONCE(), and add the SPDX license tag. The EX-based
(!CONFIG_ARC_HAS_LLSC) lock path keeps its extra barrier to work around a
hardware quirk.
---
kernel/arch/arc/include/asm/spinlock.h | 60 +++++++++++++++---------------------------------------------
1 file changed, 15 insertions(+), 45 deletions(-)
diff --git a/kernel/arch/arc/include/asm/spinlock.h b/kernel/arch/arc/include/asm/spinlock.h
index 2ba04a7..1928716 100644
--- a/kernel/arch/arc/include/asm/spinlock.h
+++ b/kernel/arch/arc/include/asm/spinlock.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __ASM_SPINLOCK_H
@@ -21,8 +18,6 @@
{
unsigned int val;
- smp_mb();
-
__asm__ __volatile__(
"1: llock %[val], [%[slock]] \n"
" breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */
@@ -34,6 +29,14 @@
[LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
: "memory", "cc");
+ /*
+ * ACQUIRE barrier to ensure loads/stores after taking the lock
+ * don't "bleed-up" out of the critical section (leak-in is allowed)
+ * http://www.spinics.net/lists/kernel/msg2010409.html
+ *
+ * ARCv2 only has load-load, store-store and all-all barriers,
+ * thus the full all-all barrier is needed here
+ */
smp_mb();
}
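
For readers less familiar with ACQUIRE/RELEASE ordering, the pattern this hunk settles on can be sketched in portable userspace C11 (toy names, not the kernel API, and only an illustration of the ordering, not of the LLOCK/SCOND loop itself): acquire ordering on the winning test-and-set plays the role of the single smp_mb() kept after the lock loop, and release ordering on the clear plays the role of the smp_mb() kept before the unlock store.

#include <stdatomic.h>

typedef struct {
	atomic_flag flag;
} toy_spinlock_t;

#define TOY_SPINLOCK_INIT { ATOMIC_FLAG_INIT }

static inline void toy_spin_lock(toy_spinlock_t *l)
{
	/* Spin until the flag was previously clear; ACQUIRE ordering on
	 * the winning test-and-set keeps critical-section accesses from
	 * moving up before the lock is taken (leak-in is still allowed). */
	while (atomic_flag_test_and_set_explicit(&l->flag, memory_order_acquire))
		;	/* busy-wait */
}

static inline void toy_spin_unlock(toy_spinlock_t *l)
{
	/* RELEASE ordering keeps critical-section accesses from moving
	 * down past the unlocking store. */
	atomic_flag_clear_explicit(&l->flag, memory_order_release);
}
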
@@ -41,8 +44,6 @@
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned int val, got_it = 0;
-
- smp_mb();
__asm__ __volatile__(
"1: llock %[val], [%[slock]] \n"
@@ -67,9 +68,7 @@
{
smp_mb();
- lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
- smp_mb();
+ WRITE_ONCE(lock->slock, __ARCH_SPIN_LOCK_UNLOCKED__);
}
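
In C11 terms, the unlock path this hunk leaves behind is roughly a full fence followed by a plain store; a minimal sketch, assuming a toy atomic_uint lock word rather than the kernel's arch_spinlock_t:

#include <stdatomic.h>

static inline void toy_unlock_fence_style(atomic_uint *slock,
					  unsigned int unlocked_val)
{
	/* Full fence: the analogue of the single remaining smp_mb(),
	 * giving the store below RELEASE semantics. */
	atomic_thread_fence(memory_order_seq_cst);

	/* Plain (untorn) store: the analogue of WRITE_ONCE(). */
	atomic_store_explicit(slock, unlocked_val, memory_order_relaxed);
}
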
/*
@@ -80,8 +79,6 @@
static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned int val;
-
- smp_mb();
/*
* zero means writer holds the lock exclusively, deny Reader.
@@ -113,8 +110,6 @@
{
unsigned int val, got_it = 0;
- smp_mb();
-
__asm__ __volatile__(
"1: llock %[val], [%[rwlock]] \n"
" brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
@@ -139,8 +134,6 @@
static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned int val;
-
- smp_mb();
/*
* If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
@@ -174,8 +167,6 @@
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned int val, got_it = 0;
-
- smp_mb();
__asm__ __volatile__(
"1: llock %[val], [%[rwlock]] \n"
@@ -217,17 +208,13 @@
: [val] "=&r" (val)
: [rwlock] "r" (&(rw->counter))
: "memory", "cc");
-
- smp_mb();
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
smp_mb();
- rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
-
- smp_mb();
+ WRITE_ONCE(rw->counter, __ARCH_RW_LOCK_UNLOCKED__);
}
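
The read/write lock hunks above all rely on the same counter scheme: the counter starts at a large positive "fully unlocked" value, each reader decrements it, and a writer atomically takes it from fully unlocked to 0. A hypothetical C11 sketch of the two trylock cases (toy names and value, no fairness, not the kernel implementation):

#include <stdatomic.h>
#include <stdbool.h>

#define TOY_RW_UNLOCKED 0x01000000	/* stand-in for __ARCH_RW_LOCK_UNLOCKED__ */

static bool toy_read_trylock(atomic_int *counter)
{
	int val = atomic_load_explicit(counter, memory_order_relaxed);

	/* 0 means a writer holds the lock exclusively: deny the reader. */
	while (val > 0) {
		if (atomic_compare_exchange_weak_explicit(counter, &val, val - 1,
							  memory_order_acquire,
							  memory_order_relaxed))
			return true;
	}
	return false;
}

static bool toy_write_trylock(atomic_int *counter)
{
	int expected = TOY_RW_UNLOCKED;

	/* Only succeeds when no reader or writer is present. */
	return atomic_compare_exchange_strong_explicit(counter, &expected, 0,
						       memory_order_acquire,
						       memory_order_relaxed);
}
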
#else /* !CONFIG_ARC_HAS_LLSC */
@@ -237,34 +224,19 @@
unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
/*
- * This smp_mb() is technically superfluous, we only need the one
- * after the lock for providing the ACQUIRE semantics.
- * However doing the "right" thing was regressing hackbench
- * so keeping this, pending further investigation
+ * Per LKMM, smp_mb() is only required after _lock (and before _unlock)
+ * for ACQUIRE and RELEASE semantics respectively. However, EX-based
+ * spinlocks need the extra smp_mb() to work around a hardware quirk.
*/
smp_mb();
__asm__ __volatile__(
"1: ex %0, [%1] \n"
-#ifdef CONFIG_EZNPS_MTM_EXT
- " .word %3 \n"
-#endif
" breq %0, %2, 1b \n"
: "+&r" (val)
: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
-#ifdef CONFIG_EZNPS_MTM_EXT
- , "i"(CTOP_INST_SCHD_RW)
-#endif
: "memory");
- /*
- * ACQUIRE barrier to ensure load/store after taking the lock
- * don't "bleed-up" out of the critical section (leak-in is allowed)
- * http://www.spinics.net/lists/kernel/msg2010409.html
- *
- * ARCv2 only has load-load, store-store and all-all barrier
- * thus need the full all-all barrier
- */
smp_mb();
}
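
The !CONFIG_ARC_HAS_LLSC path above spins on an atomic exchange (EX) instead of an LLOCK/SCOND loop. Roughly translated to C11 atomics (toy names; the leading full fence stands in for the extra smp_mb() the comment keeps for the hardware quirk):

#include <stdatomic.h>

static inline void toy_ex_spin_lock(atomic_uint *slock, unsigned int locked_val)
{
	/* Extra full barrier kept on this path only, per the comment
	 * above; plain ACQUIRE ordering would otherwise suffice. */
	atomic_thread_fence(memory_order_seq_cst);

	/* Keep swapping LOCKED in until we read back something else,
	 * i.e. until the previous holder has stored UNLOCKED. */
	while (atomic_exchange_explicit(slock, locked_val,
					memory_order_acquire) == locked_val)
		;	/* busy-wait */
}
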
@@ -309,8 +281,7 @@
: "memory");
/*
- * superfluous, but keeping for now - see pairing version in
- * arch_spin_lock above
+ * see pairing version/comment in arch_spin_lock above
*/
smp_mb();
}
@@ -344,7 +315,6 @@
arch_spin_unlock(&(rw->lock_mutex));
local_irq_restore(flags);
- smp_mb();
return ret;
}
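
As a quick sanity check of the acquire/release pairing sketched above, a self-contained userspace test (hypothetical, not part of the patch; build with something like gcc -pthread): two threads bump a plain counter under a C11 test-and-set lock, and with ACQUIRE on lock and RELEASE on unlock the final count comes out exact.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag lock_flag = ATOMIC_FLAG_INIT;
static long counter;

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000000; i++) {
		while (atomic_flag_test_and_set_explicit(&lock_flag, memory_order_acquire))
			;	/* spin */
		counter++;	/* critical section */
		atomic_flag_clear_explicit(&lock_flag, memory_order_release);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("counter = %ld (expect 2000000)\n", counter);
	return 0;
}
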
--
Gitblit v1.6.2