From 1543e317f1da31b75942316931e8f491a8920811 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 04 Jan 2024 10:08:02 +0000
Subject: [PATCH] disable FB

---
 kernel/arch/parisc/include/asm/spinlock.h | 172 +++++++++++++++++++++++++--------------------------
 1 files changed, 76 insertions(+), 96 deletions(-)

diff --git a/kernel/arch/parisc/include/asm/spinlock.h b/kernel/arch/parisc/include/asm/spinlock.h
index 8a63515..fa5ee8a 100644
--- a/kernel/arch/parisc/include/asm/spinlock.h
+++ b/kernel/arch/parisc/include/asm/spinlock.h
@@ -10,13 +10,21 @@
 static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
 	volatile unsigned int *a = __ldcw_align(x);
-	return *a == 0;
+	return READ_ONCE(*a) == 0;
 }
 
-#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
+static inline void arch_spin_lock(arch_spinlock_t *x)
+{
+	volatile unsigned int *a;
+
+	a = __ldcw_align(x);
+	while (__ldcw(a) == 0)
+		while (*a == 0)
+			continue;
+}
 
 static inline void arch_spin_lock_flags(arch_spinlock_t *x,
-					 unsigned long flags)
+					unsigned long flags)
 {
 	volatile unsigned int *a;
 
@@ -25,10 +33,8 @@
 		while (*a == 0)
 			if (flags & PSW_SM_I) {
 				local_irq_enable();
-				cpu_relax();
 				local_irq_disable();
-			} else
-				cpu_relax();
+			}
 }
 #define arch_spin_lock_flags arch_spin_lock_flags
 
@@ -37,133 +43,107 @@
 	volatile unsigned int *a;
 
 	a = __ldcw_align(x);
-	mb();
-	*a = 1;
+	/* Release with ordered store. */
+	__asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
-	int ret;
 
 	a = __ldcw_align(x);
-	ret = __ldcw(a) != 0;
-
-	return ret;
+	return __ldcw(a) != 0;
 }
 
 /*
  * Read-write spinlocks, allowing multiple readers but only one writer.
- * Linux rwlocks are unfair to writers; they can be starved for an indefinite
- * time by readers. With care, they can also be taken in interrupt context.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
  *
- * In the PA-RISC implementation, we have a spinlock and a counter.
- * Readers use the lock to serialise their access to the counter (which
- * records how many readers currently hold the lock).
- * Writers hold the spinlock, preventing any readers or other writers from
- * grabbing the rwlock.
+ * The spinlock itself is contained in @counter and access to it is
+ * serialized with @lock_mutex.
  */
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void arch_read_lock(arch_rwlock_t *rw)
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
+	int ret = 0;
 	unsigned long flags;
-	local_irq_save(flags);
-	arch_spin_lock_flags(&rw->lock, flags);
-	rw->counter++;
-	arch_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
-}
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned long flags;
 	local_irq_save(flags);
-	arch_spin_lock_flags(&rw->lock, flags);
-	rw->counter--;
-	arch_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
-}
+	arch_spin_lock(&(rw->lock_mutex));
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
-{
-	unsigned long flags;
- retry:
-	local_irq_save(flags);
-	if (arch_spin_trylock(&rw->lock)) {
-		rw->counter++;
-		arch_spin_unlock(&rw->lock);
-		local_irq_restore(flags);
-		return 1;
+	/*
+	 * zero means writer holds the lock exclusively, deny Reader.
+	 * Otherwise grant lock to first/subseq reader
+	 */
+	if (rw->counter > 0) {
+		rw->counter--;
+		ret = 1;
 	}
 
+	arch_spin_unlock(&(rw->lock_mutex));
 	local_irq_restore(flags);
-	/* If write-locked, we fail to acquire the lock */
-	if (rw->counter < 0)
-		return 0;
 
-	/* Wait until we have a realistic chance at the lock */
-	while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
+	return ret;
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	arch_spin_lock(&(rw->lock_mutex));
+
+	/*
+	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+	 * deny writer. Otherwise if unlocked grant to writer
+	 * Hence the claim that Linux rwlocks are unfair to writers.
+	 * (can be starved for an indefinite time by readers).
+	 */
+	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+		rw->counter = 0;
+		ret = 1;
+	}
+	arch_spin_unlock(&(rw->lock_mutex));
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+	while (!arch_read_trylock(rw))
 		cpu_relax();
-
-	goto retry;
 }
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void arch_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+	while (!arch_write_trylock(rw))
+		cpu_relax();
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
-retry:
+
 	local_irq_save(flags);
-	arch_spin_lock_flags(&rw->lock, flags);
-
-	if (rw->counter != 0) {
-		arch_spin_unlock(&rw->lock);
-		local_irq_restore(flags);
-
-		while (rw->counter != 0)
-			cpu_relax();
-
-		goto retry;
-	}
-
-	rw->counter = -1; /* mark as write-locked */
-	mb();
+	arch_spin_lock(&(rw->lock_mutex));
+	rw->counter++;
+	arch_spin_unlock(&(rw->lock_mutex));
 	local_irq_restore(flags);
 }
 
-static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
-{
-	rw->counter = 0;
-	arch_spin_unlock(&rw->lock);
-}
-
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
-	int result = 0;
 
 	local_irq_save(flags);
-	if (arch_spin_trylock(&rw->lock)) {
-		if (rw->counter == 0) {
-			rw->counter = -1;
-			result = 1;
-		} else {
-			/* Read-locked. Oh well. */
-			arch_spin_unlock(&rw->lock);
-		}
-	}
+	arch_spin_lock(&(rw->lock_mutex));
+	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+	arch_spin_unlock(&(rw->lock_mutex));
 	local_irq_restore(flags);
-
-	return result;
 }
 
 #endif /* __ASM_SPINLOCK_H */
--
Gitblit v1.6.2
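
Note on the spinlock side of this patch: the open-coded arch_spin_lock() retries the atomic ldcw (PA-RISC's load-and-clear-word, which returns 0 when the lock was already held) only after an inner loop of plain loads sees the lock word become nonzero again, and arch_spin_unlock() now publishes the release with a single ordered store ("stw,ma") instead of mb() followed by a plain store. A rough portable analogue of that shape, sketched with C11 atomics and atomic_exchange standing in for ldcw (the toy_* names are invented for illustration, not kernel API), might look like this:

#include <stdatomic.h>
#include <stdbool.h>

/* Lock word convention follows ldcw: nonzero means free, zero means held. */
typedef struct { atomic_uint word; } toy_spinlock_t;

#define TOY_SPINLOCK_INIT { 1 }

/* Stand-in for __ldcw(): atomically fetch the word and clear it to zero. */
static inline unsigned int toy_ldcw(atomic_uint *a)
{
	return atomic_exchange_explicit(a, 0, memory_order_acquire);
}

static inline void toy_spin_lock(toy_spinlock_t *x)
{
	/* Outer loop: attempt the atomic load-and-clear. */
	while (toy_ldcw(&x->word) == 0)
		/* Inner loop: spin on cheap plain loads until the lock looks
		 * free, so the atomic is only retried when it can succeed. */
		while (atomic_load_explicit(&x->word, memory_order_relaxed) == 0)
			;
}

static inline bool toy_spin_trylock(toy_spinlock_t *x)
{
	return toy_ldcw(&x->word) != 0;
}

static inline void toy_spin_unlock(toy_spinlock_t *x)
{
	/* A release store stands in for the patch's ordered "stw,ma". */
	atomic_store_explicit(&x->word, 1, memory_order_release);
}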
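
Note on the rwlock side: the replacement keeps the whole state in rw->counter and serialises every update to it with the inner spinlock rw->lock_mutex. A reader succeeds only while the counter is positive and accounts for itself by decrementing; a writer succeeds only when the counter still equals __ARCH_RW_LOCK_UNLOCKED__ and takes it to zero; the blocking lock routines simply spin on the corresponding trylock, which is why readers can starve a writer indefinitely. The sketch below mirrors that structure in user space purely for illustration: a pthread mutex stands in for the arch spinlock, TOY_RW_UNLOCKED is an invented stand-in for __ARCH_RW_LOCK_UNLOCKED__, and the kernel's local_irq_save()/restore() and cpu_relax() are omitted because they have no direct user-space equivalent. All toy_* names are hypothetical.

#include <pthread.h>
#include <stdbool.h>

/* Invented stand-in for __ARCH_RW_LOCK_UNLOCKED__: counter value when nobody
 * holds the lock; each reader subtracts one, a writer takes it to zero. */
#define TOY_RW_UNLOCKED	0x01000000

typedef struct {
	pthread_mutex_t lock_mutex;	/* plays the role of rw->lock_mutex */
	int counter;			/* plays the role of rw->counter */
} toy_rwlock_t;

#define TOY_RWLOCK_INIT { PTHREAD_MUTEX_INITIALIZER, TOY_RW_UNLOCKED }

/* Returns true when the read lock was taken. */
static bool toy_read_trylock(toy_rwlock_t *rw)
{
	bool ret = false;

	pthread_mutex_lock(&rw->lock_mutex);
	/* Zero means a writer holds the lock; any positive value admits
	 * another reader, who records itself by decrementing. */
	if (rw->counter > 0) {
		rw->counter--;
		ret = true;
	}
	pthread_mutex_unlock(&rw->lock_mutex);
	return ret;
}

/* Returns true when the write lock was taken. */
static bool toy_write_trylock(toy_rwlock_t *rw)
{
	bool ret = false;

	pthread_mutex_lock(&rw->lock_mutex);
	/* Only a fully unlocked counter grants the writer exclusive access,
	 * so a steady stream of readers can starve writers. */
	if (rw->counter == TOY_RW_UNLOCKED) {
		rw->counter = 0;
		ret = true;
	}
	pthread_mutex_unlock(&rw->lock_mutex);
	return ret;
}

static void toy_read_lock(toy_rwlock_t *rw)
{
	while (!toy_read_trylock(rw))
		;	/* the kernel version calls cpu_relax() here */
}

static void toy_write_lock(toy_rwlock_t *rw)
{
	while (!toy_write_trylock(rw))
		;
}

static void toy_read_unlock(toy_rwlock_t *rw)
{
	pthread_mutex_lock(&rw->lock_mutex);
	rw->counter++;
	pthread_mutex_unlock(&rw->lock_mutex);
}

static void toy_write_unlock(toy_rwlock_t *rw)
{
	pthread_mutex_lock(&rw->lock_mutex);
	rw->counter = TOY_RW_UNLOCKED;
	pthread_mutex_unlock(&rw->lock_mutex);
}

Starting the counter at a large positive value lets read_unlock simply increment, while write_unlock restores the full unlocked value in one assignment.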