From 748e4f3d702def1a4bff191e0cf93b6a05340f01 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 07:41:34 +0000
Subject: [PATCH] add gpio led uart

---
 kernel/arch/x86/include/asm/qspinlock.h | 35 ++++++++++++++++++++++++-----------
 1 file changed, 24 insertions(+), 11 deletions(-)

diff --git a/kernel/arch/x86/include/asm/qspinlock.h b/kernel/arch/x86/include/asm/qspinlock.h
index 055c60a..d86ab94 100644
--- a/kernel/arch/x86/include/asm/qspinlock.h
+++ b/kernel/arch/x86/include/asm/qspinlock.h
@@ -11,20 +11,17 @@
 #define _Q_PENDING_LOOPS	(1 << 9)
 
 #define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
-
-static __always_inline bool __queued_RMW_btsl(struct qspinlock *lock)
-{
-	GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter,
-			 "I", _Q_PENDING_OFFSET, "%0", c);
-}
-
 static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
 {
-	u32 val = 0;
+	u32 val;
 
-	if (__queued_RMW_btsl(lock))
-		val |= _Q_PENDING_VAL;
-
+	/*
+	 * We can't use GEN_BINARY_RMWcc() inside an if() stmt because asm goto
+	 * and CONFIG_PROFILE_ALL_BRANCHES=y results in a label inside a
+	 * statement expression, which GCC doesn't like.
+	 */
+	val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
+			       "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL;
 	val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;
 
 	return val;
@@ -35,6 +32,7 @@
 extern void __pv_init_lock_hash(void);
 extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
 extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
+extern bool nopvspin;
 
 #define queued_spin_unlock queued_spin_unlock
 /**
@@ -66,10 +64,25 @@
 #endif
 
 #ifdef CONFIG_PARAVIRT
+/*
+ * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
+ *
+ * Native (and PV wanting native due to vCPU pinning) should disable this key.
+ * It is done in this backwards fashion to only have a single direction change,
+ * which removes ordering between native_pv_lock_init() and HV setup.
+ */
 DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
 
 void native_pv_lock_init(void) __init;
 
+/*
+ * Shortcut for the queued_spin_lock_slowpath() function that allows
+ * virt to hijack it.
+ *
+ * Returns:
+ *   true - lock has been negotiated, all done;
+ *   false - queued_spin_lock_slowpath() will do its thing.
+ */
 #define virt_spin_lock virt_spin_lock
 static inline bool virt_spin_lock(struct qspinlock *lock)
 {
-- 
Gitblit v1.6.2
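
A note on the first hunk, for readers outside the kernel tree: after the x86 GEN_*_RMWcc() rework, GEN_BINARY_RMWcc() evaluates to a plain 0/1 value, so the patch folds it straight into val by multiplying with _Q_PENDING_VAL instead of testing it in an if() statement, which keeps the asm-goto label out of a statement expression. The sketch below reproduces that shape in portable C11; test_and_set_bit32(), PENDING_OFFSET, PENDING_VAL and PENDING_MASK are hypothetical stand-ins for the kernel's LOCK BTSL macro and _Q_PENDING_* constants, not kernel API.

/*
 * Illustrative sketch only - not the kernel macro. C11 atomics stand
 * in for the LOCK-prefixed BTSL instruction.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PENDING_OFFSET	8
#define PENDING_VAL	(1u << PENDING_OFFSET)
#define PENDING_MASK	(0xffu << PENDING_OFFSET)

/* Atomically set a bit and report whether it was already set. */
static bool test_and_set_bit32(atomic_uint *v, unsigned int bit)
{
	return atomic_fetch_or(v, 1u << bit) & (1u << bit);
}

static uint32_t fetch_set_pending_acquire(atomic_uint *v)
{
	uint32_t val;

	/*
	 * Same shape as the patched kernel code: multiply the 0/1
	 * result by PENDING_VAL instead of branching on it, so the
	 * test-and-set never sits inside an if() condition.
	 */
	val = test_and_set_bit32(v, PENDING_OFFSET) * PENDING_VAL;
	val |= atomic_load(v) & ~PENDING_MASK;

	return val;
}

int main(void)
{
	atomic_uint lockval = 0;

	printf("first:  %#x\n", fetch_set_pending_acquire(&lockval));
	printf("second: %#x\n", fetch_set_pending_acquire(&lockval));
	return 0;
}

The first call reports the pending bit as previously clear (0), the second as previously set (0x100), matching the old if()-based logic bit for bit.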
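On the last hunk: the new comment documents virt_spin_lock() as the point where virtualized guests hijack queued_spin_lock_slowpath(), gated by virt_spin_lock_key; the hunk stops at the function's opening brace. Below is a hedged user-space sketch of the behavior the comment describes (true means the lock was taken here, false defers to the queued slowpath), assuming the usual unfair test-and-set fallback; virt_spin_lock_enabled, LOCKED_VAL and virt_spin_lock_sketch() are illustrative names, and C11 atomics stand in for the kernel's primitives.

#include <stdatomic.h>
#include <stdbool.h>

#define LOCKED_VAL	1u	/* stand-in for _Q_LOCKED_VAL */

/* Stand-in for static_branch_likely(&virt_spin_lock_key). */
static bool virt_spin_lock_enabled = true;

static bool virt_spin_lock_sketch(atomic_uint *lockval)
{
	unsigned int expected;

	if (!virt_spin_lock_enabled)
		return false;	/* caller falls through to the queued slowpath */

	/*
	 * Unfair test-and-set loop: under a hypervisor a fair queued
	 * lock suffers badly from lock-holder preemption, so the guest
	 * trades fairness for forward progress.
	 */
	do {
		while (atomic_load(lockval) != 0)
			;	/* spin; the kernel would cpu_relax() here */
		expected = 0;
	} while (!atomic_compare_exchange_strong(lockval, &expected, LOCKED_VAL));

	return true;	/* lock acquired, slowpath skipped */
}

Relatedly, the extern bool nopvspin from the second hunk is the flag behind the "nopvspin" kernel boot parameter, which lets a guest opt out of paravirtual spinlock handling even when the hypervisor offers it.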