```diff
 #define _Q_PENDING_LOOPS	(1 << 9)

 #define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
-
-static __always_inline bool __queued_RMW_btsl(struct qspinlock *lock)
-{
-	GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter,
-			 "I", _Q_PENDING_OFFSET, "%0", c);
-}
-
 static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
 {
-	u32 val = 0;
+	u32 val;

-	if (__queued_RMW_btsl(lock))
-		val |= _Q_PENDING_VAL;
-
+	/*
+	 * We can't use GEN_BINARY_RMWcc() inside an if() stmt because asm goto
+	 * and CONFIG_PROFILE_ALL_BRANCHES=y results in a label inside a
+	 * statement expression, which GCC doesn't like.
+	 */
+	val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
+			       "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL;
 	val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;

 	return val;
```
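The hunk above drops the branchy `__queued_RMW_btsl()` wrapper and uses `GEN_BINARY_RMWcc()` directly: the boolean result of `btsl` (was the pending bit already set?) is multiplied by `_Q_PENDING_VAL` to produce the bit value without an `if ()`, which sidesteps the asm goto plus CONFIG_PROFILE_ALL_BRANCHES issue described in the new comment. For comparison, here is a minimal sketch of the same interface written with only generic atomics; it is modelled on the arch-independent fallback in kernel/locking/qspinlock.c and is not part of this diff:

```c
/*
 * Sketch, not part of this diff: the generic fallback semantics of
 * queued_fetch_set_pending_acquire() expressed as a plain atomic OR.
 * It atomically sets the pending bit with acquire ordering and returns
 * the old lock word; the x86 btsl version above implements the same
 * contract with a single locked instruction.
 */
static __always_inline u32 generic_fetch_set_pending_acquire(struct qspinlock *lock)
{
	return (u32)atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
}
```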
```diff
 extern void __pv_init_lock_hash(void);
 extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
 extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
+extern bool nopvspin;

 #define queued_spin_unlock queued_spin_unlock
 /**
```
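The new `extern bool nopvspin;` exposes a knob for disabling the paravirt spinlock path; the flag itself has to be defined and parsed elsewhere. A hedged sketch of the usual wiring follows, where the handler name and the `early_param()` string are assumptions rather than anything shown in this diff:

```c
/*
 * Hypothetical sketch, not part of this diff: a boolean like nopvspin is
 * normally set from an early_param() handler so that later PV spinlock
 * initialisation can check it and fall back to native behaviour.
 */
bool nopvspin;

static int __init parse_nopvspin(char *arg)
{
	nopvspin = true;
	return 0;
}
early_param("nopvspin", parse_nopvspin);
```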
```diff
 #endif

 #ifdef CONFIG_PARAVIRT
+/*
+ * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
+ *
+ * Native (and PV wanting native due to vCPU pinning) should disable this key.
+ * It is done in this backwards fashion to only have a single direction change,
+ * which removes ordering between native_pv_spin_init() and HV setup.
+ */
 DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);

 void native_pv_lock_init(void) __init;

```
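The `native_pv_spin_init()` mentioned in the new comment presumably refers to the `native_pv_lock_init()` declared just below it. A minimal sketch of how that init function would perform the single true-to-false transition the comment describes, modelled on arch/x86/kernel/paravirt.c and therefore an assumption rather than part of this diff:

```c
/*
 * Sketch, not part of this diff: on bare metal there is no hypervisor to
 * hijack the lock for, so the key is simply switched off once during boot.
 * Because the key only ever goes true -> false, there is no ordering
 * requirement between this and hypervisor setup, as the comment notes.
 */
void __init native_pv_lock_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_disable(&virt_spin_lock_key);
}
```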
```diff
+/*
+ * Shortcut for the queued_spin_lock_slowpath() function that allows
+ * virt to hijack it.
+ *
+ * Returns:
+ *   true - lock has been negotiated, all done;
+ *   false - queued_spin_lock_slowpath() will do its thing.
+ */
 #define virt_spin_lock virt_spin_lock
 static inline bool virt_spin_lock(struct qspinlock *lock)
 {
```
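The Returns: convention only makes sense together with the call site. Below is a sketch of how the slowpath consumes it, modelled on kernel/locking/qspinlock.c; the elided body is an assumption and not part of this diff:

```c
/*
 * Sketch of the caller, not part of this diff: when virt_spin_lock()
 * returns true the hijack has already acquired the lock and the queued
 * slowpath is skipped entirely; on false the normal pending-bit/MCS
 * queue code runs.
 */
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	if (virt_spin_lock(lock))
		return;

	/* ... pending-bit and MCS queue handling continues here ... */
}
```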