```diff
@@ -22,6 +22,13 @@
 #include <linux/debug_locks.h>
 #include <linux/export.h>
 
+#ifdef CONFIG_MMIOWB
+#ifndef arch_mmiowb_state
+DEFINE_PER_CPU(struct mmiowb_state, __mmiowb_state);
+EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
+#endif
+#endif
+
 /*
  * If lockdep is enabled then we use the non-preemption spin-ops
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
@@ -117,11 +124,8 @@
  * __[spin|read|write]_lock_bh()
  */
 BUILD_LOCK_OPS(spin, raw_spinlock);
-
-#ifndef CONFIG_PREEMPT_RT_FULL
 BUILD_LOCK_OPS(read, rwlock);
 BUILD_LOCK_OPS(write, rwlock);
-#endif
 
 #endif
 
@@ -204,8 +208,6 @@
 }
 EXPORT_SYMBOL(_raw_spin_unlock_bh);
 #endif
-
-#ifndef CONFIG_PREEMPT_RT_FULL
 
 #ifndef CONFIG_INLINE_READ_TRYLOCK
 int __lockfunc _raw_read_trylock(rwlock_t *lock)
@@ -350,8 +352,6 @@
 }
 EXPORT_SYMBOL(_raw_write_unlock_bh);
 #endif
-
-#endif /* !PREEMPT_RT_FULL */
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
```