@@ -22,6 +22,13 @@
 #include <linux/debug_locks.h>
 #include <linux/export.h>
 
+#ifdef CONFIG_MMIOWB
+#ifndef arch_mmiowb_state
+DEFINE_PER_CPU(struct mmiowb_state, __mmiowb_state);
+EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
+#endif
+#endif
+
 /*
  * If lockdep is enabled then we use the non-preemption spin-ops
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
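
This hunk defines the generic per-CPU mmiowb state for architectures that select CONFIG_MMIOWB but do not provide their own arch_mmiowb_state() accessor. For context, the consumers live in include/asm-generic/mmiowb.h: the I/O accessors mark mmiowb_pending on each MMIO write, and the spinlock hooks flush it on unlock. A simplified sketch of those helpers (not part of this patch; see the tree for the authoritative version):

static inline void mmiowb_spin_lock(void)
{
	/* Entered a spinlock critical section: bump the nesting depth. */
	__mmiowb_state()->nesting_count++;
}

static inline void mmiowb_spin_unlock(void)
{
	struct mmiowb_state *ms = __mmiowb_state();

	/* Only pay for the barrier if MMIO writes happened while locked. */
	if (unlikely(ms->mmiowb_pending)) {
		ms->mmiowb_pending = 0;
		mmiowb();	/* order MMIO before the lock release */
	}

	ms->nesting_count--;
}

When the architecture does not override it, __mmiowb_state() is a raw_cpu_ptr() lookup of the __mmiowb_state variable defined and exported above.
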
@@ -117,8 +124,11 @@
  * __[spin|read|write]_lock_bh()
  */
 BUILD_LOCK_OPS(spin, raw_spinlock);
+
+#ifndef CONFIG_PREEMPT_RT
 BUILD_LOCK_OPS(read, rwlock);
 BUILD_LOCK_OPS(write, rwlock);
+#endif
 
 #endif
 
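
BUILD_LOCK_OPS() stamps out the preemption-aware trylock loops for each lock type; the two rwlock instantiations are now skipped on PREEMPT_RT, where rwlock_t becomes a sleeping lock with its own implementation. Roughly what one instantiation expands to (simplified sketch; the real macro also generates the _irq, _irqsave and _bh variants and uses the arch relax hook rather than a bare cpu_relax()):

void __lockfunc __raw_read_lock(rwlock_t *lock)
{
	for (;;) {
		preempt_disable();
		if (likely(do_raw_read_trylock(lock)))
			break;		/* got the lock with preemption off */
		preempt_enable();	/* back off so other tasks can run */

		cpu_relax();
	}
}
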
@@ -201,6 +211,8 @@
 }
 EXPORT_SYMBOL(_raw_spin_unlock_bh);
 #endif
+
+#ifndef CONFIG_PREEMPT_RT
 
 #ifndef CONFIG_INLINE_READ_TRYLOCK
 int __lockfunc _raw_read_trylock(rwlock_t *lock)
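
The guard opens just before the out-of-line rwlock entry points. Each is a thin wrapper built only when the corresponding CONFIG_INLINE_* option is off; the function the hunk cuts off continues in the expected shape (shown for context):

int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
	return __raw_read_trylock(lock);
}
EXPORT_SYMBOL(_raw_read_trylock);
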
@@ -346,6 +358,8 @@
 EXPORT_SYMBOL(_raw_write_unlock_bh);
 #endif
 
+#endif /* !PREEMPT_RT */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
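
Closing the guard here compiles out the entire out-of-line rwlock block on PREEMPT_RT in one span. Callers are unaffected either way: the rwlock API is unchanged, only the backing implementation differs. A minimal usage sketch (the example_lock/example_value names are hypothetical) that builds the same way under both configurations:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_lock);	/* hypothetical, for illustration */
static int example_value;

static int example_read(void)
{
	int v;

	/* Multiple readers may hold the lock; on PREEMPT_RT this may sleep. */
	read_lock(&example_lock);
	v = example_value;
	read_unlock(&example_lock);

	return v;
}

static void example_write(int v)
{
	/* Writers are exclusive against readers and other writers. */
	write_lock(&example_lock);
	example_value = v;
	write_unlock(&example_lock);
}
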