@@ -22,6 +22,13 @@
 #include <linux/debug_locks.h>
 #include <linux/export.h>
 
+#ifdef CONFIG_MMIOWB
+#ifndef arch_mmiowb_state
+DEFINE_PER_CPU(struct mmiowb_state, __mmiowb_state);
+EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
+#endif
+#endif
+
 /*
  * If lockdep is enabled then we use the non-preemption spin-ops
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
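Context for the hunk above: __mmiowb_state is the generic per-CPU backing store for mmiowb() tracking, used when an architecture selects CONFIG_MMIOWB but does not provide its own arch_mmiowb_state(). A minimal sketch of how the lock paths and MMIO accessors are expected to consult it follows; the struct layout and helper bodies approximate include/asm-generic/mmiowb.h and are illustrative, not a verbatim copy.

```c
/* Illustrative sketch, approximating include/asm-generic/mmiowb.h. */
struct mmiowb_state {
	u16	nesting_count;	/* depth of nested spinlocks on this CPU  */
	u16	mmiowb_pending;	/* an MMIO write was issued under a lock  */
};

#ifndef arch_mmiowb_state
/* Fall back to the generic per-CPU state defined in spinlock.c above. */
#define __mmiowb_state()	raw_cpu_ptr(&__mmiowb_state)
#else
#define __mmiowb_state()	arch_mmiowb_state()
#endif

/* Called from the MMIO write accessors (e.g. writel()). */
static inline void mmiowb_set_pending(void)
{
	struct mmiowb_state *ms = __mmiowb_state();

	if (likely(ms->nesting_count))
		ms->mmiowb_pending = ms->nesting_count;
}

static inline void mmiowb_spin_lock(void)
{
	__mmiowb_state()->nesting_count++;	/* entering a locked region */
}

static inline void mmiowb_spin_unlock(void)
{
	struct mmiowb_state *ms = __mmiowb_state();

	/* Only pay for the barrier if an MMIO write happened under the lock. */
	if (unlikely(ms->mmiowb_pending)) {
		ms->mmiowb_pending = 0;
		mmiowb();
	}
	ms->nesting_count--;
}
```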
@@ -118,7 +125,7 @@
  */
 BUILD_LOCK_OPS(spin, raw_spinlock);
 
-#ifndef CONFIG_PREEMPT_RT_FULL
+#ifndef CONFIG_PREEMPT_RT
 BUILD_LOCK_OPS(read, rwlock);
 BUILD_LOCK_OPS(write, rwlock);
 #endif
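The rwlock variants above are now guarded by CONFIG_PREEMPT_RT rather than the out-of-tree CONFIG_PREEMPT_RT_FULL symbol; under PREEMPT_RT these spinning slow paths are not built because rwlock_t is substituted by a sleeping lock. As a reminder of what each BUILD_LOCK_OPS() invocation generates, here is a rough sketch of one generated function (an approximation of the macro defined earlier in this file, not its full expansion):

```c
/*
 * Rough shape of one function generated by BUILD_LOCK_OPS(read, rwlock):
 * spin on the trylock, re-enabling preemption between attempts so a
 * higher-priority task can still run while we wait for the lock.
 */
void __lockfunc __raw_read_lock(rwlock_t *lock)
{
	for (;;) {
		preempt_disable();
		if (likely(do_raw_read_trylock(lock)))
			break;
		preempt_enable();

		arch_read_relax(&lock->raw_lock);
	}
}
```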
@@ -205,7 +212,7 @@
 EXPORT_SYMBOL(_raw_spin_unlock_bh);
 #endif
 
-#ifndef CONFIG_PREEMPT_RT_FULL
+#ifndef CONFIG_PREEMPT_RT
 
 #ifndef CONFIG_INLINE_READ_TRYLOCK
 int __lockfunc _raw_read_trylock(rwlock_t *lock)
@@ -351,7 +358,7 @@
 EXPORT_SYMBOL(_raw_write_unlock_bh);
 #endif
 
-#endif /* !PREEMPT_RT_FULL */
+#endif /* !PREEMPT_RT */
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 