.. | .. |
---|
99 | 99 | */ |
---|
100 | 100 | static bool rcu_read_lock_held_common(bool *ret) |
---|
101 | 101 | { |
---|
| 102 | + if (irqs_pipelined() && |
---|
| 103 | + (hard_irqs_disabled() || running_oob())) { |
---|
| 104 | + *ret = 1; |
---|
| 105 | + return true; |
---|
| 106 | + } |
---|
102 | 107 | if (!debug_lockdep_rcu_enabled()) { |
---|
103 | 108 | *ret = true; |
---|
104 | 109 | return true; |
---|
.. | .. |
---|
209 | 214 | |
---|
210 | 215 | #endif /* #ifndef CONFIG_TINY_RCU */ |
---|
211 | 216 | |
---|
| 217 | +#ifdef CONFIG_IRQ_PIPELINE |
---|
| 218 | + |
---|
/*
 * Tell RCU about an upcoming read-side critical section entered from
 * the out-of-band stage, piggybacking on rcu_nmi_enter() so that RCU
 * observes the out-of-band reader. Does nothing when called in-band
 * or from the pipeline entry context.
 */
void rcu_oob_prepare_lock(void)
{
	if (on_pipeline_entry())
		return;

	if (running_oob())
		rcu_nmi_enter();
}
EXPORT_SYMBOL_GPL(rcu_oob_prepare_lock);
---|
| 229 | + |
---|
/*
 * Converse of rcu_oob_prepare_lock(): unwind the rcu_nmi_enter()
 * accounting once the RCU read lock has been dropped out-of-band.
 * Does nothing when called in-band or from the pipeline entry
 * context.
 */
void rcu_oob_finish_lock(void)
{
	if (on_pipeline_entry())
		return;

	if (running_oob())
		rcu_nmi_exit();
}
EXPORT_SYMBOL_GPL(rcu_oob_finish_lock);
---|
| 240 | + |
---|
| 241 | +#endif /* CONFIG_IRQ_PIPELINE */ |
---|
| 242 | + |
---|
212 | 243 | /* |
---|
213 | 244 | * Test each non-SRCU synchronous grace-period wait API. This is |
---|
214 | 245 | * useful just after a change in mode for these primitives, and |
---|