.. | .. |
13 | 13 | #include <linux/kernel_stat.h> |
14 | 14 | #include <linux/interrupt.h> |
15 | 15 | #include <linux/init.h> |
16 | | -#include <linux/local_lock.h> |
17 | 16 | #include <linux/mm.h> |
18 | 17 | #include <linux/notifier.h> |
19 | 18 | #include <linux/percpu.h> |
.. | .. |
26 | 25 | #include <linux/smpboot.h> |
27 | 26 | #include <linux/tick.h> |
28 | 27 | #include <linux/irq.h> |
29 | | -#include <linux/wait_bit.h> |
30 | 28 | |
31 | 29 | #define CREATE_TRACE_POINTS |
32 | 30 | #include <trace/events/irq.h> |
.. | .. |
90 | 88 | } |
91 | 89 | |
92 | 90 | /* |
93 | | - * If ksoftirqd is scheduled, we do not want to process pending softirqs |
94 | | - * right now. Let ksoftirqd handle this at its own rate, to get fairness, |
95 | | - * unless we're doing some of the synchronous softirqs. |
| 91 | + * preempt_count and SOFTIRQ_OFFSET usage: |
| 92 | + * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving |
| 93 | + * softirq processing. |
| 94 | + * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET) |
| 95 | + * on local_bh_disable or local_bh_enable. |
| 96 | + * This lets us distinguish between whether we are currently processing |
| 97 | + * softirq and whether we just have bh disabled. |
96 | 98 | */ |
97 | | -#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ)) |
98 | | -static bool ksoftirqd_running(unsigned long pending) |
99 | | -{ |
100 | | - struct task_struct *tsk = __this_cpu_read(ksoftirqd); |
101 | 99 | |
102 | | - if (pending & SOFTIRQ_NOW_MASK) |
103 | | - return false; |
104 | | - return tsk && (tsk->state == TASK_RUNNING) && |
105 | | - !__kthread_should_park(tsk); |
106 | | -} |
107 | | - |
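The replacement comment above describes how a single counter encodes both "currently serving softirq" and "bottom halves disabled". As a rough illustration (not part of this patch), here is a minimal user-space model of that arithmetic, assuming the standard SOFTIRQ_SHIFT = 8 layout from <linux/preempt.h>; the names mirror the kernel macros, but everything below is only a sketch:

```c
/* User-space model of the softirq bits in preempt_count (illustration only). */
#include <assert.h>
#include <stdio.h>

#define SOFTIRQ_SHIFT		8
#define SOFTIRQ_OFFSET		(1UL << SOFTIRQ_SHIFT)
#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
#define SOFTIRQ_MASK		(0xffUL << SOFTIRQ_SHIFT)

static unsigned long preempt_count;

#define softirq_count()		(preempt_count & SOFTIRQ_MASK)
#define in_softirq()		(softirq_count() != 0)
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)

int main(void)
{
	/* local_bh_disable(): adds SOFTIRQ_DISABLE_OFFSET */
	preempt_count += SOFTIRQ_DISABLE_OFFSET;
	assert(in_softirq() && !in_serving_softirq());

	/* entering softirq processing: adds SOFTIRQ_OFFSET */
	preempt_count += SOFTIRQ_OFFSET;
	assert(in_serving_softirq());

	/* leave softirq processing, then local_bh_enable() */
	preempt_count -= SOFTIRQ_OFFSET;
	preempt_count -= SOFTIRQ_DISABLE_OFFSET;
	assert(!in_softirq());

	printf("counting model holds\n");
	return 0;
}
```

Because local_bh_disable() adds 2 * SOFTIRQ_OFFSET, the low softirq bit stays clear until softirq processing itself bumps the count by SOFTIRQ_OFFSET, which is exactly what in_serving_softirq() tests.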
| 100 | +/* |
| 101 | + * This one is for softirq.c-internal use, |
| 102 | + * where hardirqs are disabled legitimately: |
| 103 | + */ |
108 | 104 | #ifdef CONFIG_TRACE_IRQFLAGS |
| 105 | + |
109 | 106 | DEFINE_PER_CPU(int, hardirqs_enabled); |
110 | 107 | DEFINE_PER_CPU(int, hardirq_context); |
111 | 108 | EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled); |
112 | 109 | EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context); |
113 | | -#endif |
114 | 110 | |
115 | | -/* |
116 | | - * SOFTIRQ_OFFSET usage: |
117 | | - * |
118 | | - * On !RT kernels 'count' is the preempt counter, on RT kernels this applies |
119 | | - * to a per CPU counter and to task::softirqs_disabled_cnt. |
120 | | - * |
121 | | - * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq |
122 | | - * processing. |
123 | | - * |
124 | | - * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET) |
125 | | - * on local_bh_disable or local_bh_enable. |
126 | | - * |
127 | | - * This lets us distinguish between whether we are currently processing |
128 | | - * softirq and whether we just have bh disabled. |
129 | | - */ |
130 | | -#ifdef CONFIG_PREEMPT_RT |
131 | | - |
132 | | -/* |
133 | | - * RT accounts for BH disabled sections in task::softirqs_disabled_cnt and |
134 | | - * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a |
135 | | - * softirq disabled section to be preempted. |
136 | | - * |
137 | | - * The per task counter is used for softirq_count(), in_softirq() and |
138 | | - * in_serving_softirqs() because these counts are only valid when the task |
139 | | - * holding softirq_ctrl::lock is running. |
140 | | - * |
141 | | - * The per CPU counter prevents pointless wakeups of ksoftirqd in case that |
142 | | - * the task which is in a softirq disabled section is preempted or blocks. |
143 | | - */ |
144 | | -struct softirq_ctrl { |
145 | | - local_lock_t lock; |
146 | | - int cnt; |
147 | | -}; |
148 | | - |
149 | | -static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = { |
150 | | - .lock = INIT_LOCAL_LOCK(softirq_ctrl.lock), |
151 | | -}; |
152 | | - |
153 | | -/** |
154 | | - * local_bh_blocked() - Check for idle whether BH processing is blocked |
155 | | - * |
156 | | - * Returns false if the per CPU softirq::cnt is 0 otherwise true. |
157 | | - * |
158 | | - * This is invoked from the idle task to guard against false positive |
159 | | - * softirq pending warnings, which would happen when the task which holds |
160 | | - * softirq_ctrl::lock was the only running task on the CPU and blocks on |
161 | | - * some other lock. |
162 | | - */ |
163 | | -bool local_bh_blocked(void) |
164 | | -{ |
165 | | - return __this_cpu_read(softirq_ctrl.cnt) != 0; |
166 | | -} |
167 | | - |
168 | | -void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) |
169 | | -{ |
170 | | - unsigned long flags; |
171 | | - int newcnt; |
172 | | - |
173 | | - WARN_ON_ONCE(in_hardirq()); |
174 | | - |
175 | | - /* First entry of a task into a BH disabled section? */ |
176 | | - if (!current->softirq_disable_cnt) { |
177 | | - if (preemptible()) { |
178 | | - local_lock(&softirq_ctrl.lock); |
179 | | - /* Required to meet the RCU bottomhalf requirements. */ |
180 | | - rcu_read_lock(); |
181 | | - } else { |
182 | | - DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt)); |
183 | | - } |
184 | | - } |
185 | | - |
186 | | - /* |
187 | | - * Track the per CPU softirq disabled state. On RT this is per CPU |
188 | | - * state to allow preemption of bottom half disabled sections. |
189 | | - */ |
190 | | - newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt); |
191 | | - /* |
192 | | - * Reflect the result in the task state to prevent recursion on the |
193 | | - * local lock and to make softirq_count() & al work. |
194 | | - */ |
195 | | - current->softirq_disable_cnt = newcnt; |
196 | | - |
197 | | - if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) { |
198 | | - raw_local_irq_save(flags); |
199 | | - lockdep_softirqs_off(ip); |
200 | | - raw_local_irq_restore(flags); |
201 | | - } |
202 | | -} |
203 | | -EXPORT_SYMBOL(__local_bh_disable_ip); |
204 | | - |
205 | | -static void __local_bh_enable(unsigned int cnt, bool unlock) |
206 | | -{ |
207 | | - unsigned long flags; |
208 | | - int newcnt; |
209 | | - |
210 | | - DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt != |
211 | | - this_cpu_read(softirq_ctrl.cnt)); |
212 | | - |
213 | | - if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) { |
214 | | - raw_local_irq_save(flags); |
215 | | - lockdep_softirqs_on(_RET_IP_); |
216 | | - raw_local_irq_restore(flags); |
217 | | - } |
218 | | - |
219 | | - newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt); |
220 | | - current->softirq_disable_cnt = newcnt; |
221 | | - |
222 | | - if (!newcnt && unlock) { |
223 | | - rcu_read_unlock(); |
224 | | - local_unlock(&softirq_ctrl.lock); |
225 | | - } |
226 | | -} |
227 | | - |
228 | | -void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) |
229 | | -{ |
230 | | - bool preempt_on = preemptible(); |
231 | | - unsigned long flags; |
232 | | - u32 pending; |
233 | | - int curcnt; |
234 | | - |
235 | | - WARN_ON_ONCE(in_irq()); |
236 | | - lockdep_assert_irqs_enabled(); |
237 | | - |
238 | | - local_irq_save(flags); |
239 | | - curcnt = __this_cpu_read(softirq_ctrl.cnt); |
240 | | - |
241 | | - /* |
242 | | - * If this is not reenabling soft interrupts, no point in trying to |
243 | | - * run pending ones. |
244 | | - */ |
245 | | - if (curcnt != cnt) |
246 | | - goto out; |
247 | | - |
248 | | - pending = local_softirq_pending(); |
249 | | - if (!pending || ksoftirqd_running(pending)) |
250 | | - goto out; |
251 | | - |
252 | | - /* |
253 | | - * If this was called from non preemptible context, wake up the |
254 | | - * softirq daemon. |
255 | | - */ |
256 | | - if (!preempt_on) { |
257 | | - wakeup_softirqd(); |
258 | | - goto out; |
259 | | - } |
260 | | - |
261 | | - /* |
262 | | - * Adjust softirq count to SOFTIRQ_OFFSET which makes |
263 | | - * in_serving_softirq() become true. |
264 | | - */ |
265 | | - cnt = SOFTIRQ_OFFSET; |
266 | | - __local_bh_enable(cnt, false); |
267 | | - __do_softirq(); |
268 | | - |
269 | | -out: |
270 | | - __local_bh_enable(cnt, preempt_on); |
271 | | - local_irq_restore(flags); |
272 | | -} |
273 | | -EXPORT_SYMBOL(__local_bh_enable_ip); |
274 | | - |
275 | | -/* |
276 | | - * Invoked from ksoftirqd_run() outside of the interrupt disabled section |
277 | | - * to acquire the per CPU local lock for reentrancy protection. |
278 | | - */ |
279 | | -static inline void ksoftirqd_run_begin(void) |
280 | | -{ |
281 | | - __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET); |
282 | | - local_irq_disable(); |
283 | | -} |
284 | | - |
285 | | -/* Counterpart to ksoftirqd_run_begin() */ |
286 | | -static inline void ksoftirqd_run_end(void) |
287 | | -{ |
288 | | - __local_bh_enable(SOFTIRQ_OFFSET, true); |
289 | | - WARN_ON_ONCE(in_interrupt()); |
290 | | - local_irq_enable(); |
291 | | -} |
292 | | - |
293 | | -static inline void softirq_handle_begin(void) { } |
294 | | -static inline void softirq_handle_end(void) { } |
295 | | - |
296 | | -static inline bool should_wake_ksoftirqd(void) |
297 | | -{ |
298 | | - return !this_cpu_read(softirq_ctrl.cnt); |
299 | | -} |
300 | | - |
301 | | -static inline void invoke_softirq(void) |
302 | | -{ |
303 | | - if (should_wake_ksoftirqd()) |
304 | | - wakeup_softirqd(); |
305 | | -} |
306 | | - |
307 | | -#else /* CONFIG_PREEMPT_RT */ |
308 | | - |
309 | | -/* |
310 | | - * This one is for softirq.c-internal use, where hardirqs are disabled |
311 | | - * legitimately: |
312 | | - */ |
313 | | -#ifdef CONFIG_TRACE_IRQFLAGS |
314 | 111 | void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) |
315 | 112 | { |
316 | 113 | unsigned long flags; |
.. | .. |
401 | 198 | } |
402 | 199 | EXPORT_SYMBOL(__local_bh_enable_ip); |
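Whichever variant is compiled in, __local_bh_disable_ip()/__local_bh_enable_ip() back the local_bh_disable()/local_bh_enable() pair used throughout the kernel. A minimal usage sketch follows; the list, lock and function names are hypothetical, not from this file:

```c
/* Hypothetical example: protect data shared with a tasklet (bottom half)
 * while task-context code drains it. */
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(my_rx_list);		/* assumed to be filled by a tasklet */
static DEFINE_SPINLOCK(my_rx_lock);

static void drain_rx_list(void)
{
	struct list_head *cursor, *tmp;

	local_bh_disable();		/* softirqs (and the tasklet) cannot run on this CPU */
	spin_lock(&my_rx_lock);		/* still needed against other CPUs */

	list_for_each_safe(cursor, tmp, &my_rx_list)
		list_del(cursor);	/* placeholder for real per-entry work */

	spin_unlock(&my_rx_lock);
	local_bh_enable();		/* may run softirqs left pending meanwhile */
}
```

spin_lock_bh() bundles both steps; they are spelled out here only to show where the BH-disable path is entered.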
403 | 200 | |
404 | | -static inline void softirq_handle_begin(void) |
405 | | -{ |
406 | | - __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET); |
407 | | -} |
408 | | - |
409 | | -static inline void softirq_handle_end(void) |
410 | | -{ |
411 | | - __local_bh_enable(SOFTIRQ_OFFSET); |
412 | | - WARN_ON_ONCE(in_interrupt()); |
413 | | -} |
414 | | - |
415 | | -static inline void ksoftirqd_run_begin(void) |
416 | | -{ |
417 | | - local_irq_disable(); |
418 | | -} |
419 | | - |
420 | | -static inline void ksoftirqd_run_end(void) |
421 | | -{ |
422 | | - local_irq_enable(); |
423 | | -} |
424 | | - |
425 | | -static inline bool should_wake_ksoftirqd(void) |
426 | | -{ |
427 | | - return true; |
428 | | -} |
429 | | - |
430 | | -static inline void invoke_softirq(void) |
431 | | -{ |
432 | | - if (ksoftirqd_running(local_softirq_pending())) |
433 | | - return; |
434 | | - |
435 | | - if (!force_irqthreads) { |
436 | | -#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK |
437 | | - /* |
438 | | - * We can safely execute softirq on the current stack if |
439 | | - * it is the irq stack, because it should be near empty |
440 | | - * at this stage. |
441 | | - */ |
442 | | - __do_softirq(); |
443 | | -#else |
444 | | - /* |
445 | | - * Otherwise, irq_exit() is called on the task stack that can |
446 | | - * be potentially deep already. So call softirq in its own stack |
447 | | - * to prevent from any overrun. |
448 | | - */ |
449 | | - do_softirq_own_stack(); |
450 | | -#endif |
451 | | - } else { |
452 | | - wakeup_softirqd(); |
453 | | - } |
454 | | -} |
455 | | - |
456 | | -asmlinkage __visible void do_softirq(void) |
457 | | -{ |
458 | | - __u32 pending; |
459 | | - unsigned long flags; |
460 | | - |
461 | | - if (in_interrupt()) |
462 | | - return; |
463 | | - |
464 | | - local_irq_save(flags); |
465 | | - |
466 | | - pending = local_softirq_pending(); |
467 | | - |
468 | | - if (pending && !ksoftirqd_running(pending)) |
469 | | - do_softirq_own_stack(); |
470 | | - |
471 | | - local_irq_restore(flags); |
472 | | -} |
473 | | - |
474 | | -#endif /* !CONFIG_PREEMPT_RT */ |
475 | | - |
476 | 201 | /* |
477 | 202 | * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times, |
478 | 203 | * but break the loop if need_resched() is set or after 2 ms. |
.. | .. |
552 | 277 | |
553 | 278 | pending = local_softirq_pending(); |
554 | 279 | deferred = softirq_deferred_for_rt(pending); |
555 | | - softirq_handle_begin(); |
| 280 | + account_irq_enter_time(current); |
| 281 | + __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET); |
556 | 282 | in_hardirq = lockdep_softirq_start(); |
557 | | - account_softirq_enter(current); |
558 | 283 | |
559 | 284 | restart: |
560 | 285 | /* Reset the pending bitmask before enabling irqs */ |
.. | .. |
590 | 315 | } |
591 | 316 | |
592 | 317 | __this_cpu_write(active_softirqs, 0); |
593 | | - if (!IS_ENABLED(CONFIG_PREEMPT_RT) && |
594 | | - __this_cpu_read(ksoftirqd) == current) |
| 318 | + if (__this_cpu_read(ksoftirqd) == current) |
595 | 319 | rcu_softirq_qs(); |
596 | | - |
597 | 320 | local_irq_disable(); |
598 | 321 | |
599 | 322 | pending = local_softirq_pending(); |
.. | .. |
613 | 336 | if (pending | deferred) |
614 | 337 | wakeup_softirqd(); |
615 | 338 | #endif |
616 | | - account_softirq_exit(current); |
617 | 339 | lockdep_softirq_end(in_hardirq); |
618 | | - softirq_handle_end(); |
| 340 | + account_irq_exit_time(current); |
| 341 | + __local_bh_enable(SOFTIRQ_OFFSET); |
| 342 | + WARN_ON_ONCE(in_interrupt()); |
619 | 343 | current_restore_flags(old_flags, PF_MEMALLOC); |
| 344 | +} |
| 345 | + |
| 346 | +asmlinkage __visible void do_softirq(void) |
| 347 | +{ |
| 348 | + __u32 pending; |
| 349 | + unsigned long flags; |
| 350 | + |
| 351 | + if (in_interrupt()) |
| 352 | + return; |
| 353 | + |
| 354 | + local_irq_save(flags); |
| 355 | + |
| 356 | + pending = local_softirq_pending(); |
| 357 | + |
| 358 | + if (pending) |
| 359 | + do_softirq_own_stack(); |
| 360 | + |
| 361 | + local_irq_restore(flags); |
620 | 362 | } |
621 | 363 | |
622 | 364 | /** |
.. | .. |
624 | 366 | */ |
625 | 367 | void irq_enter_rcu(void) |
626 | 368 | { |
627 | | - __irq_enter_raw(); |
628 | | - |
629 | | - if (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)) |
630 | | - tick_irq_enter(); |
631 | | - |
632 | | - account_hardirq_enter(current); |
| 369 | + if (is_idle_task(current) && !in_interrupt()) { |
| 370 | + /* |
| 371 | + * Prevent raise_softirq from needlessly waking up ksoftirqd |
| 372 | + * here, as softirq will be serviced on return from interrupt. |
| 373 | + */ |
| 374 | + local_bh_disable(); |
| 375 | + tick_irq_enter(); |
| 376 | + _local_bh_enable(); |
| 377 | + } |
| 378 | + __irq_enter(); |
633 | 379 | } |
634 | 380 | |
635 | 381 | /** |
.. | .. |
639 | 385 | { |
640 | 386 | rcu_irq_enter(); |
641 | 387 | irq_enter_rcu(); |
| 388 | +} |
| 389 | + |
| 390 | +static inline void invoke_softirq(void) |
| 391 | +{ |
| 392 | + if (!force_irqthreads) { |
| 393 | +#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK |
| 394 | + /* |
| 395 | + * We can safely execute softirq on the current stack if |
| 396 | + * it is the irq stack, because it should be near empty |
| 397 | + * at this stage. |
| 398 | + */ |
| 399 | + __do_softirq(); |
| 400 | +#else |
| 401 | + /* |
| 402 | + * Otherwise, irq_exit() is called on the task stack that can |
| 403 | + * be potentially deep already. So call softirq in its own stack |
| 404 | + * to prevent from any overrun. |
| 405 | + */ |
| 406 | + do_softirq_own_stack(); |
| 407 | +#endif |
| 408 | + } else { |
| 409 | + wakeup_softirqd(); |
| 410 | + } |
| 411 | +} |
642 | 411 | } |
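The re-added invoke_softirq() picks between running on the current (irq) stack, switching to a dedicated softirq stack, or deferring to ksoftirqd when interrupts are forced into threads. For architectures without their own softirq stack, do_softirq_own_stack() typically falls back to running __do_softirq() on the current stack; roughly like the sketch below, which is an assumption about the generic definition in <linux/interrupt.h> and not part of this diff:

```c
/* Rough sketch of the generic fallback when the architecture does not
 * provide its own do_softirq_own_stack() (assumption, not from this patch). */
#ifndef __ARCH_HAS_DO_SOFTIRQ
static inline void do_softirq_own_stack(void)
{
	__do_softirq();
}
#endif
```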
643 | 412 | |
644 | 413 | static inline void tick_irq_exit(void) |
.. | .. |
661 | 430 | #else |
662 | 431 | lockdep_assert_irqs_disabled(); |
663 | 432 | #endif |
664 | | - account_hardirq_exit(current); |
| 433 | + account_irq_exit_time(current); |
665 | 434 | preempt_count_sub(HARDIRQ_OFFSET); |
666 | 435 | if (!in_interrupt() && local_softirq_pending()) |
667 | 436 | invoke_softirq(); |
.. | .. |
710 | 479 | * Otherwise we wake up ksoftirqd to make sure we |
711 | 480 | * schedule the softirq soon. |
712 | 481 | */ |
713 | | - if (!in_interrupt() && should_wake_ksoftirqd()) |
| 482 | + if (!in_interrupt()) |
714 | 483 | wakeup_softirqd(); |
715 | 484 | } |
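This wakeup only happens when raise_softirq_irqoff() is not called from interrupt context; on the hardirq path the pending bits are serviced at irq_exit() anyway. A hypothetical illustration of the process-context case (the function name is made up; real users normally go through helpers such as tasklet_schedule()):

```c
/* Hypothetical: raising a softirq from process context.  raise_softirq()
 * disables interrupts around raise_softirq_irqoff(), and because we are
 * not in_interrupt() here, ksoftirqd is woken to service it soon. */
static void kick_bottom_half_from_task(void)
{
	raise_softirq(TASKLET_SOFTIRQ);
}
```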
716 | 485 | |
.. | .. |
776 | 545 | } |
777 | 546 | EXPORT_SYMBOL(__tasklet_hi_schedule); |
778 | 547 | |
779 | | -static inline bool tasklet_clear_sched(struct tasklet_struct *t) |
780 | | -{ |
781 | | - if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) { |
782 | | - wake_up_var(&t->state); |
783 | | - return true; |
784 | | - } |
785 | | - |
786 | | - return false; |
787 | | -} |
788 | | - |
789 | 548 | static void tasklet_action_common(struct softirq_action *a, |
790 | 549 | struct tasklet_head *tl_head, |
791 | 550 | unsigned int softirq_nr) |
.. | .. |
805 | 564 | |
806 | 565 | if (tasklet_trylock(t)) { |
807 | 566 | if (!atomic_read(&t->count)) { |
808 | | - if (!tasklet_clear_sched(t)) |
| 567 | + if (!test_and_clear_bit(TASKLET_STATE_SCHED, |
| 568 | + &t->state)) |
809 | 569 | BUG(); |
810 | 570 | if (t->use_callback) { |
811 | 571 | trace_tasklet_entry(t->callback); |
.. | .. |
865 | 625 | } |
866 | 626 | EXPORT_SYMBOL(tasklet_init); |
867 | 627 | |
868 | | -#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) |
869 | | -/* |
870 | | - * Do not use in new code. Waiting for tasklets from atomic contexts is |
871 | | - * error prone and should be avoided. |
872 | | - */ |
873 | | -void tasklet_unlock_spin_wait(struct tasklet_struct *t) |
874 | | -{ |
875 | | - while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { |
876 | | - if (IS_ENABLED(CONFIG_PREEMPT_RT)) { |
877 | | - /* |
878 | | - * Prevent a live lock when current preempted soft |
879 | | - * interrupt processing or prevents ksoftirqd from |
880 | | - * running. If the tasklet runs on a different CPU |
881 | | - * then this has no effect other than doing the BH |
882 | | - * disable/enable dance for nothing. |
883 | | - */ |
884 | | - local_bh_disable(); |
885 | | - local_bh_enable(); |
886 | | - } else { |
887 | | - cpu_relax(); |
888 | | - } |
889 | | - } |
890 | | -} |
891 | | -EXPORT_SYMBOL(tasklet_unlock_spin_wait); |
892 | | -#endif |
893 | | - |
894 | 628 | void tasklet_kill(struct tasklet_struct *t) |
895 | 629 | { |
896 | 630 | if (in_interrupt()) |
897 | 631 | pr_notice("Attempt to kill tasklet from interrupt\n"); |
898 | 632 | |
899 | | - while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) |
900 | | - wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state)); |
901 | | - |
| 633 | + while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { |
| 634 | + do { |
| 635 | + yield(); |
| 636 | + } while (test_bit(TASKLET_STATE_SCHED, &t->state)); |
| 637 | + } |
902 | 638 | tasklet_unlock_wait(t); |
903 | | - tasklet_clear_sched(t); |
| 639 | + clear_bit(TASKLET_STATE_SCHED, &t->state); |
904 | 640 | } |
905 | 641 | EXPORT_SYMBOL(tasklet_kill); |
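tasklet_kill() is the teardown half of the tasklet API: it yields until any in-flight schedule completes, waits for a running instance, then clears the SCHED bit. A minimal, hypothetical driver-style lifecycle using it (all names below are made up, classic tasklet_init() API assumed):

```c
#include <linux/interrupt.h>

/* Runs in softirq context, serialized against itself. */
static void my_tasklet_fn(unsigned long data)
{
	/* bottom-half work goes here */
}

static struct tasklet_struct my_tasklet;

static void my_dev_setup(void)
{
	tasklet_init(&my_tasklet, my_tasklet_fn, 0);
}

/* Typically called from the hardirq handler. */
static void my_irq_bottom_half_kick(void)
{
	tasklet_schedule(&my_tasklet);	/* sets SCHED, raises TASKLET_SOFTIRQ */
}

static void my_dev_teardown(void)
{
	/*
	 * Must be called from process context: tasklet_kill() may yield
	 * while waiting for a pending or running instance to finish.
	 */
	tasklet_kill(&my_tasklet);
}
```

Because the fallback loop yields, tasklet_kill() must not be called from interrupt context, which is exactly what the pr_notice() above warns about.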
906 | | - |
907 | | -#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) |
908 | | -void tasklet_unlock(struct tasklet_struct *t) |
909 | | -{ |
910 | | - smp_mb__before_atomic(); |
911 | | - clear_bit(TASKLET_STATE_RUN, &t->state); |
912 | | - smp_mb__after_atomic(); |
913 | | - wake_up_var(&t->state); |
914 | | -} |
915 | | -EXPORT_SYMBOL_GPL(tasklet_unlock); |
916 | | - |
917 | | -void tasklet_unlock_wait(struct tasklet_struct *t) |
918 | | -{ |
919 | | - wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state)); |
920 | | -} |
921 | | -EXPORT_SYMBOL_GPL(tasklet_unlock_wait); |
922 | | -#endif |
923 | 642 | |
924 | 643 | void __init softirq_init(void) |
925 | 644 | { |
.. | .. |
943 | 662 | |
944 | 663 | static void run_ksoftirqd(unsigned int cpu) |
945 | 664 | { |
946 | | - ksoftirqd_run_begin(); |
| 665 | + local_irq_disable(); |
947 | 666 | if (local_softirq_pending()) { |
948 | 667 | /* |
949 | 668 | * We can safely run softirq on inline stack, as we are not deep |
950 | 669 | * in the task stack here. |
951 | 670 | */ |
952 | 671 | __do_softirq(); |
953 | | - ksoftirqd_run_end(); |
| 672 | + local_irq_enable(); |
954 | 673 | cond_resched(); |
955 | 674 | return; |
956 | 675 | } |
957 | | - ksoftirqd_run_end(); |
| 676 | + local_irq_enable(); |
958 | 677 | } |
959 | 678 | |
960 | 679 | #ifdef CONFIG_HOTPLUG_CPU |