@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * linux/kernel/softirq.c
  *
  * Copyright (C) 1992 Linus Torvalds
- *
- * Distribute under GPLv2.
  *
  * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
  */
---|
@@ -14,6 +13,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
+#include <linux/local_lock.h>
 #include <linux/mm.h>
 #include <linux/notifier.h>
 #include <linux/percpu.h>
---|
@@ -26,9 +26,13 @@
 #include <linux/smpboot.h>
 #include <linux/tick.h>
 #include <linux/irq.h>
+#include <linux/wait_bit.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/irq.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(irq_handler_entry);
+EXPORT_TRACEPOINT_SYMBOL_GPL(irq_handler_exit);
 
 /*
    - No shared variables, all the data are CPU local.
---|
@@ -56,6 +60,14 @@
 static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+EXPORT_PER_CPU_SYMBOL_GPL(ksoftirqd);
+
+/*
+ * active_softirqs -- per cpu, a mask of softirqs that are being handled,
+ * with the expectation that approximate answers are acceptable and therefore
+ * no synchronization.
+ */
+DEFINE_PER_CPU(__u32, active_softirqs);
 
 const char * const softirq_to_name[NR_SOFTIRQS] = {
 	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
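Since `active_softirqs` is updated with no synchronization, remote readers only ever get a hint. A minimal sketch of how a hypothetical consumer might use it (the helper and the choice of softirqs below are illustrative, not part of this patch):

```c
#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

DECLARE_PER_CPU(__u32, active_softirqs);

/*
 * Hypothetical consumer: does @cpu currently appear to be handling one
 * of the potentially long-running softirqs? Racy by design -- the mask
 * may already be stale by the time it is read.
 */
static bool cpu_busy_with_long_softirqs(int cpu)
{
	__u32 mask = per_cpu(active_softirqs, cpu);

	return mask & (BIT(NET_TX_SOFTIRQ) | BIT(NET_RX_SOFTIRQ) |
		       BIT(BLOCK_SOFTIRQ));
}
```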
---|
@@ -89,22 +101,214 @@
 
 	if (pending & SOFTIRQ_NOW_MASK)
 		return false;
-	return tsk && (tsk->state == TASK_RUNNING);
+	return tsk && (tsk->state == TASK_RUNNING) &&
+		!__kthread_should_park(tsk);
 }
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+DEFINE_PER_CPU(int, hardirqs_enabled);
+DEFINE_PER_CPU(int, hardirq_context);
+EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
+EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
+#endif
+
 /*
- * preempt_count and SOFTIRQ_OFFSET usage:
- * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
- *   softirq processing.
- * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
+ * SOFTIRQ_OFFSET usage:
+ *
+ * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
+ * to a per CPU counter and to task::softirq_disable_cnt.
+ *
+ * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
+ *   processing.
+ *
+ * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
  *   on local_bh_disable or local_bh_enable.
+ *
  * This lets us distinguish between whether we are currently processing
  * softirq and whether we just have bh disabled.
  */
+#ifdef CONFIG_PREEMPT_RT
 
 /*
- * This one is for softirq.c-internal use,
- * where hardirqs are disabled legitimately:
+ * RT accounts for BH disabled sections in task::softirq_disable_cnt and
+ * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
+ * softirq disabled section to be preempted.
+ *
+ * The per task counter is used for softirq_count(), in_softirq() and
+ * in_serving_softirq() because these counts are only valid when the task
+ * holding softirq_ctrl::lock is running.
+ *
+ * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
+ * the task which is in a softirq disabled section is preempted or blocks.
+ */
+struct softirq_ctrl {
+	local_lock_t	lock;
+	int		cnt;
+};
+
+static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
+	.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
+};
+
+/**
+ * local_bh_blocked() - Check for idle whether BH processing is blocked
+ *
+ * Returns false if the per CPU softirq_ctrl::cnt is 0 otherwise true.
+ *
+ * This is invoked from the idle task to guard against false positive
+ * softirq pending warnings, which would happen when the task which holds
+ * softirq_ctrl::lock was the only running task on the CPU and blocks on
+ * some other lock.
+ */
+bool local_bh_blocked(void)
+{
+	return __this_cpu_read(softirq_ctrl.cnt) != 0;
+}
+
+void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+{
+	unsigned long flags;
+	int newcnt;
+
+	WARN_ON_ONCE(in_hardirq());
+
+	/* First entry of a task into a BH disabled section? */
+	if (!current->softirq_disable_cnt) {
+		if (preemptible()) {
+			local_lock(&softirq_ctrl.lock);
+			/* Required to meet the RCU bottomhalf requirements. */
+			rcu_read_lock();
+		} else {
+			DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
+		}
+	}
+
+	/*
+	 * Track the per CPU softirq disabled state. On RT this is per CPU
+	 * state to allow preemption of bottom half disabled sections.
+	 */
+	newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
+	/*
+	 * Reflect the result in the task state to prevent recursion on the
+	 * local lock and to make softirq_count() & al work.
+	 */
+	current->softirq_disable_cnt = newcnt;
+
+	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
+		raw_local_irq_save(flags);
+		lockdep_softirqs_off(ip);
+		raw_local_irq_restore(flags);
+	}
+}
+EXPORT_SYMBOL(__local_bh_disable_ip);
+
+static void __local_bh_enable(unsigned int cnt, bool unlock)
+{
+	unsigned long flags;
+	int newcnt;
+
+	DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
+			    this_cpu_read(softirq_ctrl.cnt));
+
+	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
+		raw_local_irq_save(flags);
+		lockdep_softirqs_on(_RET_IP_);
+		raw_local_irq_restore(flags);
+	}
+
+	newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
+	current->softirq_disable_cnt = newcnt;
+
+	if (!newcnt && unlock) {
+		rcu_read_unlock();
+		local_unlock(&softirq_ctrl.lock);
+	}
+}
+
+void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
+{
+	bool preempt_on = preemptible();
+	unsigned long flags;
+	u32 pending;
+	int curcnt;
+
+	WARN_ON_ONCE(in_irq());
+	lockdep_assert_irqs_enabled();
+
+	local_irq_save(flags);
+	curcnt = __this_cpu_read(softirq_ctrl.cnt);
+
+	/*
+	 * If this is not reenabling soft interrupts, no point in trying to
+	 * run pending ones.
+	 */
+	if (curcnt != cnt)
+		goto out;
+
+	pending = local_softirq_pending();
+	if (!pending || ksoftirqd_running(pending))
+		goto out;
+
+	/*
+	 * If this was called from non preemptible context, wake up the
+	 * softirq daemon.
+	 */
+	if (!preempt_on) {
+		wakeup_softirqd();
+		goto out;
+	}
+
+	/*
+	 * Adjust softirq count to SOFTIRQ_OFFSET which makes
+	 * in_serving_softirq() become true.
+	 */
+	cnt = SOFTIRQ_OFFSET;
+	__local_bh_enable(cnt, false);
+	__do_softirq();
+
+out:
+	__local_bh_enable(cnt, preempt_on);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__local_bh_enable_ip);
+
+/*
+ * Invoked from ksoftirqd_run() outside of the interrupt disabled section
+ * to acquire the per CPU local lock for reentrancy protection.
+ */
+static inline void ksoftirqd_run_begin(void)
+{
+	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
+	local_irq_disable();
+}
+
+/* Counterpart to ksoftirqd_run_begin() */
+static inline void ksoftirqd_run_end(void)
+{
+	__local_bh_enable(SOFTIRQ_OFFSET, true);
+	WARN_ON_ONCE(in_interrupt());
+	local_irq_enable();
+}
+
+static inline void softirq_handle_begin(void) { }
+static inline void softirq_handle_end(void) { }
+
+static inline bool should_wake_ksoftirqd(void)
+{
+	return !this_cpu_read(softirq_ctrl.cnt);
+}
+
+static inline void invoke_softirq(void)
+{
+	if (should_wake_ksoftirqd())
+		wakeup_softirqd();
+}
+
+#else /* CONFIG_PREEMPT_RT */
+
+/*
+ * This one is for softirq.c-internal use, where hardirqs are disabled
+ * legitimately:
  */
 #ifdef CONFIG_TRACE_IRQFLAGS
 void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
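The `softirq_ctrl` pattern above — a `local_lock_t` paired with per-CPU data — is the general RT-safe replacement for bare `preempt_disable()` around per-CPU state. A minimal sketch of the same idiom applied to unrelated, hypothetical data (all names illustrative):

```c
#include <linux/local_lock.h>
#include <linux/percpu.h>

/*
 * Hypothetical per-CPU state guarded by a local_lock, mirroring
 * softirq_ctrl above. On !RT local_lock() disables preemption; on RT
 * it takes a per-CPU spinlock, so the section stays preemptible.
 */
struct my_pcpu_ctrl {
	local_lock_t	lock;
	int		cnt;
};

static DEFINE_PER_CPU(struct my_pcpu_ctrl, my_pcpu_ctrl) = {
	.lock	= INIT_LOCAL_LOCK(my_pcpu_ctrl.lock),
};

static void my_pcpu_inc(void)
{
	local_lock(&my_pcpu_ctrl.lock);
	__this_cpu_inc(my_pcpu_ctrl.cnt);	/* owner is serialized */
	local_unlock(&my_pcpu_ctrl.lock);
}
```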
---|
@@ -126,6 +330,6 @@
 	 * Were softirqs turned off above:
 	 */
 	if (softirq_count() == (cnt & SOFTIRQ_MASK))
-		trace_softirqs_off(ip);
+		lockdep_softirqs_off(ip);
 	raw_local_irq_restore(flags);
 
 	if (preempt_count() == cnt) {
---|
@@ -147,7 +351,7 @@
 		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
 
 	if (softirq_count() == (cnt & SOFTIRQ_MASK))
-		trace_softirqs_on(_RET_IP_);
+		lockdep_softirqs_on(_RET_IP_);
 
 	__preempt_count_sub(cnt);
 }
---|
@@ -174,7 +378,7 @@
 	 * Are softirqs going to be turned on now:
 	 */
 	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
-		trace_softirqs_on(ip);
+		lockdep_softirqs_on(ip);
 	/*
 	 * Keep preemption disabled until we are done with
 	 * softirq processing:
---|
@@ -196,6 +400,78 @@
 	preempt_check_resched();
 }
 EXPORT_SYMBOL(__local_bh_enable_ip);
+
+static inline void softirq_handle_begin(void)
+{
+	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
+}
+
+static inline void softirq_handle_end(void)
+{
+	__local_bh_enable(SOFTIRQ_OFFSET);
+	WARN_ON_ONCE(in_interrupt());
+}
+
+static inline void ksoftirqd_run_begin(void)
+{
+	local_irq_disable();
+}
+
+static inline void ksoftirqd_run_end(void)
+{
+	local_irq_enable();
+}
+
+static inline bool should_wake_ksoftirqd(void)
+{
+	return true;
+}
+
+static inline void invoke_softirq(void)
+{
+	if (ksoftirqd_running(local_softirq_pending()))
+		return;
+
+	if (!force_irqthreads) {
+#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
+		/*
+		 * We can safely execute softirq on the current stack if
+		 * it is the irq stack, because it should be near empty
+		 * at this stage.
+		 */
+		__do_softirq();
+#else
+		/*
+		 * Otherwise, irq_exit() is called on the task stack that can
+		 * be potentially deep already. So call softirq in its own stack
+		 * to prevent from any overrun.
+		 */
+		do_softirq_own_stack();
+#endif
+	} else {
+		wakeup_softirqd();
+	}
+}
+
+asmlinkage __visible void do_softirq(void)
+{
+	__u32 pending;
+	unsigned long flags;
+
+	if (in_interrupt())
+		return;
+
+	local_irq_save(flags);
+
+	pending = local_softirq_pending();
+
+	if (pending && !ksoftirqd_running(pending))
+		do_softirq_own_stack();
+
+	local_irq_restore(flags);
+}
+
+#endif /* !CONFIG_PREEMPT_RT */
 
 /*
  * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
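Both halves of the `#ifdef` provide the same internal hooks (`softirq_handle_begin()`, `ksoftirqd_run_begin()`, `should_wake_ksoftirqd()`, ...), so the code later in the file is config-agnostic, and the public `local_bh_disable()`/`local_bh_enable()` API is unchanged for users. A hypothetical caller, identical on RT and !RT (names illustrative):

```c
#include <linux/bottom_half.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_counter);	/* illustrative */

/*
 * Exclude this CPU's softirq handlers while touching my_counter.
 * On !RT this bumps the preempt counter; on RT it goes through the
 * softirq_ctrl machinery above -- the caller cannot tell the difference.
 */
static void my_counter_inc(void)
{
	local_bh_disable();
	__this_cpu_inc(my_counter);
	local_bh_enable();
}
```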
---|
@@ -224,9 +500,9 @@
 {
 	bool in_hardirq = false;
 
-	if (trace_hardirq_context(current)) {
+	if (lockdep_hardirq_context()) {
 		in_hardirq = true;
-		trace_hardirq_exit();
+		lockdep_hardirq_exit();
 	}
 
 	lockdep_softirq_enter();
---|
@@ -239,12 +515,22 @@
 	lockdep_softirq_exit();
 
 	if (in_hardirq)
-		trace_hardirq_enter();
+		lockdep_hardirq_enter();
 }
 #else
 static inline bool lockdep_softirq_start(void) { return false; }
 static inline void lockdep_softirq_end(bool in_hardirq) { }
 #endif
+
+#define softirq_deferred_for_rt(pending)		\
+({							\
+	__u32 deferred = 0;				\
+	if (cpupri_check_rt()) {			\
+		deferred = pending & LONG_SOFTIRQ_MASK;	\
+		pending &= ~LONG_SOFTIRQ_MASK;		\
+	}						\
+	deferred;					\
+})
 
 asmlinkage __visible void __softirq_entry __do_softirq(void)
 {
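`softirq_deferred_for_rt()` relies on a GNU C statement expression: `({ ... })` evaluates to its last expression, and since `pending` is a macro argument, the caller's variable is stripped of the `LONG_SOFTIRQ_MASK` bits in place. A small, purely illustrative example of the same construct:

```c
/*
 * GNU C statement expression, same shape as softirq_deferred_for_rt():
 * yields the extracted bits and clears them from the caller's word.
 */
#define take_bits(word, mask)			\
({						\
	unsigned int __t = (word) & (mask);	\
	(word) &= ~(mask);			\
	__t;					\
})

/* usage: low = take_bits(flags, 0xf);  -- 'flags' loses those bits */
```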
---|
@@ -253,25 +539,27 @@
 	int max_restart = MAX_SOFTIRQ_RESTART;
 	struct softirq_action *h;
 	bool in_hardirq;
+	__u32 deferred;
 	__u32 pending;
 	int softirq_bit;
 
 	/*
-	 * Mask out PF_MEMALLOC s current task context is borrowed for the
-	 * softirq. A softirq handled such as network RX might set PF_MEMALLOC
-	 * again if the socket is related to swap
+	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
+	 * softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
+	 * again if the socket is related to swapping.
 	 */
 	current->flags &= ~PF_MEMALLOC;
 
 	pending = local_softirq_pending();
-	account_irq_enter_time(current);
-
-	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
+	deferred = softirq_deferred_for_rt(pending);
+	softirq_handle_begin();
 	in_hardirq = lockdep_softirq_start();
+	account_softirq_enter(current);
 
 restart:
 	/* Reset the pending bitmask before enabling irqs */
-	set_softirq_pending(0);
+	set_softirq_pending(deferred);
+	__this_cpu_write(active_softirqs, pending);
 
 	local_irq_enable();
 
---|
@@ -301,86 +589,56 @@
 		pending >>= softirq_bit;
 	}
 
-	rcu_bh_qs();
+	__this_cpu_write(active_softirqs, 0);
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
+	    __this_cpu_read(ksoftirqd) == current)
+		rcu_softirq_qs();
+
 	local_irq_disable();
 
 	pending = local_softirq_pending();
+	deferred = softirq_deferred_for_rt(pending);
+
 	if (pending) {
 		if (time_before(jiffies, end) && !need_resched() &&
 		    --max_restart)
 			goto restart;
 
+#ifndef CONFIG_RT_SOFTINT_OPTIMIZATION
 		wakeup_softirqd();
+#endif
 	}
 
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+	if (pending | deferred)
+		wakeup_softirqd();
+#endif
+	account_softirq_exit(current);
 	lockdep_softirq_end(in_hardirq);
-	account_irq_exit_time(current);
-	__local_bh_enable(SOFTIRQ_OFFSET);
-	WARN_ON_ONCE(in_interrupt());
+	softirq_handle_end();
 	current_restore_flags(old_flags, PF_MEMALLOC);
 }
 
-asmlinkage __visible void do_softirq(void)
+/**
+ * irq_enter_rcu - Enter an interrupt context with RCU watching
+ */
+void irq_enter_rcu(void)
 {
-	__u32 pending;
-	unsigned long flags;
+	__irq_enter_raw();
 
-	if (in_interrupt())
-		return;
+	if (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET))
+		tick_irq_enter();
 
-	local_irq_save(flags);
-
-	pending = local_softirq_pending();
-
-	if (pending && !ksoftirqd_running(pending))
-		do_softirq_own_stack();
-
-	local_irq_restore(flags);
+	account_hardirq_enter(current);
 }
 
-/*
- * Enter an interrupt context.
+/**
+ * irq_enter - Enter an interrupt context including RCU update
  */
 void irq_enter(void)
 {
 	rcu_irq_enter();
-	if (is_idle_task(current) && !in_interrupt()) {
-		/*
-		 * Prevent raise_softirq from needlessly waking up ksoftirqd
-		 * here, as softirq will be serviced on return from interrupt.
-		 */
-		local_bh_disable();
-		tick_irq_enter();
-		_local_bh_enable();
-	}
-
-	__irq_enter();
-}
-
-static inline void invoke_softirq(void)
-{
-	if (ksoftirqd_running(local_softirq_pending()))
-		return;
-
-	if (!force_irqthreads) {
-#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
-		/*
-		 * We can safely execute softirq on the current stack if
-		 * it is the irq stack, because it should be near empty
-		 * at this stage.
-		 */
-		__do_softirq();
-#else
-		/*
-		 * Otherwise, irq_exit() is called on the task stack that can
-		 * be potentially deep already. So call softirq in its own stack
-		 * to prevent from any overrun.
-		 */
-		do_softirq_own_stack();
-#endif
-	} else {
-		wakeup_softirqd();
-	}
+	irq_enter_rcu();
 }
 
 static inline void tick_irq_exit(void)
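`irq_enter_rcu()` exists for entry code that has already done its own RCU accounting; ordinary architecture code still uses the full pair. A skeleton of the usual bracket around a hardware interrupt (illustrative only, not any specific architecture's entry code):

```c
/* Illustrative skeleton of an arch-level interrupt entry path. */
static void my_arch_handle_irq(unsigned int irq)
{
	irq_enter();			/* rcu_irq_enter() + irq_enter_rcu() */

	generic_handle_irq(irq);	/* run the registered handlers */

	irq_exit();			/* may invoke_softirq(), then rcu_irq_exit() */
}
```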
---|
@@ -396,24 +654,44 @@
 #endif
 }
 
-/*
- * Exit an interrupt context. Process softirqs if needed and possible:
- */
-void irq_exit(void)
+static inline void __irq_exit_rcu(void)
 {
 #ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
 	local_irq_disable();
 #else
 	lockdep_assert_irqs_disabled();
 #endif
-	account_irq_exit_time(current);
+	account_hardirq_exit(current);
 	preempt_count_sub(HARDIRQ_OFFSET);
 	if (!in_interrupt() && local_softirq_pending())
 		invoke_softirq();
 
 	tick_irq_exit();
+}
+
+/**
+ * irq_exit_rcu() - Exit an interrupt context without updating RCU
+ *
+ * Also processes softirqs if needed and possible.
+ */
+void irq_exit_rcu(void)
+{
+	__irq_exit_rcu();
+	/* must be last! */
+	lockdep_hardirq_exit();
+}
+
+/**
+ * irq_exit - Exit an interrupt context, update RCU and lockdep
+ *
+ * Also processes softirqs if needed and possible.
+ */
+void irq_exit(void)
+{
+	__irq_exit_rcu();
 	rcu_irq_exit();
-	trace_hardirq_exit(); /* must be last! */
+	/* must be last! */
+	lockdep_hardirq_exit();
 }
 
 /*
---|
@@ -432,7 +710,7 @@
 	 * Otherwise we wake up ksoftirqd to make sure we
 	 * schedule the softirq soon.
 	 */
-	if (!in_interrupt())
+	if (!in_interrupt() && should_wake_ksoftirqd())
 		wakeup_softirqd();
 }
 
---|
@@ -447,6 +725,7 @@
 
 void __raise_softirq_irqoff(unsigned int nr)
 {
+	lockdep_assert_irqs_disabled();
 	trace_softirq_raise(nr);
 	or_softirq_pending(1UL << nr);
 }
---|
@@ -497,6 +776,16 @@
 }
 EXPORT_SYMBOL(__tasklet_hi_schedule);
 
+static inline bool tasklet_clear_sched(struct tasklet_struct *t)
+{
+	if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
+		wake_up_var(&t->state);
+		return true;
+	}
+
+	return false;
+}
+
 static void tasklet_action_common(struct softirq_action *a,
 				  struct tasklet_head *tl_head,
 				  unsigned int softirq_nr)
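`tasklet_clear_sched()` pairs the bit clear with `wake_up_var()`, which is what lets `tasklet_kill()` further down sleep in `wait_var_event()` instead of spinning in `yield()`. The idiom on a hypothetical flag word (names illustrative):

```c
#include <linux/wait_bit.h>

static unsigned long my_flags;	/* illustrative; bit 0 = "in progress" */

static void my_op_done(void)
{
	clear_bit(0, &my_flags);
	smp_mb__after_atomic();		/* clear visible before waking */
	wake_up_var(&my_flags);		/* wake sleepers keyed on &my_flags */
}

static void my_op_wait(void)
{
	/* Sleeps until the condition holds; rechecked after every wakeup. */
	wait_var_event(&my_flags, !test_bit(0, &my_flags));
}
```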
---|
@@ -516,10 +805,17 @@
 
 		if (tasklet_trylock(t)) {
 			if (!atomic_read(&t->count)) {
-				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
-							&t->state))
+				if (!tasklet_clear_sched(t))
 					BUG();
-				t->func(t->data);
+				if (t->use_callback) {
+					trace_tasklet_entry(t->callback);
+					t->callback(t);
+					trace_tasklet_exit(t->callback);
+				} else {
+					trace_tasklet_entry(t->func);
+					t->func(t->data);
+					trace_tasklet_exit(t->func);
+				}
 				tasklet_unlock(t);
 				continue;
 			}
---|
@@ -545,6 +841,18 @@
 	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
 }
 
+void tasklet_setup(struct tasklet_struct *t,
+		   void (*callback)(struct tasklet_struct *))
+{
+	t->next = NULL;
+	t->state = 0;
+	atomic_set(&t->count, 0);
+	t->callback = callback;
+	t->use_callback = true;
+	t->data = 0;
+}
+EXPORT_SYMBOL(tasklet_setup);
+
 void tasklet_init(struct tasklet_struct *t,
 		  void (*func)(unsigned long), unsigned long data)
 {
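With `tasklet_setup()`, the callback receives the tasklet itself and recovers its container with `from_tasklet()` rather than casting an `unsigned long`. A sketch of a converted, hypothetical driver (all names illustrative):

```c
#include <linux/interrupt.h>

struct my_dev {
	struct tasklet_struct tasklet;
	int irq;			/* illustrative driver state */
	int pending_events;
};

static void my_dev_do_tasklet(struct tasklet_struct *t)
{
	/* from_tasklet() is container_of() keyed on the member name. */
	struct my_dev *dev = from_tasklet(dev, t, tasklet);

	dev->pending_events = 0;
}

static void my_dev_init(struct my_dev *dev)
{
	tasklet_setup(&dev->tasklet, my_dev_do_tasklet);
}
```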
---|
@@ -552,75 +860,66 @@
 	t->state = 0;
 	atomic_set(&t->count, 0);
 	t->func = func;
+	t->use_callback = false;
 	t->data = data;
 }
 EXPORT_SYMBOL(tasklet_init);
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
+/*
+ * Do not use in new code. Waiting for tasklets from atomic contexts is
+ * error prone and should be avoided.
+ */
+void tasklet_unlock_spin_wait(struct tasklet_struct *t)
+{
+	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
+		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+			/*
+			 * Prevent a live lock when current preempted soft
+			 * interrupt processing or prevents ksoftirqd from
+			 * running. If the tasklet runs on a different CPU
+			 * then this has no effect other than doing the BH
+			 * disable/enable dance for nothing.
+			 */
+			local_bh_disable();
+			local_bh_enable();
+		} else {
+			cpu_relax();
+		}
+	}
+}
+EXPORT_SYMBOL(tasklet_unlock_spin_wait);
+#endif
 
 void tasklet_kill(struct tasklet_struct *t)
 {
 	if (in_interrupt())
 		pr_notice("Attempt to kill tasklet from interrupt\n");
 
-	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
-		do {
-			yield();
-		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
-	}
+	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+		wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));
+
 	tasklet_unlock_wait(t);
-	clear_bit(TASKLET_STATE_SCHED, &t->state);
+	tasklet_clear_sched(t);
 }
 EXPORT_SYMBOL(tasklet_kill);
 
-/*
- * tasklet_hrtimer
- */
-
-/*
- * The trampoline is called when the hrtimer expires. It schedules a tasklet
- * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
- * hrtimer callback, but from softirq context.
- */
-static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
+void tasklet_unlock(struct tasklet_struct *t)
 {
-	struct tasklet_hrtimer *ttimer =
-		container_of(timer, struct tasklet_hrtimer, timer);
-
-	tasklet_hi_schedule(&ttimer->tasklet);
-	return HRTIMER_NORESTART;
+	smp_mb__before_atomic();
+	clear_bit(TASKLET_STATE_RUN, &t->state);
+	smp_mb__after_atomic();
+	wake_up_var(&t->state);
 }
+EXPORT_SYMBOL_GPL(tasklet_unlock);
 
-/*
- * Helper function which calls the hrtimer callback from
- * tasklet/softirq context
- */
-static void __tasklet_hrtimer_trampoline(unsigned long data)
+void tasklet_unlock_wait(struct tasklet_struct *t)
 {
-	struct tasklet_hrtimer *ttimer = (void *)data;
-	enum hrtimer_restart restart;
-
-	restart = ttimer->function(&ttimer->timer);
-	if (restart != HRTIMER_NORESTART)
-		hrtimer_restart(&ttimer->timer);
+	wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
 }
-
-/**
- * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
- * @ttimer:	 tasklet_hrtimer which is initialized
- * @function:	 hrtimer callback function which gets called from softirq context
- * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
- * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
- */
-void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
-			  enum hrtimer_restart (*function)(struct hrtimer *),
-			  clockid_t which_clock, enum hrtimer_mode mode)
-{
-	hrtimer_init(&ttimer->timer, which_clock, mode);
-	ttimer->timer.function = __hrtimer_tasklet_trampoline;
-	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
-		     (unsigned long)ttimer);
-	ttimer->function = function;
-}
-EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
+EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
+#endif
 
 void __init softirq_init(void)
 {
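For callers, `tasklet_kill()` keeps its contract; only the waiting mechanism changed. A teardown sketch for the hypothetical `my_dev` above:

```c
/*
 * Illustrative teardown: quiesce the event source first, then wait
 * until any queued or running tasklet instance has finished.
 */
static void my_dev_remove(struct my_dev *dev)
{
	disable_irq(dev->irq);		/* nothing can re-schedule it now */
	tasklet_kill(&dev->tasklet);	/* sleeps until SCHED and RUN clear */
}
```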
---|
@@ -644,18 +943,18 @@
 
 static void run_ksoftirqd(unsigned int cpu)
 {
-	local_irq_disable();
+	ksoftirqd_run_begin();
 	if (local_softirq_pending()) {
 		/*
 		 * We can safely run softirq on inline stack, as we are not deep
 		 * in the task stack here.
 		 */
 		__do_softirq();
-		local_irq_enable();
+		ksoftirqd_run_end();
 		cond_resched();
 		return;
 	}
-	local_irq_enable();
+	ksoftirqd_run_end();
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
---|
@@ -699,7 +998,7 @@
 	/* Find end, append list for that CPU. */
 	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
 		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
-		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
+		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
 		per_cpu(tasklet_vec, cpu).head = NULL;
 		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
 	}
---|