.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * linux/kernel/softirq.c |
---|
3 | 4 | * |
---|
4 | 5 | * Copyright (C) 1992 Linus Torvalds |
---|
5 | | - * |
---|
6 | | - * Distribute under GPLv2. |
---|
7 | 6 | * |
---|
8 | 7 | * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903) |
---|
9 | 8 | */ |
---|
.. | .. |
---|
14 | 13 | #include <linux/kernel_stat.h> |
---|
15 | 14 | #include <linux/interrupt.h> |
---|
16 | 15 | #include <linux/init.h> |
---|
| 16 | +#include <linux/local_lock.h> |
---|
17 | 17 | #include <linux/mm.h> |
---|
18 | 18 | #include <linux/notifier.h> |
---|
19 | 19 | #include <linux/percpu.h> |
---|
.. | .. |
---|
21 | 21 | #include <linux/freezer.h> |
---|
22 | 22 | #include <linux/kthread.h> |
---|
23 | 23 | #include <linux/rcupdate.h> |
---|
24 | | -#include <linux/delay.h> |
---|
25 | 24 | #include <linux/ftrace.h> |
---|
26 | 25 | #include <linux/smp.h> |
---|
27 | 26 | #include <linux/smpboot.h> |
---|
28 | 27 | #include <linux/tick.h> |
---|
29 | | -#include <linux/locallock.h> |
---|
30 | 28 | #include <linux/irq.h> |
---|
31 | | -#include <linux/sched/types.h> |
---|
| 29 | +#include <linux/wait_bit.h> |
---|
32 | 30 | |
---|
33 | 31 | #define CREATE_TRACE_POINTS |
---|
34 | 32 | #include <trace/events/irq.h> |
---|
| 33 | + |
---|
| 34 | +EXPORT_TRACEPOINT_SYMBOL_GPL(irq_handler_entry); |
---|
| 35 | +EXPORT_TRACEPOINT_SYMBOL_GPL(irq_handler_exit); |
---|
35 | 36 | |
---|
36 | 37 | /* |
---|
37 | 38 | - No shared variables, all the data are CPU local. |
---|
.. | .. |
---|
59 | 60 | static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp; |
---|
60 | 61 | |
---|
61 | 62 | DEFINE_PER_CPU(struct task_struct *, ksoftirqd); |
---|
62 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
---|
63 | | -#define TIMER_SOFTIRQS ((1 << TIMER_SOFTIRQ) | (1 << HRTIMER_SOFTIRQ)) |
---|
64 | | -DEFINE_PER_CPU(struct task_struct *, ktimer_softirqd); |
---|
65 | | -#endif |
---|
| 63 | +EXPORT_PER_CPU_SYMBOL_GPL(ksoftirqd); |
---|
| 64 | + |
---|
| 65 | +/* |
---|
| 66 | + * active_softirqs -- per cpu, a mask of softirqs that are being handled, |
---|
| 67 | + * with the expectation that approximate answers are acceptable and therefore |
---|
| 68 | + * no synchronization. |
---|
| 69 | + */ |
---|
| 70 | +DEFINE_PER_CPU(__u32, active_softirqs); |
---|
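A minimal sketch of how a consumer might read the new mask, assuming `active_softirqs` is also exposed through a `DECLARE_PER_CPU` in a shared header (that declaration and the helper name below are illustrative, not part of this patch):

```c
#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/types.h>

/* Assumed declaration for readers outside softirq.c. */
DECLARE_PER_CPU(__u32, active_softirqs);

/*
 * Best-effort check: is NET_RX currently being handled on this CPU?
 * The mask is written without synchronization, so the answer may be
 * stale by the time the caller acts on it.
 */
static bool net_rx_active_on_this_cpu(void)
{
	return this_cpu_read(active_softirqs) & BIT(NET_RX_SOFTIRQ);
}
```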
66 | 71 | |
---|
67 | 72 | const char * const softirq_to_name[NR_SOFTIRQS] = { |
---|
68 | 73 | "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL", |
---|
69 | 74 | "TASKLET", "SCHED", "HRTIMER", "RCU" |
---|
70 | 75 | }; |
---|
71 | | - |
---|
72 | | -#ifdef CONFIG_NO_HZ_COMMON |
---|
73 | | -# ifdef CONFIG_PREEMPT_RT_FULL |
---|
74 | | - |
---|
75 | | -struct softirq_runner { |
---|
76 | | - struct task_struct *runner[NR_SOFTIRQS]; |
---|
77 | | -}; |
---|
78 | | - |
---|
79 | | -static DEFINE_PER_CPU(struct softirq_runner, softirq_runners); |
---|
80 | | - |
---|
81 | | -static inline void softirq_set_runner(unsigned int sirq) |
---|
82 | | -{ |
---|
83 | | - struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); |
---|
84 | | - |
---|
85 | | - sr->runner[sirq] = current; |
---|
86 | | -} |
---|
87 | | - |
---|
88 | | -static inline void softirq_clr_runner(unsigned int sirq) |
---|
89 | | -{ |
---|
90 | | - struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); |
---|
91 | | - |
---|
92 | | - sr->runner[sirq] = NULL; |
---|
93 | | -} |
---|
94 | | - |
---|
95 | | -static bool softirq_check_runner_tsk(struct task_struct *tsk, |
---|
96 | | - unsigned int *pending) |
---|
97 | | -{ |
---|
98 | | - bool ret = false; |
---|
99 | | - |
---|
100 | | - if (!tsk) |
---|
101 | | - return ret; |
---|
102 | | - |
---|
103 | | - /* |
---|
104 | | - * The wakeup code in rtmutex.c wakes up the task |
---|
105 | | - * _before_ it sets pi_blocked_on to NULL under |
---|
106 | | - * tsk->pi_lock. So we need to check for both: state |
---|
107 | | - * and pi_blocked_on. |
---|
108 | | - * The test against UNINTERRUPTIBLE + ->sleeping_lock is in case the |
---|
109 | | - * task does cpu_chill(). |
---|
110 | | - */ |
---|
111 | | - raw_spin_lock(&tsk->pi_lock); |
---|
112 | | - if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING || |
---|
113 | | - (tsk->state == TASK_UNINTERRUPTIBLE && tsk->sleeping_lock)) { |
---|
114 | | - /* Clear all bits pending in that task */ |
---|
115 | | - *pending &= ~(tsk->softirqs_raised); |
---|
116 | | - ret = true; |
---|
117 | | - } |
---|
118 | | - raw_spin_unlock(&tsk->pi_lock); |
---|
119 | | - |
---|
120 | | - return ret; |
---|
121 | | -} |
---|
122 | | - |
---|
123 | | -/* |
---|
124 | | - * On preempt-rt a softirq running context might be blocked on a |
---|
125 | | - * lock. There might be no other runnable task on this CPU because the |
---|
126 | | - * lock owner runs on some other CPU. So we have to go into idle with |
---|
127 | | - * the pending bit set. Therefor we need to check this otherwise we |
---|
128 | | - * warn about false positives which confuses users and defeats the |
---|
129 | | - * whole purpose of this test. |
---|
130 | | - * |
---|
131 | | - * This code is called with interrupts disabled. |
---|
132 | | - */ |
---|
133 | | -void softirq_check_pending_idle(void) |
---|
134 | | -{ |
---|
135 | | - struct task_struct *tsk; |
---|
136 | | - static int rate_limit; |
---|
137 | | - struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); |
---|
138 | | - u32 warnpending; |
---|
139 | | - int i; |
---|
140 | | - |
---|
141 | | - if (rate_limit >= 10) |
---|
142 | | - return; |
---|
143 | | - |
---|
144 | | - warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK; |
---|
145 | | - if (!warnpending) |
---|
146 | | - return; |
---|
147 | | - for (i = 0; i < NR_SOFTIRQS; i++) { |
---|
148 | | - tsk = sr->runner[i]; |
---|
149 | | - |
---|
150 | | - if (softirq_check_runner_tsk(tsk, &warnpending)) |
---|
151 | | - warnpending &= ~(1 << i); |
---|
152 | | - } |
---|
153 | | - |
---|
154 | | - if (warnpending) { |
---|
155 | | - tsk = __this_cpu_read(ksoftirqd); |
---|
156 | | - softirq_check_runner_tsk(tsk, &warnpending); |
---|
157 | | - } |
---|
158 | | - |
---|
159 | | - if (warnpending) { |
---|
160 | | - tsk = __this_cpu_read(ktimer_softirqd); |
---|
161 | | - softirq_check_runner_tsk(tsk, &warnpending); |
---|
162 | | - } |
---|
163 | | - |
---|
164 | | - if (warnpending) { |
---|
165 | | - printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", |
---|
166 | | - warnpending); |
---|
167 | | - rate_limit++; |
---|
168 | | - } |
---|
169 | | -} |
---|
170 | | -# else |
---|
171 | | -/* |
---|
172 | | - * On !PREEMPT_RT we just printk rate limited: |
---|
173 | | - */ |
---|
174 | | -void softirq_check_pending_idle(void) |
---|
175 | | -{ |
---|
176 | | - static int rate_limit; |
---|
177 | | - |
---|
178 | | - if (rate_limit < 10 && |
---|
179 | | - (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { |
---|
180 | | - printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", |
---|
181 | | - local_softirq_pending()); |
---|
182 | | - rate_limit++; |
---|
183 | | - } |
---|
184 | | -} |
---|
185 | | -# endif |
---|
186 | | - |
---|
187 | | -#else /* !CONFIG_NO_HZ_COMMON */ |
---|
188 | | -static inline void softirq_set_runner(unsigned int sirq) { } |
---|
189 | | -static inline void softirq_clr_runner(unsigned int sirq) { } |
---|
190 | | -#endif |
---|
191 | 76 | |
---|
192 | 77 | /* |
---|
193 | 78 | * we cannot loop indefinitely here to avoid userspace starvation, |
---|
.. | .. |
---|
204 | 89 | wake_up_process(tsk); |
---|
205 | 90 | } |
---|
206 | 91 | |
---|
207 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
---|
208 | | -static void wakeup_timer_softirqd(void) |
---|
209 | | -{ |
---|
210 | | - /* Interrupts are disabled: no need to stop preemption */ |
---|
211 | | - struct task_struct *tsk = __this_cpu_read(ktimer_softirqd); |
---|
212 | | - |
---|
213 | | - if (tsk && tsk->state != TASK_RUNNING) |
---|
214 | | - wake_up_process(tsk); |
---|
215 | | -} |
---|
216 | | -#endif |
---|
217 | | - |
---|
218 | | -static void handle_softirq(unsigned int vec_nr) |
---|
219 | | -{ |
---|
220 | | - struct softirq_action *h = softirq_vec + vec_nr; |
---|
221 | | - int prev_count; |
---|
222 | | - |
---|
223 | | - prev_count = preempt_count(); |
---|
224 | | - |
---|
225 | | - kstat_incr_softirqs_this_cpu(vec_nr); |
---|
226 | | - |
---|
227 | | - trace_softirq_entry(vec_nr); |
---|
228 | | - h->action(h); |
---|
229 | | - trace_softirq_exit(vec_nr); |
---|
230 | | - if (unlikely(prev_count != preempt_count())) { |
---|
231 | | - pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", |
---|
232 | | - vec_nr, softirq_to_name[vec_nr], h->action, |
---|
233 | | - prev_count, preempt_count()); |
---|
234 | | - preempt_count_set(prev_count); |
---|
235 | | - } |
---|
236 | | -} |
---|
237 | | - |
---|
238 | | -#ifndef CONFIG_PREEMPT_RT_FULL |
---|
239 | 92 | /* |
---|
240 | 93 | * If ksoftirqd is scheduled, we do not want to process pending softirqs |
---|
241 | 94 | * right now. Let ksoftirqd handle this at its own rate, to get fairness, |
---|
.. | .. |
---|
248 | 101 | |
---|
249 | 102 | if (pending & SOFTIRQ_NOW_MASK) |
---|
250 | 103 | return false; |
---|
251 | | - return tsk && (tsk->state == TASK_RUNNING); |
---|
| 104 | + return tsk && (tsk->state == TASK_RUNNING) && |
---|
| 105 | + !__kthread_should_park(tsk); |
---|
252 | 106 | } |
---|
253 | 107 | |
---|
254 | | -static inline int ksoftirqd_softirq_pending(void) |
---|
255 | | -{ |
---|
256 | | - return local_softirq_pending(); |
---|
257 | | -} |
---|
258 | | - |
---|
259 | | -static void handle_pending_softirqs(u32 pending) |
---|
260 | | -{ |
---|
261 | | - struct softirq_action *h = softirq_vec; |
---|
262 | | - int softirq_bit; |
---|
263 | | - |
---|
264 | | - local_irq_enable(); |
---|
265 | | - |
---|
266 | | - h = softirq_vec; |
---|
267 | | - |
---|
268 | | - while ((softirq_bit = ffs(pending))) { |
---|
269 | | - unsigned int vec_nr; |
---|
270 | | - |
---|
271 | | - h += softirq_bit - 1; |
---|
272 | | - vec_nr = h - softirq_vec; |
---|
273 | | - handle_softirq(vec_nr); |
---|
274 | | - |
---|
275 | | - h++; |
---|
276 | | - pending >>= softirq_bit; |
---|
277 | | - } |
---|
278 | | - |
---|
279 | | - rcu_bh_qs(); |
---|
280 | | - local_irq_disable(); |
---|
281 | | -} |
---|
282 | | - |
---|
283 | | -static void run_ksoftirqd(unsigned int cpu) |
---|
284 | | -{ |
---|
285 | | - local_irq_disable(); |
---|
286 | | - if (ksoftirqd_softirq_pending()) { |
---|
287 | | - __do_softirq(); |
---|
288 | | - local_irq_enable(); |
---|
289 | | - cond_resched(); |
---|
290 | | - return; |
---|
291 | | - } |
---|
292 | | - local_irq_enable(); |
---|
293 | | -} |
---|
| 108 | +#ifdef CONFIG_TRACE_IRQFLAGS |
---|
| 109 | +DEFINE_PER_CPU(int, hardirqs_enabled); |
---|
| 110 | +DEFINE_PER_CPU(int, hardirq_context); |
---|
| 111 | +EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled); |
---|
| 112 | +EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context); |
---|
| 113 | +#endif |
---|
294 | 114 | |
---|
295 | 115 | /* |
---|
296 | | - * preempt_count and SOFTIRQ_OFFSET usage: |
---|
297 | | - * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving |
---|
298 | | - * softirq processing. |
---|
299 | | - * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET) |
---|
| 116 | + * SOFTIRQ_OFFSET usage: |
---|
| 117 | + * |
---|
| 118 | + * On !RT kernels 'count' is the preempt counter, on RT kernels this applies |
---|
| 119 | + * to a per CPU counter and to task::softirqs_disabled_cnt. |
---|
| 120 | + * |
---|
| 121 | + * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq |
---|
| 122 | + * processing. |
---|
| 123 | + * |
---|
| 124 | + * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET) |
---|
300 | 125 | * on local_bh_disable or local_bh_enable. |
---|
| 126 | + * |
---|
301 | 127 | * This lets us distinguish between whether we are currently processing |
---|
302 | 128 | * softirq and whether we just have bh disabled. |
---|
303 | 129 | */ |
---|
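The distinction above maps directly onto the usual helpers. A small sketch for a !PREEMPT_RT kernel, assumed to run in plain process context; the concrete offsets (SOFTIRQ_OFFSET = 0x100, SOFTIRQ_DISABLE_OFFSET = 0x200) are quoted from <linux/preempt.h> for illustration:

```c
#include <linux/bottom_half.h>
#include <linux/bug.h>
#include <linux/preempt.h>

/* Sketch: why the two offsets keep the states distinguishable. */
static void softirq_count_example(void)
{
	local_bh_disable();		/* count += SOFTIRQ_DISABLE_OFFSET */
	WARN_ON(!in_softirq());		/* inside a BH-disabled section   */
	WARN_ON(in_serving_softirq());	/* ...but no handler is running   */
	local_bh_enable();		/* count -= SOFTIRQ_DISABLE_OFFSET */

	/*
	 * While __do_softirq() runs a handler, only SOFTIRQ_OFFSET is
	 * added, so in_softirq() and in_serving_softirq() are both true.
	 */
}
```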
| 130 | +#ifdef CONFIG_PREEMPT_RT |
---|
304 | 131 | |
---|
305 | 132 | /* |
---|
306 | | - * This one is for softirq.c-internal use, |
---|
307 | | - * where hardirqs are disabled legitimately: |
---|
| 133 | + * RT accounts for BH disabled sections in task::softirqs_disabled_cnt and |
---|
| 134 | + * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a |
---|
| 135 | + * softirq disabled section to be preempted. |
---|
| 136 | + * |
---|
| 137 | + * The per task counter is used for softirq_count(), in_softirq() and |
---|
| 138 | + * in_serving_softirqs() because these counts are only valid when the task |
---|
| 139 | + * holding softirq_ctrl::lock is running. |
---|
| 140 | + * |
---|
| 141 | + * The per CPU counter prevents pointless wakeups of ksoftirqd in case that |
---|
| 142 | + * the task which is in a softirq disabled section is preempted or blocks. |
---|
| 143 | + */ |
---|
| 144 | +struct softirq_ctrl { |
---|
| 145 | + local_lock_t lock; |
---|
| 146 | + int cnt; |
---|
| 147 | +}; |
---|
| 148 | + |
---|
| 149 | +static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = { |
---|
| 150 | + .lock = INIT_LOCAL_LOCK(softirq_ctrl.lock), |
---|
| 151 | +}; |
---|
| 152 | + |
---|
| 153 | +/** |
---|
| 154 | + * local_bh_blocked() - Check for idle whether BH processing is blocked |
---|
| 155 | + * |
---|
| 156 | + * Returns false if the per CPU softirq::cnt is 0 otherwise true. |
---|
| 157 | + * |
---|
| 158 | + * This is invoked from the idle task to guard against false positive |
---|
| 159 | + * softirq pending warnings, which would happen when the task which holds |
---|
| 160 | + * softirq_ctrl::lock was the only running task on the CPU and blocks on |
---|
| 161 | + * some other lock. |
---|
| 162 | + */ |
---|
| 163 | +bool local_bh_blocked(void) |
---|
| 164 | +{ |
---|
| 165 | + return __this_cpu_read(softirq_ctrl.cnt) != 0; |
---|
| 166 | +} |
---|
| 167 | + |
---|
| 168 | +void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) |
---|
| 169 | +{ |
---|
| 170 | + unsigned long flags; |
---|
| 171 | + int newcnt; |
---|
| 172 | + |
---|
| 173 | + WARN_ON_ONCE(in_hardirq()); |
---|
| 174 | + |
---|
| 175 | + /* First entry of a task into a BH disabled section? */ |
---|
| 176 | + if (!current->softirq_disable_cnt) { |
---|
| 177 | + if (preemptible()) { |
---|
| 178 | + local_lock(&softirq_ctrl.lock); |
---|
| 179 | + /* Required to meet the RCU bottomhalf requirements. */ |
---|
| 180 | + rcu_read_lock(); |
---|
| 181 | + } else { |
---|
| 182 | + DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt)); |
---|
| 183 | + } |
---|
| 184 | + } |
---|
| 185 | + |
---|
| 186 | + /* |
---|
| 187 | + * Track the per CPU softirq disabled state. On RT this is per CPU |
---|
| 188 | + * state to allow preemption of bottom half disabled sections. |
---|
| 189 | + */ |
---|
| 190 | + newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt); |
---|
| 191 | + /* |
---|
| 192 | + * Reflect the result in the task state to prevent recursion on the |
---|
| 193 | + * local lock and to make softirq_count() & al work. |
---|
| 194 | + */ |
---|
| 195 | + current->softirq_disable_cnt = newcnt; |
---|
| 196 | + |
---|
| 197 | + if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) { |
---|
| 198 | + raw_local_irq_save(flags); |
---|
| 199 | + lockdep_softirqs_off(ip); |
---|
| 200 | + raw_local_irq_restore(flags); |
---|
| 201 | + } |
---|
| 202 | +} |
---|
| 203 | +EXPORT_SYMBOL(__local_bh_disable_ip); |
---|
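Only a task's outermost BH-disable takes the per-CPU local lock and enters an RCU read-side critical section; nested calls just move the counters. A purely illustrative sketch of the nesting, assuming preemptible process context:

```c
#include <linux/bottom_half.h>

/* Nesting behaviour on PREEMPT_RT (preemptible process context assumed). */
static void bh_nesting_example(void)
{
	local_bh_disable();	/* outermost: takes softirq_ctrl.lock and
				 * enters an RCU read-side section */
	local_bh_disable();	/* nested: only the counters change */

	/* softirq handlers cannot run on this CPU in here */

	local_bh_enable();	/* still nested: counters only */
	local_bh_enable();	/* outermost: may run pending softirqs, then
				 * drops rcu_read_lock() and the local lock */
}
```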
| 204 | + |
---|
| 205 | +static void __local_bh_enable(unsigned int cnt, bool unlock) |
---|
| 206 | +{ |
---|
| 207 | + unsigned long flags; |
---|
| 208 | + int newcnt; |
---|
| 209 | + |
---|
| 210 | + DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt != |
---|
| 211 | + this_cpu_read(softirq_ctrl.cnt)); |
---|
| 212 | + |
---|
| 213 | + if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) { |
---|
| 214 | + raw_local_irq_save(flags); |
---|
| 215 | + lockdep_softirqs_on(_RET_IP_); |
---|
| 216 | + raw_local_irq_restore(flags); |
---|
| 217 | + } |
---|
| 218 | + |
---|
| 219 | + newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt); |
---|
| 220 | + current->softirq_disable_cnt = newcnt; |
---|
| 221 | + |
---|
| 222 | + if (!newcnt && unlock) { |
---|
| 223 | + rcu_read_unlock(); |
---|
| 224 | + local_unlock(&softirq_ctrl.lock); |
---|
| 225 | + } |
---|
| 226 | +} |
---|
| 227 | + |
---|
| 228 | +void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) |
---|
| 229 | +{ |
---|
| 230 | + bool preempt_on = preemptible(); |
---|
| 231 | + unsigned long flags; |
---|
| 232 | + u32 pending; |
---|
| 233 | + int curcnt; |
---|
| 234 | + |
---|
| 235 | + WARN_ON_ONCE(in_irq()); |
---|
| 236 | + lockdep_assert_irqs_enabled(); |
---|
| 237 | + |
---|
| 238 | + local_irq_save(flags); |
---|
| 239 | + curcnt = __this_cpu_read(softirq_ctrl.cnt); |
---|
| 240 | + |
---|
| 241 | + /* |
---|
| 242 | + * If this is not reenabling soft interrupts, no point in trying to |
---|
| 243 | + * run pending ones. |
---|
| 244 | + */ |
---|
| 245 | + if (curcnt != cnt) |
---|
| 246 | + goto out; |
---|
| 247 | + |
---|
| 248 | + pending = local_softirq_pending(); |
---|
| 249 | + if (!pending || ksoftirqd_running(pending)) |
---|
| 250 | + goto out; |
---|
| 251 | + |
---|
| 252 | + /* |
---|
| 253 | + * If this was called from non preemptible context, wake up the |
---|
| 254 | + * softirq daemon. |
---|
| 255 | + */ |
---|
| 256 | + if (!preempt_on) { |
---|
| 257 | + wakeup_softirqd(); |
---|
| 258 | + goto out; |
---|
| 259 | + } |
---|
| 260 | + |
---|
| 261 | + /* |
---|
| 262 | + * Adjust softirq count to SOFTIRQ_OFFSET which makes |
---|
| 263 | + * in_serving_softirq() become true. |
---|
| 264 | + */ |
---|
| 265 | + cnt = SOFTIRQ_OFFSET; |
---|
| 266 | + __local_bh_enable(cnt, false); |
---|
| 267 | + __do_softirq(); |
---|
| 268 | + |
---|
| 269 | +out: |
---|
| 270 | + __local_bh_enable(cnt, preempt_on); |
---|
| 271 | + local_irq_restore(flags); |
---|
| 272 | +} |
---|
| 273 | +EXPORT_SYMBOL(__local_bh_enable_ip); |
---|
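For callers the API semantics are unchanged: a BH-disabled section still excludes softirq handlers on the local CPU, which remains the usual way to protect per-CPU data shared with a handler. What differs on RT is the enable path above: from preemptible context it processes the pending softirqs itself, otherwise it wakes ksoftirqd. A hedged sketch of the classic pattern (all names made up for illustration):

```c
#include <linux/bottom_half.h>
#include <linux/percpu.h>
#include <linux/types.h>

struct example_stats {
	u64 packets;
};
static DEFINE_PER_CPU(struct example_stats, example_stats);

/* Process context reader; a softirq handler updates example_stats too. */
static u64 example_stats_read_local(void)
{
	u64 val;

	local_bh_disable();	/* keep the handler off this CPU */
	val = this_cpu_ptr(&example_stats)->packets;
	local_bh_enable();	/* may run softirqs raised meanwhile */

	return val;
}
```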
| 274 | + |
---|
| 275 | +/* |
---|
| 276 | + * Invoked from ksoftirqd_run() outside of the interrupt disabled section |
---|
| 277 | + * to acquire the per CPU local lock for reentrancy protection. |
---|
| 278 | + */ |
---|
| 279 | +static inline void ksoftirqd_run_begin(void) |
---|
| 280 | +{ |
---|
| 281 | + __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET); |
---|
| 282 | + local_irq_disable(); |
---|
| 283 | +} |
---|
| 284 | + |
---|
| 285 | +/* Counterpart to ksoftirqd_run_begin() */ |
---|
| 286 | +static inline void ksoftirqd_run_end(void) |
---|
| 287 | +{ |
---|
| 288 | + __local_bh_enable(SOFTIRQ_OFFSET, true); |
---|
| 289 | + WARN_ON_ONCE(in_interrupt()); |
---|
| 290 | + local_irq_enable(); |
---|
| 291 | +} |
---|
| 292 | + |
---|
| 293 | +static inline void softirq_handle_begin(void) { } |
---|
| 294 | +static inline void softirq_handle_end(void) { } |
---|
| 295 | + |
---|
| 296 | +static inline bool should_wake_ksoftirqd(void) |
---|
| 297 | +{ |
---|
| 298 | + return !this_cpu_read(softirq_ctrl.cnt); |
---|
| 299 | +} |
---|
| 300 | + |
---|
| 301 | +static inline void invoke_softirq(void) |
---|
| 302 | +{ |
---|
| 303 | + if (should_wake_ksoftirqd()) |
---|
| 304 | + wakeup_softirqd(); |
---|
| 305 | +} |
---|
| 306 | + |
---|
| 307 | +#else /* CONFIG_PREEMPT_RT */ |
---|
| 308 | + |
---|
| 309 | +/* |
---|
| 310 | + * This one is for softirq.c-internal use, where hardirqs are disabled |
---|
| 311 | + * legitimately: |
---|
308 | 312 | */ |
---|
309 | 313 | #ifdef CONFIG_TRACE_IRQFLAGS |
---|
310 | 314 | void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) |
---|
.. | .. |
---|
326 | 330 | * Were softirqs turned off above: |
---|
327 | 331 | */ |
---|
328 | 332 | if (softirq_count() == (cnt & SOFTIRQ_MASK)) |
---|
329 | | - trace_softirqs_off(ip); |
---|
| 333 | + lockdep_softirqs_off(ip); |
---|
330 | 334 | raw_local_irq_restore(flags); |
---|
331 | 335 | |
---|
332 | 336 | if (preempt_count() == cnt) { |
---|
.. | .. |
---|
347 | 351 | trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); |
---|
348 | 352 | |
---|
349 | 353 | if (softirq_count() == (cnt & SOFTIRQ_MASK)) |
---|
350 | | - trace_softirqs_on(_RET_IP_); |
---|
| 354 | + lockdep_softirqs_on(_RET_IP_); |
---|
351 | 355 | |
---|
352 | 356 | __preempt_count_sub(cnt); |
---|
353 | 357 | } |
---|
.. | .. |
---|
374 | 378 | * Are softirqs going to be turned on now: |
---|
375 | 379 | */ |
---|
376 | 380 | if (softirq_count() == SOFTIRQ_DISABLE_OFFSET) |
---|
377 | | - trace_softirqs_on(ip); |
---|
| 381 | + lockdep_softirqs_on(ip); |
---|
378 | 382 | /* |
---|
379 | 383 | * Keep preemption disabled until we are done with |
---|
380 | 384 | * softirq processing: |
---|
.. | .. |
---|
396 | 400 | preempt_check_resched(); |
---|
397 | 401 | } |
---|
398 | 402 | EXPORT_SYMBOL(__local_bh_enable_ip); |
---|
| 403 | + |
---|
| 404 | +static inline void softirq_handle_begin(void) |
---|
| 405 | +{ |
---|
| 406 | + __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET); |
---|
| 407 | +} |
---|
| 408 | + |
---|
| 409 | +static inline void softirq_handle_end(void) |
---|
| 410 | +{ |
---|
| 411 | + __local_bh_enable(SOFTIRQ_OFFSET); |
---|
| 412 | + WARN_ON_ONCE(in_interrupt()); |
---|
| 413 | +} |
---|
| 414 | + |
---|
| 415 | +static inline void ksoftirqd_run_begin(void) |
---|
| 416 | +{ |
---|
| 417 | + local_irq_disable(); |
---|
| 418 | +} |
---|
| 419 | + |
---|
| 420 | +static inline void ksoftirqd_run_end(void) |
---|
| 421 | +{ |
---|
| 422 | + local_irq_enable(); |
---|
| 423 | +} |
---|
| 424 | + |
---|
| 425 | +static inline bool should_wake_ksoftirqd(void) |
---|
| 426 | +{ |
---|
| 427 | + return true; |
---|
| 428 | +} |
---|
| 429 | + |
---|
| 430 | +static inline void invoke_softirq(void) |
---|
| 431 | +{ |
---|
| 432 | + if (ksoftirqd_running(local_softirq_pending())) |
---|
| 433 | + return; |
---|
| 434 | + |
---|
| 435 | + if (!force_irqthreads) { |
---|
| 436 | +#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK |
---|
| 437 | + /* |
---|
| 438 | + * We can safely execute softirq on the current stack if |
---|
| 439 | + * it is the irq stack, because it should be near empty |
---|
| 440 | + * at this stage. |
---|
| 441 | + */ |
---|
| 442 | + __do_softirq(); |
---|
| 443 | +#else |
---|
| 444 | + /* |
---|
| 445 | + * Otherwise, irq_exit() is called on the task stack that can |
---|
| 446 | + * be potentially deep already. So call softirq in its own stack |
---|
| 447 | + * to prevent from any overrun. |
---|
| 448 | + */ |
---|
| 449 | + do_softirq_own_stack(); |
---|
| 450 | +#endif |
---|
| 451 | + } else { |
---|
| 452 | + wakeup_softirqd(); |
---|
| 453 | + } |
---|
| 454 | +} |
---|
| 455 | + |
---|
| 456 | +asmlinkage __visible void do_softirq(void) |
---|
| 457 | +{ |
---|
| 458 | + __u32 pending; |
---|
| 459 | + unsigned long flags; |
---|
| 460 | + |
---|
| 461 | + if (in_interrupt()) |
---|
| 462 | + return; |
---|
| 463 | + |
---|
| 464 | + local_irq_save(flags); |
---|
| 465 | + |
---|
| 466 | + pending = local_softirq_pending(); |
---|
| 467 | + |
---|
| 468 | + if (pending && !ksoftirqd_running(pending)) |
---|
| 469 | + do_softirq_own_stack(); |
---|
| 470 | + |
---|
| 471 | + local_irq_restore(flags); |
---|
| 472 | +} |
---|
| 473 | + |
---|
| 474 | +#endif /* !CONFIG_PREEMPT_RT */ |
---|
399 | 475 | |
---|
400 | 476 | /* |
---|
401 | 477 | * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times, |
---|
.. | .. |
---|
424 | 500 | { |
---|
425 | 501 | bool in_hardirq = false; |
---|
426 | 502 | |
---|
427 | | - if (trace_hardirq_context(current)) { |
---|
| 503 | + if (lockdep_hardirq_context()) { |
---|
428 | 504 | in_hardirq = true; |
---|
429 | | - trace_hardirq_exit(); |
---|
| 505 | + lockdep_hardirq_exit(); |
---|
430 | 506 | } |
---|
431 | 507 | |
---|
432 | 508 | lockdep_softirq_enter(); |
---|
.. | .. |
---|
439 | 515 | lockdep_softirq_exit(); |
---|
440 | 516 | |
---|
441 | 517 | if (in_hardirq) |
---|
442 | | - trace_hardirq_enter(); |
---|
| 518 | + lockdep_hardirq_enter(); |
---|
443 | 519 | } |
---|
444 | 520 | #else |
---|
445 | 521 | static inline bool lockdep_softirq_start(void) { return false; } |
---|
446 | 522 | static inline void lockdep_softirq_end(bool in_hardirq) { } |
---|
447 | 523 | #endif |
---|
448 | 524 | |
---|
| 525 | +#define softirq_deferred_for_rt(pending) \ |
---|
| 526 | +({ \ |
---|
| 527 | + __u32 deferred = 0; \ |
---|
| 528 | + if (cpupri_check_rt()) { \ |
---|
| 529 | + deferred = pending & LONG_SOFTIRQ_MASK; \ |
---|
| 530 | + pending &= ~LONG_SOFTIRQ_MASK; \ |
---|
| 531 | + } \ |
---|
| 532 | + deferred; \ |
---|
| 533 | +}) |
---|
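The macro splits the pending word when cpupri_check_rt() reports a runnable RT task: vectors in LONG_SOFTIRQ_MASK are pushed back into the pending state (and later handed to ksoftirqd), everything else still runs inline. A stand-alone mirror of that logic with a hypothetical long-running mask — the real LONG_SOFTIRQ_MASK is defined in this tree's headers and may differ:

```c
#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/types.h>

/* Hypothetical mask for illustration only. */
#define EXAMPLE_LONG_MASK	(BIT(NET_RX_SOFTIRQ) | BIT(BLOCK_SOFTIRQ))

static __u32 example_split_for_rt(__u32 *pending, bool rt_task_runnable)
{
	__u32 deferred = 0;

	if (rt_task_runnable) {		/* stands in for cpupri_check_rt() */
		deferred = *pending & EXAMPLE_LONG_MASK;
		*pending &= ~EXAMPLE_LONG_MASK;
	}
	return deferred;
}
```

With pending = TIMER | NET_RX | TASKLET and an RT task runnable, NET_RX ends up in deferred (re-queued via set_softirq_pending() and left to ksoftirqd) while TIMER and TASKLET are still handled inline.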
| 534 | + |
---|
449 | 535 | asmlinkage __visible void __softirq_entry __do_softirq(void) |
---|
450 | 536 | { |
---|
451 | 537 | unsigned long end = jiffies + MAX_SOFTIRQ_TIME; |
---|
452 | 538 | unsigned long old_flags = current->flags; |
---|
453 | 539 | int max_restart = MAX_SOFTIRQ_RESTART; |
---|
| 540 | + struct softirq_action *h; |
---|
454 | 541 | bool in_hardirq; |
---|
| 542 | + __u32 deferred; |
---|
455 | 543 | __u32 pending; |
---|
| 544 | + int softirq_bit; |
---|
456 | 545 | |
---|
457 | 546 | /* |
---|
458 | | - * Mask out PF_MEMALLOC s current task context is borrowed for the |
---|
459 | | - * softirq. A softirq handled such as network RX might set PF_MEMALLOC |
---|
460 | | - * again if the socket is related to swap |
---|
| 547 | + * Mask out PF_MEMALLOC as the current task context is borrowed for the |
---|
| 548 | + * softirq. A softirq handled, such as network RX, might set PF_MEMALLOC |
---|
| 549 | + * again if the socket is related to swapping. |
---|
461 | 550 | */ |
---|
462 | 551 | current->flags &= ~PF_MEMALLOC; |
---|
463 | 552 | |
---|
464 | 553 | pending = local_softirq_pending(); |
---|
465 | | - account_irq_enter_time(current); |
---|
466 | | - |
---|
467 | | - __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET); |
---|
| 554 | + deferred = softirq_deferred_for_rt(pending); |
---|
| 555 | + softirq_handle_begin(); |
---|
468 | 556 | in_hardirq = lockdep_softirq_start(); |
---|
| 557 | + account_softirq_enter(current); |
---|
469 | 558 | |
---|
470 | 559 | restart: |
---|
471 | 560 | /* Reset the pending bitmask before enabling irqs */ |
---|
472 | | - set_softirq_pending(0); |
---|
| 561 | + set_softirq_pending(deferred); |
---|
| 562 | + __this_cpu_write(active_softirqs, pending); |
---|
473 | 563 | |
---|
474 | | - handle_pending_softirqs(pending); |
---|
| 564 | + local_irq_enable(); |
---|
| 565 | + |
---|
| 566 | + h = softirq_vec; |
---|
| 567 | + |
---|
| 568 | + while ((softirq_bit = ffs(pending))) { |
---|
| 569 | + unsigned int vec_nr; |
---|
| 570 | + int prev_count; |
---|
| 571 | + |
---|
| 572 | + h += softirq_bit - 1; |
---|
| 573 | + |
---|
| 574 | + vec_nr = h - softirq_vec; |
---|
| 575 | + prev_count = preempt_count(); |
---|
| 576 | + |
---|
| 577 | + kstat_incr_softirqs_this_cpu(vec_nr); |
---|
| 578 | + |
---|
| 579 | + trace_softirq_entry(vec_nr); |
---|
| 580 | + h->action(h); |
---|
| 581 | + trace_softirq_exit(vec_nr); |
---|
| 582 | + if (unlikely(prev_count != preempt_count())) { |
---|
| 583 | + pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", |
---|
| 584 | + vec_nr, softirq_to_name[vec_nr], h->action, |
---|
| 585 | + prev_count, preempt_count()); |
---|
| 586 | + preempt_count_set(prev_count); |
---|
| 587 | + } |
---|
| 588 | + h++; |
---|
| 589 | + pending >>= softirq_bit; |
---|
| 590 | + } |
---|
| 591 | + |
---|
| 592 | + __this_cpu_write(active_softirqs, 0); |
---|
| 593 | + if (!IS_ENABLED(CONFIG_PREEMPT_RT) && |
---|
| 594 | + __this_cpu_read(ksoftirqd) == current) |
---|
| 595 | + rcu_softirq_qs(); |
---|
| 596 | + |
---|
| 597 | + local_irq_disable(); |
---|
475 | 598 | |
---|
476 | 599 | pending = local_softirq_pending(); |
---|
| 600 | + deferred = softirq_deferred_for_rt(pending); |
---|
| 601 | + |
---|
477 | 602 | if (pending) { |
---|
478 | 603 | if (time_before(jiffies, end) && !need_resched() && |
---|
479 | 604 | --max_restart) |
---|
480 | 605 | goto restart; |
---|
481 | 606 | |
---|
| 607 | +#ifndef CONFIG_RT_SOFTINT_OPTIMIZATION |
---|
482 | 608 | wakeup_softirqd(); |
---|
| 609 | +#endif |
---|
483 | 610 | } |
---|
484 | 611 | |
---|
| 612 | +#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION |
---|
| 613 | + if (pending | deferred) |
---|
| 614 | + wakeup_softirqd(); |
---|
| 615 | +#endif |
---|
| 616 | + account_softirq_exit(current); |
---|
485 | 617 | lockdep_softirq_end(in_hardirq); |
---|
486 | | - account_irq_exit_time(current); |
---|
487 | | - __local_bh_enable(SOFTIRQ_OFFSET); |
---|
488 | | - WARN_ON_ONCE(in_interrupt()); |
---|
| 618 | + softirq_handle_end(); |
---|
489 | 619 | current_restore_flags(old_flags, PF_MEMALLOC); |
---|
490 | 620 | } |
---|
491 | 621 | |
---|
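The handler loop in __do_softirq() above walks the pending word with ffs(), so lower-numbered vectors run first (HI, TIMER, NET_TX, ... in softirq_to_name[] order). A stand-alone user-space sketch of the same walk, just to make the dispatch order visible:

```c
/* Stand-alone illustration; mirrors the ffs() walk in __do_softirq(). */
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	/* pretend HI (0), NET_RX (3) and TASKLET (6) are pending */
	unsigned int pending = (1u << 0) | (1u << 3) | (1u << 6);
	unsigned int base = 0;
	int bit;

	while ((bit = ffs(pending))) {
		printf("run softirq vector %u\n", base + bit - 1);
		base += bit;		/* matches h += softirq_bit - 1; h++ */
		pending >>= bit;
	}
	return 0;
}
```

This prints vectors 0, 3 and 6 in that order, i.e. HI before NET_RX before TASKLET.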
492 | | -asmlinkage __visible void do_softirq(void) |
---|
493 | | -{ |
---|
494 | | - __u32 pending; |
---|
495 | | - unsigned long flags; |
---|
496 | | - |
---|
497 | | - if (in_interrupt()) |
---|
498 | | - return; |
---|
499 | | - |
---|
500 | | - local_irq_save(flags); |
---|
501 | | - |
---|
502 | | - pending = local_softirq_pending(); |
---|
503 | | - |
---|
504 | | - if (pending && !ksoftirqd_running(pending)) |
---|
505 | | - do_softirq_own_stack(); |
---|
506 | | - |
---|
507 | | - local_irq_restore(flags); |
---|
508 | | -} |
---|
509 | | - |
---|
510 | | -/* |
---|
511 | | - * This function must run with irqs disabled! |
---|
| 622 | +/** |
---|
| 623 | + * irq_enter_rcu - Enter an interrupt context with RCU watching |
---|
512 | 624 | */ |
---|
513 | | -void raise_softirq_irqoff(unsigned int nr) |
---|
| 625 | +void irq_enter_rcu(void) |
---|
514 | 626 | { |
---|
515 | | - __raise_softirq_irqoff(nr); |
---|
| 627 | + __irq_enter_raw(); |
---|
516 | 628 | |
---|
517 | | - /* |
---|
518 | | - * If we're in an interrupt or softirq, we're done |
---|
519 | | - * (this also catches softirq-disabled code). We will |
---|
520 | | - * actually run the softirq once we return from |
---|
521 | | - * the irq or softirq. |
---|
522 | | - * |
---|
523 | | - * Otherwise we wake up ksoftirqd to make sure we |
---|
524 | | - * schedule the softirq soon. |
---|
525 | | - */ |
---|
526 | | - if (!in_interrupt()) |
---|
527 | | - wakeup_softirqd(); |
---|
| 629 | + if (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)) |
---|
| 630 | + tick_irq_enter(); |
---|
| 631 | + |
---|
| 632 | + account_hardirq_enter(current); |
---|
528 | 633 | } |
---|
529 | 634 | |
---|
530 | | -void __raise_softirq_irqoff(unsigned int nr) |
---|
531 | | -{ |
---|
532 | | - trace_softirq_raise(nr); |
---|
533 | | - or_softirq_pending(1UL << nr); |
---|
534 | | -} |
---|
535 | | - |
---|
536 | | -static inline void local_bh_disable_nort(void) { local_bh_disable(); } |
---|
537 | | -static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } |
---|
538 | | -static void ksoftirqd_set_sched_params(unsigned int cpu) { } |
---|
539 | | - |
---|
540 | | -#else /* !PREEMPT_RT_FULL */ |
---|
541 | | - |
---|
542 | | -/* |
---|
543 | | - * On RT we serialize softirq execution with a cpu local lock per softirq |
---|
544 | | - */ |
---|
545 | | -static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks); |
---|
546 | | - |
---|
547 | | -void __init softirq_early_init(void) |
---|
548 | | -{ |
---|
549 | | - int i; |
---|
550 | | - |
---|
551 | | - for (i = 0; i < NR_SOFTIRQS; i++) |
---|
552 | | - local_irq_lock_init(local_softirq_locks[i]); |
---|
553 | | -} |
---|
554 | | - |
---|
555 | | -static void lock_softirq(int which) |
---|
556 | | -{ |
---|
557 | | - local_lock(local_softirq_locks[which]); |
---|
558 | | -} |
---|
559 | | - |
---|
560 | | -static void unlock_softirq(int which) |
---|
561 | | -{ |
---|
562 | | - local_unlock(local_softirq_locks[which]); |
---|
563 | | -} |
---|
564 | | - |
---|
565 | | -static void do_single_softirq(int which) |
---|
566 | | -{ |
---|
567 | | - unsigned long old_flags = current->flags; |
---|
568 | | - |
---|
569 | | - current->flags &= ~PF_MEMALLOC; |
---|
570 | | - vtime_account_irq_enter(current); |
---|
571 | | - current->flags |= PF_IN_SOFTIRQ; |
---|
572 | | - lockdep_softirq_enter(); |
---|
573 | | - local_irq_enable(); |
---|
574 | | - handle_softirq(which); |
---|
575 | | - local_irq_disable(); |
---|
576 | | - lockdep_softirq_exit(); |
---|
577 | | - current->flags &= ~PF_IN_SOFTIRQ; |
---|
578 | | - vtime_account_irq_enter(current); |
---|
579 | | - current_restore_flags(old_flags, PF_MEMALLOC); |
---|
580 | | -} |
---|
581 | | - |
---|
582 | | -/* |
---|
583 | | - * Called with interrupts disabled. Process softirqs which were raised |
---|
584 | | - * in current context (or on behalf of ksoftirqd). |
---|
585 | | - */ |
---|
586 | | -static void do_current_softirqs(void) |
---|
587 | | -{ |
---|
588 | | - while (current->softirqs_raised) { |
---|
589 | | - int i = __ffs(current->softirqs_raised); |
---|
590 | | - unsigned int pending, mask = (1U << i); |
---|
591 | | - |
---|
592 | | - current->softirqs_raised &= ~mask; |
---|
593 | | - local_irq_enable(); |
---|
594 | | - |
---|
595 | | - /* |
---|
596 | | - * If the lock is contended, we boost the owner to |
---|
597 | | - * process the softirq or leave the critical section |
---|
598 | | - * now. |
---|
599 | | - */ |
---|
600 | | - lock_softirq(i); |
---|
601 | | - local_irq_disable(); |
---|
602 | | - softirq_set_runner(i); |
---|
603 | | - /* |
---|
604 | | - * Check with the local_softirq_pending() bits, |
---|
605 | | - * whether we need to process this still or if someone |
---|
606 | | - * else took care of it. |
---|
607 | | - */ |
---|
608 | | - pending = local_softirq_pending(); |
---|
609 | | - if (pending & mask) { |
---|
610 | | - set_softirq_pending(pending & ~mask); |
---|
611 | | - do_single_softirq(i); |
---|
612 | | - } |
---|
613 | | - softirq_clr_runner(i); |
---|
614 | | - WARN_ON(current->softirq_nestcnt != 1); |
---|
615 | | - local_irq_enable(); |
---|
616 | | - unlock_softirq(i); |
---|
617 | | - local_irq_disable(); |
---|
618 | | - } |
---|
619 | | -} |
---|
620 | | - |
---|
621 | | -void __local_bh_disable(void) |
---|
622 | | -{ |
---|
623 | | - if (++current->softirq_nestcnt == 1) |
---|
624 | | - migrate_disable(); |
---|
625 | | -} |
---|
626 | | -EXPORT_SYMBOL(__local_bh_disable); |
---|
627 | | - |
---|
628 | | -void __local_bh_enable(void) |
---|
629 | | -{ |
---|
630 | | - if (WARN_ON(current->softirq_nestcnt == 0)) |
---|
631 | | - return; |
---|
632 | | - |
---|
633 | | - local_irq_disable(); |
---|
634 | | - if (current->softirq_nestcnt == 1 && current->softirqs_raised) |
---|
635 | | - do_current_softirqs(); |
---|
636 | | - local_irq_enable(); |
---|
637 | | - |
---|
638 | | - if (--current->softirq_nestcnt == 0) |
---|
639 | | - migrate_enable(); |
---|
640 | | -} |
---|
641 | | -EXPORT_SYMBOL(__local_bh_enable); |
---|
642 | | - |
---|
643 | | -void _local_bh_enable(void) |
---|
644 | | -{ |
---|
645 | | - if (WARN_ON(current->softirq_nestcnt == 0)) |
---|
646 | | - return; |
---|
647 | | - if (--current->softirq_nestcnt == 0) |
---|
648 | | - migrate_enable(); |
---|
649 | | -} |
---|
650 | | -EXPORT_SYMBOL(_local_bh_enable); |
---|
651 | | - |
---|
652 | | -int in_serving_softirq(void) |
---|
653 | | -{ |
---|
654 | | - return current->flags & PF_IN_SOFTIRQ; |
---|
655 | | -} |
---|
656 | | -EXPORT_SYMBOL(in_serving_softirq); |
---|
657 | | - |
---|
658 | | -/* Called with preemption disabled */ |
---|
659 | | -static void run_ksoftirqd(unsigned int cpu) |
---|
660 | | -{ |
---|
661 | | - local_irq_disable(); |
---|
662 | | - current->softirq_nestcnt++; |
---|
663 | | - |
---|
664 | | - do_current_softirqs(); |
---|
665 | | - current->softirq_nestcnt--; |
---|
666 | | - local_irq_enable(); |
---|
667 | | - cond_resched(); |
---|
668 | | -} |
---|
669 | | - |
---|
670 | | -/* |
---|
671 | | - * Called from netif_rx_ni(). Preemption enabled, but migration |
---|
672 | | - * disabled. So the cpu can't go away under us. |
---|
673 | | - */ |
---|
674 | | -void thread_do_softirq(void) |
---|
675 | | -{ |
---|
676 | | - if (!in_serving_softirq() && current->softirqs_raised) { |
---|
677 | | - current->softirq_nestcnt++; |
---|
678 | | - do_current_softirqs(); |
---|
679 | | - current->softirq_nestcnt--; |
---|
680 | | - } |
---|
681 | | -} |
---|
682 | | - |
---|
683 | | -static void do_raise_softirq_irqoff(unsigned int nr) |
---|
684 | | -{ |
---|
685 | | - unsigned int mask; |
---|
686 | | - |
---|
687 | | - mask = 1UL << nr; |
---|
688 | | - |
---|
689 | | - trace_softirq_raise(nr); |
---|
690 | | - or_softirq_pending(mask); |
---|
691 | | - |
---|
692 | | - /* |
---|
693 | | - * If we are not in a hard interrupt and inside a bh disabled |
---|
694 | | - * region, we simply raise the flag on current. local_bh_enable() |
---|
695 | | - * will make sure that the softirq is executed. Otherwise we |
---|
696 | | - * delegate it to ksoftirqd. |
---|
697 | | - */ |
---|
698 | | - if (!in_irq() && current->softirq_nestcnt) |
---|
699 | | - current->softirqs_raised |= mask; |
---|
700 | | - else if (!__this_cpu_read(ksoftirqd) || !__this_cpu_read(ktimer_softirqd)) |
---|
701 | | - return; |
---|
702 | | - |
---|
703 | | - if (mask & TIMER_SOFTIRQS) |
---|
704 | | - __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask; |
---|
705 | | - else |
---|
706 | | - __this_cpu_read(ksoftirqd)->softirqs_raised |= mask; |
---|
707 | | -} |
---|
708 | | - |
---|
709 | | -static void wakeup_proper_softirq(unsigned int nr) |
---|
710 | | -{ |
---|
711 | | - if ((1UL << nr) & TIMER_SOFTIRQS) |
---|
712 | | - wakeup_timer_softirqd(); |
---|
713 | | - else |
---|
714 | | - wakeup_softirqd(); |
---|
715 | | -} |
---|
716 | | - |
---|
717 | | -void __raise_softirq_irqoff(unsigned int nr) |
---|
718 | | -{ |
---|
719 | | - do_raise_softirq_irqoff(nr); |
---|
720 | | - if (!in_irq() && !current->softirq_nestcnt) |
---|
721 | | - wakeup_proper_softirq(nr); |
---|
722 | | -} |
---|
723 | | - |
---|
724 | | -/* |
---|
725 | | - * Same as __raise_softirq_irqoff() but will process them in ksoftirqd |
---|
726 | | - */ |
---|
727 | | -void __raise_softirq_irqoff_ksoft(unsigned int nr) |
---|
728 | | -{ |
---|
729 | | - unsigned int mask; |
---|
730 | | - |
---|
731 | | - if (WARN_ON_ONCE(!__this_cpu_read(ksoftirqd) || |
---|
732 | | - !__this_cpu_read(ktimer_softirqd))) |
---|
733 | | - return; |
---|
734 | | - mask = 1UL << nr; |
---|
735 | | - |
---|
736 | | - trace_softirq_raise(nr); |
---|
737 | | - or_softirq_pending(mask); |
---|
738 | | - if (mask & TIMER_SOFTIRQS) |
---|
739 | | - __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask; |
---|
740 | | - else |
---|
741 | | - __this_cpu_read(ksoftirqd)->softirqs_raised |= mask; |
---|
742 | | - wakeup_proper_softirq(nr); |
---|
743 | | -} |
---|
744 | | - |
---|
745 | | -/* |
---|
746 | | - * This function must run with irqs disabled! |
---|
747 | | - */ |
---|
748 | | -void raise_softirq_irqoff(unsigned int nr) |
---|
749 | | -{ |
---|
750 | | - do_raise_softirq_irqoff(nr); |
---|
751 | | - |
---|
752 | | - /* |
---|
753 | | - * If we're in an hard interrupt we let irq return code deal |
---|
754 | | - * with the wakeup of ksoftirqd. |
---|
755 | | - */ |
---|
756 | | - if (in_irq()) |
---|
757 | | - return; |
---|
758 | | - /* |
---|
759 | | - * If we are in thread context but outside of a bh disabled |
---|
760 | | - * region, we need to wake ksoftirqd as well. |
---|
761 | | - * |
---|
762 | | - * CHECKME: Some of the places which do that could be wrapped |
---|
763 | | - * into local_bh_disable/enable pairs. Though it's unclear |
---|
764 | | - * whether this is worth the effort. To find those places just |
---|
765 | | - * raise a WARN() if the condition is met. |
---|
766 | | - */ |
---|
767 | | - if (!current->softirq_nestcnt) |
---|
768 | | - wakeup_proper_softirq(nr); |
---|
769 | | -} |
---|
770 | | - |
---|
771 | | -static inline int ksoftirqd_softirq_pending(void) |
---|
772 | | -{ |
---|
773 | | - return current->softirqs_raised; |
---|
774 | | -} |
---|
775 | | - |
---|
776 | | -static inline void local_bh_disable_nort(void) { } |
---|
777 | | -static inline void _local_bh_enable_nort(void) { } |
---|
778 | | - |
---|
779 | | -static inline void ksoftirqd_set_sched_params(unsigned int cpu) |
---|
780 | | -{ |
---|
781 | | - /* Take over all but timer pending softirqs when starting */ |
---|
782 | | - local_irq_disable(); |
---|
783 | | - current->softirqs_raised = local_softirq_pending() & ~TIMER_SOFTIRQS; |
---|
784 | | - local_irq_enable(); |
---|
785 | | -} |
---|
786 | | - |
---|
787 | | -static inline void ktimer_softirqd_set_sched_params(unsigned int cpu) |
---|
788 | | -{ |
---|
789 | | - struct sched_param param = { .sched_priority = 1 }; |
---|
790 | | - |
---|
791 | | - sched_setscheduler(current, SCHED_FIFO, &param); |

---|
792 | | - |
---|
793 | | - /* Take over timer pending softirqs when starting */ |
---|
794 | | - local_irq_disable(); |
---|
795 | | - current->softirqs_raised = local_softirq_pending() & TIMER_SOFTIRQS; |
---|
796 | | - local_irq_enable(); |
---|
797 | | -} |
---|
798 | | - |
---|
799 | | -static inline void ktimer_softirqd_clr_sched_params(unsigned int cpu, |
---|
800 | | - bool online) |
---|
801 | | -{ |
---|
802 | | - struct sched_param param = { .sched_priority = 0 }; |
---|
803 | | - |
---|
804 | | - sched_setscheduler(current, SCHED_NORMAL, &param); |
---|
805 | | -} |
---|
806 | | - |
---|
807 | | -static int ktimer_softirqd_should_run(unsigned int cpu) |
---|
808 | | -{ |
---|
809 | | - return current->softirqs_raised; |
---|
810 | | -} |
---|
811 | | - |
---|
812 | | -#endif /* PREEMPT_RT_FULL */ |
---|
813 | | -/* |
---|
814 | | - * Enter an interrupt context. |
---|
| 635 | +/** |
---|
| 636 | + * irq_enter - Enter an interrupt context including RCU update |
---|
815 | 637 | */ |
---|
816 | 638 | void irq_enter(void) |
---|
817 | 639 | { |
---|
818 | 640 | rcu_irq_enter(); |
---|
819 | | - if (is_idle_task(current) && !in_interrupt()) { |
---|
820 | | - /* |
---|
821 | | - * Prevent raise_softirq from needlessly waking up ksoftirqd |
---|
822 | | - * here, as softirq will be serviced on return from interrupt. |
---|
823 | | - */ |
---|
824 | | - local_bh_disable_nort(); |
---|
825 | | - tick_irq_enter(); |
---|
826 | | - _local_bh_enable_nort(); |
---|
827 | | - } |
---|
828 | | - |
---|
829 | | - __irq_enter(); |
---|
830 | | -} |
---|
831 | | - |
---|
832 | | -static inline void invoke_softirq(void) |
---|
833 | | -{ |
---|
834 | | -#ifndef CONFIG_PREEMPT_RT_FULL |
---|
835 | | - if (ksoftirqd_running(local_softirq_pending())) |
---|
836 | | - return; |
---|
837 | | - |
---|
838 | | - if (!force_irqthreads) { |
---|
839 | | -#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK |
---|
840 | | - /* |
---|
841 | | - * We can safely execute softirq on the current stack if |
---|
842 | | - * it is the irq stack, because it should be near empty |
---|
843 | | - * at this stage. |
---|
844 | | - */ |
---|
845 | | - __do_softirq(); |
---|
846 | | -#else |
---|
847 | | - /* |
---|
848 | | - * Otherwise, irq_exit() is called on the task stack that can |
---|
849 | | - * be potentially deep already. So call softirq in its own stack |
---|
850 | | - * to prevent from any overrun. |
---|
851 | | - */ |
---|
852 | | - do_softirq_own_stack(); |
---|
853 | | -#endif |
---|
854 | | - } else { |
---|
855 | | - wakeup_softirqd(); |
---|
856 | | - } |
---|
857 | | -#else /* PREEMPT_RT_FULL */ |
---|
858 | | - unsigned long flags; |
---|
859 | | - |
---|
860 | | - local_irq_save(flags); |
---|
861 | | - if (__this_cpu_read(ksoftirqd) && |
---|
862 | | - __this_cpu_read(ksoftirqd)->softirqs_raised) |
---|
863 | | - wakeup_softirqd(); |
---|
864 | | - if (__this_cpu_read(ktimer_softirqd) && |
---|
865 | | - __this_cpu_read(ktimer_softirqd)->softirqs_raised) |
---|
866 | | - wakeup_timer_softirqd(); |
---|
867 | | - local_irq_restore(flags); |
---|
868 | | -#endif |
---|
| 641 | + irq_enter_rcu(); |
---|
869 | 642 | } |
---|
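The irq_enter_rcu()/irq_enter() split (mirrored by irq_exit_rcu()/irq_exit() further down) is there so that architectures whose low-level entry code already handles RCU can skip the rcu_irq_enter()/rcu_irq_exit() calls. A rough sketch of the ordering a generic interrupt path follows; the function name is hypothetical and real arch code does more:

```c
#include <linux/hardirq.h>
#include <linux/irqdesc.h>

/* Hypothetical dispatch path, shown only to illustrate the ordering. */
static void example_handle_irq(unsigned int irq)
{
	irq_enter();			/* rcu_irq_enter() + irq_enter_rcu() */
	generic_handle_irq(irq);	/* run the flow handler / ISRs */
	irq_exit();			/* maybe invoke_softirq() and
					 * tick_irq_exit(), then rcu_irq_exit() */
}
```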
870 | 643 | |
---|
871 | 644 | static inline void tick_irq_exit(void) |
---|
.. | .. |
---|
881 | 654 | #endif |
---|
882 | 655 | } |
---|
883 | 656 | |
---|
884 | | -/* |
---|
885 | | - * Exit an interrupt context. Process softirqs if needed and possible: |
---|
886 | | - */ |
---|
887 | | -void irq_exit(void) |
---|
| 657 | +static inline void __irq_exit_rcu(void) |
---|
888 | 658 | { |
---|
889 | 659 | #ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED |
---|
890 | 660 | local_irq_disable(); |
---|
891 | 661 | #else |
---|
892 | 662 | lockdep_assert_irqs_disabled(); |
---|
893 | 663 | #endif |
---|
894 | | - account_irq_exit_time(current); |
---|
| 664 | + account_hardirq_exit(current); |
---|
895 | 665 | preempt_count_sub(HARDIRQ_OFFSET); |
---|
896 | 666 | if (!in_interrupt() && local_softirq_pending()) |
---|
897 | 667 | invoke_softirq(); |
---|
898 | 668 | |
---|
899 | 669 | tick_irq_exit(); |
---|
| 670 | +} |
---|
| 671 | + |
---|
| 672 | +/** |
---|
| 673 | + * irq_exit_rcu() - Exit an interrupt context without updating RCU |
---|
| 674 | + * |
---|
| 675 | + * Also processes softirqs if needed and possible. |
---|
| 676 | + */ |
---|
| 677 | +void irq_exit_rcu(void) |
---|
| 678 | +{ |
---|
| 679 | + __irq_exit_rcu(); |
---|
| 680 | + /* must be last! */ |
---|
| 681 | + lockdep_hardirq_exit(); |
---|
| 682 | +} |
---|
| 683 | + |
---|
| 684 | +/** |
---|
| 685 | + * irq_exit - Exit an interrupt context, update RCU and lockdep |
---|
| 686 | + * |
---|
| 687 | + * Also processes softirqs if needed and possible. |
---|
| 688 | + */ |
---|
| 689 | +void irq_exit(void) |
---|
| 690 | +{ |
---|
| 691 | + __irq_exit_rcu(); |
---|
900 | 692 | rcu_irq_exit(); |
---|
901 | | - trace_hardirq_exit(); /* must be last! */ |
---|
| 693 | + /* must be last! */ |
---|
| 694 | + lockdep_hardirq_exit(); |
---|
| 695 | +} |
---|
| 696 | + |
---|
| 697 | +/* |
---|
| 698 | + * This function must run with irqs disabled! |
---|
| 699 | + */ |
---|
| 700 | +inline void raise_softirq_irqoff(unsigned int nr) |
---|
| 701 | +{ |
---|
| 702 | + __raise_softirq_irqoff(nr); |
---|
| 703 | + |
---|
| 704 | + /* |
---|
| 705 | + * If we're in an interrupt or softirq, we're done |
---|
| 706 | + * (this also catches softirq-disabled code). We will |
---|
| 707 | + * actually run the softirq once we return from |
---|
| 708 | + * the irq or softirq. |
---|
| 709 | + * |
---|
| 710 | + * Otherwise we wake up ksoftirqd to make sure we |
---|
| 711 | + * schedule the softirq soon. |
---|
| 712 | + */ |
---|
| 713 | + if (!in_interrupt() && should_wake_ksoftirqd()) |
---|
| 714 | + wakeup_softirqd(); |
---|
902 | 715 | } |
---|
903 | 716 | |
---|
904 | 717 | void raise_softirq(unsigned int nr) |
---|
.. | .. |
---|
908 | 721 | local_irq_save(flags); |
---|
909 | 722 | raise_softirq_irqoff(nr); |
---|
910 | 723 | local_irq_restore(flags); |
---|
| 724 | +} |
---|
| 725 | + |
---|
| 726 | +void __raise_softirq_irqoff(unsigned int nr) |
---|
| 727 | +{ |
---|
| 728 | + lockdep_assert_irqs_disabled(); |
---|
| 729 | + trace_softirq_raise(nr); |
---|
| 730 | + or_softirq_pending(1UL << nr); |
---|
911 | 731 | } |
---|
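raise_softirq_irqoff() is for callers that already run with interrupts disabled, typically a hard interrupt handler; raise_softirq() wraps it in local_irq_save()/restore() for everyone else, and __raise_softirq_irqoff() just sets the pending bit for contexts that arrange the ksoftirqd wakeup themselves. In all cases the vector must have been registered via open_softirq() below. A hedged sketch of the two common call sites — the vector is a placeholder, since drivers do not add softirq vectors and normally use tasklets instead:

```c
#include <linux/interrupt.h>

/* Placeholder vector; assume its action was registered at boot with
 * open_softirq(EXAMPLE_SOFTIRQ, example_action). */
#define EXAMPLE_SOFTIRQ		TASKLET_SOFTIRQ

static irqreturn_t example_hardirq_handler(int irq, void *dev_id)
{
	/* hard irq context: interrupts are already off */
	raise_softirq_irqoff(EXAMPLE_SOFTIRQ);
	return IRQ_HANDLED;
}

static void example_raise_from_process_context(void)
{
	/* disables interrupts around the raise internally */
	raise_softirq(EXAMPLE_SOFTIRQ);
}
```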
912 | 732 | |
---|
913 | 733 | void open_softirq(int nr, void (*action)(struct softirq_action *)) |
---|
.. | .. |
---|
934 | 754 | unsigned long flags; |
---|
935 | 755 | |
---|
936 | 756 | local_irq_save(flags); |
---|
937 | | - if (!tasklet_trylock(t)) { |
---|
938 | | - local_irq_restore(flags); |
---|
939 | | - return; |
---|
940 | | - } |
---|
941 | | - |
---|
942 | 757 | head = this_cpu_ptr(headp); |
---|
943 | | -again: |
---|
944 | | - /* We may have been preempted before tasklet_trylock |
---|
945 | | - * and __tasklet_action may have already run. |
---|
946 | | - * So double check the sched bit while the takslet |
---|
947 | | - * is locked before adding it to the list. |
---|
948 | | - */ |
---|
949 | | - if (test_bit(TASKLET_STATE_SCHED, &t->state)) { |
---|
950 | | -#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) |
---|
951 | | - if (test_and_set_bit(TASKLET_STATE_CHAINED, &t->state)) { |
---|
952 | | - tasklet_unlock(t); |
---|
953 | | - return; |
---|
954 | | - } |
---|
955 | | -#endif |
---|
956 | | - t->next = NULL; |
---|
957 | | - *head->tail = t; |
---|
958 | | - head->tail = &(t->next); |
---|
959 | | - raise_softirq_irqoff(softirq_nr); |
---|
960 | | - tasklet_unlock(t); |
---|
961 | | - } else { |
---|
962 | | - /* This is subtle. If we hit the corner case above |
---|
963 | | - * It is possible that we get preempted right here, |
---|
964 | | - * and another task has successfully called |
---|
965 | | - * tasklet_schedule(), then this function, and |
---|
966 | | - * failed on the trylock. Thus we must be sure |
---|
967 | | - * before releasing the tasklet lock, that the |
---|
968 | | - * SCHED_BIT is clear. Otherwise the tasklet |
---|
969 | | - * may get its SCHED_BIT set, but not added to the |
---|
970 | | - * list |
---|
971 | | - */ |
---|
972 | | - if (!tasklet_tryunlock(t)) |
---|
973 | | - goto again; |
---|
974 | | - } |
---|
| 758 | + t->next = NULL; |
---|
| 759 | + *head->tail = t; |
---|
| 760 | + head->tail = &(t->next); |
---|
| 761 | + raise_softirq_irqoff(softirq_nr); |
---|
975 | 762 | local_irq_restore(flags); |
---|
976 | 763 | } |
---|
977 | 764 | |
---|
.. | .. |
---|
989 | 776 | } |
---|
990 | 777 | EXPORT_SYMBOL(__tasklet_hi_schedule); |
---|
991 | 778 | |
---|
992 | | -void tasklet_enable(struct tasklet_struct *t) |
---|
| 779 | +static inline bool tasklet_clear_sched(struct tasklet_struct *t) |
---|
993 | 780 | { |
---|
994 | | - if (!atomic_dec_and_test(&t->count)) |
---|
995 | | - return; |
---|
996 | | - if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state)) |
---|
997 | | - tasklet_schedule(t); |
---|
| 781 | + if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) { |
---|
| 782 | + wake_up_var(&t->state); |
---|
| 783 | + return true; |
---|
| 784 | + } |
---|
| 785 | + |
---|
| 786 | + return false; |
---|
998 | 787 | } |
---|
999 | | -EXPORT_SYMBOL(tasklet_enable); |
---|
1000 | 788 | |
---|
1001 | 789 | static void tasklet_action_common(struct softirq_action *a, |
---|
1002 | 790 | struct tasklet_head *tl_head, |
---|
1003 | 791 | unsigned int softirq_nr) |
---|
1004 | 792 | { |
---|
1005 | 793 | struct tasklet_struct *list; |
---|
1006 | | - int loops = 1000000; |
---|
1007 | 794 | |
---|
1008 | 795 | local_irq_disable(); |
---|
1009 | 796 | list = tl_head->head; |
---|
.. | .. |
---|
1015 | 802 | struct tasklet_struct *t = list; |
---|
1016 | 803 | |
---|
1017 | 804 | list = list->next; |
---|
1018 | | - /* |
---|
1019 | | - * Should always succeed - after a tasklist got on the |
---|
1020 | | - * list (after getting the SCHED bit set from 0 to 1), |
---|
1021 | | - * nothing but the tasklet softirq it got queued to can |
---|
1022 | | - * lock it: |
---|
1023 | | - */ |
---|
1024 | | - if (!tasklet_trylock(t)) { |
---|
1025 | | - WARN_ON(1); |
---|
1026 | | - continue; |
---|
1027 | | - } |
---|
1028 | 805 | |
---|
1029 | | - t->next = NULL; |
---|
1030 | | - |
---|
1031 | | - if (unlikely(atomic_read(&t->count))) { |
---|
1032 | | -out_disabled: |
---|
1033 | | - /* implicit unlock: */ |
---|
1034 | | - wmb(); |
---|
1035 | | - t->state = TASKLET_STATEF_PENDING; |
---|
1036 | | - continue; |
---|
1037 | | - } |
---|
1038 | | - /* |
---|
1039 | | - * After this point on the tasklet might be rescheduled |
---|
1040 | | - * on another CPU, but it can only be added to another |
---|
1041 | | - * CPU's tasklet list if we unlock the tasklet (which we |
---|
1042 | | - * dont do yet). |
---|
1043 | | - */ |
---|
1044 | | - if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) |
---|
1045 | | - WARN_ON(1); |
---|
1046 | | -again: |
---|
1047 | | - t->func(t->data); |
---|
1048 | | - |
---|
1049 | | -#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) |
---|
1050 | | - while (cmpxchg(&t->state, TASKLET_STATEF_RC, 0) != TASKLET_STATEF_RC) { |
---|
1051 | | -#else |
---|
1052 | | - while (!tasklet_tryunlock(t)) { |
---|
1053 | | -#endif |
---|
1054 | | - /* |
---|
1055 | | - * If it got disabled meanwhile, bail out: |
---|
1056 | | - */ |
---|
1057 | | - if (atomic_read(&t->count)) |
---|
1058 | | - goto out_disabled; |
---|
1059 | | - /* |
---|
1060 | | - * If it got scheduled meanwhile, re-execute |
---|
1061 | | - * the tasklet function: |
---|
1062 | | - */ |
---|
1063 | | - if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) |
---|
1064 | | - goto again; |
---|
1065 | | - if (!--loops) { |
---|
1066 | | - printk("hm, tasklet state: %08lx\n", t->state); |
---|
1067 | | - WARN_ON(1); |
---|
| 806 | + if (tasklet_trylock(t)) { |
---|
| 807 | + if (!atomic_read(&t->count)) { |
---|
| 808 | + if (!tasklet_clear_sched(t)) |
---|
| 809 | + BUG(); |
---|
| 810 | + if (t->use_callback) { |
---|
| 811 | + trace_tasklet_entry(t->callback); |
---|
| 812 | + t->callback(t); |
---|
| 813 | + trace_tasklet_exit(t->callback); |
---|
| 814 | + } else { |
---|
| 815 | + trace_tasklet_entry(t->func); |
---|
| 816 | + t->func(t->data); |
---|
| 817 | + trace_tasklet_exit(t->func); |
---|
| 818 | + } |
---|
1068 | 819 | tasklet_unlock(t); |
---|
1069 | | - break; |
---|
| 820 | + continue; |
---|
1070 | 821 | } |
---|
| 822 | + tasklet_unlock(t); |
---|
1071 | 823 | } |
---|
| 824 | + |
---|
| 825 | + local_irq_disable(); |
---|
| 826 | + t->next = NULL; |
---|
| 827 | + *tl_head->tail = t; |
---|
| 828 | + tl_head->tail = &t->next; |
---|
| 829 | + __raise_softirq_irqoff(softirq_nr); |
---|
| 830 | + local_irq_enable(); |
---|
1072 | 831 | } |
---|
1073 | 832 | } |
---|
1074 | 833 | |
---|
.. | .. |
---|
1082 | 841 | tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ); |
---|
1083 | 842 | } |
---|
1084 | 843 | |
---|
| 844 | +void tasklet_setup(struct tasklet_struct *t, |
---|
| 845 | + void (*callback)(struct tasklet_struct *)) |
---|
| 846 | +{ |
---|
| 847 | + t->next = NULL; |
---|
| 848 | + t->state = 0; |
---|
| 849 | + atomic_set(&t->count, 0); |
---|
| 850 | + t->callback = callback; |
---|
| 851 | + t->use_callback = true; |
---|
| 852 | + t->data = 0; |
---|
| 853 | +} |
---|
| 854 | +EXPORT_SYMBOL(tasklet_setup); |
---|
| 855 | + |
---|
1085 | 856 | void tasklet_init(struct tasklet_struct *t, |
---|
1086 | 857 | void (*func)(unsigned long), unsigned long data) |
---|
1087 | 858 | { |
---|
.. | .. |
---|
1089 | 860 | t->state = 0; |
---|
1090 | 861 | atomic_set(&t->count, 0); |
---|
1091 | 862 | t->func = func; |
---|
| 863 | + t->use_callback = false; |
---|
1092 | 864 | t->data = data; |
---|
1093 | 865 | } |
---|
1094 | 866 | EXPORT_SYMBOL(tasklet_init); |
---|
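tasklet_setup() is the callback-style initializer: the handler receives the tasklet_struct itself instead of an opaque unsigned long, and the embedding object is recovered with container_of() (or the from_tasklet() helper where available — treat that name as an assumption about this tree). A sketch of converting a driver to the new form:

```c
#include <linux/interrupt.h>
#include <linux/kernel.h>

struct example_dev {
	int pending_events;
	struct tasklet_struct tasklet;
};

/* New style: the tasklet itself is passed to the callback. */
static void example_tasklet_cb(struct tasklet_struct *t)
{
	struct example_dev *dev = container_of(t, struct example_dev, tasklet);

	dev->pending_events = 0;
}

static void example_dev_init(struct example_dev *dev)
{
	tasklet_setup(&dev->tasklet, example_tasklet_cb);
	/* legacy equivalent:
	 * tasklet_init(&dev->tasklet, legacy_cb, (unsigned long)dev);
	 */
}
```

The tasklet is still queued with tasklet_schedule(&dev->tasklet), exactly as before.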
| 867 | + |
---|
| 868 | +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) |
---|
| 869 | +/* |
---|
| 870 | + * Do not use in new code. Waiting for tasklets from atomic contexts is |
---|
| 871 | + * error prone and should be avoided. |
---|
| 872 | + */ |
---|
| 873 | +void tasklet_unlock_spin_wait(struct tasklet_struct *t) |
---|
| 874 | +{ |
---|
| 875 | + while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { |
---|
| 876 | + if (IS_ENABLED(CONFIG_PREEMPT_RT)) { |
---|
| 877 | + /* |
---|
| 878 | + * Prevent a live lock when current preempted soft |
---|
| 879 | + * interrupt processing or prevents ksoftirqd from |
---|
| 880 | + * running. If the tasklet runs on a different CPU |
---|
| 881 | + * then this has no effect other than doing the BH |
---|
| 882 | + * disable/enable dance for nothing. |
---|
| 883 | + */ |
---|
| 884 | + local_bh_disable(); |
---|
| 885 | + local_bh_enable(); |
---|
| 886 | + } else { |
---|
| 887 | + cpu_relax(); |
---|
| 888 | + } |
---|
| 889 | + } |
---|
| 890 | +} |
---|
| 891 | +EXPORT_SYMBOL(tasklet_unlock_spin_wait); |
---|
| 892 | +#endif |
---|
1095 | 893 | |
---|
1096 | 894 | void tasklet_kill(struct tasklet_struct *t) |
---|
1097 | 895 | { |
---|
1098 | 896 | if (in_interrupt()) |
---|
1099 | 897 | pr_notice("Attempt to kill tasklet from interrupt\n"); |
---|
1100 | 898 | |
---|
1101 | | - while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { |
---|
1102 | | - do { |
---|
1103 | | - msleep(1); |
---|
1104 | | - } while (test_bit(TASKLET_STATE_SCHED, &t->state)); |
---|
1105 | | - } |
---|
| 899 | + while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) |
---|
| 900 | + wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state)); |
---|
| 901 | + |
---|
1106 | 902 | tasklet_unlock_wait(t); |
---|
1107 | | - clear_bit(TASKLET_STATE_SCHED, &t->state); |
---|
| 903 | + tasklet_clear_sched(t); |
---|
1108 | 904 | } |
---|
1109 | 905 | EXPORT_SYMBOL(tasklet_kill); |
---|
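With the wait_var_event() based implementation above, tasklet_kill() and tasklet_unlock_wait() block on an event instead of polling (the old msleep() loop for the SCHED bit and the busy-wait for the RUN bit); they must be called from process context, and atomic callers that really need to wait are pointed at tasklet_unlock_spin_wait() instead. A teardown sketch, reusing the hypothetical example_dev from the tasklet_setup() sketch earlier:

```c
#include <linux/interrupt.h>

/* Teardown order: stop new schedules first, then wait the tasklet out. */
static void example_dev_remove(struct example_dev *dev)
{
	/*
	 * 1. Make sure nothing can schedule the tasklet any more, e.g. by
	 *    disabling or freeing the device interrupt beforehand.
	 *
	 * 2. Wait for a queued or running instance to finish (may sleep).
	 */
	tasklet_kill(&dev->tasklet);

	/* 3. Only now is it safe to free the object embedding the tasklet. */
}
```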
1110 | 906 | |
---|
1111 | | -/* |
---|
1112 | | - * tasklet_hrtimer |
---|
1113 | | - */ |
---|
1114 | | - |
---|
1115 | | -/* |
---|
1116 | | - * The trampoline is called when the hrtimer expires. It schedules a tasklet |
---|
1117 | | - * to run __tasklet_hrtimer_trampoline() which in turn will call the intended |
---|
1118 | | - * hrtimer callback, but from softirq context. |
---|
1119 | | - */ |
---|
1120 | | -static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer) |
---|
| 907 | +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) |
---|
| 908 | +void tasklet_unlock(struct tasklet_struct *t) |
---|
1121 | 909 | { |
---|
1122 | | - struct tasklet_hrtimer *ttimer = |
---|
1123 | | - container_of(timer, struct tasklet_hrtimer, timer); |
---|
1124 | | - |
---|
1125 | | - tasklet_hi_schedule(&ttimer->tasklet); |
---|
1126 | | - return HRTIMER_NORESTART; |
---|
| 910 | + smp_mb__before_atomic(); |
---|
| 911 | + clear_bit(TASKLET_STATE_RUN, &t->state); |
---|
| 912 | + smp_mb__after_atomic(); |
---|
| 913 | + wake_up_var(&t->state); |
---|
1127 | 914 | } |
---|
| 915 | +EXPORT_SYMBOL_GPL(tasklet_unlock); |
---|
1128 | 916 | |
---|
1129 | | -/* |
---|
1130 | | - * Helper function which calls the hrtimer callback from |
---|
1131 | | - * tasklet/softirq context |
---|
1132 | | - */ |
---|
1133 | | -static void __tasklet_hrtimer_trampoline(unsigned long data) |
---|
| 917 | +void tasklet_unlock_wait(struct tasklet_struct *t) |
---|
1134 | 918 | { |
---|
1135 | | - struct tasklet_hrtimer *ttimer = (void *)data; |
---|
1136 | | - enum hrtimer_restart restart; |
---|
1137 | | - |
---|
1138 | | - restart = ttimer->function(&ttimer->timer); |
---|
1139 | | - if (restart != HRTIMER_NORESTART) |
---|
1140 | | - hrtimer_restart(&ttimer->timer); |
---|
| 919 | + wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state)); |
---|
1141 | 920 | } |
---|
1142 | | - |
---|
1143 | | -/** |
---|
1144 | | - * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks |
---|
1145 | | - * @ttimer: tasklet_hrtimer which is initialized |
---|
1146 | | - * @function: hrtimer callback function which gets called from softirq context |
---|
1147 | | - * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME) |
---|
1148 | | - * @mode: hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL) |
---|
1149 | | - */ |
---|
1150 | | -void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer, |
---|
1151 | | - enum hrtimer_restart (*function)(struct hrtimer *), |
---|
1152 | | - clockid_t which_clock, enum hrtimer_mode mode) |
---|
1153 | | -{ |
---|
1154 | | - hrtimer_init(&ttimer->timer, which_clock, mode); |
---|
1155 | | - ttimer->timer.function = __hrtimer_tasklet_trampoline; |
---|
1156 | | - tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline, |
---|
1157 | | - (unsigned long)ttimer); |
---|
1158 | | - ttimer->function = function; |
---|
1159 | | -} |
---|
1160 | | -EXPORT_SYMBOL_GPL(tasklet_hrtimer_init); |
---|
| 921 | +EXPORT_SYMBOL_GPL(tasklet_unlock_wait); |
---|
| 922 | +#endif |
---|
1161 | 923 | |
---|
1162 | 924 | void __init softirq_init(void) |
---|
1163 | 925 | { |
---|
.. | .. |
---|
1174 | 936 | open_softirq(HI_SOFTIRQ, tasklet_hi_action); |
---|
1175 | 937 | } |
---|
1176 | 938 | |
---|
1177 | | -#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) |
---|
1178 | | -void tasklet_unlock_wait(struct tasklet_struct *t) |
---|
1179 | | -{ |
---|
1180 | | - while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { |
---|
1181 | | - /* |
---|
1182 | | - * Hack for now to avoid this busy-loop: |
---|
1183 | | - */ |
---|
1184 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
---|
1185 | | - msleep(1); |
---|
1186 | | -#else |
---|
1187 | | - barrier(); |
---|
1188 | | -#endif |
---|
1189 | | - } |
---|
1190 | | -} |
---|
1191 | | -EXPORT_SYMBOL(tasklet_unlock_wait); |
---|
1192 | | -#endif |
---|
1193 | | - |
---|
1194 | 939 | static int ksoftirqd_should_run(unsigned int cpu) |
---|
1195 | 940 | { |
---|
1196 | | - return ksoftirqd_softirq_pending(); |
---|
| 941 | + return local_softirq_pending(); |
---|
| 942 | +} |
---|
| 943 | + |
---|
| 944 | +static void run_ksoftirqd(unsigned int cpu) |
---|
| 945 | +{ |
---|
| 946 | + ksoftirqd_run_begin(); |
---|
| 947 | + if (local_softirq_pending()) { |
---|
| 948 | + /* |
---|
| 949 | + * We can safely run softirq on inline stack, as we are not deep |
---|
| 950 | + * in the task stack here. |
---|
| 951 | + */ |
---|
| 952 | + __do_softirq(); |
---|
| 953 | + ksoftirqd_run_end(); |
---|
| 954 | + cond_resched(); |
---|
| 955 | + return; |
---|
| 956 | + } |
---|
| 957 | + ksoftirqd_run_end(); |
---|
1197 | 958 | } |
---|
1198 | 959 | |
---|
1199 | 960 | #ifdef CONFIG_HOTPLUG_CPU |
---|
.. | .. |
---|
1237 | 998 | /* Find end, append list for that CPU. */ |
---|
1238 | 999 | if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) { |
---|
1239 | 1000 | *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head; |
---|
1240 | | - this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail); |
---|
| 1001 | + __this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail); |
---|
1241 | 1002 | per_cpu(tasklet_vec, cpu).head = NULL; |
---|
1242 | 1003 | per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head; |
---|
1243 | 1004 | } |
---|
.. | .. |
---|
1260 | 1021 | |
---|
1261 | 1022 | static struct smp_hotplug_thread softirq_threads = { |
---|
1262 | 1023 | .store = &ksoftirqd, |
---|
1263 | | - .setup = ksoftirqd_set_sched_params, |
---|
1264 | 1024 | .thread_should_run = ksoftirqd_should_run, |
---|
1265 | 1025 | .thread_fn = run_ksoftirqd, |
---|
1266 | 1026 | .thread_comm = "ksoftirqd/%u", |
---|
1267 | 1027 | }; |
---|
1268 | | - |
---|
1269 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
---|
1270 | | -static struct smp_hotplug_thread softirq_timer_threads = { |
---|
1271 | | - .store = &ktimer_softirqd, |
---|
1272 | | - .setup = ktimer_softirqd_set_sched_params, |
---|
1273 | | - .cleanup = ktimer_softirqd_clr_sched_params, |
---|
1274 | | - .thread_should_run = ktimer_softirqd_should_run, |
---|
1275 | | - .thread_fn = run_ksoftirqd, |
---|
1276 | | - .thread_comm = "ktimersoftd/%u", |
---|
1277 | | -}; |
---|
1278 | | -#endif |
---|
1279 | 1028 | |
---|
1280 | 1029 | static __init int spawn_ksoftirqd(void) |
---|
1281 | 1030 | { |
---|
1282 | 1031 | cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL, |
---|
1283 | 1032 | takeover_tasklets); |
---|
1284 | 1033 | BUG_ON(smpboot_register_percpu_thread(&softirq_threads)); |
---|
1285 | | -#ifdef CONFIG_PREEMPT_RT_FULL |
---|
1286 | | - BUG_ON(smpboot_register_percpu_thread(&softirq_timer_threads)); |
---|
1287 | | -#endif |
---|
| 1034 | + |
---|
1288 | 1035 | return 0; |
---|
1289 | 1036 | } |
---|
1290 | 1037 | early_initcall(spawn_ksoftirqd); |
---|