@@ -18,37 +18,11 @@
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
-#include <linux/smpboot.h>
-#include <linux/interrupt.h>
 #include <asm/processor.h>
 
 
 static DEFINE_PER_CPU(struct llist_head, raised_list);
 static DEFINE_PER_CPU(struct llist_head, lazy_list);
-static DEFINE_PER_CPU(struct task_struct *, irq_workd);
-
-static void wake_irq_workd(void)
-{
-	struct task_struct *tsk = __this_cpu_read(irq_workd);
-
-	if (!llist_empty(this_cpu_ptr(&lazy_list)) && tsk)
-		wake_up_process(tsk);
-}
-
-#ifdef CONFIG_SMP
-static void irq_work_wake(struct irq_work *entry)
-{
-	wake_irq_workd();
-}
-
-static DEFINE_PER_CPU(struct irq_work, irq_work_wakeup) =
-	IRQ_WORK_INIT_HARD(irq_work_wake);
-#endif
-
-static int irq_workd_should_run(unsigned int cpu)
-{
-	return !llist_empty(this_cpu_ptr(&lazy_list));
-}
 
 /*
  * Claim the entry so that no one else will poke at it.
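For orientation before the next hunk: claiming an item is a single atomic fetch-or on the flags word. A reduced sketch of the claim idiom used by irq_work_claim() in this era of the file (assuming the atomic_t flags field visible in this diff and the IRQ_WORK_CLAIMED constant from <linux/irq_work.h>; the function name is illustrative):

```c
#include <linux/irq_work.h>

/*
 * Reduced sketch: set PENDING|BUSY in one shot. The claim succeeded
 * only if PENDING was not already set by another claimant; a pending
 * item will be (re)run anyway, so losing the race is not an error.
 */
static bool sketch_claim(struct irq_work *work)
{
	int oflags = atomic_fetch_or(IRQ_WORK_CLAIMED, &work->flags);

	return !(oflags & IRQ_WORK_PENDING);
}
```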
@@ -78,29 +52,15 @@
 /* Enqueue on current CPU, work must already be claimed and preempt disabled */
 static void __irq_work_queue_local(struct irq_work *work)
 {
-	struct llist_head *list;
-	bool rt_lazy_work = false;
-	bool lazy_work = false;
-	int work_flags;
-
-	work_flags = atomic_read(&work->flags);
-	if (work_flags & IRQ_WORK_LAZY)
-		lazy_work = true;
-	else if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
-		 !(work_flags & IRQ_WORK_HARD_IRQ))
-		rt_lazy_work = true;
-
-	if (lazy_work || rt_lazy_work)
-		list = this_cpu_ptr(&lazy_list);
-	else
-		list = this_cpu_ptr(&raised_list);
-
-	if (!llist_add(&work->llnode, list))
-		return;
-
 	/* If the work is "lazy", handle it from next tick if any */
-	if (!lazy_work || tick_nohz_tick_stopped())
-		arch_irq_work_raise();
+	if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {
+		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+		    tick_nohz_tick_stopped())
+			arch_irq_work_raise();
+	} else {
+		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+			arch_irq_work_raise();
+	}
 }
 
 /* Enqueue the irq work @work on the current CPU */
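The restored branches lean on an llist guarantee worth spelling out: llist_add() returns true only when the list was empty before the add, so exactly one enqueuer per batch pays for arch_irq_work_raise(). A reduced sketch of the idiom (the function and its signal parameter are illustrative):

```c
#include <linux/llist.h>

/*
 * Only the enqueuer that transitions the list from empty to non-empty
 * signals the consumer; later enqueuers piggyback on the pending raise.
 */
static void add_and_maybe_signal(struct llist_head *list,
				 struct llist_node *node,
				 void (*signal)(void))
{
	if (llist_add(node, list))
		signal();
}
```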
@@ -142,28 +102,10 @@
 	if (cpu != smp_processor_id()) {
 		/* Arch remote IPI send/receive backend aren't NMI safe */
 		WARN_ON_ONCE(in_nmi());
-
-		/*
-		 * On PREEMPT_RT the items which are not marked as
-		 * IRQ_WORK_HARD_IRQ are added to the lazy list and a HARD work
-		 * item is used on the remote CPU to wake the thread.
-		 */
-		if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
-		    !(atomic_read(&work->flags) & IRQ_WORK_HARD_IRQ)) {
-
-			if (!llist_add(&work->llnode, &per_cpu(lazy_list, cpu)))
-				goto out;
-
-			work = &per_cpu(irq_work_wakeup, cpu);
-			if (!irq_work_claim(work))
-				goto out;
-		}
-
 		__smp_call_single_queue(cpu, &work->llnode);
 	} else {
 		__irq_work_queue_local(work);
 	}
-out:
 	preempt_enable();
 
 	return true;
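The remote branch above is what backs irq_work_queue_on(). A minimal caller, noting the claim semantics (the work item and pr_debug message are illustrative; assumes the item was initialized elsewhere with init_irq_work()):

```c
#include <linux/irq_work.h>
#include <linux/printk.h>

static struct irq_work remote_work;	/* init_irq_work() done elsewhere */

static void kick_cpu(int cpu)
{
	/*
	 * Returns false when the item was already claimed (pending);
	 * the earlier queueing will run it, so this is not an error.
	 */
	if (!irq_work_queue_on(&remote_work, cpu))
		pr_debug("irq_work already pending\n");
}
```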
@@ -178,8 +120,9 @@
 	raised = this_cpu_ptr(&raised_list);
 	lazy = this_cpu_ptr(&lazy_list);
 
-	if (llist_empty(raised) && llist_empty(lazy))
-		return false;
+	if (llist_empty(raised) || arch_irq_work_has_interrupt())
+		if (llist_empty(lazy))
+			return false;
 
 	/* All work should have been flushed before going offline */
 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
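The restored nested ifs are equivalent to a single condition: the tick may be stopped only when the lazy list is empty and the raised list is either empty or will be drained by a dedicated irq_work interrupt rather than by the tick. An equivalent single-expression form, for readability only (not a proposed change):

```c
	/* Equivalent form of the restored check in irq_work_needs_cpu(). */
	if (llist_empty(lazy) &&
	    (llist_empty(raised) || arch_irq_work_has_interrupt()))
		return false;	/* no reason to keep the tick alive */
```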
@@ -210,10 +153,6 @@
 	 */
 	flags &= ~IRQ_WORK_PENDING;
 	(void)atomic_cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
-
-	if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
-	    !arch_irq_work_has_interrupt())
-		rcuwait_wake_up(&work->irqwait);
 }
 
 static void irq_work_run_list(struct llist_head *list)
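A note on the PENDING/BUSY dance that survives this hunk: PENDING is cleared before the callback is invoked, so a callback may legally re-queue its own work item, while BUSY stays set until the cmpxchg above so irq_work_sync() has something to wait on. A sketch of the self-rearming pattern this enables (all names are illustrative):

```c
#include <linux/atomic.h>
#include <linux/irq_work.h>

static struct irq_work poll_work;
static atomic_t budget = ATOMIC_INIT(4);

/*
 * Illustrative self-rearming callback: legal because IRQ_WORK_PENDING
 * is cleared before the callback runs, so this queue attempt claims
 * the item afresh instead of being dropped as already-pending.
 */
static void poll_fn(struct irq_work *work)
{
	if (atomic_dec_return(&budget) > 0)
		irq_work_queue(work);
}

static void poll_start(void)
{
	init_irq_work(&poll_work, poll_fn);
	irq_work_queue(&poll_work);
}
```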
@@ -221,12 +160,7 @@
 	struct irq_work *work, *tmp;
 	struct llist_node *llnode;
 
-	/*
-	 * On PREEMPT_RT IRQ-work which is not marked as HARD will be processed
-	 * in a per-CPU thread in preemptible context. Only the items which are
-	 * marked as IRQ_WORK_HARD_IRQ will be processed in hardirq context.
-	 */
-	BUG_ON(!irqs_disabled() && !IS_ENABLED(CONFIG_PREEMPT_RT));
+	BUG_ON(!irqs_disabled());
 
 	if (llist_empty(list))
 		return;
@@ -243,10 +177,7 @@
 void irq_work_run(void)
 {
 	irq_work_run_list(this_cpu_ptr(&raised_list));
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
-		irq_work_run_list(this_cpu_ptr(&lazy_list));
-	else
-		wake_irq_workd();
+	irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
 EXPORT_SYMBOL_GPL(irq_work_run);
 
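With the wakeup thread gone, irq_work_run() is again the whole story for the interrupt path: the handler behind arch_irq_work_raise() is expected to call it and nothing else irq_work-specific. A hypothetical arch-glue sketch (the handler name is illustrative, not a real arch entry point):

```c
#include <linux/irq_work.h>

/*
 * Hypothetical: invoked, with IRQs disabled, by the self-IPI that
 * arch_irq_work_raise() triggers on this architecture.
 */
static void handle_irq_work_ipi(void)
{
	irq_work_run();		/* drains raised_list, then lazy_list */
}
```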
@@ -256,11 +187,7 @@
 
 	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
 		irq_work_run_list(raised);
-
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
-		irq_work_run_list(this_cpu_ptr(&lazy_list));
-	else
-		wake_irq_workd();
+	irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
 
 /*
@@ -270,42 +197,8 @@
 void irq_work_sync(struct irq_work *work)
 {
 	lockdep_assert_irqs_enabled();
-	might_sleep();
-
-	if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
-	    !arch_irq_work_has_interrupt()) {
-		rcuwait_wait_event(&work->irqwait, !irq_work_is_busy(work),
-				   TASK_UNINTERRUPTIBLE);
-		return;
-	}
 
 	while (atomic_read(&work->flags) & IRQ_WORK_BUSY)
 		cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);
-
-static void run_irq_workd(unsigned int cpu)
-{
-	irq_work_run_list(this_cpu_ptr(&lazy_list));
-}
-
-static void irq_workd_setup(unsigned int cpu)
-{
-	sched_set_fifo_low(current);
-}
-
-static struct smp_hotplug_thread irqwork_threads = {
-	.store			= &irq_workd,
-	.setup			= irq_workd_setup,
-	.thread_should_run	= irq_workd_should_run,
-	.thread_fn		= run_irq_workd,
-	.thread_comm		= "irq_work/%u",
-};
-
-static __init int irq_work_init_threads(void)
-{
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		BUG_ON(smpboot_register_percpu_thread(&irqwork_threads));
-	return 0;
-}
-early_initcall(irq_work_init_threads);
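With the rcuwait path reverted, irq_work_sync() is a plain busy-wait again, which is why it asserts IRQs enabled rather than calling might_sleep(). The typical use is fencing an in-flight callback before freeing its context (types and names are illustrative):

```c
#include <linux/irq_work.h>
#include <linux/slab.h>

struct my_ctx {				/* illustrative container */
	struct irq_work work;
	void *payload;
};

static void my_ctx_free(struct my_ctx *ctx)
{
	/*
	 * Spins until IRQ_WORK_BUSY clears, i.e. until any in-flight
	 * callback has finished touching @ctx. IRQs must be enabled.
	 */
	irq_work_sync(&ctx->work);
	kfree(ctx);
}
```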