```diff
@@ -17,6 +17,7 @@
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
+#include <linux/interrupt.h>
 #include <asm/processor.h>
 
 
```

```diff
@@ -57,29 +58,40 @@
 }
 
 /* Enqueue on current CPU, work must already be claimed and preempt disabled */
-static void __irq_work_queue_local(struct irq_work *work)
+static void __irq_work_queue_local(struct irq_work *work, struct llist_head *list)
 {
-	/* If the work is "lazy", handle it from next tick if any */
-	if (work->flags & IRQ_WORK_LAZY) {
-		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
-		    tick_nohz_tick_stopped())
-			arch_irq_work_raise();
-	} else {
-		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
-			arch_irq_work_raise();
-	}
+	bool empty;
+
+	empty = llist_add(&work->llnode, list);
+
+	if (empty &&
+	    (!(work->flags & IRQ_WORK_LAZY) ||
+	     tick_nohz_tick_stopped()))
+		arch_irq_work_raise();
+}
+
+static inline bool use_lazy_list(struct irq_work *work)
+{
+	return (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
+		|| (work->flags & IRQ_WORK_LAZY);
 }
 
 /* Enqueue the irq work @work on the current CPU */
 bool irq_work_queue(struct irq_work *work)
 {
+	struct llist_head *list;
+
 	/* Only queue if not already pending */
 	if (!irq_work_claim(work))
 		return false;
 
 	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
-	__irq_work_queue_local(work);
+	if (use_lazy_list(work))
+		list = this_cpu_ptr(&lazy_list);
+	else
+		list = this_cpu_ptr(&raised_list);
+	__irq_work_queue_local(work, list);
 	preempt_enable();
 
 	return true;
```

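For context, a minimal sketch of a caller that wants to stay on the hard-IRQ path even with PREEMPT_RT_FULL. This is not part of the patch; the callback and work-item names are made up, and it assumes the RT-patched `<linux/irq_work.h>`, which defines IRQ_WORK_HARD_IRQ. Setting that flag makes use_lazy_list() return false, so the item is queued on raised_list and arch_irq_work_raise() fires the IPI as before.

```c
#include <linux/irq_work.h>

/* Hypothetical callback: runs from the irq_work IPI, so it must not sleep. */
static void my_hard_cb(struct irq_work *work)
{
	/* e.g. poke an NMI-safe bookkeeping path */
}

static struct irq_work my_hard_work = {
	.flags = IRQ_WORK_HARD_IRQ,	/* RT: keep on raised_list, not lazy_list */
	.func  = my_hard_cb,
};

static void my_trigger(void)
{
	/* use_lazy_list() is false here, so the hard-IRQ path is raised */
	irq_work_queue(&my_hard_work);
}
```
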
```diff
@@ -98,6 +110,8 @@
 	return irq_work_queue(work);
 
 #else /* CONFIG_SMP: */
+	struct llist_head *list;
+
 	/* All work should have been flushed before going offline */
 	WARN_ON_ONCE(cpu_is_offline(cpu));
 
```

```diff
@@ -106,13 +120,18 @@
 		return false;
 
 	preempt_disable();
+	if (use_lazy_list(work))
+		list = &per_cpu(lazy_list, cpu);
+	else
+		list = &per_cpu(raised_list, cpu);
+
 	if (cpu != smp_processor_id()) {
 		/* Arch remote IPI send/receive backend aren't NMI safe */
 		WARN_ON_ONCE(in_nmi());
-		if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+		if (llist_add(&work->llnode, list))
 			arch_send_call_function_single_ipi(cpu);
 	} else {
-		__irq_work_queue_local(work);
+		__irq_work_queue_local(work, list);
 	}
 	preempt_enable();
 
```

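The remote path now uses the same routing. A hedged sketch of a caller (names hypothetical) queueing work onto another CPU; with this hunk the entry lands on that CPU's lazy_list or raised_list according to use_lazy_list(), rather than always on raised_list.

```c
#include <linux/irq_work.h>

static void my_remote_cb(struct irq_work *work)
{
	/* Runs on the target CPU: from the IPI, or from the tick/softirq if lazy. */
}

static struct irq_work my_remote_work = {
	.func = my_remote_cb,
};

static void kick_cpu(int cpu)
{
	/* Returns false if the work was still pending from an earlier queue. */
	irq_work_queue_on(&my_remote_work, cpu);
}
```
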
```diff
@@ -128,9 +147,8 @@
 	raised = this_cpu_ptr(&raised_list);
 	lazy = this_cpu_ptr(&lazy_list);
 
-	if (llist_empty(raised) || arch_irq_work_has_interrupt())
-		if (llist_empty(lazy))
-			return false;
+	if (llist_empty(raised) && llist_empty(lazy))
+		return false;
 
 	/* All work should have been flushed before going offline */
 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
```

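The simplified condition matters because irq_work_needs_cpu() is what the NOHZ code consults before stopping the tick; with RT-deferred work also sitting on lazy_list, the tick has to stay alive whenever either list is non-empty. A rough, paraphrased sketch of that consumer follows (the helper name is hypothetical; the real check lives in kernel/time/tick-sched.c).

```c
#include <linux/irq_work.h>

/* Hypothetical condensation of the nohz-idle decision, not part of this diff. */
static bool can_stop_tick(void)
{
	if (irq_work_needs_cpu())
		return false;	/* raised_list or lazy_list still has entries */
	/* ... plus the RCU, arch and timer checks done by tick-sched ... */
	return true;
}
```
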
```diff
@@ -144,8 +162,12 @@
 	struct llist_node *llnode;
 	unsigned long flags;
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+	/*
+	 * nort: On RT IRQ-work may run in SOFTIRQ context.
+	 */
 	BUG_ON(!irqs_disabled());
-
+#endif
 	if (llist_empty(list))
 		return;
 
```

```diff
@@ -177,7 +199,16 @@
 void irq_work_run(void)
 {
 	irq_work_run_list(this_cpu_ptr(&raised_list));
-	irq_work_run_list(this_cpu_ptr(&lazy_list));
+	if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) {
+		/*
+		 * NOTE: we raise softirq via IPI for safety,
+		 * and execute in irq_work_tick() to move the
+		 * overhead from hard to soft irq context.
+		 */
+		if (!llist_empty(this_cpu_ptr(&lazy_list)))
+			raise_softirq(TIMER_SOFTIRQ);
+	} else
+		irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
 EXPORT_SYMBOL_GPL(irq_work_run);
 
```

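To illustrate the new RT path end to end, a hedged sketch of a lazy user (names made up): the item goes onto lazy_list, irq_work_run() only raises TIMER_SOFTIRQ, and the callback eventually runs from irq_work_tick_soft() in softirq context, which is preemptible on RT.

```c
#include <linux/irq_work.h>

static void my_lazy_cb(struct irq_work *work)
{
	/* With PREEMPT_RT_FULL this now runs in softirq, not hard-IRQ, context. */
}

static struct irq_work my_lazy_work = {
	.flags = IRQ_WORK_LAZY,
	.func  = my_lazy_cb,
};

static void note_event(void)
{
	/* Goes onto lazy_list; no IPI is raised unless the tick is stopped. */
	irq_work_queue(&my_lazy_work);
}
```
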
```diff
@@ -187,8 +218,17 @@
 
 	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
 		irq_work_run_list(raised);
+
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
+		irq_work_run_list(this_cpu_ptr(&lazy_list));
+}
+
+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
+void irq_work_tick_soft(void)
+{
 	irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
+#endif
 
 /*
  * Synchronize against the irq_work @entry, ensures the entry is not
```

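Nothing in this hunk calls irq_work_tick_soft(); in the RT series it is driven from the timer softirq. A sketch of what that companion hook in kernel/time/timer.c looks like, reconstructed from memory of the RT patch set, so the exact form may differ:

```c
/* kernel/time/timer.c, RT series (sketch only) */
static __latent_entropy void run_timer_softirq(struct softirq_action *h)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	irq_work_tick_soft();	/* drain lazy_list in softirq context on RT */

	__run_timers(base);
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
}
```
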
|---|