```diff
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
  *
```

```diff
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
-#include <linux/interrupt.h>
 #include <asm/processor.h>


```
```diff
  */
 static bool irq_work_claim(struct irq_work *work)
 {
-        unsigned long flags, oflags, nflags;
+        int oflags;

+        oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->flags);
         /*
-         * Start with our best wish as a premise but only trust any
-         * flag value after cmpxchg() result.
+         * If the work is already pending, no need to raise the IPI.
+         * The pairing atomic_fetch_andnot() in irq_work_run() makes sure
+         * everything we did before is visible.
          */
-        flags = work->flags & ~IRQ_WORK_PENDING;
-        for (;;) {
-                nflags = flags | IRQ_WORK_CLAIMED;
-                oflags = cmpxchg(&work->flags, flags, nflags);
-                if (oflags == flags)
-                        break;
-                if (oflags & IRQ_WORK_PENDING)
-                        return false;
-                flags = oflags;
-                cpu_relax();
-        }
-
+        if (oflags & IRQ_WORK_PENDING)
+                return false;
         return true;
 }

```
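The rewritten irq_work_claim() above drops the cmpxchg() retry loop in favour of a single atomic_fetch_or(): whichever CPU still sees IRQ_WORK_PENDING clear in the returned old value owns the enqueue, everyone else backs off. Below is a minimal userspace sketch of that claim pattern, using C11 atomics and illustrative flag values rather than the kernel's definitions (the extra CSD_TYPE_IRQ_WORK bit, which tags the entry for the generic SMP queue, is left out here):

```c
/* Claim sketch: one atomic OR decides ownership, no retry loop needed. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define WORK_PENDING    (1 << 0)        /* queued, callback not started yet */
#define WORK_BUSY       (1 << 1)        /* callback may still be running */
#define WORK_CLAIMED    (WORK_PENDING | WORK_BUSY)

struct sketch_work {
        atomic_int flags;
};

/* Mirrors irq_work_claim(): the caller that saw PENDING clear wins. */
static bool sketch_claim(struct sketch_work *work)
{
        int oflags = atomic_fetch_or(&work->flags, WORK_CLAIMED);

        return !(oflags & WORK_PENDING);
}

int main(void)
{
        struct sketch_work w = { .flags = 0 };

        printf("first claim:  %d\n", sketch_claim(&w));  /* 1: we get to queue it */
        printf("second claim: %d\n", sketch_claim(&w));  /* 0: already pending */
        return 0;
}
```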
```diff
 }

 /* Enqueue on current CPU, work must already be claimed and preempt disabled */
-static void __irq_work_queue_local(struct irq_work *work, struct llist_head *list)
+static void __irq_work_queue_local(struct irq_work *work)
 {
-        bool empty;
-
-        empty = llist_add(&work->llnode, list);
-
-        if (empty &&
-            (!(work->flags & IRQ_WORK_LAZY) ||
-            tick_nohz_tick_stopped()))
-                arch_irq_work_raise();
-}
-
-static inline bool use_lazy_list(struct irq_work *work)
-{
-        return (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
-                || (work->flags & IRQ_WORK_LAZY);
+        /* If the work is "lazy", handle it from next tick if any */
+        if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {
+                if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+                    tick_nohz_tick_stopped())
+                        arch_irq_work_raise();
+        } else {
+                if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+                        arch_irq_work_raise();
+        }
 }

 /* Enqueue the irq work @work on the current CPU */
 bool irq_work_queue(struct irq_work *work)
 {
-        struct llist_head *list;
-
         /* Only queue if not already pending */
         if (!irq_work_claim(work))
                 return false;

         /* Queue the entry and raise the IPI if needed. */
         preempt_disable();
-        if (use_lazy_list(work))
-                list = this_cpu_ptr(&lazy_list);
-        else
-                list = this_cpu_ptr(&raised_list);
-        __irq_work_queue_local(work, list);
+        __irq_work_queue_local(work);
         preempt_enable();

         return true;
```
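Taken together, irq_work_queue() above claims the work, puts it on the raised or lazy per-CPU llist, and raises the self-IPI only when the list was previously empty. A rough in-kernel usage sketch of that entry point follows; the my_* names are hypothetical and only the irq_work calls are real API:

```c
#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/smp.h>

static struct irq_work my_work;

/* Runs with interrupts disabled, from the arch_irq_work_raise() IPI or the tick. */
static void my_work_func(struct irq_work *work)
{
        pr_info("irq_work ran on CPU %d\n", smp_processor_id());
}

static void my_setup(void)
{
        init_irq_work(&my_work, my_work_func);
}

/* Callable from contexts that cannot sleep or take sleeping locks (e.g. NMI). */
static void my_trigger(void)
{
        /* Returns false if @my_work was already pending; queues it otherwise. */
        irq_work_queue(&my_work);
}
```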
```diff
         return irq_work_queue(work);

 #else /* CONFIG_SMP: */
-        struct llist_head *list;
-
         /* All work should have been flushed before going offline */
         WARN_ON_ONCE(cpu_is_offline(cpu));

```

```diff
                 return false;

         preempt_disable();
-        if (use_lazy_list(work))
-                list = &per_cpu(lazy_list, cpu);
-        else
-                list = &per_cpu(raised_list, cpu);
-
         if (cpu != smp_processor_id()) {
                 /* Arch remote IPI send/receive backend aren't NMI safe */
                 WARN_ON_ONCE(in_nmi());
-                if (llist_add(&work->llnode, list))
-                        arch_send_call_function_single_ipi(cpu);
+                __smp_call_single_queue(cpu, &work->llnode);
         } else {
-                __irq_work_queue_local(work, list);
+                __irq_work_queue_local(work);
         }
         preempt_enable();

         return true;
 #endif /* CONFIG_SMP */
 }
-
+EXPORT_SYMBOL_GPL(irq_work_queue_on);

 bool irq_work_needs_cpu(void)
 {
```

```diff
         raised = this_cpu_ptr(&raised_list);
         lazy = this_cpu_ptr(&lazy_list);

-        if (llist_empty(raised) && llist_empty(lazy))
-                return false;
+        if (llist_empty(raised) || arch_irq_work_has_interrupt())
+                if (llist_empty(lazy))
+                        return false;

         /* All work should have been flushed before going offline */
         WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
```
```diff
         return true;
 }

+void irq_work_single(void *arg)
+{
+        struct irq_work *work = arg;
+        int flags;
+
+        /*
+         * Clear the PENDING bit, after this point the @work
+         * can be re-used.
+         * Make it immediately visible so that other CPUs trying
+         * to claim that work don't rely on us to handle their data
+         * while we are in the middle of the func.
+         */
+        flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->flags);
+
+        lockdep_irq_work_enter(work);
+        work->func(work);
+        lockdep_irq_work_exit(work);
+        /*
+         * Clear the BUSY bit and return to the free state if
+         * no-one else claimed it meanwhile.
+         */
+        flags &= ~IRQ_WORK_PENDING;
+        (void)atomic_cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
+}
+
 static void irq_work_run_list(struct llist_head *list)
 {
         struct irq_work *work, *tmp;
         struct llist_node *llnode;
-        unsigned long flags;

-#ifndef CONFIG_PREEMPT_RT_FULL
-        /*
-         * nort: On RT IRQ-work may run in SOFTIRQ context.
-         */
         BUG_ON(!irqs_disabled());
-#endif
+
         if (llist_empty(list))
                 return;

         llnode = llist_del_all(list);
-        llist_for_each_entry_safe(work, tmp, llnode, llnode) {
-                /*
-                 * Clear the PENDING bit, after this point the @work
-                 * can be re-used.
-                 * Make it immediately visible so that other CPUs trying
-                 * to claim that work don't rely on us to handle their data
-                 * while we are in the middle of the func.
-                 */
-                flags = work->flags & ~IRQ_WORK_PENDING;
-                xchg(&work->flags, flags);
-
-                work->func(work);
-                /*
-                 * Clear the BUSY bit and return to the free state if
-                 * no-one else claimed it meanwhile.
-                 */
-                (void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
-        }
+        llist_for_each_entry_safe(work, tmp, llnode, llnode)
+                irq_work_single(work);
 }

 /*
```
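irq_work_single() above is the old loop body split out per entry, with a void * argument so the generic SMP code can dispatch a queued entry directly: PENDING is dropped before the callback runs, which allows the work to be re-queued from inside its own callback, and BUSY is dropped afterwards only if nobody re-claimed it in the meantime. Continuing the earlier userspace sketch (same struct and illustrative flag bits); C11 has no fetch_andnot(), so atomic_fetch_and() with the complemented mask stands in for the kernel's atomic_fetch_andnot():

```c
/* Execute-and-release sketch, mirroring irq_work_single(). */
void sketch_run(struct sketch_work *work, void (*func)(struct sketch_work *))
{
        /* Drop PENDING first: the work may be re-claimed while func runs. */
        int flags = atomic_fetch_and(&work->flags, ~WORK_PENDING);

        func(work);

        /*
         * Return to the free state only if the flags are still what we left
         * them at; if the work was re-claimed while func ran, the compare-
         * exchange fails and the fresh claim is left intact.
         */
        flags &= ~WORK_PENDING;
        atomic_compare_exchange_strong(&work->flags, &flags, flags & ~WORK_BUSY);
}
```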
```diff
 void irq_work_run(void)
 {
         irq_work_run_list(this_cpu_ptr(&raised_list));
-        if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) {
-                /*
-                 * NOTE: we raise softirq via IPI for safety,
-                 * and execute in irq_work_tick() to move the
-                 * overhead from hard to soft irq context.
-                 */
-                if (!llist_empty(this_cpu_ptr(&lazy_list)))
-                        raise_softirq(TIMER_SOFTIRQ);
-        } else
-                irq_work_run_list(this_cpu_ptr(&lazy_list));
+        irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
 EXPORT_SYMBOL_GPL(irq_work_run);

```

```diff

         if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
                 irq_work_run_list(raised);
-
-        if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
-                irq_work_run_list(this_cpu_ptr(&lazy_list));
-}
-
-#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
-void irq_work_tick_soft(void)
-{
         irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
-#endif

 /*
  * Synchronize against the irq_work @entry, ensures the entry is not
```

```diff
 {
         lockdep_assert_irqs_enabled();

-        while (work->flags & IRQ_WORK_BUSY)
+        while (atomic_read(&work->flags) & IRQ_WORK_BUSY)
                 cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);
```