2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/kernel/irq_work.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
  *
@@ -17,7 +18,6 @@
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
-#include <linux/interrupt.h>
 #include <asm/processor.h>
 
 
@@ -29,24 +29,16 @@
  */
 static bool irq_work_claim(struct irq_work *work)
 {
-	unsigned long flags, oflags, nflags;
+	int oflags;
 
+	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->flags);
 	/*
-	 * Start with our best wish as a premise but only trust any
-	 * flag value after cmpxchg() result.
+	 * If the work is already pending, no need to raise the IPI.
+	 * The pairing atomic_fetch_andnot() in irq_work_run() makes sure
+	 * everything we did before is visible.
 	 */
-	flags = work->flags & ~IRQ_WORK_PENDING;
-	for (;;) {
-		nflags = flags | IRQ_WORK_CLAIMED;
-		oflags = cmpxchg(&work->flags, flags, nflags);
-		if (oflags == flags)
-			break;
-		if (oflags & IRQ_WORK_PENDING)
-			return false;
-		flags = oflags;
-		cpu_relax();
-	}
-
+	if (oflags & IRQ_WORK_PENDING)
+		return false;
 	return true;
 }
 
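Note (illustrative, not part of the patch): the removed cmpxchg() retry loop is collapsed into a single atomic_fetch_or(). Below is a minimal user-space model of that claim step, using C11 atomics in place of the kernel's atomic_t helpers; the model_* names are hypothetical.

	#include <stdatomic.h>
	#include <stdbool.h>

	#define MODEL_PENDING	(1u << 0)	/* mirrors IRQ_WORK_PENDING */
	#define MODEL_BUSY	(1u << 1)	/* mirrors IRQ_WORK_BUSY */
	#define MODEL_CLAIMED	(MODEL_PENDING | MODEL_BUSY)

	struct model_work {
		atomic_uint flags;
	};

	/* One unconditional RMW: whoever finds PENDING clear in the returned
	 * old value wins the claim and must enqueue; everyone else backs off. */
	static bool model_claim(struct model_work *w)
	{
		unsigned int oflags = atomic_fetch_or(&w->flags, MODEL_CLAIMED);

		return !(oflags & MODEL_PENDING);
	}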
@@ -58,40 +50,29 @@
 }
 
 /* Enqueue on current CPU, work must already be claimed and preempt disabled */
-static void __irq_work_queue_local(struct irq_work *work, struct llist_head *list)
+static void __irq_work_queue_local(struct irq_work *work)
 {
-	bool empty;
-
-	empty = llist_add(&work->llnode, list);
-
-	if (empty &&
-	    (!(work->flags & IRQ_WORK_LAZY) ||
-	     tick_nohz_tick_stopped()))
-		arch_irq_work_raise();
-}
-
-static inline bool use_lazy_list(struct irq_work *work)
-{
-	return (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
-		|| (work->flags & IRQ_WORK_LAZY);
+	/* If the work is "lazy", handle it from next tick if any */
+	if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {
+		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+		    tick_nohz_tick_stopped())
+			arch_irq_work_raise();
+	} else {
+		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+			arch_irq_work_raise();
+	}
 }
 
 /* Enqueue the irq work @work on the current CPU */
 bool irq_work_queue(struct irq_work *work)
 {
-	struct llist_head *list;
-
 	/* Only queue if not already pending */
 	if (!irq_work_claim(work))
 		return false;
 
 	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
-	if (use_lazy_list(work))
-		list = this_cpu_ptr(&lazy_list);
-	else
-		list = this_cpu_ptr(&raised_list);
-	__irq_work_queue_local(work, list);
+	__irq_work_queue_local(work);
 	preempt_enable();
 
 	return true;
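Note (illustrative, not part of the patch): dropping use_lazy_list() does not change the caller side. A hypothetical user of the unchanged irq_work_queue() API, deferring a printk() out of NMI context:

	#include <linux/irq_work.h>
	#include <linux/printk.h>

	static void my_deferred_func(struct irq_work *work)
	{
		/* Runs later in hard IRQ context, where printk() is safe. */
		pr_info("irq_work handler ran\n");
	}

	static DEFINE_IRQ_WORK(my_work, my_deferred_func);

	static void my_nmi_path(void)
	{
		/* NMI-safe: only claims the work and raises the IPI if needed. */
		irq_work_queue(&my_work);
	}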
@@ -110,8 +91,6 @@
 	return irq_work_queue(work);
 
 #else /* CONFIG_SMP: */
-	struct llist_head *list;
-
 	/* All work should have been flushed before going offline */
 	WARN_ON_ONCE(cpu_is_offline(cpu));
 
@@ -120,25 +99,19 @@
 		return false;
 
 	preempt_disable();
-	if (use_lazy_list(work))
-		list = &per_cpu(lazy_list, cpu);
-	else
-		list = &per_cpu(raised_list, cpu);
-
 	if (cpu != smp_processor_id()) {
 		/* Arch remote IPI send/receive backend aren't NMI safe */
 		WARN_ON_ONCE(in_nmi());
-		if (llist_add(&work->llnode, list))
-			arch_send_call_function_single_ipi(cpu);
+		__smp_call_single_queue(cpu, &work->llnode);
 	} else {
-		__irq_work_queue_local(work, list);
+		__irq_work_queue_local(work);
 	}
 	preempt_enable();
 
 	return true;
 #endif /* CONFIG_SMP */
 }
-
+EXPORT_SYMBOL_GPL(irq_work_queue_on);
 
 bool irq_work_needs_cpu(void)
 {
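Note (illustrative, not part of the patch): the remote branch now funnels through the generic __smp_call_single_queue() path instead of open-coding llist_add() plus the IPI, and the new EXPORT_SYMBOL_GPL() makes irq_work_queue_on() usable from modules. A hypothetical caller targeting a specific CPU:

	#include <linux/irq_work.h>

	static void remote_func(struct irq_work *work)
	{
		/* Executes in hard IRQ context on the CPU it was queued on. */
	}

	static DEFINE_IRQ_WORK(remote_work, remote_func);

	static bool my_poke_cpu(int cpu)
	{
		/* Returns false if the work was still pending from an earlier queue. */
		return irq_work_queue_on(&remote_work, cpu);
	}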
@@ -147,8 +120,9 @@
 	raised = this_cpu_ptr(&raised_list);
 	lazy = this_cpu_ptr(&lazy_list);
 
-	if (llist_empty(raised) && llist_empty(lazy))
-		return false;
+	if (llist_empty(raised) || arch_irq_work_has_interrupt())
+		if (llist_empty(lazy))
+			return false;
 
 	/* All work should have been flushed before going offline */
 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
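Note: the nested if above keeps the tick when the lazy list is non-empty, or when the raised list is non-empty and the arch has no irq_work interrupt to flush it from; it is equivalent to the single condition:

	if ((llist_empty(raised) || arch_irq_work_has_interrupt()) &&
	    llist_empty(lazy))
		return false;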
@@ -156,40 +130,44 @@
 	return true;
 }
 
+void irq_work_single(void *arg)
+{
+	struct irq_work *work = arg;
+	int flags;
+
+	/*
+	 * Clear the PENDING bit, after this point the @work
+	 * can be re-used.
+	 * Make it immediately visible so that other CPUs trying
+	 * to claim that work don't rely on us to handle their data
+	 * while we are in the middle of the func.
+	 */
+	flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->flags);
+
+	lockdep_irq_work_enter(work);
+	work->func(work);
+	lockdep_irq_work_exit(work);
+	/*
+	 * Clear the BUSY bit and return to the free state if
+	 * no-one else claimed it meanwhile.
+	 */
+	flags &= ~IRQ_WORK_PENDING;
+	(void)atomic_cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
+}
+
 static void irq_work_run_list(struct llist_head *list)
 {
 	struct irq_work *work, *tmp;
 	struct llist_node *llnode;
-	unsigned long flags;
 
-#ifndef CONFIG_PREEMPT_RT_FULL
-	/*
-	 * nort: On RT IRQ-work may run in SOFTIRQ context.
-	 */
 	BUG_ON(!irqs_disabled());
-#endif
+
 	if (llist_empty(list))
 		return;
 
 	llnode = llist_del_all(list);
-	llist_for_each_entry_safe(work, tmp, llnode, llnode) {
-		/*
-		 * Clear the PENDING bit, after this point the @work
-		 * can be re-used.
-		 * Make it immediately visible so that other CPUs trying
-		 * to claim that work don't rely on us to handle their data
-		 * while we are in the middle of the func.
-		 */
-		flags = work->flags & ~IRQ_WORK_PENDING;
-		xchg(&work->flags, flags);
-
-		work->func(work);
-		/*
-		 * Clear the BUSY bit and return to the free state if
-		 * no-one else claimed it meanwhile.
-		 */
-		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
-	}
+	llist_for_each_entry_safe(work, tmp, llnode, llnode)
+		irq_work_single(work);
 }
 
 /*
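Note (illustrative, not part of the patch): irq_work_single() is the release side of the protocol started by irq_work_claim(). Continuing the hypothetical model_* sketch from the claim note, in C11 atomics:

	/* Pairs with model_claim(): drop PENDING before running the callback so
	 * the work can be re-queued from within the callback, then clear BUSY
	 * only if nobody re-claimed the work in the meantime. */
	static void model_run(struct model_work *w, void (*func)(struct model_work *))
	{
		unsigned int flags;

		flags = atomic_fetch_and(&w->flags, ~MODEL_PENDING);
		func(w);

		flags &= ~MODEL_PENDING;
		(void)atomic_compare_exchange_strong(&w->flags, &flags,
						     flags & ~MODEL_BUSY);
	}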
@@ -199,16 +177,7 @@
 void irq_work_run(void)
 {
 	irq_work_run_list(this_cpu_ptr(&raised_list));
-	if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) {
-		/*
-		 * NOTE: we raise softirq via IPI for safety,
-		 * and execute in irq_work_tick() to move the
-		 * overhead from hard to soft irq context.
-		 */
-		if (!llist_empty(this_cpu_ptr(&lazy_list)))
-			raise_softirq(TIMER_SOFTIRQ);
-	} else
-		irq_work_run_list(this_cpu_ptr(&lazy_list));
+	irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
 EXPORT_SYMBOL_GPL(irq_work_run);
 
@@ -218,17 +187,8 @@
 
 	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
 		irq_work_run_list(raised);
-
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
-		irq_work_run_list(this_cpu_ptr(&lazy_list));
-}
-
-#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
-void irq_work_tick_soft(void)
-{
 	irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
-#endif
 
 /*
  * Synchronize against the irq_work @entry, ensures the entry is not
@@ -238,7 +198,7 @@
 {
 	lockdep_assert_irqs_enabled();
 
-	while (work->flags & IRQ_WORK_BUSY)
+	while (atomic_read(&work->flags) & IRQ_WORK_BUSY)
 		cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);
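Note (illustrative, not part of the patch): irq_work_sync() now polls the atomic flags word, but its usage is unchanged. A hypothetical teardown path for a dynamically allocated work item:

	#include <linux/irq_work.h>
	#include <linux/slab.h>

	static void my_work_release(struct irq_work *work)
	{
		irq_work_sync(work);	/* spin until IRQ_WORK_BUSY is clear */
		kfree(work);		/* only now is the memory safe to free */
	}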