hc
2023-11-07 f45e756958099c35d6afb746df1d40a1c6302cfc
kernel/kernel/irq_work.c
@@ -17,6 +17,7 @@
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
+#include <linux/interrupt.h>
 #include <asm/processor.h>
 
 
@@ -57,29 +58,40 @@
 }
 
 /* Enqueue on current CPU, work must already be claimed and preempt disabled */
-static void __irq_work_queue_local(struct irq_work *work)
+static void __irq_work_queue_local(struct irq_work *work, struct llist_head *list)
 {
-	/* If the work is "lazy", handle it from next tick if any */
-	if (work->flags & IRQ_WORK_LAZY) {
-		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
-		    tick_nohz_tick_stopped())
-			arch_irq_work_raise();
-	} else {
-		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
-			arch_irq_work_raise();
-	}
+	bool empty;
+
+	empty = llist_add(&work->llnode, list);
+
+	if (empty &&
+	    (!(work->flags & IRQ_WORK_LAZY) ||
+	     tick_nohz_tick_stopped()))
+		arch_irq_work_raise();
+}
+
+static inline bool use_lazy_list(struct irq_work *work)
+{
+	return (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
+		|| (work->flags & IRQ_WORK_LAZY);
 }
 
 /* Enqueue the irq work @work on the current CPU */
 bool irq_work_queue(struct irq_work *work)
 {
+	struct llist_head *list;
+
 	/* Only queue if not already pending */
 	if (!irq_work_claim(work))
 		return false;
 
 	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
-	__irq_work_queue_local(work);
+	if (use_lazy_list(work))
+		list = this_cpu_ptr(&lazy_list);
+	else
+		list = this_cpu_ptr(&raised_list);
+	__irq_work_queue_local(work, list);
 	preempt_enable();
 
 	return true;
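
Note: with this hunk, use_lazy_list() routes every item that lacks IRQ_WORK_HARD_IRQ onto lazy_list when CONFIG_PREEMPT_RT_FULL is set, so only explicitly flagged work is raised from hard-irq context. A minimal caller sketch, assuming the IRQ_WORK_HARD_IRQ flag and this static-initializer style are available in this tree (the names below are illustrative, not taken from the patch):

#include <linux/irq_work.h>

/* Hypothetical callback; on RT it still runs from the irq_work IPI. */
static void urgent_fn(struct irq_work *work)
{
	/* keep this short and non-sleeping: hard-irq context */
}

static struct irq_work urgent_work = {
	.flags = IRQ_WORK_HARD_IRQ,	/* bypass the lazy_list routing above */
	.func  = urgent_fn,
};

/* irq_work_queue(&urgent_work) then lands on raised_list and raises the arch IPI. */
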
@@ -98,6 +110,8 @@
 	return irq_work_queue(work);
 
 #else /* CONFIG_SMP: */
+	struct llist_head *list;
+
 	/* All work should have been flushed before going offline */
 	WARN_ON_ONCE(cpu_is_offline(cpu));
 
@@ -106,13 +120,18 @@
 		return false;
 
 	preempt_disable();
+	if (use_lazy_list(work))
+		list = &per_cpu(lazy_list, cpu);
+	else
+		list = &per_cpu(raised_list, cpu);
+
 	if (cpu != smp_processor_id()) {
 		/* Arch remote IPI send/receive backend aren't NMI safe */
 		WARN_ON_ONCE(in_nmi());
-		if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+		if (llist_add(&work->llnode, list))
 			arch_send_call_function_single_ipi(cpu);
 	} else {
-		__irq_work_queue_local(work);
+		__irq_work_queue_local(work, list);
 	}
 	preempt_enable();
 
@@ -128,9 +147,8 @@
 	raised = this_cpu_ptr(&raised_list);
 	lazy = this_cpu_ptr(&lazy_list);
 
-	if (llist_empty(raised) || arch_irq_work_has_interrupt())
-		if (llist_empty(lazy))
-			return false;
+	if (llist_empty(raised) && llist_empty(lazy))
+		return false;
 
 	/* All work should have been flushed before going offline */
 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
@@ -144,8 +162,12 @@
 	struct llist_node *llnode;
 	unsigned long flags;
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+	/*
+	 * nort: On RT IRQ-work may run in SOFTIRQ context.
+	 */
 	BUG_ON(!irqs_disabled());
-
+#endif
 	if (llist_empty(list))
 		return;
 
@@ -177,7 +199,16 @@
 void irq_work_run(void)
 {
 	irq_work_run_list(this_cpu_ptr(&raised_list));
-	irq_work_run_list(this_cpu_ptr(&lazy_list));
+	if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) {
+		/*
+		 * NOTE: we raise softirq via IPI for safety,
+		 * and execute in irq_work_tick() to move the
+		 * overhead from hard to soft irq context.
+		 */
+		if (!llist_empty(this_cpu_ptr(&lazy_list)))
+			raise_softirq(TIMER_SOFTIRQ);
+	} else
+		irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
 EXPORT_SYMBOL_GPL(irq_work_run);
 
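
Note: on CONFIG_PREEMPT_RT_FULL the lazy_list is no longer drained from the hard-irq path; irq_work_run() only raises TIMER_SOFTIRQ. The expected flow, assuming a companion change (not shown in this file) hooks irq_work_tick_soft() into the timer softirq handler:

/*
 * hard irq:      irq_work_run()
 *                  -> runs raised_list
 *                  -> raise_softirq(TIMER_SOFTIRQ) if lazy_list is non-empty
 * timer softirq:  irq_work_tick_soft()
 *                  -> runs lazy_list in preemptible softirq context
 */
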
@@ -187,8 +218,17 @@
 
 	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
 		irq_work_run_list(raised);
+
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
+		irq_work_run_list(this_cpu_ptr(&lazy_list));
+}
+
+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
+void irq_work_tick_soft(void)
+{
 	irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
+#endif
 
 /*
  * Synchronize against the irq_work @entry, ensures the entry is not
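
Note: irq_work_tick_soft() is new in this hunk and needs a prototype visible to the timer code that calls it. A sketch of the declaration the rest of the series would presumably add to include/linux/irq_work.h (an assumption, not part of this diff):

/* Hypothetical header addition: stub out irq_work_tick_soft() unless both
 * CONFIG_IRQ_WORK and CONFIG_PREEMPT_RT_FULL are enabled. */
#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
void irq_work_tick_soft(void);
#else
static inline void irq_work_tick_soft(void) { }
#endif
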