2023-12-11 1f93a7dfd1f8d5ff7a5c53246c7534fe2332d6f4
--- a/kernel/include/linux/irq_work.h
+++ b/kernel/include/linux/irq_work.h
@@ -2,7 +2,8 @@
 #ifndef _LINUX_IRQ_WORK_H
 #define _LINUX_IRQ_WORK_H
 
-#include <linux/llist.h>
+#include <linux/smp_types.h>
+#include <linux/rcuwait.h>
 
 /*
  * An entry can be in one of four states:
@@ -13,30 +14,51 @@
  * busy       NULL, 2 -> {free, claimed} : callback in progress, can be claimed
  */
 
-#define IRQ_WORK_PENDING	BIT(0)
-#define IRQ_WORK_BUSY		BIT(1)
-
-/* Doesn't want IPI, wait for tick: */
-#define IRQ_WORK_LAZY		BIT(2)
-/* Run hard IRQ context, even on RT */
-#define IRQ_WORK_HARD_IRQ	BIT(3)
-
-#define IRQ_WORK_CLAIMED	(IRQ_WORK_PENDING | IRQ_WORK_BUSY)
-
 struct irq_work {
-	unsigned long flags;
-	struct llist_node llnode;
+	union {
+		struct __call_single_node node;
+		struct {
+			struct llist_node llnode;
+			atomic_t flags;
+		};
+	};
 	void (*func)(struct irq_work *);
+	struct rcuwait irqwait;
 };
 
 static inline
 void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
 {
-	work->flags = 0;
+	atomic_set(&work->flags, 0);
 	work->func = func;
+	rcuwait_init(&work->irqwait);
 }
 
-#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }
+#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = {	\
+	.flags = ATOMIC_INIT(0),				\
+	.func = (_f),						\
+	.irqwait = __RCUWAIT_INITIALIZER(irqwait),		\
+}
+
+#define __IRQ_WORK_INIT(_func, _flags) (struct irq_work){	\
+	.flags = ATOMIC_INIT(_flags),				\
+	.func = (_func),					\
+	.irqwait = __RCUWAIT_INITIALIZER(irqwait),		\
+}
+
+#define IRQ_WORK_INIT(_func) __IRQ_WORK_INIT(_func, 0)
+#define IRQ_WORK_INIT_LAZY(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_LAZY)
+#define IRQ_WORK_INIT_HARD(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_HARD_IRQ)
+
+static inline bool irq_work_is_busy(struct irq_work *work)
+{
+	return atomic_read(&work->flags) & IRQ_WORK_BUSY;
+}
+
+static inline bool irq_work_is_hard(struct irq_work *work)
+{
+	return atomic_read(&work->flags) & IRQ_WORK_HARD_IRQ;
+}
 
 bool irq_work_queue(struct irq_work *work);
 bool irq_work_queue_on(struct irq_work *work, int cpu);
@@ -49,15 +71,11 @@
 
 void irq_work_run(void);
 bool irq_work_needs_cpu(void);
+void irq_work_single(void *arg);
 #else
 static inline bool irq_work_needs_cpu(void) { return false; }
 static inline void irq_work_run(void) { }
-#endif
-
-#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
-void irq_work_tick_soft(void);
-#else
-static inline void irq_work_tick_soft(void) { }
+static inline void irq_work_single(void *arg) { }
 #endif
 
 #endif /* _LINUX_IRQ_WORK_H */
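
For context, a minimal usage sketch of the API this header now exposes; this is not part of the patch. It assumes a tree with the patch applied and CONFIG_IRQ_WORK=y, and the names demo_func, demo_work, demo_hard and demo_queue are made up for illustration:

/*
 * Usage sketch only, not from the patch: define irq_work items with the
 * new initializers, queue one, and inspect the now-atomic flags word.
 */
#include <linux/irq_work.h>
#include <linux/printk.h>

static void demo_func(struct irq_work *work)
{
	/* Runs later from the irq_work path, not in the caller's context. */
	pr_info("irq_work callback ran\n");
}

/* Static definition via the extended DEFINE_IRQ_WORK initializer. */
static DEFINE_IRQ_WORK(demo_work, demo_func);

/* Or with explicit flags, e.g. forcing hard IRQ context even on RT: */
static struct irq_work demo_hard = IRQ_WORK_INIT_HARD(demo_func);

static void demo_queue(void)
{
	/* irq_work_queue() returns false if the item was already pending. */
	if (!irq_work_queue(&demo_work))
		pr_info("demo_work already queued\n");

	/* New helper: reads IRQ_WORK_BUSY out of the atomic flags. */
	if (irq_work_is_busy(&demo_work))
		pr_info("callback still in progress\n");
}

Design-wise, flags becomes an atomic_t laid out in a union with struct __call_single_node, matching the generic SMP call layout (hence the <linux/smp_types.h> include, where the IRQ_WORK_* bits now presumably live), and the new rcuwait field gives a blocking sync path elsewhere in irq_work something to sleep on until a callback finishes.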