2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/include/linux/irq_work.h
@@ -2,7 +2,8 @@
 #ifndef _LINUX_IRQ_WORK_H
 #define _LINUX_IRQ_WORK_H
 
-#include <linux/llist.h>
+#include <linux/smp_types.h>
+#include <linux/rcuwait.h>
 
 /*
  * An entry can be in one of four states:
@@ -13,28 +14,51 @@
  * busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed
  */
 
-#define IRQ_WORK_PENDING	BIT(0)
-#define IRQ_WORK_BUSY		BIT(1)
-
-/* Doesn't want IPI, wait for tick: */
-#define IRQ_WORK_LAZY		BIT(2)
-
-#define IRQ_WORK_CLAIMED	(IRQ_WORK_PENDING | IRQ_WORK_BUSY)
-
 struct irq_work {
-	unsigned long flags;
-	struct llist_node llnode;
+	union {
+		struct __call_single_node node;
+		struct {
+			struct llist_node llnode;
+			atomic_t flags;
+		};
+	};
 	void (*func)(struct irq_work *);
+	struct rcuwait irqwait;
 };
 
 static inline
 void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
 {
-	work->flags = 0;
+	atomic_set(&work->flags, 0);
 	work->func = func;
+	rcuwait_init(&work->irqwait);
 }
 
-#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }
+#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = {	\
+	.flags = ATOMIC_INIT(0),				\
+	.func = (_f),						\
+	.irqwait = __RCUWAIT_INITIALIZER(irqwait),		\
+}
+
+#define __IRQ_WORK_INIT(_func, _flags) (struct irq_work){	\
+	.flags = ATOMIC_INIT(_flags),				\
+	.func = (_func),					\
+	.irqwait = __RCUWAIT_INITIALIZER(irqwait),		\
+}
+
+#define IRQ_WORK_INIT(_func) __IRQ_WORK_INIT(_func, 0)
+#define IRQ_WORK_INIT_LAZY(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_LAZY)
+#define IRQ_WORK_INIT_HARD(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_HARD_IRQ)
+
+static inline bool irq_work_is_busy(struct irq_work *work)
+{
+	return atomic_read(&work->flags) & IRQ_WORK_BUSY;
+}
+
+static inline bool irq_work_is_hard(struct irq_work *work)
+{
+	return atomic_read(&work->flags) & IRQ_WORK_HARD_IRQ;
+}
 
 bool irq_work_queue(struct irq_work *work);
 bool irq_work_queue_on(struct irq_work *work, int cpu);
@@ -47,9 +71,11 @@
 
 void irq_work_run(void);
 bool irq_work_needs_cpu(void);
+void irq_work_single(void *arg);
 #else
 static inline bool irq_work_needs_cpu(void) { return false; }
 static inline void irq_work_run(void) { }
+static inline void irq_work_single(void *arg) { }
 #endif
 
 #endif /* _LINUX_IRQ_WORK_H */
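
For context, a minimal usage sketch (not part of the patch) of how a caller would typically combine the new IRQ_WORK_INIT() static initializer with irq_work_queue(); the callback name, kick_work() helper, and pr_* logging are illustrative assumptions, not code from this change.

#include <linux/irq_work.h>
#include <linux/printk.h>

/* Hypothetical callback; it runs later from interrupt context. */
static void my_irq_work_fn(struct irq_work *work)
{
	pr_info("irq_work callback ran\n");
}

/* Static initialization via the initializer macro introduced above. */
static struct irq_work my_work = IRQ_WORK_INIT(my_irq_work_fn);

static void kick_work(void)
{
	/* irq_work_queue() returns false if the work was already pending. */
	if (!irq_work_queue(&my_work))
		pr_debug("irq_work already pending\n");
}

IRQ_WORK_INIT_LAZY() and IRQ_WORK_INIT_HARD() are the same initializer with the IRQ_WORK_LAZY or IRQ_WORK_HARD_IRQ flag preset, as defined in the hunk above.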