2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/net/wireguard/queueing.c
@@ -4,13 +4,13 @@
  */
 
 #include "queueing.h"
+#include <linux/skb_array.h>
 
 struct multicore_worker __percpu *
 wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
 {
 	int cpu;
-	struct multicore_worker __percpu *worker =
-		alloc_percpu(struct multicore_worker);
+	struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);
 
 	if (!worker)
 		return NULL;
@@ -23,7 +23,7 @@
 }
 
 int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
-			 bool multicore, unsigned int len)
+			 unsigned int len)
 {
 	int ret;
 
@@ -31,25 +31,78 @@
 	ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
 	if (ret)
 		return ret;
-	if (function) {
-		if (multicore) {
-			queue->worker = wg_packet_percpu_multicore_worker_alloc(
-				function, queue);
-			if (!queue->worker) {
-				ptr_ring_cleanup(&queue->ring, NULL);
-				return -ENOMEM;
-			}
-		} else {
-			INIT_WORK(&queue->work, function);
-		}
+	queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
+	if (!queue->worker) {
+		ptr_ring_cleanup(&queue->ring, NULL);
+		return -ENOMEM;
 	}
 	return 0;
 }
 
-void wg_packet_queue_free(struct crypt_queue *queue, bool multicore)
+void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
 {
-	if (multicore)
-		free_percpu(queue->worker);
-	WARN_ON(!__ptr_ring_empty(&queue->ring));
-	ptr_ring_cleanup(&queue->ring, NULL);
+	free_percpu(queue->worker);
+	WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
+	ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
 }
 
+#define NEXT(skb) ((skb)->prev)
+#define STUB(queue) ((struct sk_buff *)&queue->empty)
+
+void wg_prev_queue_init(struct prev_queue *queue)
+{
+	NEXT(STUB(queue)) = NULL;
+	queue->head = queue->tail = STUB(queue);
+	queue->peeked = NULL;
+	atomic_set(&queue->count, 0);
+	BUILD_BUG_ON(
+		offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
+							offsetof(struct prev_queue, empty) ||
+		offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
+							 offsetof(struct prev_queue, empty));
+}
+
+static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
+{
+	WRITE_ONCE(NEXT(skb), NULL);
+	WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
+}
+
+bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
+{
+	if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
+		return false;
+	__wg_prev_queue_enqueue(queue, skb);
+	return true;
+}
+
+struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
+{
+	struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));
+
+	if (tail == STUB(queue)) {
+		if (!next)
+			return NULL;
+		queue->tail = next;
+		tail = next;
+		next = smp_load_acquire(&NEXT(next));
+	}
+	if (next) {
+		queue->tail = next;
+		atomic_dec(&queue->count);
+		return tail;
+	}
+	if (tail != READ_ONCE(queue->head))
+		return NULL;
+	__wg_prev_queue_enqueue(queue, STUB(queue));
+	next = smp_load_acquire(&NEXT(tail));
+	if (next) {
+		queue->tail = next;
+		atomic_dec(&queue->count);
+		return tail;
+	}
+	return NULL;
+}
+
+#undef NEXT
+#undef STUB
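
Note (not part of the patch): the wg_prev_queue_* functions added above are a multi-producer, single-consumer queue in the style of Dmitry Vyukov's intrusive MPSC queue. They reuse skb->prev as the "next" link and embed a stub sk_buff (prev_queue.empty) so the queue itself needs no allocation. The sketch below is a simplified userspace illustration of the same stub-node scheme using C11 atomics; the names (mpsc_queue, node, mpsc_*) are hypothetical, and it omits the MAX_QUEUED_PACKETS accounting and the kernel barrier helpers (xchg_release, smp_load_acquire) used in the real code.

/*
 * Illustrative sketch only -- not part of the commit. Simplified userspace
 * version of the stub-node MPSC queue pattern used by wg_prev_queue_*().
 * All identifiers here are hypothetical.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct node {
        _Atomic(struct node *) next;
        int value;
};

struct mpsc_queue {
        _Atomic(struct node *) head;    /* producers publish here (like prev_queue.head) */
        struct node *tail;              /* touched only by the single dequeuer */
        struct node stub;               /* embedded stub, like prev_queue.empty */
};

static void mpsc_init(struct mpsc_queue *q)
{
        atomic_store(&q->stub.next, NULL);
        atomic_store(&q->head, &q->stub);
        q->tail = &q->stub;
}

/* Safe to call from many producers concurrently. */
static void mpsc_enqueue(struct mpsc_queue *q, struct node *n)
{
        struct node *prev;

        atomic_store(&n->next, NULL);
        /* Swing head to the new node, then link the old head to it. */
        prev = atomic_exchange_explicit(&q->head, n, memory_order_release);
        atomic_store_explicit(&prev->next, n, memory_order_release);
}

/* Only one consumer may call this; returns NULL when (momentarily) empty. */
static struct node *mpsc_dequeue(struct mpsc_queue *q)
{
        struct node *tail = q->tail;
        struct node *next = atomic_load_explicit(&tail->next, memory_order_acquire);

        if (tail == &q->stub) {                 /* skip over the stub node */
                if (!next)
                        return NULL;
                q->tail = next;
                tail = next;
                next = atomic_load_explicit(&tail->next, memory_order_acquire);
        }
        if (next) {
                q->tail = next;
                return tail;
        }
        /* tail is the last real node; re-insert the stub so tail can be handed out. */
        if (tail != atomic_load(&q->head))
                return NULL;                    /* a producer is mid-enqueue; retry later */
        mpsc_enqueue(q, &q->stub);
        next = atomic_load_explicit(&tail->next, memory_order_acquire);
        if (!next)
                return NULL;
        q->tail = next;
        return tail;
}

int main(void)
{
        struct mpsc_queue q;
        struct node a = { .value = 1 }, b = { .value = 2 };
        struct node *n;

        mpsc_init(&q);
        mpsc_enqueue(&q, &a);
        mpsc_enqueue(&q, &b);
        while ((n = mpsc_dequeue(&q)))
                printf("%d\n", n->value);       /* prints 1 then 2 */
        return 0;
}

The kernel version additionally bounds the queue with atomic_add_unless() on queue->count and avoids a dedicated link field by aliasing NEXT(skb) onto skb->prev, which the BUILD_BUG_ON() in wg_prev_queue_init() checks against the sk_buff layout.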