2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
kernel/drivers/net/wireguard/queueing.c
@@ -4,13 +4,13 @@
  */
 
 #include "queueing.h"
+#include <linux/skb_array.h>
 
 struct multicore_worker __percpu *
 wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
 {
 	int cpu;
-	struct multicore_worker __percpu *worker =
-		alloc_percpu(struct multicore_worker);
+	struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);
 
 	if (!worker)
 		return NULL;
@@ -23,33 +23,87 @@
 }
 
 int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
-			 bool multicore, unsigned int len)
+			 unsigned int len)
 {
 	int ret;
 
 	memset(queue, 0, sizeof(*queue));
+	queue->last_cpu = -1;
 	ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
 	if (ret)
 		return ret;
-	if (function) {
-		if (multicore) {
-			queue->worker = wg_packet_percpu_multicore_worker_alloc(
-				function, queue);
-			if (!queue->worker) {
-				ptr_ring_cleanup(&queue->ring, NULL);
-				return -ENOMEM;
-			}
-		} else {
-			INIT_WORK(&queue->work, function);
-		}
+	queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
+	if (!queue->worker) {
+		ptr_ring_cleanup(&queue->ring, NULL);
+		return -ENOMEM;
 	}
 	return 0;
 }
 
-void wg_packet_queue_free(struct crypt_queue *queue, bool multicore)
+void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
 {
-	if (multicore)
-		free_percpu(queue->worker);
-	WARN_ON(!__ptr_ring_empty(&queue->ring));
-	ptr_ring_cleanup(&queue->ring, NULL);
+	free_percpu(queue->worker);
+	WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
+	ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
 }
+
+#define NEXT(skb) ((skb)->prev)
+#define STUB(queue) ((struct sk_buff *)&queue->empty)
+
+void wg_prev_queue_init(struct prev_queue *queue)
+{
+	NEXT(STUB(queue)) = NULL;
+	queue->head = queue->tail = STUB(queue);
+	queue->peeked = NULL;
+	atomic_set(&queue->count, 0);
+	BUILD_BUG_ON(
+		offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
+						  offsetof(struct prev_queue, empty) ||
+		offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
+						  offsetof(struct prev_queue, empty));
+}
+
+static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
+{
+	WRITE_ONCE(NEXT(skb), NULL);
+	WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
+}
+
+bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
+{
+	if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
+		return false;
+	__wg_prev_queue_enqueue(queue, skb);
+	return true;
+}
+
+struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
+{
+	struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));
+
+	if (tail == STUB(queue)) {
+		if (!next)
+			return NULL;
+		queue->tail = next;
+		tail = next;
+		next = smp_load_acquire(&NEXT(next));
+	}
+	if (next) {
+		queue->tail = next;
+		atomic_dec(&queue->count);
+		return tail;
+	}
+	if (tail != READ_ONCE(queue->head))
+		return NULL;
+	__wg_prev_queue_enqueue(queue, STUB(queue));
+	next = smp_load_acquire(&NEXT(tail));
+	if (next) {
+		queue->tail = next;
+		atomic_dec(&queue->count);
+		return tail;
+	}
+	return NULL;
+}
+
+#undef NEXT
+#undef STUB