@@ ... @@
  */
 
 #include "queueing.h"
+#include <linux/skb_array.h>
 
 struct multicore_worker __percpu *
 wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
 {
 	int cpu;
-	struct multicore_worker __percpu *worker =
-		alloc_percpu(struct multicore_worker);
+	struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);
 
 	if (!worker)
 		return NULL;
@@ ... @@
 }
 
 int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
-			 bool multicore, unsigned int len)
+			 unsigned int len)
 {
 	int ret;
 
@@ ... @@
 	ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
 	if (ret)
 		return ret;
-	if (function) {
-		if (multicore) {
-			queue->worker = wg_packet_percpu_multicore_worker_alloc(
-				function, queue);
-			if (!queue->worker) {
-				ptr_ring_cleanup(&queue->ring, NULL);
-				return -ENOMEM;
-			}
-		} else {
-			INIT_WORK(&queue->work, function);
-		}
+	queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
+	if (!queue->worker) {
+		ptr_ring_cleanup(&queue->ring, NULL);
+		return -ENOMEM;
 	}
 	return 0;
 }
 
-void wg_packet_queue_free(struct crypt_queue *queue, bool multicore)
+void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
 {
-	if (multicore)
-		free_percpu(queue->worker);
-	WARN_ON(!__ptr_ring_empty(&queue->ring));
-	ptr_ring_cleanup(&queue->ring, NULL);
+	free_percpu(queue->worker);
+	WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
+	ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
 }
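With this change every crypt_queue gets a per-CPU worker, so the old `multicore` flag and the non-multicore INIT_WORK() path disappear and `function` is no longer optional. The new `purge` argument to wg_packet_queue_free() lets teardown paths free a ring that still holds packets: `__skb_array_destroy_skb` frees each remaining skb, while non-purging callers keep the WARN_ON that the ring actually drained. A minimal caller sketch against the new contract; `example_worker`, `example_init`/`example_exit`, and the ring size are hypothetical, not from the patch:

/* Hypothetical caller, sketching the new init/free contract. The worker
 * function is now mandatory and always runs on a per-CPU workqueue. */
static struct crypt_queue example_queue;

static void example_worker(struct work_struct *work)
{
	/* Would pull skbs off example_queue.ring and process them. */
}

static int example_init(void)
{
	/* 1024 is an arbitrary ring size chosen for illustration. */
	return wg_packet_queue_init(&example_queue, example_worker, 1024);
}

static void example_exit(void)
{
	/* purge = true: any skbs still in the ring are freed for us. */
	wg_packet_queue_free(&example_queue, true);
}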
+
+#define NEXT(skb) ((skb)->prev)
+#define STUB(queue) ((struct sk_buff *)&queue->empty)
+
+void wg_prev_queue_init(struct prev_queue *queue)
+{
+	NEXT(STUB(queue)) = NULL;
+	queue->head = queue->tail = STUB(queue);
+	queue->peeked = NULL;
+	atomic_set(&queue->count, 0);
+	BUILD_BUG_ON(
+		offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
+						  offsetof(struct prev_queue, empty) ||
+		offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
+						  offsetof(struct prev_queue, empty));
+}
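This queue threads sk_buffs together through their `prev` pointer (aliased as NEXT above) and uses an `empty` stub node embedded in the queue itself, so dequeue never has to allocate. The BUILD_BUG_ON pins the assumption that makes the STUB() cast legal: `empty`'s two pointers must sit at the same offsets as `next`/`prev` at the top of struct sk_buff. The struct definition lives in queueing.h rather than in this hunk; for the code above to compile it would have to look roughly like this sketch:

/* Sketch of the queueing.h side (not part of this hunk): the two members
 * of 'empty' must mirror the first two members of struct sk_buff. */
struct prev_queue {
	struct sk_buff *head, *tail, *peeked;
	struct { struct sk_buff *next, *prev; } empty;
	atomic_t count;
};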
+
+static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
+{
+	WRITE_ONCE(NEXT(skb), NULL);
+	WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
+}
+
+bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
+{
+	if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
+		return false;
+	__wg_prev_queue_enqueue(queue, skb);
+	return true;
+}
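Enqueue is the classic two-step intrusive MPSC push: atomically swing `head` to the new node with release semantics, then link the old head to it. Between the xchg and the second WRITE_ONCE the list is transiently broken, which the consumer must tolerate (see dequeue below). `atomic_add_unless` bounds the queue at MAX_QUEUED_PACKETS, so producers get back-pressure instead of unbounded growth. A hypothetical producer path, on the assumption that a caller that fails to enqueue still owns the skb:

/* Hypothetical producer: may run concurrently on any CPU. On a full
 * queue the caller keeps ownership of the skb and must dispose of it. */
static bool example_produce(struct prev_queue *q, struct sk_buff *skb)
{
	if (!wg_prev_queue_enqueue(q, skb)) {
		kfree_skb(skb);	/* queue full: drop */
		return false;
	}
	return true;
}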
+
+struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
+{
+	struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));
+
+	if (tail == STUB(queue)) {
+		if (!next)
+			return NULL;
+		queue->tail = next;
+		tail = next;
+		next = smp_load_acquire(&NEXT(next));
+	}
+	if (next) {
+		queue->tail = next;
+		atomic_dec(&queue->count);
+		return tail;
+	}
+	if (tail != READ_ONCE(queue->head))
+		return NULL;
+	__wg_prev_queue_enqueue(queue, STUB(queue));
+	next = smp_load_acquire(&NEXT(tail));
+	if (next) {
+		queue->tail = next;
+		atomic_dec(&queue->count);
+		return tail;
+	}
+	return NULL;
+}
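Dequeue is strictly single-consumer. The stub is skipped when it reaches the tail, and the subtle case is a list that appears to end at `tail` while `head` has already moved on: a producer has done the xchg but not yet published the link, so the consumer returns NULL instead of spinning. When `tail == head` the list really is down to its last element, and the consumer re-enqueues the stub behind it so that node can be handed out without ever leaving `head` dangling. A hypothetical drain loop making that NULL semantic explicit:

/* Hypothetical single-consumer drain. NULL can mean "empty" or "producer
 * mid-enqueue", so real consumers reschedule rather than busy-wait. */
static void example_consume(struct prev_queue *q)
{
	struct sk_buff *skb;

	while ((skb = wg_prev_queue_dequeue(q)) != NULL)
		consume_skb(skb);
}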
+
+#undef NEXT
+#undef STUB
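The `peeked` slot initialized in wg_prev_queue_init() suggests a non-destructive peek layered on top of this dequeue, letting the consumer examine the next packet without committing to removing it. A sketch of how such helpers could look; the names and placement are assumptions, not shown in this hunk:

/* Assumed peek layer over the destructive dequeue: cache the dequeued
 * skb until the consumer explicitly drops it. Single-consumer only. */
static inline struct sk_buff *example_peek(struct prev_queue *q)
{
	if (!q->peeked)
		q->peeked = wg_prev_queue_dequeue(q);
	return q->peeked;
}

static inline void example_drop_peeked(struct prev_queue *q)
{
	q->peeked = NULL;
}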