@@ -4,13 +4,13 @@
  */

 #include "queueing.h"
+#include <linux/skb_array.h>

 struct multicore_worker __percpu *
 wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
 {
	int cpu;
-	struct multicore_worker __percpu *worker =
-		alloc_percpu(struct multicore_worker);
+	struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);

	if (!worker)
		return NULL;
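The new include is what provides `__skb_array_destroy_skb()`, which `wg_packet_queue_free()` below passes to `ptr_ring_cleanup()` when purging; the `alloc_percpu()` change above is purely cosmetic (a wrapped line is joined).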
@@ -23,33 +23,26 @@
 }

 int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
-			 bool multicore, unsigned int len)
+			 unsigned int len)
 {
	int ret;

	memset(queue, 0, sizeof(*queue));
+	queue->last_cpu = -1;
	ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
	if (ret)
		return ret;
-	if (function) {
-		if (multicore) {
-			queue->worker = wg_packet_percpu_multicore_worker_alloc(
-				function, queue);
-			if (!queue->worker) {
-				ptr_ring_cleanup(&queue->ring, NULL);
-				return -ENOMEM;
-			}
-		} else {
-			INIT_WORK(&queue->work, function);
-		}
+	queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
+	if (!queue->worker) {
+		ptr_ring_cleanup(&queue->ring, NULL);
+		return -ENOMEM;
	}
	return 0;
 }

-void wg_packet_queue_free(struct crypt_queue *queue, bool multicore)
+void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
 {
-	if (multicore)
-		free_percpu(queue->worker);
-	WARN_ON(!__ptr_ring_empty(&queue->ring));
-	ptr_ring_cleanup(&queue->ring, NULL);
+	free_percpu(queue->worker);
+	WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
+	ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
 }
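With the single-core path removed, `function` is now mandatory and `wg_packet_queue_init()` always allocates the per-CPU workers, while `wg_packet_queue_free()` trades its `multicore` flag for `purge`: when set, the non-empty warning is suppressed and any skbs still sitting in the ring are freed. A minimal sketch of the resulting calling convention, assuming a hypothetical `encrypt_queue` field and `wg_packet_encrypt_worker()` callback (the real call sites are outside this file):

```c
/* Sketch only: hypothetical caller of the new API; the names are illustrative. */
int ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
			       MAX_QUEUED_PACKETS);
if (ret < 0)
	return ret;
/* ... device runs, packets flow ... */
wg_packet_queue_free(&wg->encrypt_queue, true); /* true: purge leftover skbs */
```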
@@ -55,0 +49,16 @@
+
+#define NEXT(skb) ((skb)->prev)
+#define STUB(queue) ((struct sk_buff *)&queue->empty)
+
+void wg_prev_queue_init(struct prev_queue *queue)
+{
+	NEXT(STUB(queue)) = NULL;
+	queue->head = queue->tail = STUB(queue);
+	queue->peeked = NULL;
+	atomic_set(&queue->count, 0);
+	BUILD_BUG_ON(
+		offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
+						  offsetof(struct prev_queue, empty) ||
+		offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
+						  offsetof(struct prev_queue, empty));
+}
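The block above sets up an intrusive MPSC (multi-producer, single-consumer) queue in the style of Dmitry Vyukov's: `STUB()` treats the embedded `empty` pointer pair as a permanently resident stub node, and `NEXT()` reuses `skb->prev` as the singly linked next pointer, so no allocation is needed per enqueue. The `BUILD_BUG_ON()` guarantees the cast in `STUB()` is sound by checking that `empty`'s members sit at exactly the offsets of `sk_buff.next`/`sk_buff.prev`. That only holds if `struct prev_queue` (declared in queueing.h, not part of this diff) has roughly this shape; the following is an inference, not the authoritative definition:

```c
/* Assumed layout, inferred from the BUILD_BUG_ON() above; see queueing.h
 * for the real definition. */
struct prev_queue {
	struct sk_buff *head, *tail, *peeked;
	struct { struct sk_buff *next, *prev; } empty; /* mirrors sk_buff's first two fields */
	atomic_t count;
};
```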
@@ -55,0 +65,14 @@
+
+static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
+{
+	WRITE_ONCE(NEXT(skb), NULL);
+	WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
+}
+
+bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
+{
+	if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
+		return false;
+	__wg_prev_queue_enqueue(queue, skb);
+	return true;
+}
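Enqueue is the classic two-step MPSC publish: `xchg_release()` swaps the new skb into `queue->head`, claiming a slot even under producer contention, and then the previous head is linked to the newcomer. The release ordering pairs with the `smp_load_acquire()` on the consumer side, so a dequeuer that observes the link also observes the skb's contents. `atomic_add_unless()` bounds the queue at `MAX_QUEUED_PACKETS`, so producers fail fast instead of blocking. A hypothetical producer call site might look like this (the `peer->tx_queue` field name is an assumption for illustration):

```c
/* Sketch only: any number of producers may race on this path. */
if (!wg_prev_queue_enqueue(&peer->tx_queue, skb))
	kfree_skb(skb); /* queue full: drop rather than block */
```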
@@ -55,0 +79,31 @@
+
+struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
+{
+	struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));
+
+	if (tail == STUB(queue)) {
+		if (!next)
+			return NULL;
+		queue->tail = next;
+		tail = next;
+		next = smp_load_acquire(&NEXT(next));
+	}
+	if (next) {
+		queue->tail = next;
+		atomic_dec(&queue->count);
+		return tail;
+	}
+	if (tail != READ_ONCE(queue->head))
+		return NULL;
+	__wg_prev_queue_enqueue(queue, STUB(queue));
+	next = smp_load_acquire(&NEXT(tail));
+	if (next) {
+		queue->tail = next;
+		atomic_dec(&queue->count);
+		return tail;
+	}
+	return NULL;
+}
+
+#undef NEXT
+#undef STUB
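Dequeue must only ever be called from a single consumer. It advances `tail`, skipping the stub node and re-enqueueing it when the queue would otherwise drain completely, so the list always keeps at least one node. One subtlety inherent to this design: if a producer has swapped `queue->head` but not yet written the link, `wg_prev_queue_dequeue()` returns NULL even though `count` is non-zero; the single consumer simply retries later. (The `peeked` field initialized above is untouched in this hunk; presumably it backs peek/drop helpers defined elsewhere.) A consumer loop, as a sketch with a hypothetical `process_skb()` handler:

```c
/* Sketch only: exactly one consumer may run this against a given queue,
 * while producers keep calling wg_prev_queue_enqueue() concurrently. */
struct sk_buff *skb;

while ((skb = wg_prev_queue_dequeue(&peer->tx_queue)) != NULL)
	process_skb(skb); /* hypothetical handler */
```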