@@ -17,12 +17,13 @@
 struct wg_peer;
 struct multicore_worker;
 struct crypt_queue;
+struct prev_queue;
 struct sk_buff;
 
 /* queueing.c APIs: */
 int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
-			 bool multicore, unsigned int len);
-void wg_packet_queue_free(struct crypt_queue *queue, bool multicore);
+			 unsigned int len);
+void wg_packet_queue_free(struct crypt_queue *queue, bool purge);
 struct multicore_worker __percpu *
 wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);
 
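For illustration, a minimal sketch of how a caller might adapt to the revised declarations above: wg_packet_queue_init() no longer takes a multicore flag, and wg_packet_queue_free() now takes a purge flag. The driver-side names used here (wg_device, encrypt_queue, wg_packet_encrypt_worker, MAX_QUEUED_PACKETS) are assumptions for the sketch, not part of this hunk.

/* Hypothetical caller of the revised queue API; names outside this header
 * are assumed from the surrounding driver.
 */
static int example_init_queues(struct wg_device *wg)
{
	return wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
				    MAX_QUEUED_PACKETS);
}

static void example_free_queues(struct wg_device *wg)
{
	/* purge == false, on the assumption the queue was already drained. */
	wg_packet_queue_free(&wg->encrypt_queue, false);
}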
---|
@@ -93,13 +94,13 @@
 	skb->dev = NULL;
 #ifdef CONFIG_NET_SCHED
 	skb->tc_index = 0;
-	skb_reset_tc(skb);
 #endif
+	skb_reset_redirect(skb);
 	skb->hdr_len = skb_headroom(skb);
 	skb_reset_mac_header(skb);
 	skb_reset_network_header(skb);
 	skb_reset_transport_header(skb);
-	skb_probe_transport_header(skb, 0);
+	skb_probe_transport_header(skb);
 	skb_reset_inner_headers(skb);
 }
 
---|
@@ -135,8 +136,31 @@
 	return cpu;
 }
 
+void wg_prev_queue_init(struct prev_queue *queue);
+
+/* Multi producer */
+bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb);
+
+/* Single consumer */
+struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue);
+
+/* Single consumer */
+static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue)
+{
+	if (queue->peeked)
+		return queue->peeked;
+	queue->peeked = wg_prev_queue_dequeue(queue);
+	return queue->peeked;
+}
+
+/* Single consumer */
+static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
+{
+	queue->peeked = NULL;
+}
+
 static inline int wg_queue_enqueue_per_device_and_peer(
-	struct crypt_queue *device_queue, struct crypt_queue *peer_queue,
+	struct crypt_queue *device_queue, struct prev_queue *peer_queue,
 	struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
 {
 	int cpu;
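The peek/drop pair above lets the single consumer inspect the head packet without removing it until its crypto state has been published by a worker. A hedged sketch of that consumer pattern, assuming the packet_state enum and PACKET_CB() helper defined elsewhere in this header:

/* Illustrative single-consumer loop; not the driver's actual worker. */
static void example_drain(struct prev_queue *queue)
{
	struct sk_buff *skb;

	while ((skb = wg_prev_queue_peek(queue)) != NULL) {
		enum packet_state state =
			atomic_read_acquire(&PACKET_CB(skb)->state);

		/* Leave the packet queued until a worker marks it done. */
		if (state == PACKET_STATE_UNCRYPTED)
			break;

		wg_prev_queue_drop_peeked(queue);
		if (state == PACKET_STATE_DEAD)
			kfree_skb(skb);
		/* else: hand the packet on (transmit, or to napi on rx) */
	}
}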
---|
@@ -145,8 +169,9 @@
 	/* We first queue this up for the peer ingestion, but the consumer
 	 * will wait for the state to change to CRYPTED or DEAD before.
 	 */
-	if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb)))
+	if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb)))
 		return -ENOSPC;
+
 	/* Then we queue it up in the device queue, which consumes the
 	 * packet as soon as it can.
 	 */
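On the producer side, the helper above is what the encryption/decryption entry points call with the per-device and per-peer queues. A sketch of such a call, where peer->tx_queue, wg->encrypt_queue, wg->packet_crypt_wq and the last_cpu field are assumed names from the rest of the driver:

/* Hypothetical producer, queueing one skb for encryption. */
static void example_queue_tx(struct wg_device *wg, struct wg_peer *peer,
			     struct sk_buff *skb)
{
	if (unlikely(wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue,
							  &peer->tx_queue, skb,
							  wg->packet_crypt_wq,
							  &wg->encrypt_queue.last_cpu) < 0))
		kfree_skb(skb);	/* queue full: drop the packet */
}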
---|
@@ -157,9 +182,7 @@
 	return 0;
 }
 
-static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
-					     struct sk_buff *skb,
-					     enum packet_state state)
+static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state)
 {
 	/* We take a reference, because as soon as we call atomic_set, the
 	 * peer can be freed from below us.
---|
@@ -167,14 +190,12 @@
 	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));
 
 	atomic_set_release(&PACKET_CB(skb)->state, state);
-	queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu,
-					       peer->internal_id),
-		      peer->device->packet_crypt_wq, &queue->work);
+	queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id),
+		      peer->device->packet_crypt_wq, &peer->transmit_packet_work);
 	wg_peer_put(peer);
 }
 
-static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb,
-						  enum packet_state state)
+static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state)
 {
 	/* We take a reference, because as soon as we call atomic_set, the
 	 * peer can be freed from below us.