@@ -239,8 +239,7 @@
 	wg_packet_send_staged_packets(peer);
 }
 
-static void wg_packet_create_data_done(struct sk_buff *first,
-				       struct wg_peer *peer)
+static void wg_packet_create_data_done(struct wg_peer *peer, struct sk_buff *first)
 {
 	struct sk_buff *skb, *next;
 	bool is_keepalive, data_sent = false;
@@ -262,22 +261,19 @@
 
 void wg_packet_tx_worker(struct work_struct *work)
 {
-	struct crypt_queue *queue = container_of(work, struct crypt_queue,
-						 work);
+	struct wg_peer *peer = container_of(work, struct wg_peer, transmit_packet_work);
 	struct noise_keypair *keypair;
 	enum packet_state state;
 	struct sk_buff *first;
-	struct wg_peer *peer;
 
-	while ((first = __ptr_ring_peek(&queue->ring)) != NULL &&
+	while ((first = wg_prev_queue_peek(&peer->tx_queue)) != NULL &&
 	       (state = atomic_read_acquire(&PACKET_CB(first)->state)) !=
 		       PACKET_STATE_UNCRYPTED) {
-		__ptr_ring_discard_one(&queue->ring);
-		peer = PACKET_PEER(first);
+		wg_prev_queue_drop_peeked(&peer->tx_queue);
 		keypair = PACKET_CB(first)->keypair;
 
 		if (likely(state == PACKET_STATE_CRYPTED))
-			wg_packet_create_data_done(first, peer);
+			wg_packet_create_data_done(peer, first);
 		else
 			kfree_skb_list(first);
@@ -306,16 +302,14 @@
 				break;
 			}
 		}
-		wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first,
-					  state);
+		wg_queue_enqueue_per_peer_tx(first, state);
 		if (need_resched())
 			cond_resched();
 	}
 }
 
-static void wg_packet_create_data(struct sk_buff *first)
+static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first)
 {
-	struct wg_peer *peer = PACKET_PEER(first);
 	struct wg_device *wg = peer->device;
 	int ret = -EINVAL;
@@ -323,13 +317,10 @@
 	if (unlikely(READ_ONCE(peer->is_dead)))
 		goto err;
 
-	ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue,
-						   &peer->tx_queue, first,
-						   wg->packet_crypt_wq,
-						   &wg->encrypt_queue.last_cpu);
+	ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first,
+						   wg->packet_crypt_wq);
 	if (unlikely(ret == -EPIPE))
-		wg_queue_enqueue_per_peer(&peer->tx_queue, first,
-					  PACKET_STATE_DEAD);
+		wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD);
 err:
 	rcu_read_unlock_bh();
 	if (likely(!ret || ret == -EPIPE))
@@ -393,7 +384,7 @@
 	packets.prev->next = NULL;
 	wg_peer_get(keypair->entry.peer);
 	PACKET_CB(packets.next)->keypair = keypair;
-	wg_packet_create_data(packets.next);
+	wg_packet_create_data(peer, packets.next);
 	return;
 
 out_invalid:
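
Note on the pattern above: the rewritten wg_packet_tx_worker peeks at the head of the peer's tx_queue and only dequeues a packet once an encryption worker has published its state (read back here with atomic_read_acquire), so packets leave the queue strictly in the order they were staged even though they may be encrypted out of order on different CPUs. The userspace sketch below illustrates that peek / acquire-load / drop shape with C11 atomics; the toy_* and pkt names are hypothetical stand-ins for illustration only, not the kernel's wg_prev_queue implementation.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's packet states. */
enum pkt_state { PKT_UNCRYPTED, PKT_CRYPTED, PKT_DEAD };

struct pkt {
	struct pkt *next;
	_Atomic enum pkt_state state;	/* published by a producer with release semantics */
};

/* Deliberately simplified single-consumer queue head; the real
 * wg_prev_queue is an MPSC structure with its own peek/drop ops. */
struct toy_queue { struct pkt *head; };

static struct pkt *toy_peek(struct toy_queue *q) { return q->head; }
static void toy_drop_peeked(struct toy_queue *q) { q->head = q->head->next; }

/* Consumer loop mirroring the tx worker's shape: a packet is removed
 * only after its state is observed (acquire) to be past UNCRYPTED,
 * so completion order never reorders transmission order. */
static void toy_tx_worker(struct toy_queue *q)
{
	struct pkt *first;
	enum pkt_state state;

	while ((first = toy_peek(q)) != NULL &&
	       (state = atomic_load_explicit(&first->state,
					     memory_order_acquire)) != PKT_UNCRYPTED) {
		toy_drop_peeked(q);
		if (state == PKT_CRYPTED)
			printf("send packet %p\n", (void *)first);
		else
			printf("drop packet %p\n", (void *)first);
	}
}

int main(void)
{
	struct pkt b = { .next = NULL };	/* state zero-inits to PKT_UNCRYPTED */
	struct pkt a = { .next = &b };
	struct toy_queue q = { .head = &a };

	/* A producer (encryption worker) publishes completion. */
	atomic_store_explicit(&a.state, PKT_CRYPTED, memory_order_release);

	toy_tx_worker(&q);	/* sends a, then stops at b, which is still uncrypted */
	return 0;
}

Keeping the peek distinct from the drop is what lets the consumer stop at the first still-unfinished packet and simply retry on its next wakeup, without ever skipping ahead in the queue.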