@@ -116,8 +116,8 @@
 		return;
 	}
 
-	under_load = skb_queue_len(&wg->incoming_handshakes) >=
-		     MAX_QUEUED_INCOMING_HANDSHAKES / 8;
+	under_load = atomic_read(&wg->handshake_queue_len) >=
+		     MAX_QUEUED_INCOMING_HANDSHAKES / 8;
 	if (under_load) {
 		last_under_load = ktime_get_coarse_boottime_ns();
 	} else if (last_under_load) {
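The load heuristic above now reads an atomic counter instead of calling skb_queue_len(): the handshake queue becomes a ptr_ring in the hunks below, and ptr_ring offers no cheap length query, so the driver tracks the depth separately in wg->handshake_queue_len (incremented after a successful produce, decremented by the consumer worker).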
@@ -212,13 +212,14 @@
 
 void wg_packet_handshake_receive_worker(struct work_struct *work)
 {
-	struct wg_device *wg = container_of(work, struct multicore_worker,
-					    work)->ptr;
+	struct crypt_queue *queue = container_of(work, struct multicore_worker, work)->ptr;
+	struct wg_device *wg = container_of(queue, struct wg_device, handshake_queue);
 	struct sk_buff *skb;
 
-	while ((skb = skb_dequeue(&wg->incoming_handshakes)) != NULL) {
+	while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
 		wg_receive_handshake_packet(wg, skb);
 		dev_kfree_skb(skb);
+		atomic_dec(&wg->handshake_queue_len);
 		cond_resched();
 	}
 }
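The worker now takes the crypt_queue as its per-CPU worker context and recovers the owning wg_device with a second container_of(), rather than carrying a device pointer directly. A minimal sketch of the supporting definitions this assumes, reconstructed from how the diff uses them (not copied from the driver's headers):

	/* Sketch only: field layout inferred from this diff. */
	struct crypt_queue {
		struct ptr_ring ring;                     /* skb ring buffer */
		struct multicore_worker __percpu *worker; /* per-CPU consumers */
		int last_cpu;                             /* round-robin cursor */
	};

	struct wg_device {
		/* ... */
		struct crypt_queue handshake_queue;
		atomic_t handshake_queue_len; /* ptr_ring has no length query */
		/* ... */
	};

The container_of() chain works because handshake_queue is embedded by value in wg_device, so a pointer to the queue uniquely determines the device.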
@@ -444,7 +445,6 @@
 int wg_packet_rx_poll(struct napi_struct *napi, int budget)
 {
 	struct wg_peer *peer = container_of(napi, struct wg_peer, napi);
-	struct crypt_queue *queue = &peer->rx_queue;
 	struct noise_keypair *keypair;
 	struct endpoint endpoint;
 	enum packet_state state;
@@ -455,11 +455,10 @@
 	if (unlikely(budget <= 0))
 		return 0;
 
-	while ((skb = __ptr_ring_peek(&queue->ring)) != NULL &&
+	while ((skb = wg_prev_queue_peek(&peer->rx_queue)) != NULL &&
 	       (state = atomic_read_acquire(&PACKET_CB(skb)->state)) !=
 		       PACKET_STATE_UNCRYPTED) {
-		__ptr_ring_discard_one(&queue->ring);
-		peer = PACKET_PEER(skb);
+		wg_prev_queue_drop_peeked(&peer->rx_queue);
 		keypair = PACKET_CB(skb)->keypair;
 		free = true;
 
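In the poll loop above, the per-peer rx queue is no longer peeked as a bare ptr_ring via __ptr_ring_peek()/__ptr_ring_discard_one(); it goes through the wg_prev_queue_peek()/wg_prev_queue_drop_peeked() helpers instead. Because that queue is embedded in the peer the NAPI instance already belongs to, the peer = PACKET_PEER(skb) lookup is redundant and is dropped, as is the local queue alias removed in the previous hunk. The same rework renames wg_queue_enqueue_per_peer_napi() to wg_queue_enqueue_per_peer_rx(), as the next two hunks show.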
@@ -508,7 +507,7 @@
 		enum packet_state state =
 			likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ?
 				PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
-		wg_queue_enqueue_per_peer_napi(skb, state);
+		wg_queue_enqueue_per_peer_rx(skb, state);
 		if (need_resched())
 			cond_resched();
 	}
@@ -531,12 +530,10 @@
 	if (unlikely(READ_ONCE(peer->is_dead)))
 		goto err;
 
-	ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue,
-						   &peer->rx_queue, skb,
-						   wg->packet_crypt_wq,
-						   &wg->decrypt_queue.last_cpu);
+	ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb,
+						   wg->packet_crypt_wq);
 	if (unlikely(ret == -EPIPE))
-		wg_queue_enqueue_per_peer_napi(skb, PACKET_STATE_DEAD);
+		wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD);
 	if (likely(!ret || ret == -EPIPE)) {
 		rcu_read_unlock_bh();
 		return;
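wg_queue_enqueue_per_device_and_peer() loses its last_cpu parameter here; judging from the handshake hunk below, which uses wg->handshake_queue.last_cpu, the round-robin CPU cursor now lives inside struct crypt_queue itself, so callers no longer pass &wg->decrypt_queue.last_cpu explicitly.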
@@ -557,22 +554,28 @@
 	case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION):
 	case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE):
 	case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): {
-		int cpu;
+		int cpu, ret = -EBUSY;
 
-		if (skb_queue_len(&wg->incoming_handshakes) >
-			    MAX_QUEUED_INCOMING_HANDSHAKES ||
-		    unlikely(!rng_is_initialized())) {
+		if (unlikely(!rng_is_initialized()))
+			goto drop;
+		if (atomic_read(&wg->handshake_queue_len) > MAX_QUEUED_INCOMING_HANDSHAKES / 2) {
+			if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) {
+				ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb);
+				spin_unlock_bh(&wg->handshake_queue.ring.producer_lock);
+			}
+		} else
+			ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb);
+		if (ret) {
+drop:
 			net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n",
 						wg->dev->name, skb);
 			goto err;
 		}
-		skb_queue_tail(&wg->incoming_handshakes, skb);
-		/* Queues up a call to packet_process_queued_handshake_
-		 * packets(skb):
-		 */
-		cpu = wg_cpumask_next_online(&wg->incoming_handshake_cpu);
+		atomic_inc(&wg->handshake_queue_len);
+		cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu);
+		/* Queues up a call to packet_process_queued_handshake_packets(skb): */
 		queue_work_on(cpu, wg->handshake_receive_wq,
-			      &per_cpu_ptr(wg->incoming_handshakes_worker, cpu)->work);
+			      &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work);
 		break;
 	}
 	case cpu_to_le32(MESSAGE_DATA):
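The enqueue path above is the heart of the change: under normal load, handshake packets are queued with a plain ptr_ring_produce_bh(), but once the queue is more than half full, the producer only trylocks the ring's producer lock; if another CPU holds it, ret stays -EBUSY and the packet is dropped. A flood of handshake initiations therefore cannot force every CPU to serialize on a single queue lock. A standalone sketch of that pattern, with illustrative names (queue_or_drop() is not a function in the driver):

	#include <linux/atomic.h>
	#include <linux/ptr_ring.h>
	#include <linux/skbuff.h>
	#include <linux/spinlock.h>

	static int queue_or_drop(struct ptr_ring *ring, atomic_t *len,
				 struct sk_buff *skb, int half_capacity)
	{
		int ret = -EBUSY;

		if (atomic_read(len) > half_capacity) {
			/* Suspected flood: never wait on the producer lock;
			 * drop the packet if another CPU holds it.
			 */
			if (spin_trylock_bh(&ring->producer_lock)) {
				ret = __ptr_ring_produce(ring, skb);
				spin_unlock_bh(&ring->producer_lock);
			}
		} else {
			/* Normal load: take the producer lock unconditionally. */
			ret = ptr_ring_produce_bh(ring, skb);
		}
		if (!ret)
			atomic_inc(len); /* count only successfully queued skbs */
		return ret;
	}

Note that the counter is bumped only after a successful produce, matching the atomic_inc(&wg->handshake_queue_len) that follows the if (ret) block in the hunk above.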