2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/drivers/net/wireguard/queueing.h
@@ -17,12 +17,13 @@
 struct wg_peer;
 struct multicore_worker;
 struct crypt_queue;
+struct prev_queue;
 struct sk_buff;
 
 /* queueing.c APIs: */
 int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
-			 bool multicore, unsigned int len);
-void wg_packet_queue_free(struct crypt_queue *queue, bool multicore);
+			 unsigned int len);
+void wg_packet_queue_free(struct crypt_queue *queue, bool purge);
 struct multicore_worker __percpu *
 wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);
 
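Editor's note on the hunk above: wg_packet_queue_init() loses its per-queue "multicore" flag, and the boolean on wg_packet_queue_free() is renamed to "purge", presumably requesting that any still-queued packets be dropped at free time. A hypothetical call-site sketch of the new signatures; the encrypt_queue field, worker function, and QUEUE_LEN are placeholders, not code from this commit:

	/* Sketch only: names below are illustrative assumptions. */
	int ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
				       QUEUE_LEN);	/* "bool multicore" is gone */
	if (ret < 0)
		return ret;
	/* ... later, at teardown: */
	wg_packet_queue_free(&wg->encrypt_queue, true);	/* true: drop queued skbs */
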
@@ -93,13 +94,13 @@
 	skb->dev = NULL;
 #ifdef CONFIG_NET_SCHED
 	skb->tc_index = 0;
-	skb_reset_tc(skb);
 #endif
+	skb_reset_redirect(skb);
 	skb->hdr_len = skb_headroom(skb);
 	skb_reset_mac_header(skb);
 	skb_reset_network_header(skb);
 	skb_reset_transport_header(skb);
-	skb_probe_transport_header(skb, 0);
+	skb_probe_transport_header(skb);
 	skb_reset_inner_headers(skb);
 }
 
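Editor's note on the hunk above: these two substitutions track mainline API changes rather than WireGuard logic. skb_reset_tc() was renamed to skb_reset_redirect() and moved out from under CONFIG_NET_SCHED (around mainline v5.6), and skb_probe_transport_header() dropped its unused offset-hint argument (around v5.2), so the calls are updated to the newer forms.
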
@@ -135,8 +136,31 @@
 	return cpu;
 }
 
+void wg_prev_queue_init(struct prev_queue *queue);
+
+/* Multi producer */
+bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb);
+
+/* Single consumer */
+struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue);
+
+/* Single consumer */
+static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue)
+{
+	if (queue->peeked)
+		return queue->peeked;
+	queue->peeked = wg_prev_queue_dequeue(queue);
+	return queue->peeked;
+}
+
+/* Single consumer */
+static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
+{
+	queue->peeked = NULL;
+}
+
 static inline int wg_queue_enqueue_per_device_and_peer(
-	struct crypt_queue *device_queue, struct crypt_queue *peer_queue,
+	struct crypt_queue *device_queue, struct prev_queue *peer_queue,
 	struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
 {
 	int cpu;
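
Editor's note on the new API: wg_prev_queue_peek() caches the dequeued skb in queue->peeked, so repeated peeks return the same packet until the single consumer explicitly calls wg_prev_queue_drop_peeked(). A minimal sketch of that consumer contract; peer->rx_queue and try_consume() are assumptions for illustration, not code from this diff:

	struct sk_buff *skb;

	/* Single-consumer drain loop: peek, try to process, and only then
	 * drop the packet from the queue; on failure the very same skb is
	 * returned by the next peek.
	 */
	while ((skb = wg_prev_queue_peek(&peer->rx_queue)) != NULL) {
		if (!try_consume(skb))
			break;
		wg_prev_queue_drop_peeked(&peer->rx_queue);
	}
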
@@ -145,8 +169,9 @@
 	/* We first queue this up for the peer ingestion, but the consumer
 	 * will wait for the state to change to CRYPTED or DEAD before.
 	 */
-	if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb)))
+	if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb)))
 		return -ENOSPC;
+
 	/* Then we queue it up in the device queue, which consumes the
 	 * packet as soon as it can.
 	 */
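
Editor's note on the enqueue path: wg_prev_queue_enqueue() returns false when the per-peer queue is full, in which case the packet has been queued nowhere and the function bails out with -ENOSPC before touching the device queue. A hedged caller sketch; the decrypt_queue/rx_queue fields and the last_cpu member are assumptions modeled on the surrounding driver, not shown in this diff:

	/* Sketch only: on failure the caller still owns the skb. */
	if (unlikely(wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue,
							  &peer->rx_queue, skb,
							  wg->packet_crypt_wq,
							  &wg->decrypt_queue.last_cpu) < 0))
		dev_kfree_skb(skb);
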
@@ -157,9 +182,7 @@
 	return 0;
 }
 
-static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
-					     struct sk_buff *skb,
-					     enum packet_state state)
+static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state)
 {
 	/* We take a reference, because as soon as we call atomic_set, the
 	 * peer can be freed from below us.
@@ -167,14 +190,12 @@
 	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));
 
 	atomic_set_release(&PACKET_CB(skb)->state, state);
-	queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu,
-					       peer->internal_id),
-		      peer->device->packet_crypt_wq, &queue->work);
+	queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id),
+		      peer->device->packet_crypt_wq, &peer->transmit_packet_work);
 	wg_peer_put(peer);
 }
 
-static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb,
-						  enum packet_state state)
+static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state)
 {
 	/* We take a reference, because as soon as we call atomic_set, the
 	 * peer can be freed from below us.
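
Editor's note on the renames: the old generic wg_queue_enqueue_per_peer() required the caller to pass the per-peer crypt_queue whose work item should be kicked; with per-peer rings gone, the TX variant finds its work item on the peer itself (peer->transmit_packet_work), so splitting into explicit _tx and _rx entry points makes each call site self-describing. A hedged sketch of the intended call sites; the worker context is assumed from the existing driver, and PACKET_STATE_CRYPTED is inferred from the CRYPTED/DEAD comment earlier in this diff:

	/* After encrypting a packet in the device-wide worker (sketch): */
	wg_queue_enqueue_per_peer_tx(skb, PACKET_STATE_CRYPTED);

	/* After decrypting: the _rx variant (formerly _napi) presumably
	 * schedules the peer's NAPI poller rather than a workqueue (sketch):
	 */
	wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_CRYPTED);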