forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/net/xdp/xsk.c
@@ -22,159 +22,315 @@
 #include <linux/net.h>
 #include <linux/netdevice.h>
 #include <linux/rculist.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 #include <net/xdp.h>

 #include "xsk_queue.h"
 #include "xdp_umem.h"
+#include "xsk.h"

 #define TX_BATCH_SIZE 16

-static struct xdp_sock *xdp_sk(struct sock *sk)
+static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
+
+void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
 {
-	return (struct xdp_sock *)sk;
+	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
+		return;
+
+	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
+	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
 }
+EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

-bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
+void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
 {
-	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
-		READ_ONCE(xs->umem->fq);
-}
+	struct xdp_sock *xs;

-u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
-{
-	return xskq_peek_addr(umem->fq, addr);
-}
-EXPORT_SYMBOL(xsk_umem_peek_addr);
+	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
+		return;

-void xsk_umem_discard_addr(struct xdp_umem *umem)
-{
-	xskq_discard_addr(umem->fq);
-}
-EXPORT_SYMBOL(xsk_umem_discard_addr);
-
-static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
-{
-	void *buffer;
-	u64 addr;
-	int err;
-
-	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
-	    len > xs->umem->chunk_size_nohr) {
-		xs->rx_dropped++;
-		return -ENOSPC;
+	rcu_read_lock();
+	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
+		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
 	}
+	rcu_read_unlock();

-	addr += xs->umem->headroom;
+	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
+}
+EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

-	buffer = xdp_umem_get_data(xs->umem, addr);
-	memcpy(buffer, xdp->data, len);
-	err = xskq_produce_batch_desc(xs->rx, addr, len);
-	if (!err) {
-		xskq_discard_addr(xs->umem->fq);
-		xdp_return_buff(xdp);
-		return 0;
+void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
+{
+	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
+		return;
+
+	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
+	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
+}
+EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);
+
+void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
+{
+	struct xdp_sock *xs;
+
+	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
+		return;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
+		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
 	}
+	rcu_read_unlock();

-	xs->rx_dropped++;
-	return err;
+	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
+}
+EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);
+
+bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
+{
+	return pool->uses_need_wakeup;
+}
+EXPORT_SYMBOL(xsk_uses_need_wakeup);
+
+struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
+					    u16 queue_id)
+{
+	if (queue_id < dev->real_num_rx_queues)
+		return dev->_rx[queue_id].pool;
+	if (queue_id < dev->real_num_tx_queues)
+		return dev->_tx[queue_id].pool;
+
+	return NULL;
+}
+EXPORT_SYMBOL(xsk_get_pool_from_qid);
+
+void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
+{
+	if (queue_id < dev->num_rx_queues)
+		dev->_rx[queue_id].pool = NULL;
+	if (queue_id < dev->num_tx_queues)
+		dev->_tx[queue_id].pool = NULL;
+}
+
+/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
+ * not know if the device has more tx queues than rx, or the opposite.
+ * This might also change during run time.
+ */
+int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
+			u16 queue_id)
+{
+	if (queue_id >= max_t(unsigned int,
+			      dev->real_num_rx_queues,
+			      dev->real_num_tx_queues))
+		return -EINVAL;
+
+	if (queue_id < dev->real_num_rx_queues)
+		dev->_rx[queue_id].pool = pool;
+	if (queue_id < dev->real_num_tx_queues)
+		dev->_tx[queue_id].pool = pool;
+
+	return 0;
+}
+
+void xp_release(struct xdp_buff_xsk *xskb)
+{
+	xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
+}
+
+static u64 xp_get_handle(struct xdp_buff_xsk *xskb)
+{
+	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;
+
+	offset += xskb->pool->headroom;
+	if (!xskb->pool->unaligned)
+		return xskb->orig_addr + offset;
+	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
 }

 static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 {
-	int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);
+	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+	u64 addr;
+	int err;

-	if (err)
-		xs->rx_dropped++;
+	addr = xp_get_handle(xskb);
+	err = xskq_prod_reserve_desc(xs->rx, addr, len);
+	if (err) {
+		xs->rx_queue_full++;
+		return err;
+	}

-	return err;
+	xp_release(xskb);
+	return 0;
 }

-int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
+static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
+{
+	void *from_buf, *to_buf;
+	u32 metalen;
+
+	if (unlikely(xdp_data_meta_unsupported(from))) {
+		from_buf = from->data;
+		to_buf = to->data;
+		metalen = 0;
+	} else {
+		from_buf = from->data_meta;
+		metalen = from->data - from->data_meta;
+		to_buf = to->data - metalen;
+	}
+
+	memcpy(to_buf, from_buf, len + metalen);
+}
+
+static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
+		     bool explicit_free)
+{
+	struct xdp_buff *xsk_xdp;
+	int err;
+
+	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
+		xs->rx_dropped++;
+		return -ENOSPC;
+	}
+
+	xsk_xdp = xsk_buff_alloc(xs->pool);
+	if (!xsk_xdp) {
+		xs->rx_dropped++;
+		return -ENOSPC;
+	}
+
+	xsk_copy_xdp(xsk_xdp, xdp, len);
+	err = __xsk_rcv_zc(xs, xsk_xdp, len);
+	if (err) {
+		xsk_buff_free(xsk_xdp);
+		return err;
+	}
+	if (explicit_free)
+		xdp_return_buff(xdp);
+	return 0;
+}
+
+static bool xsk_tx_writeable(struct xdp_sock *xs)
+{
+	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
+		return false;
+
+	return true;
+}
+
+static bool xsk_is_bound(struct xdp_sock *xs)
+{
+	if (READ_ONCE(xs->state) == XSK_BOUND) {
+		/* Matches smp_wmb() in bind(). */
+		smp_rmb();
+		return true;
+	}
+	return false;
+}
+
+static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp,
+		   bool explicit_free)
 {
 	u32 len;
+
+	if (!xsk_is_bound(xs))
+		return -EINVAL;

 	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
 		return -EINVAL;

 	len = xdp->data_end - xdp->data;

-	return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
-		__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
+	return xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL ?
+		__xsk_rcv_zc(xs, xdp, len) :
+		__xsk_rcv(xs, xdp, len, explicit_free);
 }

-void xsk_flush(struct xdp_sock *xs)
+static void xsk_flush(struct xdp_sock *xs)
 {
-	xskq_produce_flush_desc(xs->rx);
-	xs->sk.sk_data_ready(&xs->sk);
+	xskq_prod_submit(xs->rx);
+	__xskq_cons_release(xs->pool->fq);
+	sock_def_readable(&xs->sk);
 }

 int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
-	u32 len = xdp->data_end - xdp->data;
-	void *buffer;
-	u64 addr;
 	int err;

-	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
-		return -EINVAL;
-
-	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
-	    len > xs->umem->chunk_size_nohr) {
-		xs->rx_dropped++;
-		return -ENOSPC;
-	}
-
-	addr += xs->umem->headroom;
-
-	buffer = xdp_umem_get_data(xs->umem, addr);
-	memcpy(buffer, xdp->data, len);
-	err = xskq_produce_batch_desc(xs->rx, addr, len);
-	if (!err) {
-		xskq_discard_addr(xs->umem->fq);
-		xsk_flush(xs);
-		return 0;
-	}
-
-	xs->rx_dropped++;
+	spin_lock_bh(&xs->rx_lock);
+	err = xsk_rcv(xs, xdp, false);
+	xsk_flush(xs);
+	spin_unlock_bh(&xs->rx_lock);
 	return err;
 }

-void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
+int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
-	xskq_produce_flush_addr_n(umem->cq, nb_entries);
-}
-EXPORT_SYMBOL(xsk_umem_complete_tx);
+	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
+	int err;

-void xsk_umem_consume_tx_done(struct xdp_umem *umem)
+	err = xsk_rcv(xs, xdp, true);
+	if (err)
+		return err;
+
+	if (!xs->flush_node.prev)
+		list_add(&xs->flush_node, flush_list);
+
+	return 0;
+}
+
+void __xsk_map_flush(void)
+{
+	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
+	struct xdp_sock *xs, *tmp;
+
+	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
+		xsk_flush(xs);
+		__list_del_clearprev(&xs->flush_node);
+	}
+}
+
+void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
+{
+	xskq_prod_submit_n(pool->cq, nb_entries);
+}
+EXPORT_SYMBOL(xsk_tx_completed);
+
+void xsk_tx_release(struct xsk_buff_pool *pool)
 {
 	struct xdp_sock *xs;

 	rcu_read_lock();
-	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
-		xs->sk.sk_write_space(&xs->sk);
+	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
+		__xskq_cons_release(xs->tx);
+		if (xsk_tx_writeable(xs))
+			xs->sk.sk_write_space(&xs->sk);
 	}
 	rcu_read_unlock();
 }
-EXPORT_SYMBOL(xsk_umem_consume_tx_done);
+EXPORT_SYMBOL(xsk_tx_release);

-bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
+bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
 {
-	struct xdp_desc desc;
 	struct xdp_sock *xs;

 	rcu_read_lock();
-	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
-		if (!xskq_peek_desc(xs->tx, &desc))
+	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
+		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
+			xs->tx->queue_empty_descs++;
 			continue;
+		}

-		if (xskq_produce_addr_lazy(umem->cq, desc.addr))
+		/* This is the backpressure mechanism for the Tx path.
+		 * Reserve space in the completion queue and only proceed
+		 * if there is space in it. This avoids having to implement
+		 * any buffering in the Tx path.
+		 */
+		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
 			goto out;

-		*dma = xdp_umem_get_dma(umem, desc.addr);
-		*len = desc.len;
-
-		xskq_discard_desc(xs->tx);
+		xskq_cons_release(xs->tx);
 		rcu_read_unlock();
 		return true;
 	}
@@ -183,14 +339,23 @@
 	rcu_read_unlock();
 	return false;
 }
-EXPORT_SYMBOL(xsk_umem_consume_tx);
+EXPORT_SYMBOL(xsk_tx_peek_desc);

-static int xsk_zc_xmit(struct sock *sk)
+static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
 {
-	struct xdp_sock *xs = xdp_sk(sk);
 	struct net_device *dev = xs->dev;
+	int err;

-	return dev->netdev_ops->ndo_xsk_async_xmit(dev, xs->queue_id);
+	rcu_read_lock();
+	err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
+	rcu_read_unlock();
+
+	return err;
+}
+
+static int xsk_zc_xmit(struct xdp_sock *xs)
+{
+	return xsk_wakeup(xs, XDP_WAKEUP_TX);
 }

 static void xsk_destruct_skb(struct sk_buff *skb)
@@ -199,29 +364,33 @@
 	struct xdp_sock *xs = xdp_sk(skb->sk);
 	unsigned long flags;

-	spin_lock_irqsave(&xs->tx_completion_lock, flags);
-	WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));
-	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);
+	spin_lock_irqsave(&xs->pool->cq_lock, flags);
+	xskq_prod_submit_addr(xs->pool->cq, addr);
+	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

 	sock_wfree(skb);
 }

-static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
-			    size_t total_len)
+static int xsk_generic_xmit(struct sock *sk)
 {
-	u32 max_batch = TX_BATCH_SIZE;
 	struct xdp_sock *xs = xdp_sk(sk);
+	u32 max_batch = TX_BATCH_SIZE;
 	bool sent_frame = false;
 	struct xdp_desc desc;
 	struct sk_buff *skb;
+	unsigned long flags;
 	int err = 0;
+	u32 hr, tr;

 	mutex_lock(&xs->mutex);

 	if (xs->queue_id >= xs->dev->real_num_tx_queues)
 		goto out;

-	while (xskq_peek_desc(xs->tx, &desc)) {
+	hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
+	tr = xs->dev->needed_tailroom;
+
+	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
 		char *buffer;
 		u64 addr;
 		u32 len;
@@ -232,29 +401,51 @@
 		}

 		len = desc.len;
-		skb = sock_alloc_send_skb(sk, len, 1, &err);
+		skb = sock_alloc_send_skb(sk, hr + len + tr, 1, &err);
 		if (unlikely(!skb))
 			goto out;

+		skb_reserve(skb, hr);
 		skb_put(skb, len);
+
 		addr = desc.addr;
-		buffer = xdp_umem_get_data(xs->umem, addr);
+		buffer = xsk_buff_raw_get_data(xs->pool, addr);
 		err = skb_store_bits(skb, 0, buffer, len);
-		if (unlikely(err) || xskq_reserve_addr(xs->umem->cq)) {
+		/* This is the backpressure mechanism for the Tx path.
+		 * Reserve space in the completion queue and only proceed
+		 * if there is space in it. This avoids having to implement
+		 * any buffering in the Tx path.
+		 */
+		spin_lock_irqsave(&xs->pool->cq_lock, flags);
+		if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
+			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
 			kfree_skb(skb);
 			goto out;
 		}
+		spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

 		skb->dev = xs->dev;
 		skb->priority = sk->sk_priority;
 		skb->mark = sk->sk_mark;
-		skb_shinfo(skb)->destructor_arg = (void *)(long)addr;
+		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
 		skb->destructor = xsk_destruct_skb;

-		err = dev_direct_xmit(skb, xs->queue_id);
-		xskq_discard_desc(xs->tx);
+		err = __dev_direct_xmit(skb, xs->queue_id);
+		if (err == NETDEV_TX_BUSY) {
+			/* Tell user-space to retry the send */
+			skb->destructor = sock_wfree;
+			spin_lock_irqsave(&xs->pool->cq_lock, flags);
+			xskq_prod_cancel(xs->pool->cq);
+			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
+			/* Free skb without triggering the perf drop trace */
+			consume_skb(skb);
+			err = -EAGAIN;
+			goto out;
+		}
+
+		xskq_cons_release(xs->tx);
 		/* Ignore NET_XMIT_CN as packet might have been sent */
-		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
+		if (err == NET_XMIT_DROP) {
 			/* SKB completed but not sent */
 			err = -EBUSY;
 			goto out;
@@ -263,12 +454,27 @@
 		sent_frame = true;
 	}

+	xs->tx->queue_empty_descs++;
+
 out:
 	if (sent_frame)
-		sk->sk_write_space(sk);
+		if (xsk_tx_writeable(xs))
+			sk->sk_write_space(sk);

 	mutex_unlock(&xs->mutex);
 	return err;
+}
+
+static int __xsk_sendmsg(struct sock *sk)
+{
+	struct xdp_sock *xs = xdp_sk(sk);
+
+	if (unlikely(!(xs->dev->flags & IFF_UP)))
+		return -ENETDOWN;
+	if (unlikely(!xs->tx))
+		return -ENOBUFS;
+
+	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
 }

 static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
@@ -277,28 +483,40 @@
 	struct sock *sk = sock->sk;
 	struct xdp_sock *xs = xdp_sk(sk);

-	if (unlikely(!xs->dev))
+	if (unlikely(!xsk_is_bound(xs)))
 		return -ENXIO;
-	if (unlikely(!(xs->dev->flags & IFF_UP)))
-		return -ENETDOWN;
-	if (unlikely(!xs->tx))
-		return -ENOBUFS;
-	if (need_wait)
+	if (unlikely(need_wait))
 		return -EOPNOTSUPP;

-	return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
+	return __xsk_sendmsg(sk);
 }

 static __poll_t xsk_poll(struct file *file, struct socket *sock,
 			 struct poll_table_struct *wait)
 {
-	__poll_t mask = datagram_poll(file, sock, wait);
+	__poll_t mask = 0;
 	struct sock *sk = sock->sk;
 	struct xdp_sock *xs = xdp_sk(sk);
+	struct xsk_buff_pool *pool;

-	if (xs->rx && !xskq_empty_desc(xs->rx))
+	sock_poll_wait(file, sock, wait);
+
+	if (unlikely(!xsk_is_bound(xs)))
+		return mask;
+
+	pool = xs->pool;
+
+	if (pool->cached_need_wakeup) {
+		if (xs->zc)
+			xsk_wakeup(xs, pool->cached_need_wakeup);
+		else
+			/* Poll needs to drive Tx also in copy mode */
+			__xsk_sendmsg(sk);
+	}
+
+	if (xs->rx && !xskq_prod_is_empty(xs->rx))
 		mask |= EPOLLIN | EPOLLRDNORM;
-	if (xs->tx && !xskq_full_desc(xs->tx))
+	if (xs->tx && xsk_tx_writeable(xs))
 		mask |= EPOLLOUT | EPOLLWRNORM;

 	return mask;
@@ -322,6 +540,67 @@
 	return 0;
 }

+static void xsk_unbind_dev(struct xdp_sock *xs)
+{
+	struct net_device *dev = xs->dev;
+
+	if (xs->state != XSK_BOUND)
+		return;
+	WRITE_ONCE(xs->state, XSK_UNBOUND);
+
+	/* Wait for driver to stop using the xdp socket. */
+	xp_del_xsk(xs->pool, xs);
+	xs->dev = NULL;
+	synchronize_net();
+	dev_put(dev);
+}
+
+static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
+					      struct xdp_sock ***map_entry)
+{
+	struct xsk_map *map = NULL;
+	struct xsk_map_node *node;
+
+	*map_entry = NULL;
+
+	spin_lock_bh(&xs->map_list_lock);
+	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
+					node);
+	if (node) {
+		WARN_ON(xsk_map_inc(node->map));
+		map = node->map;
+		*map_entry = node->map_entry;
+	}
+	spin_unlock_bh(&xs->map_list_lock);
+	return map;
+}
+
+static void xsk_delete_from_maps(struct xdp_sock *xs)
+{
+	/* This function removes the current XDP socket from all the
+	 * maps it resides in. We need to take extra care here, due to
+	 * the two locks involved. Each map has a lock synchronizing
+	 * updates to the entries, and each socket has a lock that
+	 * synchronizes access to the list of maps (map_list). For
+	 * deadlock avoidance the locks need to be taken in the order
+	 * "map lock"->"socket map list lock". We start off by
+	 * accessing the socket map list, and take a reference to the
+	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
+	 * calls. Then we ask the map to remove the socket, which
+	 * tries to remove the socket from the map. Note that there
+	 * might be updates to the map between
+	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
+	 */
+	struct xdp_sock **map_entry = NULL;
+	struct xsk_map *map;
+
+	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
+		xsk_map_try_sock_delete(map, xs, map_entry);
+		xsk_map_put(map);
+	}
+}
+
 static int xsk_release(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
@@ -333,22 +612,23 @@

 	net = sock_net(sk);

+	mutex_lock(&net->xdp.lock);
+	sk_del_node_init_rcu(sk);
+	mutex_unlock(&net->xdp.lock);
+
 	local_bh_disable();
 	sock_prot_inuse_add(net, sk->sk_prot, -1);
 	local_bh_enable();

-	if (xs->dev) {
-		struct net_device *dev = xs->dev;
-
-		/* Wait for driver to stop using the xdp socket. */
-		xdp_del_sk_umem(xs->umem, xs);
-		xs->dev = NULL;
-		synchronize_net();
-		dev_put(dev);
-	}
+	xsk_delete_from_maps(xs);
+	mutex_lock(&xs->mutex);
+	xsk_unbind_dev(xs);
+	mutex_unlock(&xs->mutex);

 	xskq_destroy(xs->rx);
 	xskq_destroy(xs->tx);
+	xskq_destroy(xs->fq_tmp);
+	xskq_destroy(xs->cq_tmp);

 	sock_orphan(sk);
 	sock->sk = NULL;
@@ -376,6 +656,11 @@
 	return sock;
 }

+static bool xsk_validate_queues(struct xdp_sock *xs)
+{
+	return xs->fq_tmp && xs->cq_tmp;
+}
+
 static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 {
 	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
@@ -390,8 +675,14 @@
 	if (sxdp->sxdp_family != AF_XDP)
 		return -EINVAL;

+	flags = sxdp->sxdp_flags;
+	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
+		      XDP_USE_NEED_WAKEUP))
+		return -EINVAL;
+
+	rtnl_lock();
 	mutex_lock(&xs->mutex);
-	if (xs->dev) {
+	if (xs->state != XSK_READY) {
 		err = -EBUSY;
 		goto out_release;
 	}
@@ -409,19 +700,12 @@

 	qid = sxdp->sxdp_queue_id;

-	if ((xs->rx && qid >= dev->real_num_rx_queues) ||
-	    (xs->tx && qid >= dev->real_num_tx_queues)) {
-		err = -EINVAL;
-		goto out_unlock;
-	}
-
-	flags = sxdp->sxdp_flags;
-
 	if (flags & XDP_SHARED_UMEM) {
 		struct xdp_sock *umem_xs;
 		struct socket *sock;

-		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY)) {
+		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
+		    (flags & XDP_USE_NEED_WAKEUP)) {
 			/* Cannot specify flags for shared sockets. */
 			err = -EINVAL;
 			goto out_unlock;
@@ -440,50 +724,101 @@
 		}

 		umem_xs = xdp_sk(sock->sk);
-		if (!umem_xs->umem) {
-			/* No umem to inherit. */
+		if (!xsk_is_bound(umem_xs)) {
 			err = -EBADF;
 			sockfd_put(sock);
 			goto out_unlock;
-		} else if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
-			err = -EINVAL;
-			sockfd_put(sock);
-			goto out_unlock;
+		}
+
+		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
+			/* Share the umem with another socket on another qid
+			 * and/or device.
+			 */
+			xs->pool = xp_create_and_assign_umem(xs,
+							     umem_xs->umem);
+			if (!xs->pool) {
+				err = -ENOMEM;
+				sockfd_put(sock);
+				goto out_unlock;
+			}
+
+			err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
+						   qid);
+			if (err) {
+				xp_destroy(xs->pool);
+				xs->pool = NULL;
+				sockfd_put(sock);
+				goto out_unlock;
+			}
+		} else {
+			/* Share the buffer pool with the other socket. */
+			if (xs->fq_tmp || xs->cq_tmp) {
+				/* Do not allow setting your own fq or cq. */
+				err = -EINVAL;
+				sockfd_put(sock);
+				goto out_unlock;
+			}
+
+			xp_get_pool(umem_xs->pool);
+			xs->pool = umem_xs->pool;
 		}

 		xdp_get_umem(umem_xs->umem);
 		WRITE_ONCE(xs->umem, umem_xs->umem);
 		sockfd_put(sock);
-	} else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
+	} else if (!xs->umem || !xsk_validate_queues(xs)) {
 		err = -EINVAL;
 		goto out_unlock;
 	} else {
 		/* This xsk has its own umem. */
-		xskq_set_umem(xs->umem->fq, &xs->umem->props);
-		xskq_set_umem(xs->umem->cq, &xs->umem->props);
-
-		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
-		if (err)
+		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
+		if (!xs->pool) {
+			err = -ENOMEM;
 			goto out_unlock;
+		}
+
+		err = xp_assign_dev(xs->pool, dev, qid, flags);
+		if (err) {
+			xp_destroy(xs->pool);
+			xs->pool = NULL;
+			goto out_unlock;
+		}
 	}
+
+	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
+	xs->fq_tmp = NULL;
+	xs->cq_tmp = NULL;

 	xs->dev = dev;
 	xs->zc = xs->umem->zc;
 	xs->queue_id = qid;
-	xskq_set_umem(xs->rx, &xs->umem->props);
-	xskq_set_umem(xs->tx, &xs->umem->props);
-	xdp_add_sk_umem(xs->umem, xs);
+	xp_add_xsk(xs->pool, xs);

 out_unlock:
-	if (err)
+	if (err) {
 		dev_put(dev);
+	} else {
+		/* Matches smp_rmb() in bind() for shared umem
+		 * sockets, and xsk_is_bound().
+		 */
+		smp_wmb();
+		WRITE_ONCE(xs->state, XSK_BOUND);
+	}
 out_release:
 	mutex_unlock(&xs->mutex);
+	rtnl_unlock();
 	return err;
 }

+struct xdp_umem_reg_v1 {
+	__u64 addr; /* Start of packet data area */
+	__u64 len; /* Length of packet data area */
+	__u32 chunk_size;
+	__u32 headroom;
+};
+
 static int xsk_setsockopt(struct socket *sock, int level, int optname,
-			  char __user *optval, unsigned int optlen)
+			  sockptr_t optval, unsigned int optlen)
 {
 	struct sock *sk = sock->sk;
 	struct xdp_sock *xs = xdp_sk(sk);
@@ -501,25 +836,38 @@

 		if (optlen < sizeof(entries))
 			return -EINVAL;
-		if (copy_from_user(&entries, optval, sizeof(entries)))
+		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
 			return -EFAULT;

 		mutex_lock(&xs->mutex);
+		if (xs->state != XSK_READY) {
+			mutex_unlock(&xs->mutex);
+			return -EBUSY;
+		}
 		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
 		err = xsk_init_queue(entries, q, false);
+		if (!err && optname == XDP_TX_RING)
+			/* Tx needs to be explicitly woken up the first time */
+			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
 		mutex_unlock(&xs->mutex);
 		return err;
 	}
 	case XDP_UMEM_REG:
 	{
-		struct xdp_umem_reg mr;
+		size_t mr_size = sizeof(struct xdp_umem_reg);
+		struct xdp_umem_reg mr = {};
 		struct xdp_umem *umem;

-		if (copy_from_user(&mr, optval, sizeof(mr)))
+		if (optlen < sizeof(struct xdp_umem_reg_v1))
+			return -EINVAL;
+		else if (optlen < sizeof(mr))
+			mr_size = sizeof(struct xdp_umem_reg_v1);
+
+		if (copy_from_sockptr(&mr, optval, mr_size))
 			return -EFAULT;

 		mutex_lock(&xs->mutex);
-		if (xs->umem) {
+		if (xs->state != XSK_READY || xs->umem) {
 			mutex_unlock(&xs->mutex);
 			return -EBUSY;
 		}
@@ -542,17 +890,17 @@
 		struct xsk_queue **q;
 		int entries;

-		if (copy_from_user(&entries, optval, sizeof(entries)))
+		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
 			return -EFAULT;

 		mutex_lock(&xs->mutex);
-		if (!xs->umem) {
+		if (xs->state != XSK_READY) {
 			mutex_unlock(&xs->mutex);
-			return -EINVAL;
+			return -EBUSY;
 		}

-		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
-			&xs->umem->cq;
+		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
+			&xs->cq_tmp;
 		err = xsk_init_queue(entries, q, true);
 		mutex_unlock(&xs->mutex);
 		return err;
@@ -563,6 +911,26 @@

 	return -ENOPROTOOPT;
 }
+
+static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
+{
+	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
+	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
+	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
+}
+
+static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
+{
+	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
+	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
+	ring->desc = offsetof(struct xdp_umem_ring, desc);
+}
+
+struct xdp_statistics_v1 {
+	__u64 rx_dropped;
+	__u64 rx_invalid_descs;
+	__u64 tx_invalid_descs;
+};

 static int xsk_getsockopt(struct socket *sock, int level, int optname,
 			  char __user *optval, int __user *optlen)
@@ -582,20 +950,36 @@
 	switch (optname) {
 	case XDP_STATISTICS:
 	{
-		struct xdp_statistics stats;
+		struct xdp_statistics stats = {};
+		bool extra_stats = true;
+		size_t stats_size;

-		if (len < sizeof(stats))
+		if (len < sizeof(struct xdp_statistics_v1)) {
 			return -EINVAL;
+		} else if (len < sizeof(stats)) {
+			extra_stats = false;
+			stats_size = sizeof(struct xdp_statistics_v1);
+		} else {
+			stats_size = sizeof(stats);
+		}

 		mutex_lock(&xs->mutex);
 		stats.rx_dropped = xs->rx_dropped;
+		if (extra_stats) {
+			stats.rx_ring_full = xs->rx_queue_full;
+			stats.rx_fill_ring_empty_descs =
+				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
+			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
+		} else {
+			stats.rx_dropped += xs->rx_queue_full;
+		}
 		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
 		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
 		mutex_unlock(&xs->mutex);

-		if (copy_to_user(optval, &stats, sizeof(stats)))
+		if (copy_to_user(optval, &stats, stats_size))
 			return -EFAULT;
-		if (put_user(sizeof(stats), optlen))
+		if (put_user(stats_size, optlen))
 			return -EFAULT;

 		return 0;
@@ -603,26 +987,69 @@
 	case XDP_MMAP_OFFSETS:
 	{
 		struct xdp_mmap_offsets off;
+		struct xdp_mmap_offsets_v1 off_v1;
+		bool flags_supported = true;
+		void *to_copy;

-		if (len < sizeof(off))
+		if (len < sizeof(off_v1))
+			return -EINVAL;
+		else if (len < sizeof(off))
+			flags_supported = false;
+
+		if (flags_supported) {
+			/* xdp_ring_offset is identical to xdp_ring_offset_v1
+			 * except for the flags field added to the end.
+			 */
+			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
+					       &off.rx);
+			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
+					       &off.tx);
+			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
+					       &off.fr);
+			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
+					       &off.cr);
+			off.rx.flags = offsetof(struct xdp_rxtx_ring,
+						ptrs.flags);
+			off.tx.flags = offsetof(struct xdp_rxtx_ring,
+						ptrs.flags);
+			off.fr.flags = offsetof(struct xdp_umem_ring,
+						ptrs.flags);
+			off.cr.flags = offsetof(struct xdp_umem_ring,
+						ptrs.flags);
+
+			len = sizeof(off);
+			to_copy = &off;
+		} else {
+			xsk_enter_rxtx_offsets(&off_v1.rx);
+			xsk_enter_rxtx_offsets(&off_v1.tx);
+			xsk_enter_umem_offsets(&off_v1.fr);
+			xsk_enter_umem_offsets(&off_v1.cr);
+
+			len = sizeof(off_v1);
+			to_copy = &off_v1;
+		}
+
+		if (copy_to_user(optval, to_copy, len))
+			return -EFAULT;
+		if (put_user(len, optlen))
+			return -EFAULT;
+
+		return 0;
+	}
+	case XDP_OPTIONS:
+	{
+		struct xdp_options opts = {};
+
+		if (len < sizeof(opts))
 			return -EINVAL;

-		off.rx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
-		off.rx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
-		off.rx.desc = offsetof(struct xdp_rxtx_ring, desc);
-		off.tx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
-		off.tx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
-		off.tx.desc = offsetof(struct xdp_rxtx_ring, desc);
+		mutex_lock(&xs->mutex);
+		if (xs->zc)
+			opts.flags |= XDP_OPTIONS_ZEROCOPY;
+		mutex_unlock(&xs->mutex);

-		off.fr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
-		off.fr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
-		off.fr.desc = offsetof(struct xdp_umem_ring, desc);
-		off.cr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
-		off.cr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
-		off.cr.desc = offsetof(struct xdp_umem_ring, desc);
-
-		len = sizeof(off);
-		if (copy_to_user(optval, &off, len))
+		len = sizeof(opts);
+		if (copy_to_user(optval, &opts, len))
 			return -EFAULT;
 		if (put_user(len, optlen))
 			return -EFAULT;
@@ -643,25 +1070,23 @@
 	unsigned long size = vma->vm_end - vma->vm_start;
 	struct xdp_sock *xs = xdp_sk(sock->sk);
 	struct xsk_queue *q = NULL;
-	struct xdp_umem *umem;
 	unsigned long pfn;
 	struct page *qpg;
+
+	if (READ_ONCE(xs->state) != XSK_READY)
+		return -EBUSY;

 	if (offset == XDP_PGOFF_RX_RING) {
 		q = READ_ONCE(xs->rx);
 	} else if (offset == XDP_PGOFF_TX_RING) {
 		q = READ_ONCE(xs->tx);
 	} else {
-		umem = READ_ONCE(xs->umem);
-		if (!umem)
-			return -EINVAL;
-
 		/* Matches the smp_wmb() in XDP_UMEM_REG */
 		smp_rmb();
 		if (offset == XDP_UMEM_PGOFF_FILL_RING)
-			q = READ_ONCE(umem->fq);
+			q = READ_ONCE(xs->fq_tmp);
 		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
-			q = READ_ONCE(umem->cq);
+			q = READ_ONCE(xs->cq_tmp);
 	}

 	if (!q)
@@ -670,12 +1095,44 @@
 	/* Matches the smp_wmb() in xsk_init_queue */
 	smp_rmb();
 	qpg = virt_to_head_page(q->ring);
-	if (size > (PAGE_SIZE << compound_order(qpg)))
+	if (size > page_size(qpg))
 		return -EINVAL;

 	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
 	return remap_pfn_range(vma, vma->vm_start, pfn,
 			       size, vma->vm_page_prot);
+}
+
+static int xsk_notifier(struct notifier_block *this,
+			unsigned long msg, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct net *net = dev_net(dev);
+	struct sock *sk;
+
+	switch (msg) {
+	case NETDEV_UNREGISTER:
+		mutex_lock(&net->xdp.lock);
+		sk_for_each(sk, &net->xdp.list) {
+			struct xdp_sock *xs = xdp_sk(sk);
+
+			mutex_lock(&xs->mutex);
+			if (xs->dev == dev) {
+				sk->sk_err = ENETDOWN;
+				if (!sock_flag(sk, SOCK_DEAD))
+					sk->sk_error_report(sk);
+
+				xsk_unbind_dev(xs);
+
+				/* Clear device references. */
+				xp_clear_dev(xs->pool);
+			}
+			mutex_unlock(&xs->mutex);
+		}
+		mutex_unlock(&net->xdp.lock);
+		break;
+	}
+	return NOTIFY_DONE;
 }

 static struct proto xsk_proto = {
@@ -712,7 +1169,8 @@
 	if (!sock_flag(sk, SOCK_DEAD))
 		return;

-	xdp_put_umem(xs->umem);
+	if (!xp_put_pool(xs->pool))
+		xdp_put_umem(xs->umem, !xs->pool);

 	sk_refcnt_debug_dec(sk);
 }
@@ -720,8 +1178,8 @@
 static int xsk_create(struct net *net, struct socket *sock, int protocol,
 		      int kern)
 {
-	struct sock *sk;
 	struct xdp_sock *xs;
+	struct sock *sk;

 	if (!ns_capable(net->user_ns, CAP_NET_RAW))
 		return -EPERM;
@@ -749,8 +1207,16 @@
 	sock_set_flag(sk, SOCK_RCU_FREE);

 	xs = xdp_sk(sk);
+	xs->state = XSK_READY;
 	mutex_init(&xs->mutex);
-	spin_lock_init(&xs->tx_completion_lock);
+	spin_lock_init(&xs->rx_lock);
+
+	INIT_LIST_HEAD(&xs->map_list);
+	spin_lock_init(&xs->map_list_lock);
+
+	mutex_lock(&net->xdp.lock);
+	sk_add_node_rcu(sk, &net->xdp.list);
+	mutex_unlock(&net->xdp.lock);

 	local_bh_disable();
 	sock_prot_inuse_add(net, &xsk_proto, 1);
@@ -765,9 +1231,30 @@
 	.owner = THIS_MODULE,
 };

+static struct notifier_block xsk_netdev_notifier = {
+	.notifier_call = xsk_notifier,
+};
+
+static int __net_init xsk_net_init(struct net *net)
+{
+	mutex_init(&net->xdp.lock);
+	INIT_HLIST_HEAD(&net->xdp.list);
+	return 0;
+}
+
+static void __net_exit xsk_net_exit(struct net *net)
+{
+	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
+}
+
+static struct pernet_operations xsk_net_ops = {
+	.init = xsk_net_init,
+	.exit = xsk_net_exit,
+};
+
 static int __init xsk_init(void)
 {
-	int err;
+	int err, cpu;

 	err = proto_register(&xsk_proto, 0 /* no slab */);
 	if (err)
@@ -777,8 +1264,22 @@
 	if (err)
 		goto out_proto;

+	err = register_pernet_subsys(&xsk_net_ops);
+	if (err)
+		goto out_sk;
+
+	err = register_netdevice_notifier(&xsk_netdev_notifier);
+	if (err)
+		goto out_pernet;
+
+	for_each_possible_cpu(cpu)
+		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
 	return 0;

+out_pernet:
+	unregister_pernet_subsys(&xsk_net_ops);
+out_sk:
+	sock_unregister(PF_XDP);
 out_proto:
 	proto_unregister(&xsk_proto);
 out:
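
The kernel-side need_wakeup support added above (xsk_set_tx_need_wakeup(), xsk_set_rx_need_wakeup() and the XDP_USE_NEED_WAKEUP bind flag) has a matching user-space half: the application checks the XDP_RING_NEED_WAKEUP bit in a ring's flags word and only then issues a syscall to kick the kernel. The sketch below is illustrative only, not part of this patch; it assumes the socket was bound with XDP_USE_NEED_WAKEUP and that the ring flags words were already mapped via the offsets returned by getsockopt(XDP_MMAP_OFFSETS). The helper names (kick_tx_if_needed, wake_rx_if_needed) and the tx_flags/fq_flags parameters are hypothetical.

/* Minimal user-space sketch of the need_wakeup handshake, under the
 * assumptions stated above. Only uses the uapi bits from <linux/if_xdp.h>.
 */
#include <errno.h>
#include <poll.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

/* Kick the kernel Tx path only when the kernel asked for it. */
static int kick_tx_if_needed(int xsk_fd, const volatile __u32 *tx_flags)
{
	if (!(*tx_flags & XDP_RING_NEED_WAKEUP))
		return 0;	/* driver still busy; no syscall needed */

	if (sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0) >= 0)
		return 0;

	/* EAGAIN/EBUSY/ENETDOWN are transient per the kernel code above. */
	if (errno == EAGAIN || errno == EBUSY || errno == ENETDOWN)
		return 0;
	return -errno;
}

/* Wake the Rx/fill path via poll() when the fill ring carries the flag. */
static void wake_rx_if_needed(int xsk_fd, const volatile __u32 *fq_flags)
{
	struct pollfd pfd = { .fd = xsk_fd, .events = POLLIN };

	if (*fq_flags & XDP_RING_NEED_WAKEUP)
		poll(&pfd, 1, 0);
}

In this scheme the application spins on the rings without any syscalls while the driver keeps the flag cleared, and only falls back to sendto()/poll() when the kernel sets XDP_RING_NEED_WAKEUP, which is what the cached_need_wakeup bookkeeping in the diff implements.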