@@ -3,26 +3,21 @@
  * Copyright(c) 2018 Intel Corporation.
  */
 
+#include <linux/log2.h>
 #include <linux/slab.h>
+#include <linux/overflow.h>
+#include <net/xdp_sock_drv.h>
 
 #include "xsk_queue.h"
 
-void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props)
+static size_t xskq_get_ring_size(struct xsk_queue *q, bool umem_queue)
 {
-	if (!q)
-		return;
+	struct xdp_umem_ring *umem_ring;
+	struct xdp_rxtx_ring *rxtx_ring;
 
-	q->umem_props = *umem_props;
-}
-
-static u32 xskq_umem_get_ring_size(struct xsk_queue *q)
-{
-	return sizeof(struct xdp_umem_ring) + q->nentries * sizeof(u64);
-}
-
-static u32 xskq_rxtx_get_ring_size(struct xsk_queue *q)
-{
-	return sizeof(struct xdp_ring) + q->nentries * sizeof(struct xdp_desc);
+	if (umem_queue)
+		return struct_size(umem_ring, desc, q->nentries);
+	return struct_size(rxtx_ring, desc, q->nentries);
 }
 
 struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
@@ -40,8 +35,7 @@ struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
 
 	gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN |
 		    __GFP_COMP | __GFP_NORETRY;
-	size = umem_queue ? xskq_umem_get_ring_size(q) :
-			    xskq_rxtx_get_ring_size(q);
+	size = xskq_get_ring_size(q, umem_queue);
 
 	q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags,
 						      get_order(size));