 #include <linux/types.h>
 #include <linux/if_xdp.h>
 #include <net/xdp_sock.h>
+#include <net/xsk_buff_pool.h>
 
-#define RX_BATCH_SIZE 16
-#define LAZY_UPDATE_THRESHOLD 128
+#include "xsk.h"
 
 struct xdp_ring {
         u32 producer ____cacheline_aligned_in_smp;
+        /* Hinder the adjacent cache prefetcher from prefetching the consumer
+         * pointer if the producer pointer is touched and vice versa.
+         */
+        u32 pad ____cacheline_aligned_in_smp;
         u32 consumer ____cacheline_aligned_in_smp;
+        u32 flags;
 };
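The pad field only makes sense together with the alignment annotations. Below is a standalone sketch, not part of the patch: the struct name, the CL_ALIGNED macro and the 64-byte line size are assumptions. It mirrors the layout to show that producer and consumer end up two cache lines apart, so the adjacent-line prefetcher touching one pointer's line never pulls in the other's.

/* Userspace mirror of the layout above, for illustration only. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CACHELINE 64
#define CL_ALIGNED __attribute__((aligned(CACHELINE)))

struct my_xdp_ring {
	uint32_t producer CL_ALIGNED;
	/* Without this pad, consumer would sit on the line right after
	 * producer, within reach of the adjacent-line prefetcher.
	 */
	uint32_t pad CL_ALIGNED;
	uint32_t consumer CL_ALIGNED;
	uint32_t flags;
};

int main(void)
{
	/* producer and consumer land two cache lines apart. */
	static_assert(offsetof(struct my_xdp_ring, pad) == CACHELINE,
		      "pad gets its own cache line");
	static_assert(offsetof(struct my_xdp_ring, consumer) == 2 * CACHELINE,
		      "consumer is two cache lines after producer");
	printf("producer@%zu pad@%zu consumer@%zu\n",
	       offsetof(struct my_xdp_ring, producer),
	       offsetof(struct my_xdp_ring, pad),
	       offsetof(struct my_xdp_ring, consumer));
	return 0;
}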
 
 /* Used for the RX and TX queues for packets */
 struct xdp_rxtx_ring {
         struct xdp_ring ptrs;
-        struct xdp_desc desc[0] ____cacheline_aligned_in_smp;
+        struct xdp_desc desc[] ____cacheline_aligned_in_smp;
 };
 
 /* Used for the fill and completion queues for buffers */
 struct xdp_umem_ring {
         struct xdp_ring ptrs;
-        u64 desc[0] ____cacheline_aligned_in_smp;
+        u64 desc[] ____cacheline_aligned_in_smp;
 };
 
 struct xsk_queue {
-        struct xdp_umem_props umem_props;
         u32 ring_mask;
         u32 nentries;
-        u32 prod_head;
-        u32 prod_tail;
-        u32 cons_head;
-        u32 cons_tail;
+        u32 cached_prod;
+        u32 cached_cons;
         struct xdp_ring *ring;
         u64 invalid_descs;
+        u64 queue_empty_descs;
 };
 
-/* Common functions operating for both RXTX and umem queues */
+/* The structure of the shared state of the rings is the same as the
+ * ring buffer in kernel/events/ring_buffer.c. For the Rx and completion
+ * ring, the kernel is the producer and user space is the consumer. For
+ * the Tx and fill rings, the kernel is the consumer and user space is
+ * the producer.
+ *
+ * producer                         consumer
+ *
+ * if (LOAD ->consumer) {           LOAD ->producer
+ *                    (A)           smp_rmb()       (C)
+ *    STORE $data                   LOAD $data
+ *    smp_wmb()       (B)           smp_mb()        (D)
+ *    STORE ->producer              STORE ->consumer
+ * }
+ *
+ * (A) pairs with (D), and (B) pairs with (C).
+ *
+ * Starting with (B), it protects the data from being written after
+ * the producer pointer. If this barrier was missing, the consumer
+ * could observe the producer pointer being set and thus load the data
+ * before the producer has written the new data. The consumer would in
+ * this case load the old data.
+ *
+ * (C) protects the consumer from speculatively loading the data before
+ * the producer pointer actually has been read. If we do not have this
+ * barrier, some architectures could load old data, as speculative loads
+ * are not discarded since the CPU does not know there is a dependency
+ * between ->producer and the data.
+ *
+ * (A) is a control dependency that separates the load of ->consumer
+ * from the stores of $data. In case ->consumer indicates there is no
+ * room in the buffer to store $data, we do not store it. So no barrier
+ * is needed.
+ *
+ * (D) protects the load of the data from being observed to happen after
+ * the store of the consumer pointer. If we did not have this memory
+ * barrier, the producer could observe the consumer pointer being set
+ * and overwrite the data with a new value before the consumer got the
+ * chance to read the old value. The consumer would thus miss reading
+ * the old entry and very likely read the new entry twice, once right
+ * now and again after circling through the ring.
+ */
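To make the pairing concrete, here is a user-space view of the consumer side of the Rx ring. This is only a sketch: the ring struct, the handle_frame() callback and the helper name are invented for illustration, and the __atomic builtins stand in for the user-space halves of the scheme, acquire playing the role of (C) and release the role of (D).

#include <stdint.h>

/* Provided by the application in this sketch. */
void handle_frame(uint64_t addr, uint32_t len);

struct user_rx_ring {
	volatile uint32_t *producer;   /* written by the kernel */
	volatile uint32_t *consumer;   /* written by user space */
	struct { uint64_t addr; uint32_t len, options; } *desc;
	uint32_t mask;                 /* nentries - 1 */
};

static inline uint32_t rx_ring_poll(struct user_rx_ring *r, uint32_t cached_cons)
{
	/* LOAD ->producer, then LOAD $data: the acquire load orders the
	 * two, i.e. it is (C) and pairs with the kernel's smp_wmb() (B).
	 */
	uint32_t prod = __atomic_load_n(r->producer, __ATOMIC_ACQUIRE);

	while (cached_cons != prod) {
		uint64_t addr = r->desc[cached_cons & r->mask].addr;
		uint32_t len = r->desc[cached_cons & r->mask].len;

		handle_frame(addr, len);
		cached_cons++;
	}

	/* LOAD $data, then STORE ->consumer: the release store orders the
	 * two, i.e. it is (D) and pairs with the kernel's (A).
	 */
	__atomic_store_n(r->consumer, cached_cons, __ATOMIC_RELEASE);
	return cached_cons;
}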
 
-static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
-{
-        return q ? q->invalid_descs : 0;
-}
+/* The operations on the rings are the following:
+ *
+ * producer                           consumer
+ *
+ * RESERVE entries                    PEEK in the ring for entries
+ * WRITE data into the ring           READ data from the ring
+ * SUBMIT entries                     RELEASE entries
+ *
+ * The producer reserves one or more entries in the ring. It can then
+ * fill in these entries and finally submit them so that they can be
+ * seen and read by the consumer.
+ *
+ * The consumer peeks into the ring to see if the producer has written
+ * any new entries. If so, the consumer can then read these entries
+ * and, when it is done reading them, release them back to the producer
+ * so that the producer can use these slots to fill in new entries.
+ *
+ * The function names below reflect these operations.
+ */
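As a rough illustration of the producer column, this is how a kernel-side Rx path could compose the helpers defined later in this header. It is a sketch in the spirit of net/xdp/xsk.c, not the actual call flow: the function name rx_one_frame() and submitting once per frame are inventions for illustration. The consumer column is illustrated after xskq_cons_release() further down.

static int rx_one_frame(struct xsk_queue *rx, u64 addr, u32 len)
{
	int err;

	/* RESERVE + WRITE: stage one descriptor in the Rx ring. */
	err = xskq_prod_reserve_desc(rx, addr, len);
	if (err)
		return err; /* -ENOSPC when the ring is full */

	/* SUBMIT: publish the new producer pointer; the smp_wmb() inside
	 * __xskq_prod_submit() is barrier (B) from the comment above.
	 */
	xskq_prod_submit(rx);
	return 0;
}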
 
-static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
-{
-        u32 entries = q->prod_tail - q->cons_tail;
+/* Functions that read and validate content from consumer rings. */
 
-        if (entries == 0) {
-                /* Refresh the local pointer */
-                q->prod_tail = READ_ONCE(q->ring->producer);
-                entries = q->prod_tail - q->cons_tail;
-        }
-
-        return (entries > dcnt) ? dcnt : entries;
-}
-
-static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
-{
-        u32 free_entries = q->nentries - (producer - q->cons_tail);
-
-        if (free_entries >= dcnt)
-                return free_entries;
-
-        /* Refresh the local tail pointer */
-        q->cons_tail = READ_ONCE(q->ring->consumer);
-        return q->nentries - (producer - q->cons_tail);
-}
-
-/* UMEM queue */
-
-static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr)
-{
-        if (addr >= q->umem_props.size) {
-                q->invalid_descs++;
-                return false;
-        }
-
-        return true;
-}
-
-static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr)
-{
-        while (q->cons_tail != q->cons_head) {
-                struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
-                unsigned int idx = q->cons_tail & q->ring_mask;
-
-                *addr = READ_ONCE(ring->desc[idx]) & q->umem_props.chunk_mask;
-                if (xskq_is_valid_addr(q, *addr))
-                        return addr;
-
-                q->cons_tail++;
-        }
-
-        return NULL;
-}
-
-static inline u64 *xskq_peek_addr(struct xsk_queue *q, u64 *addr)
-{
-        if (q->cons_tail == q->cons_head) {
-                WRITE_ONCE(q->ring->consumer, q->cons_tail);
-                q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);
-
-                /* Order consumer and data */
-                smp_rmb();
-        }
-
-        return xskq_validate_addr(q, addr);
-}
-
-static inline void xskq_discard_addr(struct xsk_queue *q)
-{
-        q->cons_tail++;
-}
-
-static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr)
+static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
 {
         struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
 
-        if (xskq_nb_free(q, q->prod_tail, 1) == 0)
-                return -ENOSPC;
+        if (q->cached_cons != q->cached_prod) {
+                u32 idx = q->cached_cons & q->ring_mask;
 
-        ring->desc[q->prod_tail++ & q->ring_mask] = addr;
-
-        /* Order producer and data */
-        smp_wmb();
-
-        WRITE_ONCE(q->ring->producer, q->prod_tail);
-        return 0;
-}
-
-static inline int xskq_produce_addr_lazy(struct xsk_queue *q, u64 addr)
-{
-        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
-
-        if (xskq_nb_free(q, q->prod_head, LAZY_UPDATE_THRESHOLD) == 0)
-                return -ENOSPC;
-
-        ring->desc[q->prod_head++ & q->ring_mask] = addr;
-        return 0;
-}
-
-static inline void xskq_produce_flush_addr_n(struct xsk_queue *q,
-                                             u32 nb_entries)
-{
-        /* Order producer and data */
-        smp_wmb();
-
-        q->prod_tail += nb_entries;
-        WRITE_ONCE(q->ring->producer, q->prod_tail);
-}
-
-static inline int xskq_reserve_addr(struct xsk_queue *q)
-{
-        if (xskq_nb_free(q, q->prod_head, 1) == 0)
-                return -ENOSPC;
-
-        q->prod_head++;
-        return 0;
-}
-
-/* Rx/Tx queue */
-
-static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
-{
-        if (!xskq_is_valid_addr(q, d->addr))
-                return false;
-
-        if (((d->addr + d->len) & q->umem_props.chunk_mask) !=
-            (d->addr & q->umem_props.chunk_mask)) {
-                q->invalid_descs++;
-                return false;
+                *addr = ring->desc[idx];
+                return true;
         }
 
+        return false;
+}
+
+static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
+                                            struct xdp_desc *desc)
+{
+        u64 chunk, chunk_end;
+
+        chunk = xp_aligned_extract_addr(pool, desc->addr);
+        if (likely(desc->len)) {
+                chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len - 1);
+                if (chunk != chunk_end)
+                        return false;
+        }
+
+        if (chunk >= pool->addrs_cnt)
+                return false;
+
+        if (desc->options)
+                return false;
         return true;
 }
 
-static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
-                                                  struct xdp_desc *desc)
-{
-        while (q->cons_tail != q->cons_head) {
-                struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
-                unsigned int idx = q->cons_tail & q->ring_mask;
-
-                *desc = READ_ONCE(ring->desc[idx]);
-                if (xskq_is_valid_desc(q, desc))
-                        return desc;
-
-                q->cons_tail++;
-        }
-
-        return NULL;
-}
-
-static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
+static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
                                               struct xdp_desc *desc)
 {
-        if (q->cons_tail == q->cons_head) {
-                WRITE_ONCE(q->ring->consumer, q->cons_tail);
-                q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);
+        u64 addr, base_addr;
 
-                /* Order consumer and data */
-                smp_rmb();
+        base_addr = xp_unaligned_extract_addr(desc->addr);
+        addr = xp_unaligned_add_offset_to_addr(desc->addr);
+
+        if (desc->len > pool->chunk_size)
+                return false;
+
+        if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt ||
+            addr + desc->len > pool->addrs_cnt ||
+            xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
+                return false;
+
+        if (desc->options)
+                return false;
+        return true;
+}
+
+static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
+                                    struct xdp_desc *desc)
+{
+        return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
+                xp_aligned_validate_desc(pool, desc);
+}
+
+static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
+                                           struct xdp_desc *d,
+                                           struct xsk_buff_pool *pool)
+{
+        if (!xp_validate_desc(pool, d)) {
+                q->invalid_descs++;
+                return false;
+        }
+        return true;
+}
+
+static inline bool xskq_cons_read_desc(struct xsk_queue *q,
+                                       struct xdp_desc *desc,
+                                       struct xsk_buff_pool *pool)
+{
+        while (q->cached_cons != q->cached_prod) {
+                struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
+                u32 idx = q->cached_cons & q->ring_mask;
+
+                *desc = ring->desc[idx];
+                if (xskq_cons_is_valid_desc(q, desc, pool))
+                        return true;
+
+                q->cached_cons++;
         }
 
-        return xskq_validate_desc(q, desc);
+        return false;
 }
 
-static inline void xskq_discard_desc(struct xsk_queue *q)
+/* Functions for consumers */
+
+static inline void __xskq_cons_release(struct xsk_queue *q)
 {
-        q->cons_tail++;
+        smp_mb(); /* D, matches A */
+        WRITE_ONCE(q->ring->consumer, q->cached_cons);
 }
 
-static inline int xskq_produce_batch_desc(struct xsk_queue *q,
-                                          u64 addr, u32 len)
+static inline void __xskq_cons_peek(struct xsk_queue *q)
 {
-        struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
-        unsigned int idx;
-
-        if (xskq_nb_free(q, q->prod_head, 1) == 0)
-                return -ENOSPC;
-
-        idx = (q->prod_head++) & q->ring_mask;
-        ring->desc[idx].addr = addr;
-        ring->desc[idx].len = len;
-
-        return 0;
+        /* Refresh the local pointer */
+        q->cached_prod = READ_ONCE(q->ring->producer);
+        smp_rmb(); /* C, matches B */
 }
 
-static inline void xskq_produce_flush_desc(struct xsk_queue *q)
+static inline void xskq_cons_get_entries(struct xsk_queue *q)
 {
-        /* Order producer and data */
-        smp_wmb();
-
-        q->prod_tail = q->prod_head;
-        WRITE_ONCE(q->ring->producer, q->prod_tail);
+        __xskq_cons_release(q);
+        __xskq_cons_peek(q);
 }
 
-static inline bool xskq_full_desc(struct xsk_queue *q)
+static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
+{
+        u32 entries = q->cached_prod - q->cached_cons;
+
+        if (entries >= cnt)
+                return true;
+
+        __xskq_cons_peek(q);
+        entries = q->cached_prod - q->cached_cons;
+
+        return entries >= cnt;
+}
+
+static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
+{
+        if (q->cached_prod == q->cached_cons)
+                xskq_cons_get_entries(q);
+        return xskq_cons_read_addr_unchecked(q, addr);
+}
+
+static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
+                                       struct xdp_desc *desc,
+                                       struct xsk_buff_pool *pool)
+{
+        if (q->cached_prod == q->cached_cons)
+                xskq_cons_get_entries(q);
+        return xskq_cons_read_desc(q, desc, pool);
+}
+
+static inline void xskq_cons_release(struct xsk_queue *q)
+{
+        /* To improve performance, only update local state here.
+         * Reflect this to global state when we get new entries
+         * from the ring in xskq_cons_get_entries() and whenever
+         * Rx or Tx processing is completed in the NAPI loop.
+         */
+        q->cached_cons++;
+}
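A sketch of the batching that the comment in xskq_cons_release() describes, covering the consumer column of the operations table above. The function name xsk_tx_batch() and the driver hand-off are invented for illustration: each release only bumps the local cached_cons, and the shared consumer pointer is published once per batch.

static u32 xsk_tx_batch(struct xsk_queue *tx, struct xsk_buff_pool *pool,
			u32 max)
{
	struct xdp_desc desc;
	u32 sent = 0;

	/* PEEK + READ: refresh cached_prod when needed, validate and copy
	 * out one descriptor at a time.
	 */
	while (sent < max && xskq_cons_peek_desc(tx, &desc, pool)) {
		/* ... hand desc off to the driver here ... */
		xskq_cons_release(tx); /* RELEASE locally: cached_cons++ only */
		sent++;
	}

	/* Publish the whole batch to the user-space producer in one store;
	 * the smp_mb() inside __xskq_cons_release() is barrier (D).
	 */
	__xskq_cons_release(tx);
	return sent;
}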
+
+static inline bool xskq_cons_is_full(struct xsk_queue *q)
 {
         /* No barriers needed since data is not accessed */
         return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) ==
                 q->nentries;
 }
 
-static inline bool xskq_empty_desc(struct xsk_queue *q)
+static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
+{
+        /* No barriers needed since data is not accessed */
+        return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
+}
+
+/* Functions for producers */
+
+static inline bool xskq_prod_is_full(struct xsk_queue *q)
+{
+        u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);
+
+        if (free_entries)
+                return false;
+
+        /* Refresh the local tail pointer */
+        q->cached_cons = READ_ONCE(q->ring->consumer);
+        free_entries = q->nentries - (q->cached_prod - q->cached_cons);
+
+        return !free_entries;
+}
+
+static inline void xskq_prod_cancel(struct xsk_queue *q)
+{
+        q->cached_prod--;
+}
+
+static inline int xskq_prod_reserve(struct xsk_queue *q)
+{
+        if (xskq_prod_is_full(q))
+                return -ENOSPC;
+
+        /* A, matches D */
+        q->cached_prod++;
+        return 0;
+}
+
+static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
+{
+        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
+
+        if (xskq_prod_is_full(q))
+                return -ENOSPC;
+
+        /* A, matches D */
+        ring->desc[q->cached_prod++ & q->ring_mask] = addr;
+        return 0;
+}
+
+static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
+                                         u64 addr, u32 len)
+{
+        struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
+        u32 idx;
+
+        if (xskq_prod_is_full(q))
+                return -ENOSPC;
+
+        /* A, matches D */
+        idx = q->cached_prod++ & q->ring_mask;
+        ring->desc[idx].addr = addr;
+        ring->desc[idx].len = len;
+
+        return 0;
+}
+
+static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
+{
+        smp_wmb(); /* B, matches C */
+
+        WRITE_ONCE(q->ring->producer, idx);
+}
+
+static inline void xskq_prod_submit(struct xsk_queue *q)
+{
+        __xskq_prod_submit(q, q->cached_prod);
+}
+
+static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
+{
+        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
+        u32 idx = q->ring->producer;
+
+        ring->desc[idx++ & q->ring_mask] = addr;
+
+        __xskq_prod_submit(q, idx);
+}
+
+static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
+{
+        __xskq_prod_submit(q, q->ring->producer + nb_entries);
+}
+
+static inline bool xskq_prod_is_empty(struct xsk_queue *q)
 {
         /* No barriers needed since data is not accessed */
         return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
 }
 
-void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
+/* For both producers and consumers */
+
+static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
+{
+        return q ? q->invalid_descs : 0;
+}
+
+static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
+{
+        return q ? q->queue_empty_descs : 0;
+}
+
 struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
 void xskq_destroy(struct xsk_queue *q_ops);
 