forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/net/xdp/xsk_queue.h
@@ -9,254 +9,374 @@
 #include <linux/types.h>
 #include <linux/if_xdp.h>
 #include <net/xdp_sock.h>
+#include <net/xsk_buff_pool.h>
 
-#define RX_BATCH_SIZE 16
-#define LAZY_UPDATE_THRESHOLD 128
+#include "xsk.h"
 
 struct xdp_ring {
         u32 producer ____cacheline_aligned_in_smp;
+        /* Hinder the adjacent cache prefetcher to prefetch the consumer
+         * pointer if the producer pointer is touched and vice versa.
+         */
+        u32 pad ____cacheline_aligned_in_smp;
         u32 consumer ____cacheline_aligned_in_smp;
+        u32 flags;
 };
 
 /* Used for the RX and TX queues for packets */
 struct xdp_rxtx_ring {
         struct xdp_ring ptrs;
-        struct xdp_desc desc[0] ____cacheline_aligned_in_smp;
+        struct xdp_desc desc[] ____cacheline_aligned_in_smp;
 };
 
 /* Used for the fill and completion queues for buffers */
 struct xdp_umem_ring {
         struct xdp_ring ptrs;
-        u64 desc[0] ____cacheline_aligned_in_smp;
+        u64 desc[] ____cacheline_aligned_in_smp;
 };
 
 struct xsk_queue {
-        struct xdp_umem_props umem_props;
         u32 ring_mask;
         u32 nentries;
-        u32 prod_head;
-        u32 prod_tail;
-        u32 cons_head;
-        u32 cons_tail;
+        u32 cached_prod;
+        u32 cached_cons;
         struct xdp_ring *ring;
         u64 invalid_descs;
+        u64 queue_empty_descs;
 };
 
-/* Common functions operating for both RXTX and umem queues */
+/* The structure of the shared state of the rings are the same as the
+ * ring buffer in kernel/events/ring_buffer.c. For the Rx and completion
+ * ring, the kernel is the producer and user space is the consumer. For
+ * the Tx and fill rings, the kernel is the consumer and user space is
+ * the producer.
+ *
+ * producer                         consumer
+ *
+ * if (LOAD ->consumer) {           LOAD ->producer
+ *                    (A)           smp_rmb()       (C)
+ *    STORE $data                   LOAD $data
+ *    smp_wmb()       (B)           smp_mb()        (D)
+ *    STORE ->producer              STORE ->consumer
+ * }
+ *
+ * (A) pairs with (D), and (B) pairs with (C).
+ *
+ * Starting with (B), it protects the data from being written after
+ * the producer pointer. If this barrier was missing, the consumer
+ * could observe the producer pointer being set and thus load the data
+ * before the producer has written the new data. The consumer would in
+ * this case load the old data.
+ *
+ * (C) protects the consumer from speculatively loading the data before
+ * the producer pointer actually has been read. If we do not have this
+ * barrier, some architectures could load old data as speculative loads
+ * are not discarded as the CPU does not know there is a dependency
+ * between ->producer and data.
+ *
+ * (A) is a control dependency that separates the load of ->consumer
+ * from the stores of $data. In case ->consumer indicates there is no
+ * room in the buffer to store $data we do not. So no barrier is needed.
+ *
+ * (D) protects the load of the data to be observed to happen after the
+ * store of the consumer pointer. If we did not have this memory
+ * barrier, the producer could observe the consumer pointer being set
+ * and overwrite the data with a new value before the consumer got the
+ * chance to read the old value. The consumer would thus miss reading
+ * the old entry and very likely read the new entry twice, once right
+ * now and again after circling through the ring.
+ */
 
-static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
-{
-        return q ? q->invalid_descs : 0;
-}
+/* The operations on the rings are the following:
+ *
+ * producer                           consumer
+ *
+ * RESERVE entries                    PEEK in the ring for entries
+ * WRITE data into the ring           READ data from the ring
+ * SUBMIT entries                     RELEASE entries
+ *
+ * The producer reserves one or more entries in the ring. It can then
+ * fill in these entries and finally submit them so that they can be
+ * seen and read by the consumer.
+ *
+ * The consumer peeks into the ring to see if the producer has written
+ * any new entries. If so, the consumer can then read these entries
+ * and when it is done reading them release them back to the producer
+ * so that the producer can use these slots to fill in new entries.
+ *
+ * The function names below reflect these operations.
+ */
 
-static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
-{
-        u32 entries = q->prod_tail - q->cons_tail;
+/* Functions that read and validate content from consumer rings. */
 
-        if (entries == 0) {
-                /* Refresh the local pointer */
-                q->prod_tail = READ_ONCE(q->ring->producer);
-                entries = q->prod_tail - q->cons_tail;
-        }
-
-        return (entries > dcnt) ? dcnt : entries;
-}
-
-static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
-{
-        u32 free_entries = q->nentries - (producer - q->cons_tail);
-
-        if (free_entries >= dcnt)
-                return free_entries;
-
-        /* Refresh the local tail pointer */
-        q->cons_tail = READ_ONCE(q->ring->consumer);
-        return q->nentries - (producer - q->cons_tail);
-}
-
-/* UMEM queue */
-
-static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr)
-{
-        if (addr >= q->umem_props.size) {
-                q->invalid_descs++;
-                return false;
-        }
-
-        return true;
-}
-
-static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr)
-{
-        while (q->cons_tail != q->cons_head) {
-                struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
-                unsigned int idx = q->cons_tail & q->ring_mask;
-
-                *addr = READ_ONCE(ring->desc[idx]) & q->umem_props.chunk_mask;
-                if (xskq_is_valid_addr(q, *addr))
-                        return addr;
-
-                q->cons_tail++;
-        }
-
-        return NULL;
-}
-
-static inline u64 *xskq_peek_addr(struct xsk_queue *q, u64 *addr)
-{
-        if (q->cons_tail == q->cons_head) {
-                WRITE_ONCE(q->ring->consumer, q->cons_tail);
-                q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);
-
-                /* Order consumer and data */
-                smp_rmb();
-        }
-
-        return xskq_validate_addr(q, addr);
-}
-
-static inline void xskq_discard_addr(struct xsk_queue *q)
-{
-        q->cons_tail++;
-}
-
-static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr)
+static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
 {
         struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
 
-        if (xskq_nb_free(q, q->prod_tail, 1) == 0)
-                return -ENOSPC;
+        if (q->cached_cons != q->cached_prod) {
+                u32 idx = q->cached_cons & q->ring_mask;
 
-        ring->desc[q->prod_tail++ & q->ring_mask] = addr;
-
-        /* Order producer and data */
-        smp_wmb();
-
-        WRITE_ONCE(q->ring->producer, q->prod_tail);
-        return 0;
-}
-
-static inline int xskq_produce_addr_lazy(struct xsk_queue *q, u64 addr)
-{
-        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
-
-        if (xskq_nb_free(q, q->prod_head, LAZY_UPDATE_THRESHOLD) == 0)
-                return -ENOSPC;
-
-        ring->desc[q->prod_head++ & q->ring_mask] = addr;
-        return 0;
-}
-
-static inline void xskq_produce_flush_addr_n(struct xsk_queue *q,
-                                             u32 nb_entries)
-{
-        /* Order producer and data */
-        smp_wmb();
-
-        q->prod_tail += nb_entries;
-        WRITE_ONCE(q->ring->producer, q->prod_tail);
-}
-
-static inline int xskq_reserve_addr(struct xsk_queue *q)
-{
-        if (xskq_nb_free(q, q->prod_head, 1) == 0)
-                return -ENOSPC;
-
-        q->prod_head++;
-        return 0;
-}
-
-/* Rx/Tx queue */
-
-static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
-{
-        if (!xskq_is_valid_addr(q, d->addr))
-                return false;
-
-        if (((d->addr + d->len) & q->umem_props.chunk_mask) !=
-            (d->addr & q->umem_props.chunk_mask)) {
-                q->invalid_descs++;
-                return false;
+                *addr = ring->desc[idx];
+                return true;
         }
 
+        return false;
+}
+
+static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
+                                            struct xdp_desc *desc)
+{
+        u64 chunk, chunk_end;
+
+        chunk = xp_aligned_extract_addr(pool, desc->addr);
+        if (likely(desc->len)) {
+                chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len - 1);
+                if (chunk != chunk_end)
+                        return false;
+        }
+
+        if (chunk >= pool->addrs_cnt)
+                return false;
+
+        if (desc->options)
+                return false;
         return true;
 }
 
-static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
-                                                  struct xdp_desc *desc)
-{
-        while (q->cons_tail != q->cons_head) {
-                struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
-                unsigned int idx = q->cons_tail & q->ring_mask;
-
-                *desc = READ_ONCE(ring->desc[idx]);
-                if (xskq_is_valid_desc(q, desc))
-                        return desc;
-
-                q->cons_tail++;
-        }
-
-        return NULL;
-}
-
-static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
+static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
                                               struct xdp_desc *desc)
 {
-        if (q->cons_tail == q->cons_head) {
-                WRITE_ONCE(q->ring->consumer, q->cons_tail);
-                q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);
+        u64 addr, base_addr;
 
-                /* Order consumer and data */
-                smp_rmb();
+        base_addr = xp_unaligned_extract_addr(desc->addr);
+        addr = xp_unaligned_add_offset_to_addr(desc->addr);
+
+        if (desc->len > pool->chunk_size)
+                return false;
+
+        if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt ||
+            xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
+                return false;
+
+        if (desc->options)
+                return false;
+        return true;
+}
+
+static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
+                                    struct xdp_desc *desc)
+{
+        return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
+                xp_aligned_validate_desc(pool, desc);
+}
+
+static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
+                                           struct xdp_desc *d,
+                                           struct xsk_buff_pool *pool)
+{
+        if (!xp_validate_desc(pool, d)) {
+                q->invalid_descs++;
+                return false;
+        }
+        return true;
+}
+
+static inline bool xskq_cons_read_desc(struct xsk_queue *q,
+                                       struct xdp_desc *desc,
+                                       struct xsk_buff_pool *pool)
+{
+        while (q->cached_cons != q->cached_prod) {
+                struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
+                u32 idx = q->cached_cons & q->ring_mask;
+
+                *desc = ring->desc[idx];
+                if (xskq_cons_is_valid_desc(q, desc, pool))
+                        return true;
+
+                q->cached_cons++;
         }
 
-        return xskq_validate_desc(q, desc);
+        return false;
 }
 
-static inline void xskq_discard_desc(struct xsk_queue *q)
+/* Functions for consumers */
+
+static inline void __xskq_cons_release(struct xsk_queue *q)
 {
-        q->cons_tail++;
+        smp_mb(); /* D, matches A */
+        WRITE_ONCE(q->ring->consumer, q->cached_cons);
 }
 
-static inline int xskq_produce_batch_desc(struct xsk_queue *q,
-                                          u64 addr, u32 len)
+static inline void __xskq_cons_peek(struct xsk_queue *q)
 {
-        struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
-        unsigned int idx;
-
-        if (xskq_nb_free(q, q->prod_head, 1) == 0)
-                return -ENOSPC;
-
-        idx = (q->prod_head++) & q->ring_mask;
-        ring->desc[idx].addr = addr;
-        ring->desc[idx].len = len;
-
-        return 0;
+        /* Refresh the local pointer */
+        q->cached_prod = READ_ONCE(q->ring->producer);
+        smp_rmb(); /* C, matches B */
 }
 
-static inline void xskq_produce_flush_desc(struct xsk_queue *q)
+static inline void xskq_cons_get_entries(struct xsk_queue *q)
 {
-        /* Order producer and data */
-        smp_wmb();
-
-        q->prod_tail = q->prod_head;
-        WRITE_ONCE(q->ring->producer, q->prod_tail);
+        __xskq_cons_release(q);
+        __xskq_cons_peek(q);
 }
 
-static inline bool xskq_full_desc(struct xsk_queue *q)
+static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
+{
+        u32 entries = q->cached_prod - q->cached_cons;
+
+        if (entries >= cnt)
+                return true;
+
+        __xskq_cons_peek(q);
+        entries = q->cached_prod - q->cached_cons;
+
+        return entries >= cnt;
+}
+
+static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
+{
+        if (q->cached_prod == q->cached_cons)
+                xskq_cons_get_entries(q);
+        return xskq_cons_read_addr_unchecked(q, addr);
+}
+
+static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
+                                       struct xdp_desc *desc,
+                                       struct xsk_buff_pool *pool)
+{
+        if (q->cached_prod == q->cached_cons)
+                xskq_cons_get_entries(q);
+        return xskq_cons_read_desc(q, desc, pool);
+}
+
+static inline void xskq_cons_release(struct xsk_queue *q)
+{
+        /* To improve performance, only update local state here.
+         * Reflect this to global state when we get new entries
+         * from the ring in xskq_cons_get_entries() and whenever
+         * Rx or Tx processing are completed in the NAPI loop.
+         */
+        q->cached_cons++;
+}
+
+static inline bool xskq_cons_is_full(struct xsk_queue *q)
 {
         /* No barriers needed since data is not accessed */
         return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) ==
                 q->nentries;
 }
 
-static inline bool xskq_empty_desc(struct xsk_queue *q)
+static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
+{
+        /* No barriers needed since data is not accessed */
+        return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
+}
+
+/* Functions for producers */
+
+static inline bool xskq_prod_is_full(struct xsk_queue *q)
+{
+        u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);
+
+        if (free_entries)
+                return false;
+
+        /* Refresh the local tail pointer */
+        q->cached_cons = READ_ONCE(q->ring->consumer);
+        free_entries = q->nentries - (q->cached_prod - q->cached_cons);
+
+        return !free_entries;
+}
+
+static inline void xskq_prod_cancel(struct xsk_queue *q)
+{
+        q->cached_prod--;
+}
+
+static inline int xskq_prod_reserve(struct xsk_queue *q)
+{
+        if (xskq_prod_is_full(q))
+                return -ENOSPC;
+
+        /* A, matches D */
+        q->cached_prod++;
+        return 0;
+}
+
+static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
+{
+        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
+
+        if (xskq_prod_is_full(q))
+                return -ENOSPC;
+
+        /* A, matches D */
+        ring->desc[q->cached_prod++ & q->ring_mask] = addr;
+        return 0;
+}
+
+static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
+                                         u64 addr, u32 len)
+{
+        struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
+        u32 idx;
+
+        if (xskq_prod_is_full(q))
+                return -ENOSPC;
+
+        /* A, matches D */
+        idx = q->cached_prod++ & q->ring_mask;
+        ring->desc[idx].addr = addr;
+        ring->desc[idx].len = len;
+
+        return 0;
+}
+
+static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
+{
+        smp_wmb(); /* B, matches C */
+
+        WRITE_ONCE(q->ring->producer, idx);
+}
+
+static inline void xskq_prod_submit(struct xsk_queue *q)
+{
+        __xskq_prod_submit(q, q->cached_prod);
+}
+
+static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
+{
+        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
+        u32 idx = q->ring->producer;
+
+        ring->desc[idx++ & q->ring_mask] = addr;
+
+        __xskq_prod_submit(q, idx);
+}
+
+static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
+{
+        __xskq_prod_submit(q, q->ring->producer + nb_entries);
+}
+
+static inline bool xskq_prod_is_empty(struct xsk_queue *q)
 {
         /* No barriers needed since data is not accessed */
         return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
 }
 
-void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
+/* For both producers and consumers */
+
+static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
+{
+        return q ? q->invalid_descs : 0;
+}
+
+static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
+{
+        return q ? q->queue_empty_descs : 0;
+}
+
 struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
 void xskq_destroy(struct xsk_queue *q_ops);
 
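For orientation, below is a minimal sketch (not part of this diff) of how the new RESERVE/WRITE/SUBMIT and PEEK/READ/RELEASE helpers are meant to be driven from the kernel side, e.g. on an Rx path where the kernel consumes fill-ring addresses and produces Rx descriptors. The function and variable names (example_rx_path, rx_q, fq, addr, len) are illustrative placeholders; real callers in net/xdp/xsk.c typically go through struct xsk_buff_pool rather than reading the fill queue directly.

/* Hypothetical usage sketch, assuming this header is included and that
 * rx_q is the Rx ring (kernel = producer) and fq is the fill ring
 * (kernel = consumer). Not taken from xsk.c.
 */
static void example_rx_path(struct xsk_queue *rx_q, struct xsk_queue *fq)
{
        u64 addr;
        u32 len = 128;  /* length of the received frame, placeholder */

        /* PEEK: refresh the cached producer pointer if needed and read
         * one address from the fill ring.
         */
        if (!xskq_cons_peek_addr_unchecked(fq, &addr))
                return;                 /* fill ring empty */

        /* ... copy or DMA the received frame into the umem chunk at 'addr' ... */

        /* RESERVE + WRITE: claim one Rx slot and fill in the descriptor.
         * xskq_prod_reserve_desc() returns -ENOSPC when the ring is full.
         */
        if (xskq_prod_reserve_desc(rx_q, addr, len))
                return;                 /* Rx ring full, retry later */

        /* RELEASE: only bump the cached consumer index; the ->consumer
         * pointer itself is published later via __xskq_cons_release() or
         * xskq_cons_get_entries().
         */
        xskq_cons_release(fq);

        /* SUBMIT: smp_wmb() then store ->producer so user space can see
         * the new entry (barrier B pairing with C above).
         */
        xskq_prod_submit(rx_q);
}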