forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-02-20 e636c8d336489bf3eed5878299e6cc045bbad077
kernel/include/trace/events/xdp.h
@@ -22,7 +22,7 @@
 #define __XDP_ACT_SYM_FN(x)	\
 	{ XDP_##x, #x },
 #define __XDP_ACT_SYM_TAB	\
-	__XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, 0 }
+	__XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, NULL }
 __XDP_ACT_MAP(__XDP_ACT_TP_FN)
 
 TRACE_EVENT(xdp_exception,
@@ -50,14 +50,55 @@
 		  __entry->ifindex)
 );
 
+TRACE_EVENT(xdp_bulk_tx,
+
+	TP_PROTO(const struct net_device *dev,
+		 int sent, int drops, int err),
+
+	TP_ARGS(dev, sent, drops, err),
+
+	TP_STRUCT__entry(
+		__field(int, ifindex)
+		__field(u32, act)
+		__field(int, drops)
+		__field(int, sent)
+		__field(int, err)
+	),
+
+	TP_fast_assign(
+		__entry->ifindex = dev->ifindex;
+		__entry->act = XDP_TX;
+		__entry->drops = drops;
+		__entry->sent = sent;
+		__entry->err = err;
+	),
+
+	TP_printk("ifindex=%d action=%s sent=%d drops=%d err=%d",
+		  __entry->ifindex,
+		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
+		  __entry->sent, __entry->drops, __entry->err)
+);
+
+#ifndef __DEVMAP_OBJ_TYPE
+#define __DEVMAP_OBJ_TYPE
+struct _bpf_dtab_netdev {
+	struct net_device *dev;
+};
+#endif /* __DEVMAP_OBJ_TYPE */
+
+#define devmap_ifindex(tgt, map)				\
+	(((map->map_type == BPF_MAP_TYPE_DEVMAP ||		\
+	   map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)) ?	\
+	 ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex : 0)
+
 DECLARE_EVENT_CLASS(xdp_redirect_template,
 
 	TP_PROTO(const struct net_device *dev,
 		 const struct bpf_prog *xdp,
-		 int to_ifindex, int err,
-		 const struct bpf_map *map, u32 map_index),
+		 const void *tgt, int err,
+		 const struct bpf_map *map, u32 index),
 
-	TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
+	TP_ARGS(dev, xdp, tgt, err, map, index),
 
 	TP_STRUCT__entry(
 		__field(int, prog_id)
@@ -74,96 +115,71 @@
 		__entry->act = XDP_REDIRECT;
 		__entry->ifindex = dev->ifindex;
 		__entry->err = err;
-		__entry->to_ifindex = to_ifindex;
+		__entry->to_ifindex = map ? devmap_ifindex(tgt, map) :
+					    index;
 		__entry->map_id = map ? map->id : 0;
-		__entry->map_index = map_index;
+		__entry->map_index = map ? index : 0;
 	),
 
-	TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d",
+	TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
+		  " map_id=%d map_index=%d",
 		  __entry->prog_id,
 		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
 		  __entry->ifindex, __entry->to_ifindex,
-		  __entry->err)
+		  __entry->err, __entry->map_id, __entry->map_index)
 );
 
 DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
 	TP_PROTO(const struct net_device *dev,
 		 const struct bpf_prog *xdp,
-		 int to_ifindex, int err,
-		 const struct bpf_map *map, u32 map_index),
-	TP_ARGS(dev, xdp, to_ifindex, err, map, map_index)
+		 const void *tgt, int err,
+		 const struct bpf_map *map, u32 index),
+	TP_ARGS(dev, xdp, tgt, err, map, index)
 );
 
 DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
 	TP_PROTO(const struct net_device *dev,
 		 const struct bpf_prog *xdp,
-		 int to_ifindex, int err,
-		 const struct bpf_map *map, u32 map_index),
-	TP_ARGS(dev, xdp, to_ifindex, err, map, map_index)
+		 const void *tgt, int err,
+		 const struct bpf_map *map, u32 index),
+	TP_ARGS(dev, xdp, tgt, err, map, index)
 );
 
 #define _trace_xdp_redirect(dev, xdp, to)		\
-	 trace_xdp_redirect(dev, xdp, to, 0, NULL, 0);
+	 trace_xdp_redirect(dev, xdp, NULL, 0, NULL, to);
 
 #define _trace_xdp_redirect_err(dev, xdp, to, err)	\
-	 trace_xdp_redirect_err(dev, xdp, to, err, NULL, 0);
+	 trace_xdp_redirect_err(dev, xdp, NULL, err, NULL, to);
 
-DEFINE_EVENT_PRINT(xdp_redirect_template, xdp_redirect_map,
+#define _trace_xdp_redirect_map(dev, xdp, to, map, index)	\
+	 trace_xdp_redirect(dev, xdp, to, 0, map, index);
+
+#define _trace_xdp_redirect_map_err(dev, xdp, to, map, index, err) \
+	 trace_xdp_redirect_err(dev, xdp, to, err, map, index);
+
+/* not used anymore, but kept around so as not to break old programs */
+DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
 	TP_PROTO(const struct net_device *dev,
 		 const struct bpf_prog *xdp,
-		 int to_ifindex, int err,
-		 const struct bpf_map *map, u32 map_index),
-	TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
-	TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
-		  " map_id=%d map_index=%d",
-		  __entry->prog_id,
-		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
-		  __entry->ifindex, __entry->to_ifindex,
-		  __entry->err,
-		  __entry->map_id, __entry->map_index)
+		 const void *tgt, int err,
+		 const struct bpf_map *map, u32 index),
+	TP_ARGS(dev, xdp, tgt, err, map, index)
 );
 
-DEFINE_EVENT_PRINT(xdp_redirect_template, xdp_redirect_map_err,
+DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
 	TP_PROTO(const struct net_device *dev,
 		 const struct bpf_prog *xdp,
-		 int to_ifindex, int err,
-		 const struct bpf_map *map, u32 map_index),
-	TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
-	TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
-		  " map_id=%d map_index=%d",
-		  __entry->prog_id,
-		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
-		  __entry->ifindex, __entry->to_ifindex,
-		  __entry->err,
-		  __entry->map_id, __entry->map_index)
+		 const void *tgt, int err,
+		 const struct bpf_map *map, u32 index),
+	TP_ARGS(dev, xdp, tgt, err, map, index)
 );
-
-#ifndef __DEVMAP_OBJ_TYPE
-#define __DEVMAP_OBJ_TYPE
-struct _bpf_dtab_netdev {
-	struct net_device *dev;
-};
-#endif /* __DEVMAP_OBJ_TYPE */
-
-#define devmap_ifindex(fwd, map)				\
-	(!fwd ? 0 :						\
-	 ((map->map_type == BPF_MAP_TYPE_DEVMAP) ?		\
-	  ((struct _bpf_dtab_netdev *)fwd)->dev->ifindex : 0))
-
-#define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx)	\
-	 trace_xdp_redirect_map(dev, xdp, devmap_ifindex(fwd, map),	\
-				0, map, idx)
-
-#define _trace_xdp_redirect_map_err(dev, xdp, fwd, map, idx, err)	\
-	 trace_xdp_redirect_map_err(dev, xdp, devmap_ifindex(fwd, map), \
-				    err, map, idx)
 
 TRACE_EVENT(xdp_cpumap_kthread,
 
 	TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
-		 int sched),
+		 int sched, struct xdp_cpumap_stats *xdp_stats),
 
-	TP_ARGS(map_id, processed, drops, sched),
+	TP_ARGS(map_id, processed, drops, sched, xdp_stats),
 
 	TP_STRUCT__entry(
 		__field(int, map_id)
@@ -172,6 +188,9 @@
 		__field(unsigned int, drops)
 		__field(unsigned int, processed)
 		__field(int, sched)
+		__field(unsigned int, xdp_pass)
+		__field(unsigned int, xdp_drop)
+		__field(unsigned int, xdp_redirect)
 	),
 
 	TP_fast_assign(
@@ -181,16 +200,21 @@
 		__entry->drops = drops;
 		__entry->processed = processed;
 		__entry->sched = sched;
+		__entry->xdp_pass = xdp_stats->pass;
+		__entry->xdp_drop = xdp_stats->drop;
+		__entry->xdp_redirect = xdp_stats->redirect;
 	),
 
 	TP_printk("kthread"
 		  " cpu=%d map_id=%d action=%s"
 		  " processed=%u drops=%u"
-		  " sched=%d",
+		  " sched=%d"
+		  " xdp_pass=%u xdp_drop=%u xdp_redirect=%u",
 		  __entry->cpu, __entry->map_id,
 		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
 		  __entry->processed, __entry->drops,
-		  __entry->sched)
+		  __entry->sched,
+		  __entry->xdp_pass, __entry->xdp_drop, __entry->xdp_redirect)
 );
 
 TRACE_EVENT(xdp_cpumap_enqueue,
@@ -230,43 +254,142 @@
 
 TRACE_EVENT(xdp_devmap_xmit,
 
-	TP_PROTO(const struct bpf_map *map, u32 map_index,
-		 int sent, int drops,
-		 const struct net_device *from_dev,
-		 const struct net_device *to_dev, int err),
+	TP_PROTO(const struct net_device *from_dev,
+		 const struct net_device *to_dev,
+		 int sent, int drops, int err),
 
-	TP_ARGS(map, map_index, sent, drops, from_dev, to_dev, err),
+	TP_ARGS(from_dev, to_dev, sent, drops, err),
 
 	TP_STRUCT__entry(
-		__field(int, map_id)
+		__field(int, from_ifindex)
 		__field(u32, act)
-		__field(u32, map_index)
+		__field(int, to_ifindex)
 		__field(int, drops)
 		__field(int, sent)
-		__field(int, from_ifindex)
-		__field(int, to_ifindex)
 		__field(int, err)
 	),
 
 	TP_fast_assign(
-		__entry->map_id = map->id;
+		__entry->from_ifindex = from_dev->ifindex;
 		__entry->act = XDP_REDIRECT;
-		__entry->map_index = map_index;
+		__entry->to_ifindex = to_dev->ifindex;
 		__entry->drops = drops;
 		__entry->sent = sent;
-		__entry->from_ifindex = from_dev->ifindex;
-		__entry->to_ifindex = to_dev->ifindex;
 		__entry->err = err;
 	),
 
 	TP_printk("ndo_xdp_xmit"
-		  " map_id=%d map_index=%d action=%s"
+		  " from_ifindex=%d to_ifindex=%d action=%s"
 		  " sent=%d drops=%d"
-		  " from_ifindex=%d to_ifindex=%d err=%d",
-		  __entry->map_id, __entry->map_index,
+		  " err=%d",
+		  __entry->from_ifindex, __entry->to_ifindex,
 		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
 		  __entry->sent, __entry->drops,
-		  __entry->from_ifindex, __entry->to_ifindex, __entry->err)
+		  __entry->err)
+);
+
+/* Expect users already include <net/xdp.h>, but not xdp_priv.h */
+#include <net/xdp_priv.h>
+
+#define __MEM_TYPE_MAP(FN)	\
+	FN(PAGE_SHARED)		\
+	FN(PAGE_ORDER0)		\
+	FN(PAGE_POOL)		\
+	FN(XSK_BUFF_POOL)
+
+#define __MEM_TYPE_TP_FN(x)	\
+	TRACE_DEFINE_ENUM(MEM_TYPE_##x);
+#define __MEM_TYPE_SYM_FN(x)	\
+	{ MEM_TYPE_##x, #x },
+#define __MEM_TYPE_SYM_TAB	\
+	__MEM_TYPE_MAP(__MEM_TYPE_SYM_FN) { -1, 0 }
+__MEM_TYPE_MAP(__MEM_TYPE_TP_FN)
+
+TRACE_EVENT(mem_disconnect,
+
+	TP_PROTO(const struct xdp_mem_allocator *xa),
+
+	TP_ARGS(xa),
+
+	TP_STRUCT__entry(
+		__field(const struct xdp_mem_allocator *, xa)
+		__field(u32, mem_id)
+		__field(u32, mem_type)
+		__field(const void *, allocator)
+	),
+
+	TP_fast_assign(
+		__entry->xa = xa;
+		__entry->mem_id = xa->mem.id;
+		__entry->mem_type = xa->mem.type;
+		__entry->allocator = xa->allocator;
+	),
+
+	TP_printk("mem_id=%d mem_type=%s allocator=%p",
+		  __entry->mem_id,
+		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
+		  __entry->allocator
+	)
+);
+
+TRACE_EVENT(mem_connect,
+
+	TP_PROTO(const struct xdp_mem_allocator *xa,
+		 const struct xdp_rxq_info *rxq),
+
+	TP_ARGS(xa, rxq),
+
+	TP_STRUCT__entry(
+		__field(const struct xdp_mem_allocator *, xa)
+		__field(u32, mem_id)
+		__field(u32, mem_type)
+		__field(const void *, allocator)
+		__field(const struct xdp_rxq_info *, rxq)
+		__field(int, ifindex)
+	),
+
+	TP_fast_assign(
+		__entry->xa = xa;
+		__entry->mem_id = xa->mem.id;
+		__entry->mem_type = xa->mem.type;
+		__entry->allocator = xa->allocator;
+		__entry->rxq = rxq;
+		__entry->ifindex = rxq->dev->ifindex;
+	),
+
+	TP_printk("mem_id=%d mem_type=%s allocator=%p"
+		  " ifindex=%d",
+		  __entry->mem_id,
+		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
+		  __entry->allocator,
+		  __entry->ifindex
+	)
+);
+
+TRACE_EVENT(mem_return_failed,
+
+	TP_PROTO(const struct xdp_mem_info *mem,
+		 const struct page *page),
+
+	TP_ARGS(mem, page),
+
+	TP_STRUCT__entry(
+		__field(const struct page *, page)
+		__field(u32, mem_id)
+		__field(u32, mem_type)
+	),
+
+	TP_fast_assign(
+		__entry->page = page;
+		__entry->mem_id = mem->id;
+		__entry->mem_type = mem->type;
+	),
+
+	TP_printk("mem_id=%d mem_type=%s page=%p",
+		  __entry->mem_id,
+		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
+		  __entry->page
+	)
 );
 
 #endif /* _TRACE_XDP_H */
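
Illustrative usage (not part of the diff): a minimal, hypothetical sketch of how a driver's XDP TX-flush path might emit the xdp_bulk_tx tracepoint added above. The function and variable names here are invented for illustration; only the trace_xdp_bulk_tx() call signature follows from the TP_PROTO(dev, sent, drops, err) definition in this header, and the helper exists only where the tracepoint is compiled in.

#include <linux/netdevice.h>
#include <trace/events/xdp.h>

/* Hypothetical driver helper: report the outcome of one flushed XDP TX
 * bulk. "to_send" frames were queued for transmission, "sent" made it
 * out, the remainder are counted as drops; "err" is the error code
 * returned by the ndo_xdp_xmit path.
 */
static void example_xdp_tx_flush_done(struct net_device *dev,
				      int to_send, int sent, int err)
{
	trace_xdp_bulk_tx(dev, sent, to_send - sent, err);
}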