2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -30,9 +30,74 @@
  * SOFTWARE.
  */

+#include "lib/mlx5.h"
 #include "en.h"
-#include "en_accel/ipsec.h"
 #include "en_accel/tls.h"
+#include "en_accel/en_accel.h"
+
+static unsigned int stats_grps_num(struct mlx5e_priv *priv)
+{
+	return !priv->profile->stats_grps_num ? 0 :
+		priv->profile->stats_grps_num(priv);
+}
+
+unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
+{
+	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
+	const unsigned int num_stats_grps = stats_grps_num(priv);
+	unsigned int total = 0;
+	int i;
+
+	for (i = 0; i < num_stats_grps; i++)
+		total += stats_grps[i]->get_num_stats(priv);
+
+	return total;
+}
+
+void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
+{
+	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
+	const unsigned int num_stats_grps = stats_grps_num(priv);
+	int i;
+
+	for (i = num_stats_grps - 1; i >= 0; i--)
+		if (stats_grps[i]->update_stats &&
+		    stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
+			stats_grps[i]->update_stats(priv);
+}
+
+void mlx5e_stats_update(struct mlx5e_priv *priv)
+{
+	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
+	const unsigned int num_stats_grps = stats_grps_num(priv);
+	int i;
+
+	for (i = num_stats_grps - 1; i >= 0; i--)
+		if (stats_grps[i]->update_stats)
+			stats_grps[i]->update_stats(priv);
+}
+
+void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
+{
+	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
+	const unsigned int num_stats_grps = stats_grps_num(priv);
+	int i;
+
+	for (i = 0; i < num_stats_grps; i++)
+		idx = stats_grps[i]->fill_stats(priv, data, idx);
+}
+
+void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
+{
+	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
+	const unsigned int num_stats_grps = stats_grps_num(priv);
+	int i, idx = 0;
+
+	for (i = 0; i < num_stats_grps; i++)
+		idx = stats_grps[i]->fill_strings(priv, data, idx);
+}
+
+/* Concrete NIC Stats */

 static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
@@ -45,10 +110,20 @@
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },

 #ifdef CONFIG_MLX5_EN_TLS
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
 #endif

 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
@@ -64,6 +139,9 @@
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
@@ -78,6 +156,9 @@
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
@@ -88,28 +169,68 @@
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
+#ifdef CONFIG_MLX5_EN_TLS
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_ctx) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_del) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
+#endif
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
 };

 #define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)

-static int mlx5e_grp_sw_get_num_stats(struct mlx5e_priv *priv)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
 {
 	return NUM_SW_COUNTERS;
 }

-static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
 {
 	int i;

@@ -118,7 +239,7 @@
118239 return idx;
119240 }
120241
121
-static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
242
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
122243 {
123244 int i;
124245
@@ -127,18 +248,20 @@
127248 return idx;
128249 }
129250
130
-void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
251
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
131252 {
132
- struct mlx5e_sw_stats temp, *s = &temp;
253
+ struct mlx5e_sw_stats *s = &priv->stats.sw;
133254 int i;
134255
135256 memset(s, 0, sizeof(*s));
136257
137
- for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) {
258
+ for (i = 0; i < priv->max_nch; i++) {
138259 struct mlx5e_channel_stats *channel_stats =
139260 &priv->channel_stats[i];
140261 struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
141262 struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq;
263
+ struct mlx5e_xdpsq_stats *xsksq_stats = &channel_stats->xsksq;
264
+ struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
142265 struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
143266 struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
144267 int j;
@@ -158,6 +281,9 @@
158281 s->rx_xdp_drop += rq_stats->xdp_drop;
159282 s->rx_xdp_redirect += rq_stats->xdp_redirect;
160283 s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
284
+ s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
285
+ s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
286
+ s->rx_xdp_tx_nops += xdpsq_stats->nops;
161287 s->rx_xdp_tx_full += xdpsq_stats->full;
162288 s->rx_xdp_tx_err += xdpsq_stats->err;
163289 s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
@@ -168,23 +294,67 @@
168294 s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
169295 s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
170296 s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
171
- s->rx_page_reuse += rq_stats->page_reuse;
172297 s->rx_cache_reuse += rq_stats->cache_reuse;
173298 s->rx_cache_full += rq_stats->cache_full;
174299 s->rx_cache_empty += rq_stats->cache_empty;
175300 s->rx_cache_busy += rq_stats->cache_busy;
176301 s->rx_cache_waive += rq_stats->cache_waive;
177302 s->rx_congst_umr += rq_stats->congst_umr;
303
+ s->rx_arfs_err += rq_stats->arfs_err;
304
+ s->rx_recover += rq_stats->recover;
305
+#ifdef CONFIG_MLX5_EN_TLS
306
+ s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
307
+ s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
308
+ s->rx_tls_ctx += rq_stats->tls_ctx;
309
+ s->rx_tls_del += rq_stats->tls_del;
310
+ s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt;
311
+ s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start;
312
+ s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end;
313
+ s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip;
314
+ s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok;
315
+ s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip;
316
+ s->rx_tls_err += rq_stats->tls_err;
317
+#endif
178318 s->ch_events += ch_stats->events;
179319 s->ch_poll += ch_stats->poll;
180320 s->ch_arm += ch_stats->arm;
181321 s->ch_aff_change += ch_stats->aff_change;
322
+ s->ch_force_irq += ch_stats->force_irq;
182323 s->ch_eq_rearm += ch_stats->eq_rearm;
183324 /* xdp redirect */
184325 s->tx_xdp_xmit += xdpsq_red_stats->xmit;
326
+ s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
327
+ s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
328
+ s->tx_xdp_nops += xdpsq_red_stats->nops;
185329 s->tx_xdp_full += xdpsq_red_stats->full;
186330 s->tx_xdp_err += xdpsq_red_stats->err;
187331 s->tx_xdp_cqes += xdpsq_red_stats->cqes;
332
+ /* AF_XDP zero-copy */
333
+ s->rx_xsk_packets += xskrq_stats->packets;
334
+ s->rx_xsk_bytes += xskrq_stats->bytes;
335
+ s->rx_xsk_csum_complete += xskrq_stats->csum_complete;
336
+ s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary;
337
+ s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
338
+ s->rx_xsk_csum_none += xskrq_stats->csum_none;
339
+ s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark;
340
+ s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets;
341
+ s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop;
342
+ s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect;
343
+ s->rx_xsk_wqe_err += xskrq_stats->wqe_err;
344
+ s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes;
345
+ s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides;
346
+ s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop;
347
+ s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err;
348
+ s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks;
349
+ s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts;
350
+ s->rx_xsk_congst_umr += xskrq_stats->congst_umr;
351
+ s->rx_xsk_arfs_err += xskrq_stats->arfs_err;
352
+ s->tx_xsk_xmit += xsksq_stats->xmit;
353
+ s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
354
+ s->tx_xsk_inlnw += xsksq_stats->inlnw;
355
+ s->tx_xsk_full += xsksq_stats->full;
356
+ s->tx_xsk_err += xsksq_stats->err;
357
+ s->tx_xsk_cqes += xsksq_stats->cqes;
188358
189359 for (j = 0; j < priv->max_opened_tc; j++) {
190360 struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
@@ -197,6 +367,8 @@
197367 s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
198368 s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
199369 s->tx_nop += sq_stats->nop;
370
+ s->tx_mpwqe_blks += sq_stats->mpwqe_blks;
371
+ s->tx_mpwqe_pkts += sq_stats->mpwqe_pkts;
200372 s->tx_queue_stopped += sq_stats->stopped;
201373 s->tx_queue_wake += sq_stats->wake;
202374 s->tx_queue_dropped += sq_stats->dropped;
@@ -207,8 +379,16 @@
207379 s->tx_csum_none += sq_stats->csum_none;
208380 s->tx_csum_partial += sq_stats->csum_partial;
209381 #ifdef CONFIG_MLX5_EN_TLS
210
- s->tx_tls_ooo += sq_stats->tls_ooo;
211
- s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
382
+ s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
383
+ s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
384
+ s->tx_tls_ctx += sq_stats->tls_ctx;
385
+ s->tx_tls_ooo += sq_stats->tls_ooo;
386
+ s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
387
+ s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
388
+ s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
389
+ s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
390
+ s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
391
+ s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
212392 #endif
213393 s->tx_cqes += sq_stats->cqes;
214394
@@ -216,8 +396,6 @@
216396 barrier();
217397 }
218398 }
219
-
220
- memcpy(&priv->stats.sw, s, sizeof(*s));
221399 }
222400
223401 static const struct counter_desc q_stats_desc[] = {
@@ -231,7 +409,7 @@
231409 #define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
232410 #define NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc)
233411
234
-static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
412
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
235413 {
236414 int num_stats = 0;
237415
@@ -244,7 +422,7 @@
244422 return num_stats;
245423 }
246424
247
-static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
425
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
248426 {
249427 int i;
250428
@@ -259,7 +437,7 @@
259437 return idx;
260438 }
261439
262
-static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
440
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
263441 {
264442 int i;
265443
@@ -272,80 +450,97 @@
272450 return idx;
273451 }
274452
275
-static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv)
453
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
276454 {
277455 struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
278
- u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
456
+ u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
457
+ u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
458
+ int ret;
279459
280
- if (priv->q_counter &&
281
- !mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out,
282
- sizeof(out)))
283
- qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
284
- out, out_of_buffer);
285
- if (priv->drop_rq_q_counter &&
286
- !mlx5_core_query_q_counter(priv->mdev, priv->drop_rq_q_counter, 0,
287
- out, sizeof(out)))
288
- qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out, out,
289
- out_of_buffer);
460
+ MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
461
+
462
+ if (priv->q_counter) {
463
+ MLX5_SET(query_q_counter_in, in, counter_set_id,
464
+ priv->q_counter);
465
+ ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
466
+ if (!ret)
467
+ qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
468
+ out, out_of_buffer);
469
+ }
470
+
471
+ if (priv->drop_rq_q_counter) {
472
+ MLX5_SET(query_q_counter_in, in, counter_set_id,
473
+ priv->drop_rq_q_counter);
474
+ ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
475
+ if (!ret)
476
+ qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
477
+ out, out_of_buffer);
478
+ }
290479 }
291480
292481 #define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
293
-static const struct counter_desc vnic_env_stats_desc[] = {
482
+static const struct counter_desc vnic_env_stats_steer_desc[] = {
294483 { "rx_steer_missed_packets",
295484 VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
296485 };
297486
298
-#define NUM_VNIC_ENV_COUNTERS ARRAY_SIZE(vnic_env_stats_desc)
487
+static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
488
+ { "dev_internal_queue_oob",
489
+ VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
490
+};
299491
300
-static int mlx5e_grp_vnic_env_get_num_stats(struct mlx5e_priv *priv)
492
+#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
493
+ (MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
494
+ ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
495
+#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
496
+ (MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
497
+ ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
498
+
499
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
301500 {
302
- return MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard) ?
303
- NUM_VNIC_ENV_COUNTERS : 0;
501
+ return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
502
+ NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
304503 }
305504
306
-static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data,
307
- int idx)
505
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
308506 {
309507 int i;
310508
311
- if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
312
- return idx;
313
-
314
- for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++)
509
+ for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
315510 strcpy(data + (idx++) * ETH_GSTRING_LEN,
316
- vnic_env_stats_desc[i].format);
511
+ vnic_env_stats_steer_desc[i].format);
512
+
513
+ for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
514
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
515
+ vnic_env_stats_dev_oob_desc[i].format);
317516 return idx;
318517 }
319518
320
-static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data,
321
- int idx)
519
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
322520 {
323521 int i;
324522
325
- if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
326
- return idx;
327
-
328
- for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++)
523
+ for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
329524 data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
330
- vnic_env_stats_desc, i);
525
+ vnic_env_stats_steer_desc, i);
526
+
527
+ for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
528
+ data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
529
+ vnic_env_stats_dev_oob_desc, i);
331530 return idx;
332531 }
333532
334
-static void mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv)
533
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
335534 {
336535 u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
337
- int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out);
338
- u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
536
+ u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
339537 struct mlx5_core_dev *mdev = priv->mdev;
340538
341
- if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
539
+ if (!mlx5e_stats_grp_vnic_env_num_stats(priv))
342540 return;
343541
344
- MLX5_SET(query_vnic_env_in, in, opcode,
345
- MLX5_CMD_OP_QUERY_VNIC_ENV);
346
- MLX5_SET(query_vnic_env_in, in, op_mod, 0);
347
- MLX5_SET(query_vnic_env_in, in, other_vport, 0);
348
- mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
542
+ MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
543
+ mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
349544 }
350545
351546 #define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
@@ -394,13 +589,12 @@
394589
395590 #define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
396591
397
-static int mlx5e_grp_vport_get_num_stats(struct mlx5e_priv *priv)
592
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
398593 {
399594 return NUM_VPORT_COUNTERS;
400595 }
401596
402
-static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data,
403
- int idx)
597
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
404598 {
405599 int i;
406600
@@ -409,8 +603,7 @@
409603 return idx;
410604 }
411605
412
-static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
413
- int idx)
606
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
414607 {
415608 int i;
416609
@@ -420,17 +613,14 @@
420613 return idx;
421614 }
422615
423
-static void mlx5e_grp_vport_update_stats(struct mlx5e_priv *priv)
616
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
424617 {
425
- int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
426618 u32 *out = (u32 *)priv->stats.vport.query_vport_out;
427
- u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
619
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
428620 struct mlx5_core_dev *mdev = priv->mdev;
429621
430622 MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
431
- MLX5_SET(query_vport_counter_in, in, op_mod, 0);
432
- MLX5_SET(query_vport_counter_in, in, other_vport, 0);
433
- mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
623
+ mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
434624 }
435625
436626 #define PPORT_802_3_OFF(c) \
@@ -459,13 +649,12 @@
459649
460650 #define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
461651
462
-static int mlx5e_grp_802_3_get_num_stats(struct mlx5e_priv *priv)
652
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
463653 {
464654 return NUM_PPORT_802_3_COUNTERS;
465655 }
466656
467
-static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data,
468
- int idx)
657
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
469658 {
470659 int i;
471660
@@ -474,8 +663,7 @@
474663 return idx;
475664 }
476665
477
-static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
478
- int idx)
666
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
479667 {
480668 int i;
481669
@@ -485,7 +673,10 @@
485673 return idx;
486674 }
487675
488
-static void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
676
+#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
677
+ (MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)
678
+
679
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
489680 {
490681 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
491682 struct mlx5_core_dev *mdev = priv->mdev;
@@ -493,10 +684,42 @@
493684 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
494685 void *out;
495686
687
+ if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
688
+ return;
689
+
496690 MLX5_SET(ppcnt_reg, in, local_port, 1);
497691 out = pstats->IEEE_802_3_counters;
498692 MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
499693 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
694
+}
695
+
696
+#define MLX5E_READ_CTR64_BE_F(ptr, c) \
697
+ be64_to_cpu(*(__be64 *)((char *)ptr + \
698
+ MLX5_BYTE_OFF(ppcnt_reg, \
699
+ counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)))
700
+
701
+void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
702
+ struct ethtool_pause_stats *pause_stats)
703
+{
704
+ u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
705
+ struct mlx5_core_dev *mdev = priv->mdev;
706
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
707
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
708
+
709
+ if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
710
+ return;
711
+
712
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
713
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
714
+ mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
715
+ sz, MLX5_REG_PPCNT, 0, 0);
716
+
717
+ pause_stats->tx_pause_frames =
718
+ MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
719
+ a_pause_mac_ctrl_frames_transmitted);
720
+ pause_stats->rx_pause_frames =
721
+ MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
722
+ a_pause_mac_ctrl_frames_received);
500723 }
501724
502725 #define PPORT_2863_OFF(c) \
....@@ -510,13 +733,12 @@
510733
511734 #define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
512735
513
-static int mlx5e_grp_2863_get_num_stats(struct mlx5e_priv *priv)
736
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
514737 {
515738 return NUM_PPORT_2863_COUNTERS;
516739 }
517740
518
-static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data,
519
- int idx)
741
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
520742 {
521743 int i;
522744
@@ -525,8 +747,7 @@
525747 return idx;
526748 }
527749
528
-static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
529
- int idx)
750
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
530751 {
531752 int i;
532753
@@ -536,7 +757,7 @@
536757 return idx;
537758 }
538759
539
-static void mlx5e_grp_2863_update_stats(struct mlx5e_priv *priv)
760
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
540761 {
541762 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
542763 struct mlx5_core_dev *mdev = priv->mdev;
@@ -571,13 +792,12 @@
571792
572793 #define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
573794
574
-static int mlx5e_grp_2819_get_num_stats(struct mlx5e_priv *priv)
795
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
575796 {
576797 return NUM_PPORT_2819_COUNTERS;
577798 }
578799
579
-static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data,
580
- int idx)
800
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
581801 {
582802 int i;
583803
@@ -586,8 +806,7 @@
586806 return idx;
587807 }
588808
589
-static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
590
- int idx)
809
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
591810 {
592811 int i;
593812
@@ -597,13 +816,16 @@
597816 return idx;
598817 }
599818
600
-static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv)
819
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
601820 {
602821 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
603822 struct mlx5_core_dev *mdev = priv->mdev;
604823 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
605824 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
606825 void *out;
826
+
827
+ if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
828
+ return;
607829
608830 MLX5_SET(ppcnt_reg, in, local_port, 1);
609831 out = pstats->RFC_2819_counters;
@@ -619,50 +841,85 @@
619841 { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
620842 };
621843
622
-#define NUM_PPORT_PHY_STATISTICAL_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc)
844
+static const struct counter_desc
845
+pport_phy_statistical_err_lanes_stats_desc[] = {
846
+ { "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
847
+ { "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
848
+ { "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
849
+ { "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
850
+};
623851
624
-static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
852
+#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
853
+ ARRAY_SIZE(pport_phy_statistical_stats_desc)
854
+#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
855
+ ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
856
+
857
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
625858 {
859
+ struct mlx5_core_dev *mdev = priv->mdev;
860
+ int num_stats;
861
+
626862 /* "1" for link_down_events special counter */
627
- return MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group) ?
628
- NUM_PPORT_PHY_STATISTICAL_COUNTERS + 1 : 1;
863
+ num_stats = 1;
864
+
865
+ num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
866
+ NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;
867
+
868
+ num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
869
+ NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;
870
+
871
+ return num_stats;
629872 }
630873
631
-static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
632
- int idx)
874
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
633875 {
876
+ struct mlx5_core_dev *mdev = priv->mdev;
634877 int i;
635878
636879 strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");
637880
638
- if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
881
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
639882 return idx;
640883
641884 for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
642885 strcpy(data + (idx++) * ETH_GSTRING_LEN,
643886 pport_phy_statistical_stats_desc[i].format);
887
+
888
+ if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
889
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
890
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
891
+ pport_phy_statistical_err_lanes_stats_desc[i].format);
892
+
644893 return idx;
645894 }
646895
647
-static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
896
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
648897 {
898
+ struct mlx5_core_dev *mdev = priv->mdev;
649899 int i;
650900
651901 /* link_down_events_phy has special handling since it is not stored in __be64 format */
652902 data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
653903 counter_set.phys_layer_cntrs.link_down_events);
654904
655
- if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
905
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
656906 return idx;
657907
658908 for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
659909 data[idx++] =
660910 MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
661911 pport_phy_statistical_stats_desc, i);
912
+
913
+ if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
914
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
915
+ data[idx++] =
916
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
917
+ pport_phy_statistical_err_lanes_stats_desc,
918
+ i);
662919 return idx;
663920 }
664921
665
-static void mlx5e_grp_phy_update_stats(struct mlx5e_priv *priv)
922
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
666923 {
667924 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
668925 struct mlx5_core_dev *mdev = priv->mdev;
@@ -692,7 +949,7 @@
692949
693950 #define NUM_PPORT_ETH_EXT_COUNTERS ARRAY_SIZE(pport_eth_ext_stats_desc)
694951
695
-static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv)
952
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
696953 {
697954 if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
698955 return NUM_PPORT_ETH_EXT_COUNTERS;
@@ -700,8 +957,7 @@
700957 return 0;
701958 }
702959
703
-static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data,
704
- int idx)
960
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
705961 {
706962 int i;
707963
@@ -712,8 +968,7 @@
712968 return idx;
713969 }
714970
715
-static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
716
- int idx)
971
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
717972 {
718973 int i;
719974
@@ -725,7 +980,7 @@
725980 return idx;
726981 }
727982
728
-static void mlx5e_grp_eth_ext_update_stats(struct mlx5e_priv *priv)
983
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
729984 {
730985 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
731986 struct mlx5_core_dev *mdev = priv->mdev;
....@@ -766,7 +1021,7 @@
7661021 #define NUM_PCIE_PERF_COUNTERS64 ARRAY_SIZE(pcie_perf_stats_desc64)
7671022 #define NUM_PCIE_PERF_STALL_COUNTERS ARRAY_SIZE(pcie_perf_stall_stats_desc)
7681023
769
-static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv)
1024
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
7701025 {
7711026 int num_stats = 0;
7721027
....@@ -782,8 +1037,7 @@
7821037 return num_stats;
7831038 }
7841039
785
-static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data,
786
- int idx)
1040
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
7871041 {
7881042 int i;
7891043
....@@ -804,8 +1058,7 @@
8041058 return idx;
8051059 }
8061060
807
-static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
808
- int idx)
1061
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
8091062 {
8101063 int i;
8111064
@@ -766,7 +1021,7 @@
8291082 return idx;
8301083 }
8311084
832
-static void mlx5e_grp_pcie_update_stats(struct mlx5e_priv *priv)
1085
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
8331086 {
8341087 struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
8351088 struct mlx5_core_dev *mdev = priv->mdev;
@@ -782,8 +1037,7 @@
8451098 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
8461099 }
8471100
1101
+#define PPORT_PER_TC_PRIO_OFF(c) \
1102
+ MLX5_BYTE_OFF(ppcnt_reg, \
1103
+ counter_set.eth_per_tc_prio_grp_data_layout.c##_high)
1104
+
1105
+static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
1106
+ { "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
1107
+};
1108
+
1109
+#define NUM_PPORT_PER_TC_PRIO_COUNTERS ARRAY_SIZE(pport_per_tc_prio_stats_desc)
1110
+
1111
+#define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
1112
+ MLX5_BYTE_OFF(ppcnt_reg, \
1113
+ counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)
1114
+
1115
+static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
1116
+ { "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
1117
+ { "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
1118
+};
1119
+
1120
+#define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
1121
+ ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)
1122
+
1123
+static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
1124
+{
1125
+ struct mlx5_core_dev *mdev = priv->mdev;
1126
+
1127
+ if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1128
+ return 0;
1129
+
1130
+ return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
1131
+}
1132
+
1133
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
1134
+{
1135
+ struct mlx5_core_dev *mdev = priv->mdev;
1136
+ int i, prio;
1137
+
1138
+ if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1139
+ return idx;
1140
+
1141
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1142
+ for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1143
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
1144
+ pport_per_tc_prio_stats_desc[i].format, prio);
1145
+ for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
1146
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
1147
+ pport_per_tc_congest_prio_stats_desc[i].format, prio);
1148
+ }
1149
+
1150
+ return idx;
1151
+}
1152
+
1153
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
1154
+{
1155
+ struct mlx5e_pport_stats *pport = &priv->stats.pport;
1156
+ struct mlx5_core_dev *mdev = priv->mdev;
1157
+ int i, prio;
1158
+
1159
+ if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1160
+ return idx;
1161
+
1162
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1163
+ for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1164
+ data[idx++] =
1165
+ MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
1166
+ pport_per_tc_prio_stats_desc, i);
1167
+ for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS ; i++)
1168
+ data[idx++] =
1169
+ MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
1170
+ pport_per_tc_congest_prio_stats_desc, i);
1171
+ }
1172
+
1173
+ return idx;
1174
+}
1175
+
1176
+static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
1177
+{
1178
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1179
+ struct mlx5_core_dev *mdev = priv->mdev;
1180
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1181
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1182
+ void *out;
1183
+ int prio;
1184
+
1185
+ if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1186
+ return;
1187
+
1188
+ MLX5_SET(ppcnt_reg, in, pnat, 2);
1189
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
1190
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1191
+ out = pstats->per_tc_prio_counters[prio];
1192
+ MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1193
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1194
+ }
1195
+}
1196
+
1197
+static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
1198
+{
1199
+ struct mlx5_core_dev *mdev = priv->mdev;
1200
+
1201
+ if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1202
+ return 0;
1203
+
1204
+ return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
1205
+}
1206
+
1207
+static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
1208
+{
1209
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1210
+ struct mlx5_core_dev *mdev = priv->mdev;
1211
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1212
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1213
+ void *out;
1214
+ int prio;
1215
+
1216
+ if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1217
+ return;
1218
+
1219
+ MLX5_SET(ppcnt_reg, in, pnat, 2);
1220
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
1221
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1222
+ out = pstats->per_tc_congest_prio_counters[prio];
1223
+ MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1224
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1225
+ }
1226
+}
1227
+
1228
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
1229
+{
1230
+ return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
1231
+ mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
1232
+}
1233
+
1234
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
1235
+{
1236
+ mlx5e_grp_per_tc_prio_update_stats(priv);
1237
+ mlx5e_grp_per_tc_congest_prio_update_stats(priv);
1238
+}
1239
+
8481240 #define PPORT_PER_PRIO_OFF(c) \
8491241 MLX5_BYTE_OFF(ppcnt_reg, \
8501242 counter_set.eth_per_prio_grp_data_layout.c##_high)
8511243 static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
8521244 { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
8531245 { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
1246
+ { "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
8541247 { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
8551248 { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
8561249 };
@@ -903,7 +1296,7 @@
 };

 static const struct counter_desc pport_pfc_stall_stats_desc[] = {
-	{ "tx_pause_storm_warning_events ", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
+	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
 	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
 };

@@ -1013,29 +1406,27 @@
10131406 return idx;
10141407 }
10151408
1016
-static int mlx5e_grp_per_prio_get_num_stats(struct mlx5e_priv *priv)
1409
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
10171410 {
10181411 return mlx5e_grp_per_prio_traffic_get_num_stats() +
10191412 mlx5e_grp_per_prio_pfc_get_num_stats(priv);
10201413 }
10211414
1022
-static int mlx5e_grp_per_prio_fill_strings(struct mlx5e_priv *priv, u8 *data,
1023
- int idx)
1415
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
10241416 {
10251417 idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
10261418 idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
10271419 return idx;
10281420 }
10291421
1030
-static int mlx5e_grp_per_prio_fill_stats(struct mlx5e_priv *priv, u64 *data,
1031
- int idx)
1422
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
10321423 {
10331424 idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
10341425 idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
10351426 return idx;
10361427 }
10371428
1038
-static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
1429
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
10391430 {
10401431 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
10411432 struct mlx5_core_dev *mdev = priv->mdev;
@@ -1043,6 +1434,9 @@
10431434 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
10441435 int prio;
10451436 void *out;
1437
+
1438
+ if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
1439
+ return;
10461440
10471441 MLX5_SET(ppcnt_reg, in, local_port, 1);
10481442 MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
@@ -1055,25 +1449,24 @@
10551449 }
10561450
10571451 static const struct counter_desc mlx5e_pme_status_desc[] = {
1058
- { "module_unplug", 8 },
1452
+ { "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
10591453 };
10601454
10611455 static const struct counter_desc mlx5e_pme_error_desc[] = {
1062
- { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */
1063
- { "module_high_temp", 48 }, /* high temperature */
1064
- { "module_bad_shorted", 56 }, /* bad or shorted cable/module */
1456
+ { "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
1457
+ { "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
1458
+ { "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
10651459 };
10661460
10671461 #define NUM_PME_STATUS_STATS ARRAY_SIZE(mlx5e_pme_status_desc)
10681462 #define NUM_PME_ERR_STATS ARRAY_SIZE(mlx5e_pme_error_desc)
10691463
1070
-static int mlx5e_grp_pme_get_num_stats(struct mlx5e_priv *priv)
1464
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
10711465 {
10721466 return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
10731467 }
10741468
1075
-static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
1076
- int idx)
1469
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
10771470 {
10781471 int i;
10791472
@@ -1086,61 +1479,42 @@
10861479 return idx;
10871480 }
10881481
1089
-static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
1090
- int idx)
1482
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
10911483 {
1092
- struct mlx5_priv *mlx5_priv = &priv->mdev->priv;
1484
+ struct mlx5_pme_stats pme_stats;
10931485 int i;
10941486
1487
+ mlx5_get_pme_stats(priv->mdev, &pme_stats);
1488
+
10951489 for (i = 0; i < NUM_PME_STATUS_STATS; i++)
1096
- data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters,
1490
+ data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
10971491 mlx5e_pme_status_desc, i);
10981492
10991493 for (i = 0; i < NUM_PME_ERR_STATS; i++)
1100
- data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
1494
+ data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
11011495 mlx5e_pme_error_desc, i);
11021496
11031497 return idx;
11041498 }
11051499
1106
-static int mlx5e_grp_ipsec_get_num_stats(struct mlx5e_priv *priv)
1107
-{
1108
- return mlx5e_ipsec_get_count(priv);
1109
-}
1500
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
11101501
1111
-static int mlx5e_grp_ipsec_fill_strings(struct mlx5e_priv *priv, u8 *data,
1112
- int idx)
1113
-{
1114
- return idx + mlx5e_ipsec_get_strings(priv,
1115
- data + idx * ETH_GSTRING_LEN);
1116
-}
1117
-
1118
-static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data,
1119
- int idx)
1120
-{
1121
- return idx + mlx5e_ipsec_get_stats(priv, data + idx);
1122
-}
1123
-
1124
-static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv)
1125
-{
1126
- mlx5e_ipsec_update_stats(priv);
1127
-}
1128
-
1129
-static int mlx5e_grp_tls_get_num_stats(struct mlx5e_priv *priv)
1502
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
11301503 {
11311504 return mlx5e_tls_get_count(priv);
11321505 }
11331506
1134
-static int mlx5e_grp_tls_fill_strings(struct mlx5e_priv *priv, u8 *data,
1135
- int idx)
1507
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
11361508 {
11371509 return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
11381510 }
11391511
1140
-static int mlx5e_grp_tls_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
1512
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
11411513 {
11421514 return idx + mlx5e_tls_get_stats(priv, data + idx);
11431515 }
1516
+
1517
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
11441518
11451519 static const struct counter_desc rq_stats_desc[] = {
11461520 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
@@ -1164,13 +1538,27 @@
11641538 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
11651539 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
11661540 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
1167
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) },
11681541 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
11691542 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
11701543 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
11711544 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
11721545 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
11731546 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
1547
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
1548
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
1549
+#ifdef CONFIG_MLX5_EN_TLS
1550
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
1551
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
1552
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_ctx) },
1553
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_del) },
1554
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
1555
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
1556
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
1557
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
1558
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
1559
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
1560
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
1561
+#endif
11741562 };
11751563
11761564 static const struct counter_desc sq_stats_desc[] = {
@@ -1184,6 +1572,20 @@
11841572 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
11851573 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
11861574 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
1575
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
1576
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
1577
+#ifdef CONFIG_MLX5_EN_TLS
1578
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
1579
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
1580
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
1581
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
1582
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
1583
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
1584
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
1585
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
1586
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
1587
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
1588
+#endif
11871589 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
11881590 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
11891591 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
@@ -1196,6 +1598,9 @@
11961598
11971599 static const struct counter_desc rq_xdpsq_stats_desc[] = {
11981600 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
1601
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
1602
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
1603
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
11991604 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
12001605 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
12011606 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
@@ -1203,9 +1608,43 @@
12031608
12041609 static const struct counter_desc xdpsq_stats_desc[] = {
12051610 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
1611
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
1612
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
1613
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
12061614 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
12071615 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
12081616 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
1617
+};
1618
+
1619
+static const struct counter_desc xskrq_stats_desc[] = {
1620
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
1621
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
1622
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
1623
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
1624
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
1625
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
1626
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
1627
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
1628
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
1629
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
1630
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
1631
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
1632
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
1633
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
1634
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
1635
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
1636
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
1637
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
1638
+ { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
1639
+};
1640
+
1641
+static const struct counter_desc xsksq_stats_desc[] = {
1642
+ { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
1643
+ { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
1644
+ { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
1645
+ { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
1646
+ { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
1647
+ { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
12091648 };
12101649
12111650 static const struct counter_desc ch_stats_desc[] = {
....@@ -1213,6 +1652,7 @@
12131652 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
12141653 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
12151654 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
1655
+ { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
12161656 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
12171657 };
12181658
@@ -1220,23 +1660,27 @@
12201660 #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
12211661 #define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc)
12221662 #define NUM_RQ_XDPSQ_STATS ARRAY_SIZE(rq_xdpsq_stats_desc)
1663
+#define NUM_XSKRQ_STATS ARRAY_SIZE(xskrq_stats_desc)
1664
+#define NUM_XSKSQ_STATS ARRAY_SIZE(xsksq_stats_desc)
12231665 #define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc)
12241666
1225
-static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
1667
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
12261668 {
1227
- int max_nch = priv->profile->max_nch(priv->mdev);
1669
+ int max_nch = priv->max_nch;
12281670
12291671 return (NUM_RQ_STATS * max_nch) +
12301672 (NUM_CH_STATS * max_nch) +
12311673 (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
12321674 (NUM_RQ_XDPSQ_STATS * max_nch) +
1233
- (NUM_XDPSQ_STATS * max_nch);
1675
+ (NUM_XDPSQ_STATS * max_nch) +
1676
+ (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
1677
+ (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
12341678 }
12351679
1236
-static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
1237
- int idx)
1680
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
12381681 {
1239
- int max_nch = priv->profile->max_nch(priv->mdev);
1682
+ bool is_xsk = priv->xsk.ever_used;
1683
+ int max_nch = priv->max_nch;
12401684 int i, j, tc;
12411685
12421686 for (i = 0; i < max_nch; i++)
@@ -1248,6 +1692,9 @@
12481692 for (j = 0; j < NUM_RQ_STATS; j++)
12491693 sprintf(data + (idx++) * ETH_GSTRING_LEN,
12501694 rq_stats_desc[j].format, i);
1695
+ for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
1696
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
1697
+ xskrq_stats_desc[j].format, i);
12511698 for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
12521699 sprintf(data + (idx++) * ETH_GSTRING_LEN,
12531700 rq_xdpsq_stats_desc[j].format, i);
@@ -1258,20 +1705,24 @@
12581705 for (j = 0; j < NUM_SQ_STATS; j++)
12591706 sprintf(data + (idx++) * ETH_GSTRING_LEN,
12601707 sq_stats_desc[j].format,
1261
- priv->channel_tc2txq[i][tc]);
1708
+ i + tc * max_nch);
12621709
1263
- for (i = 0; i < max_nch; i++)
1710
+ for (i = 0; i < max_nch; i++) {
1711
+ for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
1712
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
1713
+ xsksq_stats_desc[j].format, i);
12641714 for (j = 0; j < NUM_XDPSQ_STATS; j++)
12651715 sprintf(data + (idx++) * ETH_GSTRING_LEN,
12661716 xdpsq_stats_desc[j].format, i);
1717
+ }
12671718
12681719 return idx;
12691720 }
12701721
1271
-static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
1272
- int idx)
1722
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
12731723 {
1274
- int max_nch = priv->profile->max_nch(priv->mdev);
1724
+ bool is_xsk = priv->xsk.ever_used;
1725
+ int max_nch = priv->max_nch;
12751726 int i, j, tc;
12761727
12771728 for (i = 0; i < max_nch; i++)
@@ -1285,6 +1736,10 @@
12851736 data[idx++] =
12861737 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
12871738 rq_stats_desc, j);
1739
+ for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
1740
+ data[idx++] =
1741
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xskrq,
1742
+ xskrq_stats_desc, j);
12881743 for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
12891744 data[idx++] =
12901745 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
@@ -1298,107 +1753,62 @@
12981753 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
12991754 sq_stats_desc, j);
13001755
1301
- for (i = 0; i < max_nch; i++)
1756
+ for (i = 0; i < max_nch; i++) {
1757
+ for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
1758
+ data[idx++] =
1759
+ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xsksq,
1760
+ xsksq_stats_desc, j);
13021761 for (j = 0; j < NUM_XDPSQ_STATS; j++)
13031762 data[idx++] =
13041763 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
13051764 xdpsq_stats_desc, j);
1765
+ }
13061766
13071767 return idx;
13081768 }
13091769
1770
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }
1771
+
1772
+MLX5E_DEFINE_STATS_GRP(sw, 0);
1773
+MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
1774
+MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
1775
+MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
1776
+MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
1777
+MLX5E_DEFINE_STATS_GRP(2863, 0);
1778
+MLX5E_DEFINE_STATS_GRP(2819, 0);
1779
+MLX5E_DEFINE_STATS_GRP(phy, 0);
1780
+MLX5E_DEFINE_STATS_GRP(pcie, 0);
1781
+MLX5E_DEFINE_STATS_GRP(per_prio, 0);
1782
+MLX5E_DEFINE_STATS_GRP(pme, 0);
1783
+MLX5E_DEFINE_STATS_GRP(channels, 0);
1784
+MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
1785
+MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
1786
+static MLX5E_DEFINE_STATS_GRP(tls, 0);
1787
+
13101788 /* The stats groups order is opposite to the update_stats() order calls */
1311
-const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
1312
- {
1313
- .get_num_stats = mlx5e_grp_sw_get_num_stats,
1314
- .fill_strings = mlx5e_grp_sw_fill_strings,
1315
- .fill_stats = mlx5e_grp_sw_fill_stats,
1316
- .update_stats = mlx5e_grp_sw_update_stats,
1317
- },
1318
- {
1319
- .get_num_stats = mlx5e_grp_q_get_num_stats,
1320
- .fill_strings = mlx5e_grp_q_fill_strings,
1321
- .fill_stats = mlx5e_grp_q_fill_stats,
1322
- .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
1323
- .update_stats = mlx5e_grp_q_update_stats,
1324
- },
1325
- {
1326
- .get_num_stats = mlx5e_grp_vnic_env_get_num_stats,
1327
- .fill_strings = mlx5e_grp_vnic_env_fill_strings,
1328
- .fill_stats = mlx5e_grp_vnic_env_fill_stats,
1329
- .update_stats = mlx5e_grp_vnic_env_update_stats,
1330
- },
1331
- {
1332
- .get_num_stats = mlx5e_grp_vport_get_num_stats,
1333
- .fill_strings = mlx5e_grp_vport_fill_strings,
1334
- .fill_stats = mlx5e_grp_vport_fill_stats,
1335
- .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
1336
- .update_stats = mlx5e_grp_vport_update_stats,
1337
- },
1338
- {
1339
- .get_num_stats = mlx5e_grp_802_3_get_num_stats,
1340
- .fill_strings = mlx5e_grp_802_3_fill_strings,
1341
- .fill_stats = mlx5e_grp_802_3_fill_stats,
1342
- .update_stats_mask = MLX5E_NDO_UPDATE_STATS,
1343
- .update_stats = mlx5e_grp_802_3_update_stats,
1344
- },
1345
- {
1346
- .get_num_stats = mlx5e_grp_2863_get_num_stats,
1347
- .fill_strings = mlx5e_grp_2863_fill_strings,
1348
- .fill_stats = mlx5e_grp_2863_fill_stats,
1349
- .update_stats = mlx5e_grp_2863_update_stats,
1350
- },
1351
- {
1352
- .get_num_stats = mlx5e_grp_2819_get_num_stats,
1353
- .fill_strings = mlx5e_grp_2819_fill_strings,
1354
- .fill_stats = mlx5e_grp_2819_fill_stats,
1355
- .update_stats = mlx5e_grp_2819_update_stats,
1356
- },
1357
- {
1358
- .get_num_stats = mlx5e_grp_phy_get_num_stats,
1359
- .fill_strings = mlx5e_grp_phy_fill_strings,
1360
- .fill_stats = mlx5e_grp_phy_fill_stats,
1361
- .update_stats = mlx5e_grp_phy_update_stats,
1362
- },
1363
- {
1364
- .get_num_stats = mlx5e_grp_eth_ext_get_num_stats,
1365
- .fill_strings = mlx5e_grp_eth_ext_fill_strings,
1366
- .fill_stats = mlx5e_grp_eth_ext_fill_stats,
1367
- .update_stats = mlx5e_grp_eth_ext_update_stats,
1368
- },
1369
- {
1370
- .get_num_stats = mlx5e_grp_pcie_get_num_stats,
1371
- .fill_strings = mlx5e_grp_pcie_fill_strings,
1372
- .fill_stats = mlx5e_grp_pcie_fill_stats,
1373
- .update_stats = mlx5e_grp_pcie_update_stats,
1374
- },
1375
- {
1376
- .get_num_stats = mlx5e_grp_per_prio_get_num_stats,
1377
- .fill_strings = mlx5e_grp_per_prio_fill_strings,
1378
- .fill_stats = mlx5e_grp_per_prio_fill_stats,
1379
- .update_stats = mlx5e_grp_per_prio_update_stats,
1380
- },
1381
- {
1382
- .get_num_stats = mlx5e_grp_pme_get_num_stats,
1383
- .fill_strings = mlx5e_grp_pme_fill_strings,
1384
- .fill_stats = mlx5e_grp_pme_fill_stats,
1385
- },
1386
- {
1387
- .get_num_stats = mlx5e_grp_ipsec_get_num_stats,
1388
- .fill_strings = mlx5e_grp_ipsec_fill_strings,
1389
- .fill_stats = mlx5e_grp_ipsec_fill_stats,
1390
- .update_stats = mlx5e_grp_ipsec_update_stats,
1391
- },
1392
- {
1393
- .get_num_stats = mlx5e_grp_tls_get_num_stats,
1394
- .fill_strings = mlx5e_grp_tls_fill_strings,
1395
- .fill_stats = mlx5e_grp_tls_fill_stats,
1396
- },
1397
- {
1398
- .get_num_stats = mlx5e_grp_channels_get_num_stats,
1399
- .fill_strings = mlx5e_grp_channels_fill_strings,
1400
- .fill_stats = mlx5e_grp_channels_fill_stats,
1401
- }
1789
+mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
1790
+ &MLX5E_STATS_GRP(sw),
1791
+ &MLX5E_STATS_GRP(qcnt),
1792
+ &MLX5E_STATS_GRP(vnic_env),
1793
+ &MLX5E_STATS_GRP(vport),
1794
+ &MLX5E_STATS_GRP(802_3),
1795
+ &MLX5E_STATS_GRP(2863),
1796
+ &MLX5E_STATS_GRP(2819),
1797
+ &MLX5E_STATS_GRP(phy),
1798
+ &MLX5E_STATS_GRP(eth_ext),
1799
+ &MLX5E_STATS_GRP(pcie),
1800
+ &MLX5E_STATS_GRP(per_prio),
1801
+ &MLX5E_STATS_GRP(pme),
1802
+#ifdef CONFIG_MLX5_EN_IPSEC
1803
+ &MLX5E_STATS_GRP(ipsec_sw),
1804
+ &MLX5E_STATS_GRP(ipsec_hw),
1805
+#endif
1806
+ &MLX5E_STATS_GRP(tls),
1807
+ &MLX5E_STATS_GRP(channels),
1808
+ &MLX5E_STATS_GRP(per_port_buff_congest),
14021809 };
14031810
1404
-const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps);
1811
+unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
1812
+{
1813
+ return ARRAY_SIZE(mlx5e_nic_stats_grps);
1814
+}
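
Note on the macro scheme used throughout the hunks above: the patch replaces the open-coded mlx5e_grp_* handlers and the single mlx5e_stats_grps[] table with MLX5E_DECLARE_STATS_GRP_OP_* declarations and MLX5E_DEFINE_STATS_GRP() registrations, and the new generic helpers (mlx5e_stats_total_num, mlx5e_stats_update, mlx5e_stats_fill, mlx5e_stats_fill_strings) walk the groups through priv->profile->stats_grps. The macro definitions themselves live in en_stats.h and are not part of this diff; the sketch below is only a rough reconstruction of how such macros could expand, so the token pasting in the hunks is easier to follow. The exact upstream definitions may differ.

/*
 * Illustrative sketch only: the real declarations live in en_stats.h and
 * en.h, outside this diff.  The names mirror what the hunks use, but the
 * expansions, field types, and MLX5E_NDO_UPDATE_STATS value are assumptions.
 */
#include <linux/types.h>

struct mlx5e_priv;

struct mlx5e_stats_grp {
	u16 update_stats_mask;
	int (*get_num_stats)(struct mlx5e_priv *priv);
	int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
	int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
	void (*update_stats)(struct mlx5e_priv *priv);
};

typedef const struct mlx5e_stats_grp *mlx5e_stats_grp_t;

#define MLX5E_NDO_UPDATE_STATS	(1 << 0)	/* assumed value */

/* Build "mlx5e_stats_grp_<grp>_<op>" function names by token pasting. */
#define MLX5E_STATS_GRP_OP(grp, name) mlx5e_stats_grp_ ## grp ## _ ## name

#define MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(grp) \
	int MLX5E_STATS_GRP_OP(grp, num_stats)(struct mlx5e_priv *priv)

#define MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(grp) \
	void MLX5E_STATS_GRP_OP(grp, update_stats)(struct mlx5e_priv *priv)

#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(grp) \
	int MLX5E_STATS_GRP_OP(grp, fill_strings)(struct mlx5e_priv *priv, \
						  u8 *data, int idx)

#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(grp) \
	int MLX5E_STATS_GRP_OP(grp, fill_stats)(struct mlx5e_priv *priv, \
						u64 *data, int idx)

/* Name of the group object itself, e.g. mlx5e_stats_grp_sw. */
#define MLX5E_STATS_GRP(grp) mlx5e_stats_grp_ ## grp

/* Tie the four handlers of a group into one object the profile can list. */
#define MLX5E_DEFINE_STATS_GRP(grp, mask)				    \
	const struct mlx5e_stats_grp MLX5E_STATS_GRP(grp) = {		    \
		.get_num_stats	   = MLX5E_STATS_GRP_OP(grp, num_stats),    \
		.fill_strings	   = MLX5E_STATS_GRP_OP(grp, fill_strings), \
		.fill_stats	   = MLX5E_STATS_GRP_OP(grp, fill_stats),   \
		.update_stats	   = MLX5E_STATS_GRP_OP(grp, update_stats), \
		.update_stats_mask = mask,				    \
	}

Under that assumed expansion, "MLX5E_DEFINE_STATS_GRP(sw, 0);" produces a const struct mlx5e_stats_grp named mlx5e_stats_grp_sw whose callbacks are the mlx5e_stats_grp_sw_* handlers declared earlier in the file, and "&MLX5E_STATS_GRP(sw)" is the pointer placed into mlx5e_nic_stats_grps[], which priv->profile->stats_grps exposes to the generic helpers.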