forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-31 f70575805708cabdedea7498aaa3f710fde4d920
kernel/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -29,6 +29,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
+
 #ifndef __MLX5_EN_STATS_H__
 #define __MLX5_EN_STATS_H__
 
@@ -46,12 +47,68 @@
 #define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
 #define MLX5E_DECLARE_XDPSQ_STAT(type, fld) "tx%d_xdp_"#fld, offsetof(type, fld)
 #define MLX5E_DECLARE_RQ_XDPSQ_STAT(type, fld) "rx%d_xdp_tx_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_XSKRQ_STAT(type, fld) "rx%d_xsk_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_XSKSQ_STAT(type, fld) "tx%d_xsk_"#fld, offsetof(type, fld)
 #define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)
 
 struct counter_desc {
 	char format[ETH_GSTRING_LEN];
 	size_t offset; /* Byte offset */
 };
+
+enum {
+	MLX5E_NDO_UPDATE_STATS = BIT(0x1),
+};
+
+struct mlx5e_priv;
+struct mlx5e_stats_grp {
+	u16 update_stats_mask;
+	int (*get_num_stats)(struct mlx5e_priv *priv);
+	int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
+	int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
+	void (*update_stats)(struct mlx5e_priv *priv);
+};
+
+typedef const struct mlx5e_stats_grp *const mlx5e_stats_grp_t;
+
+#define MLX5E_STATS_GRP_OP(grp, name) mlx5e_stats_grp_ ## grp ## _ ## name
+
+#define MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(grp) \
+	int MLX5E_STATS_GRP_OP(grp, num_stats)(struct mlx5e_priv *priv)
+
+#define MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(grp) \
+	void MLX5E_STATS_GRP_OP(grp, update_stats)(struct mlx5e_priv *priv)
+
+#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(grp) \
+	int MLX5E_STATS_GRP_OP(grp, fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx)
+
+#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(grp) \
+	int MLX5E_STATS_GRP_OP(grp, fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx)
+
+#define MLX5E_STATS_GRP(grp) mlx5e_stats_grp_ ## grp
+
+#define MLX5E_DECLARE_STATS_GRP(grp) \
+	const struct mlx5e_stats_grp MLX5E_STATS_GRP(grp)
+
+#define MLX5E_DEFINE_STATS_GRP(grp, mask) \
+MLX5E_DECLARE_STATS_GRP(grp) = { \
+	.get_num_stats = MLX5E_STATS_GRP_OP(grp, num_stats), \
+	.fill_stats = MLX5E_STATS_GRP_OP(grp, fill_stats), \
+	.fill_strings = MLX5E_STATS_GRP_OP(grp, fill_strings), \
+	.update_stats = MLX5E_STATS_GRP_OP(grp, update_stats), \
+	.update_stats_mask = mask, \
+}
+
+unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv);
+void mlx5e_stats_update(struct mlx5e_priv *priv);
+void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx);
+void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data);
+void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv);
+
+void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
+			   struct ethtool_pause_stats *pause_stats);
+
+/* Concrete NIC Stats */
 
 struct mlx5e_sw_stats {
 	u64 rx_packets;
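The hunk above replaces the old open-coded group table with a macro-generated naming scheme: MLX5E_STATS_GRP_OP(grp, name) pastes together mlx5e_stats_grp_<grp>_<name>, the MLX5E_DECLARE_STATS_GRP_OP_* macros fix the callback signatures, and MLX5E_DEFINE_STATS_GRP emits a single const struct mlx5e_stats_grp wiring the four callbacks together. A minimal sketch of how a group is implemented against these macros in en_stats.c, using the counter_desc helpers defined earlier in this header (the counter list here is shortened and illustrative; the real sw group covers every field of struct mlx5e_sw_stats and is not part of this diff):

static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
};

#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
{
	return NUM_SW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
{
	int i;

	/* "data" is the flat ethtool string table; "idx" is the running slot */
	for (i = 0; i < NUM_SW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
						   sw_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
	/* fold the per-channel ring counters into priv->stats.sw */
}

/* expands to: const struct mlx5e_stats_grp mlx5e_stats_grp_sw = { ... }; */
MLX5E_DEFINE_STATS_GRP(sw, MLX5E_NDO_UPDATE_STATS);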
@@ -64,8 +121,11 @@
 	u64 tx_tso_inner_bytes;
 	u64 tx_added_vlan_packets;
 	u64 tx_nop;
+	u64 tx_mpwqe_blks;
+	u64 tx_mpwqe_pkts;
 	u64 rx_lro_packets;
 	u64 rx_lro_bytes;
+	u64 rx_mcast_packets;
 	u64 rx_ecn_mark;
 	u64 rx_removed_vlan_packets;
 	u64 rx_csum_unnecessary;
@@ -77,6 +137,9 @@
 	u64 rx_xdp_drop;
 	u64 rx_xdp_redirect;
 	u64 rx_xdp_tx_xmit;
+	u64 rx_xdp_tx_mpwqe;
+	u64 rx_xdp_tx_inlnw;
+	u64 rx_xdp_tx_nops;
 	u64 rx_xdp_tx_full;
 	u64 rx_xdp_tx_err;
 	u64 rx_xdp_tx_cqe;
@@ -91,6 +154,9 @@
 	u64 tx_queue_wake;
 	u64 tx_cqe_err;
 	u64 tx_xdp_xmit;
+	u64 tx_xdp_mpwqe;
+	u64 tx_xdp_inlnw;
+	u64 tx_xdp_nops;
 	u64 tx_xdp_full;
 	u64 tx_xdp_err;
 	u64 tx_xdp_cqes;
@@ -101,23 +167,71 @@
 	u64 rx_buff_alloc_err;
 	u64 rx_cqe_compress_blks;
 	u64 rx_cqe_compress_pkts;
-	u64 rx_page_reuse;
 	u64 rx_cache_reuse;
 	u64 rx_cache_full;
 	u64 rx_cache_empty;
 	u64 rx_cache_busy;
 	u64 rx_cache_waive;
 	u64 rx_congst_umr;
+	u64 rx_arfs_err;
+	u64 rx_recover;
 	u64 ch_events;
 	u64 ch_poll;
 	u64 ch_arm;
 	u64 ch_aff_change;
+	u64 ch_force_irq;
 	u64 ch_eq_rearm;
 
 #ifdef CONFIG_MLX5_EN_TLS
+	u64 tx_tls_encrypted_packets;
+	u64 tx_tls_encrypted_bytes;
+	u64 tx_tls_ctx;
 	u64 tx_tls_ooo;
+	u64 tx_tls_dump_packets;
+	u64 tx_tls_dump_bytes;
 	u64 tx_tls_resync_bytes;
+	u64 tx_tls_skip_no_sync_data;
+	u64 tx_tls_drop_no_sync_data;
+	u64 tx_tls_drop_bypass_req;
+
+	u64 rx_tls_decrypted_packets;
+	u64 rx_tls_decrypted_bytes;
+	u64 rx_tls_ctx;
+	u64 rx_tls_del;
+	u64 rx_tls_resync_req_pkt;
+	u64 rx_tls_resync_req_start;
+	u64 rx_tls_resync_req_end;
+	u64 rx_tls_resync_req_skip;
+	u64 rx_tls_resync_res_ok;
+	u64 rx_tls_resync_res_skip;
+	u64 rx_tls_err;
 #endif
+
+	u64 rx_xsk_packets;
+	u64 rx_xsk_bytes;
+	u64 rx_xsk_csum_complete;
+	u64 rx_xsk_csum_unnecessary;
+	u64 rx_xsk_csum_unnecessary_inner;
+	u64 rx_xsk_csum_none;
+	u64 rx_xsk_ecn_mark;
+	u64 rx_xsk_removed_vlan_packets;
+	u64 rx_xsk_xdp_drop;
+	u64 rx_xsk_xdp_redirect;
+	u64 rx_xsk_wqe_err;
+	u64 rx_xsk_mpwqe_filler_cqes;
+	u64 rx_xsk_mpwqe_filler_strides;
+	u64 rx_xsk_oversize_pkts_sw_drop;
+	u64 rx_xsk_buff_alloc_err;
+	u64 rx_xsk_cqe_compress_blks;
+	u64 rx_xsk_cqe_compress_pkts;
+	u64 rx_xsk_congst_umr;
+	u64 rx_xsk_arfs_err;
+	u64 tx_xsk_xmit;
+	u64 tx_xsk_mpwqe;
+	u64 tx_xsk_inlnw;
+	u64 tx_xsk_full;
+	u64 tx_xsk_err;
+	u64 tx_xsk_cqes;
 };
 
 struct mlx5e_qcounter_stats {
@@ -164,6 +278,8 @@
 	__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
 	__be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
 	__be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+	__be64 per_tc_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
+	__be64 per_tc_congest_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
 };
 
 #define PCIE_PERF_GET(pcie_stats, c) \
@@ -189,6 +305,7 @@
 	u64 csum_none;
 	u64 lro_packets;
 	u64 lro_bytes;
+	u64 mcast_packets;
 	u64 ecn_mark;
 	u64 removed_vlan_packets;
 	u64 xdp_drop;
@@ -200,13 +317,27 @@
 	u64 buff_alloc_err;
 	u64 cqe_compress_blks;
 	u64 cqe_compress_pkts;
-	u64 page_reuse;
 	u64 cache_reuse;
 	u64 cache_full;
 	u64 cache_empty;
 	u64 cache_busy;
 	u64 cache_waive;
 	u64 congst_umr;
+	u64 arfs_err;
+	u64 recover;
+#ifdef CONFIG_MLX5_EN_TLS
+	u64 tls_decrypted_packets;
+	u64 tls_decrypted_bytes;
+	u64 tls_ctx;
+	u64 tls_del;
+	u64 tls_resync_req_pkt;
+	u64 tls_resync_req_start;
+	u64 tls_resync_req_end;
+	u64 tls_resync_req_skip;
+	u64 tls_resync_res_ok;
+	u64 tls_resync_res_skip;
+	u64 tls_err;
+#endif
 };
 
 struct mlx5e_sq_stats {
@@ -222,9 +353,19 @@
 	u64 csum_partial_inner;
 	u64 added_vlan_packets;
 	u64 nop;
+	u64 mpwqe_blks;
+	u64 mpwqe_pkts;
 #ifdef CONFIG_MLX5_EN_TLS
+	u64 tls_encrypted_packets;
+	u64 tls_encrypted_bytes;
+	u64 tls_ctx;
 	u64 tls_ooo;
+	u64 tls_dump_packets;
+	u64 tls_dump_bytes;
 	u64 tls_resync_bytes;
+	u64 tls_skip_no_sync_data;
+	u64 tls_drop_no_sync_data;
+	u64 tls_drop_bypass_req;
 #endif
 	/* less likely accessed in data path */
 	u64 csum_none;
@@ -239,6 +380,9 @@
 
 struct mlx5e_xdpsq_stats {
 	u64 xmit;
+	u64 mpwqe;
+	u64 inlnw;
+	u64 nops;
 	u64 full;
 	u64 err;
 	/* dirtied @completion */
@@ -250,6 +394,7 @@
 	u64 poll;
 	u64 arm;
 	u64 aff_change;
+	u64 force_irq;
 	u64 eq_rearm;
 };
 
@@ -263,22 +408,24 @@
 	struct mlx5e_pcie_stats pcie;
 };
 
-enum {
-	MLX5E_NDO_UPDATE_STATS = BIT(0x1),
-};
+extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[];
+unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv);
 
-struct mlx5e_priv;
-struct mlx5e_stats_grp {
-	u16 update_stats_mask;
-	int (*get_num_stats)(struct mlx5e_priv *priv);
-	int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
-	int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
-	void (*update_stats)(struct mlx5e_priv *priv);
-};
-
-extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
-extern const int mlx5e_num_stats_grps;
-
-void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv);
+extern MLX5E_DECLARE_STATS_GRP(sw);
+extern MLX5E_DECLARE_STATS_GRP(qcnt);
+extern MLX5E_DECLARE_STATS_GRP(vnic_env);
+extern MLX5E_DECLARE_STATS_GRP(vport);
+extern MLX5E_DECLARE_STATS_GRP(802_3);
+extern MLX5E_DECLARE_STATS_GRP(2863);
+extern MLX5E_DECLARE_STATS_GRP(2819);
+extern MLX5E_DECLARE_STATS_GRP(phy);
+extern MLX5E_DECLARE_STATS_GRP(eth_ext);
+extern MLX5E_DECLARE_STATS_GRP(pcie);
+extern MLX5E_DECLARE_STATS_GRP(per_prio);
+extern MLX5E_DECLARE_STATS_GRP(pme);
+extern MLX5E_DECLARE_STATS_GRP(channels);
+extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
+extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
+extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
 
 #endif /* __MLX5_EN_STATS_H__ */
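The final hunk swaps the single flat mlx5e_stats_grps[] array and its open-coded length for per-group const objects plus an mlx5e_nic_stats_grps[] pointer table, so each driver profile (NIC, uplink representor, etc.) can assemble its own group list. A sketch of the consuming side, assumed to mirror upstream en_stats.c (the priv->profile hookup is the upstream wiring and is not shown in this diff):

mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	/* ... remaining groups, in ethtool display order ... */
	&MLX5E_STATS_GRP(channels),
};

unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_nic_stats_grps);
}

void mlx5e_stats_update(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;	/* assumed hookup */
	const unsigned int num = priv->profile->stats_grps_num(priv);
	int i;

	/* upstream walks the groups in reverse registration order */
	for (i = num - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats)
			stats_grps[i]->update_stats(priv);
}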