From 9999e48639b3cecb08ffb37358bcba3b48161b29 Mon Sep 17 00:00:00 2001 From: hc <hc@nodka.com> Date: Fri, 10 May 2024 08:50:17 +0000 Subject: [PATCH] add ax88772_rst --- kernel/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 924 +++++++++++++++++++++++++++++++++++++++++---------------- 1 files changed, 667 insertions(+), 257 deletions(-) diff --git a/kernel/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/kernel/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 9a68dee..ff4f10d 100644 --- a/kernel/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/kernel/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -30,9 +30,74 @@ * SOFTWARE. */ +#include "lib/mlx5.h" #include "en.h" -#include "en_accel/ipsec.h" #include "en_accel/tls.h" +#include "en_accel/en_accel.h" + +static unsigned int stats_grps_num(struct mlx5e_priv *priv) +{ + return !priv->profile->stats_grps_num ? 0 : + priv->profile->stats_grps_num(priv); +} + +unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv) +{ + mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps; + const unsigned int num_stats_grps = stats_grps_num(priv); + unsigned int total = 0; + int i; + + for (i = 0; i < num_stats_grps; i++) + total += stats_grps[i]->get_num_stats(priv); + + return total; +} + +void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv) +{ + mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps; + const unsigned int num_stats_grps = stats_grps_num(priv); + int i; + + for (i = num_stats_grps - 1; i >= 0; i--) + if (stats_grps[i]->update_stats && + stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS) + stats_grps[i]->update_stats(priv); +} + +void mlx5e_stats_update(struct mlx5e_priv *priv) +{ + mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps; + const unsigned int num_stats_grps = stats_grps_num(priv); + int i; + + for (i = num_stats_grps - 1; i >= 0; i--) + if (stats_grps[i]->update_stats) + stats_grps[i]->update_stats(priv); +} + +void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx) +{ + mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps; + const unsigned int num_stats_grps = stats_grps_num(priv); + int i; + + for (i = 0; i < num_stats_grps; i++) + idx = stats_grps[i]->fill_stats(priv, data, idx); +} + +void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data) +{ + mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps; + const unsigned int num_stats_grps = stats_grps_num(priv); + int i, idx = 0; + + for (i = 0; i < num_stats_grps; i++) + idx = stats_grps[i]->fill_strings(priv, data, idx); +} + +/* Concrete NIC Stats */ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) }, @@ -45,10 +110,20 @@ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) }, #ifdef CONFIG_MLX5_EN_TLS + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) }, + 
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) }, #endif { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) }, @@ -64,6 +139,9 @@ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) }, @@ -78,6 +156,9 @@ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) }, @@ -88,28 +169,68 @@ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) }, +#ifdef CONFIG_MLX5_EN_TLS + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_ctx) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_del) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) }, +#endif { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) }, + { MLX5E_DECLARE_STAT(struct 
mlx5e_sw_stats, rx_xsk_csum_complete) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) }, }; #define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc) -static int mlx5e_grp_sw_get_num_stats(struct mlx5e_priv *priv) +static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw) { return NUM_SW_COUNTERS; } -static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw) { int i; @@ -118,7 +239,7 @@ return idx; } -static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw) { int i; @@ -127,18 +248,20 @@ return idx; } -void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) +static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) { - struct mlx5e_sw_stats temp, *s = &temp; + struct mlx5e_sw_stats *s = &priv->stats.sw; int i; memset(s, 0, sizeof(*s)); - for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) { + for (i = 0; i < priv->max_nch; i++) { struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i]; struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq; struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq; + struct mlx5e_xdpsq_stats *xsksq_stats = &channel_stats->xsksq; + struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq; struct mlx5e_rq_stats *rq_stats = &channel_stats->rq; struct mlx5e_ch_stats *ch_stats = &channel_stats->ch; int j; @@ -158,6 +281,9 @@ s->rx_xdp_drop += rq_stats->xdp_drop; s->rx_xdp_redirect += rq_stats->xdp_redirect; s->rx_xdp_tx_xmit += xdpsq_stats->xmit; + s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe; + s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw; + s->rx_xdp_tx_nops += xdpsq_stats->nops; s->rx_xdp_tx_full += xdpsq_stats->full; s->rx_xdp_tx_err += xdpsq_stats->err; s->rx_xdp_tx_cqe += xdpsq_stats->cqes; @@ -168,23 +294,67 @@ s->rx_buff_alloc_err += rq_stats->buff_alloc_err; s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks; s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts; - s->rx_page_reuse += rq_stats->page_reuse; s->rx_cache_reuse += rq_stats->cache_reuse; 
s->rx_cache_full += rq_stats->cache_full; s->rx_cache_empty += rq_stats->cache_empty; s->rx_cache_busy += rq_stats->cache_busy; s->rx_cache_waive += rq_stats->cache_waive; s->rx_congst_umr += rq_stats->congst_umr; + s->rx_arfs_err += rq_stats->arfs_err; + s->rx_recover += rq_stats->recover; +#ifdef CONFIG_MLX5_EN_TLS + s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets; + s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes; + s->rx_tls_ctx += rq_stats->tls_ctx; + s->rx_tls_del += rq_stats->tls_del; + s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt; + s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start; + s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end; + s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip; + s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok; + s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip; + s->rx_tls_err += rq_stats->tls_err; +#endif s->ch_events += ch_stats->events; s->ch_poll += ch_stats->poll; s->ch_arm += ch_stats->arm; s->ch_aff_change += ch_stats->aff_change; + s->ch_force_irq += ch_stats->force_irq; s->ch_eq_rearm += ch_stats->eq_rearm; /* xdp redirect */ s->tx_xdp_xmit += xdpsq_red_stats->xmit; + s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe; + s->tx_xdp_inlnw += xdpsq_red_stats->inlnw; + s->tx_xdp_nops += xdpsq_red_stats->nops; s->tx_xdp_full += xdpsq_red_stats->full; s->tx_xdp_err += xdpsq_red_stats->err; s->tx_xdp_cqes += xdpsq_red_stats->cqes; + /* AF_XDP zero-copy */ + s->rx_xsk_packets += xskrq_stats->packets; + s->rx_xsk_bytes += xskrq_stats->bytes; + s->rx_xsk_csum_complete += xskrq_stats->csum_complete; + s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary; + s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner; + s->rx_xsk_csum_none += xskrq_stats->csum_none; + s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark; + s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets; + s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop; + s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect; + s->rx_xsk_wqe_err += xskrq_stats->wqe_err; + s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes; + s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides; + s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop; + s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err; + s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks; + s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts; + s->rx_xsk_congst_umr += xskrq_stats->congst_umr; + s->rx_xsk_arfs_err += xskrq_stats->arfs_err; + s->tx_xsk_xmit += xsksq_stats->xmit; + s->tx_xsk_mpwqe += xsksq_stats->mpwqe; + s->tx_xsk_inlnw += xsksq_stats->inlnw; + s->tx_xsk_full += xsksq_stats->full; + s->tx_xsk_err += xsksq_stats->err; + s->tx_xsk_cqes += xsksq_stats->cqes; for (j = 0; j < priv->max_opened_tc; j++) { struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j]; @@ -197,6 +367,8 @@ s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes; s->tx_added_vlan_packets += sq_stats->added_vlan_packets; s->tx_nop += sq_stats->nop; + s->tx_mpwqe_blks += sq_stats->mpwqe_blks; + s->tx_mpwqe_pkts += sq_stats->mpwqe_pkts; s->tx_queue_stopped += sq_stats->stopped; s->tx_queue_wake += sq_stats->wake; s->tx_queue_dropped += sq_stats->dropped; @@ -207,8 +379,16 @@ s->tx_csum_none += sq_stats->csum_none; s->tx_csum_partial += sq_stats->csum_partial; #ifdef CONFIG_MLX5_EN_TLS - s->tx_tls_ooo += sq_stats->tls_ooo; - s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes; + s->tx_tls_encrypted_packets += 
sq_stats->tls_encrypted_packets; + s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes; + s->tx_tls_ctx += sq_stats->tls_ctx; + s->tx_tls_ooo += sq_stats->tls_ooo; + s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes; + s->tx_tls_dump_packets += sq_stats->tls_dump_packets; + s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes; + s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data; + s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data; + s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req; #endif s->tx_cqes += sq_stats->cqes; @@ -216,8 +396,6 @@ barrier(); } } - - memcpy(&priv->stats.sw, s, sizeof(*s)); } static const struct counter_desc q_stats_desc[] = { @@ -231,7 +409,7 @@ #define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc) #define NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc) -static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv) +static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt) { int num_stats = 0; @@ -244,7 +422,7 @@ return num_stats; } -static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt) { int i; @@ -259,7 +437,7 @@ return idx; } -static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt) { int i; @@ -272,80 +450,97 @@ return idx; } -static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv) +static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt) { struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt; - u32 out[MLX5_ST_SZ_DW(query_q_counter_out)]; + u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {}; + u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {}; + int ret; - if (priv->q_counter && - !mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, - sizeof(out))) - qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, - out, out_of_buffer); - if (priv->drop_rq_q_counter && - !mlx5_core_query_q_counter(priv->mdev, priv->drop_rq_q_counter, 0, - out, sizeof(out))) - qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out, out, - out_of_buffer); + MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER); + + if (priv->q_counter) { + MLX5_SET(query_q_counter_in, in, counter_set_id, + priv->q_counter); + ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out); + if (!ret) + qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, + out, out_of_buffer); + } + + if (priv->drop_rq_q_counter) { + MLX5_SET(query_q_counter_in, in, counter_set_id, + priv->drop_rq_q_counter); + ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out); + if (!ret) + qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out, + out, out_of_buffer); + } } #define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c) -static const struct counter_desc vnic_env_stats_desc[] = { +static const struct counter_desc vnic_env_stats_steer_desc[] = { { "rx_steer_missed_packets", VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) }, }; -#define NUM_VNIC_ENV_COUNTERS ARRAY_SIZE(vnic_env_stats_desc) +static const struct counter_desc vnic_env_stats_dev_oob_desc[] = { + { "dev_internal_queue_oob", + VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) }, +}; -static int mlx5e_grp_vnic_env_get_num_stats(struct mlx5e_priv *priv) +#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \ + (MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \ + ARRAY_SIZE(vnic_env_stats_steer_desc) : 0) +#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \ + (MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? 
\ + ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0) + +static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env) { - return MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard) ? - NUM_VNIC_ENV_COUNTERS : 0; + return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) + + NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); } -static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data, - int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env) { int i; - if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard)) - return idx; - - for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++) + for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, - vnic_env_stats_desc[i].format); + vnic_env_stats_steer_desc[i].format); + + for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + vnic_env_stats_dev_oob_desc[i].format); return idx; } -static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data, - int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env) { int i; - if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard)) - return idx; - - for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++) + for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++) data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out, - vnic_env_stats_desc, i); + vnic_env_stats_steer_desc, i); + + for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++) + data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out, + vnic_env_stats_dev_oob_desc, i); return idx; } -static void mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv) +static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env) { u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out; - int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out); - u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {}; struct mlx5_core_dev *mdev = priv->mdev; - if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard)) + if (!mlx5e_stats_grp_vnic_env_num_stats(priv)) return; - MLX5_SET(query_vnic_env_in, in, opcode, - MLX5_CMD_OP_QUERY_VNIC_ENV); - MLX5_SET(query_vnic_env_in, in, op_mod, 0); - MLX5_SET(query_vnic_env_in, in, other_vport, 0); - mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); + MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV); + mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out); } #define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c) @@ -394,13 +589,12 @@ #define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc) -static int mlx5e_grp_vport_get_num_stats(struct mlx5e_priv *priv) +static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport) { return NUM_VPORT_COUNTERS; } -static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data, - int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport) { int i; @@ -409,8 +603,7 @@ return idx; } -static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data, - int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport) { int i; @@ -420,17 +613,14 @@ return idx; } -static void mlx5e_grp_vport_update_stats(struct mlx5e_priv *priv) +static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport) { - int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); u32 *out = (u32 *)priv->stats.vport.query_vport_out; - u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0}; + u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {}; struct mlx5_core_dev *mdev = priv->mdev; MLX5_SET(query_vport_counter_in, in, opcode, 
MLX5_CMD_OP_QUERY_VPORT_COUNTER); - MLX5_SET(query_vport_counter_in, in, op_mod, 0); - MLX5_SET(query_vport_counter_in, in, other_vport, 0); - mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); + mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out); } #define PPORT_802_3_OFF(c) \ @@ -459,13 +649,12 @@ #define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc) -static int mlx5e_grp_802_3_get_num_stats(struct mlx5e_priv *priv) +static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3) { return NUM_PPORT_802_3_COUNTERS; } -static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data, - int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3) { int i; @@ -474,8 +663,7 @@ return idx; } -static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data, - int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3) { int i; @@ -485,7 +673,10 @@ return idx; } -static void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv) +#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \ + (MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1) + +static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3) { struct mlx5e_pport_stats *pstats = &priv->stats.pport; struct mlx5_core_dev *mdev = priv->mdev; @@ -493,10 +684,42 @@ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); void *out; + if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev)) + return; + MLX5_SET(ppcnt_reg, in, local_port, 1); out = pstats->IEEE_802_3_counters; MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); +} + +#define MLX5E_READ_CTR64_BE_F(ptr, c) \ + be64_to_cpu(*(__be64 *)((char *)ptr + \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.eth_802_3_cntrs_grp_data_layout.c##_high))) + +void mlx5e_stats_pause_get(struct mlx5e_priv *priv, + struct ethtool_pause_stats *pause_stats) +{ + u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)]; + struct mlx5_core_dev *mdev = priv->mdev; + u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {}; + int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); + + if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev)) + return; + + MLX5_SET(ppcnt_reg, in, local_port, 1); + MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3, + sz, MLX5_REG_PPCNT, 0, 0); + + pause_stats->tx_pause_frames = + MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, + a_pause_mac_ctrl_frames_transmitted); + pause_stats->rx_pause_frames = + MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, + a_pause_mac_ctrl_frames_received); } #define PPORT_2863_OFF(c) \ @@ -510,13 +733,12 @@ #define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc) -static int mlx5e_grp_2863_get_num_stats(struct mlx5e_priv *priv) +static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863) { return NUM_PPORT_2863_COUNTERS; } -static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data, - int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863) { int i; @@ -525,8 +747,7 @@ return idx; } -static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data, - int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863) { int i; @@ -536,7 +757,7 @@ return idx; } -static void mlx5e_grp_2863_update_stats(struct mlx5e_priv *priv) +static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863) { struct mlx5e_pport_stats *pstats = &priv->stats.pport; struct mlx5_core_dev *mdev = priv->mdev; @@ -571,13 +792,12 @@ #define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc) -static int mlx5e_grp_2819_get_num_stats(struct mlx5e_priv *priv) +static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819) { return 
NUM_PPORT_2819_COUNTERS; } -static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data, - int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819) { int i; @@ -586,8 +806,7 @@ return idx; } -static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data, - int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819) { int i; @@ -597,13 +816,16 @@ return idx; } -static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv) +static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819) { struct mlx5e_pport_stats *pstats = &priv->stats.pport; struct mlx5_core_dev *mdev = priv->mdev; u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0}; int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); void *out; + + if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev)) + return; MLX5_SET(ppcnt_reg, in, local_port, 1); out = pstats->RFC_2819_counters; @@ -619,50 +841,85 @@ { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) }, }; -#define NUM_PPORT_PHY_STATISTICAL_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc) +static const struct counter_desc +pport_phy_statistical_err_lanes_stats_desc[] = { + { "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) }, + { "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) }, + { "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) }, + { "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) }, +}; -static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv) +#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \ + ARRAY_SIZE(pport_phy_statistical_stats_desc) +#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \ + ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc) + +static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy) { + struct mlx5_core_dev *mdev = priv->mdev; + int num_stats; + /* "1" for link_down_events special counter */ - return MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group) ? - NUM_PPORT_PHY_STATISTICAL_COUNTERS + 1 : 1; + num_stats = 1; + + num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ? + NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0; + + num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ? 
+ NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0; + + return num_stats; } -static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data, - int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy) { + struct mlx5_core_dev *mdev = priv->mdev; int i; strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy"); - if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) + if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) return idx; for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_phy_statistical_stats_desc[i].format); + + if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters)) + for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pport_phy_statistical_err_lanes_stats_desc[i].format); + return idx; } -static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy) { + struct mlx5_core_dev *mdev = priv->mdev; int i; /* link_down_events_phy has special handling since it is not stored in __be64 format */ data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters, counter_set.phys_layer_cntrs.link_down_events); - if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) + if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) return idx; for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++) data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, pport_phy_statistical_stats_desc, i); + + if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters)) + for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++) + data[idx++] = + MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, + pport_phy_statistical_err_lanes_stats_desc, + i); return idx; } -static void mlx5e_grp_phy_update_stats(struct mlx5e_priv *priv) +static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy) { struct mlx5e_pport_stats *pstats = &priv->stats.pport; struct mlx5_core_dev *mdev = priv->mdev; @@ -692,7 +949,7 @@ #define NUM_PPORT_ETH_EXT_COUNTERS ARRAY_SIZE(pport_eth_ext_stats_desc) -static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv) +static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext) { if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters)) return NUM_PPORT_ETH_EXT_COUNTERS; @@ -700,8 +957,7 @@ return 0; } -static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data, - int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext) { int i; @@ -712,8 +968,7 @@ return idx; } -static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data, - int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext) { int i; @@ -725,7 +980,7 @@ return idx; } -static void mlx5e_grp_eth_ext_update_stats(struct mlx5e_priv *priv) +static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext) { struct mlx5e_pport_stats *pstats = &priv->stats.pport; struct mlx5_core_dev *mdev = priv->mdev; @@ -766,7 +1021,7 @@ #define NUM_PCIE_PERF_COUNTERS64 ARRAY_SIZE(pcie_perf_stats_desc64) #define NUM_PCIE_PERF_STALL_COUNTERS ARRAY_SIZE(pcie_perf_stall_stats_desc) -static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv) +static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie) { int num_stats = 0; @@ -782,8 +1037,7 @@ return num_stats; } -static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data, - int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie) { int i; @@ -804,8 +1058,7 @@ return idx; } -static int 
mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data, - int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie) { int i; @@ -829,7 +1082,7 @@ return idx; } -static void mlx5e_grp_pcie_update_stats(struct mlx5e_priv *priv) +static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie) { struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie; struct mlx5_core_dev *mdev = priv->mdev; @@ -845,12 +1098,152 @@ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); } +#define PPORT_PER_TC_PRIO_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.eth_per_tc_prio_grp_data_layout.c##_high) + +static const struct counter_desc pport_per_tc_prio_stats_desc[] = { + { "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) }, +}; + +#define NUM_PPORT_PER_TC_PRIO_COUNTERS ARRAY_SIZE(pport_per_tc_prio_stats_desc) + +#define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high) + +static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = { + { "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) }, + { "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) }, +}; + +#define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \ + ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc) + +static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + + if (!MLX5_CAP_GEN(mdev, sbcam_reg)) + return 0; + + return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO; +} + +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest) +{ + struct mlx5_core_dev *mdev = priv->mdev; + int i, prio; + + if (!MLX5_CAP_GEN(mdev, sbcam_reg)) + return idx; + + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { + for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + pport_per_tc_prio_stats_desc[i].format, prio); + for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + pport_per_tc_congest_prio_stats_desc[i].format, prio); + } + + return idx; +} + +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest) +{ + struct mlx5e_pport_stats *pport = &priv->stats.pport; + struct mlx5_core_dev *mdev = priv->mdev; + int i, prio; + + if (!MLX5_CAP_GEN(mdev, sbcam_reg)) + return idx; + + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { + for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++) + data[idx++] = + MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio], + pport_per_tc_prio_stats_desc, i); + for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS ; i++) + data[idx++] = + MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio], + pport_per_tc_congest_prio_stats_desc, i); + } + + return idx; +} + +static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv) +{ + struct mlx5e_pport_stats *pstats = &priv->stats.pport; + struct mlx5_core_dev *mdev = priv->mdev; + u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {}; + int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); + void *out; + int prio; + + if (!MLX5_CAP_GEN(mdev, sbcam_reg)) + return; + + MLX5_SET(ppcnt_reg, in, pnat, 2); + MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { + out = pstats->per_tc_prio_counters[prio]; + MLX5_SET(ppcnt_reg, in, prio_tc, prio); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + } +} + +static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev 
*mdev = priv->mdev; + + if (!MLX5_CAP_GEN(mdev, sbcam_reg)) + return 0; + + return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO; +} + +static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv) +{ + struct mlx5e_pport_stats *pstats = &priv->stats.pport; + struct mlx5_core_dev *mdev = priv->mdev; + u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {}; + int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); + void *out; + int prio; + + if (!MLX5_CAP_GEN(mdev, sbcam_reg)) + return; + + MLX5_SET(ppcnt_reg, in, pnat, 2); + MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { + out = pstats->per_tc_congest_prio_counters[prio]; + MLX5_SET(ppcnt_reg, in, prio_tc, prio); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + } +} + +static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest) +{ + return mlx5e_grp_per_tc_prio_get_num_stats(priv) + + mlx5e_grp_per_tc_congest_prio_get_num_stats(priv); +} + +static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest) +{ + mlx5e_grp_per_tc_prio_update_stats(priv); + mlx5e_grp_per_tc_congest_prio_update_stats(priv); +} + #define PPORT_PER_PRIO_OFF(c) \ MLX5_BYTE_OFF(ppcnt_reg, \ counter_set.eth_per_prio_grp_data_layout.c##_high) static const struct counter_desc pport_per_prio_traffic_stats_desc[] = { { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) }, { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) }, + { "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) }, { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) }, { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) }, }; @@ -903,7 +1296,7 @@ }; static const struct counter_desc pport_pfc_stall_stats_desc[] = { - { "tx_pause_storm_warning_events ", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) }, + { "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) }, { "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) }, }; @@ -1013,29 +1406,27 @@ return idx; } -static int mlx5e_grp_per_prio_get_num_stats(struct mlx5e_priv *priv) +static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio) { return mlx5e_grp_per_prio_traffic_get_num_stats() + mlx5e_grp_per_prio_pfc_get_num_stats(priv); } -static int mlx5e_grp_per_prio_fill_strings(struct mlx5e_priv *priv, u8 *data, - int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio) { idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx); idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx); return idx; } -static int mlx5e_grp_per_prio_fill_stats(struct mlx5e_priv *priv, u64 *data, - int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio) { idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx); idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx); return idx; } -static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv) +static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio) { struct mlx5e_pport_stats *pstats = &priv->stats.pport; struct mlx5_core_dev *mdev = priv->mdev; @@ -1043,6 +1434,9 @@ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); int prio; void *out; + + if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev)) + return; MLX5_SET(ppcnt_reg, in, local_port, 1); MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP); @@ -1055,25 +1449,24 @@ } static const struct counter_desc mlx5e_pme_status_desc[] = { - { "module_unplug", 8 }, + { "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED }, }; static const struct counter_desc mlx5e_pme_error_desc[] = { - { 
"module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */ - { "module_high_temp", 48 }, /* high temperature */ - { "module_bad_shorted", 56 }, /* bad or shorted cable/module */ + { "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK }, + { "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE }, + { "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE }, }; #define NUM_PME_STATUS_STATS ARRAY_SIZE(mlx5e_pme_status_desc) #define NUM_PME_ERR_STATS ARRAY_SIZE(mlx5e_pme_error_desc) -static int mlx5e_grp_pme_get_num_stats(struct mlx5e_priv *priv) +static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme) { return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS; } -static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data, - int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme) { int i; @@ -1086,61 +1479,42 @@ return idx; } -static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data, - int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme) { - struct mlx5_priv *mlx5_priv = &priv->mdev->priv; + struct mlx5_pme_stats pme_stats; int i; + mlx5_get_pme_stats(priv->mdev, &pme_stats); + for (i = 0; i < NUM_PME_STATUS_STATS; i++) - data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters, + data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters, mlx5e_pme_status_desc, i); for (i = 0; i < NUM_PME_ERR_STATS; i++) - data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters, + data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters, mlx5e_pme_error_desc, i); return idx; } -static int mlx5e_grp_ipsec_get_num_stats(struct mlx5e_priv *priv) -{ - return mlx5e_ipsec_get_count(priv); -} +static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; } -static int mlx5e_grp_ipsec_fill_strings(struct mlx5e_priv *priv, u8 *data, - int idx) -{ - return idx + mlx5e_ipsec_get_strings(priv, - data + idx * ETH_GSTRING_LEN); -} - -static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data, - int idx) -{ - return idx + mlx5e_ipsec_get_stats(priv, data + idx); -} - -static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv) -{ - mlx5e_ipsec_update_stats(priv); -} - -static int mlx5e_grp_tls_get_num_stats(struct mlx5e_priv *priv) +static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls) { return mlx5e_tls_get_count(priv); } -static int mlx5e_grp_tls_fill_strings(struct mlx5e_priv *priv, u8 *data, - int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls) { return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN); } -static int mlx5e_grp_tls_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls) { return idx + mlx5e_tls_get_stats(priv, data + idx); } + +static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; } static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, @@ -1164,13 +1538,27 @@ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) }, { 
MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) }, +#ifdef CONFIG_MLX5_EN_TLS + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_ctx) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_del) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) }, +#endif }; static const struct counter_desc sq_stats_desc[] = { @@ -1184,6 +1572,20 @@ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) }, +#ifdef CONFIG_MLX5_EN_TLS + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) }, +#endif { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) }, @@ -1196,6 +1598,9 @@ static const struct counter_desc rq_xdpsq_stats_desc[] = { { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) }, + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) }, + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) }, + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) }, { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) }, { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) }, { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) }, @@ -1203,9 +1608,43 @@ static const struct counter_desc xdpsq_stats_desc[] = { { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) }, + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) }, + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) }, + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) }, { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) }, { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) }, { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) }, +}; + +static const struct counter_desc xskrq_stats_desc[] = { + { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) }, + { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) }, + 
{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) }, + { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) }, + { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, + { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) }, + { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) }, + { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) }, + { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) }, + { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) }, + { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) }, + { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) }, + { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) }, + { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) }, + { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, + { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) }, + { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) }, + { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) }, + { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) }, +}; + +static const struct counter_desc xsksq_stats_desc[] = { + { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) }, + { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) }, + { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) }, + { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) }, + { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) }, + { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) }, }; static const struct counter_desc ch_stats_desc[] = { @@ -1213,6 +1652,7 @@ { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) }, { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) }, { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) }, + { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) }, { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) }, }; @@ -1220,23 +1660,27 @@ #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) #define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc) #define NUM_RQ_XDPSQ_STATS ARRAY_SIZE(rq_xdpsq_stats_desc) +#define NUM_XSKRQ_STATS ARRAY_SIZE(xskrq_stats_desc) +#define NUM_XSKSQ_STATS ARRAY_SIZE(xsksq_stats_desc) #define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc) -static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv) +static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels) { - int max_nch = priv->profile->max_nch(priv->mdev); + int max_nch = priv->max_nch; return (NUM_RQ_STATS * max_nch) + (NUM_CH_STATS * max_nch) + (NUM_SQ_STATS * max_nch * priv->max_opened_tc) + (NUM_RQ_XDPSQ_STATS * max_nch) + - (NUM_XDPSQ_STATS * max_nch); + (NUM_XDPSQ_STATS * max_nch) + + (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) + + (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used); } -static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, - int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels) { - int max_nch = priv->profile->max_nch(priv->mdev); + bool is_xsk = priv->xsk.ever_used; + int max_nch = priv->max_nch; int i, j, tc; for (i = 0; i < max_nch; i++) @@ -1248,6 +1692,9 @@ for (j = 0; j < NUM_RQ_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i); + for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + xskrq_stats_desc[j].format, i); for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, 
rq_xdpsq_stats_desc[j].format, i); @@ -1258,20 +1705,24 @@ for (j = 0; j < NUM_SQ_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, sq_stats_desc[j].format, - priv->channel_tc2txq[i][tc]); + i + tc * max_nch); - for (i = 0; i < max_nch; i++) + for (i = 0; i < max_nch; i++) { + for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + xsksq_stats_desc[j].format, i); for (j = 0; j < NUM_XDPSQ_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, xdpsq_stats_desc[j].format, i); + } return idx; } -static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data, - int idx) +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels) { - int max_nch = priv->profile->max_nch(priv->mdev); + bool is_xsk = priv->xsk.ever_used; + int max_nch = priv->max_nch; int i, j, tc; for (i = 0; i < max_nch; i++) @@ -1285,6 +1736,10 @@ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq, rq_stats_desc, j); + for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++) + data[idx++] = + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xskrq, + xskrq_stats_desc, j); for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++) data[idx++] = MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq, @@ -1298,107 +1753,62 @@ MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc], sq_stats_desc, j); - for (i = 0; i < max_nch; i++) + for (i = 0; i < max_nch; i++) { + for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++) + data[idx++] = + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xsksq, + xsksq_stats_desc, j); for (j = 0; j < NUM_XDPSQ_STATS; j++) data[idx++] = MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq, xdpsq_stats_desc, j); + } return idx; } +static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; } + +MLX5E_DEFINE_STATS_GRP(sw, 0); +MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS); +MLX5E_DEFINE_STATS_GRP(vnic_env, 0); +MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS); +MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS); +MLX5E_DEFINE_STATS_GRP(2863, 0); +MLX5E_DEFINE_STATS_GRP(2819, 0); +MLX5E_DEFINE_STATS_GRP(phy, 0); +MLX5E_DEFINE_STATS_GRP(pcie, 0); +MLX5E_DEFINE_STATS_GRP(per_prio, 0); +MLX5E_DEFINE_STATS_GRP(pme, 0); +MLX5E_DEFINE_STATS_GRP(channels, 0); +MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0); +MLX5E_DEFINE_STATS_GRP(eth_ext, 0); +static MLX5E_DEFINE_STATS_GRP(tls, 0); + /* The stats groups order is opposite to the update_stats() order calls */ -const struct mlx5e_stats_grp mlx5e_stats_grps[] = { - { - .get_num_stats = mlx5e_grp_sw_get_num_stats, - .fill_strings = mlx5e_grp_sw_fill_strings, - .fill_stats = mlx5e_grp_sw_fill_stats, - .update_stats = mlx5e_grp_sw_update_stats, - }, - { - .get_num_stats = mlx5e_grp_q_get_num_stats, - .fill_strings = mlx5e_grp_q_fill_strings, - .fill_stats = mlx5e_grp_q_fill_stats, - .update_stats_mask = MLX5E_NDO_UPDATE_STATS, - .update_stats = mlx5e_grp_q_update_stats, - }, - { - .get_num_stats = mlx5e_grp_vnic_env_get_num_stats, - .fill_strings = mlx5e_grp_vnic_env_fill_strings, - .fill_stats = mlx5e_grp_vnic_env_fill_stats, - .update_stats = mlx5e_grp_vnic_env_update_stats, - }, - { - .get_num_stats = mlx5e_grp_vport_get_num_stats, - .fill_strings = mlx5e_grp_vport_fill_strings, - .fill_stats = mlx5e_grp_vport_fill_stats, - .update_stats_mask = MLX5E_NDO_UPDATE_STATS, - .update_stats = mlx5e_grp_vport_update_stats, - }, - { - .get_num_stats = mlx5e_grp_802_3_get_num_stats, - .fill_strings = mlx5e_grp_802_3_fill_strings, - .fill_stats = mlx5e_grp_802_3_fill_stats, - .update_stats_mask = MLX5E_NDO_UPDATE_STATS, - .update_stats = 
mlx5e_grp_802_3_update_stats, - }, - { - .get_num_stats = mlx5e_grp_2863_get_num_stats, - .fill_strings = mlx5e_grp_2863_fill_strings, - .fill_stats = mlx5e_grp_2863_fill_stats, - .update_stats = mlx5e_grp_2863_update_stats, - }, - { - .get_num_stats = mlx5e_grp_2819_get_num_stats, - .fill_strings = mlx5e_grp_2819_fill_strings, - .fill_stats = mlx5e_grp_2819_fill_stats, - .update_stats = mlx5e_grp_2819_update_stats, - }, - { - .get_num_stats = mlx5e_grp_phy_get_num_stats, - .fill_strings = mlx5e_grp_phy_fill_strings, - .fill_stats = mlx5e_grp_phy_fill_stats, - .update_stats = mlx5e_grp_phy_update_stats, - }, - { - .get_num_stats = mlx5e_grp_eth_ext_get_num_stats, - .fill_strings = mlx5e_grp_eth_ext_fill_strings, - .fill_stats = mlx5e_grp_eth_ext_fill_stats, - .update_stats = mlx5e_grp_eth_ext_update_stats, - }, - { - .get_num_stats = mlx5e_grp_pcie_get_num_stats, - .fill_strings = mlx5e_grp_pcie_fill_strings, - .fill_stats = mlx5e_grp_pcie_fill_stats, - .update_stats = mlx5e_grp_pcie_update_stats, - }, - { - .get_num_stats = mlx5e_grp_per_prio_get_num_stats, - .fill_strings = mlx5e_grp_per_prio_fill_strings, - .fill_stats = mlx5e_grp_per_prio_fill_stats, - .update_stats = mlx5e_grp_per_prio_update_stats, - }, - { - .get_num_stats = mlx5e_grp_pme_get_num_stats, - .fill_strings = mlx5e_grp_pme_fill_strings, - .fill_stats = mlx5e_grp_pme_fill_stats, - }, - { - .get_num_stats = mlx5e_grp_ipsec_get_num_stats, - .fill_strings = mlx5e_grp_ipsec_fill_strings, - .fill_stats = mlx5e_grp_ipsec_fill_stats, - .update_stats = mlx5e_grp_ipsec_update_stats, - }, - { - .get_num_stats = mlx5e_grp_tls_get_num_stats, - .fill_strings = mlx5e_grp_tls_fill_strings, - .fill_stats = mlx5e_grp_tls_fill_stats, - }, - { - .get_num_stats = mlx5e_grp_channels_get_num_stats, - .fill_strings = mlx5e_grp_channels_fill_strings, - .fill_stats = mlx5e_grp_channels_fill_stats, - } +mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = { + &MLX5E_STATS_GRP(sw), + &MLX5E_STATS_GRP(qcnt), + &MLX5E_STATS_GRP(vnic_env), + &MLX5E_STATS_GRP(vport), + &MLX5E_STATS_GRP(802_3), + &MLX5E_STATS_GRP(2863), + &MLX5E_STATS_GRP(2819), + &MLX5E_STATS_GRP(phy), + &MLX5E_STATS_GRP(eth_ext), + &MLX5E_STATS_GRP(pcie), + &MLX5E_STATS_GRP(per_prio), + &MLX5E_STATS_GRP(pme), +#ifdef CONFIG_MLX5_EN_IPSEC + &MLX5E_STATS_GRP(ipsec_sw), + &MLX5E_STATS_GRP(ipsec_hw), +#endif + &MLX5E_STATS_GRP(tls), + &MLX5E_STATS_GRP(channels), + &MLX5E_STATS_GRP(per_port_buff_congest), }; -const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps); +unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv) +{ + return ARRAY_SIZE(mlx5e_nic_stats_grps); +} -- Gitblit v1.6.2
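
For orientation, the core structural change in this patch is the move from one hard-coded "const struct mlx5e_stats_grp mlx5e_stats_grps[]" table to per-profile arrays of stats-group pointers (mlx5e_stats_grp_t), iterated by the new generic helpers mlx5e_stats_total_num(), mlx5e_stats_update(), mlx5e_stats_fill() and mlx5e_stats_fill_strings(). The standalone C sketch below models only that callback-table pattern; every name in it (struct stats_grp, sw_grp, stats_total_num, GSTRING_LEN, the toy counter) is a simplified stand-in invented for illustration, not the kernel's actual definition.

/*
 * Illustrative sketch only -- NOT kernel code. Each stats group exposes
 * get_num_stats/fill_strings/fill_stats and an optional update_stats;
 * generic helpers iterate an array of group pointers, with updates
 * running in reverse order as in the patch.
 */
#include <stdio.h>
#include <string.h>

#define GSTRING_LEN 32                       /* stand-in for ETH_GSTRING_LEN */

struct priv {                                /* stand-in for mlx5e_priv */
	unsigned long long sw_packets;       /* toy counter */
};

struct stats_grp {
	int  (*get_num_stats)(struct priv *priv);
	int  (*fill_strings)(struct priv *priv, char *data, int idx);
	int  (*fill_stats)(struct priv *priv, unsigned long long *data, int idx);
	void (*update_stats)(struct priv *priv); /* may be NULL */
};

static int sw_num(struct priv *priv) { return 1; }

static int sw_strs(struct priv *priv, char *data, int idx)
{
	strcpy(data + (idx++) * GSTRING_LEN, "rx_packets");
	return idx;
}

static int sw_vals(struct priv *priv, unsigned long long *data, int idx)
{
	data[idx++] = priv->sw_packets;
	return idx;
}

static void sw_update(struct priv *priv) { priv->sw_packets += 42; }

static const struct stats_grp sw_grp = {
	.get_num_stats = sw_num,
	.fill_strings  = sw_strs,
	.fill_stats    = sw_vals,
	.update_stats  = sw_update,
};

static const struct stats_grp *grps[] = { &sw_grp };
#define NUM_GRPS ((int)(sizeof(grps) / sizeof(grps[0])))

/* mirrors mlx5e_stats_total_num(): sum each group's counter count */
static int stats_total_num(struct priv *priv)
{
	int i, total = 0;

	for (i = 0; i < NUM_GRPS; i++)
		total += grps[i]->get_num_stats(priv);
	return total;
}

/* mirrors mlx5e_stats_update(): groups are refreshed in reverse order */
static void stats_update(struct priv *priv)
{
	int i;

	for (i = NUM_GRPS - 1; i >= 0; i--)
		if (grps[i]->update_stats)
			grps[i]->update_stats(priv);
}

int main(void)
{
	struct priv priv = { 0 };
	char names[8][GSTRING_LEN];
	unsigned long long vals[8];
	int i, n, idx;

	stats_update(&priv);
	n = stats_total_num(&priv);
	for (idx = 0, i = 0; i < NUM_GRPS; i++)     /* like mlx5e_stats_fill_strings() */
		idx = grps[i]->fill_strings(&priv, &names[0][0], idx);
	for (idx = 0, i = 0; i < NUM_GRPS; i++)     /* like mlx5e_stats_fill() */
		idx = grps[i]->fill_stats(&priv, vals, idx);
	for (i = 0; i < n; i++)
		printf("%s: %llu\n", names[i], vals[i]);
	return 0;
}

As in the patch, the fill loops walk the group array forward while stats_update() walks it in reverse, matching the in-tree comment that the groups' order is opposite to the update_stats() call order; the table-driven dispatch is what lets each mlx5e profile publish its own group list through profile->stats_grps and profile->stats_grps_num.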