From 9370bb92b2d16684ee45cf24e879c93c509162da Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 19 Dec 2024 01:47:39 +0000
Subject: [PATCH] add wifi6 8852be driver

---
 kernel/drivers/infiniband/hw/mlx4/mad.c | 201 ++++++++++++++++++++++++-------------------------
 1 files changed, 99 insertions(+), 102 deletions(-)

diff --git a/kernel/drivers/infiniband/hw/mlx4/mad.c b/kernel/drivers/infiniband/hw/mlx4/mad.c
index 418b931..8bd1647 100644
--- a/kernel/drivers/infiniband/hw/mlx4/mad.c
+++ b/kernel/drivers/infiniband/hw/mlx4/mad.c
@@ -202,13 +202,13 @@
 		rdma_ah_set_port_num(&ah_attr, port_num);
 
 		new_ah = rdma_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
-					&ah_attr);
+					&ah_attr, 0);
 		if (IS_ERR(new_ah))
 			return;
 
 		spin_lock_irqsave(&dev->sm_lock, flags);
 		if (dev->sm_ah[port_num - 1])
-			rdma_destroy_ah(dev->sm_ah[port_num - 1]);
+			rdma_destroy_ah(dev->sm_ah[port_num - 1], 0);
 		dev->sm_ah[port_num - 1] = new_ah;
 		spin_unlock_irqrestore(&dev->sm_lock, flags);
 	}
@@ -500,6 +500,13 @@
 		sgid, dgid);
 }
 
+static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
+{
+	int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;
+
+	return (qpn >= proxy_start && qpn <= proxy_start + 1);
+}
+
 int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 			  enum ib_qp_type dest_qpt, struct ib_wc *wc,
 			  struct ib_grh *grh, struct ib_mad *mad)
@@ -520,8 +527,10 @@
 	u16 cached_pkey;
 	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
 
-	if (dest_qpt > IB_QPT_GSI)
+	if (dest_qpt > IB_QPT_GSI) {
+		pr_debug("dest_qpt (%d) > IB_QPT_GSI\n", dest_qpt);
 		return -EINVAL;
+	}
 
 	tun_ctx = dev->sriov.demux[port-1].tun[slave];
 
@@ -538,12 +547,20 @@
 	if (dest_qpt) {
 		u16 pkey_ix;
 		ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
-		if (ret)
+		if (ret) {
+			pr_debug("unable to get %s cached pkey for index %d, ret %d\n",
+				 is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
+				 wc->pkey_index, ret);
 			return -EINVAL;
+		}
 
 		ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix);
-		if (ret)
+		if (ret) {
+			pr_debug("unable to get %s pkey ix for pkey 0x%x, ret %d\n",
+				 is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
+				 cached_pkey, ret);
 			return -EINVAL;
+		}
 		tun_pkey_ix = pkey_ix;
 	} else
 		tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
@@ -567,7 +584,7 @@
 			return -EINVAL;
 		rdma_ah_set_grh(&attr, &dgid, 0, 0, 0, 0);
 	}
-	ah = rdma_create_ah(tun_ctx->pd, &attr);
+	ah = rdma_create_ah(tun_ctx->pd, &attr, 0);
 	if (IS_ERR(ah))
 		return -ENOMEM;
 
@@ -584,7 +601,7 @@
 	tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
 	if (tun_qp->tx_ring[tun_tx_ix].ah)
-		rdma_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
+		rdma_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah, 0);
 	tun_qp->tx_ring[tun_tx_ix].ah = ah;
 	ib_dma_sync_single_for_cpu(&dev->ib_dev,
 				   tun_qp->tx_ring[tun_tx_ix].buf.map,
@@ -657,7 +674,7 @@
 	spin_unlock(&tun_qp->tx_lock);
 	tun_qp->tx_ring[tun_tx_ix].ah = NULL;
 end:
-	rdma_destroy_ah(ah);
+	rdma_destroy_ah(ah, 0);
 	return ret;
 }
 
@@ -715,7 +732,8 @@
 		err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
 	if (err)
-		pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
+		pr_debug("failed sending %s to slave %d via tunnel qp (%d)\n",
+			 is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
 			 slave, err);
 	return 0;
 }
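
Note on the AH hunks above: they track the core API change in which rdma_create_ah() and rdma_destroy_ah() gained a flags argument; passing 0, as this patch does everywhere, marks the call site as potentially atomic. A minimal sketch of the same pattern for a caller that is allowed to sleep, assuming the upstream RDMA_CREATE_AH_SLEEPABLE and RDMA_DESTROY_AH_SLEEPABLE flag names; replace_ah() itself is a hypothetical helper, not a function in this file:

#include <rdma/ib_verbs.h>

/* Hypothetical helper: create a new AH and swap it in, destroying the
 * old one on success.  A process-context caller may pass the SLEEPABLE
 * flags; atomic-context callers must pass 0 as the hunks above do. */
static struct ib_ah *replace_ah(struct ib_pd *pd, struct rdma_ah_attr *attr,
				struct ib_ah *old_ah)
{
	struct ib_ah *new_ah;

	new_ah = rdma_create_ah(pd, attr, RDMA_CREATE_AH_SLEEPABLE);
	if (IS_ERR(new_ah))
		return old_ah;		/* keep the old AH on failure */
	if (old_ah)
		rdma_destroy_ah(old_ah, RDMA_DESTROY_AH_SLEEPABLE);
	return new_ah;
}
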
"SMI" : "GSI", slave, err); return 0; } @@ -794,7 +812,8 @@ err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad); if (err) - pr_debug("failed sending to slave %d via tunnel qp (%d)\n", + pr_debug("failed sending %s to slave %d via tunnel qp (%d)\n", + is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI", slave, err); return 0; } @@ -806,25 +825,6 @@ u16 slid, prev_lid = 0; int err; struct ib_port_attr pattr; - - if (in_wc && in_wc->qp->qp_num) { - pr_debug("received MAD: slid:%d sqpn:%d " - "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n", - in_wc->slid, in_wc->src_qp, - in_wc->dlid_path_bits, - in_wc->qp->qp_num, - in_wc->wc_flags, - in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method, - be16_to_cpu(in_mad->mad_hdr.attr_id)); - if (in_wc->wc_flags & IB_WC_GRH) { - pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n", - be64_to_cpu(in_grh->sgid.global.subnet_prefix), - be64_to_cpu(in_grh->sgid.global.interface_id)); - pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n", - be64_to_cpu(in_grh->dgid.global.subnet_prefix), - be64_to_cpu(in_grh->dgid.global.interface_id)); - } - } slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE); @@ -964,7 +964,6 @@ } mutex_unlock(&dev->counters_table[port_num - 1].mutex); if (stats_avail) { - memset(out_mad->data, 0, sizeof out_mad->data); switch (counter_stats.counter_mode & 0xf) { case 0: edit_counter(&counter_stats, @@ -982,38 +981,31 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, const struct ib_wc *in_wc, const struct ib_grh *in_grh, - const struct ib_mad_hdr *in, size_t in_mad_size, - struct ib_mad_hdr *out, size_t *out_mad_size, - u16 *out_mad_pkey_index) + const struct ib_mad *in, struct ib_mad *out, + size_t *out_mad_size, u16 *out_mad_pkey_index) { struct mlx4_ib_dev *dev = to_mdev(ibdev); - const struct ib_mad *in_mad = (const struct ib_mad *)in; - struct ib_mad *out_mad = (struct ib_mad *)out; enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num); - - if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) || - *out_mad_size != sizeof(*out_mad))) - return IB_MAD_RESULT_FAILURE; /* iboe_process_mad() which uses the HCA flow-counters to implement IB PMA * queries, should be called only by VFs and for that specific purpose */ if (link == IB_LINK_LAYER_INFINIBAND) { if (mlx4_is_slave(dev->dev) && - (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT && - (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS || - in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT || - in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO))) - return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, - in_grh, in_mad, out_mad); + (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT && + (in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS || + in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT || + in->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO))) + return iboe_process_mad(ibdev, mad_flags, port_num, + in_wc, in_grh, in, out); - return ib_process_mad(ibdev, mad_flags, port_num, in_wc, - in_grh, in_mad, out_mad); + return ib_process_mad(ibdev, mad_flags, port_num, in_wc, in_grh, + in, out); } if (link == IB_LINK_LAYER_ETHERNET) return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, - in_grh, in_mad, out_mad); + in_grh, in, out); return -EINVAL; } @@ -1022,7 +1014,7 @@ struct ib_mad_send_wc *mad_send_wc) { if (mad_send_wc->send_buf->context[0]) - rdma_destroy_ah(mad_send_wc->send_buf->context[0]); + rdma_destroy_ah(mad_send_wc->send_buf->context[0], 0); 
@@ -1077,7 +1069,7 @@
 		}
 
 		if (dev->sm_ah[p])
-			rdma_destroy_ah(dev->sm_ah[p]);
+			rdma_destroy_ah(dev->sm_ah[p], 0);
 	}
 }
@@ -1359,14 +1351,6 @@
 	return ret;
 }
 
-static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
-{
-	int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;
-
-	return (qpn >= proxy_start && qpn <= proxy_start + 1);
-}
-
-
 int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 			 enum ib_qp_type dest_qpt, u16 pkey_index,
 			 u32 remote_qpn, u32 qkey, struct rdma_ah_attr *attr,
@@ -1381,9 +1365,9 @@
 	struct ib_ah *ah;
 	struct ib_qp *send_qp = NULL;
 	unsigned wire_tx_ix = 0;
-	int ret = 0;
 	u16 wire_pkey_ix;
 	int src_qpnum;
+	int ret;
 
 	sqp_ctx = dev->sriov.sqps[port-1];
 
@@ -1403,25 +1387,32 @@
 	send_qp = sqp->qp;
 
-	/* create ah */
-	ah = mlx4_ib_create_ah_slave(sqp_ctx->pd, attr,
-				     rdma_ah_retrieve_grh(attr)->sgid_index,
-				     s_mac, vlan_id);
-	if (IS_ERR(ah))
+	ah = rdma_zalloc_drv_obj(sqp_ctx->pd->device, ib_ah);
+	if (!ah)
 		return -ENOMEM;
+
+	ah->device = sqp_ctx->pd->device;
+	ah->pd = sqp_ctx->pd;
+
+	/* create ah */
+	ret = mlx4_ib_create_ah_slave(ah, attr,
+				      rdma_ah_retrieve_grh(attr)->sgid_index,
+				      s_mac, vlan_id);
+	if (ret)
+		goto out;
+
 	spin_lock(&sqp->tx_lock);
 	if (sqp->tx_ix_head - sqp->tx_ix_tail >=
-	    (MLX4_NUM_TUNNEL_BUFS - 1))
+	    (MLX4_NUM_WIRE_BUFS - 1))
 		ret = -EAGAIN;
 	else
-		wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
+		wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_WIRE_BUFS - 1);
 	spin_unlock(&sqp->tx_lock);
 	if (ret)
 		goto out;
 
 	sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
-	if (sqp->tx_ring[wire_tx_ix].ah)
-		rdma_destroy_ah(sqp->tx_ring[wire_tx_ix].ah);
+	kfree(sqp->tx_ring[wire_tx_ix].ah);
 	sqp->tx_ring[wire_tx_ix].ah = ah;
 	ib_dma_sync_single_for_cpu(&dev->ib_dev,
 				   sqp->tx_ring[wire_tx_ix].buf.map,
@@ -1460,7 +1451,7 @@
 	spin_unlock(&sqp->tx_lock);
 	sqp->tx_ring[wire_tx_ix].ah = NULL;
 out:
-	mlx4_ib_destroy_ah(ah);
+	kfree(ah);
 	return ret;
 }
 
@@ -1495,6 +1486,7 @@
 	u16 vlan_id;
 	u8 qos;
 	u8 *dmac;
+	int sts;
 
 	/* Get slave that sent this packet */
 	if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
@@ -1591,13 +1583,17 @@
 			 &vlan_id, &qos))
 		rdma_ah_set_sl(&ah_attr, qos);
 
-	mlx4_ib_send_to_wire(dev, slave, ctx->port,
-			     is_proxy_qp0(dev, wc->src_qp, slave) ?
-			     IB_QPT_SMI : IB_QPT_GSI,
-			     be16_to_cpu(tunnel->hdr.pkey_index),
-			     be32_to_cpu(tunnel->hdr.remote_qpn),
-			     be32_to_cpu(tunnel->hdr.qkey),
-			     &ah_attr, wc->smac, vlan_id, &tunnel->mad);
+	sts = mlx4_ib_send_to_wire(dev, slave, ctx->port,
+				   is_proxy_qp0(dev, wc->src_qp, slave) ?
+				   IB_QPT_SMI : IB_QPT_GSI,
+				   be16_to_cpu(tunnel->hdr.pkey_index),
+				   be32_to_cpu(tunnel->hdr.remote_qpn),
+				   be32_to_cpu(tunnel->hdr.qkey),
+				   &ah_attr, wc->smac, vlan_id, &tunnel->mad);
+	if (sts)
+		pr_debug("failed sending %s to wire on behalf of slave %d (%d)\n",
+			 is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
+			 slave, sts);
 }
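
The mlx4_ib_send_to_wire() rework above changes who owns the slave AH: mlx4_ib_create_ah_slave() now initializes a caller-allocated object instead of returning a freshly created one, so the AH is never registered with the IB core and must be released with plain kfree() rather than rdma_destroy_ah(). A condensed sketch of that lifecycle, where alloc_slave_ah() is a hypothetical wrapper and sgid_index stands in for rdma_ah_retrieve_grh(attr)->sgid_index:

static struct ib_ah *alloc_slave_ah(struct mlx4_ib_demux_pv_ctx *sqp_ctx,
				    struct rdma_ah_attr *attr, u8 sgid_index,
				    u8 *s_mac, u16 vlan_id)
{
	struct ib_ah *ah;

	/* Allocate the driver-sized ib_ah object directly, bypassing the core. */
	ah = rdma_zalloc_drv_obj(sqp_ctx->pd->device, ib_ah);
	if (!ah)
		return NULL;

	/* Fields the IB core would normally have filled in. */
	ah->device = sqp_ctx->pd->device;
	ah->pd = sqp_ctx->pd;

	if (mlx4_ib_create_ah_slave(ah, attr, sgid_index, s_mac, vlan_id)) {
		kfree(ah);	/* never registered with the core, so plain kfree() */
		return NULL;
	}
	return ah;
}
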
@@ -1606,19 +1602,20 @@
 	int i;
 	struct mlx4_ib_demux_pv_qp *tun_qp;
 	int rx_buf_size, tx_buf_size;
+	const int nmbr_bufs = is_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;
 
 	if (qp_type > IB_QPT_GSI)
 		return -EINVAL;
 
 	tun_qp = &ctx->qp[qp_type];
 
-	tun_qp->ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
+	tun_qp->ring = kcalloc(nmbr_bufs,
 			       sizeof(struct mlx4_ib_buf),
 			       GFP_KERNEL);
 	if (!tun_qp->ring)
 		return -ENOMEM;
 
-	tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
+	tun_qp->tx_ring = kcalloc(nmbr_bufs,
 				  sizeof (struct mlx4_ib_tun_tx_buf),
 				  GFP_KERNEL);
 	if (!tun_qp->tx_ring) {
@@ -1635,7 +1632,7 @@
 		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
 	}
 
-	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
+	for (i = 0; i < nmbr_bufs; i++) {
 		tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
 		if (!tun_qp->ring[i].addr)
 			goto err;
@@ -1649,7 +1646,7 @@
 		}
 	}
 
-	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
+	for (i = 0; i < nmbr_bufs; i++) {
 		tun_qp->tx_ring[i].buf.addr =
 			kmalloc(tx_buf_size, GFP_KERNEL);
 		if (!tun_qp->tx_ring[i].buf.addr)
@@ -1680,7 +1677,7 @@
 				    tx_buf_size, DMA_TO_DEVICE);
 		kfree(tun_qp->tx_ring[i].buf.addr);
 	}
-	i = MLX4_NUM_TUNNEL_BUFS;
+	i = nmbr_bufs;
 err:
 	while (i > 0) {
 		--i;
@@ -1701,6 +1698,7 @@
 	int i;
 	struct mlx4_ib_demux_pv_qp *tun_qp;
 	int rx_buf_size, tx_buf_size;
+	const int nmbr_bufs = is_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;
 
 	if (qp_type > IB_QPT_GSI)
 		return;
@@ -1715,18 +1713,18 @@
 	}
 
-	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
+	for (i = 0; i < nmbr_bufs; i++) {
 		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
 				    rx_buf_size, DMA_FROM_DEVICE);
 		kfree(tun_qp->ring[i].addr);
 	}
 
-	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
+	for (i = 0; i < nmbr_bufs; i++) {
 		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
 				    tx_buf_size, DMA_TO_DEVICE);
 		kfree(tun_qp->tx_ring[i].buf.addr);
 		if (tun_qp->tx_ring[i].ah)
-			rdma_destroy_ah(tun_qp->tx_ring[i].ah);
+			rdma_destroy_ah(tun_qp->tx_ring[i].ah, 0);
 	}
 	kfree(tun_qp->tx_ring);
 	kfree(tun_qp->ring);
@@ -1755,11 +1753,8 @@
 					       "buf:%lld\n", wc.wr_id);
 			break;
 		case IB_WC_SEND:
-			pr_debug("received tunnel send completion:"
-				 "wrid=0x%llx, status=0x%x\n",
-				 wc.wr_id, wc.status);
 			rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
-					(MLX4_NUM_TUNNEL_BUFS - 1)].ah);
+					(MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
 			tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
 				= NULL;
 			spin_lock(&tun_qp->tx_lock);
@@ -1776,7 +1771,7 @@
 				 ctx->slave, wc.status, wc.wr_id);
 			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
 				rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
-						(MLX4_NUM_TUNNEL_BUFS - 1)].ah);
+						(MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
 				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
 					= NULL;
 				spin_lock(&tun_qp->tx_lock);
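
The buffer-management hunks above all pivot on nmbr_bufs, which sizes the rings per QP flavour (tunnel vs. wire) instead of hardcoding MLX4_NUM_TUNNEL_BUFS everywhere. One constraint the completion handlers depend on: ring slots are recovered from wr_id with a bitwise AND, so both constants must be powers of two. A sketch of that indexing convention; the constant values shown are illustrative assumptions, not taken from this patch:

#define MLX4_NUM_TUNNEL_BUFS	256	/* assumed value, for illustration */
#define MLX4_NUM_WIRE_BUFS	64	/* assumed value, for illustration */

/* Map a completion's wr_id to its ring slot.  'nbufs' must be a power
 * of two so that the mask below is equivalent to wr_id % nbufs. */
static inline unsigned int pv_ring_slot(u64 wr_id, unsigned int nbufs)
{
	return (unsigned int)(wr_id & (nbufs - 1));
}
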
@@ -1804,6 +1799,7 @@
 	struct mlx4_ib_qp_tunnel_init_attr qp_init_attr;
 	struct ib_qp_attr attr;
 	int qp_attr_mask_INIT;
+	const int nmbr_bufs = create_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;
 
 	if (qp_type > IB_QPT_GSI)
 		return -EINVAL;
@@ -1814,8 +1810,8 @@
 	qp_init_attr.init_attr.send_cq = ctx->cq;
 	qp_init_attr.init_attr.recv_cq = ctx->cq;
 	qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
-	qp_init_attr.init_attr.cap.max_send_wr = MLX4_NUM_TUNNEL_BUFS;
-	qp_init_attr.init_attr.cap.max_recv_wr = MLX4_NUM_TUNNEL_BUFS;
+	qp_init_attr.init_attr.cap.max_send_wr = nmbr_bufs;
+	qp_init_attr.init_attr.cap.max_recv_wr = nmbr_bufs;
 	qp_init_attr.init_attr.cap.max_send_sge = 1;
 	qp_init_attr.init_attr.cap.max_recv_sge = 1;
 	if (create_tun) {
@@ -1877,7 +1873,7 @@
 		goto err_qp;
 	}
 
-	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
+	for (i = 0; i < nmbr_bufs; i++) {
 		ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
 		if (ret) {
 			pr_err(" mlx4_ib_post_pv_buf error"
@@ -1912,9 +1908,9 @@
 		if (wc.status == IB_WC_SUCCESS) {
 			switch (wc.opcode) {
 			case IB_WC_SEND:
-				rdma_destroy_ah(sqp->tx_ring[wc.wr_id &
-						(MLX4_NUM_TUNNEL_BUFS - 1)].ah);
-				sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
+				kfree(sqp->tx_ring[wc.wr_id &
+					      (MLX4_NUM_WIRE_BUFS - 1)].ah);
+				sqp->tx_ring[wc.wr_id & (MLX4_NUM_WIRE_BUFS - 1)].ah
 					= NULL;
 				spin_lock(&sqp->tx_lock);
 				sqp->tx_ix_tail++;
@@ -1923,13 +1919,13 @@
 			case IB_WC_RECV:
 				mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *)
 						(sqp->ring[wc.wr_id &
-						(MLX4_NUM_TUNNEL_BUFS - 1)].addr))->payload);
+						(MLX4_NUM_WIRE_BUFS - 1)].addr))->payload);
 				grh = &(((struct mlx4_mad_rcv_buf *)
 						(sqp->ring[wc.wr_id &
-						(MLX4_NUM_TUNNEL_BUFS - 1)].addr))->grh);
+						(MLX4_NUM_WIRE_BUFS - 1)].addr))->grh);
 				mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
 				if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
-							   (MLX4_NUM_TUNNEL_BUFS - 1)))
+							   (MLX4_NUM_WIRE_BUFS - 1)))
 					pr_err("Failed reposting SQP "
 					       "buf:%lld\n", wc.wr_id);
 				break;
@@ -1941,9 +1937,9 @@
 				 " status = %d, wrid = 0x%llx\n",
 				 ctx->slave, wc.status, wc.wr_id);
 			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
-				rdma_destroy_ah(sqp->tx_ring[wc.wr_id &
-						(MLX4_NUM_TUNNEL_BUFS - 1)].ah);
-				sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
+				kfree(sqp->tx_ring[wc.wr_id &
+					      (MLX4_NUM_WIRE_BUFS - 1)].ah);
+				sqp->tx_ring[wc.wr_id & (MLX4_NUM_WIRE_BUFS - 1)].ah
 					= NULL;
 				spin_lock(&sqp->tx_lock);
 				sqp->tx_ix_tail++;
@@ -1983,6 +1979,7 @@
 {
 	int ret, cq_size;
 	struct ib_cq_init_attr cq_attr = {};
+	const int nmbr_bufs = create_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;
 
 	if (ctx->state != DEMUX_PV_STATE_DOWN)
 		return -EEXIST;
@@ -2007,7 +2004,7 @@
 		goto err_out_qp0;
 	}
 
-	cq_size = 2 * MLX4_NUM_TUNNEL_BUFS;
+	cq_size = 2 * nmbr_bufs;
 	if (ctx->has_smi)
 		cq_size *= 2;
-- 
Gitblit v1.6.2