forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 95099d4622f8cb224d94e314c7a8e0df60b13f87
kernel/drivers/infiniband/hw/mlx4/qp.c
@@ -41,6 +41,7 @@
 #include <rdma/ib_pack.h>
 #include <rdma/ib_addr.h>
 #include <rdma/ib_mad.h>
+#include <rdma/uverbs_ioctl.h>
 
 #include <linux/mlx4/driver.h>
 #include <linux/mlx4/qp.h>
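NOTE: The only change in the hunk above is the new include. <rdma/uverbs_ioctl.h> is where the rdma_udata_to_drv_context() helper lives, and the hunks below use it to recover the driver ucontext from a struct ib_udata * instead of chasing uobject->context pointers. A minimal sketch of the pattern (the wrapper function here is hypothetical, but the macro and the mlx4 field names are the ones used later in this diff):

        #include <rdma/uverbs_ioctl.h>

        static struct mlx4_ib_ucontext *mlx4_ucontext_of(struct ib_udata *udata)
        {
                /* Resolves the mlx4 ucontext of a user-space verbs call;
                 * evaluates to NULL when the call originates in the kernel. */
                return rdma_udata_to_drv_context(udata, struct mlx4_ib_ucontext,
                                                 ibucontext);
        }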
@@ -52,7 +53,8 @@
                                   struct mlx4_ib_cq *recv_cq);
 static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
                                struct mlx4_ib_cq *recv_cq);
-static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state);
+static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state,
+                              struct ib_udata *udata);
 
 enum {
         MLX4_IB_ACK_REQ_FREQ    = 8,
@@ -63,27 +65,6 @@
         MLX4_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
         MLX4_IB_LINK_TYPE_IB            = 0,
         MLX4_IB_LINK_TYPE_ETH           = 1
-};
-
-enum {
-        /*
-         * Largest possible UD header: send with GRH and immediate
-         * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
-         * tag. (LRH would only use 8 bytes, so Ethernet is the
-         * biggest case)
-         */
-        MLX4_IB_UD_HEADER_SIZE          = 82,
-        MLX4_IB_LSO_HEADER_SPARE        = 128,
-};
-
-struct mlx4_ib_sqp {
-        struct mlx4_ib_qp       qp;
-        int                     pkey_index;
-        u32                     qkey;
-        u32                     send_psn;
-        struct ib_ud_header     ud_header;
-        u8                      header_buf[MLX4_IB_UD_HEADER_SIZE];
-        struct ib_qp            *roce_v2_gsi;
 };
 
 enum {
@@ -120,11 +101,6 @@
         MLX4_IB_QP_SRC  = 0,
         MLX4_IB_RWQ_SRC = 1,
 };
-
-static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
-{
-        return container_of(mqp, struct mlx4_ib_sqp, qp);
-}
 
 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
 {
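NOTE: These hunks retire the container_of() trick: struct mlx4_ib_sqp no longer embeds the QP, so to_msqp() has no job left. Judging by the qp->sqp dereferences later in this diff, the definitions moved out of qp.c (presumably into mlx4_ib.h) and took roughly this shape — a hedged sketch, not the verbatim header:

        struct mlx4_ib_sqp {
                int                     pkey_index;
                u32                     qkey;
                u32                     send_psn;
                struct ib_ud_header     ud_header;
                u8                      header_buf[MLX4_IB_UD_HEADER_SIZE];
                struct ib_qp            *roce_v2_gsi;
        };

        struct mlx4_ib_qp {
                /* ... existing fields ... */
                struct mlx4_ib_sqp      *sqp;   /* only for SMI/GSI/proxy QPs */
        };

Every former to_msqp(qp) access below becomes qp->sqp, and every sqp->qp.field flattens to qp->field.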
@@ -323,7 +299,7 @@
 }
 
 static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
-                       int is_user, int has_rq, struct mlx4_ib_qp *qp,
+                       bool is_user, bool has_rq, struct mlx4_ib_qp *qp,
                        u32 inl_recv_sz)
 {
         /* Sanity check RQ size before proceeding */
@@ -401,7 +377,7 @@
          * We need to leave 2 KB + 1 WR of headroom in the SQ to
          * allow HW to prefetch.
          */
-        qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1;
+        qp->sq_spare_wqes = MLX4_IB_SQ_HEADROOM(qp->sq.wqe_shift);
         qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr +
                                             qp->sq_spare_wqes);
 
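NOTE: The spare-WQE computation is unchanged, only named. Assuming the macro matches the upstream mlx4_ib.h definition, it expands to the very expression it replaces:

        #define MLX4_IB_MAX_HEADROOM            2048    /* 2 KB of SQ prefetch headroom */
        #define MLX4_IB_SQ_HEADROOM(shift)      ((MLX4_IB_MAX_HEADROOM >> (shift)) + 1)

so MLX4_IB_SQ_HEADROOM(qp->sq.wqe_shift) still yields (2048 >> wqe_shift) + 1 work-queue entries.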
@@ -504,10 +480,10 @@
         kfree(qp->sqp_proxy_rcv);
 }
 
-static int qp_has_rq(struct ib_qp_init_attr *attr)
+static bool qp_has_rq(struct ib_qp_init_attr *attr)
 {
         if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
-                return 0;
+                return false;
 
         return !attr->srq;
 }
@@ -654,8 +630,6 @@
         if (err)
                 goto err_qpn;
 
-        mutex_init(&qp->mutex);
-
         INIT_LIST_HEAD(&qp->gid_list);
         INIT_LIST_HEAD(&qp->steering_rules);
 
@@ -694,80 +668,72 @@
         return err;
 }
 
-static struct ib_qp *_mlx4_ib_create_qp_rss(struct ib_pd *pd,
-                                            struct ib_qp_init_attr *init_attr,
-                                            struct ib_udata *udata)
+static int _mlx4_ib_create_qp_rss(struct ib_pd *pd, struct mlx4_ib_qp *qp,
+                                  struct ib_qp_init_attr *init_attr,
+                                  struct ib_udata *udata)
 {
-        struct mlx4_ib_qp *qp;
         struct mlx4_ib_create_qp_rss ucmd = {};
         size_t required_cmd_sz;
         int err;
 
         if (!udata) {
                 pr_debug("RSS QP with NULL udata\n");
-                return ERR_PTR(-EINVAL);
+                return -EINVAL;
         }
 
         if (udata->outlen)
-                return ERR_PTR(-EOPNOTSUPP);
+                return -EOPNOTSUPP;
 
         required_cmd_sz = offsetof(typeof(ucmd), reserved1) +
                                         sizeof(ucmd.reserved1);
         if (udata->inlen < required_cmd_sz) {
                 pr_debug("invalid inlen\n");
-                return ERR_PTR(-EINVAL);
+                return -EINVAL;
         }
 
         if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
                 pr_debug("copy failed\n");
-                return ERR_PTR(-EFAULT);
+                return -EFAULT;
         }
 
         if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)))
-                return ERR_PTR(-EOPNOTSUPP);
+                return -EOPNOTSUPP;
 
         if (ucmd.comp_mask || ucmd.reserved1)
-                return ERR_PTR(-EOPNOTSUPP);
+                return -EOPNOTSUPP;
 
         if (udata->inlen > sizeof(ucmd) &&
             !ib_is_udata_cleared(udata, sizeof(ucmd),
                                  udata->inlen - sizeof(ucmd))) {
                 pr_debug("inlen is not supported\n");
-                return ERR_PTR(-EOPNOTSUPP);
+                return -EOPNOTSUPP;
         }
 
         if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
                 pr_debug("RSS QP with unsupported QP type %d\n",
                          init_attr->qp_type);
-                return ERR_PTR(-EOPNOTSUPP);
+                return -EOPNOTSUPP;
         }
 
         if (init_attr->create_flags) {
                 pr_debug("RSS QP doesn't support create flags\n");
-                return ERR_PTR(-EOPNOTSUPP);
+                return -EOPNOTSUPP;
         }
 
         if (init_attr->send_cq || init_attr->cap.max_send_wr) {
                 pr_debug("RSS QP with unsupported send attributes\n");
-                return ERR_PTR(-EOPNOTSUPP);
+                return -EOPNOTSUPP;
         }
-
-        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
-        if (!qp)
-                return ERR_PTR(-ENOMEM);
 
         qp->pri.vid = 0xFFFF;
         qp->alt.vid = 0xFFFF;
 
         err = create_qp_rss(to_mdev(pd->device), init_attr, &ucmd, qp);
-        if (err) {
-                kfree(qp);
-                return ERR_PTR(err);
-        }
+        if (err)
+                return err;
 
         qp->ibqp.qp_num = qp->mqp.qpn;
-
-        return &qp->ibqp;
+        return 0;
 }
 
 /*
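NOTE: With the caller now owning the struct mlx4_ib_qp, the RSS path returns plain errnos instead of ERR_PTR() values and loses all of its kfree() unwinding. The call-site shape this enables (lifted from the mlx4_ib_create_qp() hunk further down):

        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp)
                return ERR_PTR(-ENOMEM);

        mutex_init(&qp->mutex);
        ret = _mlx4_ib_create_qp(pd, qp, init_attr, udata);     /* 0 or -errno */
        if (ret) {
                kfree(qp);
                return ERR_PTR(ret);
        }
        return &qp->ibqp;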
@@ -847,26 +813,154 @@
          * reused for further WQN allocations.
          * The next created WQ will allocate a new range.
          */
-                range->dirty = 1;
+                range->dirty = true;
         }
 
         mutex_unlock(&context->wqn_ranges_mutex);
 }
 
-static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
-                            enum mlx4_ib_source_type src,
-                            struct ib_qp_init_attr *init_attr,
-                            struct ib_udata *udata, int sqpn,
-                            struct mlx4_ib_qp **caller_qp)
+static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
+                     struct ib_udata *udata, struct mlx4_ib_qp *qp)
 {
+        struct mlx4_ib_dev *dev = to_mdev(pd->device);
         int qpn;
         int err;
-        struct mlx4_ib_sqp *sqp = NULL;
-        struct mlx4_ib_qp *qp;
+        struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
+                udata, struct mlx4_ib_ucontext, ibucontext);
+        struct mlx4_ib_cq *mcq;
+        unsigned long flags;
+        int range_size;
+        struct mlx4_ib_create_wq wq;
+        size_t copy_len;
+        int shift;
+        int n;
+
+        qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;
+
+        spin_lock_init(&qp->sq.lock);
+        spin_lock_init(&qp->rq.lock);
+        INIT_LIST_HEAD(&qp->gid_list);
+        INIT_LIST_HEAD(&qp->steering_rules);
+
+        qp->state = IB_QPS_RESET;
+
+        copy_len = min(sizeof(struct mlx4_ib_create_wq), udata->inlen);
+
+        if (ib_copy_from_udata(&wq, udata, copy_len)) {
+                err = -EFAULT;
+                goto err;
+        }
+
+        if (wq.comp_mask || wq.reserved[0] || wq.reserved[1] ||
+            wq.reserved[2]) {
+                pr_debug("user command isn't supported\n");
+                err = -EOPNOTSUPP;
+                goto err;
+        }
+
+        if (wq.log_range_size > ilog2(dev->dev->caps.max_rss_tbl_sz)) {
+                pr_debug("WQN range size must be equal or smaller than %d\n",
+                         dev->dev->caps.max_rss_tbl_sz);
+                err = -EOPNOTSUPP;
+                goto err;
+        }
+        range_size = 1 << wq.log_range_size;
+
+        if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS)
+                qp->flags |= MLX4_IB_QP_SCATTER_FCS;
+
+        err = set_rq_size(dev, &init_attr->cap, true, true, qp, qp->inl_recv_sz);
+        if (err)
+                goto err;
+
+        qp->sq_no_prefetch = 1;
+        qp->sq.wqe_cnt = 1;
+        qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
+        qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
+                       (qp->sq.wqe_cnt << qp->sq.wqe_shift);
+
+        qp->umem = ib_umem_get(pd->device, wq.buf_addr, qp->buf_size, 0);
+        if (IS_ERR(qp->umem)) {
+                err = PTR_ERR(qp->umem);
+                goto err;
+        }
+
+        shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
+        err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
+
+        if (err)
+                goto err_buf;
+
+        err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
+        if (err)
+                goto err_mtt;
+
+        err = mlx4_ib_db_map_user(udata, wq.db_addr, &qp->db);
+        if (err)
+                goto err_mtt;
+        qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
+
+        err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn);
+        if (err)
+                goto err_wrid;
+
+        err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
+        if (err)
+                goto err_qpn;
+
+        /*
+         * Hardware wants QPN written in big-endian order (after
+         * shifting) for send doorbell. Precompute this value to save
+         * a little bit when posting sends.
+         */
+        qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
+
+        qp->mqp.event = mlx4_ib_wq_event;
+
+        spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+        mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
+                         to_mcq(init_attr->recv_cq));
+        /* Maintain device to QPs access, needed for further handling
+         * via reset flow
+         */
+        list_add_tail(&qp->qps_list, &dev->qp_list);
+        /* Maintain CQ to QPs access, needed for further handling
+         * via reset flow
+         */
+        mcq = to_mcq(init_attr->send_cq);
+        list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
+        mcq = to_mcq(init_attr->recv_cq);
+        list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
+        mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
+                           to_mcq(init_attr->recv_cq));
+        spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+        return 0;
+
+err_qpn:
+        mlx4_ib_release_wqn(context, qp, 0);
+err_wrid:
+        mlx4_ib_db_unmap_user(context, &qp->db);
+
+err_mtt:
+        mlx4_mtt_cleanup(dev->dev, &qp->mtt);
+err_buf:
+        ib_umem_release(qp->umem);
+err:
+        return err;
+}
+
+static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
+                            struct ib_udata *udata, int sqpn,
+                            struct mlx4_ib_qp *qp)
+{
+        struct mlx4_ib_dev *dev = to_mdev(pd->device);
+        int qpn;
+        int err;
+        struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
+                udata, struct mlx4_ib_ucontext, ibucontext);
         enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
         struct mlx4_ib_cq *mcq;
         unsigned long flags;
-        int range_size = 0;
 
         /* When tunneling special qps, we use a plain UD qp */
         if (sqpn) {
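NOTE: The WQ flavor of QP creation is carved out of create_qp_common() into the dedicated create_rq() above, which is why the src == MLX4_IB_RWQ_SRC branches and the command union disappear below: each path now parses exactly one uABI command. For reference, the two commands look roughly like this in include/uapi/rdma/mlx4-abi.h (quoted from memory; verify against your tree):

        struct mlx4_ib_create_qp {
                __u64   buf_addr;
                __u64   db_addr;
                __u8    log_sq_bb_count;
                __u8    log_sq_stride;
                __u8    sq_no_prefetch;
                __u8    reserved;
                __u32   inl_recv_sz;
        };

        struct mlx4_ib_create_wq {
                __u64   buf_addr;
                __u64   db_addr;
                __u8    log_range_size;
                __u8    reserved[3];
                __u32   comp_mask;
        };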
@@ -909,76 +1003,41 @@
                 sqpn = qpn;
         }
 
-        if (!*caller_qp) {
-                if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
-                    (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
-                                MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
-                        sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL);
-                        if (!sqp)
-                                return -ENOMEM;
-                        qp = &sqp->qp;
-                        qp->pri.vid = 0xFFFF;
-                        qp->alt.vid = 0xFFFF;
-                } else {
-                        qp = kzalloc(sizeof(struct mlx4_ib_qp), GFP_KERNEL);
-                        if (!qp)
-                                return -ENOMEM;
-                        qp->pri.vid = 0xFFFF;
-                        qp->alt.vid = 0xFFFF;
-                }
-        } else
-                qp = *caller_qp;
+        if (init_attr->qp_type == IB_QPT_SMI ||
+            init_attr->qp_type == IB_QPT_GSI || qp_type == MLX4_IB_QPT_SMI ||
+            qp_type == MLX4_IB_QPT_GSI ||
+            (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
+                        MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
+                qp->sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL);
+                if (!qp->sqp)
+                        return -ENOMEM;
+        }
 
         qp->mlx4_ib_qp_type = qp_type;
 
-        mutex_init(&qp->mutex);
         spin_lock_init(&qp->sq.lock);
         spin_lock_init(&qp->rq.lock);
         INIT_LIST_HEAD(&qp->gid_list);
         INIT_LIST_HEAD(&qp->steering_rules);
 
-        qp->state        = IB_QPS_RESET;
+        qp->state = IB_QPS_RESET;
         if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
                 qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
 
-
-        if (pd->uobject) {
-                union {
-                        struct mlx4_ib_create_qp qp;
-                        struct mlx4_ib_create_wq wq;
-                } ucmd;
+        if (udata) {
+                struct mlx4_ib_create_qp ucmd;
                 size_t copy_len;
                 int shift;
                 int n;
 
-                copy_len = (src == MLX4_IB_QP_SRC) ?
-                        sizeof(struct mlx4_ib_create_qp) :
-                        min(sizeof(struct mlx4_ib_create_wq), udata->inlen);
+                copy_len = sizeof(struct mlx4_ib_create_qp);
 
                 if (ib_copy_from_udata(&ucmd, udata, copy_len)) {
                         err = -EFAULT;
                         goto err;
                 }
 
-                if (src == MLX4_IB_RWQ_SRC) {
-                        if (ucmd.wq.comp_mask || ucmd.wq.reserved[0] ||
-                            ucmd.wq.reserved[1] || ucmd.wq.reserved[2]) {
-                                pr_debug("user command isn't supported\n");
-                                err = -EOPNOTSUPP;
-                                goto err;
-                        }
-
-                        if (ucmd.wq.log_range_size >
-                            ilog2(dev->dev->caps.max_rss_tbl_sz)) {
-                                pr_debug("WQN range size must be equal or smaller than %d\n",
-                                         dev->dev->caps.max_rss_tbl_sz);
-                                err = -EOPNOTSUPP;
-                                goto err;
-                        }
-                        range_size = 1 << ucmd.wq.log_range_size;
-                } else {
-                        qp->inl_recv_sz = ucmd.qp.inl_recv_sz;
-                }
+                qp->inl_recv_sz = ucmd.inl_recv_sz;
 
                 if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
                         if (!(dev->dev->caps.flags &
@@ -991,39 +1050,24 @@
                         qp->flags |= MLX4_IB_QP_SCATTER_FCS;
                 }
 
-                err = set_rq_size(dev, &init_attr->cap, !!pd->uobject,
+                err = set_rq_size(dev, &init_attr->cap, udata,
                                   qp_has_rq(init_attr), qp, qp->inl_recv_sz);
                 if (err)
                         goto err;
 
-                if (src == MLX4_IB_QP_SRC) {
-                        qp->sq_no_prefetch = ucmd.qp.sq_no_prefetch;
+                qp->sq_no_prefetch = ucmd.sq_no_prefetch;
 
-                        err = set_user_sq_size(dev, qp,
-                                               (struct mlx4_ib_create_qp *)
-                                               &ucmd);
-                        if (err)
-                                goto err;
-                } else {
-                        qp->sq_no_prefetch = 1;
-                        qp->sq.wqe_cnt = 1;
-                        qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
-                        /* Allocated buffer expects to have at least that SQ
-                         * size.
-                         */
-                        qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
-                                (qp->sq.wqe_cnt << qp->sq.wqe_shift);
-                }
+                err = set_user_sq_size(dev, qp, &ucmd);
+                if (err)
+                        goto err;
 
-                qp->umem = ib_umem_get(pd->uobject->context,
-                                (src == MLX4_IB_QP_SRC) ? ucmd.qp.buf_addr :
-                                ucmd.wq.buf_addr, qp->buf_size, 0, 0);
+                qp->umem =
+                        ib_umem_get(pd->device, ucmd.buf_addr, qp->buf_size, 0);
                 if (IS_ERR(qp->umem)) {
                         err = PTR_ERR(qp->umem);
                         goto err;
                 }
 
-                n = ib_umem_page_count(qp->umem);
                 shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
                 err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
 
@@ -1035,15 +1079,13 @@
                         goto err_mtt;
 
                 if (qp_has_rq(init_attr)) {
-                        err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
-                                (src == MLX4_IB_QP_SRC) ? ucmd.qp.db_addr :
-                                        ucmd.wq.db_addr, &qp->db);
+                        err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &qp->db);
                         if (err)
                                 goto err_mtt;
                 }
                 qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
         } else {
-                err = set_rq_size(dev, &init_attr->cap, !!pd->uobject,
+                err = set_rq_size(dev, &init_attr->cap, udata,
                                   qp_has_rq(init_attr), qp, 0);
                 if (err)
                         goto err;
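NOTE: set_rq_size() now declares bool is_user, yet the call sites pass the udata pointer directly. That is well-defined C: a pointer converted to _Bool is true exactly when it is non-NULL, so the new calls are equivalent to the old !!pd->uobject spelling:

        /* the two spellings below behave identically */
        err = set_rq_size(dev, &init_attr->cap, udata,
                          qp_has_rq(init_attr), qp, qp->inl_recv_sz);
        err = set_rq_size(dev, &init_attr->cap, udata != NULL,
                          qp_has_rq(init_attr), qp, qp->inl_recv_sz);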
@@ -1109,11 +1151,6 @@
                                 goto err_wrid;
                         }
                 }
-        } else if (src == MLX4_IB_RWQ_SRC) {
-                err = mlx4_ib_alloc_wqn(to_mucontext(pd->uobject->context), qp,
-                                        range_size, &qpn);
-                if (err)
-                        goto err_wrid;
         } else {
                 /* Raw packet QPNs may not have bits 6,7 set in their qp_num;
                  * otherwise, the WQE BlueFlame setup flow wrongly causes
@@ -1152,11 +1189,7 @@
          */
         qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
 
-        qp->mqp.event = (src == MLX4_IB_QP_SRC) ? mlx4_ib_qp_event :
-                                                  mlx4_ib_wq_event;
-
-        if (!*caller_qp)
-                *caller_qp = qp;
+        qp->mqp.event = mlx4_ib_qp_event;
 
         spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
         mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
@@ -1181,9 +1214,6 @@
         if (!sqpn) {
                 if (qp->flags & MLX4_IB_QP_NETIF)
                         mlx4_ib_steer_qp_free(dev, qpn, 1);
-                else if (src == MLX4_IB_RWQ_SRC)
-                        mlx4_ib_release_wqn(to_mucontext(pd->uobject->context),
-                                            qp, 0);
                 else
                         mlx4_qp_release_range(dev->dev, qpn, 1);
         }
@@ -1191,9 +1221,9 @@
         if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
                 free_proxy_bufs(pd->device, qp);
 err_wrid:
-        if (pd->uobject) {
+        if (udata) {
                 if (qp_has_rq(init_attr))
-                        mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
+                        mlx4_ib_db_unmap_user(context, &qp->db);
         } else {
                 kvfree(qp->sq.wrid);
                 kvfree(qp->rq.wrid);
@@ -1203,20 +1233,16 @@
         mlx4_mtt_cleanup(dev->dev, &qp->mtt);
 
 err_buf:
-        if (pd->uobject)
-                ib_umem_release(qp->umem);
-        else
+        if (!qp->umem)
                 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
+        ib_umem_release(qp->umem);
 
 err_db:
-        if (!pd->uobject && qp_has_rq(init_attr))
+        if (!udata && qp_has_rq(init_attr))
                 mlx4_db_free(dev->dev, &qp->db);
 
 err:
-        if (sqp)
-                kfree(sqp);
-        else if (!*caller_qp)
-                kfree(qp);
+        kfree(qp->sqp);
         return err;
 }
 
@@ -1330,11 +1356,11 @@
         mlx4_qp_free(dev->dev, &qp->mqp);
         mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
         del_gid_entries(qp);
-        kfree(qp->rss_ctx);
 }
 
 static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
-                              enum mlx4_ib_source_type src, int is_user)
+                              enum mlx4_ib_source_type src,
+                              struct ib_udata *udata)
 {
         struct mlx4_ib_cq *send_cq, *recv_cq;
         unsigned long flags;
@@ -1376,7 +1402,7 @@
         list_del(&qp->qps_list);
         list_del(&qp->cq_send_list);
         list_del(&qp->cq_recv_list);
-        if (!is_user) {
+        if (!udata) {
                 __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
                                    qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
                 if (send_cq != recv_cq)
@@ -1394,22 +1420,28 @@
                 if (qp->flags & MLX4_IB_QP_NETIF)
                         mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
                 else if (src == MLX4_IB_RWQ_SRC)
-                        mlx4_ib_release_wqn(to_mucontext(
-                                            qp->ibwq.uobject->context), qp, 1);
+                        mlx4_ib_release_wqn(
+                                rdma_udata_to_drv_context(
+                                        udata,
+                                        struct mlx4_ib_ucontext,
+                                        ibucontext),
+                                qp, 1);
                 else
                         mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
         }
 
         mlx4_mtt_cleanup(dev->dev, &qp->mtt);
 
-        if (is_user) {
+        if (udata) {
                 if (qp->rq.wqe_cnt) {
-                        struct mlx4_ib_ucontext *mcontext = !src ?
-                                to_mucontext(qp->ibqp.uobject->context) :
-                                to_mucontext(qp->ibwq.uobject->context);
+                        struct mlx4_ib_ucontext *mcontext =
+                                rdma_udata_to_drv_context(
+                                        udata,
+                                        struct mlx4_ib_ucontext,
+                                        ibucontext);
+
                         mlx4_ib_db_unmap_user(mcontext, &qp->db);
                 }
-                ib_umem_release(qp->umem);
         } else {
                 kvfree(qp->sq.wrid);
                 kvfree(qp->rq.wrid);
@@ -1420,6 +1452,7 @@
                 if (qp->rq.wqe_cnt)
                         mlx4_db_free(dev->dev, &qp->db);
         }
+        ib_umem_release(qp->umem);
 
         del_gid_entries(qp);
 }
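NOTE: ib_umem_release() accepts NULL and does nothing, so the destroy and error paths stop branching on user-vs-kernel and key off qp->umem itself — only user QPs ever set it. The unified tail is simply:

        if (!qp->umem)          /* kernel QP: buffer came from mlx4_buf_alloc() */
                mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
        ib_umem_release(qp->umem);      /* no-op when qp->umem == NULL */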
@@ -1441,17 +1474,16 @@
         return dev->dev->caps.spec_qps[attr->port_num - 1].qp1_proxy;
 }
 
-static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
-                                        struct ib_qp_init_attr *init_attr,
-                                        struct ib_udata *udata)
+static int _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp,
+                              struct ib_qp_init_attr *init_attr,
+                              struct ib_udata *udata)
 {
-        struct mlx4_ib_qp *qp = NULL;
         int err;
         int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
         u16 xrcdn = 0;
 
         if (init_attr->rwq_ind_tbl)
-                return _mlx4_ib_create_qp_rss(pd, init_attr, udata);
+                return _mlx4_ib_create_qp_rss(pd, qp, init_attr, udata);
 
         /*
          * We only support LSO, vendor flag1, and multicast loopback blocking,
@@ -1463,16 +1495,16 @@
                                         MLX4_IB_SRIOV_SQP |
                                         MLX4_IB_QP_NETIF |
                                         MLX4_IB_QP_CREATE_ROCE_V2_GSI))
-                return ERR_PTR(-EINVAL);
+                return -EINVAL;
 
         if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
                 if (init_attr->qp_type != IB_QPT_UD)
-                        return ERR_PTR(-EINVAL);
+                        return -EINVAL;
         }
 
         if (init_attr->create_flags) {
                 if (udata && init_attr->create_flags & ~(sup_u_create_flags))
-                        return ERR_PTR(-EINVAL);
+                        return -EINVAL;
 
                 if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP |
                                                  MLX4_IB_QP_CREATE_ROCE_V2_GSI |
@@ -1482,7 +1514,7 @@
                      init_attr->qp_type > IB_QPT_GSI) ||
                     (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI &&
                      init_attr->qp_type != IB_QPT_GSI))
-                        return ERR_PTR(-EINVAL);
+                        return -EINVAL;
         }
 
         switch (init_attr->qp_type) {
@@ -1490,58 +1522,46 @@
                 pd = to_mxrcd(init_attr->xrcd)->pd;
                 xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
                 init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq;
-                /* fall through */
+                fallthrough;
         case IB_QPT_XRC_INI:
                 if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
-                        return ERR_PTR(-ENOSYS);
+                        return -ENOSYS;
                 init_attr->recv_cq = init_attr->send_cq;
-                /* fall through */
+                fallthrough;
         case IB_QPT_RC:
         case IB_QPT_UC:
         case IB_QPT_RAW_PACKET:
-                qp = kzalloc(sizeof(*qp), GFP_KERNEL);
-                if (!qp)
-                        return ERR_PTR(-ENOMEM);
+        case IB_QPT_UD:
                 qp->pri.vid = 0xFFFF;
                 qp->alt.vid = 0xFFFF;
-                /* fall through */
-        case IB_QPT_UD:
-        {
-                err = create_qp_common(to_mdev(pd->device), pd, MLX4_IB_QP_SRC,
-                                       init_attr, udata, 0, &qp);
-                if (err) {
-                        kfree(qp);
-                        return ERR_PTR(err);
-                }
+                err = create_qp_common(pd, init_attr, udata, 0, qp);
+                if (err)
+                        return err;
 
                 qp->ibqp.qp_num = qp->mqp.qpn;
                 qp->xrcdn = xrcdn;
-
                 break;
-        }
         case IB_QPT_SMI:
         case IB_QPT_GSI:
         {
                 int sqpn;
 
-                /* Userspace is not allowed to create special QPs: */
-                if (udata)
-                        return ERR_PTR(-EINVAL);
                 if (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI) {
                         int res = mlx4_qp_reserve_range(to_mdev(pd->device)->dev,
                                                         1, 1, &sqpn, 0,
                                                         MLX4_RES_USAGE_DRIVER);
 
                         if (res)
-                                return ERR_PTR(res);
+                                return res;
                 } else {
                         sqpn = get_sqp_num(to_mdev(pd->device), init_attr);
                 }
 
-                err = create_qp_common(to_mdev(pd->device), pd, MLX4_IB_QP_SRC,
-                                       init_attr, udata, sqpn, &qp);
+                qp->pri.vid = 0xFFFF;
+                qp->alt.vid = 0xFFFF;
+                err = create_qp_common(pd, init_attr, udata, sqpn, qp);
                 if (err)
-                        return ERR_PTR(err);
+                        return err;
 
                 qp->port        = init_attr->port_num;
                 qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 :
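NOTE: The /* fall through */ comments become the fallthrough pseudo-keyword, which the compiler can actually verify. In include/linux/compiler_attributes.h it is defined roughly as:

        #if __has_attribute(__fallthrough__)
        # define fallthrough    __attribute__((__fallthrough__))
        #else
        # define fallthrough    do {} while (0) /* fallthrough */
        #endif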
@@ -1550,25 +1570,33 @@
         }
         default:
                 /* Don't support raw QPs */
-                return ERR_PTR(-EINVAL);
+                return -EOPNOTSUPP;
         }
-
-        return &qp->ibqp;
+        return 0;
 }
 
 struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
                                 struct ib_qp_init_attr *init_attr,
                                 struct ib_udata *udata) {
         struct ib_device *device = pd ? pd->device : init_attr->xrcd->device;
-        struct ib_qp *ibqp;
         struct mlx4_ib_dev *dev = to_mdev(device);
+        struct mlx4_ib_qp *qp;
+        int ret;
 
-        ibqp = _mlx4_ib_create_qp(pd, init_attr, udata);
+        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+        if (!qp)
+                return ERR_PTR(-ENOMEM);
 
-        if (!IS_ERR(ibqp) &&
-            (init_attr->qp_type == IB_QPT_GSI) &&
+        mutex_init(&qp->mutex);
+        ret = _mlx4_ib_create_qp(pd, qp, init_attr, udata);
+        if (ret) {
+                kfree(qp);
+                return ERR_PTR(ret);
+        }
+
+        if (init_attr->qp_type == IB_QPT_GSI &&
             !(init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI)) {
-                struct mlx4_ib_sqp *sqp = to_msqp((to_mqp(ibqp)));
+                struct mlx4_ib_sqp *sqp = qp->sqp;
                 int is_eth = rdma_cap_eth_ah(&dev->ib_dev, init_attr->port_num);
 
                 if (is_eth &&
@@ -1580,17 +1608,17 @@
                         pr_err("Failed to create GSI QP for RoCEv2 (%ld)\n", PTR_ERR(sqp->roce_v2_gsi));
                         sqp->roce_v2_gsi = NULL;
                 } else {
-                        sqp = to_msqp(to_mqp(sqp->roce_v2_gsi));
-                        sqp->qp.flags |= MLX4_IB_ROCE_V2_GSI_QP;
+                        to_mqp(sqp->roce_v2_gsi)->flags |=
+                                MLX4_IB_ROCE_V2_GSI_QP;
                 }
 
                 init_attr->create_flags &= ~MLX4_IB_QP_CREATE_ROCE_V2_GSI;
         }
-        return ibqp;
+        return &qp->ibqp;
 }
 
-static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
+static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
 {
         struct mlx4_ib_dev *dev = to_mdev(qp->device);
         struct mlx4_ib_qp *mqp = to_mqp(qp);
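NOTE: Taken together with the previous hunks, mlx4_ib_create_qp() is now the single owner of the QP allocation. A sketch of the resulting call graph:

        /*
         * mlx4_ib_create_qp()
         *   kzalloc(qp), mutex_init(&qp->mutex)     sole allocation point
         *   -> _mlx4_ib_create_qp(pd, qp, ...)      returns 0 or -errno
         *        -> _mlx4_ib_create_qp_rss(...)     RSS QPs
         *        -> create_qp_common(...)           regular and special QPs
         *             kzalloc(qp->sqp)              only for SMI/GSI/proxy
         *   on failure: kfree(qp) here; create_qp_common() frees qp->sqp
         */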
@@ -1611,32 +1639,27 @@
         if (qp->rwq_ind_tbl) {
                 destroy_qp_rss(dev, mqp);
         } else {
-                struct mlx4_ib_pd *pd;
-
-                pd = get_pd(mqp);
-                destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, !!pd->ibpd.uobject);
+                destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, udata);
         }
 
-        if (is_sqp(dev, mqp))
-                kfree(to_msqp(mqp));
-        else
-                kfree(mqp);
+        kfree(mqp->sqp);
+        kfree(mqp);
 
         return 0;
 }
 
-int mlx4_ib_destroy_qp(struct ib_qp *qp)
+int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
 {
         struct mlx4_ib_qp *mqp = to_mqp(qp);
 
         if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
-                struct mlx4_ib_sqp *sqp = to_msqp(mqp);
+                struct mlx4_ib_sqp *sqp = mqp->sqp;
 
                 if (sqp->roce_v2_gsi)
                         ib_destroy_qp(sqp->roce_v2_gsi);
         }
 
-        return _mlx4_ib_destroy_qp(qp);
+        return _mlx4_ib_destroy_qp(qp, udata);
 }
 
 static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
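NOTE: The destroy side no longer needs is_sqp(): kfree(NULL) is defined to do nothing, so both frees run unconditionally now that the special-QP state is a separate allocation hanging off the QP:

        kfree(mqp->sqp);        /* NULL for ordinary QPs; kfree(NULL) is a no-op */
        kfree(mqp);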
@@ -1943,7 +1966,8 @@
  * Go over all RSS QP's childes (WQs) and apply their HW state according to
  * their logic state if the RSS QP is the first RSS QP associated for the WQ.
  */
-static int bringup_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, u8 port_num)
+static int bringup_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, u8 port_num,
+                            struct ib_udata *udata)
 {
         int err = 0;
         int i;
@@ -1967,7 +1991,7 @@
                 }
                 wq->port = port_num;
                 if ((wq->rss_usecnt == 0) && (ibwq->state == IB_WQS_RDY)) {
-                        err = _mlx4_ib_modify_wq(ibwq, IB_WQS_RDY);
+                        err = _mlx4_ib_modify_wq(ibwq, IB_WQS_RDY, udata);
                         if (err) {
                                 mutex_unlock(&wq->mutex);
                                 break;
@@ -1989,7 +2013,8 @@
 
                 if ((wq->rss_usecnt == 1) &&
                     (ibwq->state == IB_WQS_RDY))
-                        if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET))
+                        if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET,
+                                               udata))
                                 pr_warn("failed to reverse WQN=0x%06x\n",
                                         ibwq->wq_num);
                 wq->rss_usecnt--;
@@ -2001,7 +2026,8 @@
         return err;
 }
 
-static void bring_down_rss_rwqs(struct ib_rwq_ind_table *ind_tbl)
+static void bring_down_rss_rwqs(struct ib_rwq_ind_table *ind_tbl,
+                                struct ib_udata *udata)
 {
         int i;
 
@@ -2012,7 +2038,7 @@
                 mutex_lock(&wq->mutex);
 
                 if ((wq->rss_usecnt == 1) && (ibwq->state == IB_WQS_RDY))
-                        if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET))
+                        if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET, udata))
                                 pr_warn("failed to reverse WQN=%x\n",
                                         ibwq->wq_num);
                 wq->rss_usecnt--;
@@ -2044,9 +2070,10 @@
 
 static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
                                const struct ib_qp_attr *attr, int attr_mask,
-                               enum ib_qp_state cur_state, enum ib_qp_state new_state)
+                               enum ib_qp_state cur_state,
+                               enum ib_qp_state new_state,
+                               struct ib_udata *udata)
 {
-        struct ib_uobject *ibuobject;
         struct ib_srq   *ibsrq;
         const struct ib_gid_attr *gid_attr = NULL;
         struct ib_rwq_ind_table *rwq_ind_tbl;
@@ -2055,6 +2082,8 @@
         struct mlx4_ib_qp *qp;
         struct mlx4_ib_pd *pd;
         struct mlx4_ib_cq *send_cq, *recv_cq;
+        struct mlx4_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+                udata, struct mlx4_ib_ucontext, ibucontext);
         struct mlx4_qp_context *context;
         enum mlx4_qp_optpar optpar = 0;
         int sqd_event;
@@ -2066,7 +2095,6 @@
                 struct ib_wq *ibwq;
 
                 ibwq = (struct ib_wq *)src;
-                ibuobject = ibwq->uobject;
                 ibsrq    = NULL;
                 rwq_ind_tbl = NULL;
                 qp_type = IB_QPT_RAW_PACKET;
@@ -2077,7 +2105,6 @@
                 struct ib_qp *ibqp;
 
                 ibqp = (struct ib_qp *)src;
-                ibuobject = ibqp->uobject;
                 ibsrq    = ibqp->srq;
                 rwq_ind_tbl = ibqp->rwq_ind_tbl;
                 qp_type = ibqp->qp_type;
@@ -2162,11 +2189,9 @@
                         context->param3 |= cpu_to_be32(1 << 30);
         }
 
-        if (ibuobject)
+        if (ucontext)
                 context->usr_page = cpu_to_be32(
-                        mlx4_to_hw_uar_index(dev->dev,
-                                             to_mucontext(ibuobject->context)
-                                             ->uar.index));
+                        mlx4_to_hw_uar_index(dev->dev, ucontext->uar.index));
         else
                 context->usr_page = cpu_to_be32(
                         mlx4_to_hw_uar_index(dev->dev, dev->priv_uar.index));
@@ -2237,8 +2262,10 @@
 
                 if (is_eth) {
                         gid_attr = attr->ah_attr.grh.sgid_attr;
-                        vlan = rdma_vlan_dev_vlan_id(gid_attr->ndev);
-                        memcpy(smac, gid_attr->ndev->dev_addr, ETH_ALEN);
+                        err = rdma_read_gid_l2_fields(gid_attr, &vlan,
+                                                      &smac[0]);
+                        if (err)
+                                goto out;
                 }
 
                 if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path,
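NOTE: Direct pokes at gid_attr->ndev are replaced by rdma_read_gid_l2_fields(), which takes the required RCU read lock internally and fails cleanly when the netdev has gone away. Its contract as relied on here (declared in <rdma/ib_cache.h>; hedged paraphrase):

        u16 vlan_id;            /* set to 0xffff when no VLAN is attached */
        u8 smac[ETH_ALEN];
        int err;

        err = rdma_read_gid_l2_fields(gid_attr, &vlan_id, &smac[0]);
        if (err)                /* e.g. -ENODEV: netdev already unregistered */
                goto out;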
@@ -2298,7 +2325,7 @@
         context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
 
         /* Set "fast registration enabled" for all kernel QPs */
-        if (!ibuobject)
+        if (!ucontext)
                 context->params1 |= cpu_to_be32(1 << 11);
 
         if (attr_mask & IB_QP_RNR_RETRY) {
@@ -2435,7 +2462,7 @@
         else
                 sqd_event = 0;
 
-        if (!ibuobject &&
+        if (!ucontext &&
             cur_state == IB_QPS_RESET &&
             new_state == IB_QPS_INIT)
                 context->rlkey_roce_mode |= (1 << 4);
@@ -2446,7 +2473,7 @@
          * headroom is stamped so that the hardware doesn't start
          * processing stale work requests.
          */
-        if (!ibuobject &&
+        if (!ucontext &&
             cur_state == IB_QPS_RESET &&
             new_state == IB_QPS_INIT) {
                 struct mlx4_wqe_ctrl_seg *ctrl;
@@ -2488,7 +2515,7 @@
                 qp->alt_port = attr->alt_port_num;
 
         if (is_sqp(dev, qp))
-                store_sqp_attrs(to_msqp(qp), attr, attr_mask);
+                store_sqp_attrs(qp->sqp, attr, attr_mask);
 
         /*
          * If we moved QP0 to RTR, bring the IB link up; if we moved
@@ -2510,7 +2537,7 @@
          * entries and reinitialize the QP.
          */
         if (new_state == IB_QPS_RESET) {
-                if (!ibuobject) {
+                if (!ucontext) {
                         mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
                                          ibsrq ? to_msrq(ibsrq) : NULL);
                         if (send_cq != recv_cq)
@@ -2631,7 +2658,6 @@
 static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                               int attr_mask, struct ib_udata *udata)
 {
-        enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
         struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
         struct mlx4_ib_qp *qp = to_mqp(ibqp);
         enum ib_qp_state cur_state, new_state;
@@ -2641,13 +2667,8 @@
         cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
         new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
-        if (cur_state != new_state || cur_state != IB_QPS_RESET) {
-                int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
-                ll = rdma_port_get_link_layer(&dev->ib_dev, port);
-        }
-
         if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
-                                attr_mask, ll)) {
+                                attr_mask)) {
                 pr_debug("qpn 0x%x: invalid attribute mask specified "
                          "for transition %d to %d. qp_type %d,"
                          " attr_mask 0x%x\n",
@@ -2742,16 +2763,17 @@
         }
 
         if (ibqp->rwq_ind_tbl && (new_state == IB_QPS_INIT)) {
-                err = bringup_rss_rwqs(ibqp->rwq_ind_tbl, attr->port_num);
+                err = bringup_rss_rwqs(ibqp->rwq_ind_tbl, attr->port_num,
+                                       udata);
                 if (err)
                         goto out;
         }
 
         err = __mlx4_ib_modify_qp(ibqp, MLX4_IB_QP_SRC, attr, attr_mask,
-                                  cur_state, new_state);
+                                  cur_state, new_state, udata);
 
         if (ibqp->rwq_ind_tbl && err)
-                bring_down_rss_rwqs(ibqp->rwq_ind_tbl);
+                bring_down_rss_rwqs(ibqp->rwq_ind_tbl, udata);
 
         if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
                 attr->port_num = 1;
@@ -2770,7 +2792,7 @@
         ret = _mlx4_ib_modify_qp(ibqp, attr, attr_mask, udata);
 
         if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
-                struct mlx4_ib_sqp *sqp = to_msqp(mqp);
+                struct mlx4_ib_sqp *sqp = mqp->sqp;
                 int err = 0;
 
                 if (sqp->roce_v2_gsi)
@@ -2795,12 +2817,13 @@
         return -EINVAL;
 }
 
-static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
+static int build_sriov_qp0_header(struct mlx4_ib_qp *qp,
                                   const struct ib_ud_wr *wr,
                                   void *wqe, unsigned *mlx_seg_len)
 {
-        struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
-        struct ib_device *ib_dev = &mdev->ib_dev;
+        struct mlx4_ib_dev *mdev = to_mdev(qp->ibqp.device);
+        struct mlx4_ib_sqp *sqp = qp->sqp;
+        struct ib_device *ib_dev = qp->ibqp.device;
         struct mlx4_wqe_mlx_seg *mlx = wqe;
         struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
         struct mlx4_ib_ah *ah = to_mah(wr->ah);
@@ -2822,12 +2845,12 @@
 
         /* for proxy-qp0 sends, need to add in size of tunnel header */
         /* for tunnel-qp0 sends, tunnel header is already in s/g list */
-        if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER)
+        if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER)
                 send_size += sizeof (struct mlx4_ib_tunnel_header);
 
         ib_ud_header_init(send_size, 1, 0, 0, 0, 0, 0, 0, &sqp->ud_header);
 
-        if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) {
+        if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) {
                 sqp->ud_header.lrh.service_level =
                         be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
                 sqp->ud_header.lrh.destination_lid =
@@ -2844,26 +2867,26 @@
 
         sqp->ud_header.lrh.virtual_lane    = 0;
         sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
-        err = ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
+        err = ib_get_cached_pkey(ib_dev, qp->port, 0, &pkey);
         if (err)
                 return err;
         sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
-        if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
+        if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
                 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
         else
                 sqp->ud_header.bth.destination_qpn =
-                        cpu_to_be32(mdev->dev->caps.spec_qps[sqp->qp.port - 1].qp0_tunnel);
+                        cpu_to_be32(mdev->dev->caps.spec_qps[qp->port - 1].qp0_tunnel);
 
         sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
         if (mlx4_is_master(mdev->dev)) {
-                if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
+                if (mlx4_get_parav_qkey(mdev->dev, qp->mqp.qpn, &qkey))
                         return -EINVAL;
         } else {
-                if (vf_get_qp0_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
+                if (vf_get_qp0_qkey(mdev->dev, qp->mqp.qpn, &qkey))
                         return -EINVAL;
         }
         sqp->ud_header.deth.qkey = cpu_to_be32(qkey);
-        sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn);
+        sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->mqp.qpn);
 
         sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
         sqp->ud_header.immediate_present = 0;
@@ -2947,10 +2970,11 @@
 }
 
 #define MLX4_ROCEV2_QP1_SPORT 0xC000
-static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
+static int build_mlx_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr,
                             void *wqe, unsigned *mlx_seg_len)
 {
-        struct ib_device *ib_dev = sqp->qp.ibqp.device;
+        struct mlx4_ib_sqp *sqp = qp->sqp;
+        struct ib_device *ib_dev = qp->ibqp.device;
         struct mlx4_ib_dev *ibdev = to_mdev(ib_dev);
         struct mlx4_wqe_mlx_seg *mlx = wqe;
         struct mlx4_wqe_ctrl_seg *ctrl = wqe;
@@ -2974,7 +2998,7 @@
         for (i = 0; i < wr->wr.num_sge; ++i)
                 send_size += wr->wr.sg_list[i].length;
 
-        is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
+        is_eth = rdma_port_get_link_layer(qp->ibqp.device, qp->port) == IB_LINK_LAYER_ETHERNET;
         is_grh = mlx4_ib_ah_grh_present(ah);
         if (is_eth) {
                 enum ib_gid_type gid_type;
@@ -2988,9 +3012,9 @@
                         if (err)
                                 return err;
                 } else {
-                        err = fill_gid_by_hw_index(ibdev, sqp->qp.port,
-                                           ah->av.ib.gid_index,
-                                           &sgid, &gid_type);
+                        err = fill_gid_by_hw_index(ibdev, qp->port,
+                                                   ah->av.ib.gid_index, &sgid,
+                                                   &gid_type);
                         if (!err) {
                                 is_udp = gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
                                 if (is_udp) {
@@ -3006,7 +3030,7 @@
                 }
                 if (ah->av.eth.vlan != cpu_to_be16(0xffff)) {
                         vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
-                        is_vlan = 1;
+                        is_vlan = true;
                 }
         }
         err = ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh,
@@ -3035,13 +3059,18 @@
                          * indexes don't necessarily match the hw ones, so
                          * we must use our own cache
                          */
-                        sqp->ud_header.grh.source_gid.global.subnet_prefix =
-                                cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov.
-                                                            demux[sqp->qp.port - 1].
-                                                            subnet_prefix)));
-                        sqp->ud_header.grh.source_gid.global.interface_id =
-                                to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
-                                guid_cache[ah->av.ib.gid_index];
+                        sqp->ud_header.grh.source_gid.global
+                                .subnet_prefix =
+                                cpu_to_be64(atomic64_read(
+                                        &(to_mdev(ib_dev)
+                                                  ->sriov
+                                                  .demux[qp->port - 1]
+                                                  .subnet_prefix)));
+                        sqp->ud_header.grh.source_gid.global
+                                .interface_id =
+                                to_mdev(ib_dev)
+                                        ->sriov.demux[qp->port - 1]
+                                        .guid_cache[ah->av.ib.gid_index];
                 } else {
                         sqp->ud_header.grh.source_gid =
                                 ah->ibah.sgid_attr->gid;
@@ -3073,10 +3102,13 @@
         mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
 
         if (!is_eth) {
-                mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
-                                          (sqp->ud_header.lrh.destination_lid ==
-                                           IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
-                                          (sqp->ud_header.lrh.service_level << 8));
+                mlx->flags |=
+                        cpu_to_be32((!qp->ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
+                                    (sqp->ud_header.lrh.destination_lid ==
+                                                     IB_LID_PERMISSIVE ?
+                                             MLX4_WQE_MLX_SLR :
+                                             0) |
+                                    (sqp->ud_header.lrh.service_level << 8));
                 if (ah->av.ib.port_pd & cpu_to_be32(0x80000000))
                         mlx->flags |= cpu_to_be32(0x1); /* force loopback */
                 mlx->rlid = sqp->ud_header.lrh.destination_lid;
@@ -3122,21 +3154,23 @@
                         sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
                 }
         } else {
-                sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 :
-                                                     sl_to_vl(to_mdev(ib_dev),
-                                                              sqp->ud_header.lrh.service_level,
-                                                              sqp->qp.port);
-                if (sqp->qp.ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15)
+                sqp->ud_header.lrh.virtual_lane =
+                        !qp->ibqp.qp_num ?
+                                15 :
+                                sl_to_vl(to_mdev(ib_dev),
+                                         sqp->ud_header.lrh.service_level,
+                                         qp->port);
+                if (qp->ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15)
                         return -EINVAL;
                 if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
                         sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
         }
         sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
-        if (!sqp->qp.ibqp.qp_num)
-                err = ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index,
+        if (!qp->ibqp.qp_num)
+                err = ib_get_cached_pkey(ib_dev, qp->port, sqp->pkey_index,
                                          &pkey);
         else
-                err = ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index,
+                err = ib_get_cached_pkey(ib_dev, qp->port, wr->pkey_index,
                                          &pkey);
         if (err)
                 return err;
@@ -3146,7 +3180,7 @@
         sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
         sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
                                                sqp->qkey : wr->remote_qkey);
-        sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
+        sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num);
 
         header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
 
@@ -3459,24 +3493,24 @@
         int nreq;
         int err = 0;
         unsigned ind;
-        int uninitialized_var(size);
-        unsigned uninitialized_var(seglen);
+        int size;
+        unsigned seglen;
         __be32 dummy;
         __be32 *lso_wqe;
-        __be32 uninitialized_var(lso_hdr_sz);
+        __be32 lso_hdr_sz;
         __be32 blh;
         int i;
         struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
 
         if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
-                struct mlx4_ib_sqp *sqp = to_msqp(qp);
+                struct mlx4_ib_sqp *sqp = qp->sqp;
 
                 if (sqp->roce_v2_gsi) {
                         struct mlx4_ib_ah *ah = to_mah(ud_wr(wr)->ah);
                         enum ib_gid_type gid_type;
                         union ib_gid gid;
 
-                        if (!fill_gid_by_hw_index(mdev, sqp->qp.port,
+                        if (!fill_gid_by_hw_index(mdev, qp->port,
                                                   ah->av.ib.gid_index,
                                                   &gid, &gid_type))
                                 qp = (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ?
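NOTE: uninitialized_var() was removed tree-wide because it only silenced -Wmaybe-uninitialized by self-assignment, which could mask real bugs. The macro was literally:

        #define uninitialized_var(x) x = x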
@@ -3596,8 +3630,8 @@
                         break;
 
                 case MLX4_IB_QPT_TUN_SMI_OWNER:
-                        err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
-                                                     ctrl, &seglen);
+                        err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl,
+                                                     &seglen);
                         if (unlikely(err)) {
                                 *bad_wr = wr;
                                 goto out;
@@ -3633,8 +3667,8 @@
                         break;
 
                 case MLX4_IB_QPT_PROXY_SMI_OWNER:
-                        err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
-                                                     ctrl, &seglen);
+                        err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl,
+                                                     &seglen);
                         if (unlikely(err)) {
                                 *bad_wr = wr;
                                 goto out;
@@ -3667,8 +3701,7 @@
 
                 case MLX4_IB_QPT_SMI:
                 case MLX4_IB_QPT_GSI:
-                        err = build_mlx_header(to_msqp(qp), ud_wr(wr), ctrl,
-                                               &seglen);
+                        err = build_mlx_header(qp, ud_wr(wr), ctrl, &seglen);
                         if (unlikely(err)) {
                                 *bad_wr = wr;
                                 goto out;
@@ -3753,12 +3786,6 @@
 
                 writel_relaxed(qp->doorbell_qpn,
                                to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);
-
-                /*
-                 * Make sure doorbells don't leak out of SQ spinlock
-                 * and reach the HCA out of order.
-                 */
-                mmiowb();
 
                 stamp_send_wqe(qp, ind + qp->sq_spare_wqes - 1);
 
40544081 struct ib_wq_init_attr *init_attr,
40554082 struct ib_udata *udata)
40564083 {
4057
- struct mlx4_ib_dev *dev;
4058
- struct ib_qp_init_attr ib_qp_init_attr;
4084
+ struct mlx4_dev *dev = to_mdev(pd->device)->dev;
4085
+ struct ib_qp_init_attr ib_qp_init_attr = {};
40594086 struct mlx4_ib_qp *qp;
40604087 struct mlx4_ib_create_wq ucmd;
40614088 int err, required_cmd_sz;
40624089
4063
- if (!(udata && pd->uobject))
4090
+ if (!udata)
40644091 return ERR_PTR(-EINVAL);
40654092
40664093 required_cmd_sz = offsetof(typeof(ucmd), comp_mask) +
....@@ -4080,14 +4107,13 @@
40804107 if (udata->outlen)
40814108 return ERR_PTR(-EOPNOTSUPP);
40824109
4083
- dev = to_mdev(pd->device);
4084
-
40854110 if (init_attr->wq_type != IB_WQT_RQ) {
40864111 pr_debug("unsupported wq type %d\n", init_attr->wq_type);
40874112 return ERR_PTR(-EOPNOTSUPP);
40884113 }
40894114
4090
- if (init_attr->create_flags & ~IB_WQ_FLAGS_SCATTER_FCS) {
4115
+ if (init_attr->create_flags & ~IB_WQ_FLAGS_SCATTER_FCS ||
4116
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
40914117 pr_debug("unsupported create_flags %u\n",
40924118 init_attr->create_flags);
40934119 return ERR_PTR(-EOPNOTSUPP);
....@@ -4097,10 +4123,10 @@
40974123 if (!qp)
40984124 return ERR_PTR(-ENOMEM);
40994125
4126
+ mutex_init(&qp->mutex);
41004127 qp->pri.vid = 0xFFFF;
41014128 qp->alt.vid = 0xFFFF;
41024129
4103
- memset(&ib_qp_init_attr, 0, sizeof(ib_qp_init_attr));
41044130 ib_qp_init_attr.qp_context = init_attr->wq_context;
41054131 ib_qp_init_attr.qp_type = IB_QPT_RAW_PACKET;
41064132 ib_qp_init_attr.cap.max_recv_wr = init_attr->max_wr;
....@@ -4111,8 +4137,7 @@
41114137 if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS)
41124138 ib_qp_init_attr.create_flags |= IB_QP_CREATE_SCATTER_FCS;
41134139
4114
- err = create_qp_common(dev, pd, MLX4_IB_RWQ_SRC, &ib_qp_init_attr,
4115
- udata, 0, &qp);
4140
+ err = create_rq(pd, &ib_qp_init_attr, udata, qp);
41164141 if (err) {
41174142 kfree(qp);
41184143 return ERR_PTR(err);
....@@ -4137,7 +4162,8 @@
41374162 }
41384163 }
41394164
4140
-static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state)
4165
+static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state,
4166
+ struct ib_udata *udata)
41414167 {
41424168 struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
41434169 enum ib_qp_state qp_cur_state;
....@@ -4161,7 +4187,8 @@
41614187 attr_mask = IB_QP_PORT;
41624188
41634189 err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, &attr,
4164
- attr_mask, IB_QPS_RESET, IB_QPS_INIT);
4190
+ attr_mask, IB_QPS_RESET, IB_QPS_INIT,
4191
+ udata);
41654192 if (err) {
41664193 pr_debug("WQN=0x%06x failed to apply RST->INIT on the HW QP\n",
41674194 ibwq->wq_num);
....@@ -4173,12 +4200,13 @@
41734200
41744201 attr_mask = 0;
41754202 err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL, attr_mask,
4176
- qp_cur_state, qp_new_state);
4203
+ qp_cur_state, qp_new_state, udata);
41774204
41784205 if (err && (qp_cur_state == IB_QPS_INIT)) {
41794206 qp_new_state = IB_QPS_RESET;
41804207 if (__mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL,
4181
- attr_mask, IB_QPS_INIT, IB_QPS_RESET)) {
4208
+ attr_mask, IB_QPS_INIT, IB_QPS_RESET,
4209
+ udata)) {
41824210 pr_warn("WQN=0x%06x failed with reverting HW's resources failure\n",
41834211 ibwq->wq_num);
41844212 qp_new_state = IB_QPS_INIT;
....@@ -4218,13 +4246,8 @@
42184246 if (wq_attr_mask & IB_WQ_FLAGS)
42194247 return -EOPNOTSUPP;
42204248
4221
- cur_state = wq_attr_mask & IB_WQ_CUR_STATE ? wq_attr->curr_wq_state :
4222
- ibwq->state;
4223
- new_state = wq_attr_mask & IB_WQ_STATE ? wq_attr->wq_state : cur_state;
4224
-
4225
- if (cur_state < IB_WQS_RESET || cur_state > IB_WQS_ERR ||
4226
- new_state < IB_WQS_RESET || new_state > IB_WQS_ERR)
4227
- return -EINVAL;
4249
+ cur_state = wq_attr->curr_wq_state;
4250
+ new_state = wq_attr->wq_state;
42284251
42294252 if ((new_state == IB_WQS_RDY) && (cur_state == IB_WQS_ERR))
42304253 return -EINVAL;
....@@ -4241,7 +4264,7 @@
42414264 * WQ, so we can apply its port on the WQ.
42424265 */
42434266 if (qp->rss_usecnt)
4244
- err = _mlx4_ib_modify_wq(ibwq, new_state);
4267
+ err = _mlx4_ib_modify_wq(ibwq, new_state, udata);
42454268
42464269 if (!err)
42474270 ibwq->state = new_state;
....@@ -4251,7 +4274,7 @@
42514274 return err;
42524275 }
42534276
4254
-int mlx4_ib_destroy_wq(struct ib_wq *ibwq)
4277
+int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
42554278 {
42564279 struct mlx4_ib_dev *dev = to_mdev(ibwq->device);
42574280 struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
....@@ -4259,41 +4282,38 @@
42594282 if (qp->counter_index)
42604283 mlx4_ib_free_qp_counter(dev, qp);
42614284
4262
- destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, 1);
4285
+ destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata);
42634286
42644287 kfree(qp);
4265
-
42664288 return 0;
42674289 }
42684290
4269
-struct ib_rwq_ind_table
4270
-*mlx4_ib_create_rwq_ind_table(struct ib_device *device,
4271
- struct ib_rwq_ind_table_init_attr *init_attr,
4272
- struct ib_udata *udata)
4291
+int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table,
4292
+ struct ib_rwq_ind_table_init_attr *init_attr,
4293
+ struct ib_udata *udata)
42734294 {
4274
- struct ib_rwq_ind_table *rwq_ind_table;
42754295 struct mlx4_ib_create_rwq_ind_tbl_resp resp = {};
42764296 unsigned int ind_tbl_size = 1 << init_attr->log_ind_tbl_size;
4297
+ struct ib_device *device = rwq_ind_table->device;
42774298 unsigned int base_wqn;
42784299 size_t min_resp_len;
4279
- int i;
4280
- int err;
4300
+ int i, err = 0;
42814301
42824302 if (udata->inlen > 0 &&
42834303 !ib_is_udata_cleared(udata, 0,
42844304 udata->inlen))
4285
- return ERR_PTR(-EOPNOTSUPP);
4305
+ return -EOPNOTSUPP;
42864306
42874307 min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
42884308 if (udata->outlen && udata->outlen < min_resp_len)
4289
- return ERR_PTR(-EINVAL);
4309
+ return -EINVAL;
42904310
42914311 if (ind_tbl_size >
42924312 device->attrs.rss_caps.max_rwq_indirection_table_size) {
42934313 pr_debug("log_ind_tbl_size = %d is bigger than supported = %d\n",
42944314 ind_tbl_size,
42954315 device->attrs.rss_caps.max_rwq_indirection_table_size);
4296
- return ERR_PTR(-EINVAL);
4316
+ return -EINVAL;
42974317 }
42984318
42994319 base_wqn = init_attr->ind_tbl[0]->wq_num;
....@@ -4301,39 +4321,23 @@
43014321 if (base_wqn % ind_tbl_size) {
43024322 pr_debug("WQN=0x%x isn't aligned with indirection table size\n",
43034323 base_wqn);
4304
- return ERR_PTR(-EINVAL);
4324
+ return -EINVAL;
43054325 }
43064326
43074327 for (i = 1; i < ind_tbl_size; i++) {
43084328 if (++base_wqn != init_attr->ind_tbl[i]->wq_num) {
43094329 pr_debug("indirection table's WQNs aren't consecutive\n");
4310
- return ERR_PTR(-EINVAL);
4330
+ return -EINVAL;
43114331 }
43124332 }
4313
-
4314
- rwq_ind_table = kzalloc(sizeof(*rwq_ind_table), GFP_KERNEL);
4315
- if (!rwq_ind_table)
4316
- return ERR_PTR(-ENOMEM);
43174333
43184334 if (udata->outlen) {
43194335 resp.response_length = offsetof(typeof(resp), response_length) +
43204336 sizeof(resp.response_length);
43214337 err = ib_copy_to_udata(udata, &resp, resp.response_length);
4322
- if (err)
4323
- goto err;
43244338 }
43254339
4326
- return rwq_ind_table;
4327
-
4328
-err:
4329
- kfree(rwq_ind_table);
4330
- return ERR_PTR(err);
4331
-}
4332
-
4333
-int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
4334
-{
4335
- kfree(ib_rwq_ind_tbl);
4336
- return 0;
4340
+ return err;
43374341 }
43384342
43394343 struct mlx4_ib_drain_cqe {