forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/infiniband/hw/mlx4/qp.c
@@ -41,6 +41,7 @@
 #include <rdma/ib_pack.h>
 #include <rdma/ib_addr.h>
 #include <rdma/ib_mad.h>
+#include <rdma/uverbs_ioctl.h>

 #include <linux/mlx4/driver.h>
 #include <linux/mlx4/qp.h>
@@ -52,7 +53,8 @@
 			      struct mlx4_ib_cq *recv_cq);
 static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
 			       struct mlx4_ib_cq *recv_cq);
-static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state);
+static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state,
+			      struct ib_udata *udata);

 enum {
 	MLX4_IB_ACK_REQ_FREQ	= 8,
@@ -63,27 +65,6 @@
 	MLX4_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
 	MLX4_IB_LINK_TYPE_IB		= 0,
 	MLX4_IB_LINK_TYPE_ETH		= 1
-};
-
-enum {
-	/*
-	 * Largest possible UD header: send with GRH and immediate
-	 * data plus 18 bytes for an Ethernet header with VLAN/802.1Q
-	 * tag.  (LRH would only use 8 bytes, so Ethernet is the
-	 * biggest case)
-	 */
-	MLX4_IB_UD_HEADER_SIZE		= 82,
-	MLX4_IB_LSO_HEADER_SPARE	= 128,
-};
-
-struct mlx4_ib_sqp {
-	struct mlx4_ib_qp	qp;
-	int			pkey_index;
-	u32			qkey;
-	u32			send_psn;
-	struct ib_ud_header	ud_header;
-	u8			header_buf[MLX4_IB_UD_HEADER_SIZE];
-	struct ib_qp		*roce_v2_gsi;
 };

 enum {
@@ -120,11 +101,6 @@
 	MLX4_IB_QP_SRC	= 0,
 	MLX4_IB_RWQ_SRC	= 1,
 };
-
-static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
-{
-	return container_of(mqp, struct mlx4_ib_sqp, qp);
-}

 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
 {
@@ -323,7 +299,7 @@
 }

 static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
-		       int is_user, int has_rq, struct mlx4_ib_qp *qp,
+		       bool is_user, bool has_rq, struct mlx4_ib_qp *qp,
 		       u32 inl_recv_sz)
 {
 	/* Sanity check RQ size before proceeding */
@@ -401,7 +377,7 @@
 	 * We need to leave 2 KB + 1 WR of headroom in the SQ to
 	 * allow HW to prefetch.
 	 */
-	qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1;
+	qp->sq_spare_wqes = MLX4_IB_SQ_HEADROOM(qp->sq.wqe_shift);
 	qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr +
 					    qp->sq_spare_wqes);

@@ -436,9 +412,13 @@
 			    struct mlx4_ib_qp *qp,
 			    struct mlx4_ib_create_qp *ucmd)
 {
+	u32 cnt;
+
 	/* Sanity check SQ size before proceeding */
-	if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes ||
-	    ucmd->log_sq_stride >
+	if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
+	    cnt > dev->dev->caps.max_wqes)
+		return -EINVAL;
+	if (ucmd->log_sq_stride >
 	    ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
 	    ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
 		return -EINVAL;
@@ -504,10 +484,10 @@
 	kfree(qp->sqp_proxy_rcv);
 }

-static int qp_has_rq(struct ib_qp_init_attr *attr)
+static bool qp_has_rq(struct ib_qp_init_attr *attr)
 {
 	if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT)
-		return 0;
+		return false;

 	return !attr->srq;
 }
@@ -550,15 +530,15 @@
 		return (-EOPNOTSUPP);
 	}

-	if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4	|
-					  MLX4_IB_RX_HASH_DST_IPV4	|
-					  MLX4_IB_RX_HASH_SRC_IPV6	|
-					  MLX4_IB_RX_HASH_DST_IPV6	|
-					  MLX4_IB_RX_HASH_SRC_PORT_TCP	|
-					  MLX4_IB_RX_HASH_DST_PORT_TCP	|
-					  MLX4_IB_RX_HASH_SRC_PORT_UDP	|
-					  MLX4_IB_RX_HASH_DST_PORT_UDP	|
-					  MLX4_IB_RX_HASH_INNER)) {
+	if (ucmd->rx_hash_fields_mask & ~(u64)(MLX4_IB_RX_HASH_SRC_IPV4	|
+					       MLX4_IB_RX_HASH_DST_IPV4	|
+					       MLX4_IB_RX_HASH_SRC_IPV6	|
+					       MLX4_IB_RX_HASH_DST_IPV6	|
+					       MLX4_IB_RX_HASH_SRC_PORT_TCP	|
+					       MLX4_IB_RX_HASH_DST_PORT_TCP	|
+					       MLX4_IB_RX_HASH_SRC_PORT_UDP	|
+					       MLX4_IB_RX_HASH_DST_PORT_UDP	|
+					       MLX4_IB_RX_HASH_INNER)) {
 		pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
 			 ucmd->rx_hash_fields_mask);
 		return (-EOPNOTSUPP);
@@ -654,8 +634,6 @@
 	if (err)
 		goto err_qpn;

-	mutex_init(&qp->mutex);
-
 	INIT_LIST_HEAD(&qp->gid_list);
 	INIT_LIST_HEAD(&qp->steering_rules);

@@ -694,80 +672,72 @@
 	return err;
 }

-static struct ib_qp *_mlx4_ib_create_qp_rss(struct ib_pd *pd,
-					    struct ib_qp_init_attr *init_attr,
-					    struct ib_udata *udata)
+static int _mlx4_ib_create_qp_rss(struct ib_pd *pd, struct mlx4_ib_qp *qp,
+				  struct ib_qp_init_attr *init_attr,
+				  struct ib_udata *udata)
 {
-	struct mlx4_ib_qp *qp;
 	struct mlx4_ib_create_qp_rss ucmd = {};
 	size_t required_cmd_sz;
 	int err;

 	if (!udata) {
 		pr_debug("RSS QP with NULL udata\n");
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}

 	if (udata->outlen)
-		return ERR_PTR(-EOPNOTSUPP);
+		return -EOPNOTSUPP;

 	required_cmd_sz = offsetof(typeof(ucmd), reserved1) +
 					sizeof(ucmd.reserved1);
 	if (udata->inlen < required_cmd_sz) {
 		pr_debug("invalid inlen\n");
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}

 	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
 		pr_debug("copy failed\n");
-		return ERR_PTR(-EFAULT);
+		return -EFAULT;
 	}

 	if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)))
-		return ERR_PTR(-EOPNOTSUPP);
+		return -EOPNOTSUPP;

 	if (ucmd.comp_mask || ucmd.reserved1)
-		return ERR_PTR(-EOPNOTSUPP);
+		return -EOPNOTSUPP;

 	if (udata->inlen > sizeof(ucmd) &&
 	    !ib_is_udata_cleared(udata, sizeof(ucmd),
 				 udata->inlen - sizeof(ucmd))) {
 		pr_debug("inlen is not supported\n");
-		return ERR_PTR(-EOPNOTSUPP);
+		return -EOPNOTSUPP;
 	}

 	if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
 		pr_debug("RSS QP with unsupported QP type %d\n",
 			 init_attr->qp_type);
-		return ERR_PTR(-EOPNOTSUPP);
+		return -EOPNOTSUPP;
 	}

 	if (init_attr->create_flags) {
 		pr_debug("RSS QP doesn't support create flags\n");
-		return ERR_PTR(-EOPNOTSUPP);
+		return -EOPNOTSUPP;
 	}

 	if (init_attr->send_cq || init_attr->cap.max_send_wr) {
 		pr_debug("RSS QP with unsupported send attributes\n");
-		return ERR_PTR(-EOPNOTSUPP);
-	}
-
-	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
-	if (!qp)
-		return ERR_PTR(-ENOMEM);
+		return -EOPNOTSUPP;
+	}

 	qp->pri.vid = 0xFFFF;
 	qp->alt.vid = 0xFFFF;

 	err = create_qp_rss(to_mdev(pd->device), init_attr, &ucmd, qp);
-	if (err) {
-		kfree(qp);
-		return ERR_PTR(err);
-	}
+	if (err)
+		return err;

 	qp->ibqp.qp_num = qp->mqp.qpn;
-
-	return &qp->ibqp;
+	return 0;
 }

 /*
@@ -847,26 +817,154 @@
 	 * reused for further WQN allocations.
	 * The next created WQ will allocate a new range.
 	 */
-		range->dirty = 1;
+		range->dirty = true;
 	}

 	mutex_unlock(&context->wqn_ranges_mutex);
 }

-static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
-			    enum mlx4_ib_source_type src,
-			    struct ib_qp_init_attr *init_attr,
-			    struct ib_udata *udata, int sqpn,
-			    struct mlx4_ib_qp **caller_qp)
+static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
+		     struct ib_udata *udata, struct mlx4_ib_qp *qp)
 {
+	struct mlx4_ib_dev *dev = to_mdev(pd->device);
 	int qpn;
 	int err;
-	struct mlx4_ib_sqp *sqp = NULL;
-	struct mlx4_ib_qp *qp;
+	struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
+		udata, struct mlx4_ib_ucontext, ibucontext);
+	struct mlx4_ib_cq *mcq;
+	unsigned long flags;
+	int range_size;
+	struct mlx4_ib_create_wq wq;
+	size_t copy_len;
+	int shift;
+	int n;
+
+	qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;
+
+	spin_lock_init(&qp->sq.lock);
+	spin_lock_init(&qp->rq.lock);
+	INIT_LIST_HEAD(&qp->gid_list);
+	INIT_LIST_HEAD(&qp->steering_rules);
+
+	qp->state = IB_QPS_RESET;
+
+	copy_len = min(sizeof(struct mlx4_ib_create_wq), udata->inlen);
+
+	if (ib_copy_from_udata(&wq, udata, copy_len)) {
+		err = -EFAULT;
+		goto err;
+	}
+
+	if (wq.comp_mask || wq.reserved[0] || wq.reserved[1] ||
+	    wq.reserved[2]) {
+		pr_debug("user command isn't supported\n");
+		err = -EOPNOTSUPP;
+		goto err;
+	}
+
+	if (wq.log_range_size > ilog2(dev->dev->caps.max_rss_tbl_sz)) {
+		pr_debug("WQN range size must be equal or smaller than %d\n",
+			 dev->dev->caps.max_rss_tbl_sz);
+		err = -EOPNOTSUPP;
+		goto err;
+	}
+	range_size = 1 << wq.log_range_size;
+
+	if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS)
+		qp->flags |= MLX4_IB_QP_SCATTER_FCS;
+
+	err = set_rq_size(dev, &init_attr->cap, true, true, qp, qp->inl_recv_sz);
+	if (err)
+		goto err;
+
+	qp->sq_no_prefetch = 1;
+	qp->sq.wqe_cnt = 1;
+	qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
+	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
+		       (qp->sq.wqe_cnt << qp->sq.wqe_shift);
+
+	qp->umem = ib_umem_get(pd->device, wq.buf_addr, qp->buf_size, 0);
+	if (IS_ERR(qp->umem)) {
+		err = PTR_ERR(qp->umem);
+		goto err;
+	}
+
+	shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
+	err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
+
+	if (err)
+		goto err_buf;
+
+	err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
+	if (err)
+		goto err_mtt;
+
+	err = mlx4_ib_db_map_user(udata, wq.db_addr, &qp->db);
+	if (err)
+		goto err_mtt;
+	qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
+
+	err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn);
+	if (err)
+		goto err_wrid;
+
+	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
+	if (err)
+		goto err_qpn;
+
+	/*
+	 * Hardware wants QPN written in big-endian order (after
+	 * shifting) for send doorbell.  Precompute this value to save
+	 * a little bit when posting sends.
+	 */
+	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
+
+	qp->mqp.event = mlx4_ib_wq_event;
+
+	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+	mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
+			 to_mcq(init_attr->recv_cq));
+	/* Maintain device to QPs access, needed for further handling
+	 * via reset flow
+	 */
+	list_add_tail(&qp->qps_list, &dev->qp_list);
+	/* Maintain CQ to QPs access, needed for further handling
+	 * via reset flow
+	 */
+	mcq = to_mcq(init_attr->send_cq);
+	list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
+	mcq = to_mcq(init_attr->recv_cq);
+	list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
+	mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
+			   to_mcq(init_attr->recv_cq));
+	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+	return 0;
+
+err_qpn:
+	mlx4_ib_release_wqn(context, qp, 0);
+err_wrid:
+	mlx4_ib_db_unmap_user(context, &qp->db);
+
+err_mtt:
+	mlx4_mtt_cleanup(dev->dev, &qp->mtt);
+err_buf:
+	ib_umem_release(qp->umem);
+err:
+	return err;
+}
+
+static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
+			    struct ib_udata *udata, int sqpn,
+			    struct mlx4_ib_qp *qp)
+{
+	struct mlx4_ib_dev *dev = to_mdev(pd->device);
+	int qpn;
+	int err;
+	struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
+		udata, struct mlx4_ib_ucontext, ibucontext);
 	enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
 	struct mlx4_ib_cq *mcq;
 	unsigned long flags;
-	int range_size = 0;

 	/* When tunneling special qps, we use a plain UD qp */
 	if (sqpn) {
@@ -909,76 +1007,41 @@
 		sqpn = qpn;
 	}

-	if (!*caller_qp) {
-		if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
-		    (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
-				MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
-			sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL);
-			if (!sqp)
-				return -ENOMEM;
-			qp = &sqp->qp;
-			qp->pri.vid = 0xFFFF;
-			qp->alt.vid = 0xFFFF;
-		} else {
-			qp = kzalloc(sizeof(struct mlx4_ib_qp), GFP_KERNEL);
-			if (!qp)
-				return -ENOMEM;
-			qp->pri.vid = 0xFFFF;
-			qp->alt.vid = 0xFFFF;
-		}
-	} else
-		qp = *caller_qp;
+	if (init_attr->qp_type == IB_QPT_SMI ||
+	    init_attr->qp_type == IB_QPT_GSI || qp_type == MLX4_IB_QPT_SMI ||
+	    qp_type == MLX4_IB_QPT_GSI ||
+	    (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
+			MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
+		qp->sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL);
+		if (!qp->sqp)
+			return -ENOMEM;
+	}

 	qp->mlx4_ib_qp_type = qp_type;

-	mutex_init(&qp->mutex);
 	spin_lock_init(&qp->sq.lock);
 	spin_lock_init(&qp->rq.lock);
 	INIT_LIST_HEAD(&qp->gid_list);
 	INIT_LIST_HEAD(&qp->steering_rules);

-	qp->state	 = IB_QPS_RESET;
+	qp->state = IB_QPS_RESET;
 	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
 		qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

-
-	if (pd->uobject) {
-		union {
-			struct mlx4_ib_create_qp qp;
-			struct mlx4_ib_create_wq wq;
-		} ucmd;
+	if (udata) {
+		struct mlx4_ib_create_qp ucmd;
 		size_t copy_len;
 		int shift;
 		int n;

-		copy_len = (src == MLX4_IB_QP_SRC) ?
-			   sizeof(struct mlx4_ib_create_qp) :
-			   min(sizeof(struct mlx4_ib_create_wq), udata->inlen);
+		copy_len = sizeof(struct mlx4_ib_create_qp);

 		if (ib_copy_from_udata(&ucmd, udata, copy_len)) {
 			err = -EFAULT;
 			goto err;
 		}

-		if (src == MLX4_IB_RWQ_SRC) {
-			if (ucmd.wq.comp_mask || ucmd.wq.reserved[0] ||
-			    ucmd.wq.reserved[1] || ucmd.wq.reserved[2]) {
-				pr_debug("user command isn't supported\n");
-				err = -EOPNOTSUPP;
-				goto err;
-			}
-
-			if (ucmd.wq.log_range_size >
-			    ilog2(dev->dev->caps.max_rss_tbl_sz)) {
-				pr_debug("WQN range size must be equal or smaller than %d\n",
-					 dev->dev->caps.max_rss_tbl_sz);
-				err = -EOPNOTSUPP;
-				goto err;
-			}
-			range_size = 1 << ucmd.wq.log_range_size;
-		} else {
-			qp->inl_recv_sz = ucmd.qp.inl_recv_sz;
-		}
+		qp->inl_recv_sz = ucmd.inl_recv_sz;

 		if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
 			if (!(dev->dev->caps.flags &
@@ -991,39 +1054,24 @@
 			qp->flags |= MLX4_IB_QP_SCATTER_FCS;
 		}

-		err = set_rq_size(dev, &init_attr->cap, !!pd->uobject,
+		err = set_rq_size(dev, &init_attr->cap, udata,
 				  qp_has_rq(init_attr), qp, qp->inl_recv_sz);
 		if (err)
 			goto err;

-		if (src == MLX4_IB_QP_SRC) {
-			qp->sq_no_prefetch = ucmd.qp.sq_no_prefetch;
+		qp->sq_no_prefetch = ucmd.sq_no_prefetch;

-			err = set_user_sq_size(dev, qp,
-					       (struct mlx4_ib_create_qp *)
-					       &ucmd);
-			if (err)
-				goto err;
-		} else {
-			qp->sq_no_prefetch = 1;
-			qp->sq.wqe_cnt = 1;
-			qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
-			/* Allocated buffer expects to have at least that SQ
-			 * size.
-			 */
-			qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
-				(qp->sq.wqe_cnt << qp->sq.wqe_shift);
-		}
+		err = set_user_sq_size(dev, qp, &ucmd);
+		if (err)
+			goto err;

-		qp->umem = ib_umem_get(pd->uobject->context,
-				(src == MLX4_IB_QP_SRC) ? ucmd.qp.buf_addr :
-				ucmd.wq.buf_addr, qp->buf_size, 0, 0);
+		qp->umem =
+			ib_umem_get(pd->device, ucmd.buf_addr, qp->buf_size, 0);
 		if (IS_ERR(qp->umem)) {
 			err = PTR_ERR(qp->umem);
 			goto err;
 		}

-		n = ib_umem_page_count(qp->umem);
 		shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
 		err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);

@@ -1035,15 +1083,13 @@
 			goto err_mtt;

 		if (qp_has_rq(init_attr)) {
-			err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
-				(src == MLX4_IB_QP_SRC) ? ucmd.qp.db_addr :
-				ucmd.wq.db_addr, &qp->db);
+			err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &qp->db);
 			if (err)
 				goto err_mtt;
 		}
 		qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
 	} else {
-		err = set_rq_size(dev, &init_attr->cap, !!pd->uobject,
+		err = set_rq_size(dev, &init_attr->cap, udata,
 				  qp_has_rq(init_attr), qp, 0);
 		if (err)
 			goto err;
@@ -1109,11 +1155,6 @@
 				goto err_wrid;
 			}
 		}
-	} else if (src == MLX4_IB_RWQ_SRC) {
-		err = mlx4_ib_alloc_wqn(to_mucontext(pd->uobject->context), qp,
-					range_size, &qpn);
-		if (err)
-			goto err_wrid;
 	} else {
 		/* Raw packet QPNs may not have bits 6,7 set in their qp_num;
 		 * otherwise, the WQE BlueFlame setup flow wrongly causes
@@ -1152,11 +1193,7 @@
 	 */
 	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

-	qp->mqp.event = (src == MLX4_IB_QP_SRC) ? mlx4_ib_qp_event :
-						  mlx4_ib_wq_event;
-
-	if (!*caller_qp)
-		*caller_qp = qp;
+	qp->mqp.event = mlx4_ib_qp_event;

 	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
 	mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
@@ -1181,9 +1218,6 @@
 	if (!sqpn) {
 		if (qp->flags & MLX4_IB_QP_NETIF)
 			mlx4_ib_steer_qp_free(dev, qpn, 1);
-		else if (src == MLX4_IB_RWQ_SRC)
-			mlx4_ib_release_wqn(to_mucontext(pd->uobject->context),
-					    qp, 0);
 		else
 			mlx4_qp_release_range(dev->dev, qpn, 1);
 	}
@@ -1191,9 +1225,9 @@
 	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
 		free_proxy_bufs(pd->device, qp);
 err_wrid:
-	if (pd->uobject) {
+	if (udata) {
 		if (qp_has_rq(init_attr))
-			mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
+			mlx4_ib_db_unmap_user(context, &qp->db);
 	} else {
 		kvfree(qp->sq.wrid);
 		kvfree(qp->rq.wrid);
@@ -1203,20 +1237,16 @@
 	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

 err_buf:
-	if (pd->uobject)
-		ib_umem_release(qp->umem);
-	else
+	if (!qp->umem)
 		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
+	ib_umem_release(qp->umem);

 err_db:
-	if (!pd->uobject && qp_has_rq(init_attr))
+	if (!udata && qp_has_rq(init_attr))
 		mlx4_db_free(dev->dev, &qp->db);

 err:
-	if (sqp)
-		kfree(sqp);
-	else if (!*caller_qp)
-		kfree(qp);
+	kfree(qp->sqp);
 	return err;
 }

@@ -1330,11 +1360,11 @@
 	mlx4_qp_free(dev->dev, &qp->mqp);
 	mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
 	del_gid_entries(qp);
-	kfree(qp->rss_ctx);
 }

 static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
-			      enum mlx4_ib_source_type src, int is_user)
+			      enum mlx4_ib_source_type src,
+			      struct ib_udata *udata)
 {
 	struct mlx4_ib_cq *send_cq, *recv_cq;
 	unsigned long flags;
@@ -1376,7 +1406,7 @@
 	list_del(&qp->qps_list);
 	list_del(&qp->cq_send_list);
 	list_del(&qp->cq_recv_list);
-	if (!is_user) {
+	if (!udata) {
 		__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
 				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
 		if (send_cq != recv_cq)
@@ -1394,22 +1424,28 @@
 		if (qp->flags & MLX4_IB_QP_NETIF)
 			mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
 		else if (src == MLX4_IB_RWQ_SRC)
-			mlx4_ib_release_wqn(to_mucontext(
-					    qp->ibwq.uobject->context), qp, 1);
+			mlx4_ib_release_wqn(
+				rdma_udata_to_drv_context(
+					udata,
+					struct mlx4_ib_ucontext,
+					ibucontext),
+				qp, 1);
 		else
 			mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
 	}

 	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

-	if (is_user) {
+	if (udata) {
 		if (qp->rq.wqe_cnt) {
-			struct mlx4_ib_ucontext *mcontext = !src ?
-				to_mucontext(qp->ibqp.uobject->context) :
-				to_mucontext(qp->ibwq.uobject->context);
+			struct mlx4_ib_ucontext *mcontext =
+				rdma_udata_to_drv_context(
+					udata,
+					struct mlx4_ib_ucontext,
+					ibucontext);
+
 			mlx4_ib_db_unmap_user(mcontext, &qp->db);
 		}
-		ib_umem_release(qp->umem);
 	} else {
 		kvfree(qp->sq.wrid);
 		kvfree(qp->rq.wrid);
@@ -1420,6 +1456,7 @@
 		if (qp->rq.wqe_cnt)
 			mlx4_db_free(dev->dev, &qp->db);
 	}
+	ib_umem_release(qp->umem);

 	del_gid_entries(qp);
 }
@@ -1441,17 +1478,16 @@
 	return dev->dev->caps.spec_qps[attr->port_num - 1].qp1_proxy;
 }

-static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
-					struct ib_qp_init_attr *init_attr,
-					struct ib_udata *udata)
+static int _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp,
+			      struct ib_qp_init_attr *init_attr,
+			      struct ib_udata *udata)
 {
-	struct mlx4_ib_qp *qp = NULL;
 	int err;
 	int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
 	u16 xrcdn = 0;

 	if (init_attr->rwq_ind_tbl)
-		return _mlx4_ib_create_qp_rss(pd, init_attr, udata);
+		return _mlx4_ib_create_qp_rss(pd, qp, init_attr, udata);

 	/*
 	 * We only support LSO, vendor flag1, and multicast loopback blocking,
@@ -1463,16 +1499,16 @@
 					MLX4_IB_SRIOV_SQP |
 					MLX4_IB_QP_NETIF |
 					MLX4_IB_QP_CREATE_ROCE_V2_GSI))
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;

 	if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
 		if (init_attr->qp_type != IB_QPT_UD)
-			return ERR_PTR(-EINVAL);
+			return -EINVAL;
 	}

 	if (init_attr->create_flags) {
 		if (udata && init_attr->create_flags & ~(sup_u_create_flags))
-			return ERR_PTR(-EINVAL);
+			return -EINVAL;

 		if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP |
 						 MLX4_IB_QP_CREATE_ROCE_V2_GSI |
@@ -1482,7 +1518,7 @@
 		     init_attr->qp_type > IB_QPT_GSI) ||
 		    (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI &&
 		     init_attr->qp_type != IB_QPT_GSI))
-			return ERR_PTR(-EINVAL);
+			return -EINVAL;
 	}

 	switch (init_attr->qp_type) {
@@ -1490,58 +1526,46 @@
 		pd = to_mxrcd(init_attr->xrcd)->pd;
 		xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
 		init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq;
-		/* fall through */
+		fallthrough;
 	case IB_QPT_XRC_INI:
 		if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
-			return ERR_PTR(-ENOSYS);
+			return -ENOSYS;
 		init_attr->recv_cq = init_attr->send_cq;
-		/* fall through */
+		fallthrough;
 	case IB_QPT_RC:
 	case IB_QPT_UC:
 	case IB_QPT_RAW_PACKET:
-		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
-		if (!qp)
-			return ERR_PTR(-ENOMEM);
+	case IB_QPT_UD:
 		qp->pri.vid = 0xFFFF;
 		qp->alt.vid = 0xFFFF;
-		/* fall through */
-	case IB_QPT_UD:
-	{
-		err = create_qp_common(to_mdev(pd->device), pd, MLX4_IB_QP_SRC,
-				       init_attr, udata, 0, &qp);
-		if (err) {
-			kfree(qp);
-			return ERR_PTR(err);
-		}
+		err = create_qp_common(pd, init_attr, udata, 0, qp);
+		if (err)
+			return err;

 		qp->ibqp.qp_num = qp->mqp.qpn;
 		qp->xrcdn = xrcdn;
-
 		break;
-	}
 	case IB_QPT_SMI:
 	case IB_QPT_GSI:
 	{
 		int sqpn;

-		/* Userspace is not allowed to create special QPs: */
-		if (udata)
-			return ERR_PTR(-EINVAL);
 		if (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI) {
 			int res = mlx4_qp_reserve_range(to_mdev(pd->device)->dev,
 							1, 1, &sqpn, 0,
 							MLX4_RES_USAGE_DRIVER);

 			if (res)
-				return ERR_PTR(res);
+				return res;
 		} else {
 			sqpn = get_sqp_num(to_mdev(pd->device), init_attr);
 		}

-		err = create_qp_common(to_mdev(pd->device), pd, MLX4_IB_QP_SRC,
-				       init_attr, udata, sqpn, &qp);
+		qp->pri.vid = 0xFFFF;
+		qp->alt.vid = 0xFFFF;
+		err = create_qp_common(pd, init_attr, udata, sqpn, qp);
 		if (err)
-			return ERR_PTR(err);
+			return err;

 		qp->port	= init_attr->port_num;
 		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 :
@@ -1550,25 +1574,33 @@
 	}
 	default:
 		/* Don't support raw QPs */
-		return ERR_PTR(-EINVAL);
+		return -EOPNOTSUPP;
 	}
-
-	return &qp->ibqp;
+	return 0;
 }

 struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
 				struct ib_qp_init_attr *init_attr,
 				struct ib_udata *udata) {
 	struct ib_device *device = pd ? pd->device : init_attr->xrcd->device;
-	struct ib_qp *ibqp;
 	struct mlx4_ib_dev *dev = to_mdev(device);
+	struct mlx4_ib_qp *qp;
+	int ret;

-	ibqp = _mlx4_ib_create_qp(pd, init_attr, udata);
+	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+	if (!qp)
+		return ERR_PTR(-ENOMEM);

-	if (!IS_ERR(ibqp) &&
-	    (init_attr->qp_type == IB_QPT_GSI) &&
+	mutex_init(&qp->mutex);
+	ret = _mlx4_ib_create_qp(pd, qp, init_attr, udata);
+	if (ret) {
+		kfree(qp);
+		return ERR_PTR(ret);
+	}
+
+	if (init_attr->qp_type == IB_QPT_GSI &&
 	    !(init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI)) {
-		struct mlx4_ib_sqp *sqp = to_msqp((to_mqp(ibqp)));
+		struct mlx4_ib_sqp *sqp = qp->sqp;
 		int is_eth = rdma_cap_eth_ah(&dev->ib_dev, init_attr->port_num);

 		if (is_eth &&
@@ -1580,17 +1612,17 @@
 				pr_err("Failed to create GSI QP for RoCEv2 (%ld)\n", PTR_ERR(sqp->roce_v2_gsi));
 				sqp->roce_v2_gsi = NULL;
 			} else {
-				sqp = to_msqp(to_mqp(sqp->roce_v2_gsi));
-				sqp->qp.flags |= MLX4_IB_ROCE_V2_GSI_QP;
+				to_mqp(sqp->roce_v2_gsi)->flags |=
+					MLX4_IB_ROCE_V2_GSI_QP;
 			}

 			init_attr->create_flags &= ~MLX4_IB_QP_CREATE_ROCE_V2_GSI;
 		}
 	}
-	return ibqp;
+	return &qp->ibqp;
 }

-static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
+static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
 {
 	struct mlx4_ib_dev *dev = to_mdev(qp->device);
 	struct mlx4_ib_qp *mqp = to_mqp(qp);
@@ -1611,32 +1643,27 @@
 	if (qp->rwq_ind_tbl) {
 		destroy_qp_rss(dev, mqp);
 	} else {
-		struct mlx4_ib_pd *pd;
-
-		pd = get_pd(mqp);
-		destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, !!pd->ibpd.uobject);
+		destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, udata);
 	}

-	if (is_sqp(dev, mqp))
-		kfree(to_msqp(mqp));
-	else
-		kfree(mqp);
+	kfree(mqp->sqp);
+	kfree(mqp);

 	return 0;
 }

-int mlx4_ib_destroy_qp(struct ib_qp *qp)
+int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
 {
 	struct mlx4_ib_qp *mqp = to_mqp(qp);

 	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
-		struct mlx4_ib_sqp *sqp = to_msqp(mqp);
+		struct mlx4_ib_sqp *sqp = mqp->sqp;

 		if (sqp->roce_v2_gsi)
 			ib_destroy_qp(sqp->roce_v2_gsi);
 	}

-	return _mlx4_ib_destroy_qp(qp);
+	return _mlx4_ib_destroy_qp(qp, udata);
 }

 static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
@@ -1943,7 +1970,8 @@
  * Go over all RSS QP's childes (WQs) and apply their HW state according to
  * their logic state if the RSS QP is the first RSS QP associated for the WQ.
  */
-static int bringup_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, u8 port_num)
+static int bringup_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, u8 port_num,
+			    struct ib_udata *udata)
 {
 	int err = 0;
 	int i;
@@ -1967,7 +1995,7 @@
 		}
 		wq->port = port_num;
 		if ((wq->rss_usecnt == 0) && (ibwq->state == IB_WQS_RDY)) {
-			err = _mlx4_ib_modify_wq(ibwq, IB_WQS_RDY);
+			err = _mlx4_ib_modify_wq(ibwq, IB_WQS_RDY, udata);
 			if (err) {
 				mutex_unlock(&wq->mutex);
 				break;
@@ -1989,7 +2017,8 @@

 			if ((wq->rss_usecnt == 1) &&
 			    (ibwq->state == IB_WQS_RDY))
-				if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET))
+				if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET,
+						       udata))
 					pr_warn("failed to reverse WQN=0x%06x\n",
 						ibwq->wq_num);
 			wq->rss_usecnt--;
@@ -2001,7 +2030,8 @@
 	return err;
 }

-static void bring_down_rss_rwqs(struct ib_rwq_ind_table *ind_tbl)
+static void bring_down_rss_rwqs(struct ib_rwq_ind_table *ind_tbl,
+				struct ib_udata *udata)
 {
 	int i;

@@ -2012,7 +2042,7 @@
 		mutex_lock(&wq->mutex);

 		if ((wq->rss_usecnt == 1) && (ibwq->state == IB_WQS_RDY))
-			if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET))
+			if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET, udata))
 				pr_warn("failed to reverse WQN=%x\n",
 					ibwq->wq_num);
 		wq->rss_usecnt--;
@@ -2044,9 +2074,10 @@

 static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
 			       const struct ib_qp_attr *attr, int attr_mask,
-			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
+			       enum ib_qp_state cur_state,
+			       enum ib_qp_state new_state,
+			       struct ib_udata *udata)
 {
-	struct ib_uobject *ibuobject;
 	struct ib_srq  *ibsrq;
 	const struct ib_gid_attr *gid_attr = NULL;
 	struct ib_rwq_ind_table *rwq_ind_tbl;
@@ -2055,6 +2086,8 @@
 	struct mlx4_ib_qp *qp;
 	struct mlx4_ib_pd *pd;
 	struct mlx4_ib_cq *send_cq, *recv_cq;
+	struct mlx4_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+		udata, struct mlx4_ib_ucontext, ibucontext);
 	struct mlx4_qp_context *context;
 	enum mlx4_qp_optpar optpar = 0;
 	int sqd_event;
@@ -2066,7 +2099,6 @@
 		struct ib_wq *ibwq;

 		ibwq	    = (struct ib_wq *)src;
-		ibuobject   = ibwq->uobject;
 		ibsrq	    = NULL;
 		rwq_ind_tbl = NULL;
 		qp_type     = IB_QPT_RAW_PACKET;
@@ -2077,7 +2109,6 @@
 		struct ib_qp *ibqp;

 		ibqp	    = (struct ib_qp *)src;
-		ibuobject   = ibqp->uobject;
 		ibsrq	    = ibqp->srq;
 		rwq_ind_tbl = ibqp->rwq_ind_tbl;
 		qp_type     = ibqp->qp_type;
@@ -2162,11 +2193,9 @@
 			context->param3 |= cpu_to_be32(1 << 30);
 	}

-	if (ibuobject)
+	if (ucontext)
 		context->usr_page = cpu_to_be32(
-			mlx4_to_hw_uar_index(dev->dev,
-					     to_mucontext(ibuobject->context)
-					     ->uar.index));
+			mlx4_to_hw_uar_index(dev->dev, ucontext->uar.index));
 	else
 		context->usr_page = cpu_to_be32(
 			mlx4_to_hw_uar_index(dev->dev, dev->priv_uar.index));
@@ -2237,8 +2266,10 @@

 		if (is_eth) {
 			gid_attr = attr->ah_attr.grh.sgid_attr;
-			vlan = rdma_vlan_dev_vlan_id(gid_attr->ndev);
-			memcpy(smac, gid_attr->ndev->dev_addr, ETH_ALEN);
+			err = rdma_read_gid_l2_fields(gid_attr, &vlan,
+						      &smac[0]);
+			if (err)
+				goto out;
 		}

 		if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path,
@@ -2298,7 +2329,7 @@
 	context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);

 	/* Set "fast registration enabled" for all kernel QPs */
-	if (!ibuobject)
+	if (!ucontext)
 		context->params1 |= cpu_to_be32(1 << 11);

 	if (attr_mask & IB_QP_RNR_RETRY) {
@@ -2435,7 +2466,7 @@
 	else
 		sqd_event = 0;

-	if (!ibuobject &&
+	if (!ucontext &&
 	    cur_state == IB_QPS_RESET &&
 	    new_state == IB_QPS_INIT)
 		context->rlkey_roce_mode |= (1 << 4);
@@ -2446,7 +2477,7 @@
 	 * headroom is stamped so that the hardware doesn't start
 	 * processing stale work requests.
 	 */
-	if (!ibuobject &&
+	if (!ucontext &&
 	    cur_state == IB_QPS_RESET &&
 	    new_state == IB_QPS_INIT) {
 		struct mlx4_wqe_ctrl_seg *ctrl;
@@ -2488,7 +2519,7 @@
 		qp->alt_port = attr->alt_port_num;

 	if (is_sqp(dev, qp))
-		store_sqp_attrs(to_msqp(qp), attr, attr_mask);
+		store_sqp_attrs(qp->sqp, attr, attr_mask);

 	/*
 	 * If we moved QP0 to RTR, bring the IB link up; if we moved
@@ -2510,7 +2541,7 @@
 	 * entries and reinitialize the QP.
 	 */
 	if (new_state == IB_QPS_RESET) {
-		if (!ibuobject) {
+		if (!ucontext) {
 			mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
 					 ibsrq ? to_msrq(ibsrq) : NULL);
 			if (send_cq != recv_cq)
@@ -2631,7 +2662,6 @@
 static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 			      int attr_mask, struct ib_udata *udata)
 {
-	enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
 	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
 	enum ib_qp_state cur_state, new_state;
@@ -2641,13 +2671,8 @@
 	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

-	if (cur_state != new_state || cur_state != IB_QPS_RESET) {
-		int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
-		ll = rdma_port_get_link_layer(&dev->ib_dev, port);
-	}
-
 	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
-				attr_mask, ll)) {
+				attr_mask)) {
 		pr_debug("qpn 0x%x: invalid attribute mask specified "
 			 "for transition %d to %d. qp_type %d,"
 			 " attr_mask 0x%x\n",
@@ -2742,16 +2767,17 @@
 	}

 	if (ibqp->rwq_ind_tbl && (new_state == IB_QPS_INIT)) {
-		err = bringup_rss_rwqs(ibqp->rwq_ind_tbl, attr->port_num);
+		err = bringup_rss_rwqs(ibqp->rwq_ind_tbl, attr->port_num,
+				       udata);
 		if (err)
 			goto out;
 	}

 	err = __mlx4_ib_modify_qp(ibqp, MLX4_IB_QP_SRC, attr, attr_mask,
-				  cur_state, new_state);
+				  cur_state, new_state, udata);

 	if (ibqp->rwq_ind_tbl && err)
-		bring_down_rss_rwqs(ibqp->rwq_ind_tbl);
+		bring_down_rss_rwqs(ibqp->rwq_ind_tbl, udata);

 	if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
 		attr->port_num = 1;
@@ -2770,7 +2796,7 @@
 	ret = _mlx4_ib_modify_qp(ibqp, attr, attr_mask, udata);

 	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
-		struct mlx4_ib_sqp *sqp = to_msqp(mqp);
+		struct mlx4_ib_sqp *sqp = mqp->sqp;
 		int err = 0;

 		if (sqp->roce_v2_gsi)
@@ -2795,12 +2821,13 @@
 	return -EINVAL;
 }

-static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
+static int build_sriov_qp0_header(struct mlx4_ib_qp *qp,
 				  const struct ib_ud_wr *wr,
 				  void *wqe, unsigned *mlx_seg_len)
 {
-	struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
-	struct ib_device *ib_dev = &mdev->ib_dev;
+	struct mlx4_ib_dev *mdev = to_mdev(qp->ibqp.device);
+	struct mlx4_ib_sqp *sqp = qp->sqp;
+	struct ib_device *ib_dev = qp->ibqp.device;
 	struct mlx4_wqe_mlx_seg *mlx = wqe;
 	struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
 	struct mlx4_ib_ah *ah = to_mah(wr->ah);
@@ -2822,12 +2849,12 @@

 	/* for proxy-qp0 sends, need to add in size of tunnel header */
 	/* for tunnel-qp0 sends, tunnel header is already in s/g list */
-	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER)
+	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER)
 		send_size += sizeof (struct mlx4_ib_tunnel_header);

 	ib_ud_header_init(send_size, 1, 0, 0, 0, 0, 0, 0, &sqp->ud_header);

-	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) {
+	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) {
 		sqp->ud_header.lrh.service_level =
 			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
 		sqp->ud_header.lrh.destination_lid =
@@ -2844,26 +2871,26 @@

 	sqp->ud_header.lrh.virtual_lane    = 0;
 	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
-	err = ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
+	err = ib_get_cached_pkey(ib_dev, qp->port, 0, &pkey);
 	if (err)
 		return err;
 	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
-	if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
+	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
 		sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
 	else
 		sqp->ud_header.bth.destination_qpn =
-			cpu_to_be32(mdev->dev->caps.spec_qps[sqp->qp.port - 1].qp0_tunnel);
+			cpu_to_be32(mdev->dev->caps.spec_qps[qp->port - 1].qp0_tunnel);

 	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
 	if (mlx4_is_master(mdev->dev)) {
-		if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
+		if (mlx4_get_parav_qkey(mdev->dev, qp->mqp.qpn, &qkey))
 			return -EINVAL;
 	} else {
-		if (vf_get_qp0_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
+		if (vf_get_qp0_qkey(mdev->dev, qp->mqp.qpn, &qkey))
 			return -EINVAL;
 	}
 	sqp->ud_header.deth.qkey = cpu_to_be32(qkey);
-	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn);
+	sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->mqp.qpn);

 	sqp->ud_header.bth.opcode        = IB_OPCODE_UD_SEND_ONLY;
 	sqp->ud_header.immediate_present = 0;
@@ -2947,10 +2974,11 @@
 }

 #define MLX4_ROCEV2_QP1_SPORT 0xC000
-static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
+static int build_mlx_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr,
 			    void *wqe, unsigned *mlx_seg_len)
 {
-	struct ib_device *ib_dev = sqp->qp.ibqp.device;
+	struct mlx4_ib_sqp *sqp = qp->sqp;
+	struct ib_device *ib_dev = qp->ibqp.device;
 	struct mlx4_ib_dev *ibdev = to_mdev(ib_dev);
 	struct mlx4_wqe_mlx_seg *mlx = wqe;
 	struct mlx4_wqe_ctrl_seg *ctrl = wqe;
@@ -2974,7 +3002,7 @@
 	for (i = 0; i < wr->wr.num_sge; ++i)
 		send_size += wr->wr.sg_list[i].length;

-	is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
+	is_eth = rdma_port_get_link_layer(qp->ibqp.device, qp->port) == IB_LINK_LAYER_ETHERNET;
 	is_grh = mlx4_ib_ah_grh_present(ah);
 	if (is_eth) {
 		enum ib_gid_type gid_type;
@@ -2988,9 +3016,9 @@
 			if (err)
 				return err;
 		} else  {
-			err = fill_gid_by_hw_index(ibdev, sqp->qp.port,
-					    ah->av.ib.gid_index,
-					    &sgid, &gid_type);
+			err = fill_gid_by_hw_index(ibdev, qp->port,
+						   ah->av.ib.gid_index, &sgid,
+						   &gid_type);
 			if (!err) {
 				is_udp = gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
 				if (is_udp) {
@@ -3006,7 +3034,7 @@
 		}
 		if (ah->av.eth.vlan != cpu_to_be16(0xffff)) {
 			vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
-			is_vlan = 1;
+			is_vlan = true;
 		}
 	}
 	err = ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh,
@@ -3035,13 +3063,18 @@
 			 * indexes don't necessarily match the hw ones, so
 			 * we must use our own cache
 			 */
-			sqp->ud_header.grh.source_gid.global.subnet_prefix =
-				cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov.
-							    demux[sqp->qp.port - 1].
-							    subnet_prefix)));
-			sqp->ud_header.grh.source_gid.global.interface_id =
-				to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
-				guid_cache[ah->av.ib.gid_index];
+			sqp->ud_header.grh.source_gid.global
+				.subnet_prefix =
+				cpu_to_be64(atomic64_read(
+					&(to_mdev(ib_dev)
+						  ->sriov
+						  .demux[qp->port - 1]
+						  .subnet_prefix)));
+			sqp->ud_header.grh.source_gid.global
+				.interface_id =
+				to_mdev(ib_dev)
+					->sriov.demux[qp->port - 1]
+					.guid_cache[ah->av.ib.gid_index];
 		} else {
 			sqp->ud_header.grh.source_gid =
 				ah->ibah.sgid_attr->gid;
@@ -3073,10 +3106,13 @@
 	mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);

 	if (!is_eth) {
-		mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
-					  (sqp->ud_header.lrh.destination_lid ==
-					   IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) |
-					  (sqp->ud_header.lrh.service_level << 8));
+		mlx->flags |=
+			cpu_to_be32((!qp->ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
+				    (sqp->ud_header.lrh.destination_lid ==
+						     IB_LID_PERMISSIVE ?
+					     MLX4_WQE_MLX_SLR :
+					     0) |
+				    (sqp->ud_header.lrh.service_level << 8));
 		if (ah->av.ib.port_pd & cpu_to_be32(0x80000000))
 			mlx->flags |= cpu_to_be32(0x1); /* force loopback */
 		mlx->rlid = sqp->ud_header.lrh.destination_lid;
@@ -3122,21 +3158,23 @@
 			sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
 		}
 	} else {
-		sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 :
-			sl_to_vl(to_mdev(ib_dev),
-				 sqp->ud_header.lrh.service_level,
-				 sqp->qp.port);
-		if (sqp->qp.ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15)
+		sqp->ud_header.lrh.virtual_lane =
+			!qp->ibqp.qp_num ?
+				15 :
+				sl_to_vl(to_mdev(ib_dev),
+					 sqp->ud_header.lrh.service_level,
+					 qp->port);
+		if (qp->ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15)
 			return -EINVAL;
 		if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
 			sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
 	}
 	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
-	if (!sqp->qp.ibqp.qp_num)
-		err = ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index,
+	if (!qp->ibqp.qp_num)
+		err = ib_get_cached_pkey(ib_dev, qp->port, sqp->pkey_index,
 					 &pkey);
 	else
-		err = ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index,
+		err = ib_get_cached_pkey(ib_dev, qp->port, wr->pkey_index,
 					 &pkey);
 	if (err)
 		return err;
@@ -3146,7 +3184,7 @@
 	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
 	sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
 					       sqp->qkey : wr->remote_qkey);
-	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
+	sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num);

 	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);

@@ -3459,24 +3497,24 @@
 	int nreq;
 	int err = 0;
 	unsigned ind;
-	int uninitialized_var(size);
-	unsigned uninitialized_var(seglen);
+	int size;
+	unsigned seglen;
 	__be32 dummy;
 	__be32 *lso_wqe;
-	__be32 uninitialized_var(lso_hdr_sz);
+	__be32 lso_hdr_sz;
 	__be32 blh;
 	int i;
 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);

 	if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
-		struct mlx4_ib_sqp *sqp = to_msqp(qp);
+		struct mlx4_ib_sqp *sqp = qp->sqp;

 		if (sqp->roce_v2_gsi) {
 			struct mlx4_ib_ah *ah = to_mah(ud_wr(wr)->ah);
 			enum ib_gid_type gid_type;
 			union ib_gid gid;

-			if (!fill_gid_by_hw_index(mdev, sqp->qp.port,
+			if (!fill_gid_by_hw_index(mdev, qp->port,
 						  ah->av.ib.gid_index,
 						  &gid, &gid_type))
 				qp = (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ?
@@ -3596,8 +3634,8 @@
 			break;

 		case MLX4_IB_QPT_TUN_SMI_OWNER:
-			err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
-					ctrl, &seglen);
+			err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl,
+						     &seglen);
 			if (unlikely(err)) {
 				*bad_wr = wr;
 				goto out;
@@ -3633,8 +3671,8 @@
 			break;

 		case MLX4_IB_QPT_PROXY_SMI_OWNER:
-			err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
-					ctrl, &seglen);
+			err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl,
+						     &seglen);
 			if (unlikely(err)) {
 				*bad_wr = wr;
 				goto out;
@@ -3667,8 +3705,7 @@

 		case MLX4_IB_QPT_SMI:
 		case MLX4_IB_QPT_GSI:
-			err = build_mlx_header(to_msqp(qp), ud_wr(wr), ctrl,
-					&seglen);
+			err = build_mlx_header(qp, ud_wr(wr), ctrl, &seglen);
 			if (unlikely(err)) {
 				*bad_wr = wr;
 				goto out;
@@ -3753,12 +3790,6 @@

 		writel_relaxed(qp->doorbell_qpn,
 			       to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);
-
-		/*
-		 * Make sure doorbells don't leak out of SQ spinlock
-		 * and reach the HCA out of order.
-		 */
-		mmiowb();

 		stamp_send_wqe(qp, ind + qp->sq_spare_wqes - 1);

@@ -4054,13 +4085,13 @@
 				struct ib_wq_init_attr *init_attr,
 				struct ib_udata *udata)
 {
-	struct mlx4_ib_dev *dev;
-	struct ib_qp_init_attr ib_qp_init_attr;
+	struct mlx4_dev *dev = to_mdev(pd->device)->dev;
+	struct ib_qp_init_attr ib_qp_init_attr = {};
 	struct mlx4_ib_qp *qp;
 	struct mlx4_ib_create_wq ucmd;
 	int err, required_cmd_sz;

-	if (!(udata && pd->uobject))
+	if (!udata)
 		return ERR_PTR(-EINVAL);

 	required_cmd_sz = offsetof(typeof(ucmd), comp_mask) +
@@ -4080,14 +4111,13 @@
 	if (udata->outlen)
 		return ERR_PTR(-EOPNOTSUPP);

-	dev = to_mdev(pd->device);
-
 	if (init_attr->wq_type != IB_WQT_RQ) {
 		pr_debug("unsupported wq type %d\n", init_attr->wq_type);
 		return ERR_PTR(-EOPNOTSUPP);
 	}

-	if (init_attr->create_flags & ~IB_WQ_FLAGS_SCATTER_FCS) {
+	if (init_attr->create_flags & ~IB_WQ_FLAGS_SCATTER_FCS ||
+	    !(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
 		pr_debug("unsupported create_flags %u\n",
 			 init_attr->create_flags);
 		return ERR_PTR(-EOPNOTSUPP);
@@ -4097,10 +4127,10 @@
 	if (!qp)
 		return ERR_PTR(-ENOMEM);

+	mutex_init(&qp->mutex);
 	qp->pri.vid = 0xFFFF;
 	qp->alt.vid = 0xFFFF;

-	memset(&ib_qp_init_attr, 0, sizeof(ib_qp_init_attr));
 	ib_qp_init_attr.qp_context = init_attr->wq_context;
 	ib_qp_init_attr.qp_type = IB_QPT_RAW_PACKET;
 	ib_qp_init_attr.cap.max_recv_wr = init_attr->max_wr;
@@ -4111,8 +4141,7 @@
 	if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS)
 		ib_qp_init_attr.create_flags |= IB_QP_CREATE_SCATTER_FCS;

-	err = create_qp_common(dev, pd, MLX4_IB_RWQ_SRC, &ib_qp_init_attr,
-			       udata, 0, &qp);
+	err = create_rq(pd, &ib_qp_init_attr, udata, qp);
 	if (err) {
 		kfree(qp);
 		return ERR_PTR(err);
@@ -4137,7 +4166,8 @@
 	}
 }

-static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state)
+static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state,
+			      struct ib_udata *udata)
 {
 	struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
 	enum ib_qp_state qp_cur_state;
@@ -4161,7 +4191,8 @@
 		attr_mask = IB_QP_PORT;

 		err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, &attr,
-					  attr_mask, IB_QPS_RESET, IB_QPS_INIT);
+					  attr_mask, IB_QPS_RESET, IB_QPS_INIT,
+					  udata);
 		if (err) {
 			pr_debug("WQN=0x%06x failed to apply RST->INIT on the HW QP\n",
 				 ibwq->wq_num);
@@ -4173,12 +4204,13 @@

 	attr_mask = 0;
 	err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL, attr_mask,
-				  qp_cur_state,  qp_new_state);
+				  qp_cur_state, qp_new_state, udata);

 	if (err && (qp_cur_state == IB_QPS_INIT)) {
 		qp_new_state = IB_QPS_RESET;
 		if (__mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL,
-					attr_mask, IB_QPS_INIT, IB_QPS_RESET)) {
+					attr_mask, IB_QPS_INIT, IB_QPS_RESET,
+					udata)) {
 			pr_warn("WQN=0x%06x failed with reverting HW's resources failure\n",
 				ibwq->wq_num);
 			qp_new_state = IB_QPS_INIT;
@@ -4218,13 +4250,8 @@
 	if (wq_attr_mask & IB_WQ_FLAGS)
 		return -EOPNOTSUPP;

-	cur_state = wq_attr_mask & IB_WQ_CUR_STATE ? wq_attr->curr_wq_state :
-						     ibwq->state;
-	new_state = wq_attr_mask & IB_WQ_STATE ? wq_attr->wq_state : cur_state;
-
-	if (cur_state  < IB_WQS_RESET || cur_state  > IB_WQS_ERR ||
-	    new_state < IB_WQS_RESET || new_state > IB_WQS_ERR)
-		return -EINVAL;
+	cur_state = wq_attr->curr_wq_state;
+	new_state = wq_attr->wq_state;

 	if ((new_state == IB_WQS_RDY) && (cur_state == IB_WQS_ERR))
 		return -EINVAL;
@@ -4241,7 +4268,7 @@
 	 * WQ, so we can apply its port on the WQ.
 	 */
 	if (qp->rss_usecnt)
-		err = _mlx4_ib_modify_wq(ibwq, new_state);
+		err = _mlx4_ib_modify_wq(ibwq, new_state, udata);

 	if (!err)
 		ibwq->state = new_state;
@@ -4251,7 +4278,7 @@
 	return err;
 }

-int mlx4_ib_destroy_wq(struct ib_wq *ibwq)
+int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
 {
 	struct mlx4_ib_dev *dev = to_mdev(ibwq->device);
 	struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
@@ -4259,41 +4286,38 @@
 	if (qp->counter_index)
 		mlx4_ib_free_qp_counter(dev, qp);

-	destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, 1);
+	destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata);

 	kfree(qp);
-
 	return 0;
 }

-struct ib_rwq_ind_table
-*mlx4_ib_create_rwq_ind_table(struct ib_device *device,
-			      struct ib_rwq_ind_table_init_attr *init_attr,
-			      struct ib_udata *udata)
+int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table,
+				 struct ib_rwq_ind_table_init_attr *init_attr,
+				 struct ib_udata *udata)
 {
-	struct ib_rwq_ind_table *rwq_ind_table;
 	struct mlx4_ib_create_rwq_ind_tbl_resp resp = {};
 	unsigned int ind_tbl_size = 1 << init_attr->log_ind_tbl_size;
+	struct ib_device *device = rwq_ind_table->device;
 	unsigned int base_wqn;
 	size_t min_resp_len;
-	int i;
-	int err;
+	int i, err = 0;

 	if (udata->inlen > 0 &&
 	    !ib_is_udata_cleared(udata, 0,
 				 udata->inlen))
-		return ERR_PTR(-EOPNOTSUPP);
+		return -EOPNOTSUPP;

 	min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
 	if (udata->outlen && udata->outlen < min_resp_len)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;

 	if (ind_tbl_size >
 	    device->attrs.rss_caps.max_rwq_indirection_table_size) {
 		pr_debug("log_ind_tbl_size = %d is bigger than supported = %d\n",
 			 ind_tbl_size,
 			 device->attrs.rss_caps.max_rwq_indirection_table_size);
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}

 	base_wqn = init_attr->ind_tbl[0]->wq_num;
@@ -4301,39 +4325,23 @@
 	if (base_wqn % ind_tbl_size) {
 		pr_debug("WQN=0x%x isn't aligned with indirection table size\n",
 			 base_wqn);
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}

 	for (i = 1; i < ind_tbl_size; i++) {
 		if (++base_wqn != init_attr->ind_tbl[i]->wq_num) {
 			pr_debug("indirection table's WQNs aren't consecutive\n");
-			return ERR_PTR(-EINVAL);
+			return -EINVAL;
 		}
 	}
-
-	rwq_ind_table = kzalloc(sizeof(*rwq_ind_table), GFP_KERNEL);
-	if (!rwq_ind_table)
-		return ERR_PTR(-ENOMEM);

 	if (udata->outlen) {
 		resp.response_length = offsetof(typeof(resp), response_length) +
 					sizeof(resp.response_length);
 		err = ib_copy_to_udata(udata, &resp, resp.response_length);
-		if (err)
-			goto err;
 	}

-	return rwq_ind_table;
-
-err:
-	kfree(rwq_ind_table);
-	return ERR_PTR(err);
-}
-
-int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
-{
-	kfree(ib_rwq_ind_tbl);
-	return 0;
+	return err;
 }

 struct mlx4_ib_drain_cqe {