forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/infiniband/hw/mthca/mthca_qp.c
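
(Inferred summary; the commit carries no message. From the diff, this appears to backport several upstream mthca/RDMA cleanups into the SDK kernel: __attribute__((packed)) is replaced by the __packed shorthand; struct ib_udata *udata is threaded through the QP allocation and modify paths, so user-vs-kernel QPs are detected via udata and the user context is obtained with rdma_udata_to_drv_context() instead of dereferencing uobject->context; the special-QP state is reached through qp->sqp rather than to_msqp() and struct mthca_sqp parameters; ib_modify_qp_is_ok() loses its link-layer argument; and uses of the retired uninitialized_var() and mmiowb() helpers are removed together with their comments.)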
@@ -42,6 +42,7 @@
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_cache.h>
 #include <rdma/ib_pack.h>
+#include <rdma/uverbs_ioctl.h>
 
 #include "mthca_dev.h"
 #include "mthca_cmd.h"
@@ -114,7 +115,7 @@
 	u8 hop_limit;
 	__be32 sl_tclass_flowlabel;
 	u8 rgid[16];
-} __attribute__((packed));
+} __packed;
 
 struct mthca_qp_context {
 	__be32 flags;
@@ -153,14 +154,14 @@
 	__be16 rq_wqe_counter;	/* reserved on Tavor */
 	__be16 sq_wqe_counter;	/* reserved on Tavor */
 	u32 reserved3[18];
-} __attribute__((packed));
+} __packed;
 
 struct mthca_qp_param {
 	__be32 opt_param_mask;
 	u32 reserved1;
 	struct mthca_qp_context context;
 	u32 reserved2[62];
-} __attribute__((packed));
+} __packed;
 
 enum {
 	MTHCA_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
@@ -554,10 +555,14 @@
 
 static int __mthca_modify_qp(struct ib_qp *ibqp,
 			     const struct ib_qp_attr *attr, int attr_mask,
-			     enum ib_qp_state cur_state, enum ib_qp_state new_state)
+			     enum ib_qp_state cur_state,
+			     enum ib_qp_state new_state,
+			     struct ib_udata *udata)
 {
 	struct mthca_dev *dev = to_mdev(ibqp->device);
 	struct mthca_qp *qp = to_mqp(ibqp);
+	struct mthca_ucontext *context = rdma_udata_to_drv_context(
+		udata, struct mthca_ucontext, ibucontext);
 	struct mthca_mailbox *mailbox;
 	struct mthca_qp_param *qp_param;
 	struct mthca_qp_context *qp_context;
@@ -619,8 +624,7 @@
 	/* leave arbel_sched_queue as 0 */
 
 	if (qp->ibqp.uobject)
-		qp_context->usr_page =
-			cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
+		qp_context->usr_page = cpu_to_be32(context->uar.index);
 	else
 		qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
 	qp_context->local_qpn = cpu_to_be32(qp->qpn);
@@ -805,7 +809,7 @@
 		qp->alt_port = attr->alt_port_num;
 
 	if (is_sqp(dev, qp))
-		store_attrs(to_msqp(qp), attr, attr_mask);
+		store_attrs(qp->sqp, attr, attr_mask);
 
 	/*
 	 * If we moved QP0 to RTR, bring the IB link up; if we moved
@@ -872,8 +876,8 @@
 
 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
-	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
-				IB_LINK_LAYER_UNSPECIFIED)) {
+	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+				attr_mask)) {
 		mthca_dbg(dev, "Bad QP transition (transport %d) "
 			  "%d->%d with attr 0x%08x\n",
 			  qp->transport, cur_state, new_state,
@@ -913,7 +917,8 @@
 		goto out;
 	}
 
-	err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
+	err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state,
+				udata);
 
 out:
 	mutex_unlock(&qp->mutex);
@@ -981,7 +986,8 @@
  */
 static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
 			       struct mthca_pd *pd,
-			       struct mthca_qp *qp)
+			       struct mthca_qp *qp,
+			       struct ib_udata *udata)
 {
 	int size;
 	int err = -ENOMEM;
@@ -1048,7 +1054,7 @@
 	 * allocate anything. All we need is to calculate the WQE
 	 * sizes and the send_wqe_offset, so we're done now.
 	 */
-	if (pd->ibpd.uobject)
+	if (udata)
 		return 0;
 
 	size = PAGE_ALIGN(qp->send_wqe_offset +
@@ -1155,7 +1161,8 @@
 				 struct mthca_cq *send_cq,
 				 struct mthca_cq *recv_cq,
 				 enum ib_sig_type send_policy,
-				 struct mthca_qp *qp)
+				 struct mthca_qp *qp,
+				 struct ib_udata *udata)
 {
 	int ret;
 	int i;
@@ -1178,7 +1185,7 @@
 	if (ret)
 		return ret;
 
-	ret = mthca_alloc_wqe_buf(dev, pd, qp);
+	ret = mthca_alloc_wqe_buf(dev, pd, qp, udata);
 	if (ret) {
 		mthca_unmap_memfree(dev, qp);
 		return ret;
@@ -1191,7 +1198,7 @@
 	 * will be allocated and buffers will be initialized in
 	 * userspace.
 	 */
-	if (pd->ibpd.uobject)
+	if (udata)
 		return 0;
 
 	ret = mthca_alloc_memfree(dev, qp);
@@ -1285,7 +1292,8 @@
 		   enum ib_qp_type type,
 		   enum ib_sig_type send_policy,
 		   struct ib_qp_cap *cap,
-		   struct mthca_qp *qp)
+		   struct mthca_qp *qp,
+		   struct ib_udata *udata)
 {
 	int err;
 
@@ -1308,7 +1316,7 @@
 	qp->port = 0;
 
 	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
-				    send_policy, qp);
+				    send_policy, qp, udata);
 	if (err) {
 		mthca_free(&dev->qp_table.alloc, qp->qpn);
 		return err;
@@ -1360,38 +1368,40 @@
 		    struct ib_qp_cap *cap,
 		    int qpn,
 		    int port,
-		    struct mthca_sqp *sqp)
+		    struct mthca_qp *qp,
+		    struct ib_udata *udata)
 {
 	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
 	int err;
 
-	sqp->qp.transport = MLX;
-	err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
+	qp->transport = MLX;
+	err = mthca_set_qp_size(dev, cap, pd, qp);
 	if (err)
 		return err;
 
-	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
-	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
-					     &sqp->header_dma, GFP_KERNEL);
-	if (!sqp->header_buf)
+	qp->sqp->header_buf_size = qp->sq.max * MTHCA_UD_HEADER_SIZE;
+	qp->sqp->header_buf =
+		dma_alloc_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
+				   &qp->sqp->header_dma, GFP_KERNEL);
+	if (!qp->sqp->header_buf)
 		return -ENOMEM;
 
 	spin_lock_irq(&dev->qp_table.lock);
 	if (mthca_array_get(&dev->qp_table.qp, mqpn))
 		err = -EBUSY;
 	else
-		mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
+		mthca_array_set(&dev->qp_table.qp, mqpn, qp);
 	spin_unlock_irq(&dev->qp_table.lock);
 
 	if (err)
 		goto err_out;
 
-	sqp->qp.port      = port;
-	sqp->qp.qpn       = mqpn;
-	sqp->qp.transport = MLX;
+	qp->port      = port;
+	qp->qpn       = mqpn;
+	qp->transport = MLX;
 
 	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
-				    send_policy, &sqp->qp);
+				    send_policy, qp, udata);
 	if (err)
 		goto err_out_free;
 
@@ -1412,10 +1422,9 @@
 
 	mthca_unlock_cqs(send_cq, recv_cq);
 
- err_out:
-	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
-			  sqp->header_buf, sqp->header_dma);
-
+err_out:
+	dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
+			  qp->sqp->header_buf, qp->sqp->header_dma);
 	return err;
 }
 
@@ -1478,20 +1487,19 @@
 
 	if (is_sqp(dev, qp)) {
 		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
-		dma_free_coherent(&dev->pdev->dev,
-				  to_msqp(qp)->header_buf_size,
-				  to_msqp(qp)->header_buf,
-				  to_msqp(qp)->header_dma);
+		dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
+				  qp->sqp->header_buf, qp->sqp->header_dma);
 	} else
 		mthca_free(&dev->qp_table.alloc, qp->qpn);
 }
 
 /* Create UD header for an MLX send and build a data segment for it */
-static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
-			    int ind, const struct ib_ud_wr *wr,
+static int build_mlx_header(struct mthca_dev *dev, struct mthca_qp *qp, int ind,
+			    const struct ib_ud_wr *wr,
 			    struct mthca_mlx_seg *mlx,
 			    struct mthca_data_seg *data)
 {
+	struct mthca_sqp *sqp = qp->sqp;
 	int header_size;
 	int err;
 	u16 pkey;
@@ -1504,7 +1512,7 @@
 	if (err)
 		return err;
 	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
-	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
+	mlx->flags |= cpu_to_be32((!qp->ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
 				  (sqp->ud_header.lrh.destination_lid ==
 				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
 				  (sqp->ud_header.lrh.service_level << 8));
@@ -1525,29 +1533,29 @@
 			return -EINVAL;
 	}
 
-	sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
+	sqp->ud_header.lrh.virtual_lane = !qp->ibqp.qp_num ? 15 : 0;
 	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
 		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
 	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
-	if (!sqp->qp.ibqp.qp_num)
-		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
-				   sqp->pkey_index, &pkey);
+	if (!qp->ibqp.qp_num)
+		ib_get_cached_pkey(&dev->ib_dev, qp->port, sqp->pkey_index,
+				   &pkey);
 	else
-		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
-				   wr->pkey_index, &pkey);
+		ib_get_cached_pkey(&dev->ib_dev, qp->port, wr->pkey_index,
+				   &pkey);
 	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
 	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
 	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
 	sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
 					       sqp->qkey : wr->remote_qkey);
-	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
+	sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num);
 
 	header_size = ib_ud_header_pack(&sqp->ud_header,
 					sqp->header_buf +
 					ind * MTHCA_UD_HEADER_SIZE);
 
 	data->byte_count = cpu_to_be32(header_size);
-	data->lkey = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
+	data->lkey = cpu_to_be32(to_mpd(qp->ibqp.pd)->ntmr.ibmr.lkey);
 	data->addr = cpu_to_be64(sqp->header_dma +
 				 ind * MTHCA_UD_HEADER_SIZE);
 
@@ -1630,8 +1638,8 @@
 	 * without initializing f0 and size0, and they are in fact
 	 * never used uninitialized.
 	 */
-	int uninitialized_var(size0);
-	u32 uninitialized_var(f0);
+	int size0;
+	u32 f0;
 	int ind;
 	u8 op0 = 0;
 
@@ -1726,9 +1734,9 @@
 			break;
 
 		case MLX:
-			err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr),
-					       wqe - sizeof (struct mthca_next_seg),
-					       wqe);
+			err = build_mlx_header(
+				dev, qp, ind, ud_wr(wr),
+				wqe - sizeof(struct mthca_next_seg), wqe);
 			if (err) {
 				*bad_wr = wr;
 				goto out;
@@ -1800,11 +1808,6 @@
 			      (qp->qpn << 8) | size0,
 			      dev->kar + MTHCA_SEND_DOORBELL,
 			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
-		/*
-		 * Make sure doorbells don't leak out of SQ spinlock
-		 * and reach the HCA out of order:
-		 */
-		mmiowb();
 	}
 
 	qp->sq.next_ind = ind;
@@ -1831,7 +1834,7 @@
 	 * without initializing size0, and it is in fact never used
 	 * uninitialized.
 	 */
-	int uninitialized_var(size0);
+	int size0;
 	int ind;
 	void *wqe;
 	void *prev_wqe;
@@ -1915,12 +1918,6 @@
 	qp->rq.next_ind = ind;
 	qp->rq.head += nreq;
 
-	/*
-	 * Make sure doorbells don't leak out of RQ spinlock and reach
-	 * the HCA out of order:
-	 */
-	mmiowb();
-
 	spin_unlock_irqrestore(&qp->rq.lock, flags);
 	return err;
 }
@@ -1945,8 +1942,8 @@
 	 * without initializing f0 and size0, and they are in fact
 	 * never used uninitialized.
 	 */
-	int uninitialized_var(size0);
-	u32 uninitialized_var(f0);
+	int size0;
+	u32 f0;
 	int ind;
 	u8 op0 = 0;
 
@@ -2067,9 +2064,9 @@
 			break;
 
 		case MLX:
-			err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr),
-					       wqe - sizeof (struct mthca_next_seg),
-					       wqe);
+			err = build_mlx_header(
+				dev, qp, ind, ud_wr(wr),
+				wqe - sizeof(struct mthca_next_seg), wqe);
 			if (err) {
 				*bad_wr = wr;
 				goto out;
@@ -2154,12 +2151,6 @@
 		mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL,
 			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
 	}
-
-	/*
-	 * Make sure doorbells don't leak out of SQ spinlock and reach
-	 * the HCA out of order:
-	 */
-	mmiowb();
 
 	spin_unlock_irqrestore(&qp->sq.lock, flags);
 	return err;
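
Reviewer note: below is a minimal sketch, not part of the patch, of the rdma_udata_to_drv_context() pattern that __mthca_modify_qp() adopts above. The mthca_* types are the driver's real ones; the helper name example_pick_uar_index is hypothetical and exists only to illustrate the lookup.

#include <rdma/uverbs_ioctl.h>	/* rdma_udata_to_drv_context() */
#include "mthca_dev.h"		/* struct mthca_dev, struct mthca_ucontext */

/* Hypothetical helper mirroring the usr_page selection in the hunk above. */
static u32 example_pick_uar_index(struct mthca_dev *dev,
				  struct ib_qp *ibqp,
				  struct ib_udata *udata)
{
	/*
	 * Resolve the driver-private ucontext from the udata passed down
	 * by the uverbs layer. The macro yields NULL when udata is NULL
	 * (a kernel consumer), so it is only dereferenced on the
	 * userspace path, guarded by ibqp->uobject below.
	 */
	struct mthca_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);

	if (ibqp->uobject)
		return context->uar.index;	/* user QP: that context's UAR */
	return dev->driver_uar.index;		/* kernel QP: driver's UAR */
}

Behaviour should be unchanged; as far as I can tell, the upstream point is that drivers stop reaching through qp->ibqp.uobject->context and instead derive the ucontext from the udata of the current call.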