| .. | .. |
| 42 | 42 | #include <rdma/ib_verbs.h> |
| 43 | 43 | #include <rdma/ib_cache.h> |
| 44 | 44 | #include <rdma/ib_pack.h> |
| | 45 | +#include <rdma/uverbs_ioctl.h> |
| 45 | 46 | |
| 46 | 47 | #include "mthca_dev.h" |
| 47 | 48 | #include "mthca_cmd.h" |
| .. | .. |
| 114 | 115 | u8 hop_limit; |
| 115 | 116 | __be32 sl_tclass_flowlabel; |
| 116 | 117 | u8 rgid[16]; |
| 117 | | -} __attribute__((packed)); |
| | 118 | +} __packed; |
| 118 | 119 | |
| 119 | 120 | struct mthca_qp_context { |
| 120 | 121 | __be32 flags; |
| .. | .. |
| 153 | 154 | __be16 rq_wqe_counter; /* reserved on Tavor */ |
| 154 | 155 | __be16 sq_wqe_counter; /* reserved on Tavor */ |
| 155 | 156 | u32 reserved3[18]; |
| 156 | | -} __attribute__((packed)); |
| | 157 | +} __packed; |
| 157 | 158 | |
| 158 | 159 | struct mthca_qp_param { |
| 159 | 160 | __be32 opt_param_mask; |
| 160 | 161 | u32 reserved1; |
| 161 | 162 | struct mthca_qp_context context; |
| 162 | 163 | u32 reserved2[62]; |
| 163 | | -} __attribute__((packed)); |
| | 164 | +} __packed; |
| 164 | 165 | |
| 165 | 166 | enum { |
| 166 | 167 | MTHCA_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, |
| .. | .. |
| 554 | 555 | |
| 555 | 556 | static int __mthca_modify_qp(struct ib_qp *ibqp, |
| 556 | 557 | const struct ib_qp_attr *attr, int attr_mask, |
| 557 | | - enum ib_qp_state cur_state, enum ib_qp_state new_state) |
| | 558 | + enum ib_qp_state cur_state, |
| | 559 | + enum ib_qp_state new_state, |
| | 560 | + struct ib_udata *udata) |
| 558 | 561 | { |
| 559 | 562 | struct mthca_dev *dev = to_mdev(ibqp->device); |
| 560 | 563 | struct mthca_qp *qp = to_mqp(ibqp); |
| | 564 | + struct mthca_ucontext *context = rdma_udata_to_drv_context( |
| | 565 | + udata, struct mthca_ucontext, ibucontext); |
| 561 | 566 | struct mthca_mailbox *mailbox; |
| 562 | 567 | struct mthca_qp_param *qp_param; |
| 563 | 568 | struct mthca_qp_context *qp_context; |
| .. | .. |
| 619 | 624 | /* leave arbel_sched_queue as 0 */ |
| 620 | 625 | |
| 621 | 626 | if (qp->ibqp.uobject) |
| 622 | | - qp_context->usr_page = |
| 623 | | - cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index); |
| | 627 | + qp_context->usr_page = cpu_to_be32(context->uar.index); |
| 624 | 628 | else |
| 625 | 629 | qp_context->usr_page = cpu_to_be32(dev->driver_uar.index); |
| 626 | 630 | qp_context->local_qpn = cpu_to_be32(qp->qpn); |
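For context on the two hunks above: `rdma_udata_to_drv_context()` comes from the newly included `<rdma/uverbs_ioctl.h>` and resolves the `struct ib_udata *` passed into a verbs handler to the driver's private ucontext, evaluating to NULL when the call comes from a kernel consumer that passes no udata. A minimal sketch of the resulting pattern (the helper name `example_uar_index` is hypothetical, not part of this patch; it assumes the mthca types used above):

```c
#include <rdma/uverbs_ioctl.h>

#include "mthca_dev.h"	/* assumed: struct mthca_dev, struct mthca_ucontext */

/* Sketch only: choose the UAR index to program into the QP context. */
static u32 example_uar_index(struct mthca_dev *dev, struct ib_udata *udata)
{
	struct mthca_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);

	/*
	 * Kernel callers pass no udata, so rdma_udata_to_drv_context()
	 * yields NULL and we fall back to the driver-owned UAR, mirroring
	 * the usr_page selection in __mthca_modify_qp() above.
	 */
	return uctx ? uctx->uar.index : dev->driver_uar.index;
}
```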
| .. | .. |
| 805 | 809 | qp->alt_port = attr->alt_port_num; |
| 806 | 810 | |
| 807 | 811 | if (is_sqp(dev, qp)) |
| 808 | | - store_attrs(to_msqp(qp), attr, attr_mask); |
| | 812 | + store_attrs(qp->sqp, attr, attr_mask); |
| 809 | 813 | |
| 810 | 814 | /* |
| 811 | 815 | * If we moved QP0 to RTR, bring the IB link up; if we moved |
| .. | .. |
| 872 | 876 | |
| 873 | 877 | new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; |
| 874 | 878 | |
| 875 | | - if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask, |
| 876 | | - IB_LINK_LAYER_UNSPECIFIED)) { |
| | 879 | + if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, |
| | 880 | + attr_mask)) { |
| 877 | 881 | mthca_dbg(dev, "Bad QP transition (transport %d) " |
| 878 | 882 | "%d->%d with attr 0x%08x\n", |
| 879 | 883 | qp->transport, cur_state, new_state, |
| .. | .. |
| 913 | 917 | goto out; |
| 914 | 918 | } |
| 915 | 919 | |
| 916 | | - err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state); |
| | 920 | + err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state, |
| | 921 | + udata); |
| 917 | 922 | |
| 918 | 923 | out: |
| 919 | 924 | mutex_unlock(&qp->mutex); |
| .. | .. |
| 981 | 986 | */ |
| 982 | 987 | static int mthca_alloc_wqe_buf(struct mthca_dev *dev, |
| 983 | 988 | struct mthca_pd *pd, |
| 984 | | - struct mthca_qp *qp) |
| | 989 | + struct mthca_qp *qp, |
| | 990 | + struct ib_udata *udata) |
| 985 | 991 | { |
| 986 | 992 | int size; |
| 987 | 993 | int err = -ENOMEM; |
| .. | .. |
| 1048 | 1054 | * allocate anything. All we need is to calculate the WQE |
| 1049 | 1055 | * sizes and the send_wqe_offset, so we're done now. |
| 1050 | 1056 | */ |
| 1051 | | - if (pd->ibpd.uobject) |
| | 1057 | + if (udata) |
| 1052 | 1058 | return 0; |
| 1053 | 1059 | |
| 1054 | 1060 | size = PAGE_ALIGN(qp->send_wqe_offset + |
| .. | .. |
| 1155 | 1161 | struct mthca_cq *send_cq, |
| 1156 | 1162 | struct mthca_cq *recv_cq, |
| 1157 | 1163 | enum ib_sig_type send_policy, |
| 1158 | | - struct mthca_qp *qp) |
| | 1164 | + struct mthca_qp *qp, |
| | 1165 | + struct ib_udata *udata) |
| 1159 | 1166 | { |
| 1160 | 1167 | int ret; |
| 1161 | 1168 | int i; |
| .. | .. |
| 1178 | 1185 | if (ret) |
| 1179 | 1186 | return ret; |
| 1180 | 1187 | |
| 1181 | | - ret = mthca_alloc_wqe_buf(dev, pd, qp); |
| | 1188 | + ret = mthca_alloc_wqe_buf(dev, pd, qp, udata); |
| 1182 | 1189 | if (ret) { |
| 1183 | 1190 | mthca_unmap_memfree(dev, qp); |
| 1184 | 1191 | return ret; |
| .. | .. |
| 1191 | 1198 | * will be allocated and buffers will be initialized in |
| 1192 | 1199 | * userspace. |
| 1193 | 1200 | */ |
| 1194 | | - if (pd->ibpd.uobject) |
| | 1201 | + if (udata) |
| 1195 | 1202 | return 0; |
| 1196 | 1203 | |
| 1197 | 1204 | ret = mthca_alloc_memfree(dev, qp); |
| .. | .. |
| 1285 | 1292 | enum ib_qp_type type, |
| 1286 | 1293 | enum ib_sig_type send_policy, |
| 1287 | 1294 | struct ib_qp_cap *cap, |
| 1288 | | - struct mthca_qp *qp) |
| | 1295 | + struct mthca_qp *qp, |
| | 1296 | + struct ib_udata *udata) |
| 1289 | 1297 | { |
| 1290 | 1298 | int err; |
| 1291 | 1299 | |
| .. | .. |
| 1308 | 1316 | qp->port = 0; |
| 1309 | 1317 | |
| 1310 | 1318 | err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, |
| 1311 | | - send_policy, qp); |
| | 1319 | + send_policy, qp, udata); |
| 1312 | 1320 | if (err) { |
| 1313 | 1321 | mthca_free(&dev->qp_table.alloc, qp->qpn); |
| 1314 | 1322 | return err; |
| .. | .. |
| 1360 | 1368 | struct ib_qp_cap *cap, |
| 1361 | 1369 | int qpn, |
| 1362 | 1370 | int port, |
| 1363 | | - struct mthca_sqp *sqp) |
| | 1371 | + struct mthca_qp *qp, |
| | 1372 | + struct ib_udata *udata) |
| 1364 | 1373 | { |
| 1365 | 1374 | u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1; |
| 1366 | 1375 | int err; |
| 1367 | 1376 | |
| 1368 | | - sqp->qp.transport = MLX; |
| 1369 | | - err = mthca_set_qp_size(dev, cap, pd, &sqp->qp); |
| | 1377 | + qp->transport = MLX; |
| | 1378 | + err = mthca_set_qp_size(dev, cap, pd, qp); |
| 1370 | 1379 | if (err) |
| 1371 | 1380 | return err; |
| 1372 | 1381 | |
| 1373 | | - sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE; |
| 1374 | | - sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size, |
| 1375 | | - &sqp->header_dma, GFP_KERNEL); |
| 1376 | | - if (!sqp->header_buf) |
| | 1382 | + qp->sqp->header_buf_size = qp->sq.max * MTHCA_UD_HEADER_SIZE; |
| | 1383 | + qp->sqp->header_buf = |
| | 1384 | + dma_alloc_coherent(&dev->pdev->dev, qp->sqp->header_buf_size, |
| | 1385 | + &qp->sqp->header_dma, GFP_KERNEL); |
| | 1386 | + if (!qp->sqp->header_buf) |
| 1377 | 1387 | return -ENOMEM; |
| 1378 | 1388 | |
| 1379 | 1389 | spin_lock_irq(&dev->qp_table.lock); |
| 1380 | 1390 | if (mthca_array_get(&dev->qp_table.qp, mqpn)) |
| 1381 | 1391 | err = -EBUSY; |
| 1382 | 1392 | else |
| 1383 | | - mthca_array_set(&dev->qp_table.qp, mqpn, sqp); |
| | 1393 | + mthca_array_set(&dev->qp_table.qp, mqpn, qp); |
| 1384 | 1394 | spin_unlock_irq(&dev->qp_table.lock); |
| 1385 | 1395 | |
| 1386 | 1396 | if (err) |
| 1387 | 1397 | goto err_out; |
| 1388 | 1398 | |
| 1389 | | - sqp->qp.port = port; |
| 1390 | | - sqp->qp.qpn = mqpn; |
| 1391 | | - sqp->qp.transport = MLX; |
| | 1399 | + qp->port = port; |
| | 1400 | + qp->qpn = mqpn; |
| | 1401 | + qp->transport = MLX; |
| 1392 | 1402 | |
| 1393 | 1403 | err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, |
| 1394 | | - send_policy, &sqp->qp); |
| | 1404 | + send_policy, qp, udata); |
| 1395 | 1405 | if (err) |
| 1396 | 1406 | goto err_out_free; |
| 1397 | 1407 | |
| .. | .. |
| 1412 | 1422 | |
| 1413 | 1423 | mthca_unlock_cqs(send_cq, recv_cq); |
| 1414 | 1424 | |
| 1415 | | - err_out: |
| 1416 | | - dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size, |
| 1417 | | - sqp->header_buf, sqp->header_dma); |
| 1418 | | - |
| | 1425 | +err_out: |
| | 1426 | + dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size, |
| | 1427 | + qp->sqp->header_buf, qp->sqp->header_dma); |
| 1419 | 1428 | return err; |
| 1420 | 1429 | } |
| 1421 | 1430 | |
| .. | .. |
| 1478 | 1487 | |
| 1479 | 1488 | if (is_sqp(dev, qp)) { |
| 1480 | 1489 | atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count)); |
| 1481 | | - dma_free_coherent(&dev->pdev->dev, |
| 1482 | | - to_msqp(qp)->header_buf_size, |
| 1483 | | - to_msqp(qp)->header_buf, |
| 1484 | | - to_msqp(qp)->header_dma); |
| | 1490 | + dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size, |
| | 1491 | + qp->sqp->header_buf, qp->sqp->header_dma); |
| 1485 | 1492 | } else |
| 1486 | 1493 | mthca_free(&dev->qp_table.alloc, qp->qpn); |
| 1487 | 1494 | } |
| 1488 | 1495 | |
| 1489 | 1496 | /* Create UD header for an MLX send and build a data segment for it */ |
| 1490 | | -static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, |
| 1491 | | - int ind, const struct ib_ud_wr *wr, |
| | 1497 | +static int build_mlx_header(struct mthca_dev *dev, struct mthca_qp *qp, int ind, |
| | 1498 | + const struct ib_ud_wr *wr, |
| 1492 | 1499 | struct mthca_mlx_seg *mlx, |
| 1493 | 1500 | struct mthca_data_seg *data) |
| 1494 | 1501 | { |
| | 1502 | + struct mthca_sqp *sqp = qp->sqp; |
| 1495 | 1503 | int header_size; |
| 1496 | 1504 | int err; |
| 1497 | 1505 | u16 pkey; |
| .. | .. |
| 1504 | 1512 | if (err) |
| 1505 | 1513 | return err; |
| 1506 | 1514 | mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1); |
| 1507 | | - mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) | |
| | 1515 | + mlx->flags |= cpu_to_be32((!qp->ibqp.qp_num ? MTHCA_MLX_VL15 : 0) | |
| 1508 | 1516 | (sqp->ud_header.lrh.destination_lid == |
| 1509 | 1517 | IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) | |
| 1510 | 1518 | (sqp->ud_header.lrh.service_level << 8)); |
| .. | .. |
| 1525 | 1533 | return -EINVAL; |
| 1526 | 1534 | } |
| 1527 | 1535 | |
| 1528 | | - sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; |
| | 1536 | + sqp->ud_header.lrh.virtual_lane = !qp->ibqp.qp_num ? 15 : 0; |
| 1529 | 1537 | if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) |
| 1530 | 1538 | sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; |
| 1531 | 1539 | sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); |
| 1532 | | - if (!sqp->qp.ibqp.qp_num) |
| 1533 | | - ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, |
| 1534 | | - sqp->pkey_index, &pkey); |
| | 1540 | + if (!qp->ibqp.qp_num) |
| | 1541 | + ib_get_cached_pkey(&dev->ib_dev, qp->port, sqp->pkey_index, |
| | 1542 | + &pkey); |
| 1535 | 1543 | else |
| 1536 | | - ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, |
| 1537 | | - wr->pkey_index, &pkey); |
| | 1544 | + ib_get_cached_pkey(&dev->ib_dev, qp->port, wr->pkey_index, |
| | 1545 | + &pkey); |
| 1538 | 1546 | sqp->ud_header.bth.pkey = cpu_to_be16(pkey); |
| 1539 | 1547 | sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); |
| 1540 | 1548 | sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); |
| 1541 | 1549 | sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ? |
| 1542 | 1550 | sqp->qkey : wr->remote_qkey); |
| 1543 | | - sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); |
| | 1551 | + sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num); |
| 1544 | 1552 | |
| 1545 | 1553 | header_size = ib_ud_header_pack(&sqp->ud_header, |
| 1546 | 1554 | sqp->header_buf + |
| 1547 | 1555 | ind * MTHCA_UD_HEADER_SIZE); |
| 1548 | 1556 | |
| 1549 | 1557 | data->byte_count = cpu_to_be32(header_size); |
| 1550 | | - data->lkey = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey); |
| | 1558 | + data->lkey = cpu_to_be32(to_mpd(qp->ibqp.pd)->ntmr.ibmr.lkey); |
| 1551 | 1559 | data->addr = cpu_to_be64(sqp->header_dma + |
| 1552 | 1560 | ind * MTHCA_UD_HEADER_SIZE); |
| 1553 | 1561 | |
| .. | .. |
| 1630 | 1638 | * without initializing f0 and size0, and they are in fact |
| 1631 | 1639 | * never used uninitialized. |
| 1632 | 1640 | */ |
| 1633 | | - int uninitialized_var(size0); |
| 1634 | | - u32 uninitialized_var(f0); |
| | 1641 | + int size0; |
| | 1642 | + u32 f0; |
| 1635 | 1643 | int ind; |
| 1636 | 1644 | u8 op0 = 0; |
| 1637 | 1645 | |
| .. | .. |
| 1726 | 1734 | break; |
| 1727 | 1735 | |
| 1728 | 1736 | case MLX: |
| 1729 | | - err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr), |
| 1730 | | - wqe - sizeof (struct mthca_next_seg), |
| 1731 | | - wqe); |
| | 1737 | + err = build_mlx_header( |
| | 1738 | + dev, qp, ind, ud_wr(wr), |
| | 1739 | + wqe - sizeof(struct mthca_next_seg), wqe); |
| 1732 | 1740 | if (err) { |
| 1733 | 1741 | *bad_wr = wr; |
| 1734 | 1742 | goto out; |
| .. | .. |
| 1800 | 1808 | (qp->qpn << 8) | size0, |
| 1801 | 1809 | dev->kar + MTHCA_SEND_DOORBELL, |
| 1802 | 1810 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); |
| 1803 | | - /* |
| 1804 | | - * Make sure doorbells don't leak out of SQ spinlock |
| 1805 | | - * and reach the HCA out of order: |
| 1806 | | - */ |
| 1807 | | - mmiowb(); |
| 1808 | 1811 | } |
| 1809 | 1812 | |
| 1810 | 1813 | qp->sq.next_ind = ind; |
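On this and the remaining doorbell hunks: the explicit `mmiowb()` calls and their comments are dropped because mmiowb ordering was folded into the spinlock core, so the `spin_unlock_irqrestore()` that follows the doorbell write already keeps it from reaching the HCA out of order on architectures that need the barrier. A schematic fragment of the resulting post-send tail (not standalone code; it reuses the variable names from the surrounding diff):

```c
	/* Ring the send doorbell while still holding the SQ lock. */
	mthca_write64(dbhi, (qp->qpn << 8) | size0,
		      dev->kar + MTHCA_SEND_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

	/*
	 * No explicit mmiowb() is needed here any more: releasing the
	 * spinlock below orders the MMIO doorbell write for us.
	 */
	spin_unlock_irqrestore(&qp->sq.lock, flags);
```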
| .. | .. |
| 1831 | 1834 | * without initializing size0, and it is in fact never used |
| 1832 | 1835 | * uninitialized. |
| 1833 | 1836 | */ |
| 1834 | | - int uninitialized_var(size0); |
| | 1837 | + int size0; |
| 1835 | 1838 | int ind; |
| 1836 | 1839 | void *wqe; |
| 1837 | 1840 | void *prev_wqe; |
| .. | .. |
| 1915 | 1918 | qp->rq.next_ind = ind; |
| 1916 | 1919 | qp->rq.head += nreq; |
| 1917 | 1920 | |
| 1918 | | - /* |
| 1919 | | - * Make sure doorbells don't leak out of RQ spinlock and reach |
| 1920 | | - * the HCA out of order: |
| 1921 | | - */ |
| 1922 | | - mmiowb(); |
| 1923 | | - |
| 1924 | 1921 | spin_unlock_irqrestore(&qp->rq.lock, flags); |
| 1925 | 1922 | return err; |
| 1926 | 1923 | } |
| .. | .. |
| 1945 | 1942 | * without initializing f0 and size0, and they are in fact |
| 1946 | 1943 | * never used uninitialized. |
| 1947 | 1944 | */ |
| 1948 | | - int uninitialized_var(size0); |
| 1949 | | - u32 uninitialized_var(f0); |
| | 1945 | + int size0; |
| | 1946 | + u32 f0; |
| 1950 | 1947 | int ind; |
| 1951 | 1948 | u8 op0 = 0; |
| 1952 | 1949 | |
| .. | .. |
| 2067 | 2064 | break; |
| 2068 | 2065 | |
| 2069 | 2066 | case MLX: |
| 2070 | | - err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr), |
| 2071 | | - wqe - sizeof (struct mthca_next_seg), |
| 2072 | | - wqe); |
| | 2067 | + err = build_mlx_header( |
| | 2068 | + dev, qp, ind, ud_wr(wr), |
| | 2069 | + wqe - sizeof(struct mthca_next_seg), wqe); |
| 2073 | 2070 | if (err) { |
| 2074 | 2071 | *bad_wr = wr; |
| 2075 | 2072 | goto out; |
| .. | .. |
| 2154 | 2151 | mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL, |
| 2155 | 2152 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); |
| 2156 | 2153 | } |
| 2157 | | - |
| 2158 | | - /* |
| 2159 | | - * Make sure doorbells don't leak out of SQ spinlock and reach |
| 2160 | | - * the HCA out of order: |
| 2161 | | - */ |
| 2162 | | - mmiowb(); |
| 2163 | 2154 | |
| 2164 | 2155 | spin_unlock_irqrestore(&qp->sq.lock, flags); |
| 2165 | 2156 | return err; |
|---|