@@ -39,7 +39,6 @@
 #include <linux/utsname.h>
 #include <linux/rculist.h>
 #include <linux/mm.h>
-#include <linux/random.h>
 #include <linux/vmalloc.h>
 #include <rdma/rdma_vt.h>
 
@@ -131,27 +130,6 @@
  */
 __be64 ib_qib_sys_image_guid;
 
-/**
- * qib_copy_sge - copy data to SGE memory
- * @ss: the SGE state
- * @data: the data to copy
- * @length: the length of the data
- */
-void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release)
-{
-	struct rvt_sge *sge = &ss->sge;
-
-	while (length) {
-		u32 len = rvt_get_sge_length(sge, length);
-
-		WARN_ON_ONCE(len == 0);
-		memcpy(sge->vaddr, data, len);
-		rvt_update_sge(ss, len, release);
-		data += len;
-		length -= len;
-	}
-}
-
 /*
  * Count the number of DMA descriptors needed to send length bytes of data.
  * Don't modify the qib_sge_state to get the count.
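The removal of qib_copy_sge() pairs with the dparms.sge_copy_mode = RVT_SGE_COPY_MEMCPY line added further down: the copy loop now lives in rdmavt, and qib merely selects the plain-memcpy strategy. Below is a minimal sketch of the shared replacement, assuming the rvt_copy_sge() name and signature from the rdmavt sources of this era (the extra qp/copy_last arguments exist for hfi1's adaptive copy mode and are ignored on the memcpy path):

/* Sketch of rdmavt's replacement, not a verbatim copy: the same loop as
 * the deleted qib_copy_sge(), hoisted into the shared layer. */
void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
		  void *data, u32 length, bool release, bool copy_last)
{
	struct rvt_sge *sge = &ss->sge;

	while (length) {
		u32 len = rvt_get_sge_length(sge, length);

		memcpy(sge->vaddr, data, len);	/* RVT_SGE_COPY_MEMCPY path */
		rvt_update_sge(ss, len, release);
		data += len;
		length -= len;
	}
}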
@@ -165,13 +143,8 @@
 	u32 ndesc = 1;	/* count the header */
 
 	while (length) {
-		u32 len = sge.length;
+		u32 len = rvt_get_sge_length(&sge, length);
 
-		if (len > length)
-			len = length;
-		if (len > sge.sge_length)
-			len = sge.sge_length;
-		BUG_ON(len == 0);
 		if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
 		    (len != length && (len & (sizeof(u32) - 1)))) {
 			ndesc = 0;
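This hunk and the two like it below collapse the same open-coded clamp into rdmavt's inline helper; note the hard BUG_ON(len == 0) disappears in the process. The helper is effectively the deleted logic verbatim (per rdmavt's headers):

static inline u32 rvt_get_sge_length(struct rvt_sge *sge, u32 length)
{
	u32 len = sge->length;

	/* Clamp to the caller's remaining length and to the SGE's bound. */
	if (len > length)
		len = length;
	if (len > sge->sge_length)
		len = sge->sge_length;

	return len;
}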
@@ -208,13 +181,8 @@
 	struct rvt_sge *sge = &ss->sge;
 
 	while (length) {
-		u32 len = sge->length;
+		u32 len = rvt_get_sge_length(sge, length);
 
-		if (len > length)
-			len = length;
-		if (len > sge->sge_length)
-			len = sge->sge_length;
-		BUG_ON(len == 0);
 		memcpy(data, sge->vaddr, len);
 		sge->vaddr += len;
 		sge->length -= len;
@@ -269,7 +237,7 @@
 	case IB_QPT_GSI:
 		if (ib_qib_disable_sma)
 			break;
-		/* FALLTHROUGH */
+		fallthrough;
 	case IB_QPT_UD:
 		qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
 		break;
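The /* FALLTHROUGH */ comment becomes the fallthrough pseudo-keyword, which -Wimplicit-fallthrough can actually verify instead of trusting a comment. Simplified from the kernel's compiler_attributes.h (the real definition carries a few more guards):

#if __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallthrough */
#endif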
@@ -465,14 +433,9 @@
 	u32 last;
 
 	while (1) {
-		u32 len = ss->sge.length;
+		u32 len = rvt_get_sge_length(&ss->sge, length);
 		u32 off;
 
-		if (len > length)
-			len = length;
-		if (len > ss->sge.sge_length)
-			len = ss->sge.sge_length;
-		BUG_ON(len == 0);
 		/* If the source address is not aligned, try to align it. */
 		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
 		if (off) {
@@ -754,7 +717,7 @@
 
 	spin_lock(&qp->s_lock);
 	if (tx->wqe)
-		qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
+		rvt_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
 	else if (qp->ibqp.qp_type == IB_QPT_RC) {
 		struct ib_header *hdr;
 
@@ -1027,7 +990,7 @@
 	}
 	if (qp->s_wqe) {
 		spin_lock_irqsave(&qp->s_lock, flags);
-		qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
+		rvt_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
 		spin_unlock_irqrestore(&qp->s_lock, flags);
 	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
 		spin_lock_irqsave(&qp->s_lock, flags);
@@ -1388,7 +1351,7 @@
 	rcu_read_lock();
 	qp0 = rcu_dereference(ibp->rvp.qp[0]);
 	if (qp0)
-		ah = rdma_create_ah(qp0->ibqp.pd, &attr);
+		ah = rdma_create_ah(qp0->ibqp.pd, &attr, 0);
 	rcu_read_unlock();
 	return ah;
 }
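rdma_create_ah() grew a flags argument; the one defined bit at the time was RDMA_CREATE_AH_SLEEPABLE (from include/rdma/ib_verbs.h). Passing 0 here is deliberate: this call sits inside rcu_read_lock(), so the driver must not sleep while creating the AH. A sketch of the distinction (call sites are illustrative, not from this driver):

/* Process context, sleeping allowed: the driver may block if it must. */
ah = rdma_create_ah(pd, &attr, RDMA_CREATE_AH_SLEEPABLE);

/* Atomic context (e.g. under rcu_read_lock()): no sleepable flag. */
ah = rdma_create_ah(pd, &attr, 0);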
@@ -1497,7 +1460,6 @@
 	rdi->dparms.props.max_cq = ib_qib_max_cqs;
 	rdi->dparms.props.max_cqe = ib_qib_max_cqes;
 	rdi->dparms.props.max_ah = ib_qib_max_ahs;
-	rdi->dparms.props.max_map_per_fmr = 32767;
 	rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
 	rdi->dparms.props.max_qp_init_rd_atom = 255;
 	rdi->dparms.props.max_srq = ib_qib_max_srqs;
@@ -1512,7 +1474,19 @@
 		rdi->dparms.props.max_mcast_grp;
 	/* post send table */
 	dd->verbs_dev.rdi.post_parms = qib_post_parms;
+
+	/* opcode translation table */
+	dd->verbs_dev.rdi.wc_opcode = ib_qib_wc_opcode;
 }
+
+static const struct ib_device_ops qib_dev_ops = {
+	.owner = THIS_MODULE,
+	.driver_id = RDMA_DRIVER_QIB,
+
+	.init_port = qib_create_port_files,
+	.modify_device = qib_modify_device,
+	.process_mad = qib_process_mad,
+};
 
 /**
  * qib_register_ib_device - register our device with the infiniband core
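Callbacks, the module owner, and the driver ID now live in one const ib_device_ops table instead of being poked into struct ib_device field by field (those assignments are deleted in the hunks below, and .init_port absorbs what was previously wired through rvt's driver_f.port_callback). ib_set_device_ops() then merges the table into the device; conceptually it does something like this (a simplified sketch of the core helper, not its literal source):

/* For each callback, adopt the driver's pointer unless one is already set. */
#define SET_DEVICE_OP(dev_ops, name)				\
	do {							\
		if (ops->name && !(dev_ops)->name)		\
			(dev_ops)->name = ops->name;		\
	} while (0)

void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
{
	struct ib_device_ops *dev_ops = &dev->ops;

	SET_DEVICE_OP(dev_ops, modify_device);
	SET_DEVICE_OP(dev_ops, process_mad);
	/* ...one line per op... */
}

A side benefit of the const table is that the callback pointers can live in read-only memory.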
@@ -1527,7 +1501,6 @@
 	unsigned i, ctxt;
 	int ret;
 
-	get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
 	for (i = 0; i < dd->num_pports; i++)
 		init_ibport(ppd + i);
 
@@ -1572,12 +1545,9 @@
 	if (!ib_qib_sys_image_guid)
 		ib_qib_sys_image_guid = ppd->guid;
 
-	ibdev->owner = THIS_MODULE;
 	ibdev->node_guid = ppd->guid;
 	ibdev->phys_port_cnt = dd->num_pports;
 	ibdev->dev.parent = &dd->pcidev->dev;
-	ibdev->modify_device = qib_modify_device;
-	ibdev->process_mad = qib_process_mad;
 
 	snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
 		 "Intel Infiniband HCA %s", init_utsname()->nodename);
@@ -1585,10 +1555,9 @@
 	/*
 	 * Fill in rvt info object.
 	 */
-	dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
 	dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
 	dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
-	dd->verbs_dev.rdi.driver_f.check_send_wqe = qib_check_send_wqe;
+	dd->verbs_dev.rdi.driver_f.setup_wqe = qib_check_send_wqe;
 	dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
 	dd->verbs_dev.rdi.driver_f.alloc_qpn = qib_alloc_qpn;
 	dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qib_qp_priv_alloc;
@@ -1631,6 +1600,7 @@
 	dd->verbs_dev.rdi.dparms.node = dd->assigned_node_id;
 	dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_IBA_IB;
 	dd->verbs_dev.rdi.dparms.max_mad_size = IB_MGMT_MAD_SIZE;
+	dd->verbs_dev.rdi.dparms.sge_copy_mode = RVT_SGE_COPY_MEMCPY;
 
 	qib_fill_device_attr(dd);
 
@@ -1642,19 +1612,15 @@
 			    i,
 			    dd->rcd[ctxt]->pkeys);
 	}
+	rdma_set_device_sysfs_group(&dd->verbs_dev.rdi.ibdev, &qib_attr_group);
 
-	ret = rvt_register_device(&dd->verbs_dev.rdi, RDMA_DRIVER_QIB);
+	ib_set_device_ops(ibdev, &qib_dev_ops);
+	ret = rvt_register_device(&dd->verbs_dev.rdi);
 	if (ret)
 		goto err_tx;
 
-	ret = qib_verbs_register_sysfs(dd);
-	if (ret)
-		goto err_class;
-
 	return ret;
 
-err_class:
-	rvt_unregister_device(&dd->verbs_dev.rdi);
 err_tx:
 	while (!list_empty(&dev->txreq_free)) {
 		struct list_head *l = dev->txreq_free.next;
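Handing qib_attr_group to rdma_set_device_sysfs_group() lets the RDMA core create and tear down the driver's sysfs attributes together with the ib_device itself, which is what makes the hand-rolled qib_verbs_register_sysfs() step and its err_class unwind path disposable. The group follows the standard kernel pattern; a sketch with hypothetical attribute names (qib's real list lives in qib_sysfs.c):

/* Hypothetical example attributes, for illustration only. */
static struct attribute *qib_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_board_id.attr,
	NULL
};

static const struct attribute_group qib_attr_group = {
	.attrs = qib_attributes,
};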
@@ -1716,14 +1682,14 @@
  * It is only used in post send, which doesn't hold
  * the s_lock.
  */
-void _qib_schedule_send(struct rvt_qp *qp)
+bool _qib_schedule_send(struct rvt_qp *qp)
 {
 	struct qib_ibport *ibp =
 		to_iport(qp->ibqp.device, qp->port_num);
 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
 	struct qib_qp_priv *priv = qp->priv;
 
-	queue_work(ppd->qib_wq, &priv->s_work);
+	return queue_work(ppd->qib_wq, &priv->s_work);
 }
 
 /**
@@ -1733,8 +1699,9 @@
  * This schedules qp progress. The s_lock
  * should be held.
  */
-void qib_schedule_send(struct rvt_qp *qp)
+bool qib_schedule_send(struct rvt_qp *qp)
 {
 	if (qib_send_ok(qp))
-		return _qib_schedule_send(qp);
+		return _qib_schedule_send(qp);
+	return false;
 }
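Both helpers now report whether anything was actually queued: queue_work() returns false when the work item is already pending, and qib_schedule_send() additionally returns false when the QP is not in a sendable state. A caller can therefore distinguish "progress freshly scheduled" from "nothing to do". An illustrative caller, not taken from the driver:

/* Hypothetical caller sketch: act only when this call actually queued
 * the QP's send work (queue_work() returns false if already pending). */
static void example_kick(struct rvt_qp *qp, unsigned long *n_scheduled)
{
	if (qib_schedule_send(qp))
		(*n_scheduled)++;	/* freshly queued */
}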
---|