2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/infiniband/hw/qib/qib_verbs.c
@@ -39,7 +39,6 @@
 #include <linux/utsname.h>
 #include <linux/rculist.h>
 #include <linux/mm.h>
-#include <linux/random.h>
 #include <linux/vmalloc.h>
 #include <rdma/rdma_vt.h>
 
@@ -131,27 +130,6 @@
  */
 __be64 ib_qib_sys_image_guid;
 
-/**
- * qib_copy_sge - copy data to SGE memory
- * @ss: the SGE state
- * @data: the data to copy
- * @length: the length of the data
- */
-void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release)
-{
-	struct rvt_sge *sge = &ss->sge;
-
-	while (length) {
-		u32 len = rvt_get_sge_length(sge, length);
-
-		WARN_ON_ONCE(len == 0);
-		memcpy(sge->vaddr, data, len);
-		rvt_update_sge(ss, len, release);
-		data += len;
-		length -= len;
-	}
-}
-
 /*
  * Count the number of DMA descriptors needed to send length bytes of data.
  * Don't modify the qib_sge_state to get the count.
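
Note: the copy path deleted above is not lost; it moves into rdmavt, which qib opts into further down in this diff via dparms.sge_copy_mode = RVT_SGE_COPY_MEMCPY. A minimal sketch of the shared helper, assuming the rvt_copy_sge() signature from the rdmavt headers of this era and simplified to the memcpy mode only (the real version also implements a cacheless copy and a copy_last special case):

void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
		  void *data, u32 length, bool release, bool copy_last)
{
	struct rvt_sge *sge = &ss->sge;

	while (length) {
		/* clamp the copy to the current SGE and the bytes left */
		u32 len = rvt_get_sge_length(sge, length);

		WARN_ON_ONCE(len == 0);
		memcpy(sge->vaddr, data, len);	/* RVT_SGE_COPY_MEMCPY path */
		rvt_update_sge(ss, len, release);
		data += len;
		length -= len;
	}
}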
@@ -165,13 +143,8 @@
 	u32 ndesc = 1;	/* count the header */
 
 	while (length) {
-		u32 len = sge.length;
+		u32 len = rvt_get_sge_length(&sge, length);
 
-		if (len > length)
-			len = length;
-		if (len > sge.sge_length)
-			len = sge.sge_length;
-		BUG_ON(len == 0);
 		if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
 		    (len != length && (len & (sizeof(u32) - 1)))) {
 			ndesc = 0;
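
For reference, rvt_get_sge_length() folds the five open-coded clamping lines above into one inline helper; paraphrased from include/rdma/rdmavt_mr.h (not part of this diff):

static inline u32 rvt_get_sge_length(struct rvt_sge *sge, u32 length)
{
	u32 len = sge->length;

	/* never run past the current SGE or the caller's remaining bytes */
	if (len > length)
		len = length;
	if (len > sge->sge_length)
		len = sge->sge_length;

	return len;
}

Note that the hard BUG_ON(len == 0) stops disappear with this conversion; the shared copy routine warns via WARN_ON_ONCE() instead of panicking.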
@@ -208,13 +181,8 @@
 	struct rvt_sge *sge = &ss->sge;
 
 	while (length) {
-		u32 len = sge->length;
+		u32 len = rvt_get_sge_length(sge, length);
 
-		if (len > length)
-			len = length;
-		if (len > sge->sge_length)
-			len = sge->sge_length;
-		BUG_ON(len == 0);
 		memcpy(data, sge->vaddr, len);
 		sge->vaddr += len;
 		sge->length -= len;
@@ -269,7 +237,7 @@
 	case IB_QPT_GSI:
 		if (ib_qib_disable_sma)
 			break;
-		/* FALLTHROUGH */
+		fallthrough;
 	case IB_QPT_UD:
 		qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
 		break;
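
The /* FALLTHROUGH */ comment becomes the fallthrough pseudo-keyword, so the compiler rather than a comment convention documents the deliberate case fall-through and -Wimplicit-fallthrough can be enforced tree-wide. Roughly, from include/linux/compiler_attributes.h (paraphrased):

#if __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallthrough */
#endif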
@@ -465,14 +433,9 @@
 	u32 last;
 
 	while (1) {
-		u32 len = ss->sge.length;
+		u32 len = rvt_get_sge_length(&ss->sge, length);
 		u32 off;
 
-		if (len > length)
-			len = length;
-		if (len > ss->sge.sge_length)
-			len = ss->sge.sge_length;
-		BUG_ON(len == 0);
 		/* If the source address is not aligned, try to align it. */
 		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
 		if (off) {
@@ -754,7 +717,7 @@
 
 	spin_lock(&qp->s_lock);
 	if (tx->wqe)
-		qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
+		rvt_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
 	else if (qp->ibqp.qp_type == IB_QPT_RC) {
 		struct ib_header *hdr;
 
@@ -1027,7 +990,7 @@
 	}
 	if (qp->s_wqe) {
 		spin_lock_irqsave(&qp->s_lock, flags);
-		qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
+		rvt_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
 		spin_unlock_irqrestore(&qp->s_lock, flags);
 	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
 		spin_lock_irqsave(&qp->s_lock, flags);
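
Both qib_send_complete() call sites switch to the rdmavt-owned helper; the locking contract is unchanged (callers hold qp->s_lock). The declaration as it appears in include/rdma/rdmavt_qp.h around this time, shown for reference:

void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status);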
@@ -1388,7 +1351,7 @@
 	rcu_read_lock();
 	qp0 = rcu_dereference(ibp->rvp.qp[0]);
 	if (qp0)
-		ah = rdma_create_ah(qp0->ibqp.pd, &attr);
+		ah = rdma_create_ah(qp0->ibqp.pd, &attr, 0);
 	rcu_read_unlock();
 	return ah;
 }
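
rdma_create_ah() grew a flags argument in this interval; the core uses it to know whether the call may sleep. Here the call sits under rcu_read_lock(), so 0 (atomic) is passed. A hedged usage sketch, assuming the RDMA_CREATE_AH_SLEEPABLE flag from ib_verbs.h:

/* process context: the driver may block while building the AH */
ah = rdma_create_ah(pd, &attr, RDMA_CREATE_AH_SLEEPABLE);

/* atomic context, e.g. under rcu_read_lock() as above: no flags */
ah = rdma_create_ah(pd, &attr, 0);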
@@ -1497,7 +1460,6 @@
 	rdi->dparms.props.max_cq = ib_qib_max_cqs;
 	rdi->dparms.props.max_cqe = ib_qib_max_cqes;
 	rdi->dparms.props.max_ah = ib_qib_max_ahs;
-	rdi->dparms.props.max_map_per_fmr = 32767;
 	rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
 	rdi->dparms.props.max_qp_init_rd_atom = 255;
 	rdi->dparms.props.max_srq = ib_qib_max_srqs;
@@ -1512,7 +1474,19 @@
 		rdi->dparms.props.max_mcast_grp;
 	/* post send table */
 	dd->verbs_dev.rdi.post_parms = qib_post_parms;
+
+	/* opcode translation table */
+	dd->verbs_dev.rdi.wc_opcode = ib_qib_wc_opcode;
 }
+
+static const struct ib_device_ops qib_dev_ops = {
+	.owner = THIS_MODULE,
+	.driver_id = RDMA_DRIVER_QIB,
+
+	.init_port = qib_create_port_files,
+	.modify_device = qib_modify_device,
+	.process_mad = qib_process_mad,
+};
 
 /**
  * qib_register_ib_device - register our device with the infiniband core
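
The scattered per-field callback assignments on struct ib_device (removed below) are replaced by this single const ops table, installed with ib_set_device_ops(). The core copies only the callbacks a table actually sets, so several tables (driver, rdmavt) can be layered onto one device. A sketch of the mechanism, paraphrasing drivers/infiniband/core/device.c:

void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
{
	struct ib_device_ops *dev_ops = &dev->ops;

#define SET_DEVICE_OP(ptr, name)				\
	do {							\
		if (ops->name && !(ptr)->name)			\
			(ptr)->name = ops->name;		\
	} while (0)

	SET_DEVICE_OP(dev_ops, init_port);
	SET_DEVICE_OP(dev_ops, modify_device);
	SET_DEVICE_OP(dev_ops, process_mad);
	/* ...one SET_DEVICE_OP() per member of struct ib_device_ops... */
#undef SET_DEVICE_OP
}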
@@ -1527,7 +1501,6 @@
 	unsigned i, ctxt;
 	int ret;
 
-	get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
 	for (i = 0; i < dd->num_pports; i++)
 		init_ibport(ppd + i);
 
@@ -1572,12 +1545,9 @@
 	if (!ib_qib_sys_image_guid)
 		ib_qib_sys_image_guid = ppd->guid;
 
-	ibdev->owner = THIS_MODULE;
 	ibdev->node_guid = ppd->guid;
 	ibdev->phys_port_cnt = dd->num_pports;
 	ibdev->dev.parent = &dd->pcidev->dev;
-	ibdev->modify_device = qib_modify_device;
-	ibdev->process_mad = qib_process_mad;
 
 	snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
 		 "Intel Infiniband HCA %s", init_utsname()->nodename);
@@ -1585,10 +1555,9 @@
 	/*
 	 * Fill in rvt info object.
 	 */
-	dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
 	dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
 	dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
-	dd->verbs_dev.rdi.driver_f.check_send_wqe = qib_check_send_wqe;
+	dd->verbs_dev.rdi.driver_f.setup_wqe = qib_check_send_wqe;
 	dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
 	dd->verbs_dev.rdi.driver_f.alloc_qpn = qib_alloc_qpn;
 	dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qib_qp_priv_alloc;
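
Note the rename on the unchanged right-hand side: rdmavt now invokes this hook while setting up the swqe at post-send time rather than as a late validity check, hence setup_wqe. Assuming the driver_f signature of this series, the qib callback keeps its prototype:

int qib_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       bool *call_send);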
@@ -1631,6 +1600,7 @@
 	dd->verbs_dev.rdi.dparms.node = dd->assigned_node_id;
 	dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_IBA_IB;
 	dd->verbs_dev.rdi.dparms.max_mad_size = IB_MGMT_MAD_SIZE;
+	dd->verbs_dev.rdi.dparms.sge_copy_mode = RVT_SGE_COPY_MEMCPY;
 
 	qib_fill_device_attr(dd);
 
@@ -1642,19 +1612,15 @@
 			      i,
 			      dd->rcd[ctxt]->pkeys);
 	}
+	rdma_set_device_sysfs_group(&dd->verbs_dev.rdi.ibdev, &qib_attr_group);
 
-	ret = rvt_register_device(&dd->verbs_dev.rdi, RDMA_DRIVER_QIB);
+	ib_set_device_ops(ibdev, &qib_dev_ops);
+	ret = rvt_register_device(&dd->verbs_dev.rdi);
 	if (ret)
 		goto err_tx;
 
-	ret = qib_verbs_register_sysfs(dd);
-	if (ret)
-		goto err_class;
-
 	return ret;
 
-err_class:
-	rvt_unregister_device(&dd->verbs_dev.rdi);
 err_tx:
 	while (!list_empty(&dev->txreq_free)) {
 		struct list_head *l = dev->txreq_free.next;
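
Driver-side sysfs registration (and its err_class unwind) disappears because the attribute group is now handed to the core before registration; the core creates it together with the device, closing the window in which userspace could observe a half-registered device. The helper's signature, as in include/rdma/ib_verbs.h of this era:

void rdma_set_device_sysfs_group(struct ib_device *dev,
				 const struct attribute_group *group);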
@@ -1716,14 +1682,14 @@
  * It is only used in post send, which doesn't hold
  * the s_lock.
  */
-void _qib_schedule_send(struct rvt_qp *qp)
+bool _qib_schedule_send(struct rvt_qp *qp)
 {
 	struct qib_ibport *ibp =
 		to_iport(qp->ibqp.device, qp->port_num);
 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
 	struct qib_qp_priv *priv = qp->priv;
 
-	queue_work(ppd->qib_wq, &priv->s_work);
+	return queue_work(ppd->qib_wq, &priv->s_work);
 }
 
 /**
@@ -1733,8 +1699,9 @@
  * This schedules qp progress. The s_lock
  * should be held.
  */
-void qib_schedule_send(struct rvt_qp *qp)
+bool qib_schedule_send(struct rvt_qp *qp)
 {
 	if (qib_send_ok(qp))
-		_qib_schedule_send(qp);
+		return _qib_schedule_send(qp);
+	return false;
 }
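
queue_work() returns false when the work item was already pending, so propagating its result through _qib_schedule_send() and qib_schedule_send() lets callers tell "newly scheduled" apart from "already queued or not allowed to send". A hypothetical caller sketch (the counter is illustrative, not from qib):

/* only count runs we actually scheduled; qib_send_ok() gates the rest */
if (qib_schedule_send(qp))
	stats_sends_scheduled++;	/* hypothetical counter */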