2024-05-14 bedbef8ad3e75a304af6361af235302bcc61d06b
kernel/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -45,6 +45,7 @@
 #include <rdma/iw_cm.h>
 #include <rdma/ib_user_verbs.h>
 #include <rdma/ib_umem.h>
+#include <rdma/uverbs_ioctl.h>
 #include "i40iw.h"
 
 /**
@@ -63,7 +64,8 @@
 		return -EINVAL;
 	memset(props, 0, sizeof(*props));
 	ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
-	props->fw_ver = I40IW_FW_VERSION;
+	props->fw_ver = i40iw_fw_major_ver(&iwdev->sc_dev) << 32 |
+			i40iw_fw_minor_ver(&iwdev->sc_dev);
 	props->device_cap_flags = iwdev->device_cap_flags;
 	props->vendor_id = iwdev->ldev->pcidev->vendor;
 	props->vendor_part_id = iwdev->ldev->pcidev->device;
@@ -81,7 +83,6 @@
 	props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
 	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
 	props->atomic_cap = IB_ATOMIC_NONE;
-	props->max_map_per_fmr = 1;
 	props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
 	return 0;
 }
@@ -96,22 +97,10 @@
 			    u8 port,
 			    struct ib_port_attr *props)
 {
-	struct i40iw_device *iwdev = to_iwdev(ibdev);
-	struct net_device *netdev = iwdev->netdev;
-
-	/* props being zeroed by the caller, avoid zeroing it here */
-	props->max_mtu = IB_MTU_4096;
-	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
-
 	props->lid = 1;
-	if (netif_carrier_ok(iwdev->netdev))
-		props->state = IB_PORT_ACTIVE;
-	else
-		props->state = IB_PORT_DOWN;
 	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
 		IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
 	props->gid_tbl_len = 1;
-	props->pkey_tbl_len = 1;
 	props->active_width = IB_WIDTH_4X;
 	props->active_speed = 1;
 	props->max_msg_sz = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
@@ -120,78 +109,55 @@
 
 /**
  * i40iw_alloc_ucontext - Allocate the user context data structure
- * @ibdev: device pointer from stack
+ * @uctx: Uverbs context pointer from stack
  * @udata: user data
  *
  * This keeps track of all objects associated with a particular
  * user-mode client.
  */
-static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
-						struct ib_udata *udata)
+static int i40iw_alloc_ucontext(struct ib_ucontext *uctx,
+				struct ib_udata *udata)
 {
+	struct ib_device *ibdev = uctx->device;
 	struct i40iw_device *iwdev = to_iwdev(ibdev);
 	struct i40iw_alloc_ucontext_req req;
-	struct i40iw_alloc_ucontext_resp uresp;
-	struct i40iw_ucontext *ucontext;
+	struct i40iw_alloc_ucontext_resp uresp = {};
+	struct i40iw_ucontext *ucontext = to_ucontext(uctx);
 
 	if (ib_copy_from_udata(&req, udata, sizeof(req)))
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER) {
 		i40iw_pr_err("Unsupported provider library version %u.\n", req.userspace_ver);
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}
 
-	memset(&uresp, 0, sizeof(uresp));
 	uresp.max_qps = iwdev->max_qp;
 	uresp.max_pds = iwdev->max_pd;
 	uresp.wq_size = iwdev->max_qp_wr * 2;
 	uresp.kernel_ver = req.userspace_ver;
 
-	ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
-	if (!ucontext)
-		return ERR_PTR(-ENOMEM);
-
 	ucontext->iwdev = iwdev;
 	ucontext->abi_ver = req.userspace_ver;
 
-	if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
-		kfree(ucontext);
-		return ERR_PTR(-EFAULT);
-	}
+	if (ib_copy_to_udata(udata, &uresp, sizeof(uresp)))
+		return -EFAULT;
 
 	INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
 	spin_lock_init(&ucontext->cq_reg_mem_list_lock);
 	INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
 	spin_lock_init(&ucontext->qp_reg_mem_list_lock);
 
-	return &ucontext->ibucontext;
+	return 0;
 }
 
 /**
  * i40iw_dealloc_ucontext - deallocate the user context data structure
  * @context: user context created during alloc
  */
-static int i40iw_dealloc_ucontext(struct ib_ucontext *context)
+static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
 {
-	struct i40iw_ucontext *ucontext = to_ucontext(context);
-	unsigned long flags;
-
-	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
-	if (!list_empty(&ucontext->cq_reg_mem_list)) {
-		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
-		return -EBUSY;
-	}
-	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
-	spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
-	if (!list_empty(&ucontext->qp_reg_mem_list)) {
-		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
-		return -EBUSY;
-	}
-	spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
-
-	kfree(ucontext);
-	return 0;
+	return;
 }
 
 /**
@@ -209,11 +175,8 @@
 
 	dbaddr = I40IW_DB_ADDR_OFFSET + pci_resource_start(ucontext->iwdev->ldev->pcidev, 0);
 
-	if (io_remap_pfn_range(vma, vma->vm_start, dbaddr >> PAGE_SHIFT, PAGE_SIZE,
-			       pgprot_noncached(vma->vm_page_prot)))
-		return -EAGAIN;
-
-	return 0;
+	return rdma_user_mmap_io(context, vma, dbaddr >> PAGE_SHIFT, PAGE_SIZE,
+				 pgprot_noncached(vma->vm_page_prot), NULL);
 }
 
 /**
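
Background on the helper adopted above: rdma_user_mmap_io() performs the same io_remap_pfn_range() as the removed open-coded path, but also length-checks the VMA and associates the mapping with the ucontext so the core can zap it when the context goes away. A minimal sketch of a doorbell-page .mmap handler built on it — the BAR layout and names here are hypothetical, not from this patch:

	/* Hedged sketch, assuming one noncached doorbell page at the start of BAR0. */
	static int example_mmap(struct ib_ucontext *context,
				struct vm_area_struct *vma)
	{
		u64 db_addr = pci_resource_start(
			to_pci_dev(context->device->dev.parent), 0);

		/* only a single page may be mapped */
		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EINVAL;

		return rdma_user_mmap_io(context, vma, db_addr >> PAGE_SHIFT,
					 PAGE_SIZE,
					 pgprot_noncached(vma->vm_page_prot),
					 NULL);
	}

Passing NULL for the final rdma_user_mmap_entry argument, as the patch does, opts out of the core's offset-keyed mmap-entry tracking.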
@@ -290,43 +253,34 @@
 
 /**
  * i40iw_alloc_pd - allocate protection domain
- * @ibdev: device pointer from stack
- * @context: user context created during alloc
+ * @pd: PD pointer
  * @udata: user data
 */
-static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
-				    struct ib_ucontext *context,
-				    struct ib_udata *udata)
+static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 {
-	struct i40iw_pd *iwpd;
-	struct i40iw_device *iwdev = to_iwdev(ibdev);
+	struct i40iw_pd *iwpd = to_iwpd(pd);
+	struct i40iw_device *iwdev = to_iwdev(pd->device);
 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
 	struct i40iw_alloc_pd_resp uresp;
 	struct i40iw_sc_pd *sc_pd;
-	struct i40iw_ucontext *ucontext;
 	u32 pd_id = 0;
 	int err;
 
 	if (iwdev->closing)
-		return ERR_PTR(-ENODEV);
+		return -ENODEV;
 
 	err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
 				   iwdev->max_pd, &pd_id, &iwdev->next_pd);
 	if (err) {
 		i40iw_pr_err("alloc resource failed\n");
-		return ERR_PTR(err);
-	}
-
-	iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL);
-	if (!iwpd) {
-		err = -ENOMEM;
-		goto free_res;
+		return err;
 	}
 
 	sc_pd = &iwpd->sc_pd;
 
-	if (context) {
-		ucontext = to_ucontext(context);
+	if (udata) {
+		struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
			udata, struct i40iw_ucontext, ibucontext);
 		dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
 		memset(&uresp, 0, sizeof(uresp));
 		uresp.pd_id = pd_id;
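
Context for the pattern introduced here: with the ib_ucontext parameter removed from the verb signatures, a driver recovers its per-process state from udata via rdma_udata_to_drv_context(udata, type, member), provided by the rdma/uverbs_ioctl.h header added at the top of this patch. As I understand the helper, it resolves the ib_ucontext attached to the uverbs call and container_of()s it to the driver structure, and it yields NULL for kernel-mode callers (whose udata is NULL) — which is why `if (udata)` replaces the old `if (context)` test. A hedged, illustrative-only sketch:

	/* Illustrative sketch, not code from this patch. */
	static int example_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
	{
		struct i40iw_ucontext *ucontext;

		if (udata) {
			/* user-mode verb: recover the per-process state */
			ucontext = rdma_udata_to_drv_context(udata,
							     struct i40iw_ucontext,
							     ibucontext);
			/* e.g. honor ucontext->abi_ver in the response */
		} else {
			/* kernel-mode consumer: no user context exists */
		}
		return 0;
	}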
@@ -339,19 +293,19 @@
 	}
 
 	i40iw_add_pdusecount(iwpd);
-	return &iwpd->ibpd;
+	return 0;
+
 error:
-	kfree(iwpd);
-free_res:
 	i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id);
-	return ERR_PTR(err);
+	return err;
 }
 
 /**
  * i40iw_dealloc_pd - deallocate pd
  * @ibpd: ptr of pd to be deallocated
+ * @udata: user data or null for kernel object
 */
-static int i40iw_dealloc_pd(struct ib_pd *ibpd)
+static int i40iw_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
 	struct i40iw_pd *iwpd = to_iwpd(ibpd);
 	struct i40iw_device *iwdev = to_iwdev(ibpd->device);
@@ -387,11 +341,11 @@
  * @iwqp: qp ptr (user or kernel)
  * @qp_num: qp number assigned
  */
-void i40iw_free_qp_resources(struct i40iw_device *iwdev,
-			     struct i40iw_qp *iwqp,
-			     u32 qp_num)
+void i40iw_free_qp_resources(struct i40iw_qp *iwqp)
 {
 	struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
+	struct i40iw_device *iwdev = iwqp->iwdev;
+	u32 qp_num = iwqp->ibqp.qp_num;
 
 	i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
 	i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
@@ -403,7 +357,7 @@
 	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
 	kfree(iwqp->kqp.wrid_mem);
 	iwqp->kqp.wrid_mem = NULL;
-	kfree(iwqp->allocated_buffer);
+	kfree(iwqp);
 }
 
 /**
@@ -422,9 +376,13 @@
  * i40iw_destroy_qp - destroy qp
  * @ibqp: qp's ib pointer also to get to device's qp address
  */
-static int i40iw_destroy_qp(struct ib_qp *ibqp)
+static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 {
 	struct i40iw_qp *iwqp = to_iwqp(ibqp);
+	struct ib_qp_attr attr;
+	struct i40iw_device *iwdev = iwqp->iwdev;
+
+	memset(&attr, 0, sizeof(attr));
 
 	iwqp->destroyed = 1;
 
@@ -439,7 +397,15 @@
 		}
 	}
 
-	i40iw_rem_ref(&iwqp->ibqp);
+	attr.qp_state = IB_QPS_ERR;
+	i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+	i40iw_qp_rem_ref(&iwqp->ibqp);
+	wait_for_completion(&iwqp->free_qp);
+	i40iw_cqp_qp_destroy_cmd(&iwdev->sc_dev, &iwqp->sc_qp);
+	i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
+	i40iw_free_qp_resources(iwqp);
+	i40iw_rem_devusecount(iwdev);
+
 	return 0;
 }
@@ -543,11 +509,11 @@
 	struct i40iw_device *iwdev = to_iwdev(ibpd->device);
 	struct i40iw_cqp *iwcqp = &iwdev->cqp;
 	struct i40iw_qp *iwqp;
-	struct i40iw_ucontext *ucontext;
+	struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct i40iw_ucontext, ibucontext);
 	struct i40iw_create_qp_req req;
 	struct i40iw_create_qp_resp uresp;
 	u32 qp_num = 0;
-	void *mem;
 	enum i40iw_status_code ret;
 	int err_code;
 	int sq_size;
@@ -589,16 +555,15 @@
 	init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
 	init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
 
-	mem = kzalloc(sizeof(*iwqp), GFP_KERNEL);
-	if (!mem)
+	iwqp = kzalloc(sizeof(*iwqp), GFP_KERNEL);
+	if (!iwqp)
 		return ERR_PTR(-ENOMEM);
 
-	iwqp = (struct i40iw_qp *)mem;
-	iwqp->allocated_buffer = mem;
 	qp = &iwqp->sc_qp;
 	qp->back_qp = (void *)iwqp;
 	qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
 
+	iwqp->iwdev = iwdev;
 	iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;
 
 	if (i40iw_allocate_dma_mem(dev->hw,
@@ -623,7 +588,6 @@
 		goto error;
 	}
 
-	iwqp->iwdev = iwdev;
 	iwqp->iwpd = iwpd;
 	iwqp->ibqp.qp_num = qp_num;
 	qp = &iwqp->sc_qp;
@@ -639,7 +603,7 @@
 	iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
 
 	if (init_attr->qp_type != IB_QPT_RC) {
-		err_code = -EINVAL;
+		err_code = -EOPNOTSUPP;
 		goto error;
 	}
 	if (iwdev->push_mode)
@@ -651,28 +615,25 @@
 			goto error;
 		}
 		iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
-		if (ibpd->uobject && ibpd->uobject->context) {
-			iwqp->user_mode = 1;
-			ucontext = to_ucontext(ibpd->uobject->context);
+		iwqp->user_mode = 1;
 
-			if (req.user_wqe_buffers) {
-				struct i40iw_pbl *iwpbl;
+		if (req.user_wqe_buffers) {
+			struct i40iw_pbl *iwpbl;
 
-				spin_lock_irqsave(
-				    &ucontext->qp_reg_mem_list_lock, flags);
-				iwpbl = i40iw_get_pbl(
-				    (unsigned long)req.user_wqe_buffers,
-				    &ucontext->qp_reg_mem_list);
-				spin_unlock_irqrestore(
-				    &ucontext->qp_reg_mem_list_lock, flags);
+			spin_lock_irqsave(
+			    &ucontext->qp_reg_mem_list_lock, flags);
+			iwpbl = i40iw_get_pbl(
+			    (unsigned long)req.user_wqe_buffers,
+			    &ucontext->qp_reg_mem_list);
+			spin_unlock_irqrestore(
+			    &ucontext->qp_reg_mem_list_lock, flags);
 
-				if (!iwpbl) {
-					err_code = -ENODATA;
-					i40iw_pr_err("no pbl info\n");
-					goto error;
-				}
-				memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));
+			if (!iwpbl) {
+				err_code = -ENODATA;
+				i40iw_pr_err("no pbl info\n");
+				goto error;
 			}
+			memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));
 		}
 		err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
 	} else {
@@ -740,13 +701,13 @@
 		goto error;
 	}
 
-	i40iw_add_ref(&iwqp->ibqp);
+	refcount_set(&iwqp->refcount, 1);
 	spin_lock_init(&iwqp->lock);
 	iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
 	iwdev->qp_table[qp_num] = iwqp;
 	i40iw_add_pdusecount(iwqp->iwpd);
 	i40iw_add_devusecount(iwdev);
-	if (ibpd->uobject && udata) {
+	if (udata) {
 		memset(&uresp, 0, sizeof(uresp));
 		uresp.actual_sq_size = sq_size;
 		uresp.actual_rq_size = rq_size;
@@ -755,17 +716,18 @@
 		err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 		if (err_code) {
 			i40iw_pr_err("copy_to_udata failed\n");
-			i40iw_destroy_qp(&iwqp->ibqp);
-			/* let the completion of the qp destroy free the qp */
+			i40iw_destroy_qp(&iwqp->ibqp, udata);
+			/* let the completion of the qp destroy free the qp */
 			return ERR_PTR(err_code);
 		}
 	}
 	init_completion(&iwqp->sq_drained);
 	init_completion(&iwqp->rq_drained);
+	init_completion(&iwqp->free_qp);
 
 	return &iwqp->ibqp;
 error:
-	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+	i40iw_free_qp_resources(iwqp);
 	return ERR_PTR(err_code);
 }
 
@@ -836,7 +798,7 @@
 	case I40IW_QP_STATE_RTS:
 		if (iwqp->iwarp_state == I40IW_QP_STATE_IDLE)
 			i40iw_send_reset(iwqp->cm_node);
-		/* fall through */
+		fallthrough;
 	case I40IW_QP_STATE_IDLE:
 	case I40IW_QP_STATE_TERMINATE:
 	case I40IW_QP_STATE_CLOSING:
@@ -1076,47 +1038,41 @@
 /**
  * i40iw_destroy_cq - destroy cq
  * @ib_cq: cq pointer
+ * @udata: user data or NULL for kernel object
 */
-static int i40iw_destroy_cq(struct ib_cq *ib_cq)
+static int i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
 	struct i40iw_cq *iwcq;
 	struct i40iw_device *iwdev;
 	struct i40iw_sc_cq *cq;
-
-	if (!ib_cq) {
-		i40iw_pr_err("ib_cq == NULL\n");
-		return 0;
-	}
 
 	iwcq = to_iwcq(ib_cq);
 	iwdev = to_iwdev(ib_cq->device);
 	cq = &iwcq->sc_cq;
 	i40iw_cq_wq_destroy(iwdev, cq);
 	cq_free_resources(iwdev, iwcq);
-	kfree(iwcq);
 	i40iw_rem_devusecount(iwdev);
 	return 0;
 }
 
 /**
  * i40iw_create_cq - create cq
- * @ibdev: device pointer from stack
+ * @ibcq: CQ allocated
 * @attr: attributes for cq
- * @context: user context created during alloc
 * @udata: user data
 */
-static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
-				     const struct ib_cq_init_attr *attr,
-				     struct ib_ucontext *context,
-				     struct ib_udata *udata)
+static int i40iw_create_cq(struct ib_cq *ibcq,
+			   const struct ib_cq_init_attr *attr,
+			   struct ib_udata *udata)
 {
+	struct ib_device *ibdev = ibcq->device;
 	struct i40iw_device *iwdev = to_iwdev(ibdev);
-	struct i40iw_cq *iwcq;
+	struct i40iw_cq *iwcq = to_iwcq(ibcq);
 	struct i40iw_pbl *iwpbl;
 	u32 cq_num = 0;
 	struct i40iw_sc_cq *cq;
 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
-	struct i40iw_cq_init_info info;
+	struct i40iw_cq_init_info info = {};
 	enum i40iw_status_code status;
 	struct i40iw_cqp_request *cqp_request;
 	struct cqp_commands_info *cqp_info;
@@ -1126,22 +1082,16 @@
 	int entries = attr->cqe;
 
 	if (iwdev->closing)
-		return ERR_PTR(-ENODEV);
+		return -ENODEV;
 
 	if (entries > iwdev->max_cqe)
-		return ERR_PTR(-EINVAL);
-
-	iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL);
-	if (!iwcq)
-		return ERR_PTR(-ENOMEM);
-
-	memset(&info, 0, sizeof(info));
+		return -EINVAL;
 
 	err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_cqs,
 					iwdev->max_cq, &cq_num,
 					&iwdev->next_cq);
 	if (err_code)
-		goto error;
+		return err_code;
 
 	cq = &iwcq->sc_cq;
 	cq->back_cq = (void *)iwcq;
@@ -1157,14 +1107,14 @@
 	info.ceq_id_valid = true;
 	info.ceqe_mask = 1;
 	info.type = I40IW_CQ_TYPE_IWARP;
-	if (context) {
-		struct i40iw_ucontext *ucontext;
+	if (udata) {
+		struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
			udata, struct i40iw_ucontext, ibucontext);
 		struct i40iw_create_cq_req req;
 		struct i40iw_cq_mr *cqmr;
 
 		memset(&req, 0, sizeof(req));
 		iwcq->user_mode = true;
-		ucontext = to_ucontext(context);
 		if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
 			err_code = -EFAULT;
 			goto cq_free_resources;
@@ -1234,7 +1184,7 @@
 		goto cq_free_resources;
 	}
 
-	if (context) {
+	if (udata) {
 		struct i40iw_create_cq_resp resp;
 
 		memset(&resp, 0, sizeof(resp));
@@ -1248,15 +1198,13 @@
 	}
 
 	i40iw_add_devusecount(iwdev);
-	return (struct ib_cq *)iwcq;
+	return 0;
 
 cq_destroy:
 	i40iw_cq_wq_destroy(iwdev, cq);
 cq_free_resources:
 	cq_free_resources(iwdev, iwcq);
-error:
-	kfree(iwcq);
-	return ERR_PTR(err_code);
+	return err_code;
 }
 
 /**
@@ -1351,55 +1299,20 @@
 {
 	struct ib_umem *region = iwmr->region;
 	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
-	int chunk_pages, entry, i;
 	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
 	struct i40iw_pble_info *pinfo;
-	struct scatterlist *sg;
-	u64 pg_addr = 0;
+	struct ib_block_iter biter;
 	u32 idx = 0;
 
 	pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
 
-	for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
-		chunk_pages = sg_dma_len(sg) >> region->page_shift;
-		if ((iwmr->type == IW_MEMREG_TYPE_QP) &&
-		    !iwpbl->qp_mr.sq_page)
-			iwpbl->qp_mr.sq_page = sg_page(sg);
-		for (i = 0; i < chunk_pages; i++) {
-			pg_addr = sg_dma_address(sg) +
-				(i << region->page_shift);
+	if (iwmr->type == IW_MEMREG_TYPE_QP)
+		iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);
 
-			if ((entry + i) == 0)
-				*pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
-			else if (!(pg_addr & ~iwmr->page_msk))
-				*pbl = cpu_to_le64(pg_addr);
-			else
-				continue;
-			pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
-		}
+	rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
+		*pbl = rdma_block_iter_dma_address(&biter);
+		pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
 	}
-}
-
-/**
- * i40iw_set_hugetlb_params - set MR pg size and mask to huge pg values.
- * @addr: virtual address
- * @iwmr: mr pointer for this memory registration
- */
-static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr)
-{
-	struct vm_area_struct *vma;
-	struct hstate *h;
-
-	down_read(&current->mm->mmap_sem);
-	vma = find_vma(current->mm, addr);
-	if (vma && is_vm_hugetlb_page(vma)) {
-		h = hstate_vma(vma);
-		if (huge_page_size(h) == 0x200000) {
-			iwmr->page_size = huge_page_size(h);
-			iwmr->page_msk = huge_page_mask(h);
-		}
-	}
-	up_read(&current->mm->mmap_sem);
 }
 
 /**
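
The loop above is the core-iterator replacement for open-coded scatterlist walking: rdma_umem_for_each_dma_block() yields one aligned DMA address per iwmr->page_size block, which is what makes the deleted hugetlb special-casing unnecessary — page-size selection moves to ib_umem_find_best_pgsz(), used later in this patch in i40iw_reg_user_mr(). A standalone sketch of the pairing (hypothetical function; note it byte-swaps with cpu_to_le64() the way the old code did, whereas the new driver code stores the address raw):

	/* Hedged sketch: fill a flat page list from a umem. */
	static void example_fill_pbl(struct ib_umem *umem, u64 virt, __le64 *pbl)
	{
		struct ib_block_iter biter;
		unsigned long pg_sz;

		/* largest of 4K/2M that fits the MR's start and layout */
		pg_sz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M, virt);

		/* one entry per pg_sz-aligned DMA block */
		rdma_umem_for_each_dma_block(umem, &biter, pg_sz)
			*pbl++ = cpu_to_le64(rdma_block_iter_dma_address(&biter));
	}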
@@ -1618,8 +1531,7 @@
 * @mr_type: memory for stag registrion
 * @max_num_sg: man number of pages
 */
-static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
-				    enum ib_mr_type mr_type,
+static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
 				    u32 max_num_sg)
 {
 	struct i40iw_pd *iwpd = to_iwpd(pd);
@@ -1813,28 +1725,29 @@
 {
 	struct i40iw_pd *iwpd = to_iwpd(pd);
 	struct i40iw_device *iwdev = to_iwdev(pd->device);
-	struct i40iw_ucontext *ucontext;
+	struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct i40iw_ucontext, ibucontext);
 	struct i40iw_pble_alloc *palloc;
 	struct i40iw_pbl *iwpbl;
 	struct i40iw_mr *iwmr;
 	struct ib_umem *region;
 	struct i40iw_mem_reg_req req;
-	u64 pbl_depth = 0;
 	u32 stag = 0;
 	u16 access;
-	u64 region_length;
 	bool use_pbles = false;
 	unsigned long flags;
 	int err = -ENOSYS;
 	int ret;
-	int pg_shift;
+
+	if (!udata)
+		return ERR_PTR(-EOPNOTSUPP);
 
 	if (iwdev->closing)
 		return ERR_PTR(-ENODEV);
 
 	if (length > I40IW_MAX_MR_SIZE)
 		return ERR_PTR(-EINVAL);
-	region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+	region = ib_umem_get(pd->device, start, length, acc);
 	if (IS_ERR(region))
 		return (struct ib_mr *)region;
 
@@ -1854,25 +1767,18 @@
 	iwmr->region = region;
 	iwmr->ibmr.pd = pd;
 	iwmr->ibmr.device = pd->device;
-	ucontext = to_ucontext(pd->uobject->context);
 
 	iwmr->page_size = PAGE_SIZE;
-	iwmr->page_msk = PAGE_MASK;
-
-	if (region->hugetlb && (req.reg_type == IW_MEMREG_TYPE_MEM))
-		i40iw_set_hugetlb_values(start, iwmr);
-
-	region_length = region->length + (start & (iwmr->page_size - 1));
-	pg_shift = ffs(iwmr->page_size) - 1;
-	pbl_depth = region_length >> pg_shift;
-	pbl_depth += (region_length & (iwmr->page_size - 1)) ? 1 : 0;
+	if (req.reg_type == IW_MEMREG_TYPE_MEM)
+		iwmr->page_size = ib_umem_find_best_pgsz(region, SZ_4K | SZ_2M,
+							 virt);
 	iwmr->length = region->length;
 
 	iwpbl->user_base = virt;
 	palloc = &iwpbl->pble_alloc;
 
 	iwmr->type = req.reg_type;
-	iwmr->page_cnt = (u32)pbl_depth;
+	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
 
 	switch (req.reg_type) {
 	case IW_MEMREG_TYPE_QP:
@@ -2054,7 +1960,7 @@
 * i40iw_dereg_mr - deregister mr
 * @ib_mr: mr ptr for dereg
 */
-static int i40iw_dereg_mr(struct ib_mr *ib_mr)
+static int i40iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
 {
 	struct ib_pd *ibpd = ib_mr->pd;
 	struct i40iw_pd *iwpd = to_iwpd(ibpd);
@@ -2068,14 +1974,17 @@
 	struct cqp_commands_info *cqp_info;
 	u32 stag_idx;
 
-	if (iwmr->region)
-		ib_umem_release(iwmr->region);
+	ib_umem_release(iwmr->region);
 
 	if (iwmr->type != IW_MEMREG_TYPE_MEM) {
-		if (ibpd->uobject) {
-			struct i40iw_ucontext *ucontext;
+		/* region is released. only test for userness. */
+		if (iwmr->region) {
+			struct i40iw_ucontext *ucontext =
+				rdma_udata_to_drv_context(
					udata,
					struct i40iw_ucontext,
					ibucontext);
 
-			ucontext = to_ucontext(ibpd->uobject->context);
 			i40iw_del_memlist(iwmr, ucontext);
 		}
 		if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP)
@@ -2115,46 +2024,48 @@
 }
 
 /**
- * i40iw_show_rev
+ * hw_rev_show
 */
-static ssize_t i40iw_show_rev(struct device *dev,
-			      struct device_attribute *attr, char *buf)
+static ssize_t hw_rev_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
 {
-	struct i40iw_ib_device *iwibdev = container_of(dev,
-						       struct i40iw_ib_device,
-						       ibdev.dev);
+	struct i40iw_ib_device *iwibdev =
+		rdma_device_to_drv_device(dev, struct i40iw_ib_device, ibdev);
 	u32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev;
 
 	return sprintf(buf, "%x\n", hw_rev);
 }
+static DEVICE_ATTR_RO(hw_rev);
 
 /**
- * i40iw_show_hca
+ * hca_type_show
 */
-static ssize_t i40iw_show_hca(struct device *dev,
-			      struct device_attribute *attr, char *buf)
+static ssize_t hca_type_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
 {
 	return sprintf(buf, "I40IW\n");
 }
+static DEVICE_ATTR_RO(hca_type);
 
 /**
- * i40iw_show_board
+ * board_id_show
 */
-static ssize_t i40iw_show_board(struct device *dev,
-				struct device_attribute *attr,
-				char *buf)
+static ssize_t board_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
 {
 	return sprintf(buf, "%.*s\n", 32, "I40IW Board ID");
 }
+static DEVICE_ATTR_RO(board_id);
 
-static DEVICE_ATTR(hw_rev, S_IRUGO, i40iw_show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, i40iw_show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, i40iw_show_board, NULL);
+static struct attribute *i40iw_dev_attributes[] = {
+	&dev_attr_hw_rev.attr,
+	&dev_attr_hca_type.attr,
+	&dev_attr_board_id.attr,
+	NULL
+};
 
-static struct device_attribute *i40iw_dev_attributes[] = {
-	&dev_attr_hw_rev,
-	&dev_attr_hca_type,
-	&dev_attr_board_id
+static const struct attribute_group i40iw_attr_group = {
+	.attrs = i40iw_dev_attributes,
 };
 
 /**
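
Context on the renames above: DEVICE_ATTR_RO(x) declares dev_attr_x and binds it to x_show() by naming convention, which is why i40iw_show_rev() must become hw_rev_show(), and collecting the attributes into an attribute_group lets the core create and remove the sysfs files together with the device — the group is handed over via rdma_set_device_sysfs_group() in i40iw_register_rdma_device() below, so the manual device_create_file() unwinding there can go. A minimal sketch with a hypothetical attribute:

	/* Hedged sketch: "foo" is a made-up attribute, not from this patch. */
	static ssize_t foo_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%s\n", "example");
	}
	static DEVICE_ATTR_RO(foo);	/* declares dev_attr_foo -> foo_show */

	static struct attribute *example_attrs[] = {
		&dev_attr_foo.attr,
		NULL			/* array must be NULL-terminated */
	};

	static const struct attribute_group example_attr_group = {
		.attrs = example_attrs,
	};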
@@ -2213,7 +2124,6 @@
 
 	switch (ib_wr->opcode) {
 	case IB_WR_SEND:
-		/* fall-through */
 	case IB_WR_SEND_WITH_INV:
 		if (ib_wr->opcode == IB_WR_SEND) {
 			if (ib_wr->send_flags & IB_SEND_SOLICITED)
@@ -2270,7 +2180,7 @@
 		break;
 	case IB_WR_RDMA_READ_WITH_INV:
 		inv_stag = true;
-		/* fall-through*/
+		fallthrough;
 	case IB_WR_RDMA_READ:
 		if (ib_wr->num_sge > I40IW_MAX_SGE_RD) {
 			err = -EINVAL;
@@ -2527,7 +2437,6 @@
 	if (err)
 		return err;
 
-	immutable->pkey_tbl_len = attr.pkey_tbl_len;
 	immutable->gid_tbl_len = attr.gid_tbl_len;
 
 	return 0;
@@ -2601,10 +2510,11 @@
 
 static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str)
 {
-	u32 firmware_version = I40IW_FW_VERSION;
+	struct i40iw_device *iwdev = to_iwdev(dev);
 
-	snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u", firmware_version,
-		 (firmware_version & 0x000000ff));
+	snprintf(str, IB_FW_VERSION_NAME_MAX, "%llu.%llu",
+		 i40iw_fw_major_ver(&iwdev->sc_dev),
+		 i40iw_fw_minor_ver(&iwdev->sc_dev));
 }
 
 /**
@@ -2682,40 +2592,53 @@
 	return 0;
 }
 
-/**
- * i40iw_query_pkey - Query partition key
- * @ibdev: device pointer from stack
- * @port: port number
- * @index: index of pkey
- * @pkey: pointer to store the pkey
- */
-static int i40iw_query_pkey(struct ib_device *ibdev,
-			    u8 port,
-			    u16 index,
-			    u16 *pkey)
-{
-	*pkey = 0;
-	return 0;
-}
+static const struct ib_device_ops i40iw_dev_ops = {
+	.owner = THIS_MODULE,
+	.driver_id = RDMA_DRIVER_I40IW,
+	/* NOTE: Older kernels wrongly use 0 for the uverbs_abi_ver */
+	.uverbs_abi_ver = I40IW_ABI_VER,
 
-/**
- * i40iw_get_vector_affinity - report IRQ affinity mask
- * @ibdev: IB device
- * @comp_vector: completion vector index
- */
-static const struct cpumask *i40iw_get_vector_affinity(struct ib_device *ibdev,
-						       int comp_vector)
-{
-	struct i40iw_device *iwdev = to_iwdev(ibdev);
-	struct i40iw_msix_vector *msix_vec;
-
-	if (iwdev->msix_shared)
-		msix_vec = &iwdev->iw_msixtbl[comp_vector];
-	else
-		msix_vec = &iwdev->iw_msixtbl[comp_vector + 1];
-
-	return irq_get_affinity_mask(msix_vec->irq);
-}
+	.alloc_hw_stats = i40iw_alloc_hw_stats,
+	.alloc_mr = i40iw_alloc_mr,
+	.alloc_pd = i40iw_alloc_pd,
+	.alloc_ucontext = i40iw_alloc_ucontext,
+	.create_cq = i40iw_create_cq,
+	.create_qp = i40iw_create_qp,
+	.dealloc_pd = i40iw_dealloc_pd,
+	.dealloc_ucontext = i40iw_dealloc_ucontext,
+	.dereg_mr = i40iw_dereg_mr,
+	.destroy_cq = i40iw_destroy_cq,
+	.destroy_qp = i40iw_destroy_qp,
+	.drain_rq = i40iw_drain_rq,
+	.drain_sq = i40iw_drain_sq,
+	.get_dev_fw_str = i40iw_get_dev_fw_str,
+	.get_dma_mr = i40iw_get_dma_mr,
+	.get_hw_stats = i40iw_get_hw_stats,
+	.get_port_immutable = i40iw_port_immutable,
+	.iw_accept = i40iw_accept,
+	.iw_add_ref = i40iw_qp_add_ref,
+	.iw_connect = i40iw_connect,
+	.iw_create_listen = i40iw_create_listen,
+	.iw_destroy_listen = i40iw_destroy_listen,
+	.iw_get_qp = i40iw_get_qp,
+	.iw_reject = i40iw_reject,
+	.iw_rem_ref = i40iw_qp_rem_ref,
+	.map_mr_sg = i40iw_map_mr_sg,
+	.mmap = i40iw_mmap,
+	.modify_qp = i40iw_modify_qp,
+	.poll_cq = i40iw_poll_cq,
+	.post_recv = i40iw_post_recv,
+	.post_send = i40iw_post_send,
+	.query_device = i40iw_query_device,
+	.query_gid = i40iw_query_gid,
+	.query_port = i40iw_query_port,
+	.query_qp = i40iw_query_qp,
+	.reg_user_mr = i40iw_reg_user_mr,
+	.req_notify_cq = i40iw_req_notify_cq,
+	INIT_RDMA_OBJ_SIZE(ib_pd, i40iw_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_cq, i40iw_cq, ibcq),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, i40iw_ucontext, ibucontext),
+};
 
 /**
  * i40iw_init_rdma_device - initialization of iwarp device
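
Two things happen in the ops table above. ib_set_device_ops() copies the const table into the device in one shot, replacing the long run of per-field assignments deleted from i40iw_init_rdma_device() below. And the INIT_RDMA_OBJ_SIZE() entries tell the core the size and offset of the driver structures embedding ib_pd, ib_cq, and ib_ucontext, so the core allocates and frees those objects itself — which is why the kzalloc()/kfree() pairs disappeared from i40iw_alloc_pd(), i40iw_create_cq(), and i40iw_alloc_ucontext() earlier in this patch. A hedged sketch of the embedding contract, with hypothetical names:

	/* Hedged sketch: the core allocates sizeof(struct example_pd) and
	 * passes the embedded ib_pd to .alloc_pd; the driver only
	 * container_of()s back to its private part.
	 */
	struct example_pd {
		struct ib_pd ibpd;	/* must embed the core object */
		u32 pd_id;
	};

	static int example_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
	{
		struct example_pd *epd = container_of(pd, struct example_pd,
						      ibpd);

		epd->pd_id = 0;		/* driver-specific setup */
		return 0;
	}

	static const struct ib_device_ops example_dev_ops = {
		.owner = THIS_MODULE,
		.alloc_pd = example_alloc_pd,
		INIT_RDMA_OBJ_SIZE(ib_pd, example_pd, ibpd),
	};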
@@ -2725,15 +2648,13 @@
 {
 	struct i40iw_ib_device *iwibdev;
 	struct net_device *netdev = iwdev->netdev;
-	struct pci_dev *pcidev = (struct pci_dev *)iwdev->hw.dev_context;
+	struct pci_dev *pcidev = iwdev->hw.pcidev;
 
-	iwibdev = (struct i40iw_ib_device *)ib_alloc_device(sizeof(*iwibdev));
+	iwibdev = ib_alloc_device(i40iw_ib_device, ibdev);
 	if (!iwibdev) {
 		i40iw_pr_err("iwdev == NULL\n");
 		return NULL;
 	}
-	strlcpy(iwibdev->ibdev.name, "i40iw%d", IB_DEVICE_NAME_MAX);
-	iwibdev->ibdev.owner = THIS_MODULE;
 	iwdev->iwibdev = iwibdev;
 	iwibdev->iwdev = iwdev;
 
@@ -2764,53 +2685,9 @@
 	iwibdev->ibdev.phys_port_cnt = 1;
 	iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;
 	iwibdev->ibdev.dev.parent = &pcidev->dev;
-	iwibdev->ibdev.query_port = i40iw_query_port;
-	iwibdev->ibdev.query_pkey = i40iw_query_pkey;
-	iwibdev->ibdev.query_gid = i40iw_query_gid;
-	iwibdev->ibdev.alloc_ucontext = i40iw_alloc_ucontext;
-	iwibdev->ibdev.dealloc_ucontext = i40iw_dealloc_ucontext;
-	iwibdev->ibdev.mmap = i40iw_mmap;
-	iwibdev->ibdev.alloc_pd = i40iw_alloc_pd;
-	iwibdev->ibdev.dealloc_pd = i40iw_dealloc_pd;
-	iwibdev->ibdev.create_qp = i40iw_create_qp;
-	iwibdev->ibdev.modify_qp = i40iw_modify_qp;
-	iwibdev->ibdev.query_qp = i40iw_query_qp;
-	iwibdev->ibdev.destroy_qp = i40iw_destroy_qp;
-	iwibdev->ibdev.create_cq = i40iw_create_cq;
-	iwibdev->ibdev.destroy_cq = i40iw_destroy_cq;
-	iwibdev->ibdev.get_dma_mr = i40iw_get_dma_mr;
-	iwibdev->ibdev.reg_user_mr = i40iw_reg_user_mr;
-	iwibdev->ibdev.dereg_mr = i40iw_dereg_mr;
-	iwibdev->ibdev.alloc_hw_stats = i40iw_alloc_hw_stats;
-	iwibdev->ibdev.get_hw_stats = i40iw_get_hw_stats;
-	iwibdev->ibdev.query_device = i40iw_query_device;
-	iwibdev->ibdev.drain_sq = i40iw_drain_sq;
-	iwibdev->ibdev.drain_rq = i40iw_drain_rq;
-	iwibdev->ibdev.alloc_mr = i40iw_alloc_mr;
-	iwibdev->ibdev.map_mr_sg = i40iw_map_mr_sg;
-	iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL);
-	if (!iwibdev->ibdev.iwcm) {
-		ib_dealloc_device(&iwibdev->ibdev);
-		return NULL;
-	}
-
-	iwibdev->ibdev.iwcm->add_ref = i40iw_add_ref;
-	iwibdev->ibdev.iwcm->rem_ref = i40iw_rem_ref;
-	iwibdev->ibdev.iwcm->get_qp = i40iw_get_qp;
-	iwibdev->ibdev.iwcm->connect = i40iw_connect;
-	iwibdev->ibdev.iwcm->accept = i40iw_accept;
-	iwibdev->ibdev.iwcm->reject = i40iw_reject;
-	iwibdev->ibdev.iwcm->create_listen = i40iw_create_listen;
-	iwibdev->ibdev.iwcm->destroy_listen = i40iw_destroy_listen;
-	memcpy(iwibdev->ibdev.iwcm->ifname, netdev->name,
-	       sizeof(iwibdev->ibdev.iwcm->ifname));
-	iwibdev->ibdev.get_port_immutable = i40iw_port_immutable;
-	iwibdev->ibdev.get_dev_fw_str = i40iw_get_dev_fw_str;
-	iwibdev->ibdev.poll_cq = i40iw_poll_cq;
-	iwibdev->ibdev.req_notify_cq = i40iw_req_notify_cq;
-	iwibdev->ibdev.post_send = i40iw_post_send;
-	iwibdev->ibdev.post_recv = i40iw_post_recv;
-	iwibdev->ibdev.get_vector_affinity = i40iw_get_vector_affinity;
+	memcpy(iwibdev->ibdev.iw_ifname, netdev->name,
+	       sizeof(iwibdev->ibdev.iw_ifname));
+	ib_set_device_ops(&iwibdev->ibdev, &i40iw_dev_ops);
 
 	return iwibdev;
 }
@@ -2831,31 +2708,12 @@
 }
 
 /**
- * i40iw_unregister_rdma_device - unregister of iwarp from IB
- * @iwibdev: rdma device ptr
- */
-static void i40iw_unregister_rdma_device(struct i40iw_ib_device *iwibdev)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i)
-		device_remove_file(&iwibdev->ibdev.dev,
-				   i40iw_dev_attributes[i]);
-	ib_unregister_device(&iwibdev->ibdev);
-}
-
-/**
  * i40iw_destroy_rdma_device - destroy rdma device and free resources
  * @iwibdev: IB device ptr
 */
 void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
 {
-	if (!iwibdev)
-		return;
-
-	i40iw_unregister_rdma_device(iwibdev);
-	kfree(iwibdev->ibdev.iwcm);
-	iwibdev->ibdev.iwcm = NULL;
+	ib_unregister_device(&iwibdev->ibdev);
 	wait_event_timeout(iwibdev->iwdev->close_wq,
 			   !atomic64_read(&iwibdev->iwdev->use_count),
 			   I40IW_EVENT_TIMEOUT);
@@ -2868,36 +2726,25 @@
 */
 int i40iw_register_rdma_device(struct i40iw_device *iwdev)
 {
-	int i, ret;
+	int ret;
 	struct i40iw_ib_device *iwibdev;
 
 	iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
 	if (!iwdev->iwibdev)
 		return -ENOMEM;
 	iwibdev = iwdev->iwibdev;
-
-	iwibdev->ibdev.driver_id = RDMA_DRIVER_I40IW;
-	ret = ib_register_device(&iwibdev->ibdev, NULL);
+	rdma_set_device_sysfs_group(&iwibdev->ibdev, &i40iw_attr_group);
+	ret = ib_device_set_netdev(&iwibdev->ibdev, iwdev->netdev, 1);
 	if (ret)
 		goto error;
 
-	for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i) {
-		ret =
-		    device_create_file(&iwibdev->ibdev.dev,
-				       i40iw_dev_attributes[i]);
-		if (ret) {
-			while (i > 0) {
-				i--;
-				device_remove_file(&iwibdev->ibdev.dev, i40iw_dev_attributes[i]);
-			}
-			ib_unregister_device(&iwibdev->ibdev);
-			goto error;
-		}
-	}
+	dma_set_max_seg_size(&iwdev->hw.pcidev->dev, UINT_MAX);
+	ret = ib_register_device(&iwibdev->ibdev, "i40iw%d", &iwdev->hw.pcidev->dev);
+	if (ret)
+		goto error;
+
 	return 0;
 error:
-	kfree(iwdev->iwibdev->ibdev.iwcm);
-	iwdev->iwibdev->ibdev.iwcm = NULL;
 	ib_dealloc_device(&iwdev->iwibdev->ibdev);
 	return ret;
 }