2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/drivers/infiniband/hw/qedr/verbs.c
@@ -42,6 +42,7 @@
4242 #include <rdma/ib_umem.h>
4343 #include <rdma/ib_addr.h>
4444 #include <rdma/ib_cache.h>
45
+#include <rdma/uverbs_ioctl.h>
4546
4647 #include <linux/qed/common_hsi.h>
4748 #include "qedr_hsi_rdma.h"
@@ -50,12 +51,18 @@
5051 #include "verbs.h"
5152 #include <rdma/qedr-abi.h>
5253 #include "qedr_roce_cm.h"
54
+#include "qedr_iw_cm.h"
5355
5456 #define QEDR_SRQ_WQE_ELEM_SIZE sizeof(union rdma_srq_elm)
5557 #define RDMA_MAX_SGE_PER_SRQ (4)
5658 #define RDMA_MAX_SRQ_WQE_SIZE (RDMA_MAX_SGE_PER_SRQ + 1)
5759
5860 #define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
61
+
62
+enum {
63
+ QEDR_USER_MMAP_IO_WC = 0,
64
+ QEDR_USER_MMAP_PHYS_PAGE,
65
+};
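These two values tag every rdma_user_mmap_entry the driver hands to the RDMA core, so qedr_mmap() can later tell a write-combined doorbell-BAR mapping from a plain kernel page. A sketch of the wrapper assumed in qedr.h (field layout illustrative, inferred from how the entries are filled below):

	struct qedr_user_mmap_entry {
		struct rdma_user_mmap_entry rdma_entry;	/* core-owned key + refcount */
		struct qedr_dev *dev;
		union {
			u64 io_address;	/* QEDR_USER_MMAP_IO_WC: doorbell BAR phys addr */
			void *address;	/* QEDR_USER_MMAP_PHYS_PAGE: recovery page va */
		};
		size_t length;
		u16 dpi;
		u8 mmap_flag;		/* one of the enum values above */
	};

	static inline struct qedr_user_mmap_entry *
	get_qedr_mmap_entry(struct rdma_user_mmap_entry *rdma_entry)
	{
		return container_of(rdma_entry, struct qedr_user_mmap_entry,
				    rdma_entry);
	}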
5966
6067 static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
6168 size_t len)
@@ -129,6 +136,8 @@
129136 IB_DEVICE_RC_RNR_NAK_GEN |
130137 IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
131138
139
+ if (!rdma_protocol_iwarp(&dev->ibdev, 1))
140
+ attr->device_cap_flags |= IB_DEVICE_XRC;
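XRC is advertised only on the RoCE personality because the iWARP transport has no XRC support. A hedged userspace-side sketch of how an application would discover the capability (libibverbs names, error handling elided):

	struct ibv_device_attr dattr;

	if (!ibv_query_device(ctx, &dattr) &&
	    (dattr.device_cap_flags & IBV_DEVICE_XRC)) {
		/* XRC domains, XRC SRQs and XRC INI/TGT QPs may be created */
	}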
132141 attr->max_send_sge = qattr->max_sge;
133142 attr->max_recv_sge = qattr->max_sge;
134143 attr->max_sge_rd = qattr->max_sge;
@@ -138,8 +147,6 @@
138147 attr->max_mw = qattr->max_mw;
139148 attr->max_pd = qattr->max_pd;
140149 attr->atomic_cap = dev->atomic_cap;
141
- attr->max_fmr = qattr->max_fmr;
142
- attr->max_map_per_fmr = 16;
143150 attr->max_qp_init_rd_atom =
144151 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
145152 attr->max_qp_rd_atom =
@@ -152,13 +159,13 @@
152159
153160 attr->local_ca_ack_delay = qattr->dev_ack_delay;
154161 attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
155
- attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
162
+ attr->max_pkeys = qattr->max_pkey;
156163 attr->max_ah = qattr->max_ah;
157164
158165 return 0;
159166 }
160167
161
-static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
168
+static inline void get_link_speed_and_width(int speed, u16 *ib_speed,
162169 u8 *ib_width)
163170 {
164171 switch (speed) {
@@ -209,10 +216,6 @@
209216 struct qed_rdma_port *rdma_port;
210217
211218 dev = get_qedr_dev(ibdev);
212
- if (port > 1) {
213
- DP_ERR(dev, "invalid_port=0x%x\n", port);
214
- return -EINVAL;
215
- }
216219
217220 if (!dev->rdma_ctx) {
218221 DP_ERR(dev, "rdma_ctx is NULL\n");
@@ -224,22 +227,22 @@
224227 /* *attr being zeroed by the caller, avoid zeroing it here */
225228 if (rdma_port->port_state == QED_RDMA_PORT_UP) {
226229 attr->state = IB_PORT_ACTIVE;
227
- attr->phys_state = 5;
230
+ attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
228231 } else {
229232 attr->state = IB_PORT_DOWN;
230
- attr->phys_state = 3;
233
+ attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
231234 }
232235 attr->max_mtu = IB_MTU_4096;
233
- attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
234236 attr->lid = 0;
235237 attr->lmc = 0;
236238 attr->sm_lid = 0;
237239 attr->sm_sl = 0;
238240 attr->ip_gids = true;
239241 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
242
+ attr->active_mtu = iboe_get_mtu(dev->iwarp_max_mtu);
240243 attr->gid_tbl_len = 1;
241
- attr->pkey_tbl_len = 1;
242244 } else {
245
+ attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
243246 attr->gid_tbl_len = QEDR_MAX_SGID;
244247 attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
245248 }
@@ -253,111 +256,82 @@
253256 return 0;
254257 }
255258
256
-int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
257
- struct ib_port_modify *props)
259
+int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
258260 {
259
- struct qedr_dev *dev;
260
-
261
- dev = get_qedr_dev(ibdev);
262
- if (port > 1) {
263
- DP_ERR(dev, "invalid_port=0x%x\n", port);
264
- return -EINVAL;
265
- }
266
-
267
- return 0;
268
-}
269
-
270
-static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
271
- unsigned long len)
272
-{
273
- struct qedr_mm *mm;
274
-
275
- mm = kzalloc(sizeof(*mm), GFP_KERNEL);
276
- if (!mm)
277
- return -ENOMEM;
278
-
279
- mm->key.phy_addr = phy_addr;
280
- /* This function might be called with a length which is not a multiple
281
- * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
282
- * forces this granularity by increasing the requested size if needed.
283
- * When qedr_mmap is called, it will search the list with the updated
284
- * length as a key. To prevent search failures, the length is rounded up
285
- * in advance to PAGE_SIZE.
286
- */
287
- mm->key.len = roundup(len, PAGE_SIZE);
288
- INIT_LIST_HEAD(&mm->entry);
289
-
290
- mutex_lock(&uctx->mm_list_lock);
291
- list_add(&mm->entry, &uctx->mm_head);
292
- mutex_unlock(&uctx->mm_list_lock);
293
-
294
- DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
295
- "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
296
- (unsigned long long)mm->key.phy_addr,
297
- (unsigned long)mm->key.len, uctx);
298
-
299
- return 0;
300
-}
301
-
302
-static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
303
- unsigned long len)
304
-{
305
- bool found = false;
306
- struct qedr_mm *mm;
307
-
308
- mutex_lock(&uctx->mm_list_lock);
309
- list_for_each_entry(mm, &uctx->mm_head, entry) {
310
- if (len != mm->key.len || phy_addr != mm->key.phy_addr)
311
- continue;
312
-
313
- found = true;
314
- break;
315
- }
316
- mutex_unlock(&uctx->mm_list_lock);
317
- DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
318
- "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
319
- mm->key.phy_addr, mm->key.len, uctx, found);
320
-
321
- return found;
322
-}
323
-
324
-struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
325
- struct ib_udata *udata)
326
-{
261
+ struct ib_device *ibdev = uctx->device;
327262 int rc;
328
- struct qedr_ucontext *ctx;
329
- struct qedr_alloc_ucontext_resp uresp;
263
+ struct qedr_ucontext *ctx = get_qedr_ucontext(uctx);
264
+ struct qedr_alloc_ucontext_resp uresp = {};
265
+ struct qedr_alloc_ucontext_req ureq = {};
330266 struct qedr_dev *dev = get_qedr_dev(ibdev);
331267 struct qed_rdma_add_user_out_params oparams;
268
+ struct qedr_user_mmap_entry *entry;
332269
333270 if (!udata)
334
- return ERR_PTR(-EFAULT);
271
+ return -EFAULT;
335272
336
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
337
- if (!ctx)
338
- return ERR_PTR(-ENOMEM);
273
+ if (udata->inlen) {
274
+ rc = ib_copy_from_udata(&ureq, udata,
275
+ min(sizeof(ureq), udata->inlen));
276
+ if (rc) {
277
+ DP_ERR(dev, "Problem copying data from user space\n");
278
+ return -EFAULT;
279
+ }
280
+ ctx->edpm_mode = !!(ureq.context_flags &
281
+ QEDR_ALLOC_UCTX_EDPM_MODE);
282
+ ctx->db_rec = !!(ureq.context_flags & QEDR_ALLOC_UCTX_DB_REC);
283
+ }
339284
340285 rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
341286 if (rc) {
342287 DP_ERR(dev,
343288 "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
344289 rc);
345
- goto err;
290
+ return rc;
346291 }
347292
348293 ctx->dpi = oparams.dpi;
349294 ctx->dpi_addr = oparams.dpi_addr;
350295 ctx->dpi_phys_addr = oparams.dpi_phys_addr;
351296 ctx->dpi_size = oparams.dpi_size;
352
- INIT_LIST_HEAD(&ctx->mm_head);
353
- mutex_init(&ctx->mm_list_lock);
297
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
298
+ if (!entry) {
299
+ rc = -ENOMEM;
300
+ goto err;
301
+ }
354302
355
- memset(&uresp, 0, sizeof(uresp));
303
+ entry->io_address = ctx->dpi_phys_addr;
304
+ entry->length = ctx->dpi_size;
305
+ entry->mmap_flag = QEDR_USER_MMAP_IO_WC;
306
+ entry->dpi = ctx->dpi;
307
+ entry->dev = dev;
308
+ rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry,
309
+ ctx->dpi_size);
310
+ if (rc) {
311
+ kfree(entry);
312
+ goto err;
313
+ }
314
+ ctx->db_mmap_entry = &entry->rdma_entry;
356315
357
- uresp.dpm_enabled = dev->user_dpm_enabled;
316
+ if (!dev->user_dpm_enabled)
317
+ uresp.dpm_flags = 0;
318
+ else if (rdma_protocol_iwarp(&dev->ibdev, 1))
319
+ uresp.dpm_flags = QEDR_DPM_TYPE_IWARP_LEGACY;
320
+ else
321
+ uresp.dpm_flags = QEDR_DPM_TYPE_ROCE_ENHANCED |
322
+ QEDR_DPM_TYPE_ROCE_LEGACY |
323
+ QEDR_DPM_TYPE_ROCE_EDPM_MODE;
324
+
325
+ if (ureq.context_flags & QEDR_SUPPORT_DPM_SIZES) {
326
+ uresp.dpm_flags |= QEDR_DPM_SIZES_SET;
327
+ uresp.ldpm_limit_size = QEDR_LDPM_MAX_SIZE;
328
+ uresp.edpm_trans_size = QEDR_EDPM_TRANS_SIZE;
329
+ uresp.edpm_limit_size = QEDR_EDPM_MAX_SIZE;
330
+ }
331
+
358332 uresp.wids_enabled = 1;
359333 uresp.wid_count = oparams.wid_count;
360
- uresp.db_pa = ctx->dpi_phys_addr;
334
+ uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry);
361335 uresp.db_size = ctx->dpi_size;
362336 uresp.max_send_wr = dev->attr.max_sqe;
363337 uresp.max_recv_wr = dev->attr.max_rqe;
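uresp.db_pa is no longer a raw physical address: rdma_user_mmap_get_offset() returns the opaque page offset under which the entry was inserted, and the kernel will only honor mmap() calls whose offset matches a live entry. Hedged sketch of the provider-library side (names illustrative):

	/* rdma-core provider: map the doorbell space with the returned token */
	void *db = mmap(NULL, resp.db_size, PROT_WRITE, MAP_SHARED,
			ctx->cmd_fd, resp.db_pa);
	if (db == MAP_FAILED)
		return errno;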
@@ -373,155 +347,160 @@
373347
374348 ctx->dev = dev;
375349
376
- rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
377
- if (rc)
378
- goto err;
379
-
380350 DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
381351 &ctx->ibucontext);
382
- return &ctx->ibucontext;
352
+ return 0;
383353
384354 err:
385
- kfree(ctx);
386
- return ERR_PTR(rc);
355
+ if (!ctx->db_mmap_entry)
356
+ dev->ops->rdma_remove_user(dev->rdma_ctx, ctx->dpi);
357
+ else
358
+ rdma_user_mmap_entry_remove(ctx->db_mmap_entry);
359
+
360
+ return rc;
387361 }
388362
389
-int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
363
+void qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
390364 {
391365 struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
392
- struct qedr_mm *mm, *tmp;
393
- int status = 0;
394366
395367 DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
396368 uctx);
397
- uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
398369
399
- list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
400
- DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
401
- "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
402
- mm->key.phy_addr, mm->key.len, uctx);
403
- list_del(&mm->entry);
404
- kfree(mm);
405
- }
406
-
407
- kfree(uctx);
408
- return status;
370
+ rdma_user_mmap_entry_remove(uctx->db_mmap_entry);
409371 }
410372
411
-int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
373
+void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
412374 {
413
- struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
414
- struct qedr_dev *dev = get_qedr_dev(context->device);
415
- unsigned long phys_addr = vma->vm_pgoff << PAGE_SHIFT;
416
- unsigned long len = (vma->vm_end - vma->vm_start);
417
- unsigned long dpi_start;
375
+ struct qedr_user_mmap_entry *entry = get_qedr_mmap_entry(rdma_entry);
376
+ struct qedr_dev *dev = entry->dev;
418377
419
- dpi_start = dev->db_phys_addr + (ucontext->dpi * ucontext->dpi_size);
378
+ if (entry->mmap_flag == QEDR_USER_MMAP_PHYS_PAGE)
379
+ free_page((unsigned long)entry->address);
380
+ else if (entry->mmap_flag == QEDR_USER_MMAP_IO_WC)
381
+ dev->ops->rdma_remove_user(dev->rdma_ctx, entry->dpi);
420382
421
- DP_DEBUG(dev, QEDR_MSG_INIT,
422
- "mmap invoked with vm_start=0x%pK, vm_end=0x%pK,vm_pgoff=0x%pK; dpi_start=0x%pK dpi_size=0x%x\n",
423
- (void *)vma->vm_start, (void *)vma->vm_end,
424
- (void *)vma->vm_pgoff, (void *)dpi_start, ucontext->dpi_size);
425
-
426
- if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
427
- DP_ERR(dev,
428
- "failed mmap, addresses must be page aligned: start=0x%pK, end=0x%pK\n",
429
- (void *)vma->vm_start, (void *)vma->vm_end);
430
- return -EINVAL;
431
- }
432
-
433
- if (!qedr_search_mmap(ucontext, phys_addr, len)) {
434
- DP_ERR(dev, "failed mmap, vm_pgoff=0x%lx is not authorized\n",
435
- vma->vm_pgoff);
436
- return -EINVAL;
437
- }
438
-
439
- if (phys_addr < dpi_start ||
440
- ((phys_addr + len) > (dpi_start + ucontext->dpi_size))) {
441
- DP_ERR(dev,
442
- "failed mmap, pages are outside of dpi; page address=0x%pK, dpi_start=0x%pK, dpi_size=0x%x\n",
443
- (void *)phys_addr, (void *)dpi_start,
444
- ucontext->dpi_size);
445
- return -EINVAL;
446
- }
447
-
448
- if (vma->vm_flags & VM_READ) {
449
- DP_ERR(dev, "failed mmap, cannot map doorbell bar for read\n");
450
- return -EINVAL;
451
- }
452
-
453
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
454
- return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len,
455
- vma->vm_page_prot);
383
+ kfree(entry);
456384 }
457385
458
-struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
459
- struct ib_ucontext *context, struct ib_udata *udata)
386
+int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma)
460387 {
388
+ struct ib_device *dev = ucontext->device;
389
+ size_t length = vma->vm_end - vma->vm_start;
390
+ struct rdma_user_mmap_entry *rdma_entry;
391
+ struct qedr_user_mmap_entry *entry;
392
+ int rc = 0;
393
+ u64 pfn;
394
+
395
+ ibdev_dbg(dev,
396
+ "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
397
+ vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
398
+
399
+ rdma_entry = rdma_user_mmap_entry_get(ucontext, vma);
400
+ if (!rdma_entry) {
401
+ ibdev_dbg(dev, "pgoff[%#lx] does not have valid entry\n",
402
+ vma->vm_pgoff);
403
+ return -EINVAL;
404
+ }
405
+ entry = get_qedr_mmap_entry(rdma_entry);
406
+ ibdev_dbg(dev,
407
+ "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
408
+ entry->io_address, length, entry->mmap_flag);
409
+
410
+ switch (entry->mmap_flag) {
411
+ case QEDR_USER_MMAP_IO_WC:
412
+ pfn = entry->io_address >> PAGE_SHIFT;
413
+ rc = rdma_user_mmap_io(ucontext, vma, pfn, length,
414
+ pgprot_writecombine(vma->vm_page_prot),
415
+ rdma_entry);
416
+ break;
417
+ case QEDR_USER_MMAP_PHYS_PAGE:
418
+ rc = vm_insert_page(vma, vma->vm_start,
419
+ virt_to_page(entry->address));
420
+ break;
421
+ default:
422
+ rc = -EINVAL;
423
+ }
424
+
425
+ if (rc)
426
+ ibdev_dbg(dev,
427
+ "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
428
+ entry->io_address, length, entry->mmap_flag, rc);
429
+
430
+ rdma_user_mmap_entry_put(rdma_entry);
431
+ return rc;
432
+}
433
+
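qedr_mmap() pairs with qedr_mmap_free() above: rdma_user_mmap_io() ties each VMA to the entry's reference count, so the DPI or recovery page is released only once the ucontext entry has been removed and the last user mapping is gone. The hookup assumed in qedr's main.c (not shown in this diff):

	static const struct ib_device_ops qedr_dev_ops = {
		...
		.mmap = qedr_mmap,
		.mmap_free = qedr_mmap_free,	/* runs on the final reference drop */
		...
	};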
434
+int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
435
+{
436
+ struct ib_device *ibdev = ibpd->device;
461437 struct qedr_dev *dev = get_qedr_dev(ibdev);
462
- struct qedr_pd *pd;
438
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
463439 u16 pd_id;
464440 int rc;
465441
466442 DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
467
- (udata && context) ? "User Lib" : "Kernel");
443
+ udata ? "User Lib" : "Kernel");
468444
469445 if (!dev->rdma_ctx) {
470446 DP_ERR(dev, "invalid RDMA context\n");
471
- return ERR_PTR(-EINVAL);
447
+ return -EINVAL;
472448 }
473
-
474
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
475
- if (!pd)
476
- return ERR_PTR(-ENOMEM);
477449
478450 rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
479451 if (rc)
480
- goto err;
452
+ return rc;
481453
482454 pd->pd_id = pd_id;
483455
484
- if (udata && context) {
456
+ if (udata) {
485457 struct qedr_alloc_pd_uresp uresp = {
486458 .pd_id = pd_id,
487459 };
460
+ struct qedr_ucontext *context = rdma_udata_to_drv_context(
461
+ udata, struct qedr_ucontext, ibucontext);
488462
489463 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
490464 if (rc) {
491465 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
492466 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
493
- goto err;
467
+ return rc;
494468 }
495469
496
- pd->uctx = get_qedr_ucontext(context);
470
+ pd->uctx = context;
497471 pd->uctx->pd = pd;
498472 }
499
-
500
- return &pd->ibpd;
501
-
502
-err:
503
- kfree(pd);
504
- return ERR_PTR(rc);
505
-}
506
-
507
-int qedr_dealloc_pd(struct ib_pd *ibpd)
508
-{
509
- struct qedr_dev *dev = get_qedr_dev(ibpd->device);
510
- struct qedr_pd *pd = get_qedr_pd(ibpd);
511
-
512
- if (!pd) {
513
- pr_err("Invalid PD received in dealloc_pd\n");
514
- return -EINVAL;
515
- }
516
-
517
- DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
518
- dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
519
-
520
- kfree(pd);
521473
522474 return 0;
523475 }
524476
477
+int qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
478
+{
479
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
480
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
481
+
482
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
483
+ dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
484
+ return 0;
485
+}
486
+
487
+
488
+int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
489
+{
490
+ struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
491
+ struct qedr_xrcd *xrcd = get_qedr_xrcd(ibxrcd);
492
+
493
+ return dev->ops->rdma_alloc_xrcd(dev->rdma_ctx, &xrcd->xrcd_id);
494
+}
495
+
496
+int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
497
+{
498
+ struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
499
+ u16 xrcd_id = get_qedr_xrcd(ibxrcd)->xrcd_id;
500
+
501
+ dev->ops->rdma_dealloc_xrcd(dev->rdma_ctx, xrcd_id);
502
+ return 0;
503
+}
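All of the verbs above now receive core-allocated objects and return int: the RDMA core kzallocs the driver structure, calls the verb, and frees the object on destroy, which is why the kzalloc()/kfree() pairs disappear from this file. That only works if the core knows the driver's structure sizes; the registration assumed in qedr main.c:

	static const struct ib_device_ops qedr_dev_ops = {
		...
		INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
		INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq),
		INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq),
		INIT_RDMA_OBJ_SIZE(ib_xrcd, qedr_xrcd, ibxrcd),
		INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),
	};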
525504 static void qedr_free_pbl(struct qedr_dev *dev,
526505 struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
527506 {
@@ -561,8 +540,8 @@
561540 return ERR_PTR(-ENOMEM);
562541
563542 for (i = 0; i < pbl_info->num_pbls; i++) {
564
- va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size,
565
- &pa, flags);
543
+ va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
544
+ flags);
566545 if (!va)
567546 goto err;
568547
@@ -641,13 +620,10 @@
641620 struct qedr_pbl *pbl,
642621 struct qedr_pbl_info *pbl_info, u32 pg_shift)
643622 {
644
- int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
645
- u32 fw_pg_cnt, fw_pg_per_umem_pg;
623
+ int pbe_cnt, total_num_pbes = 0;
646624 struct qedr_pbl *pbl_tbl;
647
- struct scatterlist *sg;
625
+ struct ib_block_iter biter;
648626 struct regpair *pbe;
649
- u64 pg_addr;
650
- int entry;
651627
652628 if (!pbl_info->num_pbes)
653629 return;
@@ -668,52 +644,73 @@
668644
669645 pbe_cnt = 0;
670646
671
- shift = umem->page_shift;
647
+ rdma_umem_for_each_dma_block (umem, &biter, BIT(pg_shift)) {
648
+ u64 pg_addr = rdma_block_iter_dma_address(&biter);
672649
673
- fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);
650
+ pbe->lo = cpu_to_le32(pg_addr);
651
+ pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
674652
675
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
676
- pages = sg_dma_len(sg) >> shift;
677
- pg_addr = sg_dma_address(sg);
678
- for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
679
- for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
680
- pbe->lo = cpu_to_le32(pg_addr);
681
- pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
653
+ pbe_cnt++;
654
+ total_num_pbes++;
655
+ pbe++;
682656
683
- pg_addr += BIT(pg_shift);
684
- pbe_cnt++;
685
- total_num_pbes++;
686
- pbe++;
657
+ if (total_num_pbes == pbl_info->num_pbes)
658
+ return;
687659
688
- if (total_num_pbes == pbl_info->num_pbes)
689
- return;
690
-
691
- /* If the given pbl is full storing the pbes,
692
- * move to next pbl.
693
- */
694
- if (pbe_cnt ==
695
- (pbl_info->pbl_size / sizeof(u64))) {
696
- pbl_tbl++;
697
- pbe = (struct regpair *)pbl_tbl->va;
698
- pbe_cnt = 0;
699
- }
700
-
701
- fw_pg_cnt++;
702
- }
660
+ /* If the given pbl is full storing the pbes, move to next pbl.
661
+ */
662
+ if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
663
+ pbl_tbl++;
664
+ pbe = (struct regpair *)pbl_tbl->va;
665
+ pbe_cnt = 0;
703666 }
704667 }
705668 }
706669
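The rewritten loop relies on the core block iterator: instead of walking scatterlist entries and splitting each host page into firmware pages by hand, rdma_umem_for_each_dma_block() yields one DMA address per BIT(pg_shift)-sized block regardless of PAGE_SIZE. Minimal usage sketch (assumes a pinned umem):

	struct ib_block_iter biter;

	rdma_umem_for_each_dma_block(umem, &biter, BIT(pg_shift)) {
		u64 dma = rdma_block_iter_dma_address(&biter);

		/* program one firmware PBE of size BIT(pg_shift) at 'dma' */
	}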
670
+static int qedr_db_recovery_add(struct qedr_dev *dev,
671
+ void __iomem *db_addr,
672
+ void *db_data,
673
+ enum qed_db_rec_width db_width,
674
+ enum qed_db_rec_space db_space)
675
+{
676
+ if (!db_data) {
677
+ DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
678
+ return 0;
679
+ }
680
+
681
+ return dev->ops->common->db_recovery_add(dev->cdev, db_addr, db_data,
682
+ db_width, db_space);
683
+}
684
+
685
+static void qedr_db_recovery_del(struct qedr_dev *dev,
686
+ void __iomem *db_addr,
687
+ void *db_data)
688
+{
689
+ if (!db_data) {
690
+ DP_DEBUG(dev, QEDR_MSG_INIT, "avoiding db rec since old lib\n");
691
+ return;
692
+ }
693
+
694
+ /* Ignore return code as there is not much we can do about it. Error
695
+ * log will be printed inside.
696
+ */
697
+ dev->ops->common->db_recovery_del(dev->cdev, db_addr, db_data);
698
+}
699
+
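Doorbell recovery in a nutshell: for every doorbell the driver registers both the MMIO address that gets rung and the memory location holding the last value written to it, so the qed core can replay the write after a doorbell-BAR (DORQ) overflow. A NULL db_data means an old user library that allocated no recovery buffer, in which case registration is quietly skipped. Typical pairing (sketch, mirroring the CQ path below):

	rc = qedr_db_recovery_add(dev, cq->db_addr, &cq->db.data,
				  DB_REC_WIDTH_64B, DB_REC_KERNEL);
	...
	qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);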
707700 static int qedr_copy_cq_uresp(struct qedr_dev *dev,
708
- struct qedr_cq *cq, struct ib_udata *udata)
701
+ struct qedr_cq *cq, struct ib_udata *udata,
702
+ u32 db_offset)
709703 {
710704 struct qedr_create_cq_uresp uresp;
711705 int rc;
712706
713707 memset(&uresp, 0, sizeof(uresp));
714708
715
- uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
709
+ uresp.db_offset = db_offset;
716710 uresp.icid = cq->icid;
711
+ if (cq->q.db_mmap_entry)
712
+ uresp.db_rec_addr =
713
+ rdma_user_mmap_get_offset(cq->q.db_mmap_entry);
717714
718715 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
719716 if (rc)
@@ -741,11 +738,58 @@
741738 return aligned_size / QEDR_CQE_SIZE;
742739 }
743740
744
-static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
741
+static int qedr_init_user_db_rec(struct ib_udata *udata,
742
+ struct qedr_dev *dev, struct qedr_userq *q,
743
+ bool requires_db_rec)
744
+{
745
+ struct qedr_ucontext *uctx =
746
+ rdma_udata_to_drv_context(udata, struct qedr_ucontext,
747
+ ibucontext);
748
+ struct qedr_user_mmap_entry *entry;
749
+ int rc;
750
+
751
+ /* Aborting for non doorbell userqueue (SRQ) or non-supporting lib */
752
+ if (requires_db_rec == 0 || !uctx->db_rec)
753
+ return 0;
754
+
755
+ /* Allocate a page for doorbell recovery, add to mmap */
756
+ q->db_rec_data = (void *)get_zeroed_page(GFP_USER);
757
+ if (!q->db_rec_data) {
758
+ DP_ERR(dev, "get_zeroed_page failed\n");
759
+ return -ENOMEM;
760
+ }
761
+
762
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
763
+ if (!entry)
764
+ goto err_free_db_data;
765
+
766
+ entry->address = q->db_rec_data;
767
+ entry->length = PAGE_SIZE;
768
+ entry->mmap_flag = QEDR_USER_MMAP_PHYS_PAGE;
769
+ rc = rdma_user_mmap_entry_insert(&uctx->ibucontext,
770
+ &entry->rdma_entry,
771
+ PAGE_SIZE);
772
+ if (rc)
773
+ goto err_free_entry;
774
+
775
+ q->db_mmap_entry = &entry->rdma_entry;
776
+
777
+ return 0;
778
+
779
+err_free_entry:
780
+ kfree(entry);
781
+
782
+err_free_db_data:
783
+ free_page((unsigned long)q->db_rec_data);
784
+ q->db_rec_data = NULL;
785
+ return -ENOMEM;
786
+}
787
+
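The zeroed page is exposed to userspace through its own mmap entry; the user library records each doorbell value there just before ringing it, and the kernel registers that slot with the recovery mechanism. The shared layout is assumed to be the uapi structure from rdma/qedr-abi.h:

	struct qedr_user_db_rec {
		__aligned_u64 db_data;	/* last doorbell value written by userspace */
	};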
788
+static inline int qedr_init_user_queue(struct ib_udata *udata,
745789 struct qedr_dev *dev,
746
- struct qedr_userq *q,
747
- u64 buf_addr, size_t buf_len,
748
- int access, int dmasync,
790
+ struct qedr_userq *q, u64 buf_addr,
791
+ size_t buf_len, bool requires_db_rec,
792
+ int access,
749793 int alloc_and_init)
750794 {
751795 u32 fw_pages;
@@ -753,16 +797,14 @@
753797
754798 q->buf_addr = buf_addr;
755799 q->buf_len = buf_len;
756
- q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
800
+ q->umem = ib_umem_get(&dev->ibdev, q->buf_addr, q->buf_len, access);
757801 if (IS_ERR(q->umem)) {
758802 DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
759803 PTR_ERR(q->umem));
760804 return PTR_ERR(q->umem);
761805 }
762806
763
- fw_pages = ib_umem_page_count(q->umem) <<
764
- (q->umem->page_shift - FW_PAGE_SHIFT);
765
-
807
+ fw_pages = ib_umem_num_dma_blocks(q->umem, 1 << FW_PAGE_SHIFT);
766808 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
767809 if (rc)
768810 goto err0;
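Two umem API shifts land here: ib_umem_get() now takes the ib_device (the owning ucontext is implied by the udata path) and has lost its dmasync parameter, and ib_umem_num_dma_blocks() replaces the old page_count/page_shift arithmetic by counting blocks of the device page size directly. Condensed sketch (buf_addr/buf_len assumed valid):

	struct ib_umem *umem;

	umem = ib_umem_get(&dev->ibdev, buf_addr, buf_len, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem))
		return PTR_ERR(umem);
	fw_pages = ib_umem_num_dma_blocks(umem, 1 << FW_PAGE_SHIFT);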
@@ -783,7 +825,8 @@
783825 }
784826 }
785827
786
- return 0;
828
+ /* mmap the user address used to store doorbell data for recovery */
829
+ return qedr_init_user_db_rec(udata, dev, q, requires_db_rec);
787830
788831 err0:
789832 ib_umem_release(q->umem);
@@ -816,9 +859,6 @@
816859 cq->db.data.agg_flags = flags;
817860 cq->db.data.value = cpu_to_le32(cons);
818861 writeq(cq->db.raw, cq->db_addr);
819
-
820
- /* Make sure write would stick */
821
- mmiowb();
822862 }
823863
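mmiowb() could be dropped tree-wide once the kernel started issuing the barrier implicitly when the spinlock protecting an MMIO write is released; doorbell_cq() is reached under cq->cq_lock in the poll path, so the unlock provides the ordering. Sketch of the pattern this relies on (an assumption about the locking context, matching qedr_poll_cq()):

	spin_lock_irqsave(&cq->cq_lock, flags);
	writeq(cq->db.raw, cq->db_addr);		/* doorbell MMIO write */
	spin_unlock_irqrestore(&cq->cq_lock, flags);	/* orders the write */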
824864 int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
@@ -857,20 +897,28 @@
857897 return 0;
858898 }
859899
860
-struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
861
- const struct ib_cq_init_attr *attr,
862
- struct ib_ucontext *ib_ctx, struct ib_udata *udata)
900
+int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
901
+ struct ib_udata *udata)
863902 {
864
- struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
903
+ struct ib_device *ibdev = ibcq->device;
904
+ struct qedr_ucontext *ctx = rdma_udata_to_drv_context(
905
+ udata, struct qedr_ucontext, ibucontext);
865906 struct qed_rdma_destroy_cq_out_params destroy_oparams;
866907 struct qed_rdma_destroy_cq_in_params destroy_iparams;
908
+ struct qed_chain_init_params chain_params = {
909
+ .mode = QED_CHAIN_MODE_PBL,
910
+ .intended_use = QED_CHAIN_USE_TO_CONSUME,
911
+ .cnt_type = QED_CHAIN_CNT_TYPE_U32,
912
+ .elem_size = sizeof(union rdma_cqe),
913
+ };
867914 struct qedr_dev *dev = get_qedr_dev(ibdev);
868915 struct qed_rdma_create_cq_in_params params;
869
- struct qedr_create_cq_ureq ureq;
916
+ struct qedr_create_cq_ureq ureq = {};
870917 int vector = attr->comp_vector;
871918 int entries = attr->cqe;
872
- struct qedr_cq *cq;
919
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
873920 int chain_entries;
921
+ u32 db_offset;
874922 int page_cnt;
875923 u64 pbl_ptr;
876924 u16 icid;
@@ -884,19 +932,19 @@
884932 DP_ERR(dev,
885933 "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
886934 entries, QEDR_MAX_CQES);
887
- return ERR_PTR(-EINVAL);
935
+ return -EINVAL;
888936 }
889937
890938 chain_entries = qedr_align_cq_entries(entries);
891939 chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
940
+ chain_params.num_elems = chain_entries;
892941
893
- cq = kzalloc(sizeof(*cq), GFP_KERNEL);
894
- if (!cq)
895
- return ERR_PTR(-ENOMEM);
942
+ /* calc db offset. user will add DPI base, kernel will add db addr */
943
+ db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
896944
897945 if (udata) {
898
- memset(&ureq, 0, sizeof(ureq));
899
- if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
946
+ if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
947
+ udata->inlen))) {
900948 DP_ERR(dev,
901949 "create cq: problem copying data from user space\n");
902950 goto err0;
@@ -910,9 +958,9 @@
910958
911959 cq->cq_type = QEDR_CQ_TYPE_USER;
912960
913
- rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
914
- ureq.len, IB_ACCESS_LOCAL_WRITE,
915
- 1, 1);
961
+ rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
962
+ ureq.len, true, IB_ACCESS_LOCAL_WRITE,
963
+ 1);
916964 if (rc)
917965 goto err0;
918966
@@ -920,18 +968,14 @@
920968 page_cnt = cq->q.pbl_info.num_pbes;
921969
922970 cq->ibcq.cqe = chain_entries;
971
+ cq->q.db_addr = ctx->dpi_addr + db_offset;
923972 } else {
924973 cq->cq_type = QEDR_CQ_TYPE_KERNEL;
925974
926
- rc = dev->ops->common->chain_alloc(dev->cdev,
927
- QED_CHAIN_USE_TO_CONSUME,
928
- QED_CHAIN_MODE_PBL,
929
- QED_CHAIN_CNT_TYPE_U32,
930
- chain_entries,
931
- sizeof(union rdma_cqe),
932
- &cq->pbl, NULL);
975
+ rc = dev->ops->common->chain_alloc(dev->cdev, &cq->pbl,
976
+ &chain_params);
933977 if (rc)
934
- goto err1;
978
+ goto err0;
935979
936980 page_cnt = qed_chain_get_page_cnt(&cq->pbl);
937981 pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
@@ -943,22 +987,29 @@
943987
944988 rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
945989 if (rc)
946
- goto err2;
990
+ goto err1;
947991
948992 cq->icid = icid;
949993 cq->sig = QEDR_CQ_MAGIC_NUMBER;
950994 spin_lock_init(&cq->cq_lock);
951995
952
- if (ib_ctx) {
953
- rc = qedr_copy_cq_uresp(dev, cq, udata);
996
+ if (udata) {
997
+ rc = qedr_copy_cq_uresp(dev, cq, udata, db_offset);
954998 if (rc)
955
- goto err3;
999
+ goto err2;
1000
+
1001
+ rc = qedr_db_recovery_add(dev, cq->q.db_addr,
1002
+ &cq->q.db_rec_data->db_data,
1003
+ DB_REC_WIDTH_64B,
1004
+ DB_REC_USER);
1005
+ if (rc)
1006
+ goto err2;
1007
+
9561008 } else {
9571009 /* Generate doorbell address. */
958
- cq->db_addr = dev->db_addr +
959
- DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
9601010 cq->db.data.icid = cq->icid;
961
- cq->db.data.params = DB_AGG_CMD_SET <<
1011
+ cq->db_addr = dev->db_addr + db_offset;
1012
+ cq->db.data.params = DB_AGG_CMD_MAX <<
9621013 RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
9631014
9641015 /* point to the very last element, passing it we will toggle */
@@ -967,29 +1018,34 @@
9671018 cq->latest_cqe = NULL;
9681019 consume_cqe(cq);
9691020 cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
1021
+
1022
+ rc = qedr_db_recovery_add(dev, cq->db_addr, &cq->db.data,
1023
+ DB_REC_WIDTH_64B, DB_REC_KERNEL);
1024
+ if (rc)
1025
+ goto err2;
9701026 }
9711027
9721028 DP_DEBUG(dev, QEDR_MSG_CQ,
9731029 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
9741030 cq->icid, cq, params.cq_size);
9751031
976
- return &cq->ibcq;
1032
+ return 0;
9771033
978
-err3:
1034
+err2:
9791035 destroy_iparams.icid = cq->icid;
9801036 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
9811037 &destroy_oparams);
982
-err2:
983
- if (udata)
984
- qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
985
- else
986
- dev->ops->common->chain_free(dev->cdev, &cq->pbl);
9871038 err1:
988
- if (udata)
1039
+ if (udata) {
1040
+ qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
9891041 ib_umem_release(cq->q.umem);
1042
+ if (cq->q.db_mmap_entry)
1043
+ rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
1044
+ } else {
1045
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1046
+ }
9901047 err0:
991
- kfree(cq);
992
- return ERR_PTR(-EINVAL);
1048
+ return -EINVAL;
9931049 }
9941050
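chain_alloc() was reworked to take a single qed_chain_init_params structure instead of six positional arguments, which lets create_cq fill the invariant fields once at declaration and patch in only num_elems after validating the requested depth. Consolidated sketch of the kernel-CQ allocation above:

	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_CONSUME,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U32,
		.elem_size	= sizeof(union rdma_cqe),
		.num_elems	= chain_entries,
	};

	rc = dev->ops->common->chain_alloc(dev->cdev, &cq->pbl, &params);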
9951051 int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
@@ -1005,33 +1061,39 @@
10051061 #define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
10061062 #define QEDR_DESTROY_CQ_ITER_DURATION (10)
10071063
1008
-int qedr_destroy_cq(struct ib_cq *ibcq)
1064
+int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
10091065 {
10101066 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
10111067 struct qed_rdma_destroy_cq_out_params oparams;
10121068 struct qed_rdma_destroy_cq_in_params iparams;
10131069 struct qedr_cq *cq = get_qedr_cq(ibcq);
10141070 int iter;
1015
- int rc;
10161071
10171072 DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
10181073
10191074 cq->destroyed = 1;
10201075
10211076 /* GSIs CQs are handled by driver, so they don't exist in the FW */
1022
- if (cq->cq_type == QEDR_CQ_TYPE_GSI)
1023
- goto done;
1077
+ if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
1078
+ qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
1079
+ return 0;
1080
+ }
10241081
10251082 iparams.icid = cq->icid;
1026
- rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
1027
- if (rc)
1028
- return rc;
1029
-
1083
+ dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
10301084 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
10311085
1032
- if (ibcq->uobject && ibcq->uobject->context) {
1086
+ if (udata) {
10331087 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
10341088 ib_umem_release(cq->q.umem);
1089
+
1090
+ if (cq->q.db_rec_data) {
1091
+ qedr_db_recovery_del(dev, cq->q.db_addr,
1092
+ &cq->q.db_rec_data->db_data);
1093
+ rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
1094
+ }
1095
+ } else {
1096
+ qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
10351097 }
10361098
10371099 /* We don't want the IRQ handler to handle a non-existing CQ so we
....@@ -1057,27 +1119,12 @@
10571119 iter--;
10581120 }
10591121
1060
- if (oparams.num_cq_notif != cq->cnq_notif)
1061
- goto err;
1062
-
10631122 /* Note that we don't need to have explicit code to wait for the
10641123 * completion of the event handler because it is invoked from the EQ.
10651124 * Since the destroy CQ ramrod has also been received on the EQ we can
10661125 * be certain that there's no event handler in process.
10671126 */
1068
-done:
1069
- cq->sig = ~cq->sig;
1070
-
1071
- kfree(cq);
1072
-
10731127 return 0;
1074
-
1075
-err:
1076
- DP_ERR(dev,
1077
- "CQ %p (icid=%d) not freed, expecting %d ints but got %d ints\n",
1078
- cq, cq->icid, oparams.num_cq_notif, cq->cnq_notif);
1079
-
1080
- return -EINVAL;
10811128 }
10821129
10831130 static inline int get_gid_info_from_table(struct ib_qp *ibqp,
@@ -1090,10 +1137,13 @@
10901137 enum rdma_network_type nw_type;
10911138 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
10921139 u32 ipv4_addr;
1140
+ int ret;
10931141 int i;
10941142
10951143 gid_attr = grh->sgid_attr;
1096
- qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr->ndev);
1144
+ ret = rdma_read_gid_l2_fields(gid_attr, &qp_params->vlan_id, NULL);
1145
+ if (ret)
1146
+ return ret;
10971147
10981148 nw_type = rdma_gid_attr_network_type(gid_attr);
10991149 switch (nw_type) {
....@@ -1107,7 +1157,7 @@
11071157 SET_FIELD(qp_params->modify_flags,
11081158 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
11091159 break;
1110
- case RDMA_NETWORK_IB:
1160
+ case RDMA_NETWORK_ROCE_V1:
11111161 memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
11121162 sizeof(qp_params->sgid));
11131163 memcpy(&qp_params->dgid.bytes[0],
@@ -1127,6 +1177,8 @@
11271177 QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
11281178 qp_params->roce_mode = ROCE_V2_IPV4;
11291179 break;
1180
+ default:
1181
+ return -EINVAL;
11301182 }
11311183
11321184 for (i = 0; i < 4; i++) {
@@ -1141,16 +1193,20 @@
11411193 }
11421194
11431195 static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1144
- struct ib_qp_init_attr *attrs)
1196
+ struct ib_qp_init_attr *attrs,
1197
+ struct ib_udata *udata)
11451198 {
11461199 struct qedr_device_attr *qattr = &dev->attr;
11471200
11481201 /* QP0... attrs->qp_type == IB_QPT_GSI */
1149
- if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
1202
+ if (attrs->qp_type != IB_QPT_RC &&
1203
+ attrs->qp_type != IB_QPT_GSI &&
1204
+ attrs->qp_type != IB_QPT_XRC_INI &&
1205
+ attrs->qp_type != IB_QPT_XRC_TGT) {
11501206 DP_DEBUG(dev, QEDR_MSG_QP,
11511207 "create qp: unsupported qp type=0x%x requested\n",
11521208 attrs->qp_type);
1153
- return -EINVAL;
1209
+ return -EOPNOTSUPP;
11541210 }
11551211
11561212 if (attrs->cap.max_send_wr > qattr->max_sqe) {
@@ -1181,12 +1237,21 @@
11811237 return -EINVAL;
11821238 }
11831239
1184
- /* Unprivileged user space cannot create special QP */
1185
- if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
1186
- DP_ERR(dev,
1187
- "create qp: userspace can't create special QPs of type=0x%x\n",
1188
- attrs->qp_type);
1189
- return -EINVAL;
1240
+ /* verify consumer QPs are not trying to use GSI QP's CQ.
1241
+ * TGT QP isn't associated with RQ/SQ
1242
+ */
1243
+ if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
1244
+ (attrs->qp_type != IB_QPT_XRC_TGT) &&
1245
+ (attrs->qp_type != IB_QPT_XRC_INI)) {
1246
+ struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
1247
+ struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
1248
+
1249
+ if ((send_cq->cq_type == QEDR_CQ_TYPE_GSI) ||
1250
+ (recv_cq->cq_type == QEDR_CQ_TYPE_GSI)) {
1251
+ DP_ERR(dev,
1252
+ "create qp: consumer QP cannot use GSI CQs.\n");
1253
+ return -EINVAL;
1254
+ }
11901255 }
11911256
11921257 return 0;
@@ -1222,6 +1287,9 @@
12221287 }
12231288
12241289 uresp->rq_icid = qp->icid;
1290
+ if (qp->urq.db_mmap_entry)
1291
+ uresp->rq_db_rec_addr =
1292
+ rdma_user_mmap_get_offset(qp->urq.db_mmap_entry);
12251293 }
12261294
12271295 static void qedr_copy_sq_uresp(struct qedr_dev *dev,
....@@ -1235,22 +1303,30 @@
12351303 uresp->sq_icid = qp->icid;
12361304 else
12371305 uresp->sq_icid = qp->icid + 1;
1306
+
1307
+ if (qp->usq.db_mmap_entry)
1308
+ uresp->sq_db_rec_addr =
1309
+ rdma_user_mmap_get_offset(qp->usq.db_mmap_entry);
12381310 }
12391311
12401312 static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1241
- struct qedr_qp *qp, struct ib_udata *udata)
1313
+ struct qedr_qp *qp, struct ib_udata *udata,
1314
+ struct qedr_create_qp_uresp *uresp)
12421315 {
1243
- struct qedr_create_qp_uresp uresp;
12441316 int rc;
12451317
1246
- memset(&uresp, 0, sizeof(uresp));
1247
- qedr_copy_sq_uresp(dev, &uresp, qp);
1248
- qedr_copy_rq_uresp(dev, &uresp, qp);
1318
+ memset(uresp, 0, sizeof(*uresp));
12491319
1250
- uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1251
- uresp.qp_id = qp->qp_id;
1320
+ if (qedr_qp_has_sq(qp))
1321
+ qedr_copy_sq_uresp(dev, uresp, qp);
12521322
1253
- rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1323
+ if (qedr_qp_has_rq(qp))
1324
+ qedr_copy_rq_uresp(dev, uresp, qp);
1325
+
1326
+ uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1327
+ uresp->qp_id = qp->qp_id;
1328
+
1329
+ rc = qedr_ib_copy_to_udata(udata, uresp, sizeof(*uresp));
12541330 if (rc)
12551331 DP_ERR(dev,
12561332 "create qp: failed a copy to user space with qp icid=0x%x.\n",
@@ -1265,19 +1341,29 @@
12651341 struct ib_qp_init_attr *attrs)
12661342 {
12671343 spin_lock_init(&qp->q_lock);
1268
- atomic_set(&qp->refcnt, 1);
1344
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1345
+ kref_init(&qp->refcnt);
1346
+ init_completion(&qp->iwarp_cm_comp);
1347
+ }
1348
+
12691349 qp->pd = pd;
12701350 qp->qp_type = attrs->qp_type;
12711351 qp->max_inline_data = attrs->cap.max_inline_data;
1272
- qp->sq.max_sges = attrs->cap.max_send_sge;
12731352 qp->state = QED_ROCE_QP_STATE_RESET;
12741353 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1275
- qp->sq_cq = get_qedr_cq(attrs->send_cq);
12761354 qp->dev = dev;
1355
+ if (qedr_qp_has_sq(qp)) {
1356
+ qp->sq.max_sges = attrs->cap.max_send_sge;
1357
+ qp->sq_cq = get_qedr_cq(attrs->send_cq);
1358
+ DP_DEBUG(dev, QEDR_MSG_QP,
1359
+ "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1360
+ qp->sq.max_sges, qp->sq_cq->icid);
1361
+ }
12771362
1278
- if (attrs->srq) {
1363
+ if (attrs->srq)
12791364 qp->srq = get_qedr_srq(attrs->srq);
1280
- } else {
1365
+
1366
+ if (qedr_qp_has_rq(qp)) {
12811367 qp->rq_cq = get_qedr_cq(attrs->recv_cq);
12821368 qp->rq.max_sges = attrs->cap.max_recv_sge;
12831369 DP_DEBUG(dev, QEDR_MSG_QP,
@@ -1294,19 +1380,34 @@
12941380 qp->sq.max_sges, qp->sq_cq->icid);
12951381 }
12961382
1297
-static void qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1383
+static int qedr_set_roce_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
12981384 {
1299
- qp->sq.db = dev->db_addr +
1300
- DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1301
- qp->sq.db_data.data.icid = qp->icid + 1;
1302
- if (!qp->srq) {
1385
+ int rc = 0;
1386
+
1387
+ if (qedr_qp_has_sq(qp)) {
1388
+ qp->sq.db = dev->db_addr +
1389
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1390
+ qp->sq.db_data.data.icid = qp->icid + 1;
1391
+ rc = qedr_db_recovery_add(dev, qp->sq.db, &qp->sq.db_data,
1392
+ DB_REC_WIDTH_32B, DB_REC_KERNEL);
1393
+ if (rc)
1394
+ return rc;
1395
+ }
1396
+
1397
+ if (qedr_qp_has_rq(qp)) {
13031398 qp->rq.db = dev->db_addr +
13041399 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
13051400 qp->rq.db_data.data.icid = qp->icid;
1401
+ rc = qedr_db_recovery_add(dev, qp->rq.db, &qp->rq.db_data,
1402
+ DB_REC_WIDTH_32B, DB_REC_KERNEL);
1403
+ if (rc && qedr_qp_has_sq(qp))
1404
+ qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
13061405 }
1406
+
1407
+ return rc;
13071408 }
13081409
1309
-static int qedr_check_srq_params(struct ib_pd *ibpd, struct qedr_dev *dev,
1410
+static int qedr_check_srq_params(struct qedr_dev *dev,
13101411 struct ib_srq_init_attr *attrs,
13111412 struct ib_udata *udata)
13121413 {
@@ -1323,6 +1424,10 @@
13231424 DP_ERR(dev,
13241425 "create srq: unsupported sge=0x%x requested (max_srq_sge=0x%x)\n",
13251426 attrs->attr.max_sge, qattr->max_sge);
1427
+ }
1428
+
1429
+ if (!udata && attrs->srq_type == IB_SRQT_XRC) {
1430
+ DP_ERR(dev, "XRC SRQs are not supported in kernel-space\n");
13261431 return -EINVAL;
13271432 }
13281433
@@ -1348,22 +1453,21 @@
13481453 hw_srq->phy_prod_pair_addr);
13491454 }
13501455
1351
-static int qedr_init_srq_user_params(struct ib_ucontext *ib_ctx,
1456
+static int qedr_init_srq_user_params(struct ib_udata *udata,
13521457 struct qedr_srq *srq,
13531458 struct qedr_create_srq_ureq *ureq,
1354
- int access, int dmasync)
1459
+ int access)
13551460 {
13561461 struct scatterlist *sg;
13571462 int rc;
13581463
1359
- rc = qedr_init_user_queue(ib_ctx, srq->dev, &srq->usrq, ureq->srq_addr,
1360
- ureq->srq_len, access, dmasync, 1);
1464
+ rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr,
1465
+ ureq->srq_len, false, access, 1);
13611466 if (rc)
13621467 return rc;
13631468
1364
- srq->prod_umem = ib_umem_get(ib_ctx, ureq->prod_pair_addr,
1365
- sizeof(struct rdma_srq_producers),
1366
- access, dmasync);
1469
+ srq->prod_umem = ib_umem_get(srq->ibsrq.device, ureq->prod_pair_addr,
1470
+ sizeof(struct rdma_srq_producers), access);
13671471 if (IS_ERR(srq->prod_umem)) {
13681472 qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
13691473 ib_umem_release(srq->usrq.umem);
@@ -1384,6 +1488,12 @@
13841488 struct ib_srq_init_attr *init_attr)
13851489 {
13861490 struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1491
+ struct qed_chain_init_params params = {
1492
+ .mode = QED_CHAIN_MODE_PBL,
1493
+ .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1494
+ .cnt_type = QED_CHAIN_CNT_TYPE_U32,
1495
+ .elem_size = QEDR_SRQ_WQE_ELEM_SIZE,
1496
+ };
13871497 dma_addr_t phy_prod_pair_addr;
13881498 u32 num_elems;
13891499 void *va;
@@ -1402,13 +1512,9 @@
14021512 hw_srq->virt_prod_pair_addr = va;
14031513
14041514 num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
1405
- rc = dev->ops->common->chain_alloc(dev->cdev,
1406
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1407
- QED_CHAIN_MODE_PBL,
1408
- QED_CHAIN_CNT_TYPE_U32,
1409
- num_elems,
1410
- QEDR_SRQ_WQE_ELEM_SIZE,
1411
- &hw_srq->pbl, NULL);
1515
+ params.num_elems = num_elems;
1516
+
1517
+ rc = dev->ops->common->chain_alloc(dev->cdev, &hw_srq->pbl, &params);
14121518 if (rc)
14131519 goto err0;
14141520
@@ -1422,66 +1528,53 @@
14221528 return rc;
14231529 }
14241530
1425
-static int qedr_idr_add(struct qedr_dev *dev, struct qedr_idr *qidr,
1426
- void *ptr, u32 id);
1427
-static void qedr_idr_remove(struct qedr_dev *dev,
1428
- struct qedr_idr *qidr, u32 id);
1429
-
1430
-struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
1431
- struct ib_srq_init_attr *init_attr,
1432
- struct ib_udata *udata)
1531
+int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
1532
+ struct ib_udata *udata)
14331533 {
14341534 struct qed_rdma_destroy_srq_in_params destroy_in_params;
14351535 struct qed_rdma_create_srq_in_params in_params = {};
1436
- struct qedr_dev *dev = get_qedr_dev(ibpd->device);
1536
+ struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
14371537 struct qed_rdma_create_srq_out_params out_params;
1438
- struct qedr_pd *pd = get_qedr_pd(ibpd);
1538
+ struct qedr_pd *pd = get_qedr_pd(ibsrq->pd);
14391539 struct qedr_create_srq_ureq ureq = {};
14401540 u64 pbl_base_addr, phy_prod_pair_addr;
1441
- struct ib_ucontext *ib_ctx = NULL;
14421541 struct qedr_srq_hwq_info *hw_srq;
1443
- struct qedr_ucontext *ctx = NULL;
14441542 u32 page_cnt, page_size;
1445
- struct qedr_srq *srq;
1543
+ struct qedr_srq *srq = get_qedr_srq(ibsrq);
14461544 int rc = 0;
14471545
14481546 DP_DEBUG(dev, QEDR_MSG_QP,
14491547 "create SRQ called from %s (pd %p)\n",
14501548 (udata) ? "User lib" : "kernel", pd);
14511549
1452
- rc = qedr_check_srq_params(ibpd, dev, init_attr, udata);
1550
+ rc = qedr_check_srq_params(dev, init_attr, udata);
14531551 if (rc)
1454
- return ERR_PTR(-EINVAL);
1455
-
1456
- srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1457
- if (!srq)
1458
- return ERR_PTR(-ENOMEM);
1552
+ return -EINVAL;
14591553
14601554 srq->dev = dev;
1555
+ srq->is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
14611556 hw_srq = &srq->hw_srq;
14621557 spin_lock_init(&srq->lock);
14631558
14641559 hw_srq->max_wr = init_attr->attr.max_wr;
14651560 hw_srq->max_sges = init_attr->attr.max_sge;
14661561
1467
- if (udata && ibpd->uobject && ibpd->uobject->context) {
1468
- ib_ctx = ibpd->uobject->context;
1469
- ctx = get_qedr_ucontext(ib_ctx);
1470
-
1471
- if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
1562
+ if (udata) {
1563
+ if (ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
1564
+ udata->inlen))) {
14721565 DP_ERR(dev,
14731566 "create srq: problem copying data from user space\n");
14741567 goto err0;
14751568 }
14761569
1477
- rc = qedr_init_srq_user_params(ib_ctx, srq, &ureq, 0, 0);
1570
+ rc = qedr_init_srq_user_params(udata, srq, &ureq, 0);
14781571 if (rc)
14791572 goto err0;
14801573
14811574 page_cnt = srq->usrq.pbl_info.num_pbes;
14821575 pbl_base_addr = srq->usrq.pbl_tbl->pa;
14831576 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1484
- page_size = BIT(srq->usrq.umem->page_shift);
1577
+ page_size = PAGE_SIZE;
14851578 } else {
14861579 struct qed_chain *pbl;
14871580
@@ -1501,6 +1594,14 @@
15011594 in_params.prod_pair_addr = phy_prod_pair_addr;
15021595 in_params.num_pages = page_cnt;
15031596 in_params.page_size = page_size;
1597
+ if (srq->is_xrc) {
1598
+ struct qedr_xrcd *xrcd = get_qedr_xrcd(init_attr->ext.xrc.xrcd);
1599
+ struct qedr_cq *cq = get_qedr_cq(init_attr->ext.cq);
1600
+
1601
+ in_params.is_xrc = 1;
1602
+ in_params.xrcd_id = xrcd->xrcd_id;
1603
+ in_params.cq_cid = cq->icid;
1604
+ }
15041605
15051606 rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
15061607 if (rc)
@@ -1514,13 +1615,13 @@
15141615 goto err2;
15151616 }
15161617
1517
- rc = qedr_idr_add(dev, &dev->srqidr, srq, srq->srq_id);
1618
+ rc = xa_insert_irq(&dev->srqs, srq->srq_id, srq, GFP_KERNEL);
15181619 if (rc)
15191620 goto err2;
15201621
15211622 DP_DEBUG(dev, QEDR_MSG_SRQ,
15221623 "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
1523
- return &srq->ibsrq;
1624
+ return 0;
15241625
15251626 err2:
15261627 destroy_in_params.srq_id = srq->srq_id;
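xa_insert_irq() replaces the driver-private IDR plus spinlock (the qedr_idr_* helpers are deleted further down): the XArray carries its own lock and fails with -EBUSY if the firmware-assigned id is somehow already present. The lookup side is assumed to be a plain load from the event path:

	/* resolving a firmware SRQ id in the async-event/CNQ handler (sketch) */
	struct qedr_srq *srq = xa_load(&dev->srqs, srq_id);

	if (!srq)
		return;	/* SRQ already destroyed */
	/* deliver the event against srq->ibsrq */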
@@ -1532,22 +1633,21 @@
15321633 else
15331634 qedr_free_srq_kernel_params(srq);
15341635 err0:
1535
- kfree(srq);
1536
-
1537
- return ERR_PTR(-EFAULT);
1636
+ return -EFAULT;
15381637 }
15391638
1540
-int qedr_destroy_srq(struct ib_srq *ibsrq)
1639
+int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
15411640 {
15421641 struct qed_rdma_destroy_srq_in_params in_params = {};
15431642 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
15441643 struct qedr_srq *srq = get_qedr_srq(ibsrq);
15451644
1546
- qedr_idr_remove(dev, &dev->srqidr, srq->srq_id);
1645
+ xa_erase_irq(&dev->srqs, srq->srq_id);
15471646 in_params.srq_id = srq->srq_id;
1647
+ in_params.is_xrc = srq->is_xrc;
15481648 dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
15491649
1550
- if (ibsrq->pd->uobject)
1650
+ if (ibsrq->uobject)
15511651 qedr_free_srq_user_params(srq);
15521652 else
15531653 qedr_free_srq_kernel_params(srq);
@@ -1555,8 +1655,6 @@
15551655 DP_DEBUG(dev, QEDR_MSG_SRQ,
15561656 "destroy srq: destroyed srq with srq_id=0x%0x\n",
15571657 srq->srq_id);
1558
- kfree(srq);
1559
-
15601658 return 0;
15611659 }
15621660
@@ -1598,6 +1696,20 @@
15981696 return 0;
15991697 }
16001698
1699
+static enum qed_rdma_qp_type qedr_ib_to_qed_qp_type(enum ib_qp_type ib_qp_type)
1700
+{
1701
+ switch (ib_qp_type) {
1702
+ case IB_QPT_RC:
1703
+ return QED_RDMA_QP_TYPE_RC;
1704
+ case IB_QPT_XRC_INI:
1705
+ return QED_RDMA_QP_TYPE_XRC_INI;
1706
+ case IB_QPT_XRC_TGT:
1707
+ return QED_RDMA_QP_TYPE_XRC_TGT;
1708
+ default:
1709
+ return QED_RDMA_QP_TYPE_INVAL;
1710
+ }
1711
+}
1712
+
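The helper maps the verbs QP type onto the qed HSI enum; GSI QPs never reach it because they are implemented entirely in the driver. Anything else unsupported comes back as QED_RDMA_QP_TYPE_INVAL, so a defensive caller could reject it up front (illustrative only; the actual filtering happens earlier, in qedr_check_qp_attrs()):

	enum qed_rdma_qp_type qtype = qedr_ib_to_qed_qp_type(attrs->qp_type);

	if (qtype == QED_RDMA_QP_TYPE_INVAL)
		return -EOPNOTSUPP;
	params->qp_type = qtype;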
16011713 static inline void
16021714 qedr_init_common_qp_in_params(struct qedr_dev *dev,
16031715 struct qedr_pd *pd,
@@ -1612,20 +1724,27 @@
16121724
16131725 params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
16141726 params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
1615
- params->pd = pd->pd_id;
1616
- params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1617
- params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1727
+ params->qp_type = qedr_ib_to_qed_qp_type(attrs->qp_type);
16181728 params->stats_queue = 0;
1619
- params->srq_id = 0;
1620
- params->use_srq = false;
16211729
1622
- if (!qp->srq) {
1730
+ if (pd) {
1731
+ params->pd = pd->pd_id;
1732
+ params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1733
+ }
1734
+
1735
+ if (qedr_qp_has_sq(qp))
1736
+ params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1737
+
1738
+ if (qedr_qp_has_rq(qp))
16231739 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
16241740
1625
- } else {
1741
+ if (qedr_qp_has_srq(qp)) {
16261742 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
16271743 params->srq_id = qp->srq->srq_id;
16281744 params->use_srq = true;
1745
+ } else {
1746
+ params->srq_id = 0;
1747
+ params->use_srq = false;
16291748 }
16301749 }
16311750
@@ -1639,31 +1758,10 @@
16391758 "rq_len=%zd"
16401759 "\n",
16411760 qp,
1642
- qp->usq.buf_addr,
1643
- qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
1644
-}
1645
-
1646
-static int qedr_idr_add(struct qedr_dev *dev, struct qedr_idr *qidr,
1647
- void *ptr, u32 id)
1648
-{
1649
- int rc;
1650
-
1651
- idr_preload(GFP_KERNEL);
1652
- spin_lock_irq(&qidr->idr_lock);
1653
-
1654
- rc = idr_alloc(&qidr->idr, ptr, id, id + 1, GFP_ATOMIC);
1655
-
1656
- spin_unlock_irq(&qidr->idr_lock);
1657
- idr_preload_end();
1658
-
1659
- return rc < 0 ? rc : 0;
1660
-}
1661
-
1662
-static void qedr_idr_remove(struct qedr_dev *dev, struct qedr_idr *qidr, u32 id)
1663
-{
1664
- spin_lock_irq(&qidr->idr_lock);
1665
- idr_remove(&qidr->idr, id);
1666
- spin_unlock_irq(&qidr->idr_lock);
1761
+ qedr_qp_has_sq(qp) ? qp->usq.buf_addr : 0x0,
1762
+ qedr_qp_has_sq(qp) ? qp->usq.buf_len : 0,
1763
+ qedr_qp_has_rq(qp) ? qp->urq.buf_addr : 0x0,
1764
+ qedr_qp_has_rq(qp) ? qp->urq.buf_len : 0);
16671765 }
16681766
16691767 static inline void
@@ -1685,15 +1783,19 @@
16851783 &qp->urq.pbl_info, FW_PAGE_SHIFT);
16861784 }
16871785
1688
-static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
1786
+static void qedr_cleanup_user(struct qedr_dev *dev,
1787
+ struct qedr_ucontext *ctx,
1788
+ struct qedr_qp *qp)
16891789 {
1690
- if (qp->usq.umem)
1790
+ if (qedr_qp_has_sq(qp)) {
16911791 ib_umem_release(qp->usq.umem);
1692
- qp->usq.umem = NULL;
1792
+ qp->usq.umem = NULL;
1793
+ }
16931794
1694
- if (qp->urq.umem)
1795
+ if (qedr_qp_has_rq(qp)) {
16951796 ib_umem_release(qp->urq.umem);
1696
- qp->urq.umem = NULL;
1797
+ qp->urq.umem = NULL;
1798
+ }
16971799
16981800 if (rdma_protocol_roce(&dev->ibdev, 1)) {
16991801 qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
@@ -1702,6 +1804,22 @@
17021804 kfree(qp->usq.pbl_tbl);
17031805 kfree(qp->urq.pbl_tbl);
17041806 }
1807
+
1808
+ if (qp->usq.db_rec_data) {
1809
+ qedr_db_recovery_del(dev, qp->usq.db_addr,
1810
+ &qp->usq.db_rec_data->db_data);
1811
+ rdma_user_mmap_entry_remove(qp->usq.db_mmap_entry);
1812
+ }
1813
+
1814
+ if (qp->urq.db_rec_data) {
1815
+ qedr_db_recovery_del(dev, qp->urq.db_addr,
1816
+ &qp->urq.db_rec_data->db_data);
1817
+ rdma_user_mmap_entry_remove(qp->urq.db_mmap_entry);
1818
+ }
1819
+
1820
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
1821
+ qedr_db_recovery_del(dev, qp->urq.db_rec_db2_addr,
1822
+ &qp->urq.db_rec_db2_data);
17051823 }
17061824
17071825 static int qedr_create_user_qp(struct qedr_dev *dev,
....@@ -1712,31 +1830,41 @@
17121830 {
17131831 struct qed_rdma_create_qp_in_params in_params;
17141832 struct qed_rdma_create_qp_out_params out_params;
1715
- struct qedr_pd *pd = get_qedr_pd(ibpd);
1716
- struct ib_ucontext *ib_ctx = NULL;
1717
- struct qedr_create_qp_ureq ureq;
1833
+ struct qedr_create_qp_uresp uresp = {};
1834
+ struct qedr_create_qp_ureq ureq = {};
17181835 int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
1719
- int rc = -EINVAL;
1836
+ struct qedr_ucontext *ctx = NULL;
1837
+ struct qedr_pd *pd = NULL;
1838
+ int rc = 0;
17201839
1721
- ib_ctx = ibpd->uobject->context;
1840
+ qp->create_type = QEDR_QP_CREATE_USER;
17221841
1723
- memset(&ureq, 0, sizeof(ureq));
1724
- rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
1725
- if (rc) {
1726
- DP_ERR(dev, "Problem copying data from user space\n");
1727
- return rc;
1842
+ if (ibpd) {
1843
+ pd = get_qedr_pd(ibpd);
1844
+ ctx = pd->uctx;
17281845 }
17291846
1730
- /* SQ - read access only (0), dma sync not required (0) */
1731
- rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
1732
- ureq.sq_len, 0, 0, alloc_and_init);
1733
- if (rc)
1734
- return rc;
1847
+ if (udata) {
1848
+ rc = ib_copy_from_udata(&ureq, udata, min(sizeof(ureq),
1849
+ udata->inlen));
1850
+ if (rc) {
1851
+ DP_ERR(dev, "Problem copying data from user space\n");
1852
+ return rc;
1853
+ }
1854
+ }
17351855
1736
- if (!qp->srq) {
1737
- /* RQ - read access only (0), dma sync not required (0) */
1738
- rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
1739
- ureq.rq_len, 0, 0, alloc_and_init);
1856
+ if (qedr_qp_has_sq(qp)) {
1857
+ /* SQ - read access only (0) */
1858
+ rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
1859
+ ureq.sq_len, true, 0, alloc_and_init);
1860
+ if (rc)
1861
+ return rc;
1862
+ }
1863
+
1864
+ if (qedr_qp_has_rq(qp)) {
1865
+ /* RQ - read access only (0) */
1866
+ rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
1867
+ ureq.rq_len, true, 0, alloc_and_init);
17401868 if (rc)
17411869 return rc;
17421870 }
@@ -1745,12 +1873,27 @@
17451873 qedr_init_common_qp_in_params(dev, pd, qp, attrs, false, &in_params);
17461874 in_params.qp_handle_lo = ureq.qp_handle_lo;
17471875 in_params.qp_handle_hi = ureq.qp_handle_hi;
1748
- in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1749
- in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1750
- if (!qp->srq) {
1876
+
1877
+ if (qp->qp_type == IB_QPT_XRC_TGT) {
1878
+ struct qedr_xrcd *xrcd = get_qedr_xrcd(attrs->xrcd);
1879
+
1880
+ in_params.xrcd_id = xrcd->xrcd_id;
1881
+ in_params.qp_handle_lo = qp->qp_id;
1882
+ in_params.use_srq = 1;
1883
+ }
1884
+
1885
+ if (qedr_qp_has_sq(qp)) {
1886
+ in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1887
+ in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1888
+ }
1889
+
1890
+ if (qedr_qp_has_rq(qp)) {
17511891 in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
17521892 in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
17531893 }
1894
+
1895
+ if (ctx)
1896
+ SET_FIELD(in_params.flags, QED_ROCE_EDPM_MODE, ctx->edpm_mode);
17541897
17551898 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
17561899 &in_params, &out_params);
@@ -1766,28 +1909,78 @@
17661909 qp->qp_id = out_params.qp_id;
17671910 qp->icid = out_params.icid;
17681911
1769
- rc = qedr_copy_qp_uresp(dev, qp, udata);
1770
- if (rc)
1771
- goto err;
1912
+ if (udata) {
1913
+ rc = qedr_copy_qp_uresp(dev, qp, udata, &uresp);
1914
+ if (rc)
1915
+ goto err;
1916
+ }
17721917
1918
+ /* db offset was calculated in copy_qp_uresp, now set in the user q */
1919
+ if (qedr_qp_has_sq(qp)) {
1920
+ qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
1921
+ qp->sq.max_wr = attrs->cap.max_send_wr;
1922
+ rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
1923
+ &qp->usq.db_rec_data->db_data,
1924
+ DB_REC_WIDTH_32B,
1925
+ DB_REC_USER);
1926
+ if (rc)
1927
+ goto err;
1928
+ }
1929
+
1930
+ if (qedr_qp_has_rq(qp)) {
1931
+ qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
1932
+ qp->rq.max_wr = attrs->cap.max_recv_wr;
1933
+ rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
1934
+ &qp->urq.db_rec_data->db_data,
1935
+ DB_REC_WIDTH_32B,
1936
+ DB_REC_USER);
1937
+ if (rc)
1938
+ goto err;
1939
+ }
1940
+
1941
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1942
+ qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
1943
+
1944
+ /* calculate the db_rec_db2 data since it is constant so no
1945
+ * need to reflect from user
1946
+ */
1947
+ qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
1948
+ qp->urq.db_rec_db2_data.data.value =
1949
+ cpu_to_le16(DQ_TCM_IWARP_POST_RQ_CF_CMD);
1950
+
1951
+ rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
1952
+ &qp->urq.db_rec_db2_data,
1953
+ DB_REC_WIDTH_32B,
1954
+ DB_REC_USER);
1955
+ if (rc)
1956
+ goto err;
1957
+ }
17731958 qedr_qp_user_print(dev, qp);
1774
-
1775
- return 0;
1959
+ return rc;
17761960 err:
17771961 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
17781962 if (rc)
17791963 DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
17801964
17811965 err1:
1782
- qedr_cleanup_user(dev, qp);
1966
+ qedr_cleanup_user(dev, ctx, qp);
17831967 return rc;
17841968 }
17851969
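For user QPs the kernel re-derives the very doorbell address the library will ring (ctx->dpi_addr plus the offset just reported in uresp) and registers it as a DB_REC_USER entry pointing into the shared recovery page; iWARP additionally registers the constant second RQ doorbell. One registration per user doorbell, following this pattern (sketch, error unwinding elided):

	qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
	rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
				  &qp->usq.db_rec_data->db_data,
				  DB_REC_WIDTH_32B, DB_REC_USER);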
1786
-static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
1970
+static int qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
17871971 {
1972
+ int rc;
1973
+
17881974 qp->sq.db = dev->db_addr +
17891975 DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
17901976 qp->sq.db_data.data.icid = qp->icid;
1977
+
1978
+ rc = qedr_db_recovery_add(dev, qp->sq.db,
1979
+ &qp->sq.db_data,
1980
+ DB_REC_WIDTH_32B,
1981
+ DB_REC_KERNEL);
1982
+ if (rc)
1983
+ return rc;
17911984
17921985 qp->rq.db = dev->db_addr +
17931986 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
....@@ -1796,6 +1989,19 @@
17961989 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
17971990 qp->rq.iwarp_db2_data.data.icid = qp->icid;
17981991 qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
1992
+
1993
+ rc = qedr_db_recovery_add(dev, qp->rq.db,
1994
+ &qp->rq.db_data,
1995
+ DB_REC_WIDTH_32B,
1996
+ DB_REC_KERNEL);
1997
+ if (rc)
1998
+ return rc;
1999
+
2000
+ rc = qedr_db_recovery_add(dev, qp->rq.iwarp_db2,
2001
+ &qp->rq.iwarp_db2_data,
2002
+ DB_REC_WIDTH_32B,
2003
+ DB_REC_KERNEL);
2004
+ return rc;
17992005 }
18002006
18012007 static int
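[Editor's note] Both the user-QP path above and this kernel-QP path now pair every doorbell write with qedr_db_recovery_add()/qedr_db_recovery_del(), so the driver can replay the last written value if the device's doorbell FIFO drops writes. The real bookkeeping lives elsewhere in qedr; below is a minimal sketch of the idea with hypothetical names, assuming 32-bit doorbell data as in the DB_REC_WIDTH_32B calls above:

/* Hypothetical doorbell-recovery registry: each entry remembers a
 * doorbell address and a pointer to the data last written to it, so
 * all doorbells can be re-rung after a drop.
 */
struct db_rec_entry {
        struct list_head list;
        void __iomem *db_addr;
        void *db_data;          /* caller keeps this up to date */
};

static LIST_HEAD(db_rec_list);
static DEFINE_SPINLOCK(db_rec_lock);

static int db_rec_add(void __iomem *db_addr, void *db_data)
{
        struct db_rec_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);

        if (!e)
                return -ENOMEM;
        e->db_addr = db_addr;
        e->db_data = db_data;
        spin_lock(&db_rec_lock);
        list_add_tail(&e->list, &db_rec_list);
        spin_unlock(&db_rec_lock);
        return 0;
}

static void db_rec_replay_all(void)
{
        struct db_rec_entry *e;

        spin_lock(&db_rec_lock);
        list_for_each_entry(e, &db_rec_list, list)
                writel(*(u32 *)e->db_data, e->db_addr);
        spin_unlock(&db_rec_lock);
}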
....@@ -1805,29 +2011,28 @@
18052011 u32 n_sq_elems, u32 n_rq_elems)
18062012 {
18072013 struct qed_rdma_create_qp_out_params out_params;
2014
+ struct qed_chain_init_params params = {
2015
+ .mode = QED_CHAIN_MODE_PBL,
2016
+ .cnt_type = QED_CHAIN_CNT_TYPE_U32,
2017
+ };
18082018 int rc;
18092019
1810
- rc = dev->ops->common->chain_alloc(dev->cdev,
1811
- QED_CHAIN_USE_TO_PRODUCE,
1812
- QED_CHAIN_MODE_PBL,
1813
- QED_CHAIN_CNT_TYPE_U32,
1814
- n_sq_elems,
1815
- QEDR_SQE_ELEMENT_SIZE,
1816
- &qp->sq.pbl, NULL);
2020
+ params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
2021
+ params.num_elems = n_sq_elems;
2022
+ params.elem_size = QEDR_SQE_ELEMENT_SIZE;
18172023
2024
+ rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
18182025 if (rc)
18192026 return rc;
18202027
18212028 in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
18222029 in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
18232030
1824
- rc = dev->ops->common->chain_alloc(dev->cdev,
1825
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1826
- QED_CHAIN_MODE_PBL,
1827
- QED_CHAIN_CNT_TYPE_U32,
1828
- n_rq_elems,
1829
- QEDR_RQE_ELEMENT_SIZE,
1830
- &qp->rq.pbl, NULL);
2031
+ params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
2032
+ params.num_elems = n_rq_elems;
2033
+ params.elem_size = QEDR_RQE_ELEMENT_SIZE;
2034
+
2035
+ rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
18312036 if (rc)
18322037 return rc;
18332038
....@@ -1843,8 +2048,7 @@
18432048 qp->qp_id = out_params.qp_id;
18442049 qp->icid = out_params.icid;
18452050
1846
- qedr_set_roce_db_info(dev, qp);
1847
- return rc;
2051
+ return qedr_set_roce_db_info(dev, qp);
18482052 }
18492053
18502054 static int
....@@ -1854,14 +2058,19 @@
18542058 u32 n_sq_elems, u32 n_rq_elems)
18552059 {
18562060 struct qed_rdma_create_qp_out_params out_params;
1857
- struct qed_chain_ext_pbl ext_pbl;
2061
+ struct qed_chain_init_params params = {
2062
+ .mode = QED_CHAIN_MODE_PBL,
2063
+ .cnt_type = QED_CHAIN_CNT_TYPE_U32,
2064
+ };
18582065 int rc;
18592066
18602067 in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
18612068 QEDR_SQE_ELEMENT_SIZE,
2069
+ QED_CHAIN_PAGE_SIZE,
18622070 QED_CHAIN_MODE_PBL);
18632071 in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
18642072 QEDR_RQE_ELEMENT_SIZE,
2073
+ QED_CHAIN_PAGE_SIZE,
18652074 QED_CHAIN_MODE_PBL);
18662075
18672076 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
....@@ -1871,39 +2080,31 @@
18712080 return -EINVAL;
18722081
18732082 /* Now we allocate the chain */
1874
- ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
1875
- ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
18762083
1877
- rc = dev->ops->common->chain_alloc(dev->cdev,
1878
- QED_CHAIN_USE_TO_PRODUCE,
1879
- QED_CHAIN_MODE_PBL,
1880
- QED_CHAIN_CNT_TYPE_U32,
1881
- n_sq_elems,
1882
- QEDR_SQE_ELEMENT_SIZE,
1883
- &qp->sq.pbl, &ext_pbl);
2084
+ params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
2085
+ params.num_elems = n_sq_elems;
2086
+ params.elem_size = QEDR_SQE_ELEMENT_SIZE;
2087
+ params.ext_pbl_virt = out_params.sq_pbl_virt;
2088
+ params.ext_pbl_phys = out_params.sq_pbl_phys;
18842089
2090
+ rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
18852091 if (rc)
18862092 goto err;
18872093
1888
- ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
1889
- ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
2094
+ params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
2095
+ params.num_elems = n_rq_elems;
2096
+ params.elem_size = QEDR_RQE_ELEMENT_SIZE;
2097
+ params.ext_pbl_virt = out_params.rq_pbl_virt;
2098
+ params.ext_pbl_phys = out_params.rq_pbl_phys;
18902099
1891
- rc = dev->ops->common->chain_alloc(dev->cdev,
1892
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1893
- QED_CHAIN_MODE_PBL,
1894
- QED_CHAIN_CNT_TYPE_U32,
1895
- n_rq_elems,
1896
- QEDR_RQE_ELEMENT_SIZE,
1897
- &qp->rq.pbl, &ext_pbl);
1898
-
2100
+ rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
18992101 if (rc)
19002102 goto err;
19012103
19022104 qp->qp_id = out_params.qp_id;
19032105 qp->icid = out_params.icid;
19042106
1905
- qedr_set_iwarp_db_info(dev, qp);
1906
- return rc;
2107
+ return qedr_set_iwarp_db_info(dev, qp);
19072108
19082109 err:
19092110 dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
....@@ -1918,6 +2119,20 @@
19182119
19192120 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
19202121 kfree(qp->rqe_wr_id);
2122
+
2123
+ /* The GSI QP is not registered with the db recovery mechanism, so there is nothing to delete. */
2124
+ if (qp->qp_type == IB_QPT_GSI)
2125
+ return;
2126
+
2127
+ qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
2128
+
2129
+ if (!qp->srq) {
2130
+ qedr_db_recovery_del(dev, qp->rq.db, &qp->rq.db_data);
2131
+
2132
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
2133
+ qedr_db_recovery_del(dev, qp->rq.iwarp_db2,
2134
+ &qp->rq.iwarp_db2_data);
2135
+ }
19212136 }
19222137
19232138 static int qedr_create_kernel_qp(struct qedr_dev *dev,
....@@ -1933,6 +2148,7 @@
19332148 u32 n_sq_entries;
19342149
19352150 memset(&in_params, 0, sizeof(in_params));
2151
+ qp->create_type = QEDR_QP_CREATE_KERNEL;
19362152
19372153 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
19382154 * the ring. The ring should allow at least a single WR, even if the
....@@ -1995,20 +2211,51 @@
19952211 return rc;
19962212 }
19972213
2214
+static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
2215
+ struct ib_udata *udata)
2216
+{
2217
+ struct qedr_ucontext *ctx =
2218
+ rdma_udata_to_drv_context(udata, struct qedr_ucontext,
2219
+ ibucontext);
2220
+ int rc;
2221
+
2222
+ if (qp->qp_type != IB_QPT_GSI) {
2223
+ rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2224
+ if (rc)
2225
+ return rc;
2226
+ }
2227
+
2228
+ if (qp->create_type == QEDR_QP_CREATE_USER)
2229
+ qedr_cleanup_user(dev, ctx, qp);
2230
+ else
2231
+ qedr_cleanup_kernel(dev, qp);
2232
+
2233
+ return 0;
2234
+}
2235
+
19982236 struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
19992237 struct ib_qp_init_attr *attrs,
20002238 struct ib_udata *udata)
20012239 {
2002
- struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2003
- struct qedr_pd *pd = get_qedr_pd(ibpd);
2240
+ struct qedr_xrcd *xrcd = NULL;
2241
+ struct qedr_pd *pd = NULL;
2242
+ struct qedr_dev *dev;
20042243 struct qedr_qp *qp;
20052244 struct ib_qp *ibqp;
20062245 int rc = 0;
20072246
2247
+ if (attrs->qp_type == IB_QPT_XRC_TGT) {
2248
+ xrcd = get_qedr_xrcd(attrs->xrcd);
2249
+ dev = get_qedr_dev(xrcd->ibxrcd.device);
2250
+ } else {
2251
+ pd = get_qedr_pd(ibpd);
2252
+ dev = get_qedr_dev(ibpd->device);
2253
+ }
2254
+
20082255 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
20092256 udata ? "user library" : "kernel", pd);
20102257
2011
- rc = qedr_check_qp_attrs(ibpd, dev, attrs);
2258
+ rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata);
20122259 if (rc)
20132260 return ERR_PTR(rc);
20142261
....@@ -2035,25 +2282,27 @@
20352282 return ibqp;
20362283 }
20372284
2038
- if (udata)
2285
+ if (udata || xrcd)
20392286 rc = qedr_create_user_qp(dev, qp, ibpd, udata, attrs);
20402287 else
20412288 rc = qedr_create_kernel_qp(dev, qp, ibpd, attrs);
20422289
20432290 if (rc)
2044
- goto err;
2291
+ goto out_free_qp;
20452292
20462293 qp->ibqp.qp_num = qp->qp_id;
20472294
20482295 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
2049
- rc = qedr_idr_add(dev, &dev->qpidr, qp, qp->qp_id);
2296
+ rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
20502297 if (rc)
2051
- goto err;
2298
+ goto out_free_qp_resources;
20522299 }
20532300
20542301 return &qp->ibqp;
20552302
2056
-err:
2303
+out_free_qp_resources:
2304
+ qedr_free_qp_resources(dev, qp, udata);
2305
+out_free_qp:
20572306 kfree(qp);
20582307
20592308 return ERR_PTR(-EFAULT);
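[Editor's note] The IDR-to-XArray conversion above relies on xa_insert() returning -EBUSY when the index is already occupied. That is why the destroy path further down calls xa_erase() before the qp_id can be released: otherwise a new QP reusing the id would fail right here. The API in isolation, with hypothetical wrapper names:

#include <linux/xarray.h>

static DEFINE_XARRAY(example_qps);      /* stands in for dev->qps */

/* Track a QP by its id; fails with -EBUSY if the id is still present. */
static int example_track_qp(struct qedr_qp *qp)
{
        return xa_insert(&example_qps, qp->qp_id, qp, GFP_KERNEL);
}

/* Must run before the id is returned to the allocator. */
static void example_untrack_qp(struct qedr_qp *qp)
{
        xa_erase(&example_qps, qp->qp_id);
}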
....@@ -2131,7 +2380,7 @@
21312380 default:
21322381 status = -EINVAL;
21332382 break;
2134
- };
2383
+ }
21352384 break;
21362385 case QED_ROCE_QP_STATE_INIT:
21372386 switch (new_state) {
....@@ -2142,8 +2391,6 @@
21422391
21432392 if (rdma_protocol_roce(&dev->ibdev, 1)) {
21442393 writel(qp->rq.db_data.raw, qp->rq.db);
2145
- /* Make sure write takes effect */
2146
- mmiowb();
21472394 }
21482395 break;
21492396 case QED_ROCE_QP_STATE_ERR:
....@@ -2152,7 +2399,7 @@
21522399 /* Invalid state change. */
21532400 status = -EINVAL;
21542401 break;
2155
- };
2402
+ }
21562403 break;
21572404 case QED_ROCE_QP_STATE_RTR:
21582405 /* RTR->XXX */
....@@ -2165,7 +2412,7 @@
21652412 /* Invalid state change. */
21662413 status = -EINVAL;
21672414 break;
2168
- };
2415
+ }
21692416 break;
21702417 case QED_ROCE_QP_STATE_RTS:
21712418 /* RTS->XXX */
....@@ -2178,7 +2425,7 @@
21782425 /* Invalid state change. */
21792426 status = -EINVAL;
21802427 break;
2181
- };
2428
+ }
21822429 break;
21832430 case QED_ROCE_QP_STATE_SQD:
21842431 /* SQD->XXX */
....@@ -2190,7 +2437,7 @@
21902437 /* Invalid state change. */
21912438 status = -EINVAL;
21922439 break;
2193
- };
2440
+ }
21942441 break;
21952442 case QED_ROCE_QP_STATE_ERR:
21962443 /* ERR->XXX */
....@@ -2208,12 +2455,12 @@
22082455 default:
22092456 status = -EINVAL;
22102457 break;
2211
- };
2458
+ }
22122459 break;
22132460 default:
22142461 status = -EINVAL;
22152462 break;
2216
- };
2463
+ }
22172464
22182465 return status;
22192466 }
....@@ -2241,8 +2488,7 @@
22412488
22422489 if (rdma_protocol_roce(&dev->ibdev, 1)) {
22432490 if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
2244
- ibqp->qp_type, attr_mask,
2245
- IB_LINK_LAYER_ETHERNET)) {
2491
+ ibqp->qp_type, attr_mask)) {
22462492 DP_ERR(dev,
22472493 "modify qp: invalid attribute mask=0x%x specified for\n"
22482494 "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
....@@ -2376,6 +2622,8 @@
23762622 1 << max_t(int, attr->timeout - 8, 0);
23772623 else
23782624 qp_params.ack_timeout = 0;
2625
+
2626
+ qp->timeout = attr->timeout;
23792627 }
23802628
23812629 if (attr_mask & IB_QP_RETRY_CNT) {
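[Editor's note] For reference, the ack_timeout conversion a few lines up follows from the IBTA encoding: the local ACK timeout is 4.096 µs × 2^timeout, and since 4.096 µs × 2^8 ≈ 1.049 ms, the value in whole milliseconds is approximately 1 << (timeout - 8), clamped for timeout ≤ 8. Caching attr->timeout in qp->timeout lets qedr_query_qp() (see the query hunk below) return the spec-encoded value instead of back-converting the firmware one. A sketch of the conversion in isolation, with a hypothetical helper name:

/* Illustrative helper mirroring the conversion above. Example:
 * timeout = 14 encodes 4.096 us * 2^14 = ~67 ms, and
 * 1 << (14 - 8) = 64 ms.
 */
static inline u32 ib_timeout_to_ms(u8 timeout)
{
        return timeout ? 1U << max_t(int, (int)timeout - 8, 0) : 0;
}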
....@@ -2535,7 +2783,7 @@
25352783 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
25362784 rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
25372785 rdma_ah_set_sl(&qp_attr->ah_attr, 0);
2538
- qp_attr->timeout = params.timeout;
2786
+ qp_attr->timeout = qp->timeout;
25392787 qp_attr->rnr_retry = params.rnr_retry;
25402788 qp_attr->retry_cnt = params.retry_cnt;
25412789 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
....@@ -2560,31 +2808,12 @@
25602808 return rc;
25612809 }
25622810
2563
-static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
2564
-{
2565
- int rc = 0;
2566
-
2567
- if (qp->qp_type != IB_QPT_GSI) {
2568
- rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2569
- if (rc)
2570
- return rc;
2571
- }
2572
-
2573
- if (qp->ibqp.uobject && qp->ibqp.uobject->context)
2574
- qedr_cleanup_user(dev, qp);
2575
- else
2576
- qedr_cleanup_kernel(dev, qp);
2577
-
2578
- return 0;
2579
-}
2580
-
2581
-int qedr_destroy_qp(struct ib_qp *ibqp)
2811
+int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
25822812 {
25832813 struct qedr_qp *qp = get_qedr_qp(ibqp);
25842814 struct qedr_dev *dev = qp->dev;
25852815 struct ib_qp_attr attr;
25862816 int attr_mask = 0;
2587
- int rc = 0;
25882817
25892818 DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
25902819 qp, qp->qp_type);
....@@ -2601,57 +2830,64 @@
26012830 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
26022831 }
26032832 } else {
2604
- /* Wait for the connect/accept to complete */
2605
- if (qp->ep) {
2606
- int wait_count = 1;
2833
+ /* If connection establishment started, the WAIT_FOR_CONNECT
2834
+ * bit will be set and we need to wait for the establishment
2835
+ * to complete before destroying the qp.
2836
+ */
2837
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
2838
+ &qp->iwarp_cm_flags))
2839
+ wait_for_completion(&qp->iwarp_cm_comp);
26072840
2608
- while (qp->ep->during_connect) {
2609
- DP_DEBUG(dev, QEDR_MSG_QP,
2610
- "Still in during connect/accept\n");
2611
-
2612
- msleep(100);
2613
- if (wait_count++ > 200) {
2614
- DP_NOTICE(dev,
2615
- "during connect timeout\n");
2616
- break;
2617
- }
2618
- }
2619
- }
2841
+ /* If graceful disconnect started, the WAIT_FOR_DISCONNECT
2842
+ * bit will be on, and we need to wait for the disconnect to
2843
+ * complete before continuing. We can use the same completion,
2844
+ * iwarp_cm_comp, since this is the only place that waits for
2845
+ * this completion and it is sequential. In addition,
2846
+ * disconnect can't occur before the connection is fully
2847
+ * established; therefore, if WAIT_FOR_DISCONNECT is on, it
2848
+ * means WAIT_FOR_CONNECT is also on and the completion for
2849
+ * CONNECT already occurred.
2850
+ */
2851
+ if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
2852
+ &qp->iwarp_cm_flags))
2853
+ wait_for_completion(&qp->iwarp_cm_comp);
26202854 }
26212855
26222856 if (qp->qp_type == IB_QPT_GSI)
26232857 qedr_destroy_gsi_qp(dev);
26242858
2625
- qedr_free_qp_resources(dev, qp);
2859
+ /* We need to remove the entry from the xarray before we release the
2860
+ * qp_id, to avoid a race where the qp_id is reallocated and a
2861
+ * subsequent xa_insert fails.
2862
+ */
2863
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
2864
+ xa_erase(&dev->qps, qp->qp_id);
26262865
2627
- if (atomic_dec_and_test(&qp->refcnt) &&
2628
- rdma_protocol_iwarp(&dev->ibdev, 1)) {
2629
- qedr_idr_remove(dev, &dev->qpidr, qp->qp_id);
2866
+ qedr_free_qp_resources(dev, qp, udata);
2867
+
2868
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
2869
+ qedr_iw_qp_rem_ref(&qp->ibqp);
2870
+ else
26302871 kfree(qp);
2631
- }
2632
- return rc;
2872
+
2873
+ return 0;
26332874 }
26342875
2635
-struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
2636
- struct ib_udata *udata)
2876
+int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
2877
+ struct ib_udata *udata)
26372878 {
2638
- struct qedr_ah *ah;
2879
+ struct qedr_ah *ah = get_qedr_ah(ibah);
26392880
2640
- ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2641
- if (!ah)
2642
- return ERR_PTR(-ENOMEM);
2881
+ rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);
26432882
2644
- rdma_copy_ah_attr(&ah->attr, attr);
2645
-
2646
- return &ah->ibah;
2883
+ return 0;
26472884 }
26482885
2649
-int qedr_destroy_ah(struct ib_ah *ibah)
2886
+int qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
26502887 {
26512888 struct qedr_ah *ah = get_qedr_ah(ibah);
26522889
26532890 rdma_destroy_ah_attr(&ah->attr);
2654
- kfree(ah);
26552891 return 0;
26562892 }
26572893
....@@ -2737,22 +2973,27 @@
27372973
27382974 mr->type = QEDR_MR_USER;
27392975
2740
- mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
2976
+ mr->umem = ib_umem_get(ibpd->device, start, len, acc);
27412977 if (IS_ERR(mr->umem)) {
27422978 rc = -EFAULT;
27432979 goto err0;
27442980 }
27452981
2746
- rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2982
+ rc = init_mr_info(dev, &mr->info,
2983
+ ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE), 1);
27472984 if (rc)
27482985 goto err1;
27492986
27502987 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2751
- &mr->info.pbl_info, mr->umem->page_shift);
2988
+ &mr->info.pbl_info, PAGE_SHIFT);
27522989
27532990 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
27542991 if (rc) {
2755
- DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2992
+ if (rc == -EINVAL)
2993
+ DP_ERR(dev, "Out of MR resources\n");
2994
+ else
2995
+ DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
2996
+
27562997 goto err1;
27572998 }
27582999
....@@ -2769,11 +3010,9 @@
27693010 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
27703011 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
27713012 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2772
- mr->hw_mr.page_size_log = mr->umem->page_shift;
2773
- mr->hw_mr.fbo = ib_umem_offset(mr->umem);
3013
+ mr->hw_mr.page_size_log = PAGE_SHIFT;
27743014 mr->hw_mr.length = len;
27753015 mr->hw_mr.vaddr = usr_addr;
2776
- mr->hw_mr.zbva = false;
27773016 mr->hw_mr.phy_mr = false;
27783017 mr->hw_mr.dma_mr = false;
27793018
....@@ -2801,7 +3040,7 @@
28013040 return ERR_PTR(rc);
28023041 }
28033042
2804
-int qedr_dereg_mr(struct ib_mr *ib_mr)
3043
+int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
28053044 {
28063045 struct qedr_mr *mr = get_qedr_mr(ib_mr);
28073046 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
....@@ -2817,8 +3056,7 @@
28173056 free_mr_info(dev, &mr->info);
28183057
28193058 /* it could be user registered memory. */
2820
- if (mr->umem)
2821
- ib_umem_release(mr->umem);
3059
+ ib_umem_release(mr->umem);
28223060
28233061 kfree(mr);
28243062
....@@ -2850,8 +3088,12 @@
28503088
28513089 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
28523090 if (rc) {
2853
- DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2854
- goto err0;
3091
+ if (rc == -EINVAL)
3092
+ DP_ERR(dev, "Out of MR resources\n");
3093
+ else
3094
+ DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
3095
+
3096
+ goto err1;
28553097 }
28563098
28573099 /* Index only, 18 bit long, lkey = itid << 8 | key */
....@@ -2867,17 +3109,15 @@
28673109 mr->hw_mr.pbl_ptr = 0;
28683110 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
28693111 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2870
- mr->hw_mr.fbo = 0;
28713112 mr->hw_mr.length = 0;
28723113 mr->hw_mr.vaddr = 0;
2873
- mr->hw_mr.zbva = false;
28743114 mr->hw_mr.phy_mr = true;
28753115 mr->hw_mr.dma_mr = false;
28763116
28773117 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
28783118 if (rc) {
28793119 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2880
- goto err1;
3120
+ goto err2;
28813121 }
28823122
28833123 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
....@@ -2886,15 +3126,17 @@
28863126 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
28873127 return mr;
28883128
2889
-err1:
3129
+err2:
28903130 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3131
+err1:
3132
+ qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
28913133 err0:
28923134 kfree(mr);
28933135 return ERR_PTR(rc);
28943136 }
28953137
2896
-struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2897
- enum ib_mr_type mr_type, u32 max_num_sg)
3138
+struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
3139
+ u32 max_num_sg)
28983140 {
28993141 struct qedr_mr *mr;
29003142
....@@ -2982,7 +3224,11 @@
29823224
29833225 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
29843226 if (rc) {
2985
- DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
3227
+ if (rc == -EINVAL)
3228
+ DP_ERR(dev, "Out of MR resources\n");
3229
+ else
3230
+ DP_ERR(dev, "roce alloc tid returned error %d\n", rc);
3231
+
29863232 goto err1;
29873233 }
29883234
....@@ -3411,7 +3657,7 @@
34113657 break;
34123658 case IB_WR_RDMA_READ_WITH_INV:
34133659 SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
3414
- /* fallthrough -- same is identical to RDMA READ */
3660
+ fallthrough; /* handled identically to RDMA READ */
34153661
34163662 case IB_WR_RDMA_READ:
34173663 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
....@@ -3564,9 +3810,6 @@
35643810 smp_wmb();
35653811 writel(qp->sq.db_data.raw, qp->sq.db);
35663812
3567
- /* Make sure write sticks */
3568
- mmiowb();
3569
-
35703813 spin_unlock_irqrestore(&qp->q_lock, flags);
35713814
35723815 return rc;
....@@ -3651,10 +3894,10 @@
36513894 * in first 4 bytes and need to update WQE producer in
36523895 * next 4 bytes.
36533896 */
3654
- srq->hw_srq.virt_prod_pair_addr->sge_prod = hw_srq->sge_prod;
3897
+ srq->hw_srq.virt_prod_pair_addr->sge_prod = cpu_to_le32(hw_srq->sge_prod);
36553898 /* Make sure sge producer is updated first */
36563899 dma_wmb();
3657
- srq->hw_srq.virt_prod_pair_addr->wqe_prod = hw_srq->wqe_prod;
3900
+ srq->hw_srq.virt_prod_pair_addr->wqe_prod = cpu_to_le32(hw_srq->wqe_prod);
36583901
36593902 wr = wr->next;
36603903 }
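[Editor's note] The two cpu_to_le32() fixes above matter because the producer pair lives in DMA memory shared with a little-endian device, and the dma_wmb() between the stores keeps their order device-visible. The publication pattern in isolation, with hypothetical names:

/* Sketch: publish the SGE producer before the WQE producer so the
 * device never observes a WQE producer pointing past valid SGEs.
 */
struct srq_producers {
        __le32 sge_prod;
        __le32 wqe_prod;
};

static void publish_srq_producers(struct srq_producers *p,
                                  u32 sge_prod, u32 wqe_prod)
{
        p->sge_prod = cpu_to_le32(sge_prod);
        dma_wmb();      /* order the two stores as seen by the device */
        p->wqe_prod = cpu_to_le32(wqe_prod);
}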
....@@ -3755,12 +3998,8 @@
37553998
37563999 writel(qp->rq.db_data.raw, qp->rq.db);
37574000
3758
- /* Make sure write sticks */
3759
- mmiowb();
3760
-
37614001 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
37624002 writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
3763
- mmiowb(); /* for second doorbell */
37644003 }
37654004
37664005 wr = wr->next;
....@@ -4252,19 +4491,10 @@
42524491 }
42534492
42544493 int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
4255
- u8 port_num,
4256
- const struct ib_wc *in_wc,
4257
- const struct ib_grh *in_grh,
4258
- const struct ib_mad_hdr *mad_hdr,
4259
- size_t in_mad_size, struct ib_mad_hdr *out_mad,
4260
- size_t *out_mad_size, u16 *out_mad_pkey_index)
4494
+ u8 port_num, const struct ib_wc *in_wc,
4495
+ const struct ib_grh *in_grh, const struct ib_mad *in,
4496
+ struct ib_mad *out_mad, size_t *out_mad_size,
4497
+ u16 *out_mad_pkey_index)
42614498 {
4262
- struct qedr_dev *dev = get_qedr_dev(ibdev);
4263
-
4264
- DP_DEBUG(dev, QEDR_MSG_GSI,
4265
- "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
4266
- mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
4267
- mad_hdr->class_specific, mad_hdr->class_version,
4268
- mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
42694499 return IB_MAD_RESULT_SUCCESS;
42704500 }