@@ -48 +48 @@
 #include <rdma/ib_addr.h>
 #include <rdma/ib_mad.h>
 #include <rdma/ib_cache.h>
+#include <rdma/uverbs_ioctl.h>

 #include "bnxt_ulp.h"

@@ -118 +119 @@
 }

 /* Device */
-struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
-{
-	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
-	struct net_device *netdev = NULL;
-
-	rcu_read_lock();
-	if (rdev)
-		netdev = rdev->netdev;
-	if (netdev)
-		dev_hold(netdev);
-
-	rcu_read_unlock();
-	return netdev;
-}
-
 int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata)
@@ -191 +177 @@
 	ib_attr->max_total_mcast_qp_attach = 0;
 	ib_attr->max_ah = dev_attr->max_ah;

-	ib_attr->max_fmr = 0;
-	ib_attr->max_map_per_fmr = 0;
-
 	ib_attr->max_srq = dev_attr->max_srq;
 	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
 	ib_attr->max_srq_sge = dev_attr->max_srq_sges;
@@ -202 +185 @@

 	ib_attr->max_pkeys = 1;
 	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
-	return 0;
-}
-
-int bnxt_re_modify_device(struct ib_device *ibdev,
-			  int device_modify_mask,
-			  struct ib_device_modify *device_modify)
-{
-	switch (device_modify_mask) {
-	case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
-		/* Modify the GUID requires the modification of the GID table */
-		/* GUID should be made as READ-ONLY */
-		break;
-	case IB_DEVICE_MODIFY_NODE_DESC:
-		/* Node Desc should be made as READ-ONLY */
-		break;
-	default:
-		break;
-	}
 	return 0;
 }

@@ -234 +199 @@

 	if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
 		port_attr->state = IB_PORT_ACTIVE;
-		port_attr->phys_state = 5;
+		port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
 	} else {
 		port_attr->state = IB_PORT_DOWN;
-		port_attr->phys_state = 3;
+		port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
 	}
 	port_attr->max_mtu = IB_MTU_4096;
 	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
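The two magic numbers replaced above are the IBTA port physical-state encodings (LinkUp is 5, Disabled is 3); the hunk swaps them for the named constants from include/rdma/ib_verbs.h. A tiny illustrative helper with a hypothetical name, not part of this patch:

```c
/* Illustrative only: IB_PORT_PHYS_STATE_LINK_UP == 5 and
 * IB_PORT_PHYS_STATE_DISABLED == 3, matching the raw values the old
 * code hard-coded.
 */
static u8 example_phys_state(bool carrier_ok)
{
	return carrier_ok ? IB_PORT_PHYS_STATE_LINK_UP :
			    IB_PORT_PHYS_STATE_DISABLED;
}
```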
@@ -322 +287 @@
 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
 	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
 	struct bnxt_qplib_gid *gid_to_del;
+	u16 vlan_id = 0xFFFF;

 	/* Delete the entry from the hardware */
 	ctx = *context;
@@ -331 +297 @@
 	if (sgid_tbl && sgid_tbl->active) {
 		if (ctx->idx >= sgid_tbl->max)
 			return -EINVAL;
-		gid_to_del = &sgid_tbl->tbl[ctx->idx];
+		gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
+		vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
 		/* DEL_GID is called in WQ context(netdevice_event_work_handler)
 		 * or via the ib_unregister_device path. In the former case QP1
 		 * may not be destroyed yet, in which case just return as FW
@@ -342 +309 @@
 		 */
 		if (ctx->idx == 0 &&
 		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
-		    ctx->refcnt == 1 && rdev->qp1_sqp) {
-			dev_dbg(rdev_to_dev(rdev),
-				"Trying to delete GID0 while QP1 is alive\n");
+		    ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
+			ibdev_dbg(&rdev->ibdev,
+				  "Trying to delete GID0 while QP1 is alive\n");
 			return -EFAULT;
 		}
 		ctx->refcnt--;
 		if (!ctx->refcnt) {
-			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
+			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
+						 vlan_id, true);
 			if (rc) {
-				dev_err(rdev_to_dev(rdev),
-					"Failed to remove GID: %#x", rc);
+				ibdev_err(&rdev->ibdev,
+					  "Failed to remove GID: %#x", rc);
 			} else {
 				ctx_tbl = sgid_tbl->ctx;
 				ctx_tbl[ctx->idx] = NULL;
@@ -374 +342 @@
 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
 	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;

-	if ((attr->ndev) && is_vlan_dev(attr->ndev))
-		vlan_id = vlan_dev_vlan_id(attr->ndev);
+	rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
+	if (rc)
+		return rc;

 	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
				 rdev->qplib_res.netdev->dev_addr,
@@ -388 +357 @@
 	}

 	if (rc < 0) {
-		dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
+		ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
 		return rc;
 	}

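In the add_gid hunk above, the open-coded is_vlan_dev() check is replaced by rdma_read_gid_l2_fields(), which also resolves VLAN information for stacked upper devices. A minimal sketch of the helper's contract, assuming the include/rdma/ib_cache.h signature; the wrapper function below is hypothetical:

```c
#include <rdma/ib_cache.h>

/* Hypothetical wrapper: on success *vlan_id holds the tag, or 0xFFFF
 * when the GID's netdev carries untagged traffic; the smac argument
 * may be NULL if the caller does not need the source MAC.
 */
static int example_gid_l2(const struct ib_gid_attr *attr, u16 *vlan_id)
{
	return rdma_read_gid_l2_fields(attr, vlan_id, NULL);
}
```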
@@ -451 +420 @@
 	wqe.bind.r_key = fence->bind_rkey;
 	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);

-	dev_dbg(rdev_to_dev(qp->rdev),
-		"Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
+	ibdev_dbg(&qp->rdev->ibdev,
+		  "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
		wqe.bind.r_key, qp->qplib_qp.id, pd);
 	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
 	if (rc) {
-		dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
+		ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
 		return rc;
 	}
 	bnxt_qplib_post_send_db(&qp->qplib_qp);
@@ -500 +469 @@
 	struct bnxt_re_mr *mr = NULL;
 	dma_addr_t dma_addr = 0;
 	struct ib_mw *mw;
-	u64 pbl_tbl;
 	int rc;

 	dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
				  DMA_BIDIRECTIONAL);
 	rc = dma_mapping_error(dev, dma_addr);
 	if (rc) {
-		dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
+		ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
 		rc = -EIO;
 		fence->dma_addr = 0;
 		goto fail;
@@ -527 +495 @@
 	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
 	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
 	if (rc) {
-		dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
+		ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
 		goto fail;
 	}

@@ -535 +503 @@
 	mr->ib_mr.lkey = mr->qplib_mr.lkey;
 	mr->qplib_mr.va = (u64)(unsigned long)fence->va;
 	mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
-	pbl_tbl = dma_addr;
-	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
-			       BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
+	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
+			       BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE);
 	if (rc) {
-		dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
+		ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
 		goto fail;
 	}
 	mr->ib_mr.rkey = mr->qplib_mr.rkey;
@@ -547 +514 @@
 	/* Create a fence MW only for kernel consumers */
 	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
 	if (IS_ERR(mw)) {
-		dev_err(rdev_to_dev(rdev),
-			"Failed to create fence-MW for PD: %p\n", pd);
+		ibdev_err(&rdev->ibdev,
+			  "Failed to create fence-MW for PD: %p\n", pd);
 		rc = PTR_ERR(mw);
 		goto fail;
 	}
@@ -563 +530 @@
 }

 /* Protection Domains */
-int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
+int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
 {
 	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
 	struct bnxt_re_dev *rdev = pd->rdev;
-	int rc;

 	bnxt_re_destroy_fence_mr(pd);

-	if (pd->qplib_pd.id) {
-		rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
-					   &rdev->qplib_res.pd_tbl,
-					   &pd->qplib_pd);
-		if (rc)
-			dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
-	}
-
-	kfree(pd);
+	if (pd->qplib_pd.id)
+		bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
+				      &pd->qplib_pd);
 	return 0;
 }

-struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
-			       struct ib_ucontext *ucontext,
-			       struct ib_udata *udata)
+int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
+	struct ib_device *ibdev = ibpd->device;
 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
-	struct bnxt_re_ucontext *ucntx = container_of(ucontext,
-						      struct bnxt_re_ucontext,
-						      ib_uctx);
-	struct bnxt_re_pd *pd;
+	struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
+		udata, struct bnxt_re_ucontext, ib_uctx);
+	struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
 	int rc;
-
-	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
-	if (!pd)
-		return ERR_PTR(-ENOMEM);

 	pd->rdev = rdev;
 	if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
-		dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
+		ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
 		rc = -ENOMEM;
 		goto fail;
 	}
@@ -627 +581 @@

 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
 		if (rc) {
-			dev_err(rdev_to_dev(rdev),
-				"Failed to copy user response\n");
+			ibdev_err(&rdev->ibdev,
+				  "Failed to copy user response\n");
 			goto dbfail;
 		}
 	}

 	if (!udata)
 		if (bnxt_re_create_fence_mr(pd))
-			dev_warn(rdev_to_dev(rdev),
-				 "Failed to create Fence-MR\n");
-	return &pd->ib_pd;
+			ibdev_warn(&rdev->ibdev,
+				   "Failed to create Fence-MR\n");
+	return 0;
 dbfail:
-	(void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
-				    &pd->qplib_pd);
+	bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
+			      &pd->qplib_pd);
 fail:
-	kfree(pd);
-	return ERR_PTR(rc);
+	return rc;
 }

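The alloc_pd/dealloc_pd rework above follows the rdma core's move to core-allocated verbs objects: the core allocates the driver wrapper using a size the driver registers (typically via INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd) in its ib_device_ops; that registration lives outside this excerpt), so the callbacks stop kzalloc/kfree-ing and return an errno instead of a pointer. A sketch of the resulting shape, with a hypothetical function name:

```c
/* Sketch only: the core has already allocated sizeof(struct bnxt_re_pd)
 * and passes the embedded ib_pd; the driver just initializes its part.
 */
static int example_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);

	pd->rdev = to_bnxt_re_dev(ibpd->device, ibdev);
	return 0;	/* on error, the core frees the object itself */
}
```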
 /* Address Handles */
-int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
+int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
 {
 	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
 	struct bnxt_re_dev *rdev = ah->rdev;
-	int rc;

-	rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
-	if (rc) {
-		dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
-		return rc;
-	}
-	kfree(ah);
+	bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
+			      !(flags & RDMA_DESTROY_AH_SLEEPABLE));
 	return 0;
 }

-struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
-				struct rdma_ah_attr *ah_attr,
-				struct ib_udata *udata)
+static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
 {
-	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
-	struct bnxt_re_dev *rdev = pd->rdev;
-	struct bnxt_re_ah *ah;
-	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
-	int rc;
 	u8 nw_type;

-	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
-		dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
-		return ERR_PTR(-EINVAL);
+	switch (ntype) {
+	case RDMA_NETWORK_IPV4:
+		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
+		break;
+	case RDMA_NETWORK_IPV6:
+		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
+		break;
+	default:
+		nw_type = CMDQ_CREATE_AH_TYPE_V1;
+		break;
 	}
-	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
-	if (!ah)
-		return ERR_PTR(-ENOMEM);
+	return nw_type;
+}
+
+int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
+		      struct ib_udata *udata)
+{
+	struct ib_pd *ib_pd = ib_ah->pd;
+	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
+	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+	struct bnxt_re_dev *rdev = pd->rdev;
+	const struct ib_gid_attr *sgid_attr;
+	struct bnxt_re_gid_ctx *ctx;
+	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
+	u8 nw_type;
+	int rc;
+
+	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
+		ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
+		return -EINVAL;
+	}

 	ah->rdev = rdev;
 	ah->qplib_ah.pd = &pd->qplib_pd;
@@ -687 +653 @@
 	/* Supply the configuration for the HW */
 	memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
	       sizeof(union ib_gid));
-	/*
-	 * If RoCE V2 is enabled, stack will have two entries for
-	 * each GID entry. Avoiding this duplicte entry in HW. Dividing
-	 * the GID index by 2 for RoCE V2
+	sgid_attr = grh->sgid_attr;
+	/* Get the HW context of the GID. The reference
+	 * of GID table entry is already taken by the caller.
 	 */
-	ah->qplib_ah.sgid_index = grh->sgid_index / 2;
+	ctx = rdma_read_gid_hw_context(sgid_attr);
+	ah->qplib_ah.sgid_index = ctx->idx;
 	ah->qplib_ah.host_sgid_index = grh->sgid_index;
 	ah->qplib_ah.traffic_class = grh->traffic_class;
 	ah->qplib_ah.flow_label = grh->flow_label;
 	ah->qplib_ah.hop_limit = grh->hop_limit;
 	ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
-	if (ib_pd->uobject &&
-	    !rdma_is_multicast_addr((struct in6_addr *)
-				    grh->dgid.raw) &&
-	    !rdma_link_local_addr((struct in6_addr *)
-				  grh->dgid.raw)) {
-		const struct ib_gid_attr *sgid_attr;

-		sgid_attr = grh->sgid_attr;
-		/* Get network header type for this GID */
-		nw_type = rdma_gid_attr_network_type(sgid_attr);
-		switch (nw_type) {
-		case RDMA_NETWORK_IPV4:
-			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
-			break;
-		case RDMA_NETWORK_IPV6:
-			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
-			break;
-		default:
-			ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
-			break;
-		}
-	}
+	/* Get network header type for this GID */
+	nw_type = rdma_gid_attr_network_type(sgid_attr);
+	ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);

 	memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
-	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
+	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
+				  !(init_attr->flags &
+				    RDMA_CREATE_AH_SLEEPABLE));
 	if (rc) {
-		dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
-		goto fail;
+		ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
+		return rc;
 	}

 	/* Write AVID to shared page. */
-	if (ib_pd->uobject) {
-		struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
-		struct bnxt_re_ucontext *uctx;
+	if (udata) {
+		struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
+			udata, struct bnxt_re_ucontext, ib_uctx);
 		unsigned long flag;
 		u32 *wrptr;

-		uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
 		spin_lock_irqsave(&uctx->sh_lock, flag);
 		wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
 		*wrptr = ah->qplib_ah.id;
@@ -743 +692 @@
 		spin_unlock_irqrestore(&uctx->sh_lock, flag);
 	}

-	return &ah->ib_ah;
-
-fail:
-	kfree(ah);
-	return ERR_PTR(rc);
+	return 0;
 }

 int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
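The new u32 flags parameter in the AH create/destroy prototypes threads the caller's context down to the firmware path: AH verbs may run in atomic context, and when RDMA_CREATE_AH_SLEEPABLE / RDMA_DESTROY_AH_SLEEPABLE is clear the driver asks qplib to busy-poll for the command completion rather than sleep (the inverted bit becomes qplib's "block" argument). A sketch of the test, with a hypothetical helper name:

```c
/* Sketch: decide whether the FW command must poll instead of sleep. */
static bool example_must_poll(u32 flags)
{
	/* sleepable bit clear => caller may be atomic => busy-poll FW */
	return !(flags & RDMA_DESTROY_AH_SLEEPABLE);
}
```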
@@ -796 +741 @@
 	spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
 }

+static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
+{
+	struct bnxt_re_qp *gsi_sqp;
+	struct bnxt_re_ah *gsi_sah;
+	struct bnxt_re_dev *rdev;
+	int rc = 0;
+
+	rdev = qp->rdev;
+	gsi_sqp = rdev->gsi_ctx.gsi_sqp;
+	gsi_sah = rdev->gsi_ctx.gsi_sah;
+
+	ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
+	bnxt_qplib_destroy_ah(&rdev->qplib_res,
+			      &gsi_sah->qplib_ah,
+			      true);
+	bnxt_qplib_clean_qp(&qp->qplib_qp);
+
+	ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
+	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
+	if (rc) {
+		ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
+		goto fail;
+	}
+	bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
+
+	/* remove from active qp list */
+	mutex_lock(&rdev->qp_lock);
+	list_del(&gsi_sqp->list);
+	mutex_unlock(&rdev->qp_lock);
+	atomic_dec(&rdev->qp_count);
+
+	kfree(rdev->gsi_ctx.sqp_tbl);
+	kfree(gsi_sah);
+	kfree(gsi_sqp);
+	rdev->gsi_ctx.gsi_sqp = NULL;
+	rdev->gsi_ctx.gsi_sah = NULL;
+	rdev->gsi_ctx.sqp_tbl = NULL;
+
+	return 0;
+fail:
+	return rc;
+}
+
 /* Queue Pairs */
-int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
+int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 {
 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
 	struct bnxt_re_dev *rdev = qp->rdev;
-	int rc;
 	unsigned int flags;
+	int rc;

 	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
+
 	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
 	if (rc) {
-		dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
+		ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
 		return rc;
 	}

-	flags = bnxt_re_lock_cqs(qp);
-	bnxt_qplib_clean_qp(&qp->qplib_qp);
-	bnxt_re_unlock_cqs(qp, flags);
-	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
-
-	if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
-		rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
-					   &rdev->sqp_ah->qplib_ah);
-		if (rc) {
-			dev_err(rdev_to_dev(rdev),
-				"Failed to destroy HW AH for shadow QP");
-			return rc;
-		}
-
+	if (rdma_is_kernel_res(&qp->ib_qp.res)) {
+		flags = bnxt_re_lock_cqs(qp);
 		bnxt_qplib_clean_qp(&qp->qplib_qp);
-		rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
-					   &rdev->qp1_sqp->qplib_qp);
-		if (rc) {
-			dev_err(rdev_to_dev(rdev),
-				"Failed to destroy Shadow QP");
-			return rc;
-		}
-		bnxt_qplib_free_qp_res(&rdev->qplib_res,
-				       &rdev->qp1_sqp->qplib_qp);
-		mutex_lock(&rdev->qp_lock);
-		list_del(&rdev->qp1_sqp->list);
-		atomic_dec(&rdev->qp_count);
-		mutex_unlock(&rdev->qp_lock);
-
-		kfree(rdev->sqp_ah);
-		kfree(rdev->qp1_sqp);
-		rdev->qp1_sqp = NULL;
-		rdev->sqp_ah = NULL;
+		bnxt_re_unlock_cqs(qp, flags);
 	}

-	if (!IS_ERR_OR_NULL(qp->rumem))
-		ib_umem_release(qp->rumem);
-	if (!IS_ERR_OR_NULL(qp->sumem))
-		ib_umem_release(qp->sumem);
+	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
+
+	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
+		rc = bnxt_re_destroy_gsi_sqp(qp);
+		if (rc)
+			goto sh_fail;
+	}

 	mutex_lock(&rdev->qp_lock);
 	list_del(&qp->list);
-	atomic_dec(&rdev->qp_count);
 	mutex_unlock(&rdev->qp_lock);
+	atomic_dec(&rdev->qp_count);
+
+	ib_umem_release(qp->rumem);
+	ib_umem_release(qp->sumem);
+
 	kfree(qp);
 	return 0;
+sh_fail:
+	return rc;
 }

 static u8 __from_ib_qp_type(enum ib_qp_type type)
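A small behavioral note on the rewritten teardown: the old IS_ERR_OR_NULL() guards are gone because ib_umem_release() is a no-op for NULL and the driver no longer stores error pointers in qp->rumem / qp->sumem (mapping errors are unwound before assignment). Sketch:

```c
/* ib_umem_release() checks for NULL internally, so unconditional
 * release is safe once error pointers are kept out of the field.
 */
ib_umem_release(qp->rumem);	/* no-op when qp->rumem == NULL */
ib_umem_release(qp->sumem);
```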
@@ -873 +842 @@
 }

+static u16 bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp *qplqp,
+				   int rsge, int max)
+{
+	if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
+		rsge = max;
+	return bnxt_re_get_rwqe_size(rsge);
+}
+
+static u16 bnxt_re_get_wqe_size(int ilsize, int nsge)
+{
+	u16 wqe_size, calc_ils;
+
+	wqe_size = bnxt_re_get_swqe_size(nsge);
+	if (ilsize) {
+		calc_ils = sizeof(struct sq_send_hdr) + ilsize;
+		wqe_size = max_t(u16, calc_ils, wqe_size);
+		wqe_size = ALIGN(wqe_size, sizeof(struct sq_send_hdr));
+	}
+	return wqe_size;
+}
+
+static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
+				   struct ib_qp_init_attr *init_attr)
+{
+	struct bnxt_qplib_dev_attr *dev_attr;
+	struct bnxt_qplib_qp *qplqp;
+	struct bnxt_re_dev *rdev;
+	struct bnxt_qplib_q *sq;
+	int align, ilsize;
+
+	rdev = qp->rdev;
+	qplqp = &qp->qplib_qp;
+	sq = &qplqp->sq;
+	dev_attr = &rdev->dev_attr;
+
+	align = sizeof(struct sq_send_hdr);
+	ilsize = ALIGN(init_attr->cap.max_inline_data, align);
+
+	sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
+	if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
+		return -EINVAL;
+	/* For gen p4 and gen p5 backward compatibility mode
+	 * wqe size is fixed to 128 bytes
+	 */
+	if (sq->wqe_size < bnxt_re_get_swqe_size(dev_attr->max_qp_sges) &&
+	    qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
+		sq->wqe_size = bnxt_re_get_swqe_size(dev_attr->max_qp_sges);
+
+	if (init_attr->cap.max_inline_data) {
+		qplqp->max_inline_data = sq->wqe_size -
+					 sizeof(struct sq_send_hdr);
+		init_attr->cap.max_inline_data = qplqp->max_inline_data;
+		if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
+			sq->max_sge = qplqp->max_inline_data /
+				      sizeof(struct sq_sge);
+	}
+
+	return 0;
+}
+
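Since bnxt_re_get_wqe_size() drives both the inline-data capacity and the SGE count above, a worked example may help. It assumes the 32-byte sq_send_hdr and 16-byte sq_sge slot sizes implied by bnxt_re_get_swqe_size(); those sizes are an assumption here, not stated in this hunk:

```c
/*
 * Worked example (assumed sizes: sizeof(struct sq_send_hdr) == 32,
 * sizeof(struct sq_sge) == 16):
 *
 *   cap.max_inline_data = 64, sq->max_sge = 2
 *   ilsize   = ALIGN(64, 32)              = 64
 *   wqe_size = bnxt_re_get_swqe_size(2)   = 32 + 2 * 16 = 64
 *   calc_ils = 32 + 64                    = 96
 *   wqe_size = ALIGN(max(96, 64), 32)     = 96
 *
 * In BNXT_QPLIB_WQE_MODE_STATIC the size is then raised to the fixed
 * 128-byte slot (32 + 6 * 16), so max_inline_data is reported back as
 * 128 - 32 = 96 and max_sge as 96 / 16 = 6.
 */
```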
 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
				struct bnxt_re_qp *qp, struct ib_udata *udata)
 {
+	struct bnxt_qplib_qp *qplib_qp;
+	struct bnxt_re_ucontext *cntx;
 	struct bnxt_re_qp_req ureq;
-	struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
+	int bytes = 0, psn_sz;
 	struct ib_umem *umem;
-	int bytes = 0;
-	struct ib_ucontext *context = pd->ib_pd.uobject->context;
-	struct bnxt_re_ucontext *cntx = container_of(context,
-						     struct bnxt_re_ucontext,
-						     ib_uctx);
+	int psn_nume;
+
+	qplib_qp = &qp->qplib_qp;
+	cntx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
+					 ib_uctx);
 	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
 		return -EFAULT;

-	bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
+	bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
 	/* Consider mapping PSN search memory only for RC QPs. */
-	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
-		bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
+	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
+		psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
+					sizeof(struct sq_psn_search_ext) :
+					sizeof(struct sq_psn_search);
+		psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
+			    qplib_qp->sq.max_wqe :
+			    ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
+			     sizeof(struct bnxt_qplib_sge));
+		bytes += (psn_nume * psn_sz);
+	}
+
 	bytes = PAGE_ALIGN(bytes);
-	umem = ib_umem_get(context, ureq.qpsva, bytes,
-			   IB_ACCESS_LOCAL_WRITE, 1);
+	umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes,
+			   IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(umem))
 		return PTR_ERR(umem);

 	qp->sumem = umem;
-	qplib_qp->sq.sglist = umem->sg_head.sgl;
-	qplib_qp->sq.nmap = umem->nmap;
+	qplib_qp->sq.sg_info.umem = umem;
+	qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
+	qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
 	qplib_qp->qp_handle = ureq.qp_handle;

 	if (!qp->qplib_qp.srq) {
-		bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+		bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
 		bytes = PAGE_ALIGN(bytes);
-		umem = ib_umem_get(context, ureq.qprva, bytes,
-				   IB_ACCESS_LOCAL_WRITE, 1);
+		umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
+				   IB_ACCESS_LOCAL_WRITE);
 		if (IS_ERR(umem))
 			goto rqfail;
 		qp->rumem = umem;
-		qplib_qp->rq.sglist = umem->sg_head.sgl;
-		qplib_qp->rq.nmap = umem->nmap;
+		qplib_qp->rq.sg_info.umem = umem;
+		qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
+		qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
 	}

 	qplib_qp->dpi = &cntx->dpi;
@@ -919 +961 @@
 rqfail:
 	ib_umem_release(qp->sumem);
 	qp->sumem = NULL;
-	qplib_qp->sq.sglist = NULL;
-	qplib_qp->sq.nmap = 0;
+	memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));

 	return PTR_ERR(umem);
 }
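bnxt_re_init_user_qp() above also reflects two core API changes: ib_umem_get() now takes the ib_device rather than a ucontext and lost its trailing dmasync argument, and the pinned pages are handed to qplib through a bnxt_qplib_sg_info instead of raw sglist/nmap fields. A minimal sketch of the new pattern, with a hypothetical helper and queue argument:

```c
/* Sketch, assuming ib_umem_get(struct ib_device *, unsigned long addr,
 * size_t size, int access) as used in the hunk above.
 */
static int example_map_queue(struct bnxt_re_dev *rdev,
			     struct bnxt_qplib_q *q, u64 uva, u32 bytes)
{
	struct ib_umem *umem;

	umem = ib_umem_get(&rdev->ibdev, uva, PAGE_ALIGN(bytes),
			   IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem))
		return PTR_ERR(umem);	/* ERR_PTR, never NULL */

	q->sg_info.umem = umem;
	q->sg_info.pgsize = PAGE_SIZE;
	q->sg_info.pgshft = PAGE_SHIFT;
	return 0;
}
```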
@@ -958 +999 @@
 	/* Have DMAC same as SMAC */
 	ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);

-	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
+	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
 	if (rc) {
-		dev_err(rdev_to_dev(rdev),
-			"Failed to allocate HW AH for Shadow QP");
+		ibdev_err(&rdev->ibdev,
+			  "Failed to allocate HW AH for Shadow QP");
 		goto fail;
 	}

@@ -998 +1039 @@
 	qp->qplib_qp.sig_type = true;

 	/* Shadow QP SQ depth should be same as QP1 RQ depth */
+	qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
 	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
 	qp->qplib_qp.sq.max_sge = 2;
 	/* Q full delta can be 1 since it is internal QP */
 	qp->qplib_qp.sq.q_full_delta = 1;
+	qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;
+	qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT;

 	qp->qplib_qp.scq = qp1_qp->scq;
 	qp->qplib_qp.rcq = qp1_qp->rcq;

+	qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
 	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
 	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
 	/* Q full delta can be 1 since it is internal QP */
 	qp->qplib_qp.rq.q_full_delta = 1;
+	qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
+	qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT;

 	qp->qplib_qp.mtu = qp1_qp->mtu;

@@ -1020 +1067 @@
 	rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
 	if (rc)
 		goto fail;
-
-	rdev->sqp_id = qp->qplib_qp.id;

 	spin_lock_init(&qp->sq_lock);
 	INIT_LIST_HEAD(&qp->list);
@@ -1035 +1080 @@
 	return NULL;
 }

| 1083 | +static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp, |
---|
| 1084 | + struct ib_qp_init_attr *init_attr) |
---|
| 1085 | +{ |
---|
| 1086 | + struct bnxt_qplib_dev_attr *dev_attr; |
---|
| 1087 | + struct bnxt_qplib_qp *qplqp; |
---|
| 1088 | + struct bnxt_re_dev *rdev; |
---|
| 1089 | + struct bnxt_qplib_q *rq; |
---|
| 1090 | + int entries; |
---|
| 1091 | + |
---|
| 1092 | + rdev = qp->rdev; |
---|
| 1093 | + qplqp = &qp->qplib_qp; |
---|
| 1094 | + rq = &qplqp->rq; |
---|
| 1095 | + dev_attr = &rdev->dev_attr; |
---|
| 1096 | + |
---|
| 1097 | + if (init_attr->srq) { |
---|
| 1098 | + struct bnxt_re_srq *srq; |
---|
| 1099 | + |
---|
| 1100 | + srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq); |
---|
| 1101 | + if (!srq) { |
---|
| 1102 | + ibdev_err(&rdev->ibdev, "SRQ not found"); |
---|
| 1103 | + return -EINVAL; |
---|
| 1104 | + } |
---|
| 1105 | + qplqp->srq = &srq->qplib_srq; |
---|
| 1106 | + rq->max_wqe = 0; |
---|
| 1107 | + } else { |
---|
| 1108 | + rq->max_sge = init_attr->cap.max_recv_sge; |
---|
| 1109 | + if (rq->max_sge > dev_attr->max_qp_sges) |
---|
| 1110 | + rq->max_sge = dev_attr->max_qp_sges; |
---|
| 1111 | + init_attr->cap.max_recv_sge = rq->max_sge; |
---|
| 1112 | + rq->wqe_size = bnxt_re_setup_rwqe_size(qplqp, rq->max_sge, |
---|
| 1113 | + dev_attr->max_qp_sges); |
---|
| 1114 | + /* Allocate 1 more than what's provided so posting max doesn't |
---|
| 1115 | + * mean empty. |
---|
| 1116 | + */ |
---|
| 1117 | + entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1); |
---|
| 1118 | + rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); |
---|
| 1119 | + rq->q_full_delta = 0; |
---|
| 1120 | + rq->sg_info.pgsize = PAGE_SIZE; |
---|
| 1121 | + rq->sg_info.pgshft = PAGE_SHIFT; |
---|
| 1122 | + } |
---|
| 1123 | + |
---|
| 1124 | + return 0; |
---|
| 1125 | +} |
---|
| 1126 | + |
---|
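The rounding in the non-SRQ branch is easiest to see with numbers:

```c
/*
 * Example: cap.max_recv_wr = 256
 *   entries     = roundup_pow_of_two(256 + 1) = 512
 *   rq->max_wqe = min(512, dev_attr->max_qp_wqes + 1)
 *
 * The +1 keeps a completely full ring distinguishable from an empty
 * one, so the consumer really can post all 256 requested WQEs.
 */
```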
+static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
+{
+	struct bnxt_qplib_dev_attr *dev_attr;
+	struct bnxt_qplib_qp *qplqp;
+	struct bnxt_re_dev *rdev;
+
+	rdev = qp->rdev;
+	qplqp = &qp->qplib_qp;
+	dev_attr = &rdev->dev_attr;
+
+	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
+		qplqp->rq.max_sge = dev_attr->max_qp_sges;
+		if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
+			qplqp->rq.max_sge = dev_attr->max_qp_sges;
+		qplqp->rq.max_sge = 6;
+	}
+}
+
| 1144 | + |
---|
| 1145 | +static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp, |
---|
| 1146 | + struct ib_qp_init_attr *init_attr, |
---|
| 1147 | + struct ib_udata *udata) |
---|
| 1148 | +{ |
---|
| 1149 | + struct bnxt_qplib_dev_attr *dev_attr; |
---|
| 1150 | + struct bnxt_qplib_qp *qplqp; |
---|
| 1151 | + struct bnxt_re_dev *rdev; |
---|
| 1152 | + struct bnxt_qplib_q *sq; |
---|
| 1153 | + int entries; |
---|
| 1154 | + int diff; |
---|
| 1155 | + int rc; |
---|
| 1156 | + |
---|
| 1157 | + rdev = qp->rdev; |
---|
| 1158 | + qplqp = &qp->qplib_qp; |
---|
| 1159 | + sq = &qplqp->sq; |
---|
| 1160 | + dev_attr = &rdev->dev_attr; |
---|
| 1161 | + |
---|
| 1162 | + sq->max_sge = init_attr->cap.max_send_sge; |
---|
| 1163 | + if (sq->max_sge > dev_attr->max_qp_sges) { |
---|
| 1164 | + sq->max_sge = dev_attr->max_qp_sges; |
---|
| 1165 | + init_attr->cap.max_send_sge = sq->max_sge; |
---|
| 1166 | + } |
---|
| 1167 | + |
---|
| 1168 | + rc = bnxt_re_setup_swqe_size(qp, init_attr); |
---|
| 1169 | + if (rc) |
---|
| 1170 | + return rc; |
---|
| 1171 | + |
---|
| 1172 | + entries = init_attr->cap.max_send_wr; |
---|
| 1173 | + /* Allocate 128 + 1 more than what's provided */ |
---|
| 1174 | + diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ? |
---|
| 1175 | + 0 : BNXT_QPLIB_RESERVED_QP_WRS; |
---|
| 1176 | + entries = roundup_pow_of_two(entries + diff + 1); |
---|
| 1177 | + sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1); |
---|
| 1178 | + sq->q_full_delta = diff + 1; |
---|
| 1179 | + /* |
---|
| 1180 | + * Reserving one slot for Phantom WQE. Application can |
---|
| 1181 | + * post one extra entry in this case. But allowing this to avoid |
---|
| 1182 | + * unexpected Queue full condition |
---|
| 1183 | + */ |
---|
| 1184 | + qplqp->sq.q_full_delta -= 1; |
---|
| 1185 | + qplqp->sq.sg_info.pgsize = PAGE_SIZE; |
---|
| 1186 | + qplqp->sq.sg_info.pgshft = PAGE_SHIFT; |
---|
| 1187 | + |
---|
| 1188 | + return 0; |
---|
| 1189 | +} |
---|
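The SQ sizing above folds in a driver-reserved headroom; a worked example, assuming BNXT_QPLIB_RESERVED_QP_WRS is 128 (its usual value in this driver, though the definition is not shown in this excerpt):

```c
/*
 * Static WQE mode, cap.max_send_wr = 200:
 *   diff         = BNXT_QPLIB_RESERVED_QP_WRS    = 128
 *   entries      = roundup_pow_of_two(200 + 129) = 512
 *   sq->max_wqe  = min(512, dev_attr->max_qp_wqes + 129)
 *   q_full_delta = 129 - 1                       = 128
 *
 * The final "- 1" returns one reserved slot to the application to
 * absorb the phantom WQE described in the comment above.
 */
```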
| 1190 | + |
---|
| 1191 | +static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp, |
---|
| 1192 | + struct ib_qp_init_attr *init_attr) |
---|
| 1193 | +{ |
---|
| 1194 | + struct bnxt_qplib_dev_attr *dev_attr; |
---|
| 1195 | + struct bnxt_qplib_qp *qplqp; |
---|
| 1196 | + struct bnxt_re_dev *rdev; |
---|
| 1197 | + int entries; |
---|
| 1198 | + |
---|
| 1199 | + rdev = qp->rdev; |
---|
| 1200 | + qplqp = &qp->qplib_qp; |
---|
| 1201 | + dev_attr = &rdev->dev_attr; |
---|
| 1202 | + |
---|
| 1203 | + if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) { |
---|
| 1204 | + entries = roundup_pow_of_two(init_attr->cap.max_send_wr + 1); |
---|
| 1205 | + qplqp->sq.max_wqe = min_t(u32, entries, |
---|
| 1206 | + dev_attr->max_qp_wqes + 1); |
---|
| 1207 | + qplqp->sq.q_full_delta = qplqp->sq.max_wqe - |
---|
| 1208 | + init_attr->cap.max_send_wr; |
---|
| 1209 | + qplqp->sq.max_sge++; /* Need one extra sge to put UD header */ |
---|
| 1210 | + if (qplqp->sq.max_sge > dev_attr->max_qp_sges) |
---|
| 1211 | + qplqp->sq.max_sge = dev_attr->max_qp_sges; |
---|
| 1212 | + } |
---|
| 1213 | +} |
---|
| 1214 | + |
---|
| 1215 | +static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev, |
---|
| 1216 | + struct ib_qp_init_attr *init_attr) |
---|
| 1217 | +{ |
---|
| 1218 | + struct bnxt_qplib_chip_ctx *chip_ctx; |
---|
| 1219 | + int qptype; |
---|
| 1220 | + |
---|
| 1221 | + chip_ctx = rdev->chip_ctx; |
---|
| 1222 | + |
---|
| 1223 | + qptype = __from_ib_qp_type(init_attr->qp_type); |
---|
| 1224 | + if (qptype == IB_QPT_MAX) { |
---|
| 1225 | + ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype); |
---|
| 1226 | + qptype = -EOPNOTSUPP; |
---|
| 1227 | + goto out; |
---|
| 1228 | + } |
---|
| 1229 | + |
---|
| 1230 | + if (bnxt_qplib_is_chip_gen_p5(chip_ctx) && |
---|
| 1231 | + init_attr->qp_type == IB_QPT_GSI) |
---|
| 1232 | + qptype = CMDQ_CREATE_QP_TYPE_GSI; |
---|
| 1233 | +out: |
---|
| 1234 | + return qptype; |
---|
| 1235 | +} |
---|
| 1236 | + |
---|
| 1237 | +static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd, |
---|
| 1238 | + struct ib_qp_init_attr *init_attr, |
---|
| 1239 | + struct ib_udata *udata) |
---|
| 1240 | +{ |
---|
| 1241 | + struct bnxt_qplib_dev_attr *dev_attr; |
---|
| 1242 | + struct bnxt_qplib_qp *qplqp; |
---|
| 1243 | + struct bnxt_re_dev *rdev; |
---|
| 1244 | + struct bnxt_re_cq *cq; |
---|
| 1245 | + int rc = 0, qptype; |
---|
| 1246 | + |
---|
| 1247 | + rdev = qp->rdev; |
---|
| 1248 | + qplqp = &qp->qplib_qp; |
---|
| 1249 | + dev_attr = &rdev->dev_attr; |
---|
| 1250 | + |
---|
| 1251 | + /* Setup misc params */ |
---|
| 1252 | + ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr); |
---|
| 1253 | + qplqp->pd = &pd->qplib_pd; |
---|
| 1254 | + qplqp->qp_handle = (u64)qplqp; |
---|
| 1255 | + qplqp->max_inline_data = init_attr->cap.max_inline_data; |
---|
| 1256 | + qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? |
---|
| 1257 | + true : false); |
---|
| 1258 | + qptype = bnxt_re_init_qp_type(rdev, init_attr); |
---|
| 1259 | + if (qptype < 0) { |
---|
| 1260 | + rc = qptype; |
---|
| 1261 | + goto out; |
---|
| 1262 | + } |
---|
| 1263 | + qplqp->type = (u8)qptype; |
---|
| 1264 | + qplqp->wqe_mode = rdev->chip_ctx->modes.wqe_mode; |
---|
| 1265 | + |
---|
| 1266 | + if (init_attr->qp_type == IB_QPT_RC) { |
---|
| 1267 | + qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom; |
---|
| 1268 | + qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; |
---|
| 1269 | + } |
---|
| 1270 | + qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); |
---|
| 1271 | + qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */ |
---|
| 1272 | + if (init_attr->create_flags) |
---|
| 1273 | + ibdev_dbg(&rdev->ibdev, |
---|
| 1274 | + "QP create flags 0x%x not supported", |
---|
| 1275 | + init_attr->create_flags); |
---|
| 1276 | + |
---|
| 1277 | + /* Setup CQs */ |
---|
| 1278 | + if (init_attr->send_cq) { |
---|
| 1279 | + cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq); |
---|
| 1280 | + if (!cq) { |
---|
| 1281 | + ibdev_err(&rdev->ibdev, "Send CQ not found"); |
---|
| 1282 | + rc = -EINVAL; |
---|
| 1283 | + goto out; |
---|
| 1284 | + } |
---|
| 1285 | + qplqp->scq = &cq->qplib_cq; |
---|
| 1286 | + qp->scq = cq; |
---|
| 1287 | + } |
---|
| 1288 | + |
---|
| 1289 | + if (init_attr->recv_cq) { |
---|
| 1290 | + cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq); |
---|
| 1291 | + if (!cq) { |
---|
| 1292 | + ibdev_err(&rdev->ibdev, "Receive CQ not found"); |
---|
| 1293 | + rc = -EINVAL; |
---|
| 1294 | + goto out; |
---|
| 1295 | + } |
---|
| 1296 | + qplqp->rcq = &cq->qplib_cq; |
---|
| 1297 | + qp->rcq = cq; |
---|
| 1298 | + } |
---|
| 1299 | + |
---|
| 1300 | + /* Setup RQ/SRQ */ |
---|
| 1301 | + rc = bnxt_re_init_rq_attr(qp, init_attr); |
---|
| 1302 | + if (rc) |
---|
| 1303 | + goto out; |
---|
| 1304 | + if (init_attr->qp_type == IB_QPT_GSI) |
---|
| 1305 | + bnxt_re_adjust_gsi_rq_attr(qp); |
---|
| 1306 | + |
---|
| 1307 | + /* Setup SQ */ |
---|
| 1308 | + rc = bnxt_re_init_sq_attr(qp, init_attr, udata); |
---|
| 1309 | + if (rc) |
---|
| 1310 | + goto out; |
---|
| 1311 | + if (init_attr->qp_type == IB_QPT_GSI) |
---|
| 1312 | + bnxt_re_adjust_gsi_sq_attr(qp, init_attr); |
---|
| 1313 | + |
---|
| 1314 | + if (udata) /* This will update DPI and qp_handle */ |
---|
| 1315 | + rc = bnxt_re_init_user_qp(rdev, pd, qp, udata); |
---|
| 1316 | +out: |
---|
| 1317 | + return rc; |
---|
| 1318 | +} |
---|
| 1319 | + |
---|
| 1320 | +static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp, |
---|
| 1321 | + struct bnxt_re_pd *pd) |
---|
| 1322 | +{ |
---|
| 1323 | + struct bnxt_re_sqp_entries *sqp_tbl = NULL; |
---|
| 1324 | + struct bnxt_re_dev *rdev; |
---|
| 1325 | + struct bnxt_re_qp *sqp; |
---|
| 1326 | + struct bnxt_re_ah *sah; |
---|
| 1327 | + int rc = 0; |
---|
| 1328 | + |
---|
| 1329 | + rdev = qp->rdev; |
---|
| 1330 | + /* Create a shadow QP to handle the QP1 traffic */ |
---|
| 1331 | + sqp_tbl = kzalloc(sizeof(*sqp_tbl) * BNXT_RE_MAX_GSI_SQP_ENTRIES, |
---|
| 1332 | + GFP_KERNEL); |
---|
| 1333 | + if (!sqp_tbl) |
---|
| 1334 | + return -ENOMEM; |
---|
| 1335 | + rdev->gsi_ctx.sqp_tbl = sqp_tbl; |
---|
| 1336 | + |
---|
| 1337 | + sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp); |
---|
| 1338 | + if (!sqp) { |
---|
| 1339 | + rc = -ENODEV; |
---|
| 1340 | + ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1"); |
---|
| 1341 | + goto out; |
---|
| 1342 | + } |
---|
| 1343 | + rdev->gsi_ctx.gsi_sqp = sqp; |
---|
| 1344 | + |
---|
| 1345 | + sqp->rcq = qp->rcq; |
---|
| 1346 | + sqp->scq = qp->scq; |
---|
| 1347 | + sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res, |
---|
| 1348 | + &qp->qplib_qp); |
---|
| 1349 | + if (!sah) { |
---|
| 1350 | + bnxt_qplib_destroy_qp(&rdev->qplib_res, |
---|
| 1351 | + &sqp->qplib_qp); |
---|
| 1352 | + rc = -ENODEV; |
---|
| 1353 | + ibdev_err(&rdev->ibdev, |
---|
| 1354 | + "Failed to create AH entry for ShadowQP"); |
---|
| 1355 | + goto out; |
---|
| 1356 | + } |
---|
| 1357 | + rdev->gsi_ctx.gsi_sah = sah; |
---|
| 1358 | + |
---|
| 1359 | + return 0; |
---|
| 1360 | +out: |
---|
| 1361 | + kfree(sqp_tbl); |
---|
| 1362 | + return rc; |
---|
| 1363 | +} |
---|
| 1364 | + |
---|
| 1365 | +static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd, |
---|
| 1366 | + struct ib_qp_init_attr *init_attr) |
---|
| 1367 | +{ |
---|
| 1368 | + struct bnxt_re_dev *rdev; |
---|
| 1369 | + struct bnxt_qplib_qp *qplqp; |
---|
| 1370 | + int rc = 0; |
---|
| 1371 | + |
---|
| 1372 | + rdev = qp->rdev; |
---|
| 1373 | + qplqp = &qp->qplib_qp; |
---|
| 1374 | + |
---|
| 1375 | + qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2; |
---|
| 1376 | + qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2; |
---|
| 1377 | + |
---|
| 1378 | + rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp); |
---|
| 1379 | + if (rc) { |
---|
| 1380 | + ibdev_err(&rdev->ibdev, "create HW QP1 failed!"); |
---|
| 1381 | + goto out; |
---|
| 1382 | + } |
---|
| 1383 | + |
---|
| 1384 | + rc = bnxt_re_create_shadow_gsi(qp, pd); |
---|
| 1385 | +out: |
---|
| 1386 | + return rc; |
---|
| 1387 | +} |
---|
| 1388 | + |
---|
| 1389 | +static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev, |
---|
| 1390 | + struct ib_qp_init_attr *init_attr, |
---|
| 1391 | + struct bnxt_qplib_dev_attr *dev_attr) |
---|
| 1392 | +{ |
---|
| 1393 | + bool rc = true; |
---|
| 1394 | + |
---|
| 1395 | + if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes || |
---|
| 1396 | + init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes || |
---|
| 1397 | + init_attr->cap.max_send_sge > dev_attr->max_qp_sges || |
---|
| 1398 | + init_attr->cap.max_recv_sge > dev_attr->max_qp_sges || |
---|
| 1399 | + init_attr->cap.max_inline_data > dev_attr->max_inline_data) { |
---|
| 1400 | + ibdev_err(&rdev->ibdev, |
---|
| 1401 | + "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x", |
---|
| 1402 | + init_attr->cap.max_send_wr, dev_attr->max_qp_wqes, |
---|
| 1403 | + init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes, |
---|
| 1404 | + init_attr->cap.max_send_sge, dev_attr->max_qp_sges, |
---|
| 1405 | + init_attr->cap.max_recv_sge, dev_attr->max_qp_sges, |
---|
| 1406 | + init_attr->cap.max_inline_data, |
---|
| 1407 | + dev_attr->max_inline_data); |
---|
| 1408 | + rc = false; |
---|
| 1409 | + } |
---|
| 1410 | + return rc; |
---|
| 1411 | +} |
---|
| 1412 | + |
---|
 struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
				struct ib_qp_init_attr *qp_init_attr,
				struct ib_udata *udata)
.. | .. |
---|
1043 | 1418 | struct bnxt_re_dev *rdev = pd->rdev; |
---|
1044 | 1419 | struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; |
---|
1045 | 1420 | struct bnxt_re_qp *qp; |
---|
1046 | | - struct bnxt_re_cq *cq; |
---|
1047 | | - struct bnxt_re_srq *srq; |
---|
1048 | | - int rc, entries; |
---|
| 1421 | + int rc; |
---|
1049 | 1422 | |
---|
1050 | | - if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) || |
---|
1051 | | - (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) || |
---|
1052 | | - (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) || |
---|
1053 | | - (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) || |
---|
1054 | | - (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data)) |
---|
1055 | | - return ERR_PTR(-EINVAL); |
---|
| 1423 | + rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr); |
---|
| 1424 | + if (!rc) { |
---|
| 1425 | + rc = -EINVAL; |
---|
| 1426 | + goto exit; |
---|
| 1427 | + } |
---|
1056 | 1428 | |
---|
1057 | 1429 | qp = kzalloc(sizeof(*qp), GFP_KERNEL); |
---|
1058 | | - if (!qp) |
---|
1059 | | - return ERR_PTR(-ENOMEM); |
---|
1060 | | - |
---|
| 1430 | + if (!qp) { |
---|
| 1431 | + rc = -ENOMEM; |
---|
| 1432 | + goto exit; |
---|
| 1433 | + } |
---|
1061 | 1434 | qp->rdev = rdev; |
---|
1062 | | - ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr); |
---|
1063 | | - qp->qplib_qp.pd = &pd->qplib_pd; |
---|
1064 | | - qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp); |
---|
1065 | | - qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type); |
---|
1066 | | - if (qp->qplib_qp.type == IB_QPT_MAX) { |
---|
1067 | | - dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported", |
---|
1068 | | - qp->qplib_qp.type); |
---|
1069 | | - rc = -EINVAL; |
---|
| 1435 | + rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata); |
---|
| 1436 | + if (rc) |
---|
1070 | 1437 | goto fail; |
---|
1071 | | - } |
---|
1072 | | - qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data; |
---|
1073 | | - qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type == |
---|
1074 | | - IB_SIGNAL_ALL_WR) ? true : false); |
---|
1075 | 1438 | |
---|
1076 | | - qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge; |
---|
1077 | | - if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges) |
---|
1078 | | - qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges; |
---|
1079 | | - |
---|
1080 | | - if (qp_init_attr->send_cq) { |
---|
1081 | | - cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq, |
---|
1082 | | - ib_cq); |
---|
1083 | | - if (!cq) { |
---|
1084 | | - dev_err(rdev_to_dev(rdev), "Send CQ not found"); |
---|
1085 | | - rc = -EINVAL; |
---|
1086 | | - goto fail; |
---|
1087 | | - } |
---|
1088 | | - qp->qplib_qp.scq = &cq->qplib_cq; |
---|
1089 | | - qp->scq = cq; |
---|
1090 | | - } |
---|
1091 | | - |
---|
1092 | | - if (qp_init_attr->recv_cq) { |
---|
1093 | | - cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq, |
---|
1094 | | - ib_cq); |
---|
1095 | | - if (!cq) { |
---|
1096 | | - dev_err(rdev_to_dev(rdev), "Receive CQ not found"); |
---|
1097 | | - rc = -EINVAL; |
---|
1098 | | - goto fail; |
---|
1099 | | - } |
---|
1100 | | - qp->qplib_qp.rcq = &cq->qplib_cq; |
---|
1101 | | - qp->rcq = cq; |
---|
1102 | | - } |
---|
1103 | | - |
---|
1104 | | - if (qp_init_attr->srq) { |
---|
1105 | | - srq = container_of(qp_init_attr->srq, struct bnxt_re_srq, |
---|
1106 | | - ib_srq); |
---|
1107 | | - if (!srq) { |
---|
1108 | | - dev_err(rdev_to_dev(rdev), "SRQ not found"); |
---|
1109 | | - rc = -EINVAL; |
---|
1110 | | - goto fail; |
---|
1111 | | - } |
---|
1112 | | - qp->qplib_qp.srq = &srq->qplib_srq; |
---|
1113 | | - qp->qplib_qp.rq.max_wqe = 0; |
---|
1114 | | - } else { |
---|
1115 | | - /* Allocate 1 more than what's provided so posting max doesn't |
---|
1116 | | - * mean empty |
---|
1117 | | - */ |
---|
1118 | | - entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1); |
---|
1119 | | - qp->qplib_qp.rq.max_wqe = min_t(u32, entries, |
---|
1120 | | - dev_attr->max_qp_wqes + 1); |
---|
1121 | | - |
---|
1122 | | - qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - |
---|
1123 | | - qp_init_attr->cap.max_recv_wr; |
---|
1124 | | - |
---|
1125 | | - qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge; |
---|
1126 | | - if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) |
---|
1127 | | - qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; |
---|
1128 | | - } |
---|
1129 | | - |
---|
1130 | | - qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); |
---|
1131 | | - |
---|
1132 | | - if (qp_init_attr->qp_type == IB_QPT_GSI) { |
---|
1133 | | - /* Allocate 1 more than what's provided */ |
---|
1134 | | - entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1); |
---|
1135 | | - qp->qplib_qp.sq.max_wqe = min_t(u32, entries, |
---|
1136 | | - dev_attr->max_qp_wqes + 1); |
---|
1137 | | - qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - |
---|
1138 | | - qp_init_attr->cap.max_send_wr; |
---|
1139 | | - qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; |
---|
1140 | | - if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) |
---|
1141 | | - qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; |
---|
1142 | | - qp->qplib_qp.sq.max_sge++; |
---|
1143 | | - if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges) |
---|
1144 | | - qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges; |
---|
1145 | | - |
---|
1146 | | - qp->qplib_qp.rq_hdr_buf_size = |
---|
1147 | | - BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2; |
---|
1148 | | - |
---|
1149 | | - qp->qplib_qp.sq_hdr_buf_size = |
---|
1150 | | - BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2; |
---|
1151 | | - qp->qplib_qp.dpi = &rdev->dpi_privileged; |
---|
1152 | | - rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp); |
---|
1153 | | - if (rc) { |
---|
1154 | | - dev_err(rdev_to_dev(rdev), "Failed to create HW QP1"); |
---|
1155 | | - goto fail; |
---|
1156 | | - } |
---|
1157 | | - /* Create a shadow QP to handle the QP1 traffic */ |
---|
1158 | | - rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, |
---|
1159 | | - &qp->qplib_qp); |
---|
1160 | | - if (!rdev->qp1_sqp) { |
---|
1161 | | - rc = -EINVAL; |
---|
1162 | | - dev_err(rdev_to_dev(rdev), |
---|
1163 | | - "Failed to create Shadow QP for QP1"); |
---|
| 1439 | + if (qp_init_attr->qp_type == IB_QPT_GSI && |
---|
| 1440 | + !(bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))) { |
---|
| 1441 | + rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr); |
---|
| 1442 | + if (rc == -ENODEV) |
---|
1164 | 1443 | goto qp_destroy; |
---|
1165 | | - } |
---|
1166 | | - rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res, |
---|
1167 | | - &qp->qplib_qp); |
---|
1168 | | - if (!rdev->sqp_ah) { |
---|
1169 | | - bnxt_qplib_destroy_qp(&rdev->qplib_res, |
---|
1170 | | - &rdev->qp1_sqp->qplib_qp); |
---|
1171 | | - rc = -EINVAL; |
---|
1172 | | - dev_err(rdev_to_dev(rdev), |
---|
1173 | | - "Failed to create AH entry for ShadowQP"); |
---|
1174 | | - goto qp_destroy; |
---|
1175 | | - } |
---|
1176 | | - |
---|
| 1444 | + if (rc) |
---|
| 1445 | + goto fail; |
---|
1177 | 1446 | } else { |
---|
1178 | | - /* Allocate 128 + 1 more than what's provided */ |
---|
1179 | | - entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + |
---|
1180 | | - BNXT_QPLIB_RESERVED_QP_WRS + 1); |
---|
1181 | | - qp->qplib_qp.sq.max_wqe = min_t(u32, entries, |
---|
1182 | | - dev_attr->max_qp_wqes + |
---|
1183 | | - BNXT_QPLIB_RESERVED_QP_WRS + 1); |
---|
1184 | | - qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1; |
---|
1185 | | - |
---|
1186 | | - /* |
---|
1187 | | - * Reserving one slot for Phantom WQE. Application can |
---|
1188 | | - * post one extra entry in this case. But allowing this to avoid |
---|
1189 | | - * unexpected Queue full condition |
---|
1190 | | - */ |
---|
1191 | | - |
---|
1192 | | - qp->qplib_qp.sq.q_full_delta -= 1; |
---|
1193 | | - |
---|
1194 | | - qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom; |
---|
1195 | | - qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; |
---|
1196 | | - if (udata) { |
---|
1197 | | - rc = bnxt_re_init_user_qp(rdev, pd, qp, udata); |
---|
1198 | | - if (rc) |
---|
1199 | | - goto fail; |
---|
1200 | | - } else { |
---|
1201 | | - qp->qplib_qp.dpi = &rdev->dpi_privileged; |
---|
1202 | | - } |
---|
1203 | | - |
---|
1204 | 1447 | rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp); |
---|
1205 | 1448 | if (rc) { |
---|
1206 | | - dev_err(rdev_to_dev(rdev), "Failed to create HW QP"); |
---|
| 1449 | + ibdev_err(&rdev->ibdev, "Failed to create HW QP"); |
---|
1207 | 1450 | goto free_umem; |
---|
| 1451 | + } |
---|
| 1452 | + if (udata) { |
---|
| 1453 | + struct bnxt_re_qp_resp resp; |
---|
| 1454 | + |
---|
| 1455 | + resp.qpid = qp->qplib_qp.id; |
---|
| 1456 | + resp.rsvd = 0; |
---|
| 1457 | + rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); |
---|
| 1458 | + if (rc) { |
---|
| 1459 | + ibdev_err(&rdev->ibdev, "Failed to copy QP udata"); |
---|
| 1460 | + goto qp_destroy; |
---|
| 1461 | + } |
---|
1208 | 1462 | } |
---|
1209 | 1463 | } |
---|
1210 | 1464 | |
---|
1211 | 1465 | qp->ib_qp.qp_num = qp->qplib_qp.id; |
---|
| 1466 | + if (qp_init_attr->qp_type == IB_QPT_GSI) |
---|
| 1467 | + rdev->gsi_ctx.gsi_qp = qp; |
---|
1212 | 1468 | spin_lock_init(&qp->sq_lock); |
---|
1213 | 1469 | spin_lock_init(&qp->rq_lock); |
---|
1214 | | - |
---|
1215 | | - if (udata) { |
---|
1216 | | - struct bnxt_re_qp_resp resp; |
---|
1217 | | - |
---|
1218 | | - resp.qpid = qp->ib_qp.qp_num; |
---|
1219 | | - resp.rsvd = 0; |
---|
1220 | | - rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); |
---|
1221 | | - if (rc) { |
---|
1222 | | - dev_err(rdev_to_dev(rdev), "Failed to copy QP udata"); |
---|
1223 | | - goto qp_destroy; |
---|
1224 | | - } |
---|
1225 | | - } |
---|
1226 | 1470 | INIT_LIST_HEAD(&qp->list); |
---|
1227 | 1471 | mutex_lock(&rdev->qp_lock); |
---|
1228 | 1472 | list_add_tail(&qp->list, &rdev->qp_list); |
---|
1229 | | - atomic_inc(&rdev->qp_count); |
---|
1230 | 1473 | mutex_unlock(&rdev->qp_lock); |
---|
| 1474 | + atomic_inc(&rdev->qp_count); |
---|
1231 | 1475 | |
---|
1232 | 1476 | return &qp->ib_qp; |
---|
1233 | 1477 | qp_destroy: |
---|
1234 | 1478 | bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); |
---|
1235 | 1479 | free_umem: |
---|
1236 | | - if (udata) { |
---|
1237 | | - if (qp->rumem) |
---|
1238 | | - ib_umem_release(qp->rumem); |
---|
1239 | | - if (qp->sumem) |
---|
1240 | | - ib_umem_release(qp->sumem); |
---|
1241 | | - } |
---|
| 1480 | + ib_umem_release(qp->rumem); |
---|
| 1481 | + ib_umem_release(qp->sumem); |
---|
1242 | 1482 | fail: |
---|
1243 | 1483 | kfree(qp); |
---|
| 1484 | +exit: |
---|
1244 | 1485 | return ERR_PTR(rc); |
---|
1245 | 1486 | } |
---|
1246 | 1487 | |
---|
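The create-QP rework above folds the old inline QP1/shadow-QP handling into bnxt_re_create_gsi_qp() and keeps a single error-unwind ladder (qp_destroy → free_umem → fail → exit), each label releasing exactly what was set up before the failing step. A minimal userspace sketch of that goto-unwind pattern, with hypothetical stand-ins for the HW-create, udata-copy, and teardown calls:

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the HW-create, udata-copy and teardown
 * steps; each label undoes exactly what succeeded before the jump. */
static int hw_create(void) { return 0; }
static int copy_resp(void) { return -EFAULT; } /* force a late failure */
static void hw_destroy(void) { puts("destroy HW QP"); }
static void umem_release(void) { puts("release umem"); }

static int create_obj(void)
{
	void *qp;
	int rc;

	qp = malloc(16);
	if (!qp) {
		rc = -ENOMEM;
		goto exit;		/* nothing to undo yet */
	}
	rc = hw_create();
	if (rc)
		goto free_umem;
	rc = copy_resp();
	if (rc)
		goto qp_destroy;	/* HW QP exists, tear it down too */
	free(qp);			/* toy cleanup on the success path */
	return 0;

qp_destroy:
	hw_destroy();
free_umem:
	umem_release();
	free(qp);			/* the driver's "fail: kfree(qp)" */
exit:
	return rc;
}

int main(void)
{
	return create_obj() ? 1 : 0;
}
```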
.. | .. |
---|
1323 | 1564 | } |
---|
1324 | 1565 | |
---|
1325 | 1566 | /* Shared Receive Queues */ |
---|
1326 | | -int bnxt_re_destroy_srq(struct ib_srq *ib_srq) |
---|
| 1567 | +int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata) |
---|
1327 | 1568 | { |
---|
1328 | 1569 | struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq, |
---|
1329 | 1570 | ib_srq); |
---|
1330 | 1571 | struct bnxt_re_dev *rdev = srq->rdev; |
---|
1331 | 1572 | struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq; |
---|
1332 | 1573 | struct bnxt_qplib_nq *nq = NULL; |
---|
1333 | | - int rc; |
---|
1334 | 1574 | |
---|
1335 | 1575 | if (qplib_srq->cq) |
---|
1336 | 1576 | nq = qplib_srq->cq->nq; |
---|
1337 | | - rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq); |
---|
1338 | | - if (rc) { |
---|
1339 | | - dev_err(rdev_to_dev(rdev), "Destroy HW SRQ failed!"); |
---|
1340 | | - return rc; |
---|
1341 | | - } |
---|
1342 | | - |
---|
1343 | | - if (srq->umem) |
---|
1344 | | - ib_umem_release(srq->umem); |
---|
1345 | | - kfree(srq); |
---|
| 1577 | + bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq); |
---|
| 1578 | + ib_umem_release(srq->umem); |
---|
1346 | 1579 | atomic_dec(&rdev->srq_count); |
---|
1347 | 1580 | if (nq) |
---|
1348 | 1581 | nq->budget--; |
---|
.. | .. |
---|
1358 | 1591 | struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq; |
---|
1359 | 1592 | struct ib_umem *umem; |
---|
1360 | 1593 | int bytes = 0; |
---|
1361 | | - struct ib_ucontext *context = pd->ib_pd.uobject->context; |
---|
1362 | | - struct bnxt_re_ucontext *cntx = container_of(context, |
---|
1363 | | - struct bnxt_re_ucontext, |
---|
1364 | | - ib_uctx); |
---|
| 1594 | + struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context( |
---|
| 1595 | + udata, struct bnxt_re_ucontext, ib_uctx); |
---|
| 1596 | + |
---|
1365 | 1597 | if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) |
---|
1366 | 1598 | return -EFAULT; |
---|
1367 | 1599 | |
---|
1368 | | - bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE); |
---|
| 1600 | + bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size); |
---|
1369 | 1601 | bytes = PAGE_ALIGN(bytes); |
---|
1370 | | - umem = ib_umem_get(context, ureq.srqva, bytes, |
---|
1371 | | - IB_ACCESS_LOCAL_WRITE, 1); |
---|
| 1602 | + umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes, |
---|
| 1603 | + IB_ACCESS_LOCAL_WRITE); |
---|
1372 | 1604 | if (IS_ERR(umem)) |
---|
1373 | 1605 | return PTR_ERR(umem); |
---|
1374 | 1606 | |
---|
1375 | 1607 | srq->umem = umem; |
---|
1376 | | - qplib_srq->nmap = umem->nmap; |
---|
1377 | | - qplib_srq->sglist = umem->sg_head.sgl; |
---|
| 1608 | + qplib_srq->sg_info.umem = umem; |
---|
| 1609 | + qplib_srq->sg_info.pgsize = PAGE_SIZE; |
---|
| 1610 | + qplib_srq->sg_info.pgshft = PAGE_SHIFT; |
---|
1378 | 1611 | qplib_srq->srq_handle = ureq.srq_handle; |
---|
1379 | 1612 | qplib_srq->dpi = &cntx->dpi; |
---|
1380 | 1613 | |
---|
1381 | 1614 | return 0; |
---|
1382 | 1615 | } |
---|
1383 | 1616 | |
---|
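bnxt_re_init_user_srq() above now derives the driver ucontext from udata via rdma_udata_to_drv_context() instead of chasing pd->ib_pd.uobject->context, and the next hunk converts bnxt_re_create_srq() to the core-allocated model: the IB core allocates the driver wrapper and hands in the embedded ib_srq, so the driver recovers its own structure with container_of() and returns a plain errno. A standalone sketch of that embed-and-recover pattern, with hypothetical my_srq/ib_srq types:

```c
#include <stddef.h>
#include <stdio.h>

/* container_of() as the kernel defines it (simplified). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_srq { int rsvd; };

/* Hypothetical driver wrapper, embedding the core object the way
 * bnxt_re_srq embeds ib_srq (and bnxt_re_ucontext embeds ib_uctx). */
struct my_srq {
	int id;
	struct ib_srq ib_srq;
};

static int my_create_srq(struct ib_srq *ib_srq)
{
	struct my_srq *srq = container_of(ib_srq, struct my_srq, ib_srq);

	srq->id = 7;	/* initialize driver-private state in place */
	return 0;	/* failures are plain negative errnos now */
}

int main(void)
{
	struct my_srq s;	/* the "core" owns the whole allocation */

	my_create_srq(&s.ib_srq);
	printf("id=%d\n", s.id);
	return 0;
}
```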
1384 | | -struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd, |
---|
1385 | | - struct ib_srq_init_attr *srq_init_attr, |
---|
1386 | | - struct ib_udata *udata) |
---|
| 1617 | +int bnxt_re_create_srq(struct ib_srq *ib_srq, |
---|
| 1618 | + struct ib_srq_init_attr *srq_init_attr, |
---|
| 1619 | + struct ib_udata *udata) |
---|
1387 | 1620 | { |
---|
1388 | | - struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); |
---|
1389 | | - struct bnxt_re_dev *rdev = pd->rdev; |
---|
1390 | | - struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; |
---|
1391 | | - struct bnxt_re_srq *srq; |
---|
| 1621 | + struct bnxt_qplib_dev_attr *dev_attr; |
---|
1392 | 1622 | struct bnxt_qplib_nq *nq = NULL; |
---|
| 1623 | + struct bnxt_re_dev *rdev; |
---|
| 1624 | + struct bnxt_re_srq *srq; |
---|
| 1625 | + struct bnxt_re_pd *pd; |
---|
| 1626 | + struct ib_pd *ib_pd; |
---|
1393 | 1627 | int rc, entries; |
---|
1394 | 1628 | |
---|
| 1629 | + ib_pd = ib_srq->pd; |
---|
| 1630 | + pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); |
---|
| 1631 | + rdev = pd->rdev; |
---|
| 1632 | + dev_attr = &rdev->dev_attr; |
---|
| 1633 | + srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq); |
---|
| 1634 | + |
---|
1395 | 1635 | if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) { |
---|
1396 | | - dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded"); |
---|
| 1636 | + ibdev_err(&rdev->ibdev, "Create SRQ failed - max exceeded");
---|
1397 | 1637 | rc = -EINVAL; |
---|
1398 | 1638 | goto exit; |
---|
1399 | 1639 | } |
---|
.. | .. |
---|
1403 | 1643 | goto exit; |
---|
1404 | 1644 | } |
---|
1405 | 1645 | |
---|
1406 | | - srq = kzalloc(sizeof(*srq), GFP_KERNEL); |
---|
1407 | | - if (!srq) { |
---|
1408 | | - rc = -ENOMEM; |
---|
1409 | | - goto exit; |
---|
1410 | | - } |
---|
1411 | 1646 | srq->rdev = rdev; |
---|
1412 | 1647 | srq->qplib_srq.pd = &pd->qplib_pd; |
---|
1413 | 1648 | srq->qplib_srq.dpi = &rdev->dpi_privileged; |
---|
.. | .. |
---|
1417 | 1652 | entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1); |
---|
1418 | 1653 | if (entries > dev_attr->max_srq_wqes + 1) |
---|
1419 | 1654 | entries = dev_attr->max_srq_wqes + 1; |
---|
1420 | | - |
---|
1421 | 1655 | srq->qplib_srq.max_wqe = entries; |
---|
| 1656 | + |
---|
1422 | 1657 | srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge; |
---|
| 1658 | + /* 128-byte WQE size for SRQ, so use max SGEs */
---|
| 1659 | + srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges); |
---|
1423 | 1660 | srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit; |
---|
1424 | 1661 | srq->srq_limit = srq_init_attr->attr.srq_limit; |
---|
1425 | 1662 | srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id; |
---|
.. | .. |
---|
1433 | 1670 | |
---|
1434 | 1671 | rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq); |
---|
1435 | 1672 | if (rc) { |
---|
1436 | | - dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!"); |
---|
| 1673 | + ibdev_err(&rdev->ibdev, "Create HW SRQ failed!"); |
---|
1437 | 1674 | goto fail; |
---|
1438 | 1675 | } |
---|
1439 | 1676 | |
---|
.. | .. |
---|
1443 | 1680 | resp.srqid = srq->qplib_srq.id; |
---|
1444 | 1681 | rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); |
---|
1445 | 1682 | if (rc) { |
---|
1446 | | - dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!"); |
---|
| 1683 | + ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!"); |
---|
1447 | 1684 | bnxt_qplib_destroy_srq(&rdev->qplib_res, |
---|
1448 | 1685 | &srq->qplib_srq); |
---|
1449 | 1686 | goto fail; |
---|
.. | .. |
---|
1452 | 1689 | if (nq) |
---|
1453 | 1690 | nq->budget++; |
---|
1454 | 1691 | atomic_inc(&rdev->srq_count); |
---|
| 1692 | + spin_lock_init(&srq->lock); |
---|
1455 | 1693 | |
---|
1456 | | - return &srq->ib_srq; |
---|
| 1694 | + return 0; |
---|
1457 | 1695 | |
---|
1458 | 1696 | fail: |
---|
1459 | | - if (srq->umem) |
---|
1460 | | - ib_umem_release(srq->umem); |
---|
1461 | | - kfree(srq); |
---|
| 1697 | + ib_umem_release(srq->umem); |
---|
1462 | 1698 | exit: |
---|
1463 | | - return ERR_PTR(rc); |
---|
| 1699 | + return rc; |
---|
1464 | 1700 | } |
---|
1465 | 1701 | |
---|
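The SRQ sizing above follows the driver's usual queue-depth recipe: add one slot to what the consumer asked for, round up to a power of two, then clamp to the device limit. A compilable model of that calculation, with a hypothetical device limit:

```c
#include <stdio.h>

/* Userspace model of roundup_pow_of_two(). */
static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int max_wr = 100;	/* requested by the consumer */
	unsigned int dev_max = 1024;	/* hypothetical device limit */
	unsigned int entries;

	entries = roundup_pow2(max_wr + 1);	/* one extra slot */
	if (entries > dev_max + 1)
		entries = dev_max + 1;		/* clamp to the device */
	printf("max_wqe = %u\n", entries);	/* prints 128 */
	return 0;
}
```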
1466 | 1702 | int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr, |
---|
.. | .. |
---|
1484 | 1720 | srq->qplib_srq.threshold = srq_attr->srq_limit; |
---|
1485 | 1721 | rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq); |
---|
1486 | 1722 | if (rc) { |
---|
1487 | | - dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!"); |
---|
| 1723 | + ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!"); |
---|
1488 | 1724 | return rc; |
---|
1489 | 1725 | } |
---|
1490 | 1726 | /* On success, update the shadow */ |
---|
.. | .. |
---|
1492 | 1728 /* No need to build and send response back to udata */
---|
1493 | 1729 | break; |
---|
1494 | 1730 | default: |
---|
1495 | | - dev_err(rdev_to_dev(rdev), |
---|
1496 | | - "Unsupported srq_attr_mask 0x%x", srq_attr_mask); |
---|
| 1731 | + ibdev_err(&rdev->ibdev, |
---|
| 1732 | + "Unsupported srq_attr_mask 0x%x", srq_attr_mask); |
---|
1497 | 1733 | return -EINVAL; |
---|
1498 | 1734 | } |
---|
1499 | 1735 | return 0; |
---|
.. | .. |
---|
1511 | 1747 | tsrq.qplib_srq.id = srq->qplib_srq.id; |
---|
1512 | 1748 | rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq); |
---|
1513 | 1749 | if (rc) { |
---|
1514 | | - dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!"); |
---|
| 1750 | + ibdev_err(&rdev->ibdev, "Query HW SRQ failed!"); |
---|
1515 | 1751 | return rc; |
---|
1516 | 1752 | } |
---|
1517 | 1753 | srq_attr->max_wr = srq->qplib_srq.max_wqe; |
---|
.. | .. |
---|
1553 | 1789 | struct bnxt_re_qp *qp1_qp, |
---|
1554 | 1790 | int qp_attr_mask) |
---|
1555 | 1791 | { |
---|
1556 | | - struct bnxt_re_qp *qp = rdev->qp1_sqp; |
---|
| 1792 | + struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp; |
---|
1557 | 1793 | int rc = 0; |
---|
1558 | 1794 | |
---|
1559 | 1795 | if (qp_attr_mask & IB_QP_STATE) { |
---|
.. | .. |
---|
1577 | 1813 | |
---|
1578 | 1814 | rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp); |
---|
1579 | 1815 | if (rc) |
---|
1580 | | - dev_err(rdev_to_dev(rdev), |
---|
1581 | | - "Failed to modify Shadow QP for QP1"); |
---|
| 1816 | + ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1"); |
---|
1582 | 1817 | return rc; |
---|
1583 | 1818 | } |
---|
1584 | 1819 | |
---|
.. | .. |
---|
1598 | 1833 | curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state); |
---|
1599 | 1834 | new_qp_state = qp_attr->qp_state; |
---|
1600 | 1835 | if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state, |
---|
1601 | | - ib_qp->qp_type, qp_attr_mask, |
---|
1602 | | - IB_LINK_LAYER_ETHERNET)) { |
---|
1603 | | - dev_err(rdev_to_dev(rdev), |
---|
1604 | | - "Invalid attribute mask: %#x specified ", |
---|
1605 | | - qp_attr_mask); |
---|
1606 | | - dev_err(rdev_to_dev(rdev), |
---|
1607 | | - "for qpn: %#x type: %#x", |
---|
1608 | | - ib_qp->qp_num, ib_qp->qp_type); |
---|
1609 | | - dev_err(rdev_to_dev(rdev), |
---|
1610 | | - "curr_qp_state=0x%x, new_qp_state=0x%x\n", |
---|
1611 | | - curr_qp_state, new_qp_state); |
---|
| 1836 | + ib_qp->qp_type, qp_attr_mask)) { |
---|
| 1837 | + ibdev_err(&rdev->ibdev, |
---|
| 1838 | + "Invalid attribute mask: %#x specified ", |
---|
| 1839 | + qp_attr_mask); |
---|
| 1840 | + ibdev_err(&rdev->ibdev, |
---|
| 1841 | + "for qpn: %#x type: %#x", |
---|
| 1842 | + ib_qp->qp_num, ib_qp->qp_type); |
---|
| 1843 | + ibdev_err(&rdev->ibdev, |
---|
| 1844 | + "curr_qp_state=0x%x, new_qp_state=0x%x\n", |
---|
| 1845 | + curr_qp_state, new_qp_state); |
---|
1612 | 1846 | return -EINVAL; |
---|
1613 | 1847 | } |
---|
1614 | 1848 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE; |
---|
.. | .. |
---|
1616 | 1850 | |
---|
1617 | 1851 | if (!qp->sumem && |
---|
1618 | 1852 | qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { |
---|
1619 | | - dev_dbg(rdev_to_dev(rdev), |
---|
1620 | | - "Move QP = %p to flush list\n", |
---|
1621 | | - qp); |
---|
| 1853 | + ibdev_dbg(&rdev->ibdev, |
---|
| 1854 | + "Move QP = %p to flush list\n", qp); |
---|
1622 | 1855 | flags = bnxt_re_lock_cqs(qp); |
---|
1623 | 1856 | bnxt_qplib_add_flush_qp(&qp->qplib_qp); |
---|
1624 | 1857 | bnxt_re_unlock_cqs(qp, flags); |
---|
1625 | 1858 | } |
---|
1626 | 1859 | if (!qp->sumem && |
---|
1627 | 1860 | qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) { |
---|
1628 | | - dev_dbg(rdev_to_dev(rdev), |
---|
1629 | | - "Move QP = %p out of flush list\n", |
---|
1630 | | - qp); |
---|
| 1861 | + ibdev_dbg(&rdev->ibdev, |
---|
| 1862 | + "Move QP = %p out of flush list\n", qp); |
---|
1631 | 1863 | flags = bnxt_re_lock_cqs(qp); |
---|
1632 | 1864 | bnxt_qplib_clean_qp(&qp->qplib_qp); |
---|
1633 | 1865 | bnxt_re_unlock_cqs(qp, flags); |
---|
.. | .. |
---|
1644 | 1876 | __from_ib_access_flags(qp_attr->qp_access_flags); |
---|
1645 | 1877 | /* LOCAL_WRITE access must be set to allow RC receive */ |
---|
1646 | 1878 | qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; |
---|
| 1879 | + /* Temp: Set all params on QP as of now */ |
---|
| 1880 | + qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE; |
---|
| 1881 | + qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ; |
---|
1647 | 1882 | } |
---|
1648 | 1883 | if (qp_attr_mask & IB_QP_PKEY_INDEX) { |
---|
1649 | 1884 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY; |
---|
.. | .. |
---|
1657 | 1892 | const struct ib_global_route *grh = |
---|
1658 | 1893 | rdma_ah_read_grh(&qp_attr->ah_attr); |
---|
1659 | 1894 | const struct ib_gid_attr *sgid_attr; |
---|
| 1895 | + struct bnxt_re_gid_ctx *ctx; |
---|
1660 | 1896 | |
---|
1661 | 1897 | qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID | |
---|
1662 | 1898 | CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL | |
---|
.. | .. |
---|
1668 | 1904 | memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw, |
---|
1669 | 1905 | sizeof(qp->qplib_qp.ah.dgid.data)); |
---|
1670 | 1906 | qp->qplib_qp.ah.flow_label = grh->flow_label; |
---|
1671 | | - /* If RoCE V2 is enabled, stack will have two entries for |
---|
1672 | | - * each GID entry. Avoiding this duplicte entry in HW. Dividing |
---|
1673 | | - * the GID index by 2 for RoCE V2 |
---|
| 1907 | + sgid_attr = grh->sgid_attr; |
---|
| 1908 | + /* Get the HW context of the GID. The reference |
---|
| 1909 | + * to the GID table entry is already taken by the caller.
---|
1674 | 1910 | */ |
---|
1675 | | - qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2; |
---|
| 1911 | + ctx = rdma_read_gid_hw_context(sgid_attr); |
---|
| 1912 | + qp->qplib_qp.ah.sgid_index = ctx->idx; |
---|
1676 | 1913 | qp->qplib_qp.ah.host_sgid_index = grh->sgid_index; |
---|
1677 | 1914 | qp->qplib_qp.ah.hop_limit = grh->hop_limit; |
---|
1678 | 1915 | qp->qplib_qp.ah.traffic_class = grh->traffic_class; |
---|
.. | .. |
---|
1680 | 1917 | ether_addr_copy(qp->qplib_qp.ah.dmac, |
---|
1681 | 1918 | qp_attr->ah_attr.roce.dmac); |
---|
1682 | 1919 | |
---|
1683 | | - sgid_attr = qp_attr->ah_attr.grh.sgid_attr; |
---|
1684 | | - memcpy(qp->qplib_qp.smac, sgid_attr->ndev->dev_addr, |
---|
1685 | | - ETH_ALEN); |
---|
| 1920 | + rc = rdma_read_gid_l2_fields(sgid_attr, NULL, |
---|
| 1921 | + &qp->qplib_qp.smac[0]); |
---|
| 1922 | + if (rc) |
---|
| 1923 | + return rc; |
---|
| 1924 | + |
---|
1686 | 1925 | nw_type = rdma_gid_attr_network_type(sgid_attr); |
---|
1687 | 1926 | switch (nw_type) { |
---|
1688 | 1927 | case RDMA_NETWORK_IPV4: |
---|
.. | .. |
---|
1751 | 1990 | if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { |
---|
1752 | 1991 | if (qp_attr->max_dest_rd_atomic > |
---|
1753 | 1992 | dev_attr->max_qp_init_rd_atom) { |
---|
1754 | | - dev_err(rdev_to_dev(rdev), |
---|
1755 | | - "max_dest_rd_atomic requested%d is > dev_max%d", |
---|
1756 | | - qp_attr->max_dest_rd_atomic, |
---|
1757 | | - dev_attr->max_qp_init_rd_atom); |
---|
| 1993 | + ibdev_err(&rdev->ibdev, |
---|
| 1994 | + "max_dest_rd_atomic requested%d is > dev_max%d", |
---|
| 1995 | + qp_attr->max_dest_rd_atomic, |
---|
| 1996 | + dev_attr->max_qp_init_rd_atom); |
---|
1758 | 1997 | return -EINVAL; |
---|
1759 | 1998 | } |
---|
1760 | 1999 | |
---|
.. | .. |
---|
1775 | 2014 | (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) || |
---|
1776 | 2015 | (qp_attr->cap.max_inline_data >= |
---|
1777 | 2016 | dev_attr->max_inline_data)) { |
---|
1778 | | - dev_err(rdev_to_dev(rdev), |
---|
1779 | | - "Create QP failed - max exceeded"); |
---|
| 2017 | + ibdev_err(&rdev->ibdev, |
---|
| 2018 | + "Create QP failed - max exceeded"); |
---|
1780 | 2019 | return -EINVAL; |
---|
1781 | 2020 | } |
---|
1782 | 2021 | entries = roundup_pow_of_two(qp_attr->cap.max_send_wr); |
---|
.. | .. |
---|
1809 | 2048 | } |
---|
1810 | 2049 | rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp); |
---|
1811 | 2050 | if (rc) { |
---|
1812 | | - dev_err(rdev_to_dev(rdev), "Failed to modify HW QP"); |
---|
| 2051 | + ibdev_err(&rdev->ibdev, "Failed to modify HW QP"); |
---|
1813 | 2052 | return rc; |
---|
1814 | 2053 | } |
---|
1815 | | - if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) |
---|
| 2054 | + if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) |
---|
1816 | 2055 | rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask); |
---|
1817 | 2056 | return rc; |
---|
1818 | 2057 | } |
---|
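The modify-QP hunk above replaces the old `sgid_index / 2` guess (which assumed exactly two stack entries, RoCE v1 and v2, per HW GID) with rdma_read_gid_hw_context(), returning the bnxt_re_gid_ctx the driver stored when the GID was added. A toy model of why the cached index is the safer lookup; the table contents here are illustrative only:

```c
#include <stdio.h>

/* Toy GID table: the IB core keeps one entry per (GID, RoCE version),
 * so stack indexes 0/1 and 2/3 here share HW entries 0 and 1. The idx
 * values model what the driver cached when each GID was added. */
struct gid_ctx {
	unsigned int idx;	/* index in the HW SGID table */
};

static struct gid_ctx gid_tbl[4] = { {0}, {0}, {1}, {1} };

int main(void)
{
	unsigned int stack_index = 3;	/* index handed to modify_qp */

	/* Old code derived this as stack_index / 2; reading the cached
	 * context keeps working even if the table layout changes. */
	printf("hw sgid index = %u\n", gid_tbl[stack_index].idx);
	return 0;
}
```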
.. | .. |
---|
1834 | 2073 | |
---|
1835 | 2074 | rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp); |
---|
1836 | 2075 | if (rc) { |
---|
1837 | | - dev_err(rdev_to_dev(rdev), "Failed to query HW QP"); |
---|
| 2076 | + ibdev_err(&rdev->ibdev, "Failed to query HW QP"); |
---|
1838 | 2077 | goto out; |
---|
1839 | 2078 | } |
---|
1840 | 2079 | qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state); |
---|
.. | .. |
---|
1902 | 2141 | |
---|
1903 | 2142 | memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr)); |
---|
1904 | 2143 | |
---|
1905 | | - if (is_vlan_dev(sgid_attr->ndev)) |
---|
1906 | | - vlan_id = vlan_dev_vlan_id(sgid_attr->ndev); |
---|
| 2144 | + rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL); |
---|
| 2145 | + if (rc) |
---|
| 2146 | + return rc; |
---|
| 2147 | + |
---|
1907 | 2148 | /* Get network header type for this GID */ |
---|
1908 | 2149 | nw_type = rdma_gid_attr_network_type(sgid_attr); |
---|
1909 | 2150 | switch (nw_type) { |
---|
.. | .. |
---|
2038 | 2279 | wqe->num_sge++; |
---|
2039 | 2280 | |
---|
2040 | 2281 | } else { |
---|
2041 | | - dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!"); |
---|
| 2282 | + ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!"); |
---|
2042 | 2283 | rc = -ENOMEM; |
---|
2043 | 2284 | } |
---|
2044 | 2285 | return rc; |
---|
.. | .. |
---|
2055 | 2296 | struct bnxt_qplib_swqe *wqe, |
---|
2056 | 2297 | int payload_size) |
---|
2057 | 2298 | { |
---|
2058 | | - struct bnxt_qplib_sge ref, sge; |
---|
2059 | | - u32 rq_prod_index; |
---|
2060 | 2299 | struct bnxt_re_sqp_entries *sqp_entry; |
---|
| 2300 | + struct bnxt_qplib_sge ref, sge; |
---|
| 2301 | + struct bnxt_re_dev *rdev; |
---|
| 2302 | + u32 rq_prod_index; |
---|
| 2303 | + |
---|
| 2304 | + rdev = qp->rdev; |
---|
2061 | 2305 | |
---|
2062 | 2306 | rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp); |
---|
2063 | 2307 | |
---|
.. | .. |
---|
2072 | 2316 | ref.lkey = wqe->sg_list[0].lkey; |
---|
2073 | 2317 | ref.size = wqe->sg_list[0].size; |
---|
2074 | 2318 | |
---|
2075 | | - sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index]; |
---|
| 2319 | + sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index]; |
---|
2076 | 2320 | |
---|
2077 | 2321 | /* SGE 1 */ |
---|
2078 | 2322 | wqe->sg_list[0].addr = sge.addr; |
---|
.. | .. |
---|
2092 | 2336 | |
---|
2093 | 2337 | static int is_ud_qp(struct bnxt_re_qp *qp) |
---|
2094 | 2338 | { |
---|
2095 | | - return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD; |
---|
| 2339 | + return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD || |
---|
| 2340 | + qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI); |
---|
2096 | 2341 | } |
---|
2097 | 2342 | |
---|
2098 | 2343 | static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp, |
---|
.. | .. |
---|
2223 | 2468 | wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0]; |
---|
2224 | 2469 | wqe->frmr.page_list = mr->pages; |
---|
2225 | 2470 | wqe->frmr.page_list_len = mr->npages; |
---|
2226 | | - wqe->frmr.levels = qplib_frpl->hwq.level + 1; |
---|
| 2471 | + wqe->frmr.levels = qplib_frpl->hwq.level; |
---|
2227 | 2472 | wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR; |
---|
2228 | 2473 | |
---|
2229 | 2474 | /* Need unconditional fence for reg_mr |
---|
.. | .. |
---|
2270 | 2515 | |
---|
2271 | 2516 | if ((sge_len + wqe->inline_len) > |
---|
2272 | 2517 | BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) { |
---|
2273 | | - dev_err(rdev_to_dev(rdev), |
---|
2274 | | - "Inline data size requested > supported value"); |
---|
| 2518 | + ibdev_err(&rdev->ibdev, |
---|
| 2519 | + "Inline data size requested > supported value"); |
---|
2275 | 2520 | return -EINVAL; |
---|
2276 | 2521 | } |
---|
2277 | 2522 | sge_len = wr->sg_list[i].length; |
---|
.. | .. |
---|
2318 | 2563 | struct bnxt_re_qp *qp, |
---|
2319 | 2564 | const struct ib_send_wr *wr) |
---|
2320 | 2565 | { |
---|
2321 | | - struct bnxt_qplib_swqe wqe; |
---|
2322 | 2566 | int rc = 0, payload_sz = 0; |
---|
2323 | 2567 | unsigned long flags; |
---|
2324 | 2568 | |
---|
2325 | 2569 | spin_lock_irqsave(&qp->sq_lock, flags); |
---|
2326 | | - memset(&wqe, 0, sizeof(wqe)); |
---|
2327 | 2570 | while (wr) { |
---|
2328 | | - /* House keeping */ |
---|
2329 | | - memset(&wqe, 0, sizeof(wqe)); |
---|
| 2571 | + struct bnxt_qplib_swqe wqe = {}; |
---|
2330 | 2572 | |
---|
2331 | 2573 | /* Common */ |
---|
2332 | 2574 | wqe.num_sge = wr->num_sge; |
---|
2333 | 2575 | if (wr->num_sge > qp->qplib_qp.sq.max_sge) { |
---|
2334 | | - dev_err(rdev_to_dev(rdev), |
---|
2335 | | - "Limit exceeded for Send SGEs"); |
---|
| 2576 | + ibdev_err(&rdev->ibdev, |
---|
| 2577 | + "Limit exceeded for Send SGEs"); |
---|
2336 | 2578 | rc = -EINVAL; |
---|
2337 | 2579 | goto bad; |
---|
2338 | 2580 | } |
---|
.. | .. |
---|
2351 | 2593 | rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); |
---|
2352 | 2594 | bad: |
---|
2353 | 2595 | if (rc) { |
---|
2354 | | - dev_err(rdev_to_dev(rdev), |
---|
2355 | | - "Post send failed opcode = %#x rc = %d", |
---|
2356 | | - wr->opcode, rc); |
---|
| 2596 | + ibdev_err(&rdev->ibdev, |
---|
| 2597 | + "Post send failed opcode = %#x rc = %d", |
---|
| 2598 | + wr->opcode, rc); |
---|
2357 | 2599 | break; |
---|
2358 | 2600 | } |
---|
2359 | 2601 | wr = wr->next; |
---|
.. | .. |
---|
2380 | 2622 | /* Common */ |
---|
2381 | 2623 | wqe.num_sge = wr->num_sge; |
---|
2382 | 2624 | if (wr->num_sge > qp->qplib_qp.sq.max_sge) { |
---|
2383 | | - dev_err(rdev_to_dev(qp->rdev), |
---|
2384 | | - "Limit exceeded for Send SGEs"); |
---|
| 2625 | + ibdev_err(&qp->rdev->ibdev, |
---|
| 2626 | + "Limit exceeded for Send SGEs"); |
---|
2385 | 2627 | rc = -EINVAL; |
---|
2386 | 2628 | goto bad; |
---|
2387 | 2629 | } |
---|
.. | .. |
---|
2396 | 2638 | switch (wr->opcode) { |
---|
2397 | 2639 | case IB_WR_SEND: |
---|
2398 | 2640 | case IB_WR_SEND_WITH_IMM: |
---|
2399 | | - if (ib_qp->qp_type == IB_QPT_GSI) { |
---|
| 2641 | + if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) { |
---|
2400 | 2642 | rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe, |
---|
2401 | 2643 | payload_sz); |
---|
2402 | 2644 | if (rc) |
---|
.. | .. |
---|
2412 | 2654 | default: |
---|
2413 | 2655 | break; |
---|
2414 | 2656 | } |
---|
2415 | | - /* fall through */ |
---|
| 2657 | + fallthrough; |
---|
2416 | 2658 | case IB_WR_SEND_WITH_INV: |
---|
2417 | 2659 | rc = bnxt_re_build_send_wqe(qp, wr, &wqe); |
---|
2418 | 2660 | break; |
---|
.. | .. |
---|
2426 | 2668 | rc = bnxt_re_build_atomic_wqe(wr, &wqe); |
---|
2427 | 2669 | break; |
---|
2428 | 2670 | case IB_WR_RDMA_READ_WITH_INV: |
---|
2429 | | - dev_err(rdev_to_dev(qp->rdev), |
---|
2430 | | - "RDMA Read with Invalidate is not supported"); |
---|
| 2671 | + ibdev_err(&qp->rdev->ibdev, |
---|
| 2672 | + "RDMA Read with Invalidate is not supported"); |
---|
2431 | 2673 | rc = -EINVAL; |
---|
2432 | 2674 | goto bad; |
---|
2433 | 2675 | case IB_WR_LOCAL_INV: |
---|
.. | .. |
---|
2438 | 2680 | break; |
---|
2439 | 2681 | default: |
---|
2440 | 2682 | /* Unsupported WRs */ |
---|
2441 | | - dev_err(rdev_to_dev(qp->rdev), |
---|
2442 | | - "WR (%#x) is not supported", wr->opcode); |
---|
| 2683 | + ibdev_err(&qp->rdev->ibdev, |
---|
| 2684 | + "WR (%#x) is not supported", wr->opcode); |
---|
2443 | 2685 | rc = -EINVAL; |
---|
2444 | 2686 | goto bad; |
---|
2445 | 2687 | } |
---|
.. | .. |
---|
2447 | 2689 | rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); |
---|
2448 | 2690 | bad: |
---|
2449 | 2691 | if (rc) { |
---|
2450 | | - dev_err(rdev_to_dev(qp->rdev), |
---|
2451 | | - "post_send failed op:%#x qps = %#x rc = %d\n", |
---|
2452 | | - wr->opcode, qp->qplib_qp.state, rc); |
---|
| 2692 | + ibdev_err(&qp->rdev->ibdev, |
---|
| 2693 | + "post_send failed op:%#x qps = %#x rc = %d\n", |
---|
| 2694 | + wr->opcode, qp->qplib_qp.state, rc); |
---|
2453 | 2695 | *bad_wr = wr; |
---|
2454 | 2696 | break; |
---|
2455 | 2697 | } |
---|
.. | .. |
---|
2477 | 2719 | /* Common */ |
---|
2478 | 2720 | wqe.num_sge = wr->num_sge; |
---|
2479 | 2721 | if (wr->num_sge > qp->qplib_qp.rq.max_sge) { |
---|
2480 | | - dev_err(rdev_to_dev(rdev), |
---|
2481 | | - "Limit exceeded for Receive SGEs"); |
---|
| 2722 | + ibdev_err(&rdev->ibdev, |
---|
| 2723 | + "Limit exceeded for Receive SGEs"); |
---|
2482 | 2724 | rc = -EINVAL; |
---|
2483 | 2725 | break; |
---|
2484 | 2726 | } |
---|
.. | .. |
---|
2514 | 2756 | /* Common */ |
---|
2515 | 2757 | wqe.num_sge = wr->num_sge; |
---|
2516 | 2758 | if (wr->num_sge > qp->qplib_qp.rq.max_sge) { |
---|
2517 | | - dev_err(rdev_to_dev(qp->rdev), |
---|
2518 | | - "Limit exceeded for Receive SGEs"); |
---|
| 2759 | + ibdev_err(&qp->rdev->ibdev, |
---|
| 2760 | + "Limit exceeded for Receive SGEs"); |
---|
2519 | 2761 | rc = -EINVAL; |
---|
2520 | 2762 | *bad_wr = wr; |
---|
2521 | 2763 | break; |
---|
.. | .. |
---|
2526 | 2768 | wqe.wr_id = wr->wr_id; |
---|
2527 | 2769 | wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV; |
---|
2528 | 2770 | |
---|
2529 | | - if (ib_qp->qp_type == IB_QPT_GSI) |
---|
| 2771 | + if (ib_qp->qp_type == IB_QPT_GSI && |
---|
| 2772 | + qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI) |
---|
2530 | 2773 | rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe, |
---|
2531 | 2774 | payload_sz); |
---|
2532 | 2775 | if (!rc) |
---|
.. | .. |
---|
2554 | 2797 | } |
---|
2555 | 2798 | |
---|
2556 | 2799 | /* Completion Queues */ |
---|
2557 | | -int bnxt_re_destroy_cq(struct ib_cq *ib_cq) |
---|
| 2800 | +int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) |
---|
2558 | 2801 | { |
---|
2559 | | - int rc; |
---|
2560 | 2802 | struct bnxt_re_cq *cq; |
---|
2561 | 2803 | struct bnxt_qplib_nq *nq; |
---|
2562 | 2804 | struct bnxt_re_dev *rdev; |
---|
.. | .. |
---|
2565 | 2807 | rdev = cq->rdev; |
---|
2566 | 2808 | nq = cq->qplib_cq.nq; |
---|
2567 | 2809 | |
---|
2568 | | - rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq); |
---|
2569 | | - if (rc) { |
---|
2570 | | - dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ"); |
---|
2571 | | - return rc; |
---|
2572 | | - } |
---|
2573 | | - if (!IS_ERR_OR_NULL(cq->umem)) |
---|
2574 | | - ib_umem_release(cq->umem); |
---|
| 2810 | + bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq); |
---|
| 2811 | + ib_umem_release(cq->umem); |
---|
2575 | 2812 | |
---|
2576 | 2813 | atomic_dec(&rdev->cq_count); |
---|
2577 | 2814 | nq->budget--; |
---|
2578 | 2815 | kfree(cq->cql); |
---|
2579 | | - kfree(cq); |
---|
2580 | | - |
---|
2581 | 2816 | return 0; |
---|
2582 | 2817 | } |
---|
2583 | 2818 | |
---|
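The destroy-CQ path above drops two guards at once: the rc check on bnxt_qplib_destroy_cq() (its status is no longer acted on here) and the IS_ERR_OR_NULL test before ib_umem_release(), which accepts NULL. The final kfree(cq) also goes away because the core now owns the allocation. A tiny sketch of the NULL-tolerant release idiom, with a hypothetical release() helper:

```c
#include <stdio.h>
#include <stdlib.h>

/* A release helper that tolerates NULL, modelling ib_umem_release();
 * callers can then unwind unconditionally, with no guard at each site. */
static void release(char *buf)
{
	if (!buf)
		return;
	free(buf);
	puts("released");
}

int main(void)
{
	release(NULL);		/* safe no-op */
	release(malloc(8));	/* real release */
	return 0;
}
```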
2584 | | -struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev, |
---|
2585 | | - const struct ib_cq_init_attr *attr, |
---|
2586 | | - struct ib_ucontext *context, |
---|
2587 | | - struct ib_udata *udata) |
---|
| 2819 | +int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, |
---|
| 2820 | + struct ib_udata *udata) |
---|
2588 | 2821 | { |
---|
2589 | | - struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); |
---|
| 2822 | + struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev); |
---|
2590 | 2823 | struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; |
---|
2591 | | - struct bnxt_re_cq *cq = NULL; |
---|
| 2824 | + struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq); |
---|
2592 | 2825 | int rc, entries; |
---|
2593 | 2826 | int cqe = attr->cqe; |
---|
2594 | 2827 | struct bnxt_qplib_nq *nq = NULL; |
---|
.. | .. |
---|
2596 | 2829 | |
---|
2597 | 2830 | /* Validate CQ fields */ |
---|
2598 | 2831 | if (cqe < 1 || cqe > dev_attr->max_cq_wqes) { |
---|
2599 | | - dev_err(rdev_to_dev(rdev), "Failed to create CQ -max exceeded"); |
---|
2600 | | - return ERR_PTR(-EINVAL); |
---|
| 2832 | + ibdev_err(&rdev->ibdev, "Failed to create CQ - max exceeded");
---|
| 2833 | + return -EINVAL; |
---|
2601 | 2834 | } |
---|
2602 | | - cq = kzalloc(sizeof(*cq), GFP_KERNEL); |
---|
2603 | | - if (!cq) |
---|
2604 | | - return ERR_PTR(-ENOMEM); |
---|
2605 | 2835 | |
---|
2606 | 2836 | cq->rdev = rdev; |
---|
2607 | 2837 | cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq); |
---|
.. | .. |
---|
2610 | 2840 | if (entries > dev_attr->max_cq_wqes + 1) |
---|
2611 | 2841 | entries = dev_attr->max_cq_wqes + 1; |
---|
2612 | 2842 | |
---|
2613 | | - if (context) { |
---|
| 2843 | + cq->qplib_cq.sg_info.pgsize = PAGE_SIZE; |
---|
| 2844 | + cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT; |
---|
| 2845 | + if (udata) { |
---|
2614 | 2846 | struct bnxt_re_cq_req req; |
---|
2615 | | - struct bnxt_re_ucontext *uctx = container_of |
---|
2616 | | - (context, |
---|
2617 | | - struct bnxt_re_ucontext, |
---|
2618 | | - ib_uctx); |
---|
| 2847 | + struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context( |
---|
| 2848 | + udata, struct bnxt_re_ucontext, ib_uctx); |
---|
2619 | 2849 | if (ib_copy_from_udata(&req, udata, sizeof(req))) { |
---|
2620 | 2850 | rc = -EFAULT; |
---|
2621 | 2851 | goto fail; |
---|
2622 | 2852 | } |
---|
2623 | 2853 | |
---|
2624 | | - cq->umem = ib_umem_get(context, req.cq_va, |
---|
| 2854 | + cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va, |
---|
2625 | 2855 | entries * sizeof(struct cq_base), |
---|
2626 | | - IB_ACCESS_LOCAL_WRITE, 1); |
---|
| 2856 | + IB_ACCESS_LOCAL_WRITE); |
---|
2627 | 2857 | if (IS_ERR(cq->umem)) { |
---|
2628 | 2858 | rc = PTR_ERR(cq->umem); |
---|
2629 | 2859 | goto fail; |
---|
2630 | 2860 | } |
---|
2631 | | - cq->qplib_cq.sghead = cq->umem->sg_head.sgl; |
---|
2632 | | - cq->qplib_cq.nmap = cq->umem->nmap; |
---|
| 2861 | + cq->qplib_cq.sg_info.umem = cq->umem; |
---|
2633 | 2862 | cq->qplib_cq.dpi = &uctx->dpi; |
---|
2634 | 2863 | } else { |
---|
2635 | 2864 | cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL); |
---|
.. | .. |
---|
2641 | 2870 | } |
---|
2642 | 2871 | |
---|
2643 | 2872 | cq->qplib_cq.dpi = &rdev->dpi_privileged; |
---|
2644 | | - cq->qplib_cq.sghead = NULL; |
---|
2645 | | - cq->qplib_cq.nmap = 0; |
---|
2646 | 2873 | } |
---|
2647 | 2874 | /* |
---|
2648 | 2875 | * Allocating the NQ in a round robin fashion. nq_alloc_cnt is a |
---|
.. | .. |
---|
2656 | 2883 | |
---|
2657 | 2884 | rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq); |
---|
2658 | 2885 | if (rc) { |
---|
2659 | | - dev_err(rdev_to_dev(rdev), "Failed to create HW CQ"); |
---|
| 2886 | + ibdev_err(&rdev->ibdev, "Failed to create HW CQ"); |
---|
2660 | 2887 | goto fail; |
---|
2661 | 2888 | } |
---|
2662 | 2889 | |
---|
.. | .. |
---|
2667 | 2894 | atomic_inc(&rdev->cq_count); |
---|
2668 | 2895 | spin_lock_init(&cq->cq_lock); |
---|
2669 | 2896 | |
---|
2670 | | - if (context) { |
---|
| 2897 | + if (udata) { |
---|
2671 | 2898 | struct bnxt_re_cq_resp resp; |
---|
2672 | 2899 | |
---|
2673 | 2900 | resp.cqid = cq->qplib_cq.id; |
---|
.. | .. |
---|
2676 | 2903 | resp.rsvd = 0; |
---|
2677 | 2904 | rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); |
---|
2678 | 2905 | if (rc) { |
---|
2679 | | - dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata"); |
---|
| 2906 | + ibdev_err(&rdev->ibdev, "Failed to copy CQ udata"); |
---|
2680 | 2907 | bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq); |
---|
2681 | 2908 | goto c2fail; |
---|
2682 | 2909 | } |
---|
2683 | 2910 | } |
---|
2684 | 2911 | |
---|
2685 | | - return &cq->ib_cq; |
---|
| 2912 | + return 0; |
---|
2686 | 2913 | |
---|
2687 | 2914 | c2fail: |
---|
2688 | | - if (context) |
---|
2689 | | - ib_umem_release(cq->umem); |
---|
| 2915 | + ib_umem_release(cq->umem); |
---|
2690 | 2916 | fail: |
---|
2691 | 2917 | kfree(cq->cql); |
---|
2692 | | - kfree(cq); |
---|
2693 | | - return ERR_PTR(rc); |
---|
| 2918 | + return rc; |
---|
2694 | 2919 | } |
---|
2695 | 2920 | |
---|
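As the comment inside bnxt_re_create_cq() notes, the driver spreads new CQs across its notification queues round-robin using the nq_alloc_cnt counter. A compilable model of that selection; the NQ count is hypothetical, and the real driver bumps the counter with an atomic increment:

```c
#include <stdio.h>

#define NUM_NQS 4			/* hypothetical NQ count */

static unsigned int nq_alloc_cnt;	/* an atomic_t in the driver */

/* Spread new CQs across the notification queues round-robin. */
static unsigned int pick_nq(void)
{
	return nq_alloc_cnt++ % NUM_NQS;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("cq %d -> nq %u\n", i, pick_nq());
	return 0;
}
```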
2696 | 2921 | static u8 __req_to_ib_wc_status(u8 qstatus) |
---|
.. | .. |
---|
2909 | 3134 | return rc; |
---|
2910 | 3135 | } |
---|
2911 | 3136 | |
---|
2912 | | -static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp, |
---|
| 3137 | +static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp, |
---|
2913 | 3138 | struct bnxt_qplib_cqe *cqe) |
---|
2914 | 3139 | { |
---|
2915 | | - struct bnxt_re_dev *rdev = qp1_qp->rdev; |
---|
| 3140 | + struct bnxt_re_dev *rdev = gsi_qp->rdev; |
---|
2916 | 3141 | struct bnxt_re_sqp_entries *sqp_entry = NULL; |
---|
2917 | | - struct bnxt_re_qp *qp = rdev->qp1_sqp; |
---|
| 3142 | + struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp; |
---|
| 3143 | + struct bnxt_re_ah *gsi_sah; |
---|
2918 | 3144 | struct ib_send_wr *swr; |
---|
2919 | 3145 | struct ib_ud_wr udwr; |
---|
2920 | 3146 | struct ib_recv_wr rwr; |
---|
.. | .. |
---|
2937 | 3163 | swr = &udwr.wr; |
---|
2938 | 3164 | tbl_idx = cqe->wr_id; |
---|
2939 | 3165 | |
---|
2940 | | - rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf + |
---|
2941 | | - (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size); |
---|
2942 | | - rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp, |
---|
| 3166 | + rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf + |
---|
| 3167 | + (tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size); |
---|
| 3168 | + rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp, |
---|
2943 | 3169 | tbl_idx); |
---|
2944 | 3170 | |
---|
2945 | 3171 | /* Shadow QP header buffer */ |
---|
2946 | | - shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp, |
---|
| 3172 | + shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_sqp->qplib_qp,
---|
2947 | 3173 | tbl_idx); |
---|
2948 | | - sqp_entry = &rdev->sqp_tbl[tbl_idx]; |
---|
| 3174 | + sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx]; |
---|
2949 | 3175 | |
---|
2950 | 3176 | /* Store this cqe */ |
---|
2951 | 3177 | memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe)); |
---|
2952 | | - sqp_entry->qp1_qp = qp1_qp; |
---|
| 3178 | + sqp_entry->qp1_qp = gsi_qp; |
---|
2953 | 3179 | |
---|
2954 | 3180 | /* Find packet type from the cqe */ |
---|
2955 | 3181 | |
---|
2956 | 3182 | pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags, |
---|
2957 | 3183 | cqe->raweth_qp1_flags2); |
---|
2958 | 3184 | if (pkt_type < 0) { |
---|
2959 | | - dev_err(rdev_to_dev(rdev), "Invalid packet\n"); |
---|
| 3185 | + ibdev_err(&rdev->ibdev, "Invalid packet\n"); |
---|
2960 | 3186 | return -EINVAL; |
---|
2961 | 3187 | } |
---|
2962 | 3188 | |
---|
.. | .. |
---|
3003 | 3229 | rwr.wr_id = tbl_idx; |
---|
3004 | 3230 | rwr.next = NULL; |
---|
3005 | 3231 | |
---|
3006 | | - rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr); |
---|
| 3232 | + rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr); |
---|
3007 | 3233 | if (rc) { |
---|
3008 | | - dev_err(rdev_to_dev(rdev), |
---|
3009 | | - "Failed to post Rx buffers to shadow QP"); |
---|
| 3234 | + ibdev_err(&rdev->ibdev, |
---|
| 3235 | + "Failed to post Rx buffers to shadow QP"); |
---|
3010 | 3236 | return -ENOMEM; |
---|
3011 | 3237 | } |
---|
3012 | 3238 | |
---|
.. | .. |
---|
3015 | 3241 | swr->wr_id = tbl_idx; |
---|
3016 | 3242 | swr->opcode = IB_WR_SEND; |
---|
3017 | 3243 | swr->next = NULL; |
---|
3018 | | - |
---|
3019 | | - udwr.ah = &rdev->sqp_ah->ib_ah; |
---|
3020 | | - udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id; |
---|
3021 | | - udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey; |
---|
| 3244 | + gsi_sah = rdev->gsi_ctx.gsi_sah; |
---|
| 3245 | + udwr.ah = &gsi_sah->ib_ah; |
---|
| 3246 | + udwr.remote_qpn = gsi_sqp->qplib_qp.id; |
---|
| 3247 | + udwr.remote_qkey = gsi_sqp->qplib_qp.qkey; |
---|
3022 | 3248 | |
---|
3023 | 3249 | /* post data received in the send queue */ |
---|
3024 | | - rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr); |
---|
3025 | | - |
---|
3026 | | - return 0; |
---|
| 3250 | + return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr); |
---|
3027 | 3251 | } |
---|
3028 | 3252 | |
---|
3029 | 3253 | static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc, |
---|
.. | .. |
---|
3088 | 3312 | wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; |
---|
3089 | 3313 | } |
---|
3090 | 3314 | |
---|
3091 | | -static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp, |
---|
| 3315 | +static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp, |
---|
3092 | 3316 | struct ib_wc *wc, |
---|
3093 | 3317 | struct bnxt_qplib_cqe *cqe) |
---|
3094 | 3318 | { |
---|
3095 | | - struct bnxt_re_dev *rdev = qp->rdev; |
---|
3096 | | - struct bnxt_re_qp *qp1_qp = NULL; |
---|
| 3319 | + struct bnxt_re_dev *rdev = gsi_sqp->rdev; |
---|
| 3320 | + struct bnxt_re_qp *gsi_qp = NULL; |
---|
3097 | 3321 | struct bnxt_qplib_cqe *orig_cqe = NULL; |
---|
3098 | 3322 | struct bnxt_re_sqp_entries *sqp_entry = NULL; |
---|
3099 | 3323 | int nw_type; |
---|
.. | .. |
---|
3103 | 3327 | |
---|
3104 | 3328 | tbl_idx = cqe->wr_id; |
---|
3105 | 3329 | |
---|
3106 | | - sqp_entry = &rdev->sqp_tbl[tbl_idx]; |
---|
3107 | | - qp1_qp = sqp_entry->qp1_qp; |
---|
| 3330 | + sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx]; |
---|
| 3331 | + gsi_qp = sqp_entry->qp1_qp; |
---|
3108 | 3332 | orig_cqe = &sqp_entry->cqe; |
---|
3109 | 3333 | |
---|
3110 | 3334 | wc->wr_id = sqp_entry->wrid; |
---|
3111 | 3335 | wc->byte_len = orig_cqe->length; |
---|
3112 | | - wc->qp = &qp1_qp->ib_qp; |
---|
| 3336 | + wc->qp = &gsi_qp->ib_qp; |
---|
3113 | 3337 | |
---|
3114 | 3338 | wc->ex.imm_data = orig_cqe->immdata; |
---|
3115 | 3339 | wc->src_qp = orig_cqe->src_qp; |
---|
.. | .. |
---|
3136 | 3360 | } |
---|
3137 | 3361 | } |
---|
3138 | 3362 | |
---|
3139 | | -static void bnxt_re_process_res_ud_wc(struct ib_wc *wc, |
---|
| 3363 | +static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp, |
---|
| 3364 | + struct ib_wc *wc, |
---|
3140 | 3365 | struct bnxt_qplib_cqe *cqe) |
---|
3141 | 3366 | { |
---|
| 3367 | + struct bnxt_re_dev *rdev; |
---|
| 3368 | + u16 vlan_id = 0; |
---|
| 3369 | + u8 nw_type; |
---|
| 3370 | + |
---|
| 3371 | + rdev = qp->rdev; |
---|
3142 | 3372 | wc->opcode = IB_WC_RECV; |
---|
3143 | 3373 | wc->status = __rc_to_ib_wc_status(cqe->status); |
---|
3144 | 3374 | |
---|
3145 | | - if (cqe->flags & CQ_RES_RC_FLAGS_IMM) |
---|
| 3375 | + if (cqe->flags & CQ_RES_UD_FLAGS_IMM) |
---|
3146 | 3376 | wc->wc_flags |= IB_WC_WITH_IMM; |
---|
3147 | | - if (cqe->flags & CQ_RES_RC_FLAGS_INV) |
---|
3148 | | - wc->wc_flags |= IB_WC_WITH_INVALIDATE; |
---|
3149 | | - if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) == |
---|
3150 | | - (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) |
---|
3151 | | - wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; |
---|
| 3377 | + /* report only on GSI QP for Thor */ |
---|
| 3378 | + if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) { |
---|
| 3379 | + wc->wc_flags |= IB_WC_GRH; |
---|
| 3380 | + memcpy(wc->smac, cqe->smac, ETH_ALEN); |
---|
| 3381 | + wc->wc_flags |= IB_WC_WITH_SMAC; |
---|
| 3382 | + if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) { |
---|
| 3383 | + vlan_id = (cqe->cfa_meta & 0xFFF); |
---|
| 3384 | + } |
---|
| 3385 | + /* Mark only if vlan_id is nonzero */
---|
| 3386 | + if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) { |
---|
| 3387 | + wc->vlan_id = vlan_id; |
---|
| 3388 | + wc->wc_flags |= IB_WC_WITH_VLAN; |
---|
| 3389 | + } |
---|
| 3390 | + nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >> |
---|
| 3391 | + CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT; |
---|
| 3392 | + wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type); |
---|
| 3393 | + wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE; |
---|
| 3394 | + } |
---|
| 3395 | + |
---|
3152 | 3396 | } |
---|
3153 | 3397 | |
---|
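bnxt_re_process_res_ud_wc() above decodes two packed CQE fields on the GSI path: the 12-bit VLAN ID from cqe->cfa_meta and the RoCE IP version from the UD flags. A standalone sketch of that mask-and-shift extraction; the mask and shift values here are assumptions for illustration, the real definitions live in the bnxt_re hardware headers:

```c
#include <stdio.h>

/* Assumed field layouts, for illustration only. */
#define CFA_META_VLAN_MASK		0xFFFu
#define UD_FLAGS_ROCE_IP_VER_MASK	0x30u
#define UD_FLAGS_ROCE_IP_VER_SFT	4

int main(void)
{
	unsigned int cfa_meta = 0x1064;	/* sample metadata word */
	unsigned int flags = 0x20;	/* sample CQE flags */
	unsigned int vlan_id, ip_ver;

	vlan_id = cfa_meta & CFA_META_VLAN_MASK;
	ip_ver = (flags & UD_FLAGS_ROCE_IP_VER_MASK) >>
		 UD_FLAGS_ROCE_IP_VER_SFT;

	if (vlan_id)	/* report the VLAN only when actually tagged */
		printf("vlan %u\n", vlan_id);
	printf("roce ip version field %u\n", ip_ver);
	return 0;
}
```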
3154 | 3398 | static int send_phantom_wqe(struct bnxt_re_qp *qp) |
---|
.. | .. |
---|
3162 | 3406 | rc = bnxt_re_bind_fence_mw(lib_qp); |
---|
3163 | 3407 | if (!rc) { |
---|
3164 | 3408 | lib_qp->sq.phantom_wqe_cnt++; |
---|
3165 | | - dev_dbg(&lib_qp->sq.hwq.pdev->dev, |
---|
3166 | | - "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n", |
---|
3167 | | - lib_qp->id, lib_qp->sq.hwq.prod, |
---|
3168 | | - HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq), |
---|
3169 | | - lib_qp->sq.phantom_wqe_cnt); |
---|
| 3409 | + ibdev_dbg(&qp->rdev->ibdev, |
---|
| 3410 | + "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n", |
---|
| 3411 | + lib_qp->id, lib_qp->sq.hwq.prod, |
---|
| 3412 | + HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq), |
---|
| 3413 | + lib_qp->sq.phantom_wqe_cnt); |
---|
3170 | 3414 | } |
---|
3171 | 3415 | |
---|
3172 | 3416 | spin_unlock_irqrestore(&qp->sq_lock, flags); |
---|
.. | .. |
---|
3176 | 3420 | int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) |
---|
3177 | 3421 | { |
---|
3178 | 3422 | struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); |
---|
3179 | | - struct bnxt_re_qp *qp; |
---|
| 3423 | + struct bnxt_re_qp *qp, *sh_qp; |
---|
3180 | 3424 | struct bnxt_qplib_cqe *cqe; |
---|
3181 | 3425 | int i, ncqe, budget; |
---|
3182 | 3426 | struct bnxt_qplib_q *sq; |
---|
.. | .. |
---|
3189 | 3433 | budget = min_t(u32, num_entries, cq->max_cql); |
---|
3190 | 3434 | num_entries = budget; |
---|
3191 | 3435 | if (!cq->cql) { |
---|
3192 | | - dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use"); |
---|
| 3436 | + ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use"); |
---|
3193 | 3437 | goto exit; |
---|
3194 | 3438 | } |
---|
3195 | 3439 | cqe = &cq->cql[0]; |
---|
.. | .. |
---|
3202 | 3446 | qp = container_of(lib_qp, |
---|
3203 | 3447 | struct bnxt_re_qp, qplib_qp); |
---|
3204 | 3448 | if (send_phantom_wqe(qp) == -ENOMEM) |
---|
3205 | | - dev_err(rdev_to_dev(cq->rdev), |
---|
3206 | | - "Phantom failed! Scheduled to send again\n"); |
---|
| 3449 | + ibdev_err(&cq->rdev->ibdev, |
---|
| 3450 | + "Phantom failed! Scheduled to send again\n"); |
---|
3207 | 3451 | else |
---|
3208 | 3452 | sq->send_phantom = false; |
---|
3209 | 3453 | } |
---|
.. | .. |
---|
3227 | 3471 | (unsigned long)(cqe->qp_handle), |
---|
3228 | 3472 | struct bnxt_re_qp, qplib_qp); |
---|
3229 | 3473 | if (!qp) { |
---|
3230 | | - dev_err(rdev_to_dev(cq->rdev), |
---|
3231 | | - "POLL CQ : bad QP handle"); |
---|
| 3474 | + ibdev_err(&cq->rdev->ibdev, "POLL CQ : bad QP handle"); |
---|
3232 | 3475 | continue; |
---|
3233 | 3476 | } |
---|
3234 | 3477 | wc->qp = &qp->ib_qp; |
---|
.. | .. |
---|
3240 | 3483 | |
---|
3241 | 3484 | switch (cqe->opcode) { |
---|
3242 | 3485 | case CQ_BASE_CQE_TYPE_REQ: |
---|
3243 | | - if (qp->qplib_qp.id == |
---|
3244 | | - qp->rdev->qp1_sqp->qplib_qp.id) { |
---|
| 3486 | + sh_qp = qp->rdev->gsi_ctx.gsi_sqp; |
---|
| 3487 | + if (sh_qp && |
---|
| 3488 | + qp->qplib_qp.id == sh_qp->qplib_qp.id) { |
---|
3245 | 3489 | /* Handle this completion with |
---|
3246 | 3490 | * the stored completion |
---|
3247 | 3491 | */ |
---|
.. | .. |
---|
3267 | 3511 | * stored in the table |
---|
3268 | 3512 | */ |
---|
3269 | 3513 | tbl_idx = cqe->wr_id; |
---|
3270 | | - sqp_entry = &cq->rdev->sqp_tbl[tbl_idx]; |
---|
| 3514 | + sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx]; |
---|
3271 | 3515 | wc->wr_id = sqp_entry->wrid; |
---|
3272 | 3516 | bnxt_re_process_res_rawqp1_wc(wc, cqe); |
---|
3273 | 3517 | break; |
---|
.. | .. |
---|
3275 | 3519 | bnxt_re_process_res_rc_wc(wc, cqe); |
---|
3276 | 3520 | break; |
---|
3277 | 3521 | case CQ_BASE_CQE_TYPE_RES_UD: |
---|
3278 | | - if (qp->qplib_qp.id == |
---|
3279 | | - qp->rdev->qp1_sqp->qplib_qp.id) { |
---|
| 3522 | + sh_qp = qp->rdev->gsi_ctx.gsi_sqp; |
---|
| 3523 | + if (sh_qp && |
---|
| 3524 | + qp->qplib_qp.id == sh_qp->qplib_qp.id) { |
---|
3280 | 3525 | /* Handle this completion with |
---|
3281 | 3526 | * the stored completion |
---|
3282 | 3527 | */ |
---|
.. | .. |
---|
3288 | 3533 | break; |
---|
3289 | 3534 | } |
---|
3290 | 3535 | } |
---|
3291 | | - bnxt_re_process_res_ud_wc(wc, cqe); |
---|
| 3536 | + bnxt_re_process_res_ud_wc(qp, wc, cqe); |
---|
3292 | 3537 | break; |
---|
3293 | 3538 | default: |
---|
3294 | | - dev_err(rdev_to_dev(cq->rdev), |
---|
3295 | | - "POLL CQ : type 0x%x not handled", |
---|
3296 | | - cqe->opcode); |
---|
| 3539 | + ibdev_err(&cq->rdev->ibdev, |
---|
| 3540 | + "POLL CQ : type 0x%x not handled", |
---|
| 3541 | + cqe->opcode); |
---|
3297 | 3542 | continue; |
---|
3298 | 3543 | } |
---|
3299 | 3544 | wc++; |
---|
.. | .. |
---|
3315 | 3560 | spin_lock_irqsave(&cq->cq_lock, flags); |
---|
3316 | 3561 | /* Trigger on the very next completion */ |
---|
3317 | 3562 | if (ib_cqn_flags & IB_CQ_NEXT_COMP) |
---|
3318 | | - type = DBR_DBR_TYPE_CQ_ARMALL; |
---|
| 3563 | + type = DBC_DBC_TYPE_CQ_ARMALL; |
---|
3319 | 3564 | /* Trigger on the next solicited completion */ |
---|
3320 | 3565 | else if (ib_cqn_flags & IB_CQ_SOLICITED) |
---|
3321 | | - type = DBR_DBR_TYPE_CQ_ARMSE; |
---|
| 3566 | + type = DBC_DBC_TYPE_CQ_ARMSE; |
---|
3322 | 3567 | |
---|
3323 | 3568 | /* Poll to see if there are missed events */ |
---|
3324 | 3569 | if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) && |
---|
.. | .. |
---|
3339 | 3584 | struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); |
---|
3340 | 3585 | struct bnxt_re_dev *rdev = pd->rdev; |
---|
3341 | 3586 | struct bnxt_re_mr *mr; |
---|
3342 | | - u64 pbl = 0; |
---|
3343 | 3587 | int rc; |
---|
3344 | 3588 | |
---|
3345 | 3589 | mr = kzalloc(sizeof(*mr), GFP_KERNEL); |
---|
.. | .. |
---|
3358 | 3602 | |
---|
3359 | 3603 | mr->qplib_mr.hwq.level = PBL_LVL_MAX; |
---|
3360 | 3604 mr->qplib_mr.total_size = -1; /* Infinite length */
---|
3361 | | - rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false, |
---|
| 3605 | + rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 0, |
---|
3362 | 3606 | PAGE_SIZE); |
---|
3363 | 3607 | if (rc) |
---|
3364 | 3608 | goto fail_mr; |
---|
.. | .. |
---|
3378 | 3622 | return ERR_PTR(rc); |
---|
3379 | 3623 | } |
---|
3380 | 3624 | |
---|
3381 | | -int bnxt_re_dereg_mr(struct ib_mr *ib_mr) |
---|
| 3625 | +int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) |
---|
3382 | 3626 | { |
---|
3383 | 3627 | struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr); |
---|
3384 | 3628 | struct bnxt_re_dev *rdev = mr->rdev; |
---|
.. | .. |
---|
3386 | 3630 | |
---|
3387 | 3631 | rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); |
---|
3388 | 3632 | if (rc) { |
---|
3389 | | - dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc); |
---|
| 3633 | + ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc); |
---|
3390 | 3634 | return rc; |
---|
3391 | 3635 | } |
---|
3392 | 3636 | |
---|
.. | .. |
---|
3397 | 3641 | mr->npages = 0; |
---|
3398 | 3642 | mr->pages = NULL; |
---|
3399 | 3643 | } |
---|
3400 | | - if (!IS_ERR_OR_NULL(mr->ib_umem)) |
---|
3401 | | - ib_umem_release(mr->ib_umem); |
---|
| 3644 | + ib_umem_release(mr->ib_umem); |
---|
3402 | 3645 | |
---|
3403 | 3646 | kfree(mr); |
---|
3404 | 3647 | atomic_dec(&rdev->mr_count); |
---|
.. | .. |
---|
3434 | 3677 | int rc; |
---|
3435 | 3678 | |
---|
3436 | 3679 | if (type != IB_MR_TYPE_MEM_REG) { |
---|
3437 | | - dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type); |
---|
| 3680 | + ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type); |
---|
3438 | 3681 | return ERR_PTR(-EINVAL); |
---|
3439 | 3682 | } |
---|
3440 | 3683 | if (max_num_sg > MAX_PBL_LVL_1_PGS) |
---|
.. | .. |
---|
3464 | 3707 | rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res, |
---|
3465 | 3708 | &mr->qplib_frpl, max_num_sg); |
---|
3466 | 3709 | if (rc) { |
---|
3467 | | - dev_err(rdev_to_dev(rdev), |
---|
3468 | | - "Failed to allocate HW FR page list"); |
---|
| 3710 | + ibdev_err(&rdev->ibdev, |
---|
| 3711 | + "Failed to allocate HW FR page list"); |
---|
3469 | 3712 | goto fail_mr; |
---|
3470 | 3713 | } |
---|
3471 | 3714 | |
---|
.. | .. |
---|
3500 | 3743 | CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B); |
---|
3501 | 3744 | rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw); |
---|
3502 | 3745 | if (rc) { |
---|
3503 | | - dev_err(rdev_to_dev(rdev), "Allocate MW failed!"); |
---|
| 3746 | + ibdev_err(&rdev->ibdev, "Allocate MW failed!"); |
---|
3504 | 3747 | goto fail; |
---|
3505 | 3748 | } |
---|
3506 | 3749 | mw->ib_mw.rkey = mw->qplib_mw.rkey; |
---|
.. | .. |
---|
3521 | 3764 | |
---|
3522 | 3765 | rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw); |
---|
3523 | 3766 | if (rc) { |
---|
3524 | | - dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc); |
---|
| 3767 | + ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc); |
---|
3525 | 3768 | return rc; |
---|
3526 | 3769 | } |
---|
3527 | 3770 | |
---|
3528 | 3771 | kfree(mw); |
---|
3529 | 3772 | atomic_dec(&rdev->mw_count); |
---|
3530 | 3773 | return rc; |
---|
3531 | | -} |
---|
3532 | | - |
---|
3533 | | -static int bnxt_re_page_size_ok(int page_shift) |
---|
3534 | | -{ |
---|
3535 | | - switch (page_shift) { |
---|
3536 | | - case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K: |
---|
3537 | | - case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K: |
---|
3538 | | - case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K: |
---|
3539 | | - case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M: |
---|
3540 | | - case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K: |
---|
3541 | | - case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M: |
---|
3542 | | - case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M: |
---|
3543 | | - case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G: |
---|
3544 | | - return 1; |
---|
3545 | | - default: |
---|
3546 | | - return 0; |
---|
3547 | | - } |
---|
3548 | | -} |
---|
3549 | | - |
---|
3550 | | -static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig, |
---|
3551 | | - int page_shift) |
---|
3552 | | -{ |
---|
3553 | | - u64 *pbl_tbl = pbl_tbl_orig; |
---|
3554 | | - u64 paddr; |
---|
3555 | | - u64 page_mask = (1ULL << page_shift) - 1; |
---|
3556 | | - int i, pages; |
---|
3557 | | - struct scatterlist *sg; |
---|
3558 | | - int entry; |
---|
3559 | | - |
---|
3560 | | - for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { |
---|
3561 | | - pages = sg_dma_len(sg) >> PAGE_SHIFT; |
---|
3562 | | - for (i = 0; i < pages; i++) { |
---|
3563 | | - paddr = sg_dma_address(sg) + (i << PAGE_SHIFT); |
---|
3564 | | - if (pbl_tbl == pbl_tbl_orig) |
---|
3565 | | - *pbl_tbl++ = paddr & ~page_mask; |
---|
3566 | | - else if ((paddr & page_mask) == 0) |
---|
3567 | | - *pbl_tbl++ = paddr; |
---|
3568 | | - } |
---|
3569 | | - } |
---|
3570 | | - return pbl_tbl - pbl_tbl_orig; |
---|
3571 | 3774 | } |
---|
3572 | 3775 | |
---|
3573 | 3776 | /* uverbs */ |
---|
.. | .. |
---|
3579 | 3782 | struct bnxt_re_dev *rdev = pd->rdev; |
---|
3580 | 3783 | struct bnxt_re_mr *mr; |
---|
3581 | 3784 | struct ib_umem *umem; |
---|
3582 | | - u64 *pbl_tbl = NULL; |
---|
3583 | | - int umem_pgs, page_shift, rc; |
---|
| 3785 | + unsigned long page_size; |
---|
| 3786 | + int umem_pgs, rc; |
---|
3584 | 3787 | |
---|
3585 | 3788 | if (length > BNXT_RE_MAX_MR_SIZE) { |
---|
3586 | | - dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%lld\n", |
---|
3587 | | - length, BNXT_RE_MAX_MR_SIZE); |
---|
| 3789 | + ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n", |
---|
| 3790 | + length, BNXT_RE_MAX_MR_SIZE); |
---|
3588 | 3791 | return ERR_PTR(-ENOMEM); |
---|
3589 | 3792 | } |
---|
3590 | 3793 | |
---|
.. | .. |
---|
3599 | 3802 | |
---|
3600 | 3803 | rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); |
---|
3601 | 3804 | if (rc) { |
---|
3602 | | - dev_err(rdev_to_dev(rdev), "Failed to allocate MR"); |
---|
| 3805 | + ibdev_err(&rdev->ibdev, "Failed to allocate MR"); |
---|
3603 | 3806 | goto free_mr; |
---|
3604 | 3807 | } |
---|
3605 | 3808 | /* The fixed portion of the rkey is the same as the lkey */ |
---|
3606 | 3809 | mr->ib_mr.rkey = mr->qplib_mr.rkey; |
---|
3607 | 3810 | |
---|
3608 | | - umem = ib_umem_get(ib_pd->uobject->context, start, length, |
---|
3609 | | - mr_access_flags, 0); |
---|
| 3811 | + umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags); |
---|
3610 | 3812 | if (IS_ERR(umem)) { |
---|
3611 | | - dev_err(rdev_to_dev(rdev), "Failed to get umem"); |
---|
| 3813 | + ibdev_err(&rdev->ibdev, "Failed to get umem"); |
---|
3612 | 3814 | rc = -EFAULT; |
---|
3613 | 3815 | goto free_mrw; |
---|
3614 | 3816 | } |
---|
3615 | 3817 | mr->ib_umem = umem; |
---|
3616 | 3818 | |
---|
3617 | 3819 | mr->qplib_mr.va = virt_addr; |
---|
3618 | | - umem_pgs = ib_umem_page_count(umem); |
---|
3619 | | - if (!umem_pgs) { |
---|
3620 | | - dev_err(rdev_to_dev(rdev), "umem is invalid!"); |
---|
3621 | | - rc = -EINVAL; |
---|
| 3820 | + page_size = ib_umem_find_best_pgsz( |
---|
| 3821 | + umem, BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M, virt_addr); |
---|
| 3822 | + if (!page_size) { |
---|
| 3823 | + ibdev_err(&rdev->ibdev, "umem page size unsupported!"); |
---|
| 3824 | + rc = -EFAULT; |
---|
3622 | 3825 | goto free_umem; |
---|
3623 | 3826 | } |
---|
3624 | 3827 | mr->qplib_mr.total_size = length; |
---|
3625 | 3828 | |
---|
3626 | | - pbl_tbl = kcalloc(umem_pgs, sizeof(u64 *), GFP_KERNEL); |
---|
3627 | | - if (!pbl_tbl) { |
---|
3628 | | - rc = -ENOMEM; |
---|
| 3829 | + if (page_size == BNXT_RE_PAGE_SIZE_4K && |
---|
| 3830 | + length > BNXT_RE_MAX_MR_SIZE_LOW) { |
---|
| 3831 | + ibdev_err(&rdev->ibdev, "Requested MR Sz:%llu Max sup:%llu", |
---|
| 3832 | + length, (u64)BNXT_RE_MAX_MR_SIZE_LOW); |
---|
| 3833 | + rc = -EINVAL; |
---|
3629 | 3834 | goto free_umem; |
---|
3630 | 3835 | } |
---|
3631 | 3836 | |
---|
3632 | | - page_shift = umem->page_shift; |
---|
3633 | | - |
---|
3634 | | - if (!bnxt_re_page_size_ok(page_shift)) { |
---|
3635 | | - dev_err(rdev_to_dev(rdev), "umem page size unsupported!"); |
---|
3636 | | - rc = -EFAULT; |
---|
3637 | | - goto fail; |
---|
3638 | | - } |
---|
3639 | | - |
---|
3640 | | - if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) { |
---|
3641 | | - dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu", |
---|
3642 | | - length, (u64)BNXT_RE_MAX_MR_SIZE_LOW); |
---|
3643 | | - rc = -EINVAL; |
---|
3644 | | - goto fail; |
---|
3645 | | - } |
---|
3646 | | - if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) { |
---|
3647 | | - page_shift = BNXT_RE_PAGE_SHIFT_2M; |
---|
3648 | | - dev_warn(rdev_to_dev(rdev), "umem hugetlb set page_size %x", |
---|
3649 | | - 1 << page_shift); |
---|
3650 | | - } |
---|
3651 | | - |
---|
3652 | | - /* Map umem buf ptrs to the PBL */ |
---|
3653 | | - umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift); |
---|
3654 | | - rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl, |
---|
3655 | | - umem_pgs, false, 1 << page_shift); |
---|
| 3837 | + umem_pgs = ib_umem_num_dma_blocks(umem, page_size); |
---|
| 3838 | + rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem, |
---|
| 3839 | + umem_pgs, page_size); |
---|
3656 | 3840 | if (rc) { |
---|
3657 | | - dev_err(rdev_to_dev(rdev), "Failed to register user MR"); |
---|
3658 | | - goto fail; |
---|
| 3841 | + ibdev_err(&rdev->ibdev, "Failed to register user MR"); |
---|
| 3842 | + goto free_umem; |
---|
3659 | 3843 | } |
---|
3660 | | - |
---|
3661 | | - kfree(pbl_tbl); |
---|
3662 | 3844 | |
---|
3663 | 3845 | mr->ib_mr.lkey = mr->qplib_mr.lkey; |
---|
3664 | 3846 | mr->ib_mr.rkey = mr->qplib_mr.lkey; |
---|
3665 | 3847 | atomic_inc(&rdev->mr_count); |
---|
3666 | 3848 | |
---|
3667 | 3849 | return &mr->ib_mr; |
---|
3668 | | -fail: |
---|
3669 | | - kfree(pbl_tbl); |
---|
3670 | 3850 | free_umem: |
---|
3671 | 3851 | ib_umem_release(umem); |
---|
3672 | 3852 | free_mrw: |
---|
.. | .. |
---|
3676 | 3856 | return ERR_PTR(rc); |
---|
3677 | 3857 | } |
---|
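
The registration path above drops the driver's hand-rolled page_shift/hugetlb checks (and the intermediate pbl_tbl allocation) in favor of the core umem helpers: ib_umem_get() now takes the ib_device directly, ib_umem_find_best_pgsz() picks the largest HW-supported page size that can cover the mapping, and ib_umem_num_dma_blocks() counts the resulting DMA blocks. A minimal sketch of how a driver consumes these helpers; fill_pbl_entry() is a hypothetical callback for writing one block address, while the umem helpers themselves are the in-tree RDMA core API:

#include <rdma/ib_umem.h>

/* Sketch: walk an ib_umem in device-sized DMA blocks.
 * fill_pbl_entry() is hypothetical; everything else is core API.
 */
static int sketch_build_pbl(struct ib_umem *umem, u64 virt_addr)
{
        struct ib_block_iter biter;
        unsigned long page_size;

        /* Largest page size, from the device's supported set, that can
         * express this mapping; 0 means no supported size fits.
         */
        page_size = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M, virt_addr);
        if (!page_size)
                return -EINVAL;

        /* One PBL entry per page_size-aligned DMA block */
        rdma_umem_for_each_dma_block(umem, &biter, page_size)
                fill_pbl_entry(rdma_block_iter_dma_address(&biter));

        return ib_umem_num_dma_blocks(umem, page_size);
}

The same series also converts every dev_err(rdev_to_dev(rdev), ...) in this file to ibdev_err(&rdev->ibdev, ...), so log lines are attributed to the ib_device rather than the backing PCI function.
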
3678 | 3858 | |
---|
3679 | | -struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev, |
---|
3680 | | - struct ib_udata *udata) |
---|
| 3859 | +int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata) |
---|
3681 | 3860 | { |
---|
| 3861 | + struct ib_device *ibdev = ctx->device; |
---|
| 3862 | + struct bnxt_re_ucontext *uctx = |
---|
| 3863 | + container_of(ctx, struct bnxt_re_ucontext, ib_uctx); |
---|
3682 | 3864 | struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); |
---|
3683 | | - struct bnxt_re_uctx_resp resp; |
---|
3684 | | - struct bnxt_re_ucontext *uctx; |
---|
3685 | 3865 | struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; |
---|
| 3866 | + struct bnxt_re_uctx_resp resp; |
---|
| 3867 | + u32 chip_met_rev_num = 0; |
---|
3686 | 3868 | int rc; |
---|
3687 | 3869 | |
---|
3688 | | - dev_dbg(rdev_to_dev(rdev), "ABI version requested %d", |
---|
3689 | | - ibdev->uverbs_abi_ver); |
---|
| 3870 | + ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver); |
---|
3690 | 3871 | |
---|
3691 | | - if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) { |
---|
3692 | | - dev_dbg(rdev_to_dev(rdev), " is different from the device %d ", |
---|
3693 | | - BNXT_RE_ABI_VERSION); |
---|
3694 | | - return ERR_PTR(-EPERM); |
---|
| 3872 | + if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) { |
---|
| 3873 | + ibdev_dbg(ibdev, " is different from the device %d ", |
---|
| 3874 | + BNXT_RE_ABI_VERSION); |
---|
| 3875 | + return -EPERM; |
---|
3695 | 3876 | } |
---|
3696 | | - |
---|
3697 | | - uctx = kzalloc(sizeof(*uctx), GFP_KERNEL); |
---|
3698 | | - if (!uctx) |
---|
3699 | | - return ERR_PTR(-ENOMEM); |
---|
3700 | 3877 | |
---|
3701 | 3878 | uctx->rdev = rdev; |
---|
3702 | 3879 | |
---|
.. | .. |
---|
3707 | 3884 | } |
---|
3708 | 3885 | spin_lock_init(&uctx->sh_lock); |
---|
3709 | 3886 | |
---|
3710 | | - resp.dev_id = rdev->en_dev->pdev->devfn; /*Temp, Use idr_alloc instead*/ |
---|
| 3887 | + resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX; |
---|
| 3888 | + chip_met_rev_num = rdev->chip_ctx->chip_num; |
---|
| 3889 | + chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) << |
---|
| 3890 | + BNXT_RE_CHIP_ID0_CHIP_REV_SFT; |
---|
| 3891 | + chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) << |
---|
| 3892 | + BNXT_RE_CHIP_ID0_CHIP_MET_SFT; |
---|
| 3893 | + resp.chip_id0 = chip_met_rev_num; |
---|
| 3894 | + /* Future extension of chip info */ |
---|
| 3895 | + resp.chip_id1 = 0; |
---|
| 3896 | + /*Temp, Use xa_alloc instead */ |
---|
| 3897 | + resp.dev_id = rdev->en_dev->pdev->devfn; |
---|
3711 | 3898 | resp.max_qp = rdev->qplib_ctx.qpc_count; |
---|
3712 | 3899 | resp.pg_size = PAGE_SIZE; |
---|
3713 | 3900 | resp.cqe_sz = sizeof(struct cq_base); |
---|
3714 | 3901 | resp.max_cqd = dev_attr->max_cq_wqes; |
---|
3715 | 3902 | resp.rsvd = 0; |
---|
3716 | 3903 | |
---|
3717 | | - rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); |
---|
| 3904 | + rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp))); |
---|
3718 | 3905 | if (rc) { |
---|
3719 | | - dev_err(rdev_to_dev(rdev), "Failed to copy user context"); |
---|
| 3906 | + ibdev_err(ibdev, "Failed to copy user context"); |
---|
3720 | 3907 | rc = -EFAULT; |
---|
3721 | 3908 | goto cfail; |
---|
3722 | 3909 | } |
---|
3723 | 3910 | |
---|
3724 | | - return &uctx->ib_uctx; |
---|
| 3911 | + return 0; |
---|
3725 | 3912 | cfail: |
---|
3726 | 3913 | free_page((unsigned long)uctx->shpg); |
---|
3727 | 3914 | uctx->shpg = NULL; |
---|
3728 | 3915 | fail: |
---|
3729 | | - kfree(uctx); |
---|
3730 | | - return ERR_PTR(rc); |
---|
| 3916 | + return rc; |
---|
3731 | 3917 | } |
---|
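
bnxt_re_alloc_ucontext() now advertises the chip identity to userspace: comp_mask flags that chip context is present, and chip_id0 packs chip_num in the low bits with chip_rev and chip_metal above it at the BNXT_RE_CHIP_ID0_CHIP_REV_SFT/BNXT_RE_CHIP_ID0_CHIP_MET_SFT offsets. A sketch of the matching consumer-side unpack, reusing the same uapi shift constants; the struct and helper names here are illustrative, not part of the ABI, and the field widths assume the 16/8/8 split implied by the packing above:

/* Sketch: undo the chip_id0 packing done in bnxt_re_alloc_ucontext().
 * The *_SFT shifts come from the bnxt_re uapi header; sketch_chip_id
 * and the helper are illustrative only.
 */
struct sketch_chip_id {
        u16 chip_num;
        u8 chip_rev;
        u8 chip_metal;
};

static void sketch_unpack_chip_id0(u32 chip_id0, struct sketch_chip_id *id)
{
        /* Assumes chip_num occupies the bits below the REV shift */
        id->chip_num = chip_id0 & 0xFFFF;
        id->chip_rev = (chip_id0 >> BNXT_RE_CHIP_ID0_CHIP_REV_SFT) & 0xFF;
        id->chip_metal = (chip_id0 >> BNXT_RE_CHIP_ID0_CHIP_MET_SFT) & 0xFF;
}

Note also the ib_copy_to_udata() change: copying min(udata->outlen, sizeof(resp)) bytes lets older userspace, which supplies a shorter response buffer, keep working as the response struct grows.
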
3732 | 3918 | |
---|
3733 | | -int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx) |
---|
| 3919 | +void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx) |
---|
3734 | 3920 | { |
---|
3735 | 3921 | struct bnxt_re_ucontext *uctx = container_of(ib_uctx, |
---|
3736 | 3922 | struct bnxt_re_ucontext, |
---|
3737 | 3923 | ib_uctx); |
---|
3738 | 3924 | |
---|
3739 | 3925 | struct bnxt_re_dev *rdev = uctx->rdev; |
---|
3740 | | - int rc = 0; |
---|
3741 | 3926 | |
---|
3742 | 3927 | if (uctx->shpg) |
---|
3743 | 3928 | free_page((unsigned long)uctx->shpg); |
---|
.. | .. |
---|
3746 | 3931 | /* Free DPI only if this is the first PD allocated by the |
---|
3747 | 3932 | * application and mark the context dpi as NULL |
---|
3748 | 3933 | */ |
---|
3749 | | - rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res, |
---|
3750 | | - &rdev->qplib_res.dpi_tbl, |
---|
3751 | | - &uctx->dpi); |
---|
3752 | | - if (rc) |
---|
3753 | | - dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!"); |
---|
3754 | | - /* Don't fail, continue*/ |
---|
| 3934 | + bnxt_qplib_dealloc_dpi(&rdev->qplib_res, |
---|
| 3935 | + &rdev->qplib_res.dpi_tbl, &uctx->dpi); |
---|
3755 | 3936 | uctx->dpi.dbr = NULL; |
---|
3756 | 3937 | } |
---|
3757 | | - |
---|
3758 | | - kfree(uctx); |
---|
3759 | | - return 0; |
---|
3760 | 3938 | } |
---|
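
The alloc/dealloc pair above reflects the conversion to core-allocated ucontext objects: the RDMA core now allocates struct bnxt_re_ucontext itself, passes in the embedded ib_ucontext, and frees it after dealloc, which is why alloc returns int and dealloc becomes void with no kzalloc()/kfree() left in the driver. Drivers opt in by recording the structure size and the offset of the embedded ib_ucontext in their ib_device_ops; a sketch with all unrelated ops elided:

/* Sketch: opting into core-allocated ucontexts. INIT_RDMA_OBJ_SIZE
 * tells the core how big the driver struct is and where the embedded
 * ib_ucontext lives, so container_of() works in both directions.
 */
static const struct ib_device_ops sketch_dev_ops = {
        .alloc_ucontext = bnxt_re_alloc_ucontext,
        .dealloc_ucontext = bnxt_re_dealloc_ucontext,
        .mmap = bnxt_re_mmap,
        INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
};

bnxt_qplib_dealloc_dpi() likewise stops returning a status because this teardown path can no longer report failure; the old code already ignored the error and continued.
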
3761 | 3939 | |
---|
3762 | 3940 | /* Helper function to mmap the virtual memory from user app */ |
---|
.. | .. |
---|
3775 | 3953 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); |
---|
3776 | 3954 | if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, |
---|
3777 | 3955 | PAGE_SIZE, vma->vm_page_prot)) { |
---|
3778 | | - dev_err(rdev_to_dev(rdev), "Failed to map DPI"); |
---|
| 3956 | + ibdev_err(&rdev->ibdev, "Failed to map DPI"); |
---|
3779 | 3957 | return -EAGAIN; |
---|
3780 | 3958 | } |
---|
3781 | 3959 | } else { |
---|
3782 | 3960 | pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT; |
---|
3783 | 3961 | if (remap_pfn_range(vma, vma->vm_start, |
---|
3784 | 3962 | pfn, PAGE_SIZE, vma->vm_page_prot)) { |
---|
3785 | | - dev_err(rdev_to_dev(rdev), |
---|
3786 | | - "Failed to map shared page"); |
---|
| 3963 | + ibdev_err(&rdev->ibdev, "Failed to map shared page"); |
---|
3787 | 3964 | return -EAGAIN; |
---|
3788 | 3965 | } |
---|
3789 | 3966 | } |
---|
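
bnxt_re_mmap(), only partially visible in the hunk above, dispatches on the page offset userspace passes to mmap(): a non-zero vm_pgoff is taken as the DPI doorbell pfn and mapped uncached via io_remap_pfn_range(), while offset zero maps the kernel-allocated shared page with remap_pfn_range(). A condensed sketch of that dispatch, with the driver lookups abbreviated and the single-page length check assumed from the elided prologue:

/* Sketch of the two-region mmap dispatch: vm_pgoff != 0 is the
 * doorbell (DPI) page, mapped uncached; vm_pgoff == 0 is the shared
 * page. Error handling beyond the remap calls is abbreviated.
 */
static int sketch_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
{
        struct bnxt_re_ucontext *uctx =
                container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
        u64 pfn;

        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;

        if (vma->vm_pgoff) {
                /* Doorbell region: must not be cached by the CPU */
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                       PAGE_SIZE, vma->vm_page_prot))
                        return -EAGAIN;
        } else {
                /* Shared page: ordinary kernel memory */
                pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
                if (remap_pfn_range(vma, vma->vm_start, pfn,
                                    PAGE_SIZE, vma->vm_page_prot))
                        return -EAGAIN;
        }
        return 0;
}
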