.. | ..
47 | 47 | #include <rdma/ib_umem.h>
48 | 48 | #include <rdma/ib_addr.h>
49 | 49 | #include <rdma/ib_cache.h>
| 50 | +#include <rdma/uverbs_ioctl.h>
50 | 51 |
51 | 52 | #include "ocrdma.h"
52 | 53 | #include "ocrdma_hw.h"
.. | ..
98 | 99 | attr->max_mw = dev->attr.max_mw;
99 | 100 | attr->max_pd = dev->attr.max_pd;
100 | 101 | attr->atomic_cap = 0;
101 | | - attr->max_fmr = 0;
102 | | - attr->max_map_per_fmr = 0;
103 | 102 | attr->max_qp_rd_atom =
104 | 103 | min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
105 | 104 | attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
.. | ..
112 | 111 | return 0;
113 | 112 | }
114 | 113 |
115 | | -struct net_device *ocrdma_get_netdev(struct ib_device *ibdev, u8 port_num)
116 | | -{
117 | | - struct ocrdma_dev *dev;
118 | | - struct net_device *ndev = NULL;
119 | | -
120 | | - rcu_read_lock();
121 | | -
122 | | - dev = get_ocrdma_dev(ibdev);
123 | | - if (dev)
124 | | - ndev = dev->nic_info.netdev;
125 | | - if (ndev)
126 | | - dev_hold(ndev);
127 | | -
128 | | - rcu_read_unlock();
129 | | -
130 | | - return ndev;
131 | | -}
132 | | -
133 | 114 | static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
134 | | - u8 *ib_speed, u8 *ib_width)
| 115 | + u16 *ib_speed, u8 *ib_width)
135 | 116 | {
136 | 117 | int status;
137 | 118 | u8 speed;
.. | ..
177 | 158 |
178 | 159 | /* props being zeroed by the caller, avoid zeroing it here */
179 | 160 | dev = get_ocrdma_dev(ibdev);
180 | | - if (port > 1) {
181 | | - pr_err("%s(%d) invalid_port=0x%x\n", __func__,
182 | | - dev->id, port);
183 | | - return -EINVAL;
184 | | - }
185 | 161 | netdev = dev->nic_info.netdev;
186 | 162 | if (netif_running(netdev) && netif_oper_up(netdev)) {
187 | 163 | port_state = IB_PORT_ACTIVE;
188 | | - props->phys_state = 5;
| 164 | + props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
189 | 165 | } else {
190 | 166 | port_state = IB_PORT_DOWN;
191 | | - props->phys_state = 3;
| 167 | + props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
192 | 168 | }
193 | 169 | props->max_mtu = IB_MTU_4096;
194 | 170 | props->active_mtu = iboe_get_mtu(netdev->mtu);
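
Note on the hunk above: the raw values 5 and 3 previously written to props->phys_state correspond to the named enumerators now used. A reference sketch of the relevant part of enum ib_port_phys_state (declared in include/rdma/ib_verbs.h; values follow the IBTA PortPhysicalState encoding):

	enum ib_port_phys_state {
		IB_PORT_PHYS_STATE_SLEEP = 1,
		IB_PORT_PHYS_STATE_POLLING = 2,
		IB_PORT_PHYS_STATE_DISABLED = 3,	/* was the literal 3 */
		IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
		IB_PORT_PHYS_STATE_LINK_UP = 5,		/* was the literal 5 */
		IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
		IB_PORT_PHYS_STATE_PHY_TEST = 7,
	};
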
.. | ..
209 | 185 | &props->active_width);
210 | 186 | props->max_msg_sz = 0x80000000;
211 | 187 | props->max_vl_num = 4;
212 | | - return 0;
213 | | -}
214 | | -
215 | | -int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
216 | | - struct ib_port_modify *props)
217 | | -{
218 | | - struct ocrdma_dev *dev;
219 | | -
220 | | - dev = get_ocrdma_dev(ibdev);
221 | | - if (port > 1) {
222 | | - pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
223 | | - return -EINVAL;
224 | | - }
225 | 188 | return 0;
226 | 189 | }
227 | 190 |
.. | ..
379 | 342 | return status;
380 | 343 | }
381 | 344 |
382 | | -static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
383 | | - struct ocrdma_ucontext *uctx,
384 | | - struct ib_udata *udata)
| 345 | +/*
| 346 | + * NOTE:
| 347 | + *
| 348 | + * ocrdma_ucontext must be used here because this function is also
| 349 | + * called from ocrdma_alloc_ucontext where ib_udata does not have
| 350 | + * valid ib_ucontext pointer. ib_uverbs_get_context does not call
| 351 | + * uobj_{alloc|get_xxx} helpers which are used to store the
| 352 | + * ib_ucontext in uverbs_attr_bundle wrapping the ib_udata. so
| 353 | + * ib_udata does NOT imply valid ib_ucontext here!
| 354 | + */
| 355 | +static int _ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
| 356 | + struct ocrdma_ucontext *uctx,
| 357 | + struct ib_udata *udata)
385 | 358 | {
386 | | - struct ocrdma_pd *pd = NULL;
387 | 359 | int status;
388 | | -
389 | | - pd = kzalloc(sizeof(*pd), GFP_KERNEL);
390 | | - if (!pd)
391 | | - return ERR_PTR(-ENOMEM);
392 | 360 |
393 | 361 | if (udata && uctx && dev->attr.max_dpp_pds) {
394 | 362 | pd->dpp_enabled =
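
The NOTE in the hunk above is the crux of the refactor: the explicit ib_ucontext parameter is gone from the verb signatures, so callers running in a user context recover it from the udata wrapper instead. A minimal sketch of the retrieval idiom used throughout the rest of this patch (rdma_udata_to_drv_context() is why <rdma/uverbs_ioctl.h> is now included at the top of the file):

	struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct ocrdma_ucontext, ibucontext);
	/* uctx is NULL in kernel-verbs flows, where no user context exists */

The exception is the ocrdma_alloc_ucontext() path itself, which is exactly what the NOTE warns about: there the ucontext is still being constructed, so _ocrdma_alloc_pd() keeps an explicit uctx argument.
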
.. | ..
398 | 366 | dev->attr.wqe_size) : 0;
399 | 367 | }
400 | 368 |
401 | | - if (dev->pd_mgr->pd_prealloc_valid) {
402 | | - status = ocrdma_get_pd_num(dev, pd);
403 | | - if (status == 0) {
404 | | - return pd;
405 | | - } else {
406 | | - kfree(pd);
407 | | - return ERR_PTR(status);
408 | | - }
409 | | - }
| 369 | + if (dev->pd_mgr->pd_prealloc_valid)
| 370 | + return ocrdma_get_pd_num(dev, pd);
410 | 371 |
411 | 372 | retry:
412 | 373 | status = ocrdma_mbx_alloc_pd(dev, pd);
.. | ..
415 | 376 | pd->dpp_enabled = false;
416 | 377 | pd->num_dpp_qp = 0;
417 | 378 | goto retry;
418 | | - } else {
419 | | - kfree(pd);
420 | | - return ERR_PTR(status);
421 | 379 | }
| 380 | + return status;
422 | 381 | }
423 | 382 |
424 | | - return pd;
| 383 | + return 0;
425 | 384 | }
426 | 385 |
427 | 386 | static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
.. | ..
430 | 389 | return (uctx->cntxt_pd == pd);
431 | 390 | }
432 | 391 |
433 | | -static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
| 392 | +static void _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
434 | 393 | struct ocrdma_pd *pd)
435 | 394 | {
436 | | - int status;
437 | | -
438 | 395 | if (dev->pd_mgr->pd_prealloc_valid)
439 | | - status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
| 396 | + ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
440 | 397 | else
441 | | - status = ocrdma_mbx_dealloc_pd(dev, pd);
442 | | -
443 | | - kfree(pd);
444 | | - return status;
| 398 | + ocrdma_mbx_dealloc_pd(dev, pd);
445 | 399 | }
446 | 400 |
447 | 401 | static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
448 | 402 | struct ocrdma_ucontext *uctx,
449 | 403 | struct ib_udata *udata)
450 | 404 | {
451 | | - int status = 0;
| 405 | + struct ib_device *ibdev = &dev->ibdev;
| 406 | + struct ib_pd *pd;
| 407 | + int status;
452 | 408 |
453 | | - uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata);
454 | | - if (IS_ERR(uctx->cntxt_pd)) {
455 | | - status = PTR_ERR(uctx->cntxt_pd);
456 | | - uctx->cntxt_pd = NULL;
| 409 | + pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
| 410 | + if (!pd)
| 411 | + return -ENOMEM;
| 412 | +
| 413 | + pd->device = ibdev;
| 414 | + uctx->cntxt_pd = get_ocrdma_pd(pd);
| 415 | +
| 416 | + status = _ocrdma_alloc_pd(dev, uctx->cntxt_pd, uctx, udata);
| 417 | + if (status) {
| 418 | + kfree(uctx->cntxt_pd);
457 | 419 | goto err;
458 | 420 | }
459 | 421 |
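
rdma_zalloc_drv_obj() above allocates a driver-sized ib_pd for the internal per-context PD, mirroring what the core now does for the verbs entry points: the core allocates the driver object before invoking the driver callback, sized by declarations in the device ops. A sketch of how those size declarations presumably look in the companion ocrdma_main.c change (the exact list is an assumption, not shown in this hunk):

	static const struct ib_device_ops ocrdma_dev_ops = {
		/* ... verb callbacks ... */
		INIT_RDMA_OBJ_SIZE(ib_cq, ocrdma_cq, ibcq),
		INIT_RDMA_OBJ_SIZE(ib_pd, ocrdma_pd, ibpd),
		INIT_RDMA_OBJ_SIZE(ib_srq, ocrdma_srq, ibsrq),
		INIT_RDMA_OBJ_SIZE(ib_ucontext, ocrdma_ucontext, ibucontext),
	};

This is why ocrdma_alloc_pd(), ocrdma_create_cq(), ocrdma_create_srq() and ocrdma_alloc_ucontext() below receive a pre-allocated object and return an int status instead of an ERR_PTR-encoded pointer.
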
.. | ..
463 | 425 | return status;
464 | 426 | }
465 | 427 |
466 | | -static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
| 428 | +static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
467 | 429 | {
468 | 430 | struct ocrdma_pd *pd = uctx->cntxt_pd;
469 | 431 | struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
.. | ..
473 | 435 | __func__, dev->id, pd->id);
474 | 436 | }
475 | 437 | uctx->cntxt_pd = NULL;
476 | | - (void)_ocrdma_dealloc_pd(dev, pd);
477 | | - return 0;
| 438 | + _ocrdma_dealloc_pd(dev, pd);
| 439 | + kfree(pd);
478 | 440 | }
479 | 441 |
480 | 442 | static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
.. | ..
498 | 460 | mutex_unlock(&uctx->mm_list_lock);
499 | 461 | }
500 | 462 |
501 | | -struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
502 | | - struct ib_udata *udata)
| 463 | +int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
503 | 464 | {
| 465 | + struct ib_device *ibdev = uctx->device;
504 | 466 | int status;
505 | | - struct ocrdma_ucontext *ctx;
506 | | - struct ocrdma_alloc_ucontext_resp resp;
| 467 | + struct ocrdma_ucontext *ctx = get_ocrdma_ucontext(uctx);
| 468 | + struct ocrdma_alloc_ucontext_resp resp = {};
507 | 469 | struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
508 | 470 | struct pci_dev *pdev = dev->nic_info.pdev;
509 | 471 | u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);
510 | 472 |
511 | 473 | if (!udata)
512 | | - return ERR_PTR(-EFAULT);
513 | | - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
514 | | - if (!ctx)
515 | | - return ERR_PTR(-ENOMEM);
| 474 | + return -EFAULT;
516 | 475 | INIT_LIST_HEAD(&ctx->mm_head);
517 | 476 | mutex_init(&ctx->mm_list_lock);
518 | 477 |
519 | | - ctx->ah_tbl.va = dma_zalloc_coherent(&pdev->dev, map_len,
520 | | - &ctx->ah_tbl.pa, GFP_KERNEL);
521 | | - if (!ctx->ah_tbl.va) {
522 | | - kfree(ctx);
523 | | - return ERR_PTR(-ENOMEM);
524 | | - }
| 478 | + ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
| 479 | + &ctx->ah_tbl.pa, GFP_KERNEL);
| 480 | + if (!ctx->ah_tbl.va)
| 481 | + return -ENOMEM;
| 482 | +
525 | 483 | ctx->ah_tbl.len = map_len;
526 | 484 |
527 | | - memset(&resp, 0, sizeof(resp));
528 | 485 | resp.ah_tbl_len = ctx->ah_tbl.len;
529 | 486 | resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);
530 | 487 |
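
The dma_zalloc_coherent() call above (and the one in ocrdma_build_pbl_tbl() further down) becomes plain dma_alloc_coherent(). This is behavior-preserving: dma_alloc_coherent() zeroes its allocations, which is why the zalloc variant was removed from the kernel. Shape of the converted call:

	/* dma_alloc_coherent() returns zeroed memory; no memset needed */
	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
					    &ctx->ah_tbl.pa, GFP_KERNEL);
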
.. | ..
546 | 503 | status = ib_copy_to_udata(udata, &resp, sizeof(resp));
547 | 504 | if (status)
548 | 505 | goto cpy_err;
549 | | - return &ctx->ibucontext;
| 506 | + return 0;
550 | 507 |
551 | 508 | cpy_err:
| 509 | + ocrdma_dealloc_ucontext_pd(ctx);
552 | 510 | pd_err:
553 | 511 | ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
554 | 512 | map_err:
555 | 513 | dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
556 | 514 | ctx->ah_tbl.pa);
557 | | - kfree(ctx);
558 | | - return ERR_PTR(status);
| 515 | + return status;
559 | 516 | }
560 | 517 |
561 | | -int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
| 518 | +void ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
562 | 519 | {
563 | | - int status;
564 | 520 | struct ocrdma_mm *mm, *tmp;
565 | 521 | struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
566 | 522 | struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
567 | 523 | struct pci_dev *pdev = dev->nic_info.pdev;
568 | 524 |
569 | | - status = ocrdma_dealloc_ucontext_pd(uctx);
| 525 | + ocrdma_dealloc_ucontext_pd(uctx);
570 | 526 |
571 | 527 | ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
572 | 528 | dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
.. | ..
576 | 532 | list_del(&mm->entry);
577 | 533 | kfree(mm);
578 | 534 | }
579 | | - kfree(uctx);
580 | | - return status;
581 | 535 | }
582 | 536 |
583 | 537 | int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
.. | ..
624 | 578 | }
625 | 579 |
626 | 580 | static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
627 | | - struct ib_ucontext *ib_ctx,
628 | 581 | struct ib_udata *udata)
629 | 582 | {
630 | 583 | int status;
.. | ..
632 | 585 | u64 dpp_page_addr = 0;
633 | 586 | u32 db_page_size;
634 | 587 | struct ocrdma_alloc_pd_uresp rsp;
635 | | - struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
| 588 | + struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
| 589 | + udata, struct ocrdma_ucontext, ibucontext);
636 | 590 |
637 | 591 | memset(&rsp, 0, sizeof(rsp));
638 | 592 | rsp.id = pd->id;
.. | ..
670 | 624 | return status;
671 | 625 | }
672 | 626 |
673 | | -struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
674 | | - struct ib_ucontext *context,
675 | | - struct ib_udata *udata)
| 627 | +int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
676 | 628 | {
| 629 | + struct ib_device *ibdev = ibpd->device;
677 | 630 | struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
678 | 631 | struct ocrdma_pd *pd;
679 | | - struct ocrdma_ucontext *uctx = NULL;
680 | 632 | int status;
681 | 633 | u8 is_uctx_pd = false;
| 634 | + struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
| 635 | + udata, struct ocrdma_ucontext, ibucontext);
682 | 636 |
683 | | - if (udata && context) {
684 | | - uctx = get_ocrdma_ucontext(context);
| 637 | + if (udata) {
685 | 638 | pd = ocrdma_get_ucontext_pd(uctx);
686 | 639 | if (pd) {
687 | 640 | is_uctx_pd = true;
.. | ..
689 | 642 | }
690 | 643 | }
691 | 644 |
692 | | - pd = _ocrdma_alloc_pd(dev, uctx, udata);
693 | | - if (IS_ERR(pd)) {
694 | | - status = PTR_ERR(pd);
| 645 | + pd = get_ocrdma_pd(ibpd);
| 646 | + status = _ocrdma_alloc_pd(dev, pd, uctx, udata);
| 647 | + if (status)
695 | 648 | goto exit;
696 | | - }
697 | 649 |
698 | 650 | pd_mapping:
699 | | - if (udata && context) {
700 | | - status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
| 651 | + if (udata) {
| 652 | + status = ocrdma_copy_pd_uresp(dev, pd, udata);
701 | 653 | if (status)
702 | 654 | goto err;
703 | 655 | }
704 | | - return &pd->ibpd;
| 656 | + return 0;
705 | 657 |
706 | 658 | err:
707 | | - if (is_uctx_pd) {
| 659 | + if (is_uctx_pd)
708 | 660 | ocrdma_release_ucontext_pd(uctx);
709 | | - } else {
710 | | - if (_ocrdma_dealloc_pd(dev, pd))
711 | | - pr_err("%s: _ocrdma_dealloc_pd() failed\n", __func__);
712 | | - }
| 661 | + else
| 662 | + _ocrdma_dealloc_pd(dev, pd);
713 | 663 | exit:
714 | | - return ERR_PTR(status);
| 664 | + return status;
715 | 665 | }
716 | 666 |
717 | | -int ocrdma_dealloc_pd(struct ib_pd *ibpd)
| 667 | +int ocrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
718 | 668 | {
719 | 669 | struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
720 | 670 | struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
721 | 671 | struct ocrdma_ucontext *uctx = NULL;
722 | | - int status = 0;
723 | 672 | u64 usr_db;
724 | 673 |
725 | 674 | uctx = pd->uctx;
.. | ..
733 | 682 |
734 | 683 | if (is_ucontext_pd(uctx, pd)) {
735 | 684 | ocrdma_release_ucontext_pd(uctx);
736 | | - return status;
| 685 | + return 0;
737 | 686 | }
738 | 687 | }
739 | | - status = _ocrdma_dealloc_pd(dev, pd);
740 | | - return status;
| 688 | + _ocrdma_dealloc_pd(dev, pd);
| 689 | + return 0;
741 | 690 | }
742 | 691 |
743 | 692 | static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
.. | ..
850 | 799 | return -ENOMEM;
851 | 800 |
852 | 801 | for (i = 0; i < mr->num_pbls; i++) {
853 | | - va = dma_zalloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
| 802 | + va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
854 | 803 | if (!va) {
855 | 804 | ocrdma_free_mr_pbl_tbl(dev, mr);
856 | 805 | status = -ENOMEM;
.. | ..
862 | 811 | return status;
863 | 812 | }
864 | 813 |
865 | | -static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
866 | | - u32 num_pbes)
| 814 | +static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr)
867 | 815 | {
868 | 816 | struct ocrdma_pbe *pbe;
869 | | - struct scatterlist *sg;
| 817 | + struct ib_block_iter biter;
870 | 818 | struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
871 | | - struct ib_umem *umem = mr->umem;
872 | | - int shift, pg_cnt, pages, pbe_cnt, entry, total_num_pbes = 0;
| 819 | + int pbe_cnt;
| 820 | + u64 pg_addr;
873 | 821 |
874 | 822 | if (!mr->hwmr.num_pbes)
875 | 823 | return;
.. | ..
877 | 825 | pbe = (struct ocrdma_pbe *)pbl_tbl->va;
878 | 826 | pbe_cnt = 0;
879 | 827 |
880 | | - shift = umem->page_shift;
| 828 | + rdma_umem_for_each_dma_block (mr->umem, &biter, PAGE_SIZE) {
| 829 | + /* store the page address in pbe */
| 830 | + pg_addr = rdma_block_iter_dma_address(&biter);
| 831 | + pbe->pa_lo = cpu_to_le32(pg_addr);
| 832 | + pbe->pa_hi = cpu_to_le32(upper_32_bits(pg_addr));
| 833 | + pbe_cnt += 1;
| 834 | + pbe++;
881 | 835 |
882 | | - for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
883 | | - pages = sg_dma_len(sg) >> shift;
884 | | - for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
885 | | - /* store the page address in pbe */
886 | | - pbe->pa_lo =
887 | | - cpu_to_le32(sg_dma_address(sg) +
888 | | - (pg_cnt << shift));
889 | | - pbe->pa_hi =
890 | | - cpu_to_le32(upper_32_bits(sg_dma_address(sg) +
891 | | - (pg_cnt << shift)));
892 | | - pbe_cnt += 1;
893 | | - total_num_pbes += 1;
894 | | - pbe++;
895 | | -
896 | | - /* if done building pbes, issue the mbx cmd. */
897 | | - if (total_num_pbes == num_pbes)
898 | | - return;
899 | | -
900 | | - /* if the given pbl is full storing the pbes,
901 | | - * move to next pbl.
902 | | - */
903 | | - if (pbe_cnt ==
904 | | - (mr->hwmr.pbl_size / sizeof(u64))) {
905 | | - pbl_tbl++;
906 | | - pbe = (struct ocrdma_pbe *)pbl_tbl->va;
907 | | - pbe_cnt = 0;
908 | | - }
909 | | -
| 836 | + /* if the given pbl is full storing the pbes,
| 837 | + * move to next pbl.
| 838 | + */
| 839 | + if (pbe_cnt == (mr->hwmr.pbl_size / sizeof(u64))) {
| 840 | + pbl_tbl++;
| 841 | + pbe = (struct ocrdma_pbe *)pbl_tbl->va;
| 842 | + pbe_cnt = 0;
910 | 843 | }
911 | 844 | }
912 | 845 | }
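
The rewritten build_user_pbes() above is an instance of the generic umem block iterator, which hides the scatterlist layout (umem->page_shift, sg_dma_len() arithmetic) and yields one aligned DMA address per block. The idiom in isolation, as a minimal sketch:

	struct ib_block_iter biter;

	/* walk the pinned region in PAGE_SIZE-aligned DMA blocks */
	rdma_umem_for_each_dma_block(umem, &biter, PAGE_SIZE) {
		u64 addr = rdma_block_iter_dma_address(&biter);
		/* program one page-list entry with addr */
	}

Because the iterator cannot overrun the umem, the old total_num_pbes/num_pbes early-return bookkeeping is no longer needed, which is why the num_pbes parameter disappears from the signature.
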
.. | ..
918 | 851 | struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
919 | 852 | struct ocrdma_mr *mr;
920 | 853 | struct ocrdma_pd *pd;
921 | | - u32 num_pbes;
922 | 854 |
923 | 855 | pd = get_ocrdma_pd(ibpd);
.. | ..
928 | 860 | mr = kzalloc(sizeof(*mr), GFP_KERNEL);
929 | 861 | if (!mr)
930 | 862 | return ERR_PTR(status);
931 | | - mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
| 863 | + mr->umem = ib_umem_get(ibpd->device, start, len, acc);
932 | 864 | if (IS_ERR(mr->umem)) {
933 | 865 | status = -EFAULT;
934 | 866 | goto umem_err;
935 | 867 | }
936 | | - num_pbes = ib_umem_page_count(mr->umem);
937 | | - status = ocrdma_get_pbl_info(dev, mr, num_pbes);
| 868 | + status = ocrdma_get_pbl_info(
| 869 | + dev, mr, ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE));
938 | 870 | if (status)
939 | 871 | goto umem_err;
940 | 872 |
941 | | - mr->hwmr.pbe_size = BIT(mr->umem->page_shift);
942 | | - mr->hwmr.fbo = ib_umem_offset(mr->umem);
| 873 | + mr->hwmr.pbe_size = PAGE_SIZE;
943 | 874 | mr->hwmr.va = usr_addr;
944 | 875 | mr->hwmr.len = len;
945 | 876 | mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
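
Two umem API changes land in the hunk above. ib_umem_get() now takes the ib_device directly (the ucontext argument and the trailing dmasync flag are gone), and the PBE count comes from ib_umem_num_dma_blocks(), which counts fixed-size DMA blocks rather than relying on umem->page_shift. A sketch of the new calls (variable names are illustrative):

	umem = ib_umem_get(ibdev, start, len, access_flags);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	/* number of PAGE_SIZE-aligned DMA blocks backing the mapping */
	nblocks = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
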
.. | ..
950 | 881 | status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
951 | 882 | if (status)
952 | 883 | goto umem_err;
953 | | - build_user_pbes(dev, mr, num_pbes);
| 884 | + build_user_pbes(dev, mr);
954 | 885 | status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
955 | 886 | if (status)
956 | 887 | goto mbx_err;
.. | ..
967 | 898 | return ERR_PTR(status);
968 | 899 | }
969 | 900 |
970 | | -int ocrdma_dereg_mr(struct ib_mr *ib_mr)
| 901 | +int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
971 | 902 | {
972 | 903 | struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
973 | 904 | struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
.. | ..
978 | 909 | ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
979 | 910 |
980 | 911 | /* it could be user registered memory. */
981 | | - if (mr->umem)
982 | | - ib_umem_release(mr->umem);
| 912 | + ib_umem_release(mr->umem);
983 | 913 | kfree(mr);
984 | 914 |
985 | 915 | /* Don't stop cleanup, in case FW is unresponsive */
.. | ..
991 | 921 | }
992 | 922 |
993 | 923 | static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
994 | | - struct ib_udata *udata,
995 | | - struct ib_ucontext *ib_ctx)
| 924 | + struct ib_udata *udata)
996 | 925 | {
997 | 926 | int status;
998 | | - struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
| 927 | + struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
| 928 | + udata, struct ocrdma_ucontext, ibucontext);
999 | 929 | struct ocrdma_create_cq_uresp uresp;
| 930 | +
| 931 | + /* this must be user flow! */
| 932 | + if (!udata)
| 933 | + return -EINVAL;
1000 | 934 |
1001 | 935 | memset(&uresp, 0, sizeof(uresp));
1002 | 936 | uresp.cq_id = cq->id;
.. | ..
1026 | 960 | return status;
1027 | 961 | }
1028 | 962 |
1029 | | -struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
1030 | | - const struct ib_cq_init_attr *attr,
1031 | | - struct ib_ucontext *ib_ctx,
1032 | | - struct ib_udata *udata)
| 963 | +int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
| 964 | + struct ib_udata *udata)
1033 | 965 | {
| 966 | + struct ib_device *ibdev = ibcq->device;
1034 | 967 | int entries = attr->cqe;
1035 | | - struct ocrdma_cq *cq;
| 968 | + struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
1036 | 969 | struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
1037 | | - struct ocrdma_ucontext *uctx = NULL;
| 970 | + struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
| 971 | + udata, struct ocrdma_ucontext, ibucontext);
1038 | 972 | u16 pd_id = 0;
1039 | 973 | int status;
1040 | 974 | struct ocrdma_create_cq_ureq ureq;
1041 | 975 |
1042 | 976 | if (attr->flags)
1043 | | - return ERR_PTR(-EINVAL);
| 977 | + return -EINVAL;
1044 | 978 |
1045 | 979 | if (udata) {
1046 | 980 | if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1047 | | - return ERR_PTR(-EFAULT);
| 981 | + return -EFAULT;
1048 | 982 | } else
1049 | 983 | ureq.dpp_cq = 0;
1050 | | - cq = kzalloc(sizeof(*cq), GFP_KERNEL);
1051 | | - if (!cq)
1052 | | - return ERR_PTR(-ENOMEM);
1053 | 984 |
1054 | 985 | spin_lock_init(&cq->cq_lock);
1055 | 986 | spin_lock_init(&cq->comp_handler_lock);
1056 | 987 | INIT_LIST_HEAD(&cq->sq_head);
1057 | 988 | INIT_LIST_HEAD(&cq->rq_head);
1058 | 989 |
1059 | | - if (ib_ctx) {
1060 | | - uctx = get_ocrdma_ucontext(ib_ctx);
| 990 | + if (udata)
1061 | 991 | pd_id = uctx->cntxt_pd->id;
1062 | | - }
1063 | 992 |
1064 | 993 | status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
1065 | | - if (status) {
1066 | | - kfree(cq);
1067 | | - return ERR_PTR(status);
1068 | | - }
1069 | | - if (ib_ctx) {
1070 | | - status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
| 994 | + if (status)
| 995 | + return status;
| 996 | +
| 997 | + if (udata) {
| 998 | + status = ocrdma_copy_cq_uresp(dev, cq, udata);
1071 | 999 | if (status)
1072 | 1000 | goto ctx_err;
1073 | 1001 | }
1074 | 1002 | cq->phase = OCRDMA_CQE_VALID;
1075 | 1003 | dev->cq_tbl[cq->id] = cq;
1076 | | - return &cq->ibcq;
| 1004 | + return 0;
1077 | 1005 |
1078 | 1006 | ctx_err:
1079 | 1007 | ocrdma_mbx_destroy_cq(dev, cq);
1080 | | - kfree(cq);
1081 | | - return ERR_PTR(status);
| 1008 | + return status;
1082 | 1009 | }
1083 | 1010 |
1084 | 1011 | int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
.. | ..
1121 | 1048 | spin_unlock_irqrestore(&cq->cq_lock, flags);
1122 | 1049 | }
1123 | 1050 |
1124 | | -int ocrdma_destroy_cq(struct ib_cq *ibcq)
| 1051 | +int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1125 | 1052 | {
1126 | 1053 | struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
1127 | 1054 | struct ocrdma_eq *eq = NULL;
.. | ..
1131 | 1058 |
1132 | 1059 | dev->cq_tbl[cq->id] = NULL;
1133 | 1060 | indx = ocrdma_get_eq_table_index(dev, cq->eqn);
1134 | | - BUG_ON(indx == -EINVAL);
1135 | 1061 |
1136 | 1062 | eq = &dev->eq_tbl[indx];
1137 | 1063 | irq = ocrdma_get_irq(dev, eq);
1138 | 1064 | synchronize_irq(irq);
1139 | 1065 | ocrdma_flush_cq(cq);
1140 | 1066 |
1141 | | - (void)ocrdma_mbx_destroy_cq(dev, cq);
| 1067 | + ocrdma_mbx_destroy_cq(dev, cq);
1142 | 1068 | if (cq->ucontext) {
1143 | 1069 | pdid = cq->ucontext->cntxt_pd->id;
1144 | 1070 | ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
.. | ..
1147 | 1073 | ocrdma_get_db_addr(dev, pdid),
1148 | 1074 | dev->nic_info.db_page_size);
1149 | 1075 | }
1150 | | -
1151 | | - kfree(cq);
1152 | 1076 | return 0;
1153 | 1077 | }
1154 | 1078 |
.. | ..
1169 | 1093 | }
1170 | 1094 |
1171 | 1095 | static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
1172 | | - struct ib_qp_init_attr *attrs)
| 1096 | + struct ib_qp_init_attr *attrs,
| 1097 | + struct ib_udata *udata)
1173 | 1098 | {
1174 | 1099 | if ((attrs->qp_type != IB_QPT_GSI) &&
1175 | 1100 | (attrs->qp_type != IB_QPT_RC) &&
.. | ..
1177 | 1102 | (attrs->qp_type != IB_QPT_UD)) {
1178 | 1103 | pr_err("%s(%d) unsupported qp type=0x%x requested\n",
1179 | 1104 | __func__, dev->id, attrs->qp_type);
1180 | | - return -EINVAL;
| 1105 | + return -EOPNOTSUPP;
1181 | 1106 | }
1182 | 1107 | /* Skip the check for QP1 to support CM size of 128 */
1183 | 1108 | if ((attrs->qp_type != IB_QPT_GSI) &&
.. | ..
1217 | 1142 | return -EINVAL;
1218 | 1143 | }
1219 | 1144 | /* unprivileged user space cannot create special QP */
1220 | | - if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
| 1145 | + if (udata && attrs->qp_type == IB_QPT_GSI) {
1221 | 1146 | pr_err
1222 | 1147 | ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
1223 | 1148 | __func__, dev->id, attrs->qp_type);
.. | ..
1374 | 1299 | struct ocrdma_create_qp_ureq ureq;
1375 | 1300 | u16 dpp_credit_lmt, dpp_offset;
1376 | 1301 |
1377 | | - status = ocrdma_check_qp_params(ibpd, dev, attrs);
| 1302 | + status = ocrdma_check_qp_params(ibpd, dev, attrs, udata);
1378 | 1303 | if (status)
1379 | 1304 | goto gen_err;
1380 | 1305 |
.. | ..
1480 | 1405 | new_qps = old_qps;
1481 | 1406 | spin_unlock_irqrestore(&qp->q_lock, flags);
1482 | 1407 |
1483 | | - if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
1484 | | - IB_LINK_LAYER_ETHERNET)) {
| 1408 | + if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
1485 | 1409 | pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
1486 | 1410 | "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
1487 | 1411 | __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
.. | ..
1742 | 1666 | spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1743 | 1667 | }
1744 | 1668 |
1745 | | -int ocrdma_destroy_qp(struct ib_qp *ibqp)
| 1669 | +int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
1746 | 1670 | {
1747 | 1671 | struct ocrdma_pd *pd;
1748 | 1672 | struct ocrdma_qp *qp;
.. | ..
1838 | 1762 | return status;
1839 | 1763 | }
1840 | 1764 |
1841 | | -struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
1842 | | - struct ib_srq_init_attr *init_attr,
1843 | | - struct ib_udata *udata)
| 1765 | +int ocrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
| 1766 | + struct ib_udata *udata)
1844 | 1767 | {
1845 | | - int status = -ENOMEM;
1846 | | - struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1847 | | - struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
1848 | | - struct ocrdma_srq *srq;
| 1768 | + int status;
| 1769 | + struct ocrdma_pd *pd = get_ocrdma_pd(ibsrq->pd);
| 1770 | + struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
| 1771 | + struct ocrdma_srq *srq = get_ocrdma_srq(ibsrq);
1849 | 1772 |
1850 | 1773 | if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
1851 | | - return ERR_PTR(-EINVAL);
| 1774 | + return -EINVAL;
1852 | 1775 | if (init_attr->attr.max_wr > dev->attr.max_rqe)
1853 | | - return ERR_PTR(-EINVAL);
1854 | | -
1855 | | - srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1856 | | - if (!srq)
1857 | | - return ERR_PTR(status);
| 1776 | + return -EINVAL;
1858 | 1777 |
1859 | 1778 | spin_lock_init(&srq->q_lock);
1860 | 1779 | srq->pd = pd;
1861 | 1780 | srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
1862 | 1781 | status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
1863 | 1782 | if (status)
1864 | | - goto err;
| 1783 | + return status;
1865 | 1784 |
1866 | | - if (udata == NULL) {
1867 | | - status = -ENOMEM;
| 1785 | + if (!udata) {
1868 | 1786 | srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
1869 | 1787 | GFP_KERNEL);
1870 | | - if (srq->rqe_wr_id_tbl == NULL)
| 1788 | + if (!srq->rqe_wr_id_tbl) {
| 1789 | + status = -ENOMEM;
1871 | 1790 | goto arm_err;
| 1791 | + }
1872 | 1792 |
1873 | 1793 | srq->bit_fields_len = (srq->rq.max_cnt / 32) +
1874 | 1794 | (srq->rq.max_cnt % 32 ? 1 : 0);
1875 | 1795 | srq->idx_bit_fields =
1876 | 1796 | kmalloc_array(srq->bit_fields_len, sizeof(u32),
1877 | 1797 | GFP_KERNEL);
1878 | | - if (srq->idx_bit_fields == NULL)
| 1798 | + if (!srq->idx_bit_fields) {
| 1799 | + status = -ENOMEM;
1879 | 1800 | goto arm_err;
| 1801 | + }
1880 | 1802 | memset(srq->idx_bit_fields, 0xff,
1881 | 1803 | srq->bit_fields_len * sizeof(u32));
1882 | 1804 | }
.. | ..
1893 | 1815 | goto arm_err;
1894 | 1816 | }
1895 | 1817 |
1896 | | - return &srq->ibsrq;
| 1818 | + return 0;
1897 | 1819 |
1898 | 1820 | arm_err:
1899 | 1821 | ocrdma_mbx_destroy_srq(dev, srq);
1900 | | -err:
1901 | 1822 | kfree(srq->rqe_wr_id_tbl);
1902 | 1823 | kfree(srq->idx_bit_fields);
1903 | | - kfree(srq);
1904 | | - return ERR_PTR(status);
| 1824 | + return status;
1905 | 1825 | }
1906 | 1826 |
1907 | 1827 | int ocrdma_modify_srq(struct ib_srq *ibsrq,
.. | ..
1930 | 1850 | return status;
1931 | 1851 | }
1932 | 1852 |
1933 | | -int ocrdma_destroy_srq(struct ib_srq *ibsrq)
| 1853 | +int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
1934 | 1854 | {
1935 | | - int status;
1936 | 1855 | struct ocrdma_srq *srq;
1937 | 1856 | struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
1938 | 1857 |
1939 | 1858 | srq = get_ocrdma_srq(ibsrq);
1940 | 1859 |
1941 | | - status = ocrdma_mbx_destroy_srq(dev, srq);
| 1860 | + ocrdma_mbx_destroy_srq(dev, srq);
1942 | 1861 |
1943 | 1862 | if (srq->pd->uctx)
1944 | 1863 | ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
.. | ..
1946 | 1865 |
1947 | 1866 | kfree(srq->idx_bit_fields);
1948 | 1867 | kfree(srq->rqe_wr_id_tbl);
1949 | | - kfree(srq);
1950 | | - return status;
| 1868 | + return 0;
1951 | 1869 | }
1952 | 1870 |
1953 | 1871 | /* unprivileged verbs and their support functions. */
.. | ..
2210 | 2128 | case IB_WR_SEND_WITH_IMM:
2211 | 2129 | hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2212 | 2130 | hdr->immdt = ntohl(wr->ex.imm_data);
2213 | | - /* fall through */
| 2131 | + fallthrough;
2214 | 2132 | case IB_WR_SEND:
2215 | 2133 | hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2216 | 2134 | ocrdma_build_send(qp, hdr, wr);
.. | ..
2224 | 2142 | case IB_WR_RDMA_WRITE_WITH_IMM:
2225 | 2143 | hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2226 | 2144 | hdr->immdt = ntohl(wr->ex.imm_data);
2227 | | - /* fall through */
| 2145 | + fallthrough;
2228 | 2146 | case IB_WR_RDMA_WRITE:
2229 | 2147 | hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
2230 | 2148 | status = ocrdma_build_write(qp, hdr, wr);
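
The /* fall through */ comment markers in the two hunks above become the fallthrough pseudo-keyword (defined in include/linux/compiler_attributes.h, expanding to __attribute__((__fallthrough__)) where the compiler supports it), so -Wimplicit-fallthrough is satisfied by an actual statement rather than by comment parsing. The construct, sketched in simplified form:

	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
		hdr->immdt = ntohl(wr->ex.imm_data);
		fallthrough;	/* the IMM variant shares the SEND path */
	case IB_WR_SEND:
		ocrdma_build_send(qp, hdr, wr);
		break;
	}
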
.. | ..
2976 | 2894 | return 0;
2977 | 2895 | }
2978 | 2896 |
2979 | | -struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd,
2980 | | - enum ib_mr_type mr_type,
| 2897 | +struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
2981 | 2898 | u32 max_num_sg)
2982 | 2899 | {
2983 | 2900 | int status;
---|