.. | ..
31 | 31 |  */
32 | 32 |
33 | 33 | #include <linux/module.h>
| 34 | +#include <rdma/uverbs_ioctl.h>
34 | 35 |
35 | 36 | #include "iw_cxgb4.h"
36 | 37 |
.. | ..
56 | 57 |
57 | 58 | static int max_fr_immd = T4_MAX_FR_IMMD;
58 | 59 | module_param(max_fr_immd, int, 0644);
59 | | -MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immedate");
| 60 | +MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
60 | 61 |
61 | 62 | static int alloc_ird(struct c4iw_dev *dev, u32 ird)
62 | 63 | {
63 | 64 | 	int ret = 0;
64 | 65 |
65 | | -	spin_lock_irq(&dev->lock);
| 66 | +	xa_lock_irq(&dev->qps);
66 | 67 | 	if (ird <= dev->avail_ird)
67 | 68 | 		dev->avail_ird -= ird;
68 | 69 | 	else
69 | 70 | 		ret = -ENOMEM;
70 | | -	spin_unlock_irq(&dev->lock);
| 71 | +	xa_unlock_irq(&dev->qps);
71 | 72 |
72 | 73 | 	if (ret)
73 | 74 | 		dev_warn(&dev->rdev.lldi.pdev->dev,
.. | ..
78 | 79 |
79 | 80 | static void free_ird(struct c4iw_dev *dev, int ird)
80 | 81 | {
81 | | -	spin_lock_irq(&dev->lock);
| 82 | +	xa_lock_irq(&dev->qps);
82 | 83 | 	dev->avail_ird += ird;
83 | | -	spin_unlock_irq(&dev->lock);
| 84 | +	xa_unlock_irq(&dev->qps);
84 | 85 | }
85 | 86 |
86 | 87 | static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
.. | ..
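The alloc_ird()/free_ird() hunks replace the driver-wide dev->lock spinlock with the lock that is already embedded in the dev->qps xarray: xa_lock_irq() takes the xarray's internal spinlock, so it can also serialize adjacent bookkeeping such as the avail_ird budget. A minimal sketch of that pattern, using a hypothetical budget_dev structure that is not part of this driver:

```c
#include <linux/xarray.h>

/* Hypothetical structure, for illustration only. */
struct budget_dev {
	struct xarray objs;	/* xa_init_flags(&objs, XA_FLAGS_LOCK_IRQ) */
	u32 avail;		/* protected by objs' internal xa_lock */
};

static int reserve_budget(struct budget_dev *d, u32 n)
{
	int ret = 0;

	/* Reuse the xarray's spinlock instead of a separate device lock. */
	xa_lock_irq(&d->objs);
	if (n <= d->avail)
		d->avail -= n;
	else
		ret = -ENOMEM;
	xa_unlock_irq(&d->objs);
	return ret;
}
```

The design choice is one fewer lock to reason about; the trade-off is that the xarray's lock is now also held for the budget update.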
99 | 100 | static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
100 | 101 | {
101 | 102 | 	dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
102 | | -			  pci_unmap_addr(sq, mapping));
| 103 | +			  dma_unmap_addr(sq, mapping));
103 | 104 | }
104 | 105 |
105 | 106 | static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
.. | ..
132 | 133 | 	if (!sq->queue)
133 | 134 | 		return -ENOMEM;
134 | 135 | 	sq->phys_addr = virt_to_phys(sq->queue);
135 | | -	pci_unmap_addr_set(sq, mapping, sq->dma_addr);
| 136 | +	dma_unmap_addr_set(sq, mapping, sq->dma_addr);
136 | 137 | 	return 0;
137 | 138 | }
138 | 139 |
.. | ..
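These hunks are part of the tree-wide retirement of the pci_unmap_addr*() wrappers in favor of the generic dma_unmap_addr*() helpers from linux/dma-mapping.h. A short sketch of the idiom under an assumed ring structure (names are illustrative, not from this file):

```c
#include <linux/dma-mapping.h>

struct ring {
	void *queue;
	size_t memsize;
	dma_addr_t dma_addr;
	/* Compiles away on configs that don't track unmap state. */
	DEFINE_DMA_UNMAP_ADDR(mapping);
};

static int ring_alloc(struct device *dev, struct ring *r)
{
	r->queue = dma_alloc_coherent(dev, r->memsize, &r->dma_addr,
				      GFP_KERNEL);
	if (!r->queue)
		return -ENOMEM;
	/* Stash the bus address for the later dma_free_coherent(). */
	dma_unmap_addr_set(r, mapping, r->dma_addr);
	return 0;
}

static void ring_free(struct device *dev, struct ring *r)
{
	dma_free_coherent(dev, r->memsize, r->queue,
			  dma_unmap_addr(r, mapping));
}
```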
273 | 274 | 			 (unsigned long long)virt_to_phys(wq->sq.queue),
274 | 275 | 			 wq->rq.queue,
275 | 276 | 			 (unsigned long long)virt_to_phys(wq->rq.queue));
276 | | -		memset(wq->rq.queue, 0, wq->rq.memsize);
277 | 277 | 		dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
278 | 278 | 	}
279 | 279 |
.. | ..
303 | 303 | 		wq->rq.msn = 1;
304 | 304 |
305 | 305 | 	/* build fw_ri_res_wr */
306 | | -	wr_len = sizeof *res_wr + 2 * sizeof *res;
| 306 | +	wr_len = sizeof(*res_wr) + 2 * sizeof(*res);
307 | 307 | 	if (need_rq)
308 | 308 | 		wr_len += sizeof(*res);
309 | 309 | 	skb = alloc_skb(wr_len, GFP_KERNEL);
.. | ..
439 | 439 | 			rem -= len;
440 | 440 | 		}
441 | 441 | 	}
442 | | -	len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
| 442 | +	len = roundup(plen + sizeof(*immdp), 16) - (plen + sizeof(*immdp));
443 | 443 | 	if (len)
444 | 444 | 		memset(dstp, 0, len);
445 | 445 | 	immdp->op = FW_RI_DATA_IMMD;
.. | ..
528 | 528 | 					 T4_MAX_SEND_INLINE, &plen);
529 | 529 | 			if (ret)
530 | 530 | 				return ret;
531 | | -			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
| 531 | +			size = sizeof(wqe->send) + sizeof(struct fw_ri_immd) +
532 | 532 | 			       plen;
533 | 533 | 		} else {
534 | 534 | 			ret = build_isgl((__be64 *)sq->queue,
.. | ..
537 | 537 | 					 wr->sg_list, wr->num_sge, &plen);
538 | 538 | 			if (ret)
539 | 539 | 				return ret;
540 | | -			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
| 540 | +			size = sizeof(wqe->send) + sizeof(struct fw_ri_isgl) +
541 | 541 | 			       wr->num_sge * sizeof(struct fw_ri_sge);
542 | 542 | 		}
543 | 543 | 	} else {
.. | ..
545 | 545 | 		wqe->send.u.immd_src[0].r1 = 0;
546 | 546 | 		wqe->send.u.immd_src[0].r2 = 0;
547 | 547 | 		wqe->send.u.immd_src[0].immdlen = 0;
548 | | -		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
| 548 | +		size = sizeof(wqe->send) + sizeof(struct fw_ri_immd);
549 | 549 | 		plen = 0;
550 | 550 | 	}
551 | 551 | 	*len16 = DIV_ROUND_UP(size, 16);
.. | ..
579 | 579 | 					 T4_MAX_WRITE_INLINE, &plen);
580 | 580 | 			if (ret)
581 | 581 | 				return ret;
582 | | -			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
| 582 | +			size = sizeof(wqe->write) + sizeof(struct fw_ri_immd) +
583 | 583 | 			       plen;
584 | 584 | 		} else {
585 | 585 | 			ret = build_isgl((__be64 *)sq->queue,
.. | ..
588 | 588 | 					 wr->sg_list, wr->num_sge, &plen);
589 | 589 | 			if (ret)
590 | 590 | 				return ret;
591 | | -			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
| 591 | +			size = sizeof(wqe->write) + sizeof(struct fw_ri_isgl) +
592 | 592 | 			       wr->num_sge * sizeof(struct fw_ri_sge);
593 | 593 | 		}
594 | 594 | 	} else {
.. | ..
596 | 596 | 		wqe->write.u.immd_src[0].r1 = 0;
597 | 597 | 		wqe->write.u.immd_src[0].r2 = 0;
598 | 598 | 		wqe->write.u.immd_src[0].immdlen = 0;
599 | | -		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
| 599 | +		size = sizeof(wqe->write) + sizeof(struct fw_ri_immd);
600 | 600 | 		plen = 0;
601 | 601 | 	}
602 | 602 | 	*len16 = DIV_ROUND_UP(size, 16);
.. | ..
633 | 633 |
634 | 634 | 	wcwr->stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
635 | 635 | 	wcwr->to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
636 | | -	wcwr->stag_inv = cpu_to_be32(wr->next->ex.invalidate_rkey);
| 636 | +	if (wr->next->opcode == IB_WR_SEND)
| 637 | +		wcwr->stag_inv = 0;
| 638 | +	else
| 639 | +		wcwr->stag_inv = cpu_to_be32(wr->next->ex.invalidate_rkey);
637 | 640 | 	wcwr->r2 = 0;
638 | 641 | 	wcwr->r3 = 0;
639 | 642 |
.. | ..
680 | 683 | 	}
681 | 684 | 	wqe->read.r2 = 0;
682 | 685 | 	wqe->read.r5 = 0;
683 | | -	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
| 686 | +	*len16 = DIV_ROUND_UP(sizeof(wqe->read), 16);
684 | 687 | 	return 0;
685 | 688 | }
686 | 689 |
.. | ..
727 | 730 |
728 | 731 | 	/* SEND_WITH_INV swsqe */
729 | 732 | 	swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
730 | | -	swsqe->opcode = FW_RI_SEND_WITH_INV;
| 733 | +	if (wr->next->opcode == IB_WR_SEND)
| 734 | +		swsqe->opcode = FW_RI_SEND;
| 735 | +	else
| 736 | +		swsqe->opcode = FW_RI_SEND_WITH_INV;
731 | 737 | 	swsqe->idx = qhp->wq.sq.pidx;
732 | 738 | 	swsqe->complete = 0;
733 | 739 | 	swsqe->signaled = send_signaled;
.. | ..
760 | 766 | 			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
761 | 767 | 	if (ret)
762 | 768 | 		return ret;
763 | | -	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
764 | | -			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
| 769 | +	*len16 = DIV_ROUND_UP(
| 770 | +		sizeof(wqe->recv) + wr->num_sge * sizeof(struct fw_ri_sge), 16);
765 | 771 | 	return 0;
766 | 772 | }
767 | 773 |
.. | ..
880 | 886 | {
881 | 887 | 	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
882 | 888 | 	wqe->inv.r2 = 0;
883 | | -	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
| 889 | +	*len16 = DIV_ROUND_UP(sizeof(wqe->inv), 16);
884 | 890 | 	return 0;
885 | | -}
886 | | -
887 | | -static void free_qp_work(struct work_struct *work)
888 | | -{
889 | | -	struct c4iw_ucontext *ucontext;
890 | | -	struct c4iw_qp *qhp;
891 | | -	struct c4iw_dev *rhp;
892 | | -
893 | | -	qhp = container_of(work, struct c4iw_qp, free_work);
894 | | -	ucontext = qhp->ucontext;
895 | | -	rhp = qhp->rhp;
896 | | -
897 | | -	pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
898 | | -	destroy_qp(&rhp->rdev, &qhp->wq,
899 | | -		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
900 | | -
901 | | -	if (ucontext)
902 | | -		c4iw_put_ucontext(ucontext);
903 | | -	c4iw_put_wr_wait(qhp->wr_waitp);
904 | | -	kfree(qhp);
905 | | -}
906 | | -
907 | | -static void queue_qp_free(struct kref *kref)
908 | | -{
909 | | -	struct c4iw_qp *qhp;
910 | | -
911 | | -	qhp = container_of(kref, struct c4iw_qp, kref);
912 | | -	pr_debug("qhp %p\n", qhp);
913 | | -	queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
914 | 891 | }
915 | 892 |
916 | 893 | void c4iw_qp_add_ref(struct ib_qp *qp)
917 | 894 | {
918 | 895 | 	pr_debug("ib_qp %p\n", qp);
919 | | -	kref_get(&to_c4iw_qp(qp)->kref);
| 896 | +	refcount_inc(&to_c4iw_qp(qp)->qp_refcnt);
920 | 897 | }
921 | 898 |
922 | 899 | void c4iw_qp_rem_ref(struct ib_qp *qp)
923 | 900 | {
924 | 901 | 	pr_debug("ib_qp %p\n", qp);
925 | | -	kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
| 902 | +	if (refcount_dec_and_test(&to_c4iw_qp(qp)->qp_refcnt))
| 903 | +		complete(&to_c4iw_qp(qp)->qp_rel_comp);
926 | 904 | }
927 | 905 |
928 | 906 | static void add_to_fc_list(struct list_head *head, struct list_head *entry)
.. | ..
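The hunks above drop the kref plus deferred workqueue free in favor of a refcount_t and a completion: the destroy path releases its reference and then sleeps until the last holder is gone, so the final teardown can run synchronously in c4iw_destroy_qp() (see the later hunk). A minimal sketch of the pattern, with hypothetical names:

```c
#include <linux/completion.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t refcnt;
	struct completion rel_comp;
};

static void obj_init(struct obj *o)
{
	refcount_set(&o->refcnt, 1);	/* the initial reference */
	init_completion(&o->rel_comp);
}

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->refcnt))
		complete(&o->rel_comp);
}

/* Must be called from a context that may sleep. */
static void obj_destroy(struct obj *o)
{
	obj_put(o);			/* drop the initial reference */
	wait_for_completion(&o->rel_comp);
	kfree(o);
}
```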
935 | 913 | {
936 | 914 | 	unsigned long flags;
937 | 915 |
938 | | -	spin_lock_irqsave(&qhp->rhp->lock, flags);
| 916 | +	xa_lock_irqsave(&qhp->rhp->qps, flags);
939 | 917 | 	spin_lock(&qhp->lock);
940 | 918 | 	if (qhp->rhp->db_state == NORMAL)
941 | 919 | 		t4_ring_sq_db(&qhp->wq, inc, NULL);
.. | ..
944 | 922 | 		qhp->wq.sq.wq_pidx_inc += inc;
945 | 923 | 	}
946 | 924 | 	spin_unlock(&qhp->lock);
947 | | -	spin_unlock_irqrestore(&qhp->rhp->lock, flags);
| 925 | +	xa_unlock_irqrestore(&qhp->rhp->qps, flags);
948 | 926 | 	return 0;
949 | 927 | }
950 | 928 |
.. | ..
952 | 930 | {
953 | 931 | 	unsigned long flags;
954 | 932 |
955 | | -	spin_lock_irqsave(&qhp->rhp->lock, flags);
| 933 | +	xa_lock_irqsave(&qhp->rhp->qps, flags);
956 | 934 | 	spin_lock(&qhp->lock);
957 | 935 | 	if (qhp->rhp->db_state == NORMAL)
958 | 936 | 		t4_ring_rq_db(&qhp->wq, inc, NULL);
.. | ..
961 | 939 | 		qhp->wq.rq.wq_pidx_inc += inc;
962 | 940 | 	}
963 | 941 | 	spin_unlock(&qhp->lock);
964 | | -	spin_unlock_irqrestore(&qhp->rhp->lock, flags);
| 942 | +	xa_unlock_irqrestore(&qhp->rhp->qps, flags);
965 | 943 | 	return 0;
966 | 944 | }
967 | 945 |
.. | ..
1134 | 1112 | 	/*
1135 | 1113 | 	 * Fastpath for NVMe-oF target WRITE + SEND_WITH_INV wr chain which is
1136 | 1114 | 	 * the response for small NVMe-oF READ requests. If the chain is
1137 | | -	 * exactly a WRITE->SEND_WITH_INV and the sgl depths and lengths
1138 | | -	 * meet the requirements of the fw_ri_write_cmpl_wr work request,
1139 | | -	 * then build and post the write_cmpl WR. If any of the tests
| 1115 | +	 * exactly a WRITE->SEND_WITH_INV or a WRITE->SEND and the sgl depths
| 1116 | +	 * and lengths meet the requirements of the fw_ri_write_cmpl_wr work
| 1117 | +	 * request, then build and post the write_cmpl WR. If any of the tests
1140 | 1118 | 	 * below are not true, then we continue on with the traditional WRITE
1141 | 1119 | 	 * and SEND WRs.
1142 | 1120 | 	 */
.. | ..
1146 | 1124 | 	    wr && wr->next && !wr->next->next &&
1147 | 1125 | 	    wr->opcode == IB_WR_RDMA_WRITE &&
1148 | 1126 | 	    wr->sg_list[0].length && wr->num_sge <= T4_WRITE_CMPL_MAX_SGL &&
1149 | | -	    wr->next->opcode == IB_WR_SEND_WITH_INV &&
| 1127 | +	    (wr->next->opcode == IB_WR_SEND ||
| 1128 | +	     wr->next->opcode == IB_WR_SEND_WITH_INV) &&
1150 | 1129 | 	    wr->next->sg_list[0].length == T4_WRITE_CMPL_MAX_CQE &&
1151 | 1130 | 	    wr->next->num_sge == 1 && num_wrs >= 2) {
1152 | 1131 | 		post_write_cmpl(qhp, wr);
.. | ..
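For context, the two-WR chain this fastpath matches is the kind an NVMe-oF target posts for a small READ response: an RDMA WRITE of the data followed by a single-SGE SEND (or, with this patch, also a SEND_WITH_INV) of the completion capsule. A hedged sketch of posting such a chain from the ULP side, with illustrative names and parameters:

```c
#include <rdma/ib_verbs.h>

/* Illustrative only: post WRITE + SEND_WITH_INV as one chain. */
static int post_read_response(struct ib_qp *qp, struct ib_sge *data_sge,
			      struct ib_sge *resp_sge, u64 remote_addr,
			      u32 rkey)
{
	struct ib_rdma_wr write_wr = {};
	struct ib_send_wr send_wr = {};
	const struct ib_send_wr *bad_wr;

	write_wr.wr.opcode = IB_WR_RDMA_WRITE;
	write_wr.wr.sg_list = data_sge;		/* <= T4_WRITE_CMPL_MAX_SGL */
	write_wr.wr.num_sge = 1;
	write_wr.remote_addr = remote_addr;
	write_wr.rkey = rkey;
	write_wr.wr.next = &send_wr;

	send_wr.opcode = IB_WR_SEND_WITH_INV;	/* plain IB_WR_SEND matches too */
	send_wr.ex.invalidate_rkey = rkey;
	send_wr.sg_list = resp_sge;		/* one T4_WRITE_CMPL_MAX_CQE-sized SGE */
	send_wr.num_sge = 1;
	send_wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(qp, &write_wr.wr, &bad_wr);
}
```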
1187 | 1166 | 			break;
1188 | 1167 | 		}
1189 | 1168 | 		fw_flags |= FW_RI_RDMA_WRITE_WITH_IMMEDIATE;
1190 | | -		/*FALLTHROUGH*/
| 1169 | +		fallthrough;
1191 | 1170 | 	case IB_WR_RDMA_WRITE:
1192 | 1171 | 		fw_opcode = FW_RI_RDMA_WRITE_WR;
1193 | 1172 | 		swsqe->opcode = FW_RI_RDMA_WRITE;
.. | ..
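The /*FALLTHROUGH*/ comment becomes the fallthrough pseudo-keyword from linux/compiler_attributes.h, which expands to __attribute__((fallthrough)) on compilers that support it, so -Wimplicit-fallthrough can verify the intent. A trivial sketch with hypothetical opcodes:

```c
/* Hypothetical opcode mapping, for illustration only. */
static int map_opcode(int opcode, u32 *flags)
{
	int fw_opcode;

	switch (opcode) {
	case 1:			/* write with immediate */
		*flags |= 0x1;
		fallthrough;	/* a statement the compiler can check */
	case 0:			/* plain write */
		fw_opcode = 0x10;
		break;
	default:
		return -EINVAL;
	}
	return fw_opcode;
}
```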
1601 | 1580 | 			       FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1602 | 1581 |
1603 | 1582 | 	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
1604 | | -	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
| 1583 | +	wqe->u.terminate.immdlen = cpu_to_be32(sizeof(*term));
1605 | 1584 | 	term = (struct terminate_message *)wqe->u.terminate.termmsg;
1606 | 1585 | 	if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
1607 | 1586 | 		term->layer_etype = qhp->attr.layer_etype;
.. | ..
1746 | 1725 | static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
1747 | 1726 | {
1748 | 1727 | 	pr_debug("p2p_type = %d\n", p2p_type);
1749 | | -	memset(&init->u, 0, sizeof init->u);
| 1728 | +	memset(&init->u, 0, sizeof(init->u));
1750 | 1729 | 	switch (p2p_type) {
1751 | 1730 | 	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
1752 | 1731 | 		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
1753 | 1732 | 		init->u.write.stag_sink = cpu_to_be32(1);
1754 | 1733 | 		init->u.write.to_sink = cpu_to_be64(1);
1755 | 1734 | 		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
1756 | | -		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
1757 | | -						   sizeof(struct fw_ri_immd),
1758 | | -						   16);
| 1735 | +		init->u.write.len16 = DIV_ROUND_UP(
| 1736 | +			sizeof(init->u.write) + sizeof(struct fw_ri_immd), 16);
1759 | 1737 | 		break;
1760 | 1738 | 	case FW_RI_INIT_P2PTYPE_READ_REQ:
1761 | 1739 | 		init->u.write.opcode = FW_RI_RDMA_READ_WR;
.. | ..
1763 | 1741 | 		init->u.read.to_src_lo = cpu_to_be32(1);
1764 | 1742 | 		init->u.read.stag_sink = cpu_to_be32(1);
1765 | 1743 | 		init->u.read.to_sink_lo = cpu_to_be32(1);
1766 | | -		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
| 1744 | +		init->u.read.len16 = DIV_ROUND_UP(sizeof(init->u.read), 16);
1767 | 1745 | 		break;
1768 | 1746 | 	}
1769 | 1747 | }
.. | ..
1777 | 1755 | 	pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp,
1778 | 1756 | 		 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
1779 | 1757 |
1780 | | -	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
| 1758 | +	skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
1781 | 1759 | 	if (!skb) {
1782 | 1760 | 		ret = -ENOMEM;
1783 | 1761 | 		goto out;
.. | ..
1972 | 1950 | 		qhp->attr.ecode = attrs->ecode;
1973 | 1951 | 		ep = qhp->ep;
1974 | 1952 | 		if (!internal) {
1975 | | -			c4iw_get_ep(&qhp->ep->com);
| 1953 | +			c4iw_get_ep(&ep->com);
1976 | 1954 | 			terminate = 1;
1977 | 1955 | 			disconnect = 1;
1978 | 1956 | 		} else {
.. | ..
2090 | 2068 | 	return ret;
2091 | 2069 | }
2092 | 2070 |
2093 | | -int c4iw_destroy_qp(struct ib_qp *ib_qp)
| 2071 | +int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
2094 | 2072 | {
2095 | 2073 | 	struct c4iw_dev *rhp;
2096 | 2074 | 	struct c4iw_qp *qhp;
| 2075 | +	struct c4iw_ucontext *ucontext;
2097 | 2076 | 	struct c4iw_qp_attributes attrs;
2098 | 2077 |
2099 | 2078 | 	qhp = to_c4iw_qp(ib_qp);
2100 | 2079 | 	rhp = qhp->rhp;
| 2080 | +	ucontext = qhp->ucontext;
2101 | 2081 |
2102 | 2082 | 	attrs.next_state = C4IW_QP_STATE_ERROR;
2103 | 2083 | 	if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
.. | ..
2106 | 2086 | 		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
2107 | 2087 | 	wait_event(qhp->wait, !qhp->ep);
2108 | 2088 |
2109 | | -	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
2110 | | -
2111 | | -	spin_lock_irq(&rhp->lock);
| 2089 | +	xa_lock_irq(&rhp->qps);
| 2090 | +	__xa_erase(&rhp->qps, qhp->wq.sq.qid);
2112 | 2091 | 	if (!list_empty(&qhp->db_fc_entry))
2113 | 2092 | 		list_del_init(&qhp->db_fc_entry);
2114 | | -	spin_unlock_irq(&rhp->lock);
| 2093 | +	xa_unlock_irq(&rhp->qps);
2115 | 2094 | 	free_ird(rhp, qhp->attr.max_ird);
2116 | 2095 |
2117 | 2096 | 	c4iw_qp_rem_ref(ib_qp);
2118 | 2097 |
| 2098 | +	wait_for_completion(&qhp->qp_rel_comp);
| 2099 | +
2119 | 2100 | 	pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid);
| 2101 | +	pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
| 2102 | +
| 2103 | +	destroy_qp(&rhp->rdev, &qhp->wq,
| 2104 | +		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
| 2105 | +
| 2106 | +	c4iw_put_wr_wait(qhp->wr_waitp);
| 2107 | +
| 2108 | +	kfree(qhp);
2120 | 2109 | 	return 0;
2121 | 2110 | }
2122 | 2111 |
.. | ..
2130 | 2119 | 	struct c4iw_cq *rchp;
2131 | 2120 | 	struct c4iw_create_qp_resp uresp;
2132 | 2121 | 	unsigned int sqsize, rqsize = 0;
2133 | | -	struct c4iw_ucontext *ucontext;
| 2122 | +	struct c4iw_ucontext *ucontext = rdma_udata_to_drv_context(
| 2123 | +		udata, struct c4iw_ucontext, ibucontext);
2134 | 2124 | 	int ret;
2135 | 2125 | 	struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
2136 | 2126 | 	struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
.. | ..
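The new declaration uses rdma_udata_to_drv_context() from rdma/uverbs_ioctl.h (hence the new include at the top of the file) to recover the driver's ucontext from the udata of a user verb; it yields NULL for in-kernel callers, replacing the old pd->uobject dance. A minimal sketch of the call, assuming the same c4iw types:

```c
#include <rdma/uverbs_ioctl.h>

/* NULL when the verb was invoked by a kernel consumer (udata == NULL). */
struct c4iw_ucontext *ucontext =
	rdma_udata_to_drv_context(udata, struct c4iw_ucontext, ibucontext);
```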
2138 | 2128 | 	pr_debug("ib_pd %p\n", pd);
2139 | 2129 |
2140 | 2130 | 	if (attrs->qp_type != IB_QPT_RC)
2141 | | -		return ERR_PTR(-EINVAL);
| 2131 | +		return ERR_PTR(-EOPNOTSUPP);
2142 | 2132 |
2143 | 2133 | 	php = to_c4iw_pd(pd);
2144 | 2134 | 	rhp = php->rhp;
.. | ..
2163 | 2153 | 	sqsize = attrs->cap.max_send_wr + 1;
2164 | 2154 | 	if (sqsize < 8)
2165 | 2155 | 		sqsize = 8;
2166 | | -
2167 | | -	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
2168 | 2156 |
2169 | 2157 | 	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
2170 | 2158 | 	if (!qhp)
.. | ..
2227 | 2215 | 	spin_lock_init(&qhp->lock);
2228 | 2216 | 	mutex_init(&qhp->mutex);
2229 | 2217 | 	init_waitqueue_head(&qhp->wait);
2230 | | -	kref_init(&qhp->kref);
2231 | | -	INIT_WORK(&qhp->free_work, free_qp_work);
| 2218 | +	init_completion(&qhp->qp_rel_comp);
| 2219 | +	refcount_set(&qhp->qp_refcnt, 1);
2232 | 2220 |
2233 | | -	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
| 2221 | +	ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL);
2234 | 2222 | 	if (ret)
2235 | 2223 | 		goto err_destroy_qp;
2236 | 2224 |
.. | ..
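insert_handle()/remove_handle() were thin wrappers around an idr guarded by the driver lock; the xarray API collapses registration and removal into single calls. xa_insert_irq() allocates internally (GFP_KERNEL here) and fails with -EBUSY if the qid is already present. A sketch of the handle-table pattern with an illustrative standalone table, not the driver's:

```c
#include <linux/xarray.h>

/* IRQ-safe table: qid -> object pointer. */
static DEFINE_XARRAY_FLAGS(qp_table, XA_FLAGS_LOCK_IRQ);

static int qp_table_insert(u32 qid, void *qhp)
{
	/* Returns -EBUSY if qid is already occupied. */
	return xa_insert_irq(&qp_table, qid, qhp, GFP_KERNEL);
}

static void *qp_table_remove(u32 qid)
{
	return xa_erase_irq(&qp_table, qid);
}
```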
2299 | 2287 | 			ucontext->key += PAGE_SIZE;
2300 | 2288 | 		}
2301 | 2289 | 		spin_unlock(&ucontext->mmap_lock);
2302 | | -		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
| 2290 | +		ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
2303 | 2291 | 		if (ret)
2304 | 2292 | 			goto err_free_ma_sync_key;
2305 | 2293 | 		sq_key_mm->key = uresp.sq_key;
.. | ..
2332 | 2320 | 			insert_mmap(ucontext, ma_sync_key_mm);
2333 | 2321 | 		}
2334 | 2322 |
2335 | | -		c4iw_get_ucontext(ucontext);
2336 | 2323 | 		qhp->ucontext = ucontext;
2337 | 2324 | 	}
2338 | 2325 | 	if (!attrs->srq) {
.. | ..
2367 | 2354 | err_free_sq_key:
2368 | 2355 | 	kfree(sq_key_mm);
2369 | 2356 | err_remove_handle:
2370 | | -	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
| 2357 | +	xa_erase_irq(&rhp->qps, qhp->wq.sq.qid);
2371 | 2358 | err_destroy_qp:
2372 | 2359 | 	destroy_qp(&rhp->rdev, &qhp->wq,
2373 | 2360 | 		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq);
.. | ..
2384 | 2371 | 	struct c4iw_dev *rhp;
2385 | 2372 | 	struct c4iw_qp *qhp;
2386 | 2373 | 	enum c4iw_qp_attr_mask mask = 0;
2387 | | -	struct c4iw_qp_attributes attrs;
| 2374 | +	struct c4iw_qp_attributes attrs = {};
2388 | 2375 |
2389 | 2376 | 	pr_debug("ib_qp %p\n", ibqp);
2390 | 2377 |
.. | ..
2396 | 2383 | 	if (!attr_mask)
2397 | 2384 | 		return 0;
2398 | 2385 |
2399 | | -	memset(&attrs, 0, sizeof attrs);
2400 | 2386 | 	qhp = to_c4iw_qp(ibqp);
2401 | 2387 | 	rhp = qhp->rhp;
2402 | 2388 |
.. | ..
2480 | 2466 | {
2481 | 2467 | 	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
2482 | 2468 |
2483 | | -	memset(attr, 0, sizeof *attr);
2484 | | -	memset(init_attr, 0, sizeof *init_attr);
| 2469 | +	memset(attr, 0, sizeof(*attr));
| 2470 | +	memset(init_attr, 0, sizeof(*init_attr));
2485 | 2471 | 	attr->qp_state = to_ib_qp_state(qhp->attr.state);
2486 | 2472 | 	attr->cur_qp_state = to_ib_qp_state(qhp->attr.state);
2487 | 2473 | 	init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
.. | ..
2524 | 2510 |
2525 | 2511 | 	dma_free_coherent(&rdev->lldi.pdev->dev,
2526 | 2512 | 			  wq->memsize, wq->queue,
2527 | | -			  pci_unmap_addr(wq, mapping));
| 2513 | +			  dma_unmap_addr(wq, mapping));
2528 | 2514 | 	c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
2529 | 2515 | 	kfree(wq->sw_rq);
2530 | 2516 | 	c4iw_put_qpid(rdev, wq->qid, uctx);
.. | ..
2566 | 2552 | 	wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
2567 | 2553 | 		T4_RQT_ENTRY_SHIFT;
2568 | 2554 |
2569 | | -	wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev,
2570 | | -				       wq->memsize, &wq->dma_addr,
2571 | | -				       GFP_KERNEL);
| 2555 | +	wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize,
| 2556 | +				       &wq->dma_addr, GFP_KERNEL);
2572 | 2557 | 	if (!wq->queue)
2573 | 2558 | 		goto err_free_rqtpool;
2574 | 2559 |
2575 | | -	memset(wq->queue, 0, wq->memsize);
2576 | | -	pci_unmap_addr_set(wq, mapping, wq->dma_addr);
| 2560 | +	dma_unmap_addr_set(wq, mapping, wq->dma_addr);
2577 | 2561 |
2578 | 2562 | 	wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, CXGB4_BAR2_QTYPE_EGRESS,
2579 | 2563 | 				      &wq->bar2_qid,
.. | ..
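The memset() dropped above is safe because, since kernel 5.0, the DMA mapping core zeroes memory returned by dma_alloc_coherent(), so call sites no longer need to clear the buffer themselves. A small illustrative helper (names assumed, not from this driver):

```c
#include <linux/dma-mapping.h>

static void *queue_alloc(struct device *dev, size_t size,
			 dma_addr_t *dma_handle)
{
	/*
	 * The returned buffer is already zero-filled on kernels >= 5.0;
	 * no follow-up memset() is required.
	 */
	return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}
```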
2593 | 2577 | 	/* build fw_ri_res_wr */
2594 | 2578 | 	wr_len = sizeof(*res_wr) + sizeof(*res);
2595 | 2579 |
2596 | | -	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
| 2580 | +	skb = alloc_skb(wr_len, GFP_KERNEL);
2597 | 2581 | 	if (!skb)
2598 | 2582 | 		goto err_free_queue;
2599 | 2583 | 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
.. | ..
2652 | 2636 | err_free_queue:
2653 | 2637 | 	dma_free_coherent(&rdev->lldi.pdev->dev,
2654 | 2638 | 			  wq->memsize, wq->queue,
2655 | | -			  pci_unmap_addr(wq, mapping));
| 2639 | +			  dma_unmap_addr(wq, mapping));
2656 | 2640 | err_free_rqtpool:
2657 | 2641 | 	c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
2658 | 2642 | err_free_pending_wrs:
.. | ..
2684 | 2668 | 	}
2685 | 2669 | }
2686 | 2670 |
2687 | | -struct ib_srq *c4iw_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *attrs,
| 2671 | +int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs,
2688 | 2672 | 			       struct ib_udata *udata)
2689 | 2673 | {
| 2674 | +	struct ib_pd *pd = ib_srq->pd;
2690 | 2675 | 	struct c4iw_dev *rhp;
2691 | | -	struct c4iw_srq *srq;
| 2676 | +	struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
2692 | 2677 | 	struct c4iw_pd *php;
2693 | 2678 | 	struct c4iw_create_srq_resp uresp;
2694 | 2679 | 	struct c4iw_ucontext *ucontext;
.. | ..
2703 | 2688 | 	rhp = php->rhp;
2704 | 2689 |
2705 | 2690 | 	if (!rhp->rdev.lldi.vr->srq.size)
2706 | | -		return ERR_PTR(-EINVAL);
| 2691 | +		return -EINVAL;
2707 | 2692 | 	if (attrs->attr.max_wr > rhp->rdev.hw_queue.t4_max_rq_size)
2708 | | -		return ERR_PTR(-E2BIG);
| 2693 | +		return -E2BIG;
2709 | 2694 | 	if (attrs->attr.max_sge > T4_MAX_RECV_SGE)
2710 | | -		return ERR_PTR(-E2BIG);
| 2695 | +		return -E2BIG;
2711 | 2696 |
2712 | 2697 | 	/*
2713 | 2698 | 	 * SRQ RQT and RQ must be a power of 2 and at least 16 deep.
.. | ..
2715 | 2700 | 	rqsize = attrs->attr.max_wr + 1;
2716 | 2701 | 	rqsize = roundup_pow_of_two(max_t(u16, rqsize, 16));
2717 | 2702 |
2718 | | -	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
2719 | | -
2720 | | -	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
2721 | | -	if (!srq)
2722 | | -		return ERR_PTR(-ENOMEM);
| 2703 | +	ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
| 2704 | +					     ibucontext);
2723 | 2705 |
2724 | 2706 | 	srq->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
2725 | | -	if (!srq->wr_waitp) {
2726 | | -		ret = -ENOMEM;
2727 | | -		goto err_free_srq;
2728 | | -	}
| 2707 | +	if (!srq->wr_waitp)
| 2708 | +		return -ENOMEM;
2729 | 2709 |
2730 | 2710 | 	srq->idx = c4iw_alloc_srq_idx(&rhp->rdev);
2731 | 2711 | 	if (srq->idx < 0) {
.. | ..
2759 | 2739 | 	if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6)
2760 | 2740 | 		srq->flags = T4_SRQ_LIMIT_SUPPORT;
2761 | 2741 |
2762 | | -	ret = insert_handle(rhp, &rhp->qpidr, srq, srq->wq.qid);
2763 | | -	if (ret)
2764 | | -		goto err_free_queue;
2765 | | -
2766 | 2742 | 	if (udata) {
2767 | 2743 | 		srq_key_mm = kmalloc(sizeof(*srq_key_mm), GFP_KERNEL);
2768 | 2744 | 		if (!srq_key_mm) {
2769 | 2745 | 			ret = -ENOMEM;
2770 | | -			goto err_remove_handle;
| 2746 | +			goto err_free_queue;
2771 | 2747 | 		}
2772 | 2748 | 		srq_db_key_mm = kmalloc(sizeof(*srq_db_key_mm), GFP_KERNEL);
2773 | 2749 | 		if (!srq_db_key_mm) {
.. | ..
2805 | 2781 | 		 (unsigned long)srq->wq.memsize, attrs->attr.max_wr);
2806 | 2782 |
2807 | 2783 | 	spin_lock_init(&srq->lock);
2808 | | -	return &srq->ibsrq;
| 2784 | +	return 0;
| 2785 | +
2809 | 2786 | err_free_srq_db_key_mm:
2810 | 2787 | 	kfree(srq_db_key_mm);
2811 | 2788 | err_free_srq_key_mm:
2812 | 2789 | 	kfree(srq_key_mm);
2813 | | -err_remove_handle:
2814 | | -	remove_handle(rhp, &rhp->qpidr, srq->wq.qid);
2815 | 2790 | err_free_queue:
2816 | 2791 | 	free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
2817 | 2792 | 		       srq->wr_waitp);
2818 | 2793 | err_free_skb:
2819 | | -	if (srq->destroy_skb)
2820 | | -		kfree_skb(srq->destroy_skb);
| 2794 | +	kfree_skb(srq->destroy_skb);
2821 | 2795 | err_free_srq_idx:
2822 | 2796 | 	c4iw_free_srq_idx(&rhp->rdev, srq->idx);
2823 | 2797 | err_free_wr_wait:
2824 | 2798 | 	c4iw_put_wr_wait(srq->wr_waitp);
2825 | | -err_free_srq:
2826 | | -	kfree(srq);
2827 | | -	return ERR_PTR(ret);
| 2799 | +	return ret;
2828 | 2800 | }
2829 | 2801 |
2830 | | -int c4iw_destroy_srq(struct ib_srq *ibsrq)
| 2802 | +int c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
2831 | 2803 | {
2832 | 2804 | 	struct c4iw_dev *rhp;
2833 | 2805 | 	struct c4iw_srq *srq;
.. | ..
2837 | 2809 | 	rhp = srq->rhp;
2838 | 2810 |
2839 | 2811 | 	pr_debug("%s id %d\n", __func__, srq->wq.qid);
2840 | | -
2841 | | -	remove_handle(rhp, &rhp->qpidr, srq->wq.qid);
2842 | | -	ucontext = ibsrq->uobject ?
2843 | | -		to_c4iw_ucontext(ibsrq->uobject->context) : NULL;
| 2812 | +	ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
| 2813 | +					     ibucontext);
2844 | 2814 | 	free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
2845 | 2815 | 		       srq->wr_waitp);
2846 | 2816 | 	c4iw_free_srq_idx(&rhp->rdev, srq->idx);
2847 | 2817 | 	c4iw_put_wr_wait(srq->wr_waitp);
2848 | | -	kfree(srq);
2849 | 2818 | 	return 0;
2850 | 2819 | }
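The c4iw_create_srq()/c4iw_destroy_srq() changes follow the RDMA core's move to core-allocated verb objects: the core allocates the driver's SRQ object (sized via the driver's registered object size) around the struct ib_srq it passes in, so the driver only initializes and tears down state and never kzalloc()s or kfree()s the object itself. The driver-side conversion is just container_of(); the to_c4iw_srq() helper in iw_cxgb4.h presumably looks essentially like this sketch:

```c
/* c4iw_srq embeds the core object, so conversion is container_of(). */
static inline struct c4iw_srq *to_c4iw_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct c4iw_srq, ibsrq);
}
```

This is also why the error paths above lose their kfree(srq) and return plain error codes instead of ERR_PTR() values.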