| .. | .. |
|---|
| 77 | 77 | module_param(enable_ecn, int, 0644); |
|---|
| 78 | 78 | MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)"); |
|---|
| 79 | 79 | |
|---|
| 80 | | -static int dack_mode = 1; |
|---|
| 80 | +static int dack_mode; |
|---|
| 81 | 81 | module_param(dack_mode, int, 0644); |
|---|
| 82 | | -MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)"); |
|---|
| 82 | +MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)"); |
|---|
| 83 | 83 | |
|---|
| 84 | 84 | uint c4iw_max_read_depth = 32; |
|---|
| 85 | 85 | module_param(c4iw_max_read_depth, int, 0644); |
|---|
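
The delayed-ACK default flips from 1 to 0 here: `static int dack_mode;` relies on zero-initialization, and the description string is updated to match. A minimal sketch of the module-parameter pattern in use (names are illustrative, not from the driver); a writable (0644) parameter is also exposed under `/sys/module/<module>/parameters/` for runtime changes:

```c
#include <linux/moduleparam.h>

/* Sketch only: same shape as the dack_mode parameter above. */
static int example_mode;                 /* implicit default is 0 */
module_param(example_mode, int, 0644);
MODULE_PARM_DESC(example_mode, "Example mode (default=0)");
```
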
| .. | .. |
|---|
| 331 | 331 | { |
|---|
| 332 | 332 | unsigned long flags; |
|---|
| 333 | 333 | |
|---|
| 334 | | - spin_lock_irqsave(&ep->com.dev->lock, flags); |
|---|
| 335 | | - _remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0); |
|---|
| 336 | | - if (idr_is_empty(&ep->com.dev->hwtid_idr)) |
|---|
| 334 | + xa_lock_irqsave(&ep->com.dev->hwtids, flags); |
|---|
| 335 | + __xa_erase(&ep->com.dev->hwtids, ep->hwtid); |
|---|
| 336 | + if (xa_empty(&ep->com.dev->hwtids)) |
|---|
| 337 | 337 | wake_up(&ep->com.dev->wait); |
|---|
| 338 | | - spin_unlock_irqrestore(&ep->com.dev->lock, flags); |
|---|
| 338 | + xa_unlock_irqrestore(&ep->com.dev->hwtids, flags); |
|---|
| 339 | 339 | } |
|---|
| 340 | 340 | |
|---|
| 341 | | -static void insert_ep_tid(struct c4iw_ep *ep) |
|---|
| 341 | +static int insert_ep_tid(struct c4iw_ep *ep) |
|---|
| 342 | 342 | { |
|---|
| 343 | 343 | unsigned long flags; |
|---|
| 344 | + int err; |
|---|
| 344 | 345 | |
|---|
| 345 | | - spin_lock_irqsave(&ep->com.dev->lock, flags); |
|---|
| 346 | | - _insert_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep, ep->hwtid, 0); |
|---|
| 347 | | - spin_unlock_irqrestore(&ep->com.dev->lock, flags); |
|---|
| 346 | + xa_lock_irqsave(&ep->com.dev->hwtids, flags); |
|---|
| 347 | + err = __xa_insert(&ep->com.dev->hwtids, ep->hwtid, ep, GFP_KERNEL); |
|---|
| 348 | + xa_unlock_irqrestore(&ep->com.dev->hwtids, flags); |
|---|
| 349 | + |
|---|
| 350 | + return err; |
|---|
| 348 | 351 | } |
|---|
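
This hunk replaces the per-device `lock` spinlock plus `hwtid_idr` IDR with the `hwtids` XArray. The XArray's internal spinlock is taken explicitly so the emptiness check and the wake-up stay atomic with the erase, and `insert_ep_tid()` now returns an error because `__xa_insert()` can report a collision or allocation failure. A minimal sketch of the same pattern on a stand-alone XArray (names and the GFP choice are mine, not the driver's):

```c
#include <linux/xarray.h>

/* IRQ-safe XArray: the lock is always taken with interrupts disabled. */
static DEFINE_XARRAY_FLAGS(tids, XA_FLAGS_LOCK_IRQ);

static int tid_insert(u32 tid, void *obj)
{
	unsigned long flags;
	int err;

	xa_lock_irqsave(&tids, flags);
	/* __xa_insert() returns -EBUSY if the index is already occupied */
	err = __xa_insert(&tids, tid, obj, GFP_ATOMIC);
	xa_unlock_irqrestore(&tids, flags);
	return err;
}

static void tid_erase(u32 tid)
{
	unsigned long flags;

	xa_lock_irqsave(&tids, flags);
	__xa_erase(&tids, tid);
	if (xa_empty(&tids))
		pr_debug("last entry removed\n");	/* e.g. wake a waiter, as above */
	xa_unlock_irqrestore(&tids, flags);
}
```
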
| 349 | 352 | |
|---|
| 350 | 353 | /* |
|---|
| .. | .. |
|---|
| 355 | 358 | struct c4iw_ep *ep; |
|---|
| 356 | 359 | unsigned long flags; |
|---|
| 357 | 360 | |
|---|
| 358 | | - spin_lock_irqsave(&dev->lock, flags); |
|---|
| 359 | | - ep = idr_find(&dev->hwtid_idr, tid); |
|---|
| 361 | + xa_lock_irqsave(&dev->hwtids, flags); |
|---|
| 362 | + ep = xa_load(&dev->hwtids, tid); |
|---|
| 360 | 363 | if (ep) |
|---|
| 361 | 364 | c4iw_get_ep(&ep->com); |
|---|
| 362 | | - spin_unlock_irqrestore(&dev->lock, flags); |
|---|
| 365 | + xa_unlock_irqrestore(&dev->hwtids, flags); |
|---|
| 363 | 366 | return ep; |
|---|
| 364 | 367 | } |
|---|
| 365 | 368 | |
|---|
| .. | .. |
|---|
| 372 | 375 | struct c4iw_listen_ep *ep; |
|---|
| 373 | 376 | unsigned long flags; |
|---|
| 374 | 377 | |
|---|
| 375 | | - spin_lock_irqsave(&dev->lock, flags); |
|---|
| 376 | | - ep = idr_find(&dev->stid_idr, stid); |
|---|
| 378 | + xa_lock_irqsave(&dev->stids, flags); |
|---|
| 379 | + ep = xa_load(&dev->stids, stid); |
|---|
| 377 | 380 | if (ep) |
|---|
| 378 | 381 | c4iw_get_ep(&ep->com); |
|---|
| 379 | | - spin_unlock_irqrestore(&dev->lock, flags); |
|---|
| 382 | + xa_unlock_irqrestore(&dev->stids, flags); |
|---|
| 380 | 383 | return ep; |
|---|
| 381 | 384 | } |
|---|
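
Both lookup helpers keep the same shape after the conversion: the XArray lock is held across `xa_load()` and the reference grab, so the endpoint cannot be freed between the lookup and `c4iw_get_ep()`. Roughly, as a sketch with an illustrative refcounted object standing in for the endpoint:

```c
#include <linux/kref.h>
#include <linux/xarray.h>

struct my_obj {
	struct kref kref;	/* stand-in for ep->com's reference count */
};

static struct my_obj *tid_lookup_and_get(struct xarray *xa, u32 tid)
{
	unsigned long flags;
	struct my_obj *obj;

	xa_lock_irqsave(xa, flags);
	obj = xa_load(xa, tid);
	if (obj)
		kref_get(&obj->kref);	/* stand-in for c4iw_get_ep() */
	xa_unlock_irqrestore(xa, flags);
	return obj;
}
```
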
| 382 | 385 | |
|---|
| .. | .. |
|---|
| 403 | 406 | ep->com.local_addr.ss_family); |
|---|
| 404 | 407 | dst_release(ep->dst); |
|---|
| 405 | 408 | cxgb4_l2t_release(ep->l2t); |
|---|
| 406 | | - if (ep->mpa_skb) |
|---|
| 407 | | - kfree_skb(ep->mpa_skb); |
|---|
| 409 | + kfree_skb(ep->mpa_skb); |
|---|
| 408 | 410 | } |
|---|
| 409 | 411 | if (!skb_queue_empty(&ep->com.ep_skb_list)) |
|---|
| 410 | 412 | skb_queue_purge(&ep->com.ep_skb_list); |
|---|
| .. | .. |
|---|
| 556 | 558 | cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], |
|---|
| 557 | 559 | (const u32 *)&sin6->sin6_addr.s6_addr, 1); |
|---|
| 558 | 560 | } |
|---|
| 559 | | - remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); |
|---|
| 561 | + xa_erase_irq(&ep->com.dev->atids, ep->atid); |
|---|
| 560 | 562 | cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); |
|---|
| 561 | 563 | queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE); |
|---|
| 562 | 564 | } |
|---|
| .. | .. |
|---|
| 656 | 658 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); |
|---|
| 657 | 659 | } |
|---|
| 658 | 660 | |
|---|
| 659 | | -static int send_abort(struct c4iw_ep *ep) |
|---|
| 661 | +static void read_tcb(struct c4iw_ep *ep) |
|---|
| 662 | +{ |
|---|
| 663 | + struct sk_buff *skb; |
|---|
| 664 | + struct cpl_get_tcb *req; |
|---|
| 665 | + int wrlen = roundup(sizeof(*req), 16); |
|---|
| 666 | + |
|---|
| 667 | + skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); |
|---|
| 668 | + if (WARN_ON(!skb)) |
|---|
| 669 | + return; |
|---|
| 670 | + |
|---|
| 671 | + set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx); |
|---|
| 672 | + req = (struct cpl_get_tcb *) skb_put(skb, wrlen); |
|---|
| 673 | + memset(req, 0, wrlen); |
|---|
| 674 | + INIT_TP_WR(req, ep->hwtid); |
|---|
| 675 | + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_GET_TCB, ep->hwtid)); |
|---|
| 676 | + req->reply_ctrl = htons(REPLY_CHAN_V(0) | QUEUENO_V(ep->rss_qid)); |
|---|
| 677 | + |
|---|
| 678 | + /* |
|---|
| 679 | + * keep a ref on the ep so the tcb is not unlocked before this |
|---|
| 680 | + * cpl completes. The ref is released in read_tcb_rpl(). |
|---|
| 681 | + */ |
|---|
| 682 | + c4iw_get_ep(&ep->com); |
|---|
| 683 | + if (WARN_ON(c4iw_ofld_send(&ep->com.dev->rdev, skb))) |
|---|
| 684 | + c4iw_put_ep(&ep->com); |
|---|
| 685 | +} |
|---|
| 686 | + |
|---|
| 687 | +static int send_abort_req(struct c4iw_ep *ep) |
|---|
| 660 | 688 | { |
|---|
| 661 | 689 | u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16); |
|---|
| 662 | 690 | struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list); |
|---|
| .. | .. |
|---|
| 669 | 697 | ep, abort_arp_failure); |
|---|
| 670 | 698 | |
|---|
| 671 | 699 | return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t); |
|---|
| 700 | +} |
|---|
| 701 | + |
|---|
| 702 | +static int send_abort(struct c4iw_ep *ep) |
|---|
| 703 | +{ |
|---|
| 704 | + if (!ep->com.qp || !ep->com.qp->srq) { |
|---|
| 705 | + send_abort_req(ep); |
|---|
| 706 | + return 0; |
|---|
| 707 | + } |
|---|
| 708 | + set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags); |
|---|
| 709 | + read_tcb(ep); |
|---|
| 710 | + return 0; |
|---|
| 672 | 711 | } |
|---|
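
`send_abort()` now splits into two paths: without an SRQ the ABORT_REQ goes out immediately via `send_abort_req()`; with an SRQ the driver first sets `ABORT_REQ_IN_PROGRESS` and issues a CPL_GET_TCB through `read_tcb()` so the SRQ index can be captured from the hardware TCB, and `read_tcb_rpl()` sends the abort afterwards. Note how `read_tcb()` pins the endpoint across the asynchronous reply; a minimal sketch of that reference discipline (all names here are hypothetical, not driver symbols):

```c
#include <linux/kref.h>
#include <linux/slab.h>

struct my_ep {
	struct kref kref;
};

static void my_ep_release(struct kref *kref)
{
	kfree(container_of(kref, struct my_ep, kref));
}

static int hw_send(struct my_ep *ep)
{
	return 0;			/* stand-in for c4iw_ofld_send() */
}

static void post_async_query(struct my_ep *ep)
{
	kref_get(&ep->kref);		/* the reply handler owns this reference */
	if (hw_send(ep))
		kref_put(&ep->kref, my_ep_release);	/* send failed: undo the get */
}

static void async_reply(struct my_ep *ep)
{
	/* ... consume the reply ... */
	kref_put(&ep->kref, my_ep_release);	/* pairs with the get above */
}
```
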
| 673 | 712 | |
|---|
| 674 | 713 | static int send_connect(struct c4iw_ep *ep) |
|---|
| .. | .. |
|---|
| 912 | 951 | mpalen = sizeof(*mpa) + ep->plen; |
|---|
| 913 | 952 | if (mpa_rev_to_use == 2) |
|---|
| 914 | 953 | mpalen += sizeof(struct mpa_v2_conn_params); |
|---|
| 915 | | - wrlen = roundup(mpalen + sizeof *req, 16); |
|---|
| 954 | + wrlen = roundup(mpalen + sizeof(*req), 16); |
|---|
| 916 | 955 | skb = get_skb(skb, wrlen, GFP_KERNEL); |
|---|
| 917 | 956 | if (!skb) { |
|---|
| 918 | 957 | connect_reply_upcall(ep, -ENOMEM); |
|---|
| .. | .. |
|---|
| 956 | 995 | } |
|---|
| 957 | 996 | |
|---|
| 958 | 997 | if (mpa_rev_to_use == 2) { |
|---|
| 959 | | - mpa->private_data_size = htons(ntohs(mpa->private_data_size) + |
|---|
| 960 | | - sizeof (struct mpa_v2_conn_params)); |
|---|
| 998 | + mpa->private_data_size = |
|---|
| 999 | + htons(ntohs(mpa->private_data_size) + |
|---|
| 1000 | + sizeof(struct mpa_v2_conn_params)); |
|---|
| 961 | 1001 | pr_debug("initiator ird %u ord %u\n", ep->ird, |
|---|
| 962 | 1002 | ep->ord); |
|---|
| 963 | 1003 | mpa_v2_params.ird = htons((u16)ep->ird); |
|---|
| .. | .. |
|---|
| 1016 | 1056 | mpalen = sizeof(*mpa) + plen; |
|---|
| 1017 | 1057 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) |
|---|
| 1018 | 1058 | mpalen += sizeof(struct mpa_v2_conn_params); |
|---|
| 1019 | | - wrlen = roundup(mpalen + sizeof *req, 16); |
|---|
| 1059 | + wrlen = roundup(mpalen + sizeof(*req), 16); |
|---|
| 1020 | 1060 | |
|---|
| 1021 | 1061 | skb = get_skb(NULL, wrlen, GFP_KERNEL); |
|---|
| 1022 | 1062 | if (!skb) { |
|---|
| .. | .. |
|---|
| 1047 | 1087 | |
|---|
| 1048 | 1088 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { |
|---|
| 1049 | 1089 | mpa->flags |= MPA_ENHANCED_RDMA_CONN; |
|---|
| 1050 | | - mpa->private_data_size = htons(ntohs(mpa->private_data_size) + |
|---|
| 1051 | | - sizeof (struct mpa_v2_conn_params)); |
|---|
| 1090 | + mpa->private_data_size = |
|---|
| 1091 | + htons(ntohs(mpa->private_data_size) + |
|---|
| 1092 | + sizeof(struct mpa_v2_conn_params)); |
|---|
| 1052 | 1093 | mpa_v2_params.ird = htons(((u16)ep->ird) | |
|---|
| 1053 | 1094 | (peer2peer ? MPA_V2_PEER2PEER_MODEL : |
|---|
| 1054 | 1095 | 0)); |
|---|
| .. | .. |
|---|
| 1095 | 1136 | mpalen = sizeof(*mpa) + plen; |
|---|
| 1096 | 1137 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) |
|---|
| 1097 | 1138 | mpalen += sizeof(struct mpa_v2_conn_params); |
|---|
| 1098 | | - wrlen = roundup(mpalen + sizeof *req, 16); |
|---|
| 1139 | + wrlen = roundup(mpalen + sizeof(*req), 16); |
|---|
| 1099 | 1140 | |
|---|
| 1100 | 1141 | skb = get_skb(NULL, wrlen, GFP_KERNEL); |
|---|
| 1101 | 1142 | if (!skb) { |
|---|
| .. | .. |
|---|
| 1130 | 1171 | |
|---|
| 1131 | 1172 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { |
|---|
| 1132 | 1173 | mpa->flags |= MPA_ENHANCED_RDMA_CONN; |
|---|
| 1133 | | - mpa->private_data_size = htons(ntohs(mpa->private_data_size) + |
|---|
| 1134 | | - sizeof (struct mpa_v2_conn_params)); |
|---|
| 1174 | + mpa->private_data_size = |
|---|
| 1175 | + htons(ntohs(mpa->private_data_size) + |
|---|
| 1176 | + sizeof(struct mpa_v2_conn_params)); |
|---|
| 1135 | 1177 | mpa_v2_params.ird = htons((u16)ep->ird); |
|---|
| 1136 | 1178 | mpa_v2_params.ord = htons((u16)ep->ord); |
|---|
| 1137 | 1179 | if (peer2peer && (ep->mpa_attr.p2p_type != |
|---|
| .. | .. |
|---|
| 1199 | 1241 | set_emss(ep, tcp_opt); |
|---|
| 1200 | 1242 | |
|---|
| 1201 | 1243 | /* dealloc the atid */ |
|---|
| 1202 | | - remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid); |
|---|
| 1244 | + xa_erase_irq(&ep->com.dev->atids, atid); |
|---|
| 1203 | 1245 | cxgb4_free_atid(t, atid); |
|---|
| 1204 | 1246 | set_bit(ACT_ESTAB, &ep->com.history); |
|---|
| 1205 | 1247 | |
|---|
| .. | .. |
|---|
| 1852 | 1894 | return 0; |
|---|
| 1853 | 1895 | } |
|---|
| 1854 | 1896 | |
|---|
| 1855 | | -static void complete_cached_srq_buffers(struct c4iw_ep *ep, |
|---|
| 1856 | | - __be32 srqidx_status) |
|---|
| 1897 | +static void complete_cached_srq_buffers(struct c4iw_ep *ep, u32 srqidx) |
|---|
| 1857 | 1898 | { |
|---|
| 1858 | 1899 | enum chip_type adapter_type; |
|---|
| 1859 | | - u32 srqidx; |
|---|
| 1860 | 1900 | |
|---|
| 1861 | 1901 | adapter_type = ep->com.dev->rdev.lldi.adapter_type; |
|---|
| 1862 | | - srqidx = ABORT_RSS_SRQIDX_G(be32_to_cpu(srqidx_status)); |
|---|
| 1863 | 1902 | |
|---|
| 1864 | 1903 | /* |
|---|
| 1865 | 1904 | * If this TCB had a srq buffer cached, then we must complete |
|---|
| .. | .. |
|---|
| 1877 | 1916 | |
|---|
| 1878 | 1917 | static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) |
|---|
| 1879 | 1918 | { |
|---|
| 1919 | + u32 srqidx; |
|---|
| 1880 | 1920 | struct c4iw_ep *ep; |
|---|
| 1881 | 1921 | struct cpl_abort_rpl_rss6 *rpl = cplhdr(skb); |
|---|
| 1882 | 1922 | int release = 0; |
|---|
| .. | .. |
|---|
| 1888 | 1928 | return 0; |
|---|
| 1889 | 1929 | } |
|---|
| 1890 | 1930 | |
|---|
| 1891 | | - complete_cached_srq_buffers(ep, rpl->srqidx_status); |
|---|
| 1931 | + if (ep->com.qp && ep->com.qp->srq) { |
|---|
| 1932 | + srqidx = ABORT_RSS_SRQIDX_G(be32_to_cpu(rpl->srqidx_status)); |
|---|
| 1933 | + complete_cached_srq_buffers(ep, srqidx ? srqidx : ep->srqe_idx); |
|---|
| 1934 | + } |
|---|
| 1892 | 1935 | |
|---|
| 1893 | 1936 | pr_debug("ep %p tid %u\n", ep, ep->hwtid); |
|---|
| 1894 | 1937 | mutex_lock(&ep->com.mutex); |
|---|
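
`complete_cached_srq_buffers()` now takes an already-extracted index, and `abort_rpl()` only calls it for SRQ-backed QPs: it prefers the index carried in the ABORT_RPL itself and falls back to `ep->srqe_idx`, which the earlier TCB read populated. A sketch of that selection as it would sit in cm.c, where the `ABORT_RSS_SRQIDX_G()` accessor is already available:

```c
/* Illustrative only: pick the SRQ index reported in the CPL if there is one,
 * otherwise use the value captured earlier from the TCB read.
 */
static u32 pick_srq_idx(__be32 srqidx_status, u32 cached_idx)
{
	u32 srqidx = ABORT_RSS_SRQIDX_G(be32_to_cpu(srqidx_status));

	return srqidx ? srqidx : cached_idx;
}
```
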
| .. | .. |
|---|
| 2061 | 2104 | } |
|---|
| 2062 | 2105 | ep->mtu = pdev->mtu; |
|---|
| 2063 | 2106 | ep->tx_chan = cxgb4_port_chan(pdev); |
|---|
| 2064 | | - ep->smac_idx = cxgb4_tp_smt_idx(adapter_type, |
|---|
| 2065 | | - cxgb4_port_viid(pdev)); |
|---|
| 2107 | + ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx; |
|---|
| 2066 | 2108 | step = cdev->rdev.lldi.ntxq / |
|---|
| 2067 | 2109 | cdev->rdev.lldi.nchan; |
|---|
| 2068 | 2110 | ep->txq_idx = cxgb4_port_idx(pdev) * step; |
|---|
| .. | .. |
|---|
| 2081 | 2123 | goto out; |
|---|
| 2082 | 2124 | ep->mtu = dst_mtu(dst); |
|---|
| 2083 | 2125 | ep->tx_chan = cxgb4_port_chan(pdev); |
|---|
| 2084 | | - ep->smac_idx = cxgb4_tp_smt_idx(adapter_type, |
|---|
| 2085 | | - cxgb4_port_viid(pdev)); |
|---|
| 2126 | + ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx; |
|---|
| 2086 | 2127 | step = cdev->rdev.lldi.ntxq / |
|---|
| 2087 | 2128 | cdev->rdev.lldi.nchan; |
|---|
| 2088 | 2129 | ep->txq_idx = cxgb4_port_idx(pdev) * step; |
|---|
| .. | .. |
|---|
| 2149 | 2190 | err = -ENOMEM; |
|---|
| 2150 | 2191 | goto fail2; |
|---|
| 2151 | 2192 | } |
|---|
| 2152 | | - insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid); |
|---|
| 2193 | + err = xa_insert_irq(&ep->com.dev->atids, ep->atid, ep, GFP_KERNEL); |
|---|
| 2194 | + if (err) |
|---|
| 2195 | + goto fail2a; |
|---|
| 2153 | 2196 | |
|---|
| 2154 | 2197 | /* find a route */ |
|---|
| 2155 | 2198 | if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) { |
|---|
| .. | .. |
|---|
| 2201 | 2244 | fail4: |
|---|
| 2202 | 2245 | dst_release(ep->dst); |
|---|
| 2203 | 2246 | fail3: |
|---|
| 2204 | | - remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); |
|---|
| 2247 | + xa_erase_irq(&ep->com.dev->atids, ep->atid); |
|---|
| 2248 | +fail2a: |
|---|
| 2205 | 2249 | cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); |
|---|
| 2206 | 2250 | fail2: |
|---|
| 2207 | 2251 | /* |
|---|
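
The atid is now tracked in the `atids` XArray, and the return value of `xa_insert_irq()` is checked, so a new `fail2a` label is added and the unwind keeps strict reverse order (erase the XArray entry before freeing the atid). The general shape of that ladder, with illustrative labels:

```c
#include <linux/xarray.h>

static int do_next_step(void)
{
	return 0;			/* hypothetical follow-on setup */
}

static int setup_example(struct xarray *atids, u32 atid, void *ep)
{
	int err;

	err = xa_insert_irq(atids, atid, ep, GFP_KERNEL);
	if (err)
		goto free_atid;

	err = do_next_step();
	if (err)
		goto erase_atid;

	return 0;

erase_atid:
	xa_erase_irq(atids, atid);	/* undo in reverse order of setup */
free_atid:
	/* cxgb4_free_atid(...) would go here */
	return err;
}
```
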
| .. | .. |
|---|
| 2284 | 2328 | (const u32 *) |
|---|
| 2285 | 2329 | &sin6->sin6_addr.s6_addr, 1); |
|---|
| 2286 | 2330 | } |
|---|
| 2287 | | - remove_handle(ep->com.dev, &ep->com.dev->atid_idr, |
|---|
| 2288 | | - atid); |
|---|
| 2331 | + xa_erase_irq(&ep->com.dev->atids, atid); |
|---|
| 2289 | 2332 | cxgb4_free_atid(t, atid); |
|---|
| 2290 | 2333 | dst_release(ep->dst); |
|---|
| 2291 | 2334 | cxgb4_l2t_release(ep->l2t); |
|---|
| .. | .. |
|---|
| 2322 | 2365 | cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl), |
|---|
| 2323 | 2366 | ep->com.local_addr.ss_family); |
|---|
| 2324 | 2367 | |
|---|
| 2325 | | - remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid); |
|---|
| 2368 | + xa_erase_irq(&ep->com.dev->atids, atid); |
|---|
| 2326 | 2369 | cxgb4_free_atid(t, atid); |
|---|
| 2327 | 2370 | dst_release(ep->dst); |
|---|
| 2328 | 2371 | cxgb4_l2t_release(ep->l2t); |
|---|
| .. | .. |
|---|
| 2482 | 2525 | u16 peer_mss = ntohs(req->tcpopt.mss); |
|---|
| 2483 | 2526 | int iptype; |
|---|
| 2484 | 2527 | unsigned short hdrs; |
|---|
| 2485 | | - u8 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid)); |
|---|
| 2528 | + u8 tos; |
|---|
| 2486 | 2529 | |
|---|
| 2487 | 2530 | parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid); |
|---|
| 2488 | 2531 | if (!parent_ep) { |
|---|
| .. | .. |
|---|
| 2495 | 2538 | pr_err("%s - listening ep not in LISTEN\n", __func__); |
|---|
| 2496 | 2539 | goto reject; |
|---|
| 2497 | 2540 | } |
|---|
| 2541 | + |
|---|
| 2542 | + if (parent_ep->com.cm_id->tos_set) |
|---|
| 2543 | + tos = parent_ep->com.cm_id->tos; |
|---|
| 2544 | + else |
|---|
| 2545 | + tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid)); |
|---|
| 2498 | 2546 | |
|---|
| 2499 | 2547 | cxgb_get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type, |
|---|
| 2500 | 2548 | &iptype, local_ip, peer_ip, &local_port, &peer_port); |
|---|
| .. | .. |
|---|
| 2515 | 2563 | ntohs(peer_port), peer_mss); |
|---|
| 2516 | 2564 | dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev, |
|---|
| 2517 | 2565 | local_ip, peer_ip, local_port, peer_port, |
|---|
| 2518 | | - PASS_OPEN_TOS_G(ntohl(req->tos_stid)), |
|---|
| 2566 | + tos, |
|---|
| 2519 | 2567 | ((struct sockaddr_in6 *) |
|---|
| 2520 | 2568 | &parent_ep->com.local_addr)->sin6_scope_id); |
|---|
| 2521 | 2569 | } |
|---|
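
For child connections, the type-of-service now honours a value the ULP set on the listening endpoint (`tos_set`/`tos` on the iw_cm_id) and only falls back to the TOS echoed in the PASS_OPEN CPL; the same `tos` also feeds the IPv6 route lookup. The listener-side fields are typically populated when the application sets a service type on the rdma_cm id before listening; a minimal sketch using the standard RDMA-CM calls (this is not code from the patch):

```c
#include <rdma/rdma_cm.h>

/* Hypothetical listener setup: the requested ToS is recorded on the cm_id
 * (tos + tos_set) and propagated down to iw_cxgb4 for child connections.
 */
static int listen_with_tos(struct rdma_cm_id *listen_id, u8 tos, int backlog)
{
	rdma_set_service_type(listen_id, tos);
	return rdma_listen(listen_id, backlog);
}
```
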
| .. | .. |
|---|
| 2746 | 2794 | return 0; |
|---|
| 2747 | 2795 | } |
|---|
| 2748 | 2796 | |
|---|
| 2797 | +static void finish_peer_abort(struct c4iw_dev *dev, struct c4iw_ep *ep) |
|---|
| 2798 | +{ |
|---|
| 2799 | + complete_cached_srq_buffers(ep, ep->srqe_idx); |
|---|
| 2800 | + if (ep->com.cm_id && ep->com.qp) { |
|---|
| 2801 | + struct c4iw_qp_attributes attrs; |
|---|
| 2802 | + |
|---|
| 2803 | + attrs.next_state = C4IW_QP_STATE_ERROR; |
|---|
| 2804 | + c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, |
|---|
| 2805 | + C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); |
|---|
| 2806 | + } |
|---|
| 2807 | + peer_abort_upcall(ep); |
|---|
| 2808 | + release_ep_resources(ep); |
|---|
| 2809 | + c4iw_put_ep(&ep->com); |
|---|
| 2810 | +} |
|---|
| 2811 | + |
|---|
| 2749 | 2812 | static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) |
|---|
| 2750 | 2813 | { |
|---|
| 2751 | 2814 | struct cpl_abort_req_rss6 *req = cplhdr(skb); |
|---|
| .. | .. |
|---|
| 2756 | 2819 | int release = 0; |
|---|
| 2757 | 2820 | unsigned int tid = GET_TID(req); |
|---|
| 2758 | 2821 | u8 status; |
|---|
| 2822 | + u32 srqidx; |
|---|
| 2759 | 2823 | |
|---|
| 2760 | 2824 | u32 len = roundup(sizeof(struct cpl_abort_rpl), 16); |
|---|
| 2761 | 2825 | |
|---|
| .. | .. |
|---|
| 2774 | 2838 | mutex_unlock(&dev->rdev.stats.lock); |
|---|
| 2775 | 2839 | goto deref_ep; |
|---|
| 2776 | 2840 | } |
|---|
| 2777 | | - |
|---|
| 2778 | | - complete_cached_srq_buffers(ep, req->srqidx_status); |
|---|
| 2779 | 2841 | |
|---|
| 2780 | 2842 | pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid, |
|---|
| 2781 | 2843 | ep->com.state); |
|---|
| .. | .. |
|---|
| 2823 | 2885 | case MORIBUND: |
|---|
| 2824 | 2886 | case CLOSING: |
|---|
| 2825 | 2887 | stop_ep_timer(ep); |
|---|
| 2826 | | - /*FALLTHROUGH*/ |
|---|
| 2888 | + fallthrough; |
|---|
| 2827 | 2889 | case FPDU_MODE: |
|---|
| 2890 | + if (ep->com.qp && ep->com.qp->srq) { |
|---|
| 2891 | + srqidx = ABORT_RSS_SRQIDX_G( |
|---|
| 2892 | + be32_to_cpu(req->srqidx_status)); |
|---|
| 2893 | + if (srqidx) { |
|---|
| 2894 | + complete_cached_srq_buffers(ep, srqidx); |
|---|
| 2895 | + } else { |
|---|
| 2896 | + /* Hold ep ref until finish_peer_abort() */ |
|---|
| 2897 | + c4iw_get_ep(&ep->com); |
|---|
| 2898 | + __state_set(&ep->com, ABORTING); |
|---|
| 2899 | + set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags); |
|---|
| 2900 | + read_tcb(ep); |
|---|
| 2901 | + break; |
|---|
| 2902 | + |
|---|
| 2903 | + } |
|---|
| 2904 | + } |
|---|
| 2905 | + |
|---|
| 2828 | 2906 | if (ep->com.cm_id && ep->com.qp) { |
|---|
| 2829 | 2907 | attrs.next_state = C4IW_QP_STATE_ERROR; |
|---|
| 2830 | 2908 | ret = c4iw_modify_qp(ep->com.qp->rhp, |
|---|
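
`peer_abort()` mirrors the `send_abort()` change: for an SRQ-backed QP whose ABORT_REQ carries no SRQ index, the handler takes an extra endpoint reference, marks `PEER_ABORT_IN_PROGRESS`, and defers to `read_tcb()`; `finish_peer_abort()` later completes the cached SRQ buffers, moves the QP to ERROR, delivers the abort upcall and releases the endpoint. The hunk also converts the `/*FALLTHROUGH*/` comment into the `fallthrough` statement; roughly what that pseudo-keyword expands to (paraphrased from `<linux/compiler_attributes.h>`):

```c
/* Paraphrase of the kernel definition: a real statement that the compiler and
 * static checkers can see, instead of a comment convention they may miss.
 */
#if __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallthrough */
#endif
```
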
| .. | .. |
|---|
| 2876 | 2954 | (const u32 *)&sin6->sin6_addr.s6_addr, |
|---|
| 2877 | 2955 | 1); |
|---|
| 2878 | 2956 | } |
|---|
| 2879 | | - remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid); |
|---|
| 2957 | + xa_erase_irq(&ep->com.dev->hwtids, ep->hwtid); |
|---|
| 2880 | 2958 | cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid, |
|---|
| 2881 | 2959 | ep->com.local_addr.ss_family); |
|---|
| 2882 | 2960 | dst_release(ep->dst); |
|---|
| .. | .. |
|---|
| 2957 | 3035 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); |
|---|
| 2958 | 3036 | } |
|---|
| 2959 | 3037 | |
|---|
| 3038 | + /* As per draft-hilland-iwarp-verbs-v1.0, sec 6.2.3, |
|---|
| 3039 | + * when entering the TERM state the RNIC MUST initiate a CLOSE. |
|---|
| 3040 | + */ |
|---|
| 3041 | + c4iw_ep_disconnect(ep, 1, GFP_KERNEL); |
|---|
| 2960 | 3042 | c4iw_put_ep(&ep->com); |
|---|
| 2961 | 3043 | } else |
|---|
| 2962 | 3044 | pr_warn("TERM received tid %u no ep/qp\n", tid); |
|---|
| .. | .. |
|---|
| 3152 | 3234 | int found = 0; |
|---|
| 3153 | 3235 | struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr; |
|---|
| 3154 | 3236 | struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; |
|---|
| 3237 | + const struct in_ifaddr *ifa; |
|---|
| 3155 | 3238 | |
|---|
| 3156 | 3239 | ind = in_dev_get(dev->rdev.lldi.ports[0]); |
|---|
| 3157 | 3240 | if (!ind) |
|---|
| 3158 | 3241 | return -EADDRNOTAVAIL; |
|---|
| 3159 | | - for_primary_ifa(ind) { |
|---|
| 3242 | + rcu_read_lock(); |
|---|
| 3243 | + in_dev_for_each_ifa_rcu(ifa, ind) { |
|---|
| 3244 | + if (ifa->ifa_flags & IFA_F_SECONDARY) |
|---|
| 3245 | + continue; |
|---|
| 3160 | 3246 | laddr->sin_addr.s_addr = ifa->ifa_address; |
|---|
| 3161 | 3247 | raddr->sin_addr.s_addr = ifa->ifa_address; |
|---|
| 3162 | 3248 | found = 1; |
|---|
| 3163 | 3249 | break; |
|---|
| 3164 | 3250 | } |
|---|
| 3165 | | - endfor_ifa(ind); |
|---|
| 3251 | + rcu_read_unlock(); |
|---|
| 3252 | + |
|---|
| 3166 | 3253 | in_dev_put(ind); |
|---|
| 3167 | 3254 | return found ? 0 : -EADDRNOTAVAIL; |
|---|
| 3168 | 3255 | } |
|---|
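
The deprecated `for_primary_ifa()`/`endfor_ifa()` pair is replaced by the RCU iterator, which needs an explicit read-side critical section and an explicit skip of secondary addresses. The bare pattern looks like this (sketch, not driver code):

```c
#include <linux/inetdevice.h>
#include <linux/rcupdate.h>

static __be32 first_primary_addr(struct in_device *in_dev)
{
	const struct in_ifaddr *ifa;
	__be32 addr = 0;

	rcu_read_lock();
	in_dev_for_each_ifa_rcu(ifa, in_dev) {
		if (ifa->ifa_flags & IFA_F_SECONDARY)
			continue;	/* for_primary_ifa() used to skip these */
		addr = ifa->ifa_address;
		break;
	}
	rcu_read_unlock();

	return addr;
}
```
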
| .. | .. |
|---|
| 3195 | 3282 | |
|---|
| 3196 | 3283 | static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id) |
|---|
| 3197 | 3284 | { |
|---|
| 3198 | | - struct in6_addr uninitialized_var(addr); |
|---|
| 3285 | + struct in6_addr addr; |
|---|
| 3199 | 3286 | struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; |
|---|
| 3200 | 3287 | struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr; |
|---|
| 3201 | 3288 | |
|---|
| .. | .. |
|---|
| 3271 | 3358 | err = -ENOMEM; |
|---|
| 3272 | 3359 | goto fail2; |
|---|
| 3273 | 3360 | } |
|---|
| 3274 | | - insert_handle(dev, &dev->atid_idr, ep, ep->atid); |
|---|
| 3361 | + err = xa_insert_irq(&dev->atids, ep->atid, ep, GFP_KERNEL); |
|---|
| 3362 | + if (err) |
|---|
| 3363 | + goto fail5; |
|---|
| 3275 | 3364 | |
|---|
| 3276 | 3365 | memcpy(&ep->com.local_addr, &cm_id->m_local_addr, |
|---|
| 3277 | 3366 | sizeof(ep->com.local_addr)); |
|---|
| .. | .. |
|---|
| 3359 | 3448 | fail4: |
|---|
| 3360 | 3449 | dst_release(ep->dst); |
|---|
| 3361 | 3450 | fail3: |
|---|
| 3362 | | - remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); |
|---|
| 3451 | + xa_erase_irq(&ep->com.dev->atids, ep->atid); |
|---|
| 3452 | +fail5: |
|---|
| 3363 | 3453 | cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); |
|---|
| 3364 | 3454 | fail2: |
|---|
| 3365 | 3455 | skb_queue_purge(&ep->com.ep_skb_list); |
|---|
| .. | .. |
|---|
| 3482 | 3572 | err = -ENOMEM; |
|---|
| 3483 | 3573 | goto fail2; |
|---|
| 3484 | 3574 | } |
|---|
| 3485 | | - insert_handle(dev, &dev->stid_idr, ep, ep->stid); |
|---|
| 3575 | + err = xa_insert_irq(&dev->stids, ep->stid, ep, GFP_KERNEL); |
|---|
| 3576 | + if (err) |
|---|
| 3577 | + goto fail3; |
|---|
| 3486 | 3578 | |
|---|
| 3487 | 3579 | state_set(&ep->com, LISTEN); |
|---|
| 3488 | 3580 | if (ep->com.local_addr.ss_family == AF_INET) |
|---|
| .. | .. |
|---|
| 3493 | 3585 | cm_id->provider_data = ep; |
|---|
| 3494 | 3586 | goto out; |
|---|
| 3495 | 3587 | } |
|---|
| 3496 | | - remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid); |
|---|
| 3588 | + xa_erase_irq(&ep->com.dev->stids, ep->stid); |
|---|
| 3589 | +fail3: |
|---|
| 3497 | 3590 | cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, |
|---|
| 3498 | 3591 | ep->com.local_addr.ss_family); |
|---|
| 3499 | 3592 | fail2: |
|---|
| .. | .. |
|---|
| 3533 | 3626 | cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], |
|---|
| 3534 | 3627 | (const u32 *)&sin6->sin6_addr.s6_addr, 1); |
|---|
| 3535 | 3628 | } |
|---|
| 3536 | | - remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid); |
|---|
| 3629 | + xa_erase_irq(&ep->com.dev->stids, ep->stid); |
|---|
| 3537 | 3630 | cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, |
|---|
| 3538 | 3631 | ep->com.local_addr.ss_family); |
|---|
| 3539 | 3632 | done: |
|---|
| .. | .. |
|---|
| 3667 | 3760 | send_fw_act_open_req(ep, atid); |
|---|
| 3668 | 3761 | return; |
|---|
| 3669 | 3762 | } |
|---|
| 3670 | | - /* fall through */ |
|---|
| 3763 | + fallthrough; |
|---|
| 3671 | 3764 | case FW_EADDRINUSE: |
|---|
| 3672 | 3765 | set_bit(ACT_RETRY_INUSE, &ep->com.history); |
|---|
| 3673 | 3766 | if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { |
|---|
| .. | .. |
|---|
| 3693 | 3786 | cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], |
|---|
| 3694 | 3787 | (const u32 *)&sin6->sin6_addr.s6_addr, 1); |
|---|
| 3695 | 3788 | } |
|---|
| 3696 | | - remove_handle(dev, &dev->atid_idr, atid); |
|---|
| 3789 | + xa_erase_irq(&dev->atids, atid); |
|---|
| 3697 | 3790 | cxgb4_free_atid(dev->rdev.lldi.tids, atid); |
|---|
| 3698 | 3791 | dst_release(ep->dst); |
|---|
| 3699 | 3792 | cxgb4_l2t_release(ep->l2t); |
|---|
| .. | .. |
|---|
| 3724 | 3817 | kfree_skb(rpl_skb); |
|---|
| 3725 | 3818 | } |
|---|
| 3726 | 3819 | return; |
|---|
| 3820 | +} |
|---|
| 3821 | + |
|---|
| 3822 | +static inline u64 t4_tcb_get_field64(__be64 *tcb, u16 word) |
|---|
| 3823 | +{ |
|---|
| 3824 | + u64 tlo = be64_to_cpu(tcb[((31 - word) / 2)]); |
|---|
| 3825 | + u64 thi = be64_to_cpu(tcb[((31 - word) / 2) - 1]); |
|---|
| 3826 | + u64 t; |
|---|
| 3827 | + u32 shift = 32; |
|---|
| 3828 | + |
|---|
| 3829 | + t = (thi << shift) | (tlo >> shift); |
|---|
| 3830 | + |
|---|
| 3831 | + return t; |
|---|
| 3832 | +} |
|---|
| 3833 | + |
|---|
| 3834 | +static inline u32 t4_tcb_get_field32(__be64 *tcb, u16 word, u32 mask, u32 shift) |
|---|
| 3835 | +{ |
|---|
| 3836 | + u32 v; |
|---|
| 3837 | + u64 t = be64_to_cpu(tcb[(31 - word) / 2]); |
|---|
| 3838 | + |
|---|
| 3839 | + if (word & 0x1) |
|---|
| 3840 | + shift += 32; |
|---|
| 3841 | + v = (t >> shift) & mask; |
|---|
| 3842 | + return v; |
|---|
| 3843 | +} |
|---|
| 3844 | + |
|---|
| 3845 | +static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) |
|---|
| 3846 | +{ |
|---|
| 3847 | + struct cpl_get_tcb_rpl *rpl = cplhdr(skb); |
|---|
| 3848 | + __be64 *tcb = (__be64 *)(rpl + 1); |
|---|
| 3849 | + unsigned int tid = GET_TID(rpl); |
|---|
| 3850 | + struct c4iw_ep *ep; |
|---|
| 3851 | + u64 t_flags_64; |
|---|
| 3852 | + u32 rx_pdu_out; |
|---|
| 3853 | + |
|---|
| 3854 | + ep = get_ep_from_tid(dev, tid); |
|---|
| 3855 | + if (!ep) |
|---|
| 3856 | + return 0; |
|---|
| 3857 | + /* Examine the TF_RX_PDU_OUT (bit 49 of the t_flags) in order to |
|---|
| 3858 | + * determine if there's a rx PDU feedback event pending. |
|---|
| 3859 | + * |
|---|
| 3860 | + * If that bit is set, it means we'll need to re-read the TCB's |
|---|
| 3861 | + * rq_start value. The final value is the one present in a TCB |
|---|
| 3862 | + * with the TF_RX_PDU_OUT bit cleared. |
|---|
| 3863 | + */ |
|---|
| 3864 | + |
|---|
| 3865 | + t_flags_64 = t4_tcb_get_field64(tcb, TCB_T_FLAGS_W); |
|---|
| 3866 | + rx_pdu_out = (t_flags_64 & TF_RX_PDU_OUT_V(1)) >> TF_RX_PDU_OUT_S; |
|---|
| 3867 | + |
|---|
| 3868 | + c4iw_put_ep(&ep->com); /* from get_ep_from_tid() */ |
|---|
| 3869 | + c4iw_put_ep(&ep->com); /* from read_tcb() */ |
|---|
| 3870 | + |
|---|
| 3871 | + /* If TF_RX_PDU_OUT bit is set, re-read the TCB */ |
|---|
| 3872 | + if (rx_pdu_out) { |
|---|
| 3873 | + if (++ep->rx_pdu_out_cnt >= 2) { |
|---|
| 3874 | + WARN_ONCE(1, "tcb re-read() reached the guard limit, finishing the cleanup\n"); |
|---|
| 3875 | + goto cleanup; |
|---|
| 3876 | + } |
|---|
| 3877 | + read_tcb(ep); |
|---|
| 3878 | + return 0; |
|---|
| 3879 | + } |
|---|
| 3880 | + |
|---|
| 3881 | + ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M, |
|---|
| 3882 | + TCB_RQ_START_S); |
|---|
| 3883 | +cleanup: |
|---|
| 3884 | + pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx); |
|---|
| 3885 | + |
|---|
| 3886 | + if (test_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) |
|---|
| 3887 | + finish_peer_abort(dev, ep); |
|---|
| 3888 | + else if (test_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) |
|---|
| 3889 | + send_abort_req(ep); |
|---|
| 3890 | + else |
|---|
| 3891 | + WARN_ONCE(1, "unexpected state!"); |
|---|
| 3892 | + |
|---|
| 3893 | + return 0; |
|---|
| 3727 | 3894 | } |
|---|
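
The new helpers treat the TCB payload of the CPL_GET_TCB_RPL as big-endian 64-bit entries holding thirty-two 32-bit words in reverse order: word `w` lives in entry `(31 - w) / 2`, in the high half when `w` is odd (hence the `shift += 32`), and `t4_tcb_get_field64()` stitches a 64-bit field together from two adjacent entries. `read_tcb_rpl()` re-issues the read while TF_RX_PDU_OUT is still set, bounded by a two-attempt guard, before trusting TCB_RQ_START as the cached SRQ index. A small worked use, assuming the word/mask/shift constants from the cxgb4 TCB header as referenced above:

```c
/* Illustrative only: tcb points at the __be64 array following the CPL header. */
static u32 example_rq_start(__be64 *tcb)
{
	/* Same extraction read_tcb_rpl() performs for ep->srqe_idx. */
	return t4_tcb_get_field32(tcb, TCB_RQ_START_W,
				  TCB_RQ_START_M, TCB_RQ_START_S);
}

static u32 example_word16(__be64 *tcb)
{
	/* Even word 16 sits in entry (31 - 16) / 2 = 7, low half, so shift 0. */
	return t4_tcb_get_field32(tcb, 16, 0xffffffff, 0);
}
```
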
| 3728 | 3895 | |
|---|
| 3729 | 3896 | static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) |
|---|
| .. | .. |
|---|
| 3952 | 4119 | } else { |
|---|
| 3953 | 4120 | vlan_eh = (struct vlan_ethhdr *)(req + 1); |
|---|
| 3954 | 4121 | iph = (struct iphdr *)(vlan_eh + 1); |
|---|
| 3955 | | - skb->vlan_tci = ntohs(cpl->vlan); |
|---|
| 4122 | + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan)); |
|---|
| 3956 | 4123 | } |
|---|
| 3957 | 4124 | |
|---|
| 3958 | 4125 | if (iph->version != 0x4) |
|---|
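
When rebuilding the SYN for the neighbour lookup, the VLAN tag is now attached with the hardware-accel helper instead of writing `skb->vlan_tci` directly, so the tag protocol and the tag-present state are recorded as well. Minimal sketch of the helper's use:

```c
#include <linux/if_vlan.h>

/* Sketch only: attach an 802.1Q tag to an skb the way the hunk above does. */
static void tag_skb(struct sk_buff *skb, u16 tci)
{
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
}
```
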
| .. | .. |
|---|
| 4046 | 4213 | [CPL_CLOSE_CON_RPL] = close_con_rpl, |
|---|
| 4047 | 4214 | [CPL_RDMA_TERMINATE] = terminate, |
|---|
| 4048 | 4215 | [CPL_FW4_ACK] = fw4_ack, |
|---|
| 4216 | + [CPL_GET_TCB_RPL] = read_tcb_rpl, |
|---|
| 4049 | 4217 | [CPL_FW6_MSG] = deferred_fw6_msg, |
|---|
| 4050 | 4218 | [CPL_RX_PKT] = rx_pkt, |
|---|
| 4051 | 4219 | [FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe, |
|---|
| .. | .. |
|---|
| 4277 | 4445 | [CPL_RDMA_TERMINATE] = sched, |
|---|
| 4278 | 4446 | [CPL_FW4_ACK] = sched, |
|---|
| 4279 | 4447 | [CPL_SET_TCB_RPL] = set_tcb_rpl, |
|---|
| 4448 | + [CPL_GET_TCB_RPL] = sched, |
|---|
| 4280 | 4449 | [CPL_FW6_MSG] = fw6_msg, |
|---|
| 4281 | 4450 | [CPL_RX_PKT] = sched |
|---|
| 4282 | 4451 | }; |
|---|