  .. |   .. |
   1 |    1 | /*
   2 |    2 |  * Linux driver for VMware's vmxnet3 ethernet NIC.
   3 |    3 |  *
   4 |      | - * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
     |    4 | + * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
   5 |    5 |  *
   6 |    6 |  * This program is free software; you can redistribute it and/or modify it
   7 |    7 |  * under the terms of the GNU General Public License as published by the
  .. |   .. |
 535 |  535 | }
 536 |  536 |
 537 |  537 | 	sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
 538 |      | -	tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz,
 539 |      | -					   &tq->buf_info_pa, GFP_KERNEL);
     |  538 | +	tq->buf_info = dma_alloc_coherent(&adapter->pdev->dev, sz,
     |  539 | +					  &tq->buf_info_pa, GFP_KERNEL);
 540 |  540 | 	if (!tq->buf_info)
 541 |  541 | 		goto err;
 542 |  542 |
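
dma_zalloc_coherent() was removed around v5.0, after the dma-mapping core
was changed to zero all memory returned by dma_alloc_coherent(); callers
were converted to the plain allocator with no change in behaviour. The old
helper was only a thin wrapper, roughly:

	/* Historical wrapper from <linux/dma-mapping.h>, since removed;
	 * shown here for reference only.
	 */
	static inline void *dma_zalloc_coherent(struct device *dev,
						size_t size,
						dma_addr_t *dma_handle,
						gfp_t flag)
	{
		return dma_alloc_coherent(dev, size, dma_handle,
					  flag | __GFP_ZERO);
	}

The same substitution appears in the receive-queue setup below, and the
guaranteed zeroing is also why the explicit memset() of coal_conf can be
dropped near the end of this patch.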
  .. |   .. |
 595 |  595 | 				if (dma_mapping_error(&adapter->pdev->dev,
 596 |  596 | 						      rbi->dma_addr)) {
 597 |  597 | 					dev_kfree_skb_any(rbi->skb);
     |  598 | +					rbi->skb = NULL;
 598 |  599 | 					rq->stats.rx_buf_alloc_failure++;
 599 |  600 | 					break;
 600 |  601 | 				}
  .. |   .. |
 619 |  620 | 				if (dma_mapping_error(&adapter->pdev->dev,
 620 |  621 | 						      rbi->dma_addr)) {
 621 |  622 | 					put_page(rbi->page);
     |  623 | +					rbi->page = NULL;
 622 |  624 | 					rq->stats.rx_buf_alloc_failure++;
 623 |  625 | 					break;
 624 |  626 | 				}
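
This hunk and the previous one each add a one-line fix for a potential
double free: when DMA mapping fails, the skb (or page) is released, but the
ring slot's buf_info still held the stale pointer, so a later cleanup or
refill pass could free it again. Clearing the pointer marks the slot empty,
which is what the refill loop in vmxnet3_rq_alloc_rx_buf() keys on, along
the lines of (simplified):

	if (rbi->skb == NULL) {
		/* Slot is empty, so a fresh buffer may be allocated
		 * and DMA-mapped here.
		 */
		rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
						       rbi->len, GFP_KERNEL);
	}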
  .. |   .. |
 657 |  659 | vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
 658 |  660 | 		    struct vmxnet3_rx_buf_info *rbi)
 659 |  661 | {
 660 |      | -	struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
 661 |      | -				       skb_shinfo(skb)->nr_frags;
     |  662 | +	skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
 662 |  663 |
 663 |  664 | 	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
 664 |  665 |
 665 |  666 | 	__skb_frag_set_page(frag, rbi->page);
 666 |      | -	frag->page_offset = 0;
     |  667 | +	skb_frag_off_set(frag, 0);
 667 |  668 | 	skb_frag_size_set(frag, rcd->len);
 668 |  669 | 	skb->data_len += rcd->len;
 669 |  670 | 	skb->truesize += PAGE_SIZE;
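
Since v5.4, skb_frag_t is a typedef for struct bio_vec, so poking the old
struct skb_frag_struct fields directly no longer compiles; fragments have
to be manipulated through accessors. The new setter is essentially:

	/* Accessor from <linux/skbuff.h> (v5.4-era), for reference. */
	static inline void skb_frag_off_set(skb_frag_t *frag, u32 offset)
	{
		frag->bv_offset = offset;
	}

The same skb_frag_struct to skb_frag_t rename recurs in the two read-only
fragment loops further down, where only the declared type changes.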
  .. |   .. |
 755 |  756 | 	}
 756 |  757 |
 757 |  758 | 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 758 |      | -		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
     |  759 | +		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 759 |  760 | 		u32 buf_size;
 760 |  761 |
 761 |  762 | 		buf_offset = 0;
  .. |   .. |
 843 |  844 | 	u8 protocol = 0;
 844 |  845 |
 845 |  846 | 	if (ctx->mss) {	/* TSO */
 846 |      | -		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
 847 |      | -		ctx->l4_hdr_size = tcp_hdrlen(skb);
 848 |      | -		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
     |  847 | +		if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
     |  848 | +			ctx->l4_offset = skb_inner_transport_offset(skb);
     |  849 | +			ctx->l4_hdr_size = inner_tcp_hdrlen(skb);
     |  850 | +			ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
     |  851 | +		} else {
     |  852 | +			ctx->l4_offset = skb_transport_offset(skb);
     |  853 | +			ctx->l4_hdr_size = tcp_hdrlen(skb);
     |  854 | +			ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
     |  855 | +		}
 849 |  856 | 	} else {
 850 |  857 | 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 851 |      | -			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
     |  858 | +			/* For encap packets, skb_checksum_start_offset refers
     |  859 | +			 * to inner L4 offset. Thus, below works for encap as
     |  860 | +			 * well as non-encap case
     |  861 | +			 */
     |  862 | +			ctx->l4_offset = skb_checksum_start_offset(skb);
 852 |  863 |
 853 |      | -			if (ctx->ipv4) {
 854 |      | -				const struct iphdr *iph = ip_hdr(skb);
     |  864 | +			if (VMXNET3_VERSION_GE_4(adapter) &&
     |  865 | +			    skb->encapsulation) {
     |  866 | +				struct iphdr *iph = inner_ip_hdr(skb);
 855 |  867 |
 856 |      | -				protocol = iph->protocol;
 857 |      | -			} else if (ctx->ipv6) {
 858 |      | -				const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
     |  868 | +				if (iph->version == 4) {
     |  869 | +					protocol = iph->protocol;
     |  870 | +				} else {
     |  871 | +					const struct ipv6hdr *ipv6h;
 859 |  872 |
 860 |      | -				protocol = ipv6h->nexthdr;
     |  873 | +					ipv6h = inner_ipv6_hdr(skb);
     |  874 | +					protocol = ipv6h->nexthdr;
     |  875 | +				}
     |  876 | +			} else {
     |  877 | +				if (ctx->ipv4) {
     |  878 | +					const struct iphdr *iph = ip_hdr(skb);
     |  879 | +
     |  880 | +					protocol = iph->protocol;
     |  881 | +				} else if (ctx->ipv6) {
     |  882 | +					const struct ipv6hdr *ipv6h;
     |  883 | +
     |  884 | +					ipv6h = ipv6_hdr(skb);
     |  885 | +					protocol = ipv6h->nexthdr;
     |  886 | +				}
 861 |  887 | 			}
 862 |  888 |
 863 |  889 | 			switch (protocol) {
 864 |  890 | 			case IPPROTO_TCP:
 865 |      | -				ctx->l4_hdr_size = tcp_hdrlen(skb);
     |  891 | +				ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) :
     |  892 | +						   tcp_hdrlen(skb);
 866 |  893 | 				break;
 867 |  894 | 			case IPPROTO_UDP:
 868 |  895 | 				ctx->l4_hdr_size = sizeof(struct udphdr);
  .. |   .. |
 872 |  899 | 				break;
 873 |  900 | 			}
 874 |  901 |
 875 |      | -			ctx->copy_size = min(ctx->eth_ip_hdr_size +
     |  902 | +			ctx->copy_size = min(ctx->l4_offset +
 876 |  903 | 					     ctx->l4_hdr_size, skb->len);
 877 |  904 | 		} else {
 878 |      | -			ctx->eth_ip_hdr_size = 0;
     |  905 | +			ctx->l4_offset = 0;
 879 |  906 | 			ctx->l4_hdr_size = 0;
 880 |  907 | 			/* copy as much as allowed */
 881 |  908 | 			ctx->copy_size = min_t(unsigned int,
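
This hunk renames ctx->eth_ip_hdr_size to ctx->l4_offset, a more accurate
name now that the offset may point past an outer tunnel header, and teaches
vmxnet3_parse_hdr() to use the inner headers when a version-4 device is
handed an encapsulated frame. An illustrative layout (descriptive only, not
taken from the driver):

	/*
	 * outer Eth | outer IP | UDP | tunnel | inner Eth | inner IP | TCP
	 * ^ skb->data                                                ^ inner L4
	 *
	 * encap TSO:  l4_offset = skb_inner_transport_offset(skb)
	 *             copy_size = l4_offset + inner_tcp_hdrlen(skb)
	 * plain TSO:  l4_offset = skb_transport_offset(skb)
	 *             copy_size = l4_offset + tcp_hdrlen(skb)
	 */

In either case everything up to and including the (inner) L4 header is
copied into the queue's header buffer so the device can parse it.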
  .. |   .. |
 931 |  958 |
 932 |  959 |
 933 |  960 | static void
     |  961 | +vmxnet3_prepare_inner_tso(struct sk_buff *skb,
     |  962 | +			  struct vmxnet3_tx_ctx *ctx)
     |  963 | +{
     |  964 | +	struct tcphdr *tcph = inner_tcp_hdr(skb);
     |  965 | +	struct iphdr *iph = inner_ip_hdr(skb);
     |  966 | +
     |  967 | +	if (iph->version == 4) {
     |  968 | +		iph->check = 0;
     |  969 | +		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
     |  970 | +						 IPPROTO_TCP, 0);
     |  971 | +	} else {
     |  972 | +		struct ipv6hdr *iph = inner_ipv6_hdr(skb);
     |  973 | +
     |  974 | +		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
     |  975 | +					       IPPROTO_TCP, 0);
     |  976 | +	}
     |  977 | +}
     |  978 | +
     |  979 | +static void
 934 |  980 | vmxnet3_prepare_tso(struct sk_buff *skb,
 935 |  981 | 		    struct vmxnet3_tx_ctx *ctx)
 936 |  982 | {
  .. |   .. |
 943 |  989 | 		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
 944 |  990 | 						 IPPROTO_TCP, 0);
 945 |  991 | 	} else if (ctx->ipv6) {
 946 |      | -		struct ipv6hdr *iph = ipv6_hdr(skb);
 947 |      | -
 948 |      | -		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
 949 |      | -					       IPPROTO_TCP, 0);
     |  992 | +		tcp_v6_gso_csum_prep(skb);
 950 |  993 | 	}
 951 |  994 | }
 952 |  995 |
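
For TSO the driver seeds the TCP checksum with the ones' complement sum of
the pseudo-header, so the device only folds in payload data while it
segments; vmxnet3_prepare_inner_tso() does the same for the inner headers
of an encapsulated frame. The IPv6 branch of the plain path now uses the
common helper instead of open-coding the csum_ipv6_magic() call; the helper
is roughly:

	/* From <net/ip6_checksum.h> (v5.7-era), shown for reference. */
	static inline void tcp_v6_gso_csum_prep(struct sk_buff *skb)
	{
		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		ipv6h->payload_len = 0;
		th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
	}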
  .. |   .. |
 956 |  999 | 	int i;
 957 | 1000 |
 958 | 1001 | 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 959 |      | -		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
     | 1002 | +		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 960 | 1003 |
 961 | 1004 | 		count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
 962 | 1005 | 	}
  .. |   .. |
1007 | 1050 | 			}
1008 | 1051 | 			tq->stats.copy_skb_header++;
1009 | 1052 | 		}
1010 |      | -		vmxnet3_prepare_tso(skb, &ctx);
     | 1053 | +		if (skb->encapsulation) {
     | 1054 | +			vmxnet3_prepare_inner_tso(skb, &ctx);
     | 1055 | +		} else {
     | 1056 | +			vmxnet3_prepare_tso(skb, &ctx);
     | 1057 | +		}
1011 | 1058 | 	} else {
1012 | 1059 | 		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
1013 | 1060 |
  .. |   .. |
1030 | 1077 | 	BUG_ON(ret <= 0 && ctx.copy_size != 0);
1031 | 1078 | 	/* hdrs parsed, check against other limits */
1032 | 1079 | 	if (ctx.mss) {
1033 |      | -		if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
     | 1080 | +		if (unlikely(ctx.l4_offset + ctx.l4_hdr_size >
1034 | 1081 | 			     VMXNET3_MAX_TX_BUF_SIZE)) {
1035 | 1082 | 			tq->stats.drop_oversized_hdr++;
1036 | 1083 | 			goto drop_pkt;
1037 | 1084 | 		}
1038 | 1085 | 	} else {
1039 | 1086 | 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1040 |      | -			if (unlikely(ctx.eth_ip_hdr_size +
     | 1087 | +			if (unlikely(ctx.l4_offset +
1041 | 1088 | 				     skb->csum_offset >
1042 | 1089 | 				     VMXNET3_MAX_CSUM_OFFSET)) {
1043 | 1090 | 				tq->stats.drop_oversized_hdr++;
  .. |   .. |
1084 | 1131 | #endif
1085 | 1132 | 	tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
1086 | 1133 | 	if (ctx.mss) {
1087 |      | -		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
1088 |      | -		gdesc->txd.om = VMXNET3_OM_TSO;
1089 |      | -		gdesc->txd.msscof = ctx.mss;
     | 1134 | +		if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
     | 1135 | +			gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
     | 1136 | +			gdesc->txd.om = VMXNET3_OM_ENCAP;
     | 1137 | +			gdesc->txd.msscof = ctx.mss;
     | 1138 | +
     | 1139 | +			if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
     | 1140 | +				gdesc->txd.oco = 1;
     | 1141 | +		} else {
     | 1142 | +			gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
     | 1143 | +			gdesc->txd.om = VMXNET3_OM_TSO;
     | 1144 | +			gdesc->txd.msscof = ctx.mss;
     | 1145 | +		}
1090 | 1146 | 		num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
1091 | 1147 | 	} else {
1092 | 1148 | 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1093 |      | -			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
1094 |      | -			gdesc->txd.om = VMXNET3_OM_CSUM;
1095 |      | -			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
1096 |      | -					    skb->csum_offset;
     | 1149 | +			if (VMXNET3_VERSION_GE_4(adapter) &&
     | 1150 | +			    skb->encapsulation) {
     | 1151 | +				gdesc->txd.hlen = ctx.l4_offset +
     | 1152 | +						  ctx.l4_hdr_size;
     | 1153 | +				gdesc->txd.om = VMXNET3_OM_ENCAP;
     | 1154 | +				gdesc->txd.msscof = 0;		/* Reserved */
     | 1155 | +			} else {
     | 1156 | +				gdesc->txd.hlen = ctx.l4_offset;
     | 1157 | +				gdesc->txd.om = VMXNET3_OM_CSUM;
     | 1158 | +				gdesc->txd.msscof = ctx.l4_offset +
     | 1159 | +						    skb->csum_offset;
     | 1160 | +			}
1097 | 1161 | 		} else {
1098 | 1162 | 			gdesc->txd.om = 0;
1099 | 1163 | 			gdesc->txd.msscof = 0;
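
Descriptor programming for the new offload mode. With VMXNET3_OM_ENCAP the
device parses the outer and inner headers itself: hlen covers everything up
to and including the inner L4 header, msscof carries the MSS for TSO but is
reserved (zero) for plain checksum offload, and the oco bit additionally
requests an outer UDP checksum when the stack asked for
SKB_GSO_UDP_TUNNEL_CSUM. A compact summary of the modes used here:

	/* om = 0                 -> no offload, msscof = 0
	 * om = VMXNET3_OM_CSUM   -> hlen = L4 offset, msscof = csum offset
	 * om = VMXNET3_OM_TSO    -> hlen = L4 offset + L4 hdr, msscof = mss
	 * om = VMXNET3_OM_ENCAP  -> like TSO/CSUM, but for inner headers (v4+)
	 */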
  .. |   .. |
1172 | 1236 | 		    (le32_to_cpu(gdesc->dword[3]) &
1173 | 1237 | 		     VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
1174 | 1238 | 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1175 |      | -			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
1176 |      | -			BUG_ON(gdesc->rcd.frg);
     | 1239 | +			WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
     | 1240 | +				     !(le32_to_cpu(gdesc->dword[0]) &
     | 1241 | +				       (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
     | 1242 | +			WARN_ON_ONCE(gdesc->rcd.frg &&
     | 1243 | +				     !(le32_to_cpu(gdesc->dword[0]) &
     | 1244 | +				       (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1177 | 1245 | 		} else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
1178 | 1246 | 			   (1 << VMXNET3_RCD_TUC_SHIFT))) {
1179 | 1247 | 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1180 |      | -			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
1181 |      | -			BUG_ON(gdesc->rcd.frg);
     | 1248 | +			WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
     | 1249 | +				     !(le32_to_cpu(gdesc->dword[0]) &
     | 1250 | +				       (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
     | 1251 | +			WARN_ON_ONCE(gdesc->rcd.frg &&
     | 1252 | +				     !(le32_to_cpu(gdesc->dword[0]) &
     | 1253 | +				       (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
1182 | 1254 | 		} else {
1183 | 1255 | 			if (gdesc->rcd.csum) {
1184 | 1256 | 				skb->csum = htons(gdesc->rcd.csum);
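
Two changes happen at once in vmxnet3_rx_csum(). The old invariants (the
completion descriptor must flag TCP or UDP and must not flag a fragment) no
longer hold when the device validated an inner packet, so each assertion is
relaxed by the inner-offload bit in dword[0]. And BUG_ON() becomes
WARN_ON_ONCE(): an unexpected value in a device-supplied descriptor merits
a one-time warning, not a host crash.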
  .. |   .. |
1284 | 1356 | 	};
1285 | 1357 | 	u32 num_pkts = 0;
1286 | 1358 | 	bool skip_page_frags = false;
     | 1359 | +	bool encap_lro = false;
1287 | 1360 | 	struct Vmxnet3_RxCompDesc *rcd;
1288 | 1361 | 	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
1289 | 1362 | 	u16 segCnt = 0, mss = 0;
  .. |   .. |
1424 | 1497 | 		if (VMXNET3_VERSION_GE_2(adapter) &&
1425 | 1498 | 		    rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
1426 | 1499 | 			struct Vmxnet3_RxCompDescExt *rcdlro;
     | 1500 | +			union Vmxnet3_GenericDesc *gdesc;
     | 1501 | +
1427 | 1502 | 			rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
     | 1503 | +			gdesc = (union Vmxnet3_GenericDesc *)rcd;
1428 | 1504 |
1429 | 1505 | 			segCnt = rcdlro->segCnt;
1430 | 1506 | 			WARN_ON_ONCE(segCnt == 0);
1431 | 1507 | 			mss = rcdlro->mss;
1432 | 1508 | 			if (unlikely(segCnt <= 1))
1433 | 1509 | 				segCnt = 0;
     | 1510 | +			encap_lro = (le32_to_cpu(gdesc->dword[0]) &
     | 1511 | +				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT));
1434 | 1512 | 		} else {
1435 | 1513 | 			segCnt = 0;
1436 | 1514 | 		}
  .. |   .. |
1498 | 1576 | 			vmxnet3_rx_csum(adapter, skb,
1499 | 1577 | 					(union Vmxnet3_GenericDesc *)rcd);
1500 | 1578 | 			skb->protocol = eth_type_trans(skb, adapter->netdev);
1501 |      | -			if (!rcd->tcp ||
     | 1579 | +			if ((!rcd->tcp && !encap_lro) ||
1502 | 1580 | 			    !(adapter->netdev->features & NETIF_F_LRO))
1503 | 1581 | 				goto not_lro;
1504 | 1582 |
  .. |   .. |
1507 | 1585 | 					SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
1508 | 1586 | 				skb_shinfo(skb)->gso_size = mss;
1509 | 1587 | 				skb_shinfo(skb)->gso_segs = segCnt;
1510 |      | -			} else if (segCnt != 0 || skb->len > mtu) {
     | 1588 | +			} else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) {
1511 | 1589 | 				u32 hlen;
1512 | 1590 |
1513 | 1591 | 				hlen = vmxnet3_get_hdr_len(adapter, skb,
  .. |   .. |
1536 | 1614 | 			napi_gro_receive(&rq->napi, skb);
1537 | 1615 |
1538 | 1616 | 			ctx->skb = NULL;
     | 1617 | +			encap_lro = false;
1539 | 1618 | 			num_pkts++;
1540 | 1619 | 		}
1541 | 1620 |
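
The encap_lro flag threads the inner-offload bit from the LRO completion
descriptor through the receive loop: when set, an aggregated frame is
accepted for LRO even though rcd->tcp describes the outer UDP headers, the
manual GSO fixup for coalesced frames is skipped (its length arithmetic
assumes non-encapsulated headers), and the flag is cleared once the packet
has been passed to napi_gro_receive() so it cannot leak into the next
completion.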
  .. |   .. |
1583 | 1662 | {
1584 | 1663 | 	u32 i, ring_idx;
1585 | 1664 | 	struct Vmxnet3_RxDesc *rxd;
     | 1665 | +
     | 1666 | +	/* ring has already been cleaned up */
     | 1667 | +	if (!rq->rx_ring[0].base)
     | 1668 | +		return;
1586 | 1669 |
1587 | 1670 | 	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1588 | 1671 | 		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
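
The early return makes vmxnet3_rq_cleanup() idempotent: the rings are freed
with rx_ring[].base reset to NULL, so if a teardown path invokes cleanup a
second time it now sees the NULL base and returns instead of walking freed
descriptor memory.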
  .. |   .. |
1815 | 1898 |
1816 | 1899 | 	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
1817 | 1900 | 						   rq->rx_ring[1].size);
1818 |      | -	bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
1819 |      | -				 GFP_KERNEL);
     | 1901 | +	bi = dma_alloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
     | 1902 | +				GFP_KERNEL);
1820 | 1903 | 	if (!bi)
1821 | 1904 | 		goto err;
1822 | 1905 |
  .. |   .. |
2433 | 2516 | 	if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2434 | 2517 | 		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2435 | 2518 |
     | 2519 | +	if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL |
     | 2520 | +					 NETIF_F_GSO_UDP_TUNNEL_CSUM))
     | 2521 | +		devRead->misc.uptFeatures |= UPT1_F_RXINNEROFLD;
     | 2522 | +
2436 | 2523 | 	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2437 | 2524 | 	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2438 | 2525 | 	devRead->misc.queueDescLen = cpu_to_le32(
  .. |   .. |
2558 | 2645 | 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2559 | 2646 | }
2560 | 2647 |
     | 2648 | +static void
     | 2649 | +vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter)
     | 2650 | +{
     | 2651 | +	struct Vmxnet3_DriverShared *shared = adapter->shared;
     | 2652 | +	union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
     | 2653 | +	unsigned long flags;
     | 2654 | +
     | 2655 | +	if (!VMXNET3_VERSION_GE_4(adapter))
     | 2656 | +		return;
     | 2657 | +
     | 2658 | +	spin_lock_irqsave(&adapter->cmd_lock, flags);
     | 2659 | +
     | 2660 | +	if (adapter->default_rss_fields) {
     | 2661 | +		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
     | 2662 | +				       VMXNET3_CMD_GET_RSS_FIELDS);
     | 2663 | +		adapter->rss_fields =
     | 2664 | +			VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
     | 2665 | +	} else {
     | 2666 | +		cmdInfo->setRssFields = adapter->rss_fields;
     | 2667 | +		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
     | 2668 | +				       VMXNET3_CMD_SET_RSS_FIELDS);
     | 2669 | +		/* Not all requested RSS may get applied, so get and
     | 2670 | +		 * cache what was actually applied.
     | 2671 | +		 */
     | 2672 | +		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
     | 2673 | +				       VMXNET3_CMD_GET_RSS_FIELDS);
     | 2674 | +		adapter->rss_fields =
     | 2675 | +			VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
     | 2676 | +	}
     | 2677 | +
     | 2678 | +	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
     | 2679 | +}
     | 2680 | +
2561 | 2681 | int
2562 | 2682 | vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2563 | 2683 | {
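
vmxnet3_init_rssfields() follows the driver's usual command-register
protocol: under cmd_lock, a command code is written to VMXNET3_REG_CMD and
its result is read back from the same register, with wider arguments passed
through the shared cmdInfo area. On first activation default_rss_fields is
true and the driver just caches the device's defaults; once the user has
picked explicit hash fields (through the matching ethtool code in
vmxnet3_ethtool.c, not shown in this hunk), activation pushes the cached
value with SET and immediately re-reads it with GET, since the device may
honour only a subset of the requested fields.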
  .. |   .. |
2607 | 2727 | 	}
2608 | 2728 |
2609 | 2729 | 	vmxnet3_init_coalesce(adapter);
     | 2730 | +	vmxnet3_init_rssfields(adapter);
2610 | 2731 |
2611 | 2732 | 	for (i = 0; i < adapter->num_rx_queues; i++) {
2612 | 2733 | 		VMXNET3_WRITE_BAR0_REG(adapter,
  .. |   .. |
3043 | 3164 | 		NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3044 | 3165 | 		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3045 | 3166 | 		NETIF_F_LRO;
     | 3167 | +
     | 3168 | +	if (VMXNET3_VERSION_GE_4(adapter)) {
     | 3169 | +		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
     | 3170 | +			NETIF_F_GSO_UDP_TUNNEL_CSUM;
     | 3171 | +
     | 3172 | +		netdev->hw_enc_features = NETIF_F_SG | NETIF_F_RXCSUM |
     | 3173 | +			NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
     | 3174 | +			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
     | 3175 | +			NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
     | 3176 | +			NETIF_F_GSO_UDP_TUNNEL_CSUM;
     | 3177 | +	}
     | 3178 | +
3046 | 3179 | 	if (dma64)
3047 | 3180 | 		netdev->hw_features |= NETIF_F_HIGHDMA;
3048 | 3181 | 	netdev->vlan_features = netdev->hw_features &
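
hw_features gains the two UDP-tunnel GSO flags so the new offloads can be
toggled from userspace, while hw_enc_features declares which features
remain usable when the headers are tunnel-encapsulated; the core consults
hw_enc_features (see netif_skb_features()) before handing the driver an
encapsulated skb unsegmented or with CHECKSUM_PARTIAL.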
  .. |   .. |
3199 | 3332 |
3200 | 3333 |
3201 | 3334 | static void
3202 |      | -vmxnet3_tx_timeout(struct net_device *netdev)
     | 3335 | +vmxnet3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
3203 | 3336 | {
3204 | 3337 | 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3205 | 3338 | 	adapter->tx_timeout_count++;
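
A mechanical adaptation to a core API change: since v5.6 (commit
0290bd291cc0, "netdev: pass the stuck queue to the timeout handler") the
watchdog passes the index of the stalled queue:

	/* ndo prototype from <linux/netdevice.h> since v5.6: */
	void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);

vmxnet3 resets the whole device on a timeout, so the new argument is
intentionally unused here.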
  .. |   .. |
3247 | 3380 | 	.ndo_start_xmit = vmxnet3_xmit_frame,
3248 | 3381 | 	.ndo_set_mac_address = vmxnet3_set_mac_addr,
3249 | 3382 | 	.ndo_change_mtu = vmxnet3_change_mtu,
     | 3383 | +	.ndo_fix_features = vmxnet3_fix_features,
3250 | 3384 | 	.ndo_set_features = vmxnet3_set_features,
     | 3385 | +	.ndo_features_check = vmxnet3_features_check,
3251 | 3386 | 	.ndo_get_stats64 = vmxnet3_get_stats64,
3252 | 3387 | 	.ndo_tx_timeout = vmxnet3_tx_timeout,
3253 | 3388 | 	.ndo_set_rx_mode = vmxnet3_set_mc,
  .. |   .. |
3385 | 3520 | 		goto err_alloc_pci;
3386 | 3521 |
3387 | 3522 | 	ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
3388 |      | -	if (ver & (1 << VMXNET3_REV_3)) {
     | 3523 | +	if (ver & (1 << VMXNET3_REV_4)) {
     | 3524 | +		VMXNET3_WRITE_BAR1_REG(adapter,
     | 3525 | +				       VMXNET3_REG_VRRS,
     | 3526 | +				       1 << VMXNET3_REV_4);
     | 3527 | +		adapter->version = VMXNET3_REV_4 + 1;
     | 3528 | +	} else if (ver & (1 << VMXNET3_REV_3)) {
3389 | 3529 | 		VMXNET3_WRITE_BAR1_REG(adapter,
3390 | 3530 | 				       VMXNET3_REG_VRRS,
3391 | 3531 | 				       1 << VMXNET3_REV_3);
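
Version negotiation at probe time: VMXNET3_REG_VRRS exposes a bitmask of
the revisions the emulated device supports; the driver picks the highest
revision it implements and writes that single bit back to commit to it.
Adding revision 4 at the top of the cascade is all that is needed to opt
in. adapter->version stores the revision plus one, which is what the
VMXNET3_VERSION_GE_4() checks used throughout this patch compare against.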
  .. |   .. |
3429 | 3569 | 		err = -ENOMEM;
3430 | 3570 | 		goto err_ver;
3431 | 3571 | 	}
3432 |      | -	memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
3433 | 3572 | 	adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
3434 | 3573 | 	adapter->default_coal_mode = true;
     | 3574 | +	}
     | 3575 | +
     | 3576 | +	if (VMXNET3_VERSION_GE_4(adapter)) {
     | 3577 | +		adapter->default_rss_fields = true;
     | 3578 | +		adapter->rss_fields = VMXNET3_RSS_FIELDS_DEFAULT;
3435 | 3579 | 	}
3436 | 3580 |
3437 | 3581 | 	SET_NETDEV_DEV(netdev, &pdev->dev);
  .. |   .. |
3650 | 3794 | 	}
3651 | 3795 |
3652 | 3796 | 	if (adapter->wol & WAKE_ARP) {
3653 |      | -		in_dev = in_dev_get(netdev);
3654 |      | -		if (!in_dev)
3655 |      | -			goto skip_arp;
     | 3797 | +		rcu_read_lock();
3656 | 3798 |
3657 |      | -		ifa = (struct in_ifaddr *)in_dev->ifa_list;
3658 |      | -		if (!ifa)
     | 3799 | +		in_dev = __in_dev_get_rcu(netdev);
     | 3800 | +		if (!in_dev) {
     | 3801 | +			rcu_read_unlock();
3659 | 3802 | 			goto skip_arp;
     | 3803 | +		}
     | 3804 | +
     | 3805 | +		ifa = rcu_dereference(in_dev->ifa_list);
     | 3806 | +		if (!ifa) {
     | 3807 | +			rcu_read_unlock();
     | 3808 | +			goto skip_arp;
     | 3809 | +		}
3660 | 3810 |
3661 | 3811 | 		pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
3662 | 3812 | 						 sizeof(struct arphdr) + /* ARP header */
  .. |   .. |
3676 | 3826 |
3677 | 3827 | 		/* The Unicast IPv4 address in 'tip' field. */
3678 | 3828 | 		arpreq += 2 * ETH_ALEN + sizeof(u32);
3679 |      | -		*(u32 *)arpreq = ifa->ifa_address;
     | 3829 | +		*(__be32 *)arpreq = ifa->ifa_address;
     | 3830 | +
     | 3831 | +		rcu_read_unlock();
3680 | 3832 |
3681 | 3833 | 		/* The mask for the relevant bits. */
3682 | 3834 | 		pmConf->filters[i].mask[0] = 0x00;
  .. |   .. |
3685 | 3837 | 		pmConf->filters[i].mask[3] = 0x00;
3686 | 3838 | 		pmConf->filters[i].mask[4] = 0xC0;	/* IPv4 TIP */
3687 | 3839 | 		pmConf->filters[i].mask[5] = 0x03;	/* IPv4 TIP */
3688 |      | -		in_dev_put(in_dev);
3689 | 3840 |
3690 | 3841 | 		pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
3691 | 3842 | 		i++;
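
The WAKE_ARP path is converted from refcounting to RCU: in_dev->ifa_list is
an RCU-protected list, so the in_dev_get()/in_dev_put() pair is replaced by
__in_dev_get_rcu() under rcu_read_lock(), ifa_list is fetched with
rcu_dereference(), and the address is copied into the wake-up pattern
before the lock is dropped. The simplified shape of the conversion:

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(netdev);
	ifa = in_dev ? rcu_dereference(in_dev->ifa_list) : NULL;
	if (ifa)
		addr = ifa->ifa_address;	/* copy out under the lock */
	rcu_read_unlock();

The (__be32 *) cast in the same region fixes a sparse endianness warning,
since ifa_address is a big-endian value, and the now-unneeded in_dev_put()
is removed.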
---|