@@ -1,7 +1,7 @@
 /*
  * Linux driver for VMware's vmxnet3 ethernet NIC.
  *
- * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -535,8 +535,8 @@
         }
 
         sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
-        tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz,
-                                           &tq->buf_info_pa, GFP_KERNEL);
+        tq->buf_info = dma_alloc_coherent(&adapter->pdev->dev, sz,
+                                          &tq->buf_info_pa, GFP_KERNEL);
         if (!tq->buf_info)
                 goto err;
 
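Both buf_info allocations in this patch move from dma_zalloc_coherent() to dma_alloc_coherent(): since v5.0, dma_alloc_coherent() already returns zeroed memory, and the zalloc wrapper was removed. The same guarantee is why the explicit memset() on coal_conf disappears further down. A minimal sketch of the pattern, assuming dev and sz are in scope:

```c
/* Sketch: coherent DMA allocation as used above (dev/sz assumed).
 * dma_alloc_coherent() returns zeroed memory, so no memset is needed. */
dma_addr_t pa;
void *va;

va = dma_alloc_coherent(dev, sz, &pa, GFP_KERNEL);
if (!va)
        return -ENOMEM;

/* ... hand 'pa' to the device, access the buffer via 'va' ... */

dma_free_coherent(dev, sz, va, pa);
```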
@@ -595,6 +595,7 @@
                                 if (dma_mapping_error(&adapter->pdev->dev,
                                                       rbi->dma_addr)) {
                                         dev_kfree_skb_any(rbi->skb);
+                                        rbi->skb = NULL;
                                         rq->stats.rx_buf_alloc_failure++;
                                         break;
                                 }
@@ -619,6 +620,7 @@
                                 if (dma_mapping_error(&adapter->pdev->dev,
                                                       rbi->dma_addr)) {
                                         put_page(rbi->page);
+                                        rbi->page = NULL;
                                         rq->stats.rx_buf_alloc_failure++;
                                         break;
                                 }
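Both receive mapping-error paths now clear the stale buf_info pointer after freeing. buf_info[] persists across refill attempts, so without the NULL a later cleanup pass could free the same skb or page a second time. The idiom, sketched:

```c
/* Sketch: free-and-clear on DMA mapping failure, so the slot reads as
 * empty when the refill loop or ring cleanup walks buf_info[] again. */
if (dma_mapping_error(&adapter->pdev->dev, rbi->dma_addr)) {
        dev_kfree_skb_any(rbi->skb);
        rbi->skb = NULL;                /* prevent a double free later */
        rq->stats.rx_buf_alloc_failure++;
        break;
}
```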
@@ -657,13 +659,12 @@
 vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
                     struct vmxnet3_rx_buf_info *rbi)
 {
-        struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
-                                       skb_shinfo(skb)->nr_frags;
+        skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
 
         BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
 
         __skb_frag_set_page(frag, rbi->page);
-        frag->page_offset = 0;
+        skb_frag_off_set(frag, 0);
         skb_frag_size_set(frag, rcd->len);
         skb->data_len += rcd->len;
         skb->truesize += PAGE_SIZE;
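The frag updates track the skb_frag_struct removal: skb_frag_t is now an opaque typedef (a struct bio_vec on recent kernels), so fields are reached through accessors instead of directly. A sketch of the accessor API used above, with page and len assumed local:

```c
/* Sketch: appending a page fragment with the accessor API. */
skb_frag_t *frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags];

__skb_frag_set_page(frag, page);        /* backing struct page */
skb_frag_off_set(frag, 0);              /* was: frag->page_offset = 0 */
skb_frag_size_set(frag, len);           /* fragment length in bytes */
skb_shinfo(skb)->nr_frags++;
```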
@@ -755,7 +756,7 @@
         }
 
         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                 u32 buf_size;
 
                 buf_offset = 0;
@@ -843,26 +844,52 @@
         u8 protocol = 0;
 
         if (ctx->mss) {        /* TSO */
-                ctx->eth_ip_hdr_size = skb_transport_offset(skb);
-                ctx->l4_hdr_size = tcp_hdrlen(skb);
-                ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
+                if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
+                        ctx->l4_offset = skb_inner_transport_offset(skb);
+                        ctx->l4_hdr_size = inner_tcp_hdrlen(skb);
+                        ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
+                } else {
+                        ctx->l4_offset = skb_transport_offset(skb);
+                        ctx->l4_hdr_size = tcp_hdrlen(skb);
+                        ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
+                }
         } else {
                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                        ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
+                        /* For encap packets, skb_checksum_start_offset refers
+                         * to inner L4 offset. Thus, below works for encap as
+                         * well as non-encap case
+                         */
+                        ctx->l4_offset = skb_checksum_start_offset(skb);
 
-                        if (ctx->ipv4) {
-                                const struct iphdr *iph = ip_hdr(skb);
+                        if (VMXNET3_VERSION_GE_4(adapter) &&
+                            skb->encapsulation) {
+                                struct iphdr *iph = inner_ip_hdr(skb);
 
-                                protocol = iph->protocol;
-                        } else if (ctx->ipv6) {
-                                const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+                                if (iph->version == 4) {
+                                        protocol = iph->protocol;
+                                } else {
+                                        const struct ipv6hdr *ipv6h;
 
-                                protocol = ipv6h->nexthdr;
+                                        ipv6h = inner_ipv6_hdr(skb);
+                                        protocol = ipv6h->nexthdr;
+                                }
+                        } else {
+                                if (ctx->ipv4) {
+                                        const struct iphdr *iph = ip_hdr(skb);
+
+                                        protocol = iph->protocol;
+                                } else if (ctx->ipv6) {
+                                        const struct ipv6hdr *ipv6h;
+
+                                        ipv6h = ipv6_hdr(skb);
+                                        protocol = ipv6h->nexthdr;
+                                }
                         }
 
                         switch (protocol) {
                         case IPPROTO_TCP:
-                                ctx->l4_hdr_size = tcp_hdrlen(skb);
+                                ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) :
+                                                   tcp_hdrlen(skb);
                                 break;
                         case IPPROTO_UDP:
                                 ctx->l4_hdr_size = sizeof(struct udphdr);
@@ -872,10 +899,10 @@
                                 break;
                         }
 
-                        ctx->copy_size = min(ctx->eth_ip_hdr_size +
+                        ctx->copy_size = min(ctx->l4_offset +
                                              ctx->l4_hdr_size, skb->len);
                 } else {
-                        ctx->eth_ip_hdr_size = 0;
+                        ctx->l4_offset = 0;
                         ctx->l4_hdr_size = 0;
                         /* copy as much as allowed */
                         ctx->copy_size = min_t(unsigned int,
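The renamed ctx->l4_offset (formerly eth_ip_hdr_size) now covers everything up to the L4 header that matters to the device, which for encapsulated packets is the inner one. A sketch of the offsets for a VXLAN-style frame, with l4_offset and l4_hdr_size as stand-in locals:

```c
/*
 * Sketch: header offsets for an encapsulated TSO skb (VXLAN-style).
 *
 *  | outer eth/ip/udp | vxlan | inner eth | inner ip | inner tcp | payload
 *  ^skb->data                                         ^skb_inner_transport_offset(skb)
 */
if (skb->encapsulation) {
        l4_offset = skb_inner_transport_offset(skb);    /* up to inner TCP */
        l4_hdr_size = inner_tcp_hdrlen(skb);
} else {
        l4_offset = skb_transport_offset(skb);          /* outer TCP */
        l4_hdr_size = tcp_hdrlen(skb);
}
```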
@@ -931,6 +958,25 @@
 
 
 static void
+vmxnet3_prepare_inner_tso(struct sk_buff *skb,
+                          struct vmxnet3_tx_ctx *ctx)
+{
+        struct tcphdr *tcph = inner_tcp_hdr(skb);
+        struct iphdr *iph = inner_ip_hdr(skb);
+
+        if (iph->version == 4) {
+                iph->check = 0;
+                tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
+                                                 IPPROTO_TCP, 0);
+        } else {
+                struct ipv6hdr *iph = inner_ipv6_hdr(skb);
+
+                tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
+                                               IPPROTO_TCP, 0);
+        }
+}
+
+static void
 vmxnet3_prepare_tso(struct sk_buff *skb,
                     struct vmxnet3_tx_ctx *ctx)
 {
@@ -943,10 +989,7 @@
                 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
                                                  IPPROTO_TCP, 0);
         } else if (ctx->ipv6) {
-                struct ipv6hdr *iph = ipv6_hdr(skb);
-
-                tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
-                                               IPPROTO_TCP, 0);
+                tcp_v6_gso_csum_prep(skb);
         }
 }
 
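Both prepare routines seed the TCP checksum field with the folded pseudo-header sum, so the device only has to add the payload checksum to each generated segment; tcp_v6_gso_csum_prep() centralizes the IPv6 variant that was previously open-coded here. Roughly:

```c
/* Sketch: TSO checksum seeding (pseudo-header only, length 0). */
/* IPv4 */
iph->check = 0;
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0);

/* IPv6 -- approximately what tcp_v6_gso_csum_prep() does */
tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, 0, IPPROTO_TCP, 0);
```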
@@ -956,7 +999,7 @@
         int i;
 
         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
                 count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
         }
@@ -1007,7 +1050,11 @@
                         }
                         tq->stats.copy_skb_header++;
                 }
-                vmxnet3_prepare_tso(skb, &ctx);
+                if (skb->encapsulation) {
+                        vmxnet3_prepare_inner_tso(skb, &ctx);
+                } else {
+                        vmxnet3_prepare_tso(skb, &ctx);
+                }
         } else {
                 if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
 
@@ -1030,14 +1077,14 @@
         BUG_ON(ret <= 0 && ctx.copy_size != 0);
         /* hdrs parsed, check against other limits */
         if (ctx.mss) {
-                if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
+                if (unlikely(ctx.l4_offset + ctx.l4_hdr_size >
                              VMXNET3_MAX_TX_BUF_SIZE)) {
                         tq->stats.drop_oversized_hdr++;
                         goto drop_pkt;
                 }
         } else {
                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                        if (unlikely(ctx.eth_ip_hdr_size +
+                        if (unlikely(ctx.l4_offset +
                                      skb->csum_offset >
                                      VMXNET3_MAX_CSUM_OFFSET)) {
                                 tq->stats.drop_oversized_hdr++;
@@ -1084,16 +1131,33 @@
 #endif
         tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
         if (ctx.mss) {
-                gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
-                gdesc->txd.om = VMXNET3_OM_TSO;
-                gdesc->txd.msscof = ctx.mss;
+                if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
+                        gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
+                        gdesc->txd.om = VMXNET3_OM_ENCAP;
+                        gdesc->txd.msscof = ctx.mss;
+
+                        if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
+                                gdesc->txd.oco = 1;
+                } else {
+                        gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
+                        gdesc->txd.om = VMXNET3_OM_TSO;
+                        gdesc->txd.msscof = ctx.mss;
+                }
                 num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
         } else {
                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                        gdesc->txd.hlen = ctx.eth_ip_hdr_size;
-                        gdesc->txd.om = VMXNET3_OM_CSUM;
-                        gdesc->txd.msscof = ctx.eth_ip_hdr_size +
-                                            skb->csum_offset;
+                        if (VMXNET3_VERSION_GE_4(adapter) &&
+                            skb->encapsulation) {
+                                gdesc->txd.hlen = ctx.l4_offset +
+                                                  ctx.l4_hdr_size;
+                                gdesc->txd.om = VMXNET3_OM_ENCAP;
+                                gdesc->txd.msscof = 0;          /* Reserved */
+                        } else {
+                                gdesc->txd.hlen = ctx.l4_offset;
+                                gdesc->txd.om = VMXNET3_OM_CSUM;
+                                gdesc->txd.msscof = ctx.l4_offset +
+                                                    skb->csum_offset;
+                        }
                 } else {
                         gdesc->txd.om = 0;
                         gdesc->txd.msscof = 0;
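For reference, the three offload modes the descriptor setup above distinguishes, as implied by this patch (not quoted from the device spec):

```c
/*
 * om                hlen                      msscof
 * ----------------  ------------------------  ----------------------------
 * VMXNET3_OM_CSUM   l4_offset                 l4_offset + skb->csum_offset
 * VMXNET3_OM_TSO    l4_offset + l4_hdr_size   MSS
 * VMXNET3_OM_ENCAP  l4_offset + l4_hdr_size   MSS for TSO, 0 (reserved)
 *                                             for checksum-only offload
 *
 * 'oco' additionally asks the device to compute the outer UDP checksum
 * when the stack requested SKB_GSO_UDP_TUNNEL_CSUM.
 */
```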
@@ -1172,13 +1236,29 @@
             (le32_to_cpu(gdesc->dword[3]) &
              VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
                 skb->ip_summed = CHECKSUM_UNNECESSARY;
-                BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
-                BUG_ON(gdesc->rcd.frg);
+                if ((le32_to_cpu(gdesc->dword[0]) &
+                     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
+                        skb->csum_level = 1;
+                }
+                WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
+                             !(le32_to_cpu(gdesc->dword[0]) &
+                             (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
+                WARN_ON_ONCE(gdesc->rcd.frg &&
+                             !(le32_to_cpu(gdesc->dword[0]) &
+                             (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
         } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
                                      (1 << VMXNET3_RCD_TUC_SHIFT))) {
                 skb->ip_summed = CHECKSUM_UNNECESSARY;
-                BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
-                BUG_ON(gdesc->rcd.frg);
+                if ((le32_to_cpu(gdesc->dword[0]) &
+                     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
+                        skb->csum_level = 1;
+                }
+                WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
+                             !(le32_to_cpu(gdesc->dword[0]) &
+                             (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
+                WARN_ON_ONCE(gdesc->rcd.frg &&
+                             !(le32_to_cpu(gdesc->dword[0]) &
+                             (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
         } else {
                 if (gdesc->rcd.csum) {
                         skb->csum = htons(gdesc->rcd.csum);
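With hardware that validates both checksums of a tunnelled packet, the hard BUG_ON() sanity checks no longer hold, so they become WARN_ON_ONCE() qualified by the inner-header bit, and the inner result is reported via csum_level. Per the skbuff documentation, csum_level counts the consecutive checksums verified beyond the outermost one:

```c
/* Sketch: reporting a verified inner checksum to the stack. */
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = 1;    /* outer and inner checksums both validated */
```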
@@ -1284,6 +1364,7 @@
         };
         u32 num_pkts = 0;
         bool skip_page_frags = false;
+        bool encap_lro = false;
         struct Vmxnet3_RxCompDesc *rcd;
         struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
         u16 segCnt = 0, mss = 0;
@@ -1424,13 +1505,18 @@
                 if (VMXNET3_VERSION_GE_2(adapter) &&
                     rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
                         struct Vmxnet3_RxCompDescExt *rcdlro;
+                        union Vmxnet3_GenericDesc *gdesc;
+
                         rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
+                        gdesc = (union Vmxnet3_GenericDesc *)rcd;
 
                         segCnt = rcdlro->segCnt;
                         WARN_ON_ONCE(segCnt == 0);
                         mss = rcdlro->mss;
                         if (unlikely(segCnt <= 1))
                                 segCnt = 0;
+                        encap_lro = (le32_to_cpu(gdesc->dword[0]) &
+                                (1UL << VMXNET3_RCD_HDR_INNER_SHIFT));
                 } else {
                         segCnt = 0;
                 }
@@ -1498,6 +1584,6 @@
                         vmxnet3_rx_csum(adapter, skb,
                                         (union Vmxnet3_GenericDesc *)rcd);
                         skb->protocol = eth_type_trans(skb, adapter->netdev);
-                        if (!rcd->tcp ||
+                        if ((!rcd->tcp && !encap_lro) ||
                             !(adapter->netdev->features & NETIF_F_LRO))
                                 goto not_lro;
1504 | 1590 | |
---|
.. | .. |
---|
1507 | 1593 | SKB_GSO_TCPV4 : SKB_GSO_TCPV6; |
---|
1508 | 1594 | skb_shinfo(skb)->gso_size = mss; |
---|
1509 | 1595 | skb_shinfo(skb)->gso_segs = segCnt; |
---|
1510 | | - } else if (segCnt != 0 || skb->len > mtu) { |
---|
| 1596 | + } else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) { |
---|
1511 | 1597 | u32 hlen; |
---|
1512 | 1598 | |
---|
1513 | 1599 | hlen = vmxnet3_get_hdr_len(adapter, skb, |
---|
@@ -1536,6 +1622,7 @@
                         napi_gro_receive(&rq->napi, skb);
 
                         ctx->skb = NULL;
+                        encap_lro = false;
                         num_pkts++;
                 }
 
@@ -1583,6 +1670,10 @@
 {
         u32 i, ring_idx;
         struct Vmxnet3_RxDesc *rxd;
+
+        /* ring has already been cleaned up */
+        if (!rq->rx_ring[0].base)
+                return;
 
         for (ring_idx = 0; ring_idx < 2; ring_idx++) {
                 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
@@ -1815,8 +1906,8 @@
 
         sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
                                                    rq->rx_ring[1].size);
-        bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
-                                 GFP_KERNEL);
+        bi = dma_alloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
+                                GFP_KERNEL);
         if (!bi)
                 goto err;
 
@@ -2433,6 +2524,10 @@
         if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
                 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
 
+        if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL |
+                                         NETIF_F_GSO_UDP_TUNNEL_CSUM))
+                devRead->misc.uptFeatures |= UPT1_F_RXINNEROFLD;
+
         devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
         devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
         devRead->misc.queueDescLen = cpu_to_le32(
@@ -2558,6 +2653,39 @@
         spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 }
 
+static void
+vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter)
+{
+        struct Vmxnet3_DriverShared *shared = adapter->shared;
+        union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
+        unsigned long flags;
+
+        if (!VMXNET3_VERSION_GE_4(adapter))
+                return;
+
+        spin_lock_irqsave(&adapter->cmd_lock, flags);
+
+        if (adapter->default_rss_fields) {
+                VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+                                       VMXNET3_CMD_GET_RSS_FIELDS);
+                adapter->rss_fields =
+                        VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+        } else {
+                cmdInfo->setRssFields = adapter->rss_fields;
+                VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+                                       VMXNET3_CMD_SET_RSS_FIELDS);
+                /* Not all requested RSS may get applied, so get and
+                 * cache what was actually applied.
+                 */
+                VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+                                       VMXNET3_CMD_GET_RSS_FIELDS);
+                adapter->rss_fields =
+                        VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+        }
+
+        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+}
+
 int
 vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
 {
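vmxnet3_init_rssfields() uses the usual BAR1 command handshake: a command is written to VMXNET3_REG_CMD under cmd_lock, and "get"-style commands return their result through a read of the same register. Condensed from the code above:

```c
/* Sketch: BAR1 command handshake (from vmxnet3_init_rssfields()). */
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                       VMXNET3_CMD_GET_RSS_FIELDS);
adapter->rss_fields = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
```

The set path deliberately reads the fields back after writing them, since the device may not honour every requested field.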
@@ -2607,6 +2735,7 @@
         }
 
         vmxnet3_init_coalesce(adapter);
+        vmxnet3_init_rssfields(adapter);
 
         for (i = 0; i < adapter->num_rx_queues; i++) {
                 VMXNET3_WRITE_BAR0_REG(adapter,
@@ -3043,6 +3172,18 @@
                 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
                 NETIF_F_LRO;
+
+        if (VMXNET3_VERSION_GE_4(adapter)) {
+                netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+                        NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+                netdev->hw_enc_features = NETIF_F_SG | NETIF_F_RXCSUM |
+                        NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
+                        NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
+                        NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
+                        NETIF_F_GSO_UDP_TUNNEL_CSUM;
+        }
+
         if (dma64)
                 netdev->hw_features |= NETIF_F_HIGHDMA;
         netdev->vlan_features = netdev->hw_features &
@@ -3199,7 +3340,7 @@
 
 
 static void
-vmxnet3_tx_timeout(struct net_device *netdev)
+vmxnet3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
 {
         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
         adapter->tx_timeout_count++;
@@ -3247,7 +3388,9 @@
         .ndo_start_xmit = vmxnet3_xmit_frame,
         .ndo_set_mac_address = vmxnet3_set_mac_addr,
         .ndo_change_mtu = vmxnet3_change_mtu,
+        .ndo_fix_features = vmxnet3_fix_features,
         .ndo_set_features = vmxnet3_set_features,
+        .ndo_features_check = vmxnet3_features_check,
         .ndo_get_stats64 = vmxnet3_get_stats64,
         .ndo_tx_timeout = vmxnet3_tx_timeout,
         .ndo_set_rx_mode = vmxnet3_set_mc,
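The new ndo_features_check hook lets the driver veto offload bits per packet (for example, falling back to software for tunnel geometries the device cannot parse); the vmxnet3 implementation itself lives elsewhere in the series. A generic sketch of the hook's shape, with supported_tunnel() as a hypothetical predicate:

```c
/* Sketch only: generic shape of an ndo_features_check implementation.
 * supported_tunnel() is a hypothetical helper, not a kernel API. */
static netdev_features_t
example_features_check(struct sk_buff *skb, struct net_device *dev,
                       netdev_features_t features)
{
        if (skb->encapsulation && !supported_tunnel(skb))
                features &= ~(NETIF_F_GSO_MASK | NETIF_F_CSUM_MASK);
        return features;
}
```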
@@ -3385,7 +3528,12 @@
                 goto err_alloc_pci;
 
         ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
-        if (ver & (1 << VMXNET3_REV_3)) {
+        if (ver & (1 << VMXNET3_REV_4)) {
+                VMXNET3_WRITE_BAR1_REG(adapter,
+                                       VMXNET3_REG_VRRS,
+                                       1 << VMXNET3_REV_4);
+                adapter->version = VMXNET3_REV_4 + 1;
+        } else if (ver & (1 << VMXNET3_REV_3)) {
                 VMXNET3_WRITE_BAR1_REG(adapter,
                                        VMXNET3_REG_VRRS,
                                        1 << VMXNET3_REV_3);
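Revision negotiation: VRRS exposes one bit per supported revision, the driver acks the highest one it understands and stores revision + 1, so the VMXNET3_VERSION_GE_* checks used throughout this patch reduce to plain comparisons, presumably along the lines of:

```c
/* Sketch: assumed shape of the version-check macro. */
#define VMXNET3_VERSION_GE_4(adapter) \
        ((adapter)->version >= VMXNET3_REV_4 + 1)
```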
@@ -3429,9 +3577,13 @@
                         err = -ENOMEM;
                         goto err_ver;
                 }
-                memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
                 adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
                 adapter->default_coal_mode = true;
+        }
+
+        if (VMXNET3_VERSION_GE_4(adapter)) {
+                adapter->default_rss_fields = true;
+                adapter->rss_fields = VMXNET3_RSS_FIELDS_DEFAULT;
         }
 
         SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -3650,13 +3802,19 @@
         }
 
         if (adapter->wol & WAKE_ARP) {
-                in_dev = in_dev_get(netdev);
-                if (!in_dev)
-                        goto skip_arp;
+                rcu_read_lock();
 
-                ifa = (struct in_ifaddr *)in_dev->ifa_list;
-                if (!ifa)
+                in_dev = __in_dev_get_rcu(netdev);
+                if (!in_dev) {
+                        rcu_read_unlock();
                         goto skip_arp;
+                }
+
+                ifa = rcu_dereference(in_dev->ifa_list);
+                if (!ifa) {
+                        rcu_read_unlock();
+                        goto skip_arp;
+                }
 
                 pmConf->filters[i].patternSize = ETH_HLEN +     /* Ethernet header*/
                         sizeof(struct arphdr) +         /* ARP header */
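The WAKE_ARP path switches from in_dev_get()/in_dev_put() to an RCU read-side section. The pointers returned by __in_dev_get_rcu() and rcu_dereference() are only stable inside that section, so each early exit unlocks, and the address is copied out before the final unlock:

```c
/* Sketch: the RCU discipline used above; 'tip' is a stand-in local. */
rcu_read_lock();
in_dev = __in_dev_get_rcu(netdev);
ifa = in_dev ? rcu_dereference(in_dev->ifa_list) : NULL;
if (!ifa) {
        rcu_read_unlock();
        goto skip_arp;
}
tip = ifa->ifa_address;         /* copy while still protected */
rcu_read_unlock();
```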
@@ -3676,7 +3834,9 @@
 
                 /* The Unicast IPv4 address in 'tip' field. */
                 arpreq += 2 * ETH_ALEN + sizeof(u32);
-                *(u32 *)arpreq = ifa->ifa_address;
+                *(__be32 *)arpreq = ifa->ifa_address;
+
+                rcu_read_unlock();
 
                 /* The mask for the relevant bits. */
                 pmConf->filters[i].mask[0] = 0x00;
@@ -3685,7 +3845,6 @@
                 pmConf->filters[i].mask[3] = 0x00;
                 pmConf->filters[i].mask[4] = 0xC0;      /* IPv4 TIP */
                 pmConf->filters[i].mask[5] = 0x03;      /* IPv4 TIP */
-                in_dev_put(in_dev);
 
                 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
                 i++;
---|