@@ -80,7 +80,6 @@
 MODULE_DESCRIPTION(DRV_DESCRIPTION);
 MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
 MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
 MODULE_DEVICE_TABLE(pci, enic_id_table);
 
 #define ENIC_LARGE_PKT_THRESHOLD		1000
@@ -177,50 +176,18 @@
 		irq_set_affinity_hint(enic->msix_entry[i].vector, NULL);
 }
 
-static void enic_udp_tunnel_add(struct net_device *netdev,
-				struct udp_tunnel_info *ti)
+static int enic_udp_tunnel_set_port(struct net_device *netdev,
+				    unsigned int table, unsigned int entry,
+				    struct udp_tunnel_info *ti)
 {
 	struct enic *enic = netdev_priv(netdev);
-	__be16 port = ti->port;
 	int err;
 
 	spin_lock_bh(&enic->devcmd_lock);
 
-	if (ti->type != UDP_TUNNEL_TYPE_VXLAN) {
-		netdev_info(netdev, "udp_tnl: only vxlan tunnel offload supported");
-		goto error;
-	}
-
-	switch (ti->sa_family) {
-	case AF_INET6:
-		if (!(enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6)) {
-			netdev_info(netdev, "vxlan: only IPv4 offload supported");
-			goto error;
-		}
-		/* Fall through */
-	case AF_INET:
-		break;
-	default:
-		goto error;
-	}
-
-	if (enic->vxlan.vxlan_udp_port_number) {
-		if (ntohs(port) == enic->vxlan.vxlan_udp_port_number)
-			netdev_warn(netdev, "vxlan: udp port already offloaded");
-		else
-			netdev_info(netdev, "vxlan: offload supported for only one UDP port");
-
-		goto error;
-	}
-	if ((vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ) != 1) &&
-	    !(enic->vxlan.flags & ENIC_VXLAN_MULTI_WQ)) {
-		netdev_info(netdev, "vxlan: vxlan offload with multi wq not supported on this adapter");
-		goto error;
-	}
-
 	err = vnic_dev_overlay_offload_cfg(enic->vdev,
 					   OVERLAY_CFG_VXLAN_PORT_UPDATE,
-					   ntohs(port));
+					   ntohs(ti->port));
 	if (err)
 		goto error;
 
@@ -229,51 +196,49 @@
 	if (err)
 		goto error;
 
-	enic->vxlan.vxlan_udp_port_number = ntohs(port);
-
-	netdev_info(netdev, "vxlan fw-vers-%d: offload enabled for udp port: %d, sa_family: %d ",
-		    (int)enic->vxlan.patch_level, ntohs(port), ti->sa_family);
-
-	goto unlock;
-
+	enic->vxlan.vxlan_udp_port_number = ntohs(ti->port);
 error:
-	netdev_info(netdev, "failed to offload udp port: %d, sa_family: %d, type: %d",
-		    ntohs(port), ti->sa_family, ti->type);
-unlock:
 	spin_unlock_bh(&enic->devcmd_lock);
+
+	return err;
 }
 
-static void enic_udp_tunnel_del(struct net_device *netdev,
-				struct udp_tunnel_info *ti)
+static int enic_udp_tunnel_unset_port(struct net_device *netdev,
+				      unsigned int table, unsigned int entry,
+				      struct udp_tunnel_info *ti)
 {
 	struct enic *enic = netdev_priv(netdev);
 	int err;
 
 	spin_lock_bh(&enic->devcmd_lock);
 
-	if ((ntohs(ti->port) != enic->vxlan.vxlan_udp_port_number) ||
-	    ti->type != UDP_TUNNEL_TYPE_VXLAN) {
-		netdev_info(netdev, "udp_tnl: port:%d, sa_family: %d, type: %d not offloaded",
-			    ntohs(ti->port), ti->sa_family, ti->type);
-		goto unlock;
-	}
-
 	err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN,
 					    OVERLAY_OFFLOAD_DISABLE);
-	if (err) {
-		netdev_err(netdev, "vxlan: del offload udp port: %d failed",
-			   ntohs(ti->port));
+	if (err)
 		goto unlock;
-	}
 
 	enic->vxlan.vxlan_udp_port_number = 0;
 
-	netdev_info(netdev, "vxlan: del offload udp port %d, family %d\n",
-		    ntohs(ti->port), ti->sa_family);
-
 unlock:
 	spin_unlock_bh(&enic->devcmd_lock);
+
+	return err;
 }
+
+static const struct udp_tunnel_nic_info enic_udp_tunnels = {
+	.set_port	= enic_udp_tunnel_set_port,
+	.unset_port	= enic_udp_tunnel_unset_port,
+	.tables = {
+		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+	},
+}, enic_udp_tunnels_v4 = {
+	.set_port	= enic_udp_tunnel_set_port,
+	.unset_port	= enic_udp_tunnel_unset_port,
+	.flags		= UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
+	.tables = {
+		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+	},
+};
 
 static netdev_features_t enic_features_check(struct sk_buff *skb,
 					     struct net_device *dev,
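Note on the conversion above: none of the deleted sanity checks are lost. Under the udp_tunnel_nic infrastructure the constraints are declared in the udp_tunnel_nic_info tables instead, and the core enforces them before calling back into the driver. A minimal sketch of that contract, assuming a simplified core (the real logic lives in net/ipv4/udp_tunnel_nic.c, so treat this as illustration only):

/* Sketch, not the actual core implementation. */
static int sketch_core_add_port(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;

	/* "only vxlan": table 0 lists UDP_TUNNEL_TYPE_VXLAN only */
	if (!(info->tables[0].tunnel_types & ti->type))
		return -EOPNOTSUPP;

	/* "only one UDP port": table 0 has n_entries = 1, so the core
	 * refuses a second port instead of the driver warning about it.
	 * "only IPv4": UDP_TUNNEL_NIC_INFO_IPV4_ONLY on the _v4 variant
	 * filters AF_INET6 requests before set_port() is ever called.
	 */
	return info->set_port(dev, 0, 0, ti);
}

The multi-WQ check moves to probe time (see the enic_probe() hunk near the end), where the driver simply does not register a udp_tunnel_nic_info if the adapter cannot offload at all.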
@@ -307,7 +272,7 @@
 	case ntohs(ETH_P_IPV6):
 		if (!(enic->vxlan.flags & ENIC_VXLAN_INNER_IPV6))
 			goto out;
-		/* Fall through */
+		fallthrough;
 	case ntohs(ETH_P_IP):
 		break;
 	default:
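The /* Fall through */ comments become the fallthrough pseudo-keyword, which compiles to the fallthrough attribute where supported and lets -Wimplicit-fallthrough verify the intent. A toy example of the idiom, with hypothetical helpers just to show placement:

static void handle_v6_prefix(void) { }	/* hypothetical */
static void handle_common(void) { }	/* hypothetical */

static int example_parse(int sa_family)
{
	switch (sa_family) {
	case AF_INET6:
		handle_v6_prefix();
		fallthrough;	/* v6 shares the rest of the v4 path */
	case AF_INET:
		handle_common();
		return 0;
	default:
		return -EINVAL;
	}
}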
@@ -361,11 +326,11 @@
 	struct enic *enic = vnic_dev_priv(wq->vdev);
 
 	if (buf->sop)
-		pci_unmap_single(enic->pdev, buf->dma_addr,
-				 buf->len, PCI_DMA_TODEVICE);
+		dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+				 DMA_TO_DEVICE);
 	else
-		pci_unmap_page(enic->pdev, buf->dma_addr,
-			       buf->len, PCI_DMA_TODEVICE);
+		dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
+			       DMA_TO_DEVICE);
 
 	if (buf->os_buf)
 		dev_kfree_skb_any(buf->os_buf);
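This hunk and the map/unmap hunks that follow are the same mechanical conversion: the pci_*()/PCI_DMA_* wrappers in include/linux/pci-dma-compat.h were thin shims over the generic DMA API, so pci_map_single(pdev, ...) with PCI_DMA_TODEVICE becomes dma_map_single(&pdev->dev, ...) with DMA_TO_DEVICE, and likewise for the page, sync, and FROMDEVICE variants. The basic pattern with its mandatory error check, as an illustrative pair (the example_* names are made up):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Map a buffer for device reads; every mapping must be checked. */
static int example_map(struct pci_dev *pdev, void *buf, size_t len,
		       dma_addr_t *dma)
{
	*dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, *dma))
		return -ENOMEM;
	return 0;
}

/* Unmap with the same size and direction used for the mapping. */
static void example_unmap(struct pci_dev *pdev, dma_addr_t dma, size_t len)
{
	dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);
}

enic keeps its own enic_dma_map_check() wrapper, which layers an error counter on top, but the dma_mapping_error() call is the required part.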
@@ -609,8 +574,8 @@
 	dma_addr_t dma_addr;
 	int err = 0;
 
-	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
-				  PCI_DMA_TODEVICE);
+	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
+				  DMA_TO_DEVICE);
 	if (unlikely(enic_dma_map_check(enic, dma_addr)))
 		return -ENOMEM;
 
@@ -640,8 +605,8 @@
 	dma_addr_t dma_addr;
 	int err = 0;
 
-	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
-				  PCI_DMA_TODEVICE);
+	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
+				  DMA_TO_DEVICE);
 	if (unlikely(enic_dma_map_check(enic, dma_addr)))
 		return -ENOMEM;
 
@@ -696,8 +661,7 @@
 		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
 			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
 	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
-		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+		tcp_v6_gso_csum_prep(skb);
 	}
 }
 
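The removed csum_ipv6_magic() expression is what tcp_v6_gso_csum_prep() does internally. If I read include/net/ip6_checksum.h correctly, the helper is essentially the following (quoted from memory, worth verifying against the tree); the one behavioural addition is that it also zeroes payload_len for the GSO path:

static inline void tcp_v6_gso_csum_prep(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	ipv6h->payload_len = 0;
	th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
}

tcp_v6_check(len, saddr, daddr, base) is itself csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base), so the checksum seed matches the deleted lines.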
@@ -729,8 +693,9 @@
 	 */
 	while (frag_len_left) {
 		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
-		dma_addr = pci_map_single(enic->pdev, skb->data + offset, len,
-					  PCI_DMA_TODEVICE);
+		dma_addr = dma_map_single(&enic->pdev->dev,
+					  skb->data + offset, len,
+					  DMA_TO_DEVICE);
 		if (unlikely(enic_dma_map_check(enic, dma_addr)))
 			return -ENOMEM;
 		enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
@@ -788,8 +753,8 @@
 	dma_addr_t dma_addr;
 	int err = 0;
 
-	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
-				  PCI_DMA_TODEVICE);
+	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
+				  DMA_TO_DEVICE);
 	if (unlikely(enic_dma_map_check(enic, dma_addr)))
 		return -ENOMEM;
 
@@ -899,7 +864,7 @@
 	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
 		netif_tx_stop_queue(txq);
 	skb_tx_timestamp(skb);
-	if (!skb->xmit_more || netif_xmit_stopped(txq))
+	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
 		vnic_wq_doorbell(wq);
 
 error:
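skb->xmit_more was removed from struct sk_buff; the hint now lives in per-CPU softnet state read via netdev_xmit_more(), valid only inside ndo_start_xmit. The doorbell-batching idea it enables, as a sketch (post_descriptors() and ring_doorbell() are hypothetical stand-ins for the enic queueing code and vnic_wq_doorbell()):

static void post_descriptors(struct sk_buff *skb) { }	/* hypothetical */
static void ring_doorbell(void) { }			/* hypothetical */

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	post_descriptors(skb);

	/* Skip the MMIO kick while the stack promises more skbs, but
	 * always flush if the queue just stopped.
	 */
	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		ring_doorbell();

	return NETDEV_TX_OK;
}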
@@ -1098,7 +1063,7 @@
 }
 
 /* netif_tx_lock held, BHs disabled */
-static void enic_tx_timeout(struct net_device *netdev)
+static void enic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
 {
 	struct enic *enic = netdev_priv(netdev);
 	schedule_work(&enic->tx_hang_reset);
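ndo_tx_timeout grew a txqueue argument so the stack can tell the driver which TX queue stalled; enic ignores it and schedules a full reset as before. A driver that wanted per-queue handling could use it roughly like this (illustrative only):

static void example_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);

	netdev_warn(dev, "TX queue %u timed out, state 0x%lx\n",
		    txqueue, txq->state);
	/* reset or re-kick just this queue instead of the whole NIC */
}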
@@ -1261,8 +1226,8 @@
 	if (!buf->os_buf)
 		return;
 
-	pci_unmap_single(enic->pdev, buf->dma_addr,
-			 buf->len, PCI_DMA_FROMDEVICE);
+	dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+			 DMA_FROM_DEVICE);
 	dev_kfree_skb_any(buf->os_buf);
 	buf->os_buf = NULL;
 }
@@ -1287,8 +1252,8 @@
 	if (!skb)
 		return -ENOMEM;
 
-	dma_addr = pci_map_single(enic->pdev, skb->data, len,
-				  PCI_DMA_FROMDEVICE);
+	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
+				  DMA_FROM_DEVICE);
 	if (unlikely(enic_dma_map_check(enic, dma_addr))) {
 		dev_kfree_skb(skb);
 		return -ENOMEM;
@@ -1320,8 +1285,8 @@
 	new_skb = netdev_alloc_skb_ip_align(netdev, len);
 	if (!new_skb)
 		return false;
-	pci_dma_sync_single_for_cpu(enic->pdev, buf->dma_addr, len,
-				    DMA_FROM_DEVICE);
+	dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
+				DMA_FROM_DEVICE);
 	memcpy(new_skb->data, (*skb)->data, len);
 	*skb = new_skb;
 
@@ -1370,8 +1335,8 @@
 				enic->rq_truncated_pkts++;
 		}
 
-		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
-				 PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+				 DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
 		buf->os_buf = NULL;
 
@@ -1385,8 +1350,8 @@
 
 		if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
 			buf->os_buf = NULL;
-			pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
-					 PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
+					 buf->len, DMA_FROM_DEVICE);
 		}
 		prefetch(skb->data - NET_IP_ALIGN);
 
@@ -1459,8 +1424,8 @@
 		/* Buffer overflow
 		 */
 
-		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
-				 PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+				 DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
 		buf->os_buf = NULL;
 	}
@@ -2215,9 +2180,9 @@
 	dma_addr_t rss_key_buf_pa;
 	int i, kidx, bidx, err;
 
-	rss_key_buf_va = pci_zalloc_consistent(enic->pdev,
-					       sizeof(union vnic_rss_key),
-					       &rss_key_buf_pa);
+	rss_key_buf_va = dma_alloc_coherent(&enic->pdev->dev,
+					    sizeof(union vnic_rss_key),
+					    &rss_key_buf_pa, GFP_ATOMIC);
 	if (!rss_key_buf_va)
 		return -ENOMEM;
 
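pci_zalloc_consistent() implied GFP_ATOMIC and returned zeroed memory; the replacement passes GFP_ATOMIC explicitly to preserve behaviour (GFP_KERNEL may well be safe here since the allocation happens before devcmd_lock is taken, but that would be a separate change), and dma_alloc_coherent() already zeroes the buffer in current kernels. The alloc/free pairing, as a small sketch (example_coherent() is a made-up name):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_coherent(struct pci_dev *pdev, size_t size)
{
	dma_addr_t pa;
	void *va;

	va = dma_alloc_coherent(&pdev->dev, size, &pa, GFP_ATOMIC);
	if (!va)
		return -ENOMEM;

	/* hand pa to the device, access the buffer through va ... */

	dma_free_coherent(&pdev->dev, size, va, pa);
	return 0;
}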
@@ -2232,8 +2197,8 @@
 		sizeof(union vnic_rss_key));
 	spin_unlock_bh(&enic->devcmd_lock);
 
-	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
-			    rss_key_buf_va, rss_key_buf_pa);
+	dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_key),
+			  rss_key_buf_va, rss_key_buf_pa);
 
 	return err;
 }
@@ -2252,8 +2217,9 @@
 	unsigned int i;
 	int err;
 
-	rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
-		sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
+	rss_cpu_buf_va = dma_alloc_coherent(&enic->pdev->dev,
+					    sizeof(union vnic_rss_cpu),
+					    &rss_cpu_buf_pa, GFP_ATOMIC);
 	if (!rss_cpu_buf_va)
 		return -ENOMEM;
 
@@ -2266,8 +2232,8 @@
 		sizeof(union vnic_rss_cpu));
 	spin_unlock_bh(&enic->devcmd_lock);
 
-	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
-			    rss_cpu_buf_va, rss_cpu_buf_pa);
+	dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_cpu),
+			  rss_cpu_buf_va, rss_cpu_buf_pa);
 
 	return err;
 }
@@ -2546,8 +2512,8 @@
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer	= enic_rx_flow_steer,
 #endif
-	.ndo_udp_tunnel_add	= enic_udp_tunnel_add,
-	.ndo_udp_tunnel_del	= enic_udp_tunnel_del,
+	.ndo_udp_tunnel_add	= udp_tunnel_nic_add_port,
+	.ndo_udp_tunnel_del	= udp_tunnel_nic_del_port,
 	.ndo_features_check	= enic_features_check,
 };
 
@@ -2572,8 +2538,8 @@
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer	= enic_rx_flow_steer,
 #endif
-	.ndo_udp_tunnel_add	= enic_udp_tunnel_add,
-	.ndo_udp_tunnel_del	= enic_udp_tunnel_del,
+	.ndo_udp_tunnel_add	= udp_tunnel_nic_add_port,
+	.ndo_udp_tunnel_del	= udp_tunnel_nic_del_port,
 	.ndo_features_check	= enic_features_check,
 };
 
@@ -2581,13 +2547,15 @@
 {
 	unsigned int i;
 
-	for (i = 0; i < enic->rq_count; i++) {
-		napi_hash_del(&enic->napi[i]);
-		netif_napi_del(&enic->napi[i]);
-	}
+	for (i = 0; i < enic->rq_count; i++)
+		__netif_napi_del(&enic->napi[i]);
+
 	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
 		for (i = 0; i < enic->wq_count; i++)
-			netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);
+			__netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);
+
+	/* observe RCU grace period after __netif_napi_del() calls */
+	synchronize_net();
 
 	enic_free_vnic_resources(enic);
 	enic_clear_intr_mode(enic);
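netif_napi_del() is __netif_napi_del() plus a synchronize_net() on every call, so deleting many NAPI instances pays for one RCU grace period each. __netif_napi_del() only unlinks, and a single synchronize_net() afterwards covers all instances, which is what the new code does; as far as I can tell the separate napi_hash_del() also disappears because unhashing moved inside the core del path. The shape of the pattern (sketch; the napi_structs must not be freed or reused before synchronize_net() returns):

static void example_napi_teardown(struct napi_struct *napis, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		__netif_napi_del(&napis[i]);	/* unlink, no waiting */

	synchronize_net();	/* one grace period for all of them */
}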
@@ -2753,21 +2721,21 @@
 	 * fail to 32-bit.
 	 */
 
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47));
+	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(47));
 	if (err) {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
 			dev_err(dev, "No usable DMA configuration, aborting\n");
 			goto err_out_release_regions;
 		}
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
 			dev_err(dev, "Unable to obtain %u-bit DMA "
 				"for consistent allocations, aborting\n", 32);
 			goto err_out_release_regions;
 		}
 	} else {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47));
+		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(47));
 		if (err) {
 			dev_err(dev, "Unable to obtain %u-bit DMA "
 				"for consistent allocations, aborting\n", 47);
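The mask setup keeps the old two-call structure (streaming mask first, coherent mask checked separately, since the second call can fail even when the first succeeds). A possible follow-up cleanup, not part of this patch, is dma_set_mask_and_coherent(), which sets both masks at once and would collapse each branch to roughly:

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(dev, "No usable DMA configuration, aborting\n");
		goto err_out_release_regions;
	}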
@@ -2983,6 +2951,13 @@
 		patch_level = fls(patch_level);
 		patch_level = patch_level ? patch_level - 1 : 0;
 		enic->vxlan.patch_level = patch_level;
+
+		if (vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ) == 1 ||
+		    enic->vxlan.flags & ENIC_VXLAN_MULTI_WQ) {
+			netdev->udp_tunnel_nic_info = &enic_udp_tunnels_v4;
+			if (enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6)
+				netdev->udp_tunnel_nic_info = &enic_udp_tunnels;
+		}
 	}
 
 	netdev->features |= netdev->hw_features;
@@ -3074,8 +3049,6 @@
 
 static int __init enic_init_module(void)
 {
-	pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
-
 	return pci_register_driver(&enic_driver);
 }
 