| .. | .. |
|---|
| 5 | 5 | #include <linux/of_net.h> |
|---|
| 6 | 6 | #include <linux/pci.h> |
|---|
| 7 | 7 | #include <linux/bpf.h> |
|---|
| 8 | +#include <generated/utsrelease.h> |
|---|
| 8 | 9 | |
|---|
| 9 | 10 | /* Local includes */ |
|---|
| 10 | 11 | #include "i40e.h" |
|---|
| 11 | 12 | #include "i40e_diag.h" |
|---|
| 13 | +#include "i40e_xsk.h" |
|---|
| 12 | 14 | #include <net/udp_tunnel.h> |
|---|
| 15 | +#include <net/xdp_sock_drv.h> |
|---|
| 13 | 16 | /* All i40e tracepoints are defined by the include below, which |
|---|
| 14 | 17 | * must be included exactly once across the whole kernel with |
|---|
| 15 | 18 | * CREATE_TRACE_POINTS defined |
|---|
| .. | .. |
|---|
| 21 | 24 | static const char i40e_driver_string[] = |
|---|
| 22 | 25 | "Intel(R) Ethernet Connection XL710 Network Driver"; |
|---|
| 23 | 26 | |
|---|
| 24 | | -#define DRV_KERN "-k" |
|---|
| 25 | | - |
|---|
| 26 | | -#define DRV_VERSION_MAJOR 2 |
|---|
| 27 | | -#define DRV_VERSION_MINOR 3 |
|---|
| 28 | | -#define DRV_VERSION_BUILD 2 |
|---|
| 29 | | -#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ |
|---|
| 30 | | - __stringify(DRV_VERSION_MINOR) "." \ |
|---|
| 31 | | - __stringify(DRV_VERSION_BUILD) DRV_KERN |
|---|
| 32 | | -const char i40e_driver_version_str[] = DRV_VERSION; |
|---|
| 33 | | -static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation."; |
|---|
| 27 | +static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation."; |
|---|
| 34 | 28 | |
|---|
| 35 | 29 | /* a bit of forward declarations */ |
|---|
| 36 | 30 | static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi); |
|---|
| 37 | 31 | static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired); |
|---|
| 38 | 32 | static int i40e_add_vsi(struct i40e_vsi *vsi); |
|---|
| 39 | 33 | static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi); |
|---|
| 40 | | -static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit); |
|---|
| 34 | +static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired); |
|---|
| 41 | 35 | static int i40e_setup_misc_vector(struct i40e_pf *pf); |
|---|
| 42 | 36 | static void i40e_determine_queue_usage(struct i40e_pf *pf); |
|---|
| 43 | 37 | static int i40e_setup_pf_filter_control(struct i40e_pf *pf); |
|---|
| .. | .. |
|---|
| 46 | 40 | bool lock_acquired); |
|---|
| 47 | 41 | static int i40e_reset(struct i40e_pf *pf); |
|---|
| 48 | 42 | static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired); |
|---|
| 43 | +static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf); |
|---|
| 44 | +static int i40e_restore_interrupt_scheme(struct i40e_pf *pf); |
|---|
| 45 | +static bool i40e_check_recovery_mode(struct i40e_pf *pf); |
|---|
| 46 | +static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw); |
|---|
| 49 | 47 | static void i40e_fdir_sb_setup(struct i40e_pf *pf); |
|---|
| 50 | 48 | static int i40e_veb_get_bw_info(struct i40e_veb *veb); |
|---|
| 51 | 49 | static int i40e_get_capabilities(struct i40e_pf *pf, |
|---|
| 52 | 50 | enum i40e_admin_queue_opc list_type); |
|---|
| 53 | | - |
|---|
| 51 | +static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf); |
|---|
| 54 | 52 | |
|---|
| 55 | 53 | /* i40e_pci_tbl - PCI Device ID Table |
|---|
| 56 | 54 | * |
|---|
| .. | .. |
|---|
| 69 | 67 | {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, |
|---|
| 70 | 68 | {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, |
|---|
| 71 | 69 | {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0}, |
|---|
| 70 | + {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0}, |
|---|
| 71 | + {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0}, |
|---|
| 72 | + {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0}, |
|---|
| 72 | 73 | {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0}, |
|---|
| 73 | 74 | {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0}, |
|---|
| 74 | 75 | {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0}, |
|---|
| .. | .. |
|---|
| 77 | 78 | {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0}, |
|---|
| 78 | 79 | {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, |
|---|
| 79 | 80 | {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0}, |
|---|
| 81 | + {PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0}, |
|---|
| 82 | + {PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0}, |
|---|
| 80 | 83 | {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0}, |
|---|
| 81 | 84 | {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0}, |
|---|
| 82 | 85 | /* required last entry */ |
|---|
| .. | .. |
|---|
| 91 | 94 | |
|---|
| 92 | 95 | MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); |
|---|
| 93 | 96 | MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver"); |
|---|
| 94 | | -MODULE_LICENSE("GPL"); |
|---|
| 95 | | -MODULE_VERSION(DRV_VERSION); |
|---|
| 97 | +MODULE_LICENSE("GPL v2"); |
|---|
| 96 | 98 | |
|---|
| 97 | 99 | static struct workqueue_struct *i40e_wq; |
|---|
| 98 | 100 | |
|---|
| .. | .. |
|---|
| 127 | 129 | struct i40e_pf *pf = (struct i40e_pf *)hw->back; |
|---|
| 128 | 130 | |
|---|
| 129 | 131 | mem->size = ALIGN(size, alignment); |
|---|
| 130 | | - mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size, |
|---|
| 131 | | - &mem->pa, GFP_KERNEL); |
|---|
| 132 | + mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa, |
|---|
| 133 | + GFP_KERNEL); |
|---|
| 132 | 134 | if (!mem->va) |
|---|
| 133 | 135 | return -ENOMEM; |
|---|
| 134 | 136 | |
|---|
| .. | .. |
|---|
| 302 | 304 | **/ |
|---|
| 303 | 305 | void i40e_service_event_schedule(struct i40e_pf *pf) |
|---|
| 304 | 306 | { |
|---|
| 305 | | - if (!test_bit(__I40E_DOWN, pf->state) && |
|---|
| 306 | | - !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) |
|---|
| 307 | + if ((!test_bit(__I40E_DOWN, pf->state) && |
|---|
| 308 | + !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) || |
|---|
| 309 | + test_bit(__I40E_RECOVERY_MODE, pf->state)) |
|---|
| 307 | 310 | queue_work(i40e_wq, &pf->service_task); |
|---|
| 308 | 311 | } |
|---|
| 309 | 312 | |
|---|
| 310 | 313 | /** |
|---|
| 311 | 314 | * i40e_tx_timeout - Respond to a Tx Hang |
|---|
| 312 | 315 | * @netdev: network interface device structure |
|---|
| 316 | + * @txqueue: queue number timing out |
|---|
| 313 | 317 | * |
|---|
| 314 | 318 | * If any port has noticed a Tx timeout, it is likely that the whole |
|---|
| 315 | 319 | * device is munged, not just the one netdev port, so go for the full |
|---|
| 316 | 320 | * reset. |
|---|
| 317 | 321 | **/ |
|---|
| 318 | | -static void i40e_tx_timeout(struct net_device *netdev) |
|---|
| 322 | +static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue) |
|---|
| 319 | 323 | { |
|---|
| 320 | 324 | struct i40e_netdev_priv *np = netdev_priv(netdev); |
|---|
| 321 | 325 | struct i40e_vsi *vsi = np->vsi; |
|---|
| 322 | 326 | struct i40e_pf *pf = vsi->back; |
|---|
| 323 | 327 | struct i40e_ring *tx_ring = NULL; |
|---|
| 324 | | - unsigned int i, hung_queue = 0; |
|---|
| 328 | + unsigned int i; |
|---|
| 325 | 329 | u32 head, val; |
|---|
| 326 | 330 | |
|---|
| 327 | 331 | pf->tx_timeout_count++; |
|---|
| 328 | 332 | |
|---|
| 329 | | - /* find the stopped queue the same way the stack does */ |
|---|
| 330 | | - for (i = 0; i < netdev->num_tx_queues; i++) { |
|---|
| 331 | | - struct netdev_queue *q; |
|---|
| 332 | | - unsigned long trans_start; |
|---|
| 333 | | - |
|---|
| 334 | | - q = netdev_get_tx_queue(netdev, i); |
|---|
| 335 | | - trans_start = q->trans_start; |
|---|
| 336 | | - if (netif_xmit_stopped(q) && |
|---|
| 337 | | - time_after(jiffies, |
|---|
| 338 | | - (trans_start + netdev->watchdog_timeo))) { |
|---|
| 339 | | - hung_queue = i; |
|---|
| 340 | | - break; |
|---|
| 341 | | - } |
|---|
| 342 | | - } |
|---|
| 343 | | - |
|---|
| 344 | | - if (i == netdev->num_tx_queues) { |
|---|
| 345 | | - netdev_info(netdev, "tx_timeout: no netdev hung queue found\n"); |
|---|
| 346 | | - } else { |
|---|
| 347 | | - /* now that we have an index, find the tx_ring struct */ |
|---|
| 348 | | - for (i = 0; i < vsi->num_queue_pairs; i++) { |
|---|
| 349 | | - if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { |
|---|
| 350 | | - if (hung_queue == |
|---|
| 351 | | - vsi->tx_rings[i]->queue_index) { |
|---|
| 352 | | - tx_ring = vsi->tx_rings[i]; |
|---|
| 353 | | - break; |
|---|
| 354 | | - } |
|---|
| 333 | + /* with txqueue index, find the tx_ring struct */ |
|---|
| 334 | + for (i = 0; i < vsi->num_queue_pairs; i++) { |
|---|
| 335 | + if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { |
|---|
| 336 | + if (txqueue == |
|---|
| 337 | + vsi->tx_rings[i]->queue_index) { |
|---|
| 338 | + tx_ring = vsi->tx_rings[i]; |
|---|
| 339 | + break; |
|---|
| 355 | 340 | } |
|---|
| 356 | 341 | } |
|---|
| 357 | 342 | } |
|---|
| .. | .. |
|---|
| 377 | 362 | val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); |
|---|
| 378 | 363 | |
|---|
| 379 | 364 | netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n", |
|---|
| 380 | | - vsi->seid, hung_queue, tx_ring->next_to_clean, |
|---|
| 365 | + vsi->seid, txqueue, tx_ring->next_to_clean, |
|---|
| 381 | 366 | head, tx_ring->next_to_use, |
|---|
| 382 | 367 | readl(tx_ring->tail), val); |
|---|
| 383 | 368 | } |
|---|
| 384 | 369 | |
|---|
| 385 | 370 | pf->tx_timeout_last_recovery = jiffies; |
|---|
| 386 | | - netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n", |
|---|
| 387 | | - pf->tx_timeout_recovery_level, hung_queue); |
|---|
| 371 | + netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n", |
|---|
| 372 | + pf->tx_timeout_recovery_level, txqueue); |
|---|
| 388 | 373 | |
|---|
| 389 | 374 | switch (pf->tx_timeout_recovery_level) { |
|---|
| 390 | 375 | case 1: |
|---|
| .. | .. |
|---|
| 397 | 382 | set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); |
|---|
| 398 | 383 | break; |
|---|
| 399 | 384 | default: |
|---|
| 400 | | - netdev_err(netdev, "tx_timeout recovery unsuccessful\n"); |
|---|
| 385 | + netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n"); |
|---|
| 386 | + set_bit(__I40E_DOWN_REQUESTED, pf->state); |
|---|
| 387 | + set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state); |
|---|
| 401 | 388 | break; |
|---|
| 402 | 389 | } |
|---|
| 403 | 390 | |
|---|
| .. | .. |
|---|
| 553 | 540 | sizeof(pf->veb[i]->stats)); |
|---|
| 554 | 541 | memset(&pf->veb[i]->stats_offsets, 0, |
|---|
| 555 | 542 | sizeof(pf->veb[i]->stats_offsets)); |
|---|
| 543 | + memset(&pf->veb[i]->tc_stats, 0, |
|---|
| 544 | + sizeof(pf->veb[i]->tc_stats)); |
|---|
| 545 | + memset(&pf->veb[i]->tc_stats_offsets, 0, |
|---|
| 546 | + sizeof(pf->veb[i]->tc_stats_offsets)); |
|---|
| 556 | 547 | pf->veb[i]->stat_offsets_loaded = false; |
|---|
| 557 | 548 | } |
|---|
| 558 | 549 | } |
|---|
| 559 | 550 | pf->hw_csum_rx_error = 0; |
|---|
| 551 | +} |
|---|
| 552 | + |
|---|
| 553 | +/** |
|---|
| 554 | + * i40e_compute_pci_to_hw_id - compute index form PCI function. |
|---|
| 555 | + * @vsi: ptr to the VSI to read from. |
|---|
| 556 | + * @hw: ptr to the hardware info. |
|---|
| 557 | + **/ |
|---|
| 558 | +static u32 i40e_compute_pci_to_hw_id(struct i40e_vsi *vsi, struct i40e_hw *hw) |
|---|
| 559 | +{ |
|---|
| 560 | + int pf_count = i40e_get_pf_count(hw); |
|---|
| 561 | + |
|---|
| 562 | + if (vsi->type == I40E_VSI_SRIOV) |
|---|
| 563 | + return (hw->port * BIT(7)) / pf_count + vsi->vf_id; |
|---|
| 564 | + |
|---|
| 565 | + return hw->port + BIT(7); |
|---|
| 566 | +} |
|---|
| 567 | + |
|---|
| 568 | +/** |
|---|
| 569 | + * i40e_stat_update64 - read and update a 64 bit stat from the chip. |
|---|
| 570 | + * @hw: ptr to the hardware info. |
|---|
| 571 | + * @hireg: the high 32 bit reg to read. |
|---|
| 572 | + * @loreg: the low 32 bit reg to read. |
|---|
| 573 | + * @offset_loaded: has the initial offset been loaded yet. |
|---|
| 574 | + * @offset: ptr to current offset value. |
|---|
| 575 | + * @stat: ptr to the stat. |
|---|
| 576 | + * |
|---|
| 577 | + * Since the device stats are not reset at PFReset, they will not |
|---|
| 578 | + * be zeroed when the driver starts. We'll save the first values read |
|---|
| 579 | + * and use them as offsets to be subtracted from the raw values in order |
|---|
| 580 | + * to report stats that count from zero. |
|---|
| 581 | + **/ |
|---|
| 582 | +static void i40e_stat_update64(struct i40e_hw *hw, u32 hireg, u32 loreg, |
|---|
| 583 | + bool offset_loaded, u64 *offset, u64 *stat) |
|---|
| 584 | +{ |
|---|
| 585 | + u64 new_data; |
|---|
| 586 | + |
|---|
| 587 | + new_data = rd64(hw, loreg); |
|---|
| 588 | + |
|---|
| 589 | + if (!offset_loaded || new_data < *offset) |
|---|
| 590 | + *offset = new_data; |
|---|
| 591 | + *stat = new_data - *offset; |
|---|
| 560 | 592 | } |
|---|
| 561 | 593 | |
|---|
| 562 | 594 | /** |
|---|
| .. | .. |
|---|
| 631 | 663 | } |
|---|
| 632 | 664 | |
|---|
| 633 | 665 | /** |
|---|
| 666 | + * i40e_stats_update_rx_discards - update rx_discards. |
|---|
| 667 | + * @vsi: ptr to the VSI to be updated. |
|---|
| 668 | + * @hw: ptr to the hardware info. |
|---|
| 669 | + * @stat_idx: VSI's stat_counter_idx. |
|---|
| 670 | + * @offset_loaded: ptr to the VSI's stat_offsets_loaded. |
|---|
| 671 | + * @stat_offset: ptr to stat_offset to store first read of specific register. |
|---|
| 672 | + * @stat: ptr to VSI's stat to be updated. |
|---|
| 673 | + **/ |
|---|
| 674 | +static void |
|---|
| 675 | +i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw, |
|---|
| 676 | + int stat_idx, bool offset_loaded, |
|---|
| 677 | + struct i40e_eth_stats *stat_offset, |
|---|
| 678 | + struct i40e_eth_stats *stat) |
|---|
| 679 | +{ |
|---|
| 680 | + u64 rx_rdpc, rx_rxerr; |
|---|
| 681 | + |
|---|
| 682 | + i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded, |
|---|
| 683 | + &stat_offset->rx_discards, &rx_rdpc); |
|---|
| 684 | + i40e_stat_update64(hw, |
|---|
| 685 | + I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)), |
|---|
| 686 | + I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)), |
|---|
| 687 | + offset_loaded, &stat_offset->rx_discards_other, |
|---|
| 688 | + &rx_rxerr); |
|---|
| 689 | + |
|---|
| 690 | + stat->rx_discards = rx_rdpc + rx_rxerr; |
|---|
| 691 | +} |
|---|
| 692 | + |
|---|
| 693 | +/** |
|---|
| 634 | 694 | * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters. |
|---|
| 635 | 695 | * @vsi: the VSI to be updated |
|---|
| 636 | 696 | **/ |
|---|
| .. | .. |
|---|
| 655 | 715 | i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx), |
|---|
| 656 | 716 | vsi->stat_offsets_loaded, |
|---|
| 657 | 717 | &oes->rx_unknown_protocol, &es->rx_unknown_protocol); |
|---|
| 658 | | - i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx), |
|---|
| 659 | | - vsi->stat_offsets_loaded, |
|---|
| 660 | | - &oes->tx_errors, &es->tx_errors); |
|---|
| 661 | 718 | |
|---|
| 662 | 719 | i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx), |
|---|
| 663 | 720 | I40E_GLV_GORCL(stat_idx), |
|---|
| .. | .. |
|---|
| 692 | 749 | I40E_GLV_BPTCL(stat_idx), |
|---|
| 693 | 750 | vsi->stat_offsets_loaded, |
|---|
| 694 | 751 | &oes->tx_broadcast, &es->tx_broadcast); |
|---|
| 752 | + |
|---|
| 753 | + i40e_stats_update_rx_discards(vsi, hw, stat_idx, |
|---|
| 754 | + vsi->stat_offsets_loaded, oes, es); |
|---|
| 755 | + |
|---|
| 695 | 756 | vsi->stat_offsets_loaded = true; |
|---|
| 696 | 757 | } |
|---|
| 697 | 758 | |
|---|
| .. | .. |
|---|
| 699 | 760 | * i40e_update_veb_stats - Update Switch component statistics |
|---|
| 700 | 761 | * @veb: the VEB being updated |
|---|
| 701 | 762 | **/ |
|---|
| 702 | | -static void i40e_update_veb_stats(struct i40e_veb *veb) |
|---|
| 763 | +void i40e_update_veb_stats(struct i40e_veb *veb) |
|---|
| 703 | 764 | { |
|---|
| 704 | 765 | struct i40e_pf *pf = veb->pf; |
|---|
| 705 | 766 | struct i40e_hw *hw = &pf->hw; |
|---|
| .. | .. |
|---|
| 852 | 913 | rx_p += packets; |
|---|
| 853 | 914 | rx_buf += p->rx_stats.alloc_buff_failed; |
|---|
| 854 | 915 | rx_page += p->rx_stats.alloc_page_failed; |
|---|
| 916 | + |
|---|
| 917 | + if (i40e_enabled_xdp_vsi(vsi)) { |
|---|
| 918 | + /* locate XDP ring */ |
|---|
| 919 | + p = READ_ONCE(vsi->xdp_rings[q]); |
|---|
| 920 | + if (!p) |
|---|
| 921 | + continue; |
|---|
| 922 | + |
|---|
| 923 | + do { |
|---|
| 924 | + start = u64_stats_fetch_begin_irq(&p->syncp); |
|---|
| 925 | + packets = p->stats.packets; |
|---|
| 926 | + bytes = p->stats.bytes; |
|---|
| 927 | + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); |
|---|
| 928 | + tx_b += bytes; |
|---|
| 929 | + tx_p += packets; |
|---|
| 930 | + tx_restart += p->tx_stats.restart_queue; |
|---|
| 931 | + tx_busy += p->tx_stats.tx_busy; |
|---|
| 932 | + tx_linearize += p->tx_stats.tx_linearize; |
|---|
| 933 | + tx_force_wb += p->tx_stats.tx_force_wb; |
|---|
| 934 | + } |
|---|
| 855 | 935 | } |
|---|
| 856 | 936 | rcu_read_unlock(); |
|---|
| 857 | 937 | vsi->tx_restart = tx_restart; |
|---|
| .. | .. |
|---|
| 1129 | 1209 | i40e_update_pf_stats(pf); |
|---|
| 1130 | 1210 | |
|---|
| 1131 | 1211 | i40e_update_vsi_stats(vsi); |
|---|
| 1212 | +} |
|---|
| 1213 | + |
|---|
| 1214 | +/** |
|---|
| 1215 | + * i40e_count_filters - counts VSI mac filters |
|---|
| 1216 | + * @vsi: the VSI to be searched |
|---|
| 1217 | + * |
|---|
| 1218 | + * Returns count of mac filters |
|---|
| 1219 | + **/ |
|---|
| 1220 | +int i40e_count_filters(struct i40e_vsi *vsi) |
|---|
| 1221 | +{ |
|---|
| 1222 | + struct i40e_mac_filter *f; |
|---|
| 1223 | + struct hlist_node *h; |
|---|
| 1224 | + int bkt; |
|---|
| 1225 | + int cnt = 0; |
|---|
| 1226 | + |
|---|
| 1227 | + hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) |
|---|
| 1228 | + ++cnt; |
|---|
| 1229 | + |
|---|
| 1230 | + return cnt; |
|---|
| 1132 | 1231 | } |
|---|
| 1133 | 1232 | |
|---|
| 1134 | 1233 | /** |
|---|
| .. | .. |
|---|
| 1530 | 1629 | bool found = false; |
|---|
| 1531 | 1630 | int bkt; |
|---|
| 1532 | 1631 | |
|---|
| 1533 | | - WARN(!spin_is_locked(&vsi->mac_filter_hash_lock), |
|---|
| 1534 | | - "Missing mac_filter_hash_lock\n"); |
|---|
| 1632 | + lockdep_assert_held(&vsi->mac_filter_hash_lock); |
|---|
| 1535 | 1633 | hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { |
|---|
| 1536 | 1634 | if (ether_addr_equal(macaddr, f->macaddr)) { |
|---|
| 1537 | 1635 | __i40e_del_filter(vsi, f); |
|---|
| .. | .. |
|---|
| 1569 | 1667 | return 0; |
|---|
| 1570 | 1668 | } |
|---|
| 1571 | 1669 | |
|---|
| 1572 | | - if (test_bit(__I40E_VSI_DOWN, vsi->back->state) || |
|---|
| 1573 | | - test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state)) |
|---|
| 1670 | + if (test_bit(__I40E_DOWN, pf->state) || |
|---|
| 1671 | + test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) |
|---|
| 1574 | 1672 | return -EADDRNOTAVAIL; |
|---|
| 1575 | 1673 | |
|---|
| 1576 | 1674 | if (ether_addr_equal(hw->mac.addr, addr->sa_data)) |
|---|
| .. | .. |
|---|
| 1594 | 1692 | if (vsi->type == I40E_VSI_MAIN) { |
|---|
| 1595 | 1693 | i40e_status ret; |
|---|
| 1596 | 1694 | |
|---|
| 1597 | | - ret = i40e_aq_mac_address_write(&vsi->back->hw, |
|---|
| 1598 | | - I40E_AQC_WRITE_TYPE_LAA_WOL, |
|---|
| 1695 | + ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL, |
|---|
| 1599 | 1696 | addr->sa_data, NULL); |
|---|
| 1600 | 1697 | if (ret) |
|---|
| 1601 | 1698 | netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n", |
|---|
| .. | .. |
|---|
| 1606 | 1703 | /* schedule our worker thread which will take care of |
|---|
| 1607 | 1704 | * applying the new filter changes |
|---|
| 1608 | 1705 | */ |
|---|
| 1609 | | - i40e_service_event_schedule(vsi->back); |
|---|
| 1706 | + i40e_service_event_schedule(pf); |
|---|
| 1610 | 1707 | return 0; |
|---|
| 1611 | 1708 | } |
|---|
| 1612 | 1709 | |
|---|
| .. | .. |
|---|
| 1614 | 1711 | * i40e_config_rss_aq - Prepare for RSS using AQ commands |
|---|
| 1615 | 1712 | * @vsi: vsi structure |
|---|
| 1616 | 1713 | * @seed: RSS hash seed |
|---|
| 1714 | + * @lut: pointer to lookup table of lut_size |
|---|
| 1715 | + * @lut_size: size of the lookup table |
|---|
| 1617 | 1716 | **/ |
|---|
| 1618 | 1717 | static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, |
|---|
| 1619 | 1718 | u8 *lut, u16 lut_size) |
|---|
| .. | .. |
|---|
| 1635 | 1734 | } |
|---|
| 1636 | 1735 | } |
|---|
| 1637 | 1736 | if (lut) { |
|---|
| 1638 | | - bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false; |
|---|
| 1737 | + bool pf_lut = vsi->type == I40E_VSI_MAIN; |
|---|
| 1639 | 1738 | |
|---|
| 1640 | 1739 | ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); |
|---|
| 1641 | 1740 | if (ret) { |
|---|
| .. | .. |
|---|
| 1800 | 1899 | |
|---|
| 1801 | 1900 | sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; |
|---|
| 1802 | 1901 | offset = 0; |
|---|
| 1902 | + /* zero out queue mapping, it will get updated on the end of the function */ |
|---|
| 1903 | + memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping)); |
|---|
| 1803 | 1904 | |
|---|
| 1804 | 1905 | if (vsi->type == I40E_VSI_MAIN) { |
|---|
| 1805 | 1906 | /* This code helps add more queue to the VSI if we have |
|---|
| .. | .. |
|---|
| 1808 | 1909 | * non-zero req_queue_pairs says that user requested a new |
|---|
| 1809 | 1910 | * queue count via ethtool's set_channels, so use this |
|---|
| 1810 | 1911 | * value for queues distribution across traffic classes |
|---|
| 1912 | + * We need at least one queue pair for the interface |
|---|
| 1913 | + * to be usable as we see in else statement. |
|---|
| 1811 | 1914 | */ |
|---|
| 1812 | 1915 | if (vsi->req_queue_pairs > 0) |
|---|
| 1813 | 1916 | vsi->num_queue_pairs = vsi->req_queue_pairs; |
|---|
| 1814 | 1917 | else if (pf->flags & I40E_FLAG_MSIX_ENABLED) |
|---|
| 1815 | 1918 | vsi->num_queue_pairs = pf->num_lan_msix; |
|---|
| 1919 | + else |
|---|
| 1920 | + vsi->num_queue_pairs = 1; |
|---|
| 1816 | 1921 | } |
|---|
| 1817 | 1922 | |
|---|
| 1818 | 1923 | /* Number of queues per enabled TC */ |
|---|
| 1819 | | - if (vsi->type == I40E_VSI_MAIN) |
|---|
| 1924 | + if (vsi->type == I40E_VSI_MAIN || |
|---|
| 1925 | + (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0)) |
|---|
| 1820 | 1926 | num_tc_qps = vsi->num_queue_pairs; |
|---|
| 1821 | 1927 | else |
|---|
| 1822 | 1928 | num_tc_qps = vsi->alloc_queue_pairs; |
|---|
| 1929 | + |
|---|
| 1823 | 1930 | if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { |
|---|
| 1824 | 1931 | /* Find numtc from enabled TC bitmap */ |
|---|
| 1825 | 1932 | for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { |
|---|
| .. | .. |
|---|
| 1858 | 1965 | num_tc_qps); |
|---|
| 1859 | 1966 | break; |
|---|
| 1860 | 1967 | } |
|---|
| 1861 | | - /* fall through */ |
|---|
| 1968 | + fallthrough; |
|---|
| 1862 | 1969 | case I40E_VSI_FDIR: |
|---|
| 1863 | 1970 | case I40E_VSI_SRIOV: |
|---|
| 1864 | 1971 | case I40E_VSI_VMDQ2: |
|---|
| .. | .. |
|---|
| 1897 | 2004 | } |
|---|
| 1898 | 2005 | ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); |
|---|
| 1899 | 2006 | } |
|---|
| 1900 | | - /* Do not change previously set num_queue_pairs for PFs */ |
|---|
| 2007 | + /* Do not change previously set num_queue_pairs for PFs and VFs*/ |
|---|
| 1901 | 2008 | if ((vsi->type == I40E_VSI_MAIN && numtc != 1) || |
|---|
| 1902 | | - vsi->type != I40E_VSI_MAIN) |
|---|
| 2009 | + (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) || |
|---|
| 2010 | + (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV)) |
|---|
| 1903 | 2011 | vsi->num_queue_pairs = offset; |
|---|
| 2012 | + |
|---|
| 1904 | 2013 | /* Scheduler section valid can only be set for ADD VSI */ |
|---|
| 1905 | 2014 | if (is_add) { |
|---|
| 1906 | 2015 | sections |= I40E_AQ_VSI_PROP_SCHED_VALID; |
|---|
| .. | .. |
|---|
| 2154 | 2263 | fcnt = i40e_update_filter_state(num_add, list, add_head); |
|---|
| 2155 | 2264 | |
|---|
| 2156 | 2265 | if (fcnt != num_add) { |
|---|
| 2157 | | - set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); |
|---|
| 2158 | | - dev_warn(&vsi->back->pdev->dev, |
|---|
| 2159 | | - "Error %s adding RX filters on %s, promiscuous mode forced on\n", |
|---|
| 2160 | | - i40e_aq_str(hw, aq_err), |
|---|
| 2161 | | - vsi_name); |
|---|
| 2266 | + if (vsi->type == I40E_VSI_MAIN) { |
|---|
| 2267 | + set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); |
|---|
| 2268 | + dev_warn(&vsi->back->pdev->dev, |
|---|
| 2269 | + "Error %s adding RX filters on %s, promiscuous mode forced on\n", |
|---|
| 2270 | + i40e_aq_str(hw, aq_err), vsi_name); |
|---|
| 2271 | + } else if (vsi->type == I40E_VSI_SRIOV || |
|---|
| 2272 | + vsi->type == I40E_VSI_VMDQ1 || |
|---|
| 2273 | + vsi->type == I40E_VSI_VMDQ2) { |
|---|
| 2274 | + dev_warn(&vsi->back->pdev->dev, |
|---|
| 2275 | + "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n", |
|---|
| 2276 | + i40e_aq_str(hw, aq_err), vsi_name, vsi_name); |
|---|
| 2277 | + } else { |
|---|
| 2278 | + dev_warn(&vsi->back->pdev->dev, |
|---|
| 2279 | + "Error %s adding RX filters on %s, incorrect VSI type: %i.\n", |
|---|
| 2280 | + i40e_aq_str(hw, aq_err), vsi_name, vsi->type); |
|---|
| 2281 | + } |
|---|
| 2162 | 2282 | } |
|---|
| 2163 | 2283 | } |
|---|
| 2164 | 2284 | |
|---|
| .. | .. |
|---|
| 2565 | 2685 | vsi_name, |
|---|
| 2566 | 2686 | i40e_stat_str(hw, aq_ret), |
|---|
| 2567 | 2687 | i40e_aq_str(hw, hw->aq.asq_last_status)); |
|---|
| 2688 | + } else { |
|---|
| 2689 | + dev_info(&pf->pdev->dev, "%s allmulti mode.\n", |
|---|
| 2690 | + cur_multipromisc ? "entering" : "leaving"); |
|---|
| 2568 | 2691 | } |
|---|
| 2569 | 2692 | } |
|---|
| 2570 | 2693 | |
|---|
| .. | .. |
|---|
| 2665 | 2788 | struct i40e_pf *pf = vsi->back; |
|---|
| 2666 | 2789 | |
|---|
| 2667 | 2790 | if (i40e_enabled_xdp_vsi(vsi)) { |
|---|
| 2668 | | - int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; |
|---|
| 2791 | + int frame_size = new_mtu + I40E_PACKET_HDR_PAD; |
|---|
| 2669 | 2792 | |
|---|
| 2670 | 2793 | if (frame_size > i40e_max_xdp_frame_size(vsi)) |
|---|
| 2671 | 2794 | return -EINVAL; |
|---|
| 2672 | 2795 | } |
|---|
| 2673 | 2796 | |
|---|
| 2674 | | - netdev_info(netdev, "changing MTU from %d to %d\n", |
|---|
| 2675 | | - netdev->mtu, new_mtu); |
|---|
| 2797 | + netdev_dbg(netdev, "changing MTU from %d to %d\n", |
|---|
| 2798 | + netdev->mtu, new_mtu); |
|---|
| 2676 | 2799 | netdev->mtu = new_mtu; |
|---|
| 2677 | 2800 | if (netif_running(netdev)) |
|---|
| 2678 | 2801 | i40e_vsi_reinit_locked(vsi); |
|---|
| .. | .. |
|---|
| 3014 | 3137 | **/ |
|---|
| 3015 | 3138 | void i40e_vsi_remove_pvid(struct i40e_vsi *vsi) |
|---|
| 3016 | 3139 | { |
|---|
| 3017 | | - i40e_vlan_stripping_disable(vsi); |
|---|
| 3018 | | - |
|---|
| 3019 | 3140 | vsi->info.pvid = 0; |
|---|
| 3141 | + |
|---|
| 3142 | + i40e_vlan_stripping_disable(vsi); |
|---|
| 3020 | 3143 | } |
|---|
| 3021 | 3144 | |
|---|
| 3022 | 3145 | /** |
|---|
| .. | .. |
|---|
| 3129 | 3252 | } |
|---|
| 3130 | 3253 | |
|---|
| 3131 | 3254 | /** |
|---|
| 3255 | + * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC is enabled |
|---|
| 3256 | + * @ring: The Tx or Rx ring |
|---|
| 3257 | + * |
|---|
| 3258 | + * Returns the AF_XDP buffer pool or NULL. |
|---|
| 3259 | + **/ |
|---|
| 3260 | +static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring) |
|---|
| 3261 | +{ |
|---|
| 3262 | + bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi); |
|---|
| 3263 | + int qid = ring->queue_index; |
|---|
| 3264 | + |
|---|
| 3265 | + if (ring_is_xdp(ring)) |
|---|
| 3266 | + qid -= ring->vsi->alloc_queue_pairs; |
|---|
| 3267 | + |
|---|
| 3268 | + if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps)) |
|---|
| 3269 | + return NULL; |
|---|
| 3270 | + |
|---|
| 3271 | + return xsk_get_pool_from_qid(ring->vsi->netdev, qid); |
|---|
| 3272 | +} |
|---|
| 3273 | + |
|---|
| 3274 | +/** |
|---|
| 3132 | 3275 | * i40e_configure_tx_ring - Configure a transmit ring context and rest |
|---|
| 3133 | 3276 | * @ring: The Tx ring to configure |
|---|
| 3134 | 3277 | * |
|---|
| .. | .. |
|---|
| 3142 | 3285 | struct i40e_hmc_obj_txq tx_ctx; |
|---|
| 3143 | 3286 | i40e_status err = 0; |
|---|
| 3144 | 3287 | u32 qtx_ctl = 0; |
|---|
| 3288 | + |
|---|
| 3289 | + if (ring_is_xdp(ring)) |
|---|
| 3290 | + ring->xsk_pool = i40e_xsk_pool(ring); |
|---|
| 3145 | 3291 | |
|---|
| 3146 | 3292 | /* some ATR related tx ring init */ |
|---|
| 3147 | 3293 | if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) { |
|---|
| .. | .. |
|---|
| 3252 | 3398 | struct i40e_hw *hw = &vsi->back->hw; |
|---|
| 3253 | 3399 | struct i40e_hmc_obj_rxq rx_ctx; |
|---|
| 3254 | 3400 | i40e_status err = 0; |
|---|
| 3401 | + bool ok; |
|---|
| 3402 | + int ret; |
|---|
| 3255 | 3403 | |
|---|
| 3256 | 3404 | bitmap_zero(ring->state, __I40E_RING_STATE_NBITS); |
|---|
| 3257 | 3405 | |
|---|
| 3258 | 3406 | /* clear the context structure first */ |
|---|
| 3259 | 3407 | memset(&rx_ctx, 0, sizeof(rx_ctx)); |
|---|
| 3260 | 3408 | |
|---|
| 3261 | | - ring->rx_buf_len = vsi->rx_buf_len; |
|---|
| 3409 | + if (ring->vsi->type == I40E_VSI_MAIN) |
|---|
| 3410 | + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); |
|---|
| 3411 | + |
|---|
| 3412 | + ring->xsk_pool = i40e_xsk_pool(ring); |
|---|
| 3413 | + if (ring->xsk_pool) { |
|---|
| 3414 | + ring->rx_buf_len = |
|---|
| 3415 | + xsk_pool_get_rx_frame_size(ring->xsk_pool); |
|---|
| 3416 | + /* For AF_XDP ZC, we disallow packets to span on |
|---|
| 3417 | + * multiple buffers, thus letting us skip that |
|---|
| 3418 | + * handling in the fast-path. |
|---|
| 3419 | + */ |
|---|
| 3420 | + chain_len = 1; |
|---|
| 3421 | + ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, |
|---|
| 3422 | + MEM_TYPE_XSK_BUFF_POOL, |
|---|
| 3423 | + NULL); |
|---|
| 3424 | + if (ret) |
|---|
| 3425 | + return ret; |
|---|
| 3426 | + dev_info(&vsi->back->pdev->dev, |
|---|
| 3427 | + "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n", |
|---|
| 3428 | + ring->queue_index); |
|---|
| 3429 | + |
|---|
| 3430 | + } else { |
|---|
| 3431 | + ring->rx_buf_len = vsi->rx_buf_len; |
|---|
| 3432 | + if (ring->vsi->type == I40E_VSI_MAIN) { |
|---|
| 3433 | + ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, |
|---|
| 3434 | + MEM_TYPE_PAGE_SHARED, |
|---|
| 3435 | + NULL); |
|---|
| 3436 | + if (ret) |
|---|
| 3437 | + return ret; |
|---|
| 3438 | + } |
|---|
| 3439 | + } |
|---|
| 3262 | 3440 | |
|---|
| 3263 | 3441 | rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len, |
|---|
| 3264 | 3442 | BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); |
|---|
| .. | .. |
|---|
| 3266 | 3444 | rx_ctx.base = (ring->dma / 128); |
|---|
| 3267 | 3445 | rx_ctx.qlen = ring->count; |
|---|
| 3268 | 3446 | |
|---|
| 3269 | | - /* use 32 byte descriptors */ |
|---|
| 3270 | | - rx_ctx.dsize = 1; |
|---|
| 3447 | + /* use 16 byte descriptors */ |
|---|
| 3448 | + rx_ctx.dsize = 0; |
|---|
| 3271 | 3449 | |
|---|
| 3272 | 3450 | /* descriptor type is always zero |
|---|
| 3273 | 3451 | * rx_ctx.dtype = 0; |
|---|
| .. | .. |
|---|
| 3314 | 3492 | ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); |
|---|
| 3315 | 3493 | writel(0, ring->tail); |
|---|
| 3316 | 3494 | |
|---|
| 3317 | | - i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); |
|---|
| 3495 | + if (ring->xsk_pool) { |
|---|
| 3496 | + xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); |
|---|
| 3497 | + ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)); |
|---|
| 3498 | + } else { |
|---|
| 3499 | + ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); |
|---|
| 3500 | + } |
|---|
| 3501 | + if (!ok) { |
|---|
| 3502 | + /* Log this in case the user has forgotten to give the kernel |
|---|
| 3503 | + * any buffers, even later in the application. |
|---|
| 3504 | + */ |
|---|
| 3505 | + dev_info(&vsi->back->pdev->dev, |
|---|
| 3506 | + "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n", |
|---|
| 3507 | + ring->xsk_pool ? "AF_XDP ZC enabled " : "", |
|---|
| 3508 | + ring->queue_index, pf_q); |
|---|
| 3509 | + } |
|---|
| 3318 | 3510 | |
|---|
| 3319 | 3511 | return 0; |
|---|
| 3320 | 3512 | } |
|---|
| .. | .. |
|---|
| 3333 | 3525 | for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) |
|---|
| 3334 | 3526 | err = i40e_configure_tx_ring(vsi->tx_rings[i]); |
|---|
| 3335 | 3527 | |
|---|
| 3336 | | - if (!i40e_enabled_xdp_vsi(vsi)) |
|---|
| 3528 | + if (err || !i40e_enabled_xdp_vsi(vsi)) |
|---|
| 3337 | 3529 | return err; |
|---|
| 3338 | 3530 | |
|---|
| 3339 | 3531 | for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) |
|---|
| .. | .. |
|---|
| 3631 | 3823 | (I40E_QUEUE_TYPE_TX |
|---|
| 3632 | 3824 | << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); |
|---|
| 3633 | 3825 | |
|---|
| 3634 | | - wr32(hw, I40E_QINT_TQCTL(nextqp), val); |
|---|
| 3826 | + wr32(hw, I40E_QINT_TQCTL(nextqp), val); |
|---|
| 3635 | 3827 | } |
|---|
| 3636 | 3828 | |
|---|
| 3637 | 3829 | val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | |
|---|
| .. | .. |
|---|
| 4006 | 4198 | enable_intr: |
|---|
| 4007 | 4199 | /* re-enable interrupt causes */ |
|---|
| 4008 | 4200 | wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); |
|---|
| 4009 | | - if (!test_bit(__I40E_DOWN, pf->state)) { |
|---|
| 4201 | + if (!test_bit(__I40E_DOWN, pf->state) || |
|---|
| 4202 | + test_bit(__I40E_RECOVERY_MODE, pf->state)) { |
|---|
| 4010 | 4203 | i40e_service_event_schedule(pf); |
|---|
| 4011 | 4204 | i40e_irq_dynamic_enable_icr0(pf); |
|---|
| 4012 | 4205 | } |
|---|
| .. | .. |
|---|
| 5321 | 5514 | } |
|---|
| 5322 | 5515 | |
|---|
| 5323 | 5516 | /** |
|---|
| 5517 | + * i40e_update_adq_vsi_queues - update queue mapping for ADq VSI |
|---|
| 5518 | + * @vsi: the VSI being reconfigured |
|---|
| 5519 | + * @vsi_offset: offset from main VF VSI |
|---|
| 5520 | + */ |
|---|
| 5521 | +int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset) |
|---|
| 5522 | +{ |
|---|
| 5523 | + struct i40e_vsi_context ctxt = {}; |
|---|
| 5524 | + struct i40e_pf *pf; |
|---|
| 5525 | + struct i40e_hw *hw; |
|---|
| 5526 | + int ret; |
|---|
| 5527 | + |
|---|
| 5528 | + if (!vsi) |
|---|
| 5529 | + return I40E_ERR_PARAM; |
|---|
| 5530 | + pf = vsi->back; |
|---|
| 5531 | + hw = &pf->hw; |
|---|
| 5532 | + |
|---|
| 5533 | + ctxt.seid = vsi->seid; |
|---|
| 5534 | + ctxt.pf_num = hw->pf_id; |
|---|
| 5535 | + ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset; |
|---|
| 5536 | + ctxt.uplink_seid = vsi->uplink_seid; |
|---|
| 5537 | + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; |
|---|
| 5538 | + ctxt.flags = I40E_AQ_VSI_TYPE_VF; |
|---|
| 5539 | + ctxt.info = vsi->info; |
|---|
| 5540 | + |
|---|
| 5541 | + i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc, |
|---|
| 5542 | + false); |
|---|
| 5543 | + if (vsi->reconfig_rss) { |
|---|
| 5544 | + vsi->rss_size = min_t(int, pf->alloc_rss_size, |
|---|
| 5545 | + vsi->num_queue_pairs); |
|---|
| 5546 | + ret = i40e_vsi_config_rss(vsi); |
|---|
| 5547 | + if (ret) { |
|---|
| 5548 | + dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n"); |
|---|
| 5549 | + return ret; |
|---|
| 5550 | + } |
|---|
| 5551 | + vsi->reconfig_rss = false; |
|---|
| 5552 | + } |
|---|
| 5553 | + |
|---|
| 5554 | + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); |
|---|
| 5555 | + if (ret) { |
|---|
| 5556 | + dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n", |
|---|
| 5557 | + i40e_stat_str(hw, ret), |
|---|
| 5558 | + i40e_aq_str(hw, hw->aq.asq_last_status)); |
|---|
| 5559 | + return ret; |
|---|
| 5560 | + } |
|---|
| 5561 | + /* update the local VSI info with updated queue map */ |
|---|
| 5562 | + i40e_vsi_update_queue_map(vsi, &ctxt); |
|---|
| 5563 | + vsi->info.valid_sections = 0; |
|---|
| 5564 | + |
|---|
| 5565 | + return ret; |
|---|
| 5566 | +} |
|---|
| 5567 | + |
|---|
| 5568 | +/** |
|---|
| 5324 | 5569 | * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map |
|---|
| 5325 | 5570 | * @vsi: VSI to be configured |
|---|
| 5326 | 5571 | * @enabled_tc: TC bitmap |
|---|
| .. | .. |
|---|
| 5482 | 5727 | } |
|---|
| 5483 | 5728 | |
|---|
| 5484 | 5729 | /** |
|---|
| 5730 | + * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits |
|---|
| 5731 | + * @vsi: Pointer to vsi structure |
|---|
| 5732 | + * @max_tx_rate: max TX rate in bytes to be converted into Mbits |
|---|
| 5733 | + * |
|---|
| 5734 | + * Helper function to convert units before send to set BW limit |
|---|
| 5735 | + **/ |
|---|
| 5736 | +static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate) |
|---|
| 5737 | +{ |
|---|
| 5738 | + if (max_tx_rate < I40E_BW_MBPS_DIVISOR) { |
|---|
| 5739 | + dev_warn(&vsi->back->pdev->dev, |
|---|
| 5740 | + "Setting max tx rate to minimum usable value of 50Mbps.\n"); |
|---|
| 5741 | + max_tx_rate = I40E_BW_CREDIT_DIVISOR; |
|---|
| 5742 | + } else { |
|---|
| 5743 | + do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); |
|---|
| 5744 | + } |
|---|
| 5745 | + |
|---|
| 5746 | + return max_tx_rate; |
|---|
| 5747 | +} |
|---|
| 5748 | + |
|---|
| 5749 | +/** |
|---|
| 5485 | 5750 | * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate |
|---|
| 5486 | 5751 | * @vsi: VSI to be configured |
|---|
| 5487 | 5752 | * @seid: seid of the channel/VSI |
|---|
| .. | .. |
|---|
| 5503 | 5768 | max_tx_rate, seid); |
|---|
| 5504 | 5769 | return -EINVAL; |
|---|
| 5505 | 5770 | } |
|---|
| 5506 | | - if (max_tx_rate && max_tx_rate < 50) { |
|---|
| 5771 | + if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) { |
|---|
| 5507 | 5772 | dev_warn(&pf->pdev->dev, |
|---|
| 5508 | 5773 | "Setting max tx rate to minimum usable value of 50Mbps.\n"); |
|---|
| 5509 | | - max_tx_rate = 50; |
|---|
| 5774 | + max_tx_rate = I40E_BW_CREDIT_DIVISOR; |
|---|
| 5510 | 5775 | } |
|---|
| 5511 | 5776 | |
|---|
| 5512 | 5777 | /* Tx rate credits are in values of 50Mbps, 0 is disabled */ |
|---|
| .. | .. |
|---|
| 5608 | 5873 | kfree(ch); |
|---|
| 5609 | 5874 | } |
|---|
| 5610 | 5875 | INIT_LIST_HEAD(&vsi->ch_list); |
|---|
| 5611 | | -} |
|---|
| 5612 | | - |
|---|
| 5613 | | -/** |
|---|
| 5614 | | - * i40e_is_any_channel - channel exist or not |
|---|
| 5615 | | - * @vsi: ptr to VSI to which channels are associated with |
|---|
| 5616 | | - * |
|---|
| 5617 | | - * Returns true or false if channel(s) exist for associated VSI or not |
|---|
| 5618 | | - **/ |
|---|
| 5619 | | -static bool i40e_is_any_channel(struct i40e_vsi *vsi) |
|---|
| 5620 | | -{ |
|---|
| 5621 | | - struct i40e_channel *ch, *ch_tmp; |
|---|
| 5622 | | - |
|---|
| 5623 | | - list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { |
|---|
| 5624 | | - if (ch->initialized) |
|---|
| 5625 | | - return true; |
|---|
| 5626 | | - } |
|---|
| 5627 | | - |
|---|
| 5628 | | - return false; |
|---|
| 5629 | 5876 | } |
|---|
| 5630 | 5877 | |
|---|
| 5631 | 5878 | /** |
|---|
| .. | .. |
|---|
| 5764 | 6011 | /** |
|---|
| 5765 | 6012 | * i40e_channel_setup_queue_map - Setup a channel queue map |
|---|
| 5766 | 6013 | * @pf: ptr to PF device |
|---|
| 5767 | | - * @vsi: the VSI being setup |
|---|
| 5768 | 6014 | * @ctxt: VSI context structure |
|---|
| 5769 | 6015 | * @ch: ptr to channel structure |
|---|
| 5770 | 6016 | * |
|---|
| .. | .. |
|---|
| 5852 | 6098 | return -ENOENT; |
|---|
| 5853 | 6099 | } |
|---|
| 5854 | 6100 | |
|---|
| 5855 | | - /* Success, update channel */ |
|---|
| 5856 | | - ch->enabled_tc = enabled_tc; |
|---|
| 6101 | + /* Success, update channel, set enabled_tc only if the channel |
|---|
| 6102 | + * is not a macvlan |
|---|
| 6103 | + */ |
|---|
| 6104 | + ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc; |
|---|
| 5857 | 6105 | ch->seid = ctxt.seid; |
|---|
| 5858 | 6106 | ch->vsi_number = ctxt.vsi_number; |
|---|
| 5859 | | - ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx); |
|---|
| 6107 | + ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx); |
|---|
| 5860 | 6108 | |
|---|
| 5861 | 6109 | /* copy just the sections touched not the entire info |
|---|
| 5862 | 6110 | * since not all sections are valid as returned by |
|---|
| .. | .. |
|---|
| 6005 | 6253 | /** |
|---|
| 6006 | 6254 | * i40e_setup_channel - setup new channel using uplink element |
|---|
| 6007 | 6255 | * @pf: ptr to PF device |
|---|
| 6008 | | - * @type: type of channel to be created (VMDq2/VF) |
|---|
| 6009 | | - * @uplink_seid: underlying HW switching element (VEB) ID |
|---|
| 6256 | + * @vsi: pointer to the VSI to set up the channel within |
|---|
| 6010 | 6257 | * @ch: ptr to channel structure |
|---|
| 6011 | 6258 | * |
|---|
| 6012 | 6259 | * Setup new channel (VSI) based on specified type (VMDq2/VF) |
|---|
| .. | .. |
|---|
| 6133 | 6380 | /* By default we are in VEPA mode, if this is the first VF/VMDq |
|---|
| 6134 | 6381 | * VSI to be added switch to VEB mode. |
|---|
| 6135 | 6382 | */ |
|---|
| 6136 | | - if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) || |
|---|
| 6137 | | - (!i40e_is_any_channel(vsi))) { |
|---|
| 6138 | | - if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) { |
|---|
| 6139 | | - dev_dbg(&pf->pdev->dev, |
|---|
| 6140 | | - "Failed to create channel. Override queues (%u) not power of 2\n", |
|---|
| 6141 | | - vsi->tc_config.tc_info[0].qcount); |
|---|
| 6142 | | - return -EINVAL; |
|---|
| 6143 | | - } |
|---|
| 6144 | 6383 | |
|---|
| 6145 | | - if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { |
|---|
| 6146 | | - pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; |
|---|
| 6384 | + if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { |
|---|
| 6385 | + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; |
|---|
| 6147 | 6386 | |
|---|
| 6148 | | - if (vsi->type == I40E_VSI_MAIN) { |
|---|
| 6149 | | - if (pf->flags & I40E_FLAG_TC_MQPRIO) |
|---|
| 6150 | | - i40e_do_reset(pf, I40E_PF_RESET_FLAG, |
|---|
| 6151 | | - true); |
|---|
| 6152 | | - else |
|---|
| 6153 | | - i40e_do_reset_safe(pf, |
|---|
| 6154 | | - I40E_PF_RESET_FLAG); |
|---|
| 6155 | | - } |
|---|
| 6387 | + if (vsi->type == I40E_VSI_MAIN) { |
|---|
| 6388 | + if (pf->flags & I40E_FLAG_TC_MQPRIO) |
|---|
| 6389 | + i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); |
|---|
| 6390 | + else |
|---|
| 6391 | + i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); |
|---|
| 6156 | 6392 | } |
|---|
| 6157 | 6393 | /* now onwards for main VSI, number of queues will be value |
|---|
| 6158 | 6394 | * of TC0's queue count |
|---|
| .. | .. |
|---|
| 6416 | 6652 | * Also do not enable DCBx if FW LLDP agent is disabled |
|---|
| 6417 | 6653 | */ |
|---|
| 6418 | 6654 | if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) || |
|---|
| 6419 | | - (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)) |
|---|
| 6655 | + (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)) { |
|---|
| 6656 | + dev_info(&pf->pdev->dev, "DCB is not supported or FW LLDP is disabled\n"); |
|---|
| 6657 | + err = I40E_NOT_SUPPORTED; |
|---|
| 6420 | 6658 | goto out; |
|---|
| 6659 | + } |
|---|
| 6421 | 6660 | |
|---|
| 6422 | | - /* Get the initial DCB configuration */ |
|---|
| 6423 | | - err = i40e_init_dcb(hw); |
|---|
| 6661 | + err = i40e_init_dcb(hw, true); |
|---|
| 6424 | 6662 | if (!err) { |
|---|
| 6425 | 6663 | /* Device/Function is not DCBX capable */ |
|---|
| 6426 | 6664 | if ((!hw->func_caps.dcb) || |
|---|
| .. | .. |
|---|
| 6457 | 6695 | return err; |
|---|
| 6458 | 6696 | } |
|---|
| 6459 | 6697 | #endif /* CONFIG_I40E_DCB */ |
|---|
| 6460 | | -#define SPEED_SIZE 14 |
|---|
| 6461 | | -#define FC_SIZE 8 |
|---|
| 6698 | + |
|---|
| 6462 | 6699 | /** |
|---|
| 6463 | 6700 | * i40e_print_link_message - print link up or down |
|---|
| 6464 | 6701 | * @vsi: the VSI for which link needs a message |
|---|
| .. | .. |
|---|
| 6474 | 6711 | char *req_fec = ""; |
|---|
| 6475 | 6712 | char *an = ""; |
|---|
| 6476 | 6713 | |
|---|
| 6477 | | - new_speed = pf->hw.phy.link_info.link_speed; |
|---|
| 6714 | + if (isup) |
|---|
| 6715 | + new_speed = pf->hw.phy.link_info.link_speed; |
|---|
| 6716 | + else |
|---|
| 6717 | + new_speed = I40E_LINK_SPEED_UNKNOWN; |
|---|
| 6478 | 6718 | |
|---|
| 6479 | 6719 | if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed)) |
|---|
| 6480 | 6720 | return; |
|---|
| .. | .. |
|---|
| 6507 | 6747 | case I40E_LINK_SPEED_10GB: |
|---|
| 6508 | 6748 | speed = "10 G"; |
|---|
| 6509 | 6749 | break; |
|---|
| 6750 | + case I40E_LINK_SPEED_5GB: |
|---|
| 6751 | + speed = "5 G"; |
|---|
| 6752 | + break; |
|---|
| 6753 | + case I40E_LINK_SPEED_2_5GB: |
|---|
| 6754 | + speed = "2.5 G"; |
|---|
| 6755 | + break; |
|---|
| 6510 | 6756 | case I40E_LINK_SPEED_1GB: |
|---|
| 6511 | 6757 | speed = "1000 M"; |
|---|
| 6512 | 6758 | break; |
|---|
| .. | .. |
|---|
| 6533 | 6779 | } |
|---|
| 6534 | 6780 | |
|---|
| 6535 | 6781 | if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) { |
|---|
| 6536 | | - req_fec = ", Requested FEC: None"; |
|---|
| 6537 | | - fec = ", FEC: None"; |
|---|
| 6538 | | - an = ", Autoneg: False"; |
|---|
| 6782 | + req_fec = "None"; |
|---|
| 6783 | + fec = "None"; |
|---|
| 6784 | + an = "False"; |
|---|
| 6539 | 6785 | |
|---|
| 6540 | 6786 | if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) |
|---|
| 6541 | | - an = ", Autoneg: True"; |
|---|
| 6787 | + an = "True"; |
|---|
| 6542 | 6788 | |
|---|
| 6543 | 6789 | if (pf->hw.phy.link_info.fec_info & |
|---|
| 6544 | 6790 | I40E_AQ_CONFIG_FEC_KR_ENA) |
|---|
| 6545 | | - fec = ", FEC: CL74 FC-FEC/BASE-R"; |
|---|
| 6791 | + fec = "CL74 FC-FEC/BASE-R"; |
|---|
| 6546 | 6792 | else if (pf->hw.phy.link_info.fec_info & |
|---|
| 6547 | 6793 | I40E_AQ_CONFIG_FEC_RS_ENA) |
|---|
| 6548 | | - fec = ", FEC: CL108 RS-FEC"; |
|---|
| 6794 | + fec = "CL108 RS-FEC"; |
|---|
| 6549 | 6795 | |
|---|
| 6550 | 6796 | /* 'CL108 RS-FEC' should be displayed when RS is requested, or |
|---|
| 6551 | 6797 | * both RS and FC are requested |
|---|
| .. | .. |
|---|
| 6554 | 6800 | (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) { |
|---|
| 6555 | 6801 | if (vsi->back->hw.phy.link_info.req_fec_info & |
|---|
| 6556 | 6802 | I40E_AQ_REQUEST_FEC_RS) |
|---|
| 6557 | | - req_fec = ", Requested FEC: CL108 RS-FEC"; |
|---|
| 6803 | + req_fec = "CL108 RS-FEC"; |
|---|
| 6558 | 6804 | else |
|---|
| 6559 | | - req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R"; |
|---|
| 6805 | + req_fec = "CL74 FC-FEC/BASE-R"; |
|---|
| 6560 | 6806 | } |
|---|
| 6807 | + netdev_info(vsi->netdev, |
|---|
| 6808 | + "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", |
|---|
| 6809 | + speed, req_fec, fec, an, fc); |
|---|
| 6810 | + } else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) { |
|---|
| 6811 | + req_fec = "None"; |
|---|
| 6812 | + fec = "None"; |
|---|
| 6813 | + an = "False"; |
|---|
| 6814 | + |
|---|
| 6815 | + if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) |
|---|
| 6816 | + an = "True"; |
|---|
| 6817 | + |
|---|
| 6818 | + if (pf->hw.phy.link_info.fec_info & |
|---|
| 6819 | + I40E_AQ_CONFIG_FEC_KR_ENA) |
|---|
| 6820 | + fec = "CL74 FC-FEC/BASE-R"; |
|---|
| 6821 | + |
|---|
| 6822 | + if (pf->hw.phy.link_info.req_fec_info & |
|---|
| 6823 | + I40E_AQ_REQUEST_FEC_KR) |
|---|
| 6824 | + req_fec = "CL74 FC-FEC/BASE-R"; |
|---|
| 6825 | + |
|---|
| 6826 | + netdev_info(vsi->netdev, |
|---|
| 6827 | + "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", |
|---|
| 6828 | + speed, req_fec, fec, an, fc); |
|---|
| 6829 | + } else { |
|---|
| 6830 | + netdev_info(vsi->netdev, |
|---|
| 6831 | + "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n", |
|---|
| 6832 | + speed, fc); |
|---|
| 6561 | 6833 | } |
|---|
| 6562 | 6834 | |
|---|
| 6563 | | - netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n", |
|---|
| 6564 | | - speed, req_fec, fec, an, fc); |
|---|
| 6565 | 6835 | } |
|---|
| 6566 | 6836 | |
|---|
| 6567 | 6837 | /** |
|---|
| .. | .. |
|---|
| 6622 | 6892 | { |
|---|
| 6623 | 6893 | struct i40e_pf *pf = vsi->back; |
|---|
| 6624 | 6894 | |
|---|
| 6625 | | - WARN_ON(in_interrupt()); |
|---|
| 6626 | 6895 | while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) |
|---|
| 6627 | 6896 | usleep_range(1000, 2000); |
|---|
| 6628 | 6897 | i40e_down(vsi); |
|---|
| 6629 | 6898 | |
|---|
| 6630 | 6899 | i40e_up(vsi); |
|---|
| 6631 | 6900 | clear_bit(__I40E_CONFIG_BUSY, pf->state); |
|---|
| 6632 | | -} |
|---|
| 6633 | | - |
|---|
| 6634 | | -/** |
|---|
| 6635 | | - * i40e_up - Bring the connection back up after being down |
|---|
| 6636 | | - * @vsi: the VSI being configured |
|---|
| 6637 | | - **/ |
|---|
| 6638 | | -int i40e_up(struct i40e_vsi *vsi) |
|---|
| 6639 | | -{ |
|---|
| 6640 | | - int err; |
|---|
| 6641 | | - |
|---|
| 6642 | | - err = i40e_vsi_configure(vsi); |
|---|
| 6643 | | - if (!err) |
|---|
| 6644 | | - err = i40e_up_complete(vsi); |
|---|
| 6645 | | - |
|---|
| 6646 | | - return err; |
|---|
| 6647 | 6901 | } |
|---|
| 6648 | 6902 | |
|---|
| 6649 | 6903 | /** |
|---|
| .. | .. |
|---|
| 6655 | 6909 | { |
|---|
| 6656 | 6910 | struct i40e_aq_get_phy_abilities_resp abilities; |
|---|
| 6657 | 6911 | struct i40e_aq_set_phy_config config = {0}; |
|---|
| 6912 | + bool non_zero_phy_type = is_up; |
|---|
| 6658 | 6913 | struct i40e_hw *hw = &pf->hw; |
|---|
| 6659 | 6914 | i40e_status err; |
|---|
| 6660 | 6915 | u64 mask; |
|---|
| .. | .. |
|---|
| 6690 | 6945 | |
|---|
| 6691 | 6946 | /* If link needs to go up, but was not forced to go down, |
|---|
| 6692 | 6947 | * and its speed values are OK, no need for a flap |
|---|
| 6948 | + * if non_zero_phy_type was set, still need to force up |
|---|
| 6693 | 6949 | */ |
|---|
| 6694 | | - if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0) |
|---|
| 6950 | + if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) |
|---|
| 6951 | + non_zero_phy_type = true; |
|---|
| 6952 | + else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0) |
|---|
| 6695 | 6953 | return I40E_SUCCESS; |
|---|
| 6696 | 6954 | |
|---|
| 6697 | 6955 | /* To force link we need to set bits for all supported PHY types, |
|---|
| .. | .. |
|---|
| 6699 | 6957 | * across two fields. |
|---|
| 6700 | 6958 | */ |
|---|
| 6701 | 6959 | mask = I40E_PHY_TYPES_BITMASK; |
|---|
| 6702 | | - config.phy_type = is_up ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0; |
|---|
| 6703 | | - config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0; |
|---|
| 6960 | + config.phy_type = |
|---|
| 6961 | + non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0; |
|---|
| 6962 | + config.phy_type_ext = |
|---|
| 6963 | + non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0; |
|---|
| 6704 | 6964 | /* Copy the old settings, except of phy_type */ |
|---|
| 6705 | 6965 | config.abilities = abilities.abilities; |
|---|
| 6966 | + if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) { |
|---|
| 6967 | + if (is_up) |
|---|
| 6968 | + config.abilities |= I40E_AQ_PHY_ENABLE_LINK; |
|---|
| 6969 | + else |
|---|
| 6970 | + config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK); |
|---|
| 6971 | + } |
|---|
| 6706 | 6972 | if (abilities.link_speed != 0) |
|---|
| 6707 | 6973 | config.link_speed = abilities.link_speed; |
|---|
| 6708 | 6974 | else |
|---|
| .. | .. |
|---|
| 6733 | 6999 | i40e_update_link_info(hw); |
|---|
| 6734 | 7000 | } |
|---|
| 6735 | 7001 | |
|---|
| 6736 | | - i40e_aq_set_link_restart_an(hw, true, NULL); |
|---|
| 7002 | + i40e_aq_set_link_restart_an(hw, is_up, NULL); |
|---|
| 6737 | 7003 | |
|---|
| 6738 | 7004 | return I40E_SUCCESS; |
|---|
| 7005 | +} |
|---|
| 7006 | + |
|---|
| 7007 | +/** |
|---|
| 7008 | + * i40e_up - Bring the connection back up after being down |
|---|
| 7009 | + * @vsi: the VSI being configured |
|---|
| 7010 | + **/ |
|---|
| 7011 | +int i40e_up(struct i40e_vsi *vsi) |
|---|
| 7012 | +{ |
|---|
| 7013 | + int err; |
|---|
| 7014 | + |
|---|
| 7015 | + if (vsi->type == I40E_VSI_MAIN && |
|---|
| 7016 | + (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED || |
|---|
| 7017 | + vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) |
|---|
| 7018 | + i40e_force_link_state(vsi->back, true); |
|---|
| 7019 | + |
|---|
| 7020 | + err = i40e_vsi_configure(vsi); |
|---|
| 7021 | + if (!err) |
|---|
| 7022 | + err = i40e_up_complete(vsi); |
|---|
| 7023 | + |
|---|
| 7024 | + return err; |
|---|
| 6739 | 7025 | } |
|---|
| 6740 | 7026 | |
|---|
| 6741 | 7027 | /** |
|---|
| .. | .. |
|---|
| 6756 | 7042 | i40e_vsi_disable_irq(vsi); |
|---|
| 6757 | 7043 | i40e_vsi_stop_rings(vsi); |
|---|
| 6758 | 7044 | if (vsi->type == I40E_VSI_MAIN && |
|---|
| 6759 | | - vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) |
|---|
| 7045 | + (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED || |
|---|
| 7046 | + vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) |
|---|
| 6760 | 7047 | i40e_force_link_state(vsi->back, false); |
|---|
| 6761 | 7048 | i40e_napi_disable_all(vsi); |
|---|
| 6762 | 7049 | |
|---|
| 6763 | 7050 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
|---|
| 6764 | 7051 | i40e_clean_tx_ring(vsi->tx_rings[i]); |
|---|
| 6765 | | - if (i40e_enabled_xdp_vsi(vsi)) |
|---|
| 7052 | + if (i40e_enabled_xdp_vsi(vsi)) { |
|---|
| 7053 | + /* Make sure that in-progress ndo_xdp_xmit and |
|---|
| 7054 | + * ndo_xsk_wakeup calls are completed. |
|---|
| 7055 | + */ |
|---|
| 7056 | + synchronize_rcu(); |
|---|
| 6766 | 7057 | i40e_clean_tx_ring(vsi->xdp_rings[i]); |
|---|
| 7058 | + } |
|---|
| 6767 | 7059 | i40e_clean_rx_ring(vsi->rx_rings[i]); |
|---|
| 6768 | 7060 | } |
|---|
| 6769 | 7061 | |
|---|
| .. | .. |
|---|
| 6841 | 7133 | else |
|---|
| 6842 | 7134 | vsi->tc_config.tc_info[i].qcount = 1; |
|---|
| 6843 | 7135 | vsi->tc_config.tc_info[i].netdev_tc = 0; |
|---|
| 7136 | + } |
|---|
| 7137 | +} |
|---|
| 7138 | + |
|---|
| 7139 | +/** |
|---|
| 7140 | + * i40e_del_macvlan_filter |
|---|
| 7141 | + * @hw: pointer to the HW structure |
|---|
| 7142 | + * @seid: seid of the channel VSI |
|---|
| 7143 | + * @macaddr: the mac address to apply as a filter |
|---|
| 7144 | + * @aq_err: store the admin Q error |
|---|
| 7145 | + * |
|---|
| 7146 | + * This function deletes a mac filter on the channel VSI which serves as the |
|---|
| 7147 | + * macvlan. Returns 0 on success. |
|---|
| 7148 | + **/ |
|---|
| 7149 | +static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid, |
|---|
| 7150 | + const u8 *macaddr, int *aq_err) |
|---|
| 7151 | +{ |
|---|
| 7152 | + struct i40e_aqc_remove_macvlan_element_data element; |
|---|
| 7153 | + i40e_status status; |
|---|
| 7154 | + |
|---|
| 7155 | + memset(&element, 0, sizeof(element)); |
|---|
| 7156 | + ether_addr_copy(element.mac_addr, macaddr); |
|---|
| 7157 | + element.vlan_tag = 0; |
|---|
| 7158 | + element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; |
|---|
| 7159 | + status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL); |
|---|
| 7160 | + *aq_err = hw->aq.asq_last_status; |
|---|
| 7161 | + |
|---|
| 7162 | + return status; |
|---|
| 7163 | +} |
|---|
| 7164 | + |
|---|
| 7165 | +/** |
|---|
| 7166 | + * i40e_add_macvlan_filter |
|---|
| 7167 | + * @hw: pointer to the HW structure |
|---|
| 7168 | + * @seid: seid of the channel VSI |
|---|
| 7169 | + * @macaddr: the mac address to apply as a filter |
|---|
| 7170 | + * @aq_err: store the admin Q error |
|---|
| 7171 | + * |
|---|
| 7172 | + * This function adds a mac filter on the channel VSI which serves as the |
|---|
| 7173 | + * macvlan. Returns 0 on success. |
|---|
| 7174 | + **/ |
|---|
| 7175 | +static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid, |
|---|
| 7176 | + const u8 *macaddr, int *aq_err) |
|---|
| 7177 | +{ |
|---|
| 7178 | + struct i40e_aqc_add_macvlan_element_data element; |
|---|
| 7179 | + i40e_status status; |
|---|
| 7180 | + u16 cmd_flags = 0; |
|---|
| 7181 | + |
|---|
| 7182 | + ether_addr_copy(element.mac_addr, macaddr); |
|---|
| 7183 | + element.vlan_tag = 0; |
|---|
| 7184 | + element.queue_number = 0; |
|---|
| 7185 | + element.match_method = I40E_AQC_MM_ERR_NO_RES; |
|---|
| 7186 | + cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; |
|---|
| 7187 | + element.flags = cpu_to_le16(cmd_flags); |
|---|
| 7188 | + status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL); |
|---|
| 7189 | + *aq_err = hw->aq.asq_last_status; |
|---|
| 7190 | + |
|---|
| 7191 | + return status; |
|---|
| 7192 | +} |
|---|
| 7193 | + |
|---|
| 7194 | +/** |
|---|
| 7195 | + * i40e_reset_ch_rings - Reset the queue contexts in a channel |
|---|
| 7196 | + * @vsi: the VSI we want to access |
|---|
| 7197 | + * @ch: the channel we want to access |
|---|
| 7198 | + */ |
|---|
| 7199 | +static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch) |
|---|
| 7200 | +{ |
|---|
| 7201 | + struct i40e_ring *tx_ring, *rx_ring; |
|---|
| 7202 | + u16 pf_q; |
|---|
| 7203 | + int i; |
|---|
| 7204 | + |
|---|
| 7205 | + for (i = 0; i < ch->num_queue_pairs; i++) { |
|---|
| 7206 | + pf_q = ch->base_queue + i; |
|---|
| 7207 | + tx_ring = vsi->tx_rings[pf_q]; |
|---|
| 7208 | + tx_ring->ch = NULL; |
|---|
| 7209 | + rx_ring = vsi->rx_rings[pf_q]; |
|---|
| 7210 | + rx_ring->ch = NULL; |
|---|
| 7211 | + } |
|---|
| 7212 | +} |
|---|
| 7213 | + |
|---|
| 7214 | +/** |
|---|
| 7215 | + * i40e_free_macvlan_channels |
|---|
| 7216 | + * @vsi: the VSI we want to access |
|---|
| 7217 | + * |
|---|
| 7218 | + * This function frees the Qs of the channel VSI from |
|---|
| 7219 | + * the stack and also deletes the channel VSIs which |
|---|
| 7220 | + * serve as macvlans. |
|---|
| 7221 | + */ |
|---|
| 7222 | +static void i40e_free_macvlan_channels(struct i40e_vsi *vsi) |
|---|
| 7223 | +{ |
|---|
| 7224 | + struct i40e_channel *ch, *ch_tmp; |
|---|
| 7225 | + int ret; |
|---|
| 7226 | + |
|---|
| 7227 | + if (list_empty(&vsi->macvlan_list)) |
|---|
| 7228 | + return; |
|---|
| 7229 | + |
|---|
| 7230 | + list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { |
|---|
| 7231 | + struct i40e_vsi *parent_vsi; |
|---|
| 7232 | + |
|---|
| 7233 | + if (i40e_is_channel_macvlan(ch)) { |
|---|
| 7234 | + i40e_reset_ch_rings(vsi, ch); |
|---|
| 7235 | + clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); |
|---|
| 7236 | + netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev); |
|---|
| 7237 | + netdev_set_sb_channel(ch->fwd->netdev, 0); |
|---|
| 7238 | + kfree(ch->fwd); |
|---|
| 7239 | + ch->fwd = NULL; |
|---|
| 7240 | + } |
|---|
| 7241 | + |
|---|
| 7242 | + list_del(&ch->list); |
|---|
| 7243 | + parent_vsi = ch->parent_vsi; |
|---|
| 7244 | + if (!parent_vsi || !ch->initialized) { |
|---|
| 7245 | + kfree(ch); |
|---|
| 7246 | + continue; |
|---|
| 7247 | + } |
|---|
| 7248 | + |
|---|
| 7249 | + /* remove the VSI */ |
|---|
| 7250 | + ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid, |
|---|
| 7251 | + NULL); |
|---|
| 7252 | + if (ret) |
|---|
| 7253 | + dev_err(&vsi->back->pdev->dev, |
|---|
| 7254 | + "unable to remove channel (%d) for parent VSI(%d)\n", |
|---|
| 7255 | + ch->seid, parent_vsi->seid); |
|---|
| 7256 | + kfree(ch); |
|---|
| 7257 | + } |
|---|
| 7258 | + vsi->macvlan_cnt = 0; |
|---|
| 7259 | +} |
|---|
| 7260 | + |
|---|
| 7261 | +/** |
|---|
| 7262 | + * i40e_fwd_ring_up - bring the macvlan device up |
|---|
| 7263 | + * @vsi: the VSI we want to access |
|---|
| 7264 | + * @vdev: macvlan netdevice |
|---|
| 7265 | + * @fwd: the private fwd structure |
|---|
| 7266 | + */ |
|---|
| 7267 | +static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev, |
|---|
| 7268 | + struct i40e_fwd_adapter *fwd) |
|---|
| 7269 | +{ |
|---|
| 7270 | + struct i40e_channel *ch = NULL, *ch_tmp, *iter; |
|---|
| 7271 | + int ret = 0, num_tc = 1, i, aq_err; |
|---|
| 7272 | + struct i40e_pf *pf = vsi->back; |
|---|
| 7273 | + struct i40e_hw *hw = &pf->hw; |
|---|
| 7274 | + |
|---|
| 7275 | + /* Go through the list and find an available channel */ |
|---|
| 7276 | + list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) { |
|---|
| 7277 | + if (!i40e_is_channel_macvlan(iter)) { |
|---|
| 7278 | + iter->fwd = fwd; |
|---|
| 7279 | + /* record configuration for macvlan interface in vdev */ |
|---|
| 7280 | + for (i = 0; i < num_tc; i++) |
|---|
| 7281 | + netdev_bind_sb_channel_queue(vsi->netdev, vdev, |
|---|
| 7282 | + i, |
|---|
| 7283 | + iter->num_queue_pairs, |
|---|
| 7284 | + iter->base_queue); |
|---|
| 7285 | + for (i = 0; i < iter->num_queue_pairs; i++) { |
|---|
| 7286 | + struct i40e_ring *tx_ring, *rx_ring; |
|---|
| 7287 | + u16 pf_q; |
|---|
| 7288 | + |
|---|
| 7289 | + pf_q = iter->base_queue + i; |
|---|
| 7290 | + |
|---|
| 7291 | + /* Get to TX ring ptr */ |
|---|
| 7292 | + tx_ring = vsi->tx_rings[pf_q]; |
|---|
| 7293 | + tx_ring->ch = iter; |
|---|
| 7294 | + |
|---|
| 7295 | + /* Get the RX ring ptr */ |
|---|
| 7296 | + rx_ring = vsi->rx_rings[pf_q]; |
|---|
| 7297 | + rx_ring->ch = iter; |
|---|
| 7298 | + } |
|---|
| 7299 | + ch = iter; |
|---|
| 7300 | + break; |
|---|
| 7301 | + } |
|---|
| 7302 | + } |
|---|
| 7303 | + |
|---|
| 7304 | + if (!ch) |
|---|
| 7305 | + return -EINVAL; |
|---|
| 7306 | + |
|---|
| 7307 | + /* Guarantee all rings are updated before we update the |
|---|
| 7308 | + * MAC address filter. |
|---|
| 7309 | + */ |
|---|
| 7310 | + wmb(); |
|---|
| 7311 | + |
|---|
| 7312 | + /* Add a mac filter */ |
|---|
| 7313 | + ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err); |
|---|
| 7314 | + if (ret) { |
|---|
| 7315 | + /* if we cannot add the MAC rule then disable the offload */ |
|---|
| 7316 | + macvlan_release_l2fw_offload(vdev); |
|---|
| 7317 | + for (i = 0; i < ch->num_queue_pairs; i++) { |
|---|
| 7318 | + struct i40e_ring *rx_ring; |
|---|
| 7319 | + u16 pf_q; |
|---|
| 7320 | + |
|---|
| 7321 | + pf_q = ch->base_queue + i; |
|---|
| 7322 | + rx_ring = vsi->rx_rings[pf_q]; |
|---|
| 7323 | + rx_ring->netdev = NULL; |
|---|
| 7324 | + } |
|---|
| 7325 | + dev_info(&pf->pdev->dev, |
|---|
| 7326 | + "Error adding mac filter on macvlan err %s, aq_err %s\n", |
|---|
| 7327 | + i40e_stat_str(hw, ret), |
|---|
| 7328 | + i40e_aq_str(hw, aq_err)); |
|---|
| 7329 | + netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n"); |
|---|
| 7330 | + } |
|---|
| 7331 | + |
|---|
| 7332 | + return ret; |
|---|
| 7333 | +} |
|---|
| 7334 | + |
|---|
| 7335 | +/** |
|---|
| 7336 | + * i40e_setup_macvlans - create the channels which will be macvlans |
|---|
| 7337 | + * @vsi: the VSI we want to access |
|---|
| 7338 | + * @macvlan_cnt: no. of macvlans to be setup |
|---|
| 7339 | + * @qcnt: no. of Qs per macvlan |
|---|
| 7340 | + * @vdev: macvlan netdevice |
|---|
| 7341 | + */ |
|---|
| 7342 | +static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt, |
|---|
| 7343 | + struct net_device *vdev) |
|---|
| 7344 | +{ |
|---|
| 7345 | + struct i40e_pf *pf = vsi->back; |
|---|
| 7346 | + struct i40e_hw *hw = &pf->hw; |
|---|
| 7347 | + struct i40e_vsi_context ctxt; |
|---|
| 7348 | + u16 sections, qmap, num_qps; |
|---|
| 7349 | + struct i40e_channel *ch; |
|---|
| 7350 | + int i, pow, ret = 0; |
|---|
| 7351 | + u8 offset = 0; |
|---|
| 7352 | + |
|---|
| 7353 | + if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt) |
|---|
| 7354 | + return -EINVAL; |
|---|
| 7355 | + |
|---|
| 7356 | + num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt); |
|---|
| 7357 | + |
|---|
| 7358 | + /* find the next higher power-of-2 of num queue pairs */ |
|---|
| 7359 | + pow = fls(roundup_pow_of_two(num_qps) - 1); |
|---|
| 7360 | + |
|---|
| 7361 | + qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | |
|---|
| 7362 | + (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT); |
|---|
| 7363 | + |
|---|
| 7364 | + /* Setup context bits for the main VSI */ |
|---|
| 7365 | + sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; |
|---|
| 7366 | + sections |= I40E_AQ_VSI_PROP_SCHED_VALID; |
|---|
| 7367 | + memset(&ctxt, 0, sizeof(ctxt)); |
|---|
| 7368 | + ctxt.seid = vsi->seid; |
|---|
| 7369 | + ctxt.pf_num = vsi->back->hw.pf_id; |
|---|
| 7370 | + ctxt.vf_num = 0; |
|---|
| 7371 | + ctxt.uplink_seid = vsi->uplink_seid; |
|---|
| 7372 | + ctxt.info = vsi->info; |
|---|
| 7373 | + ctxt.info.tc_mapping[0] = cpu_to_le16(qmap); |
|---|
| 7374 | + ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); |
|---|
| 7375 | + ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); |
|---|
| 7376 | + ctxt.info.valid_sections |= cpu_to_le16(sections); |
|---|
| 7377 | + |
|---|
| 7378 | + /* Reconfigure RSS for main VSI with new max queue count */ |
|---|
| 7379 | + vsi->rss_size = max_t(u16, num_qps, qcnt); |
|---|
| 7380 | + ret = i40e_vsi_config_rss(vsi); |
|---|
| 7381 | + if (ret) { |
|---|
| 7382 | + dev_info(&pf->pdev->dev, |
|---|
| 7383 | + "Failed to reconfig RSS for num_queues (%u)\n", |
|---|
| 7384 | + vsi->rss_size); |
|---|
| 7385 | + return ret; |
|---|
| 7386 | + } |
|---|
| 7387 | + vsi->reconfig_rss = true; |
|---|
| 7388 | + dev_dbg(&vsi->back->pdev->dev, |
|---|
| 7389 | + "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size); |
|---|
| 7390 | + vsi->next_base_queue = num_qps; |
|---|
| 7391 | + vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps; |
|---|
| 7392 | + |
|---|
| 7393 | + /* Update the VSI after updating the VSI queue-mapping |
|---|
| 7394 | + * information |
|---|
| 7395 | + */ |
|---|
| 7396 | + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); |
|---|
| 7397 | + if (ret) { |
|---|
| 7398 | + dev_info(&pf->pdev->dev, |
|---|
| 7399 | + "Update vsi tc config failed, err %s aq_err %s\n", |
|---|
| 7400 | + i40e_stat_str(hw, ret), |
|---|
| 7401 | + i40e_aq_str(hw, hw->aq.asq_last_status)); |
|---|
| 7402 | + return ret; |
|---|
| 7403 | + } |
|---|
| 7404 | + /* update the local VSI info with updated queue map */ |
|---|
| 7405 | + i40e_vsi_update_queue_map(vsi, &ctxt); |
|---|
| 7406 | + vsi->info.valid_sections = 0; |
|---|
| 7407 | + |
|---|
| 7408 | + /* Create channels for macvlans */ |
|---|
| 7409 | + INIT_LIST_HEAD(&vsi->macvlan_list); |
|---|
| 7410 | + for (i = 0; i < macvlan_cnt; i++) { |
|---|
| 7411 | + ch = kzalloc(sizeof(*ch), GFP_KERNEL); |
|---|
| 7412 | + if (!ch) { |
|---|
| 7413 | + ret = -ENOMEM; |
|---|
| 7414 | + goto err_free; |
|---|
| 7415 | + } |
|---|
| 7416 | + INIT_LIST_HEAD(&ch->list); |
|---|
| 7417 | + ch->num_queue_pairs = qcnt; |
|---|
| 7418 | + if (!i40e_setup_channel(pf, vsi, ch)) { |
|---|
| 7419 | + ret = -EINVAL; |
|---|
| 7420 | + kfree(ch); |
|---|
| 7421 | + goto err_free; |
|---|
| 7422 | + } |
|---|
| 7423 | + ch->parent_vsi = vsi; |
|---|
| 7424 | + vsi->cnt_q_avail -= ch->num_queue_pairs; |
|---|
| 7425 | + vsi->macvlan_cnt++; |
|---|
| 7426 | + list_add_tail(&ch->list, &vsi->macvlan_list); |
|---|
| 7427 | + } |
|---|
| 7428 | + |
|---|
| 7429 | + return ret; |
|---|
| 7430 | + |
|---|
| 7431 | +err_free: |
|---|
| 7432 | + dev_info(&pf->pdev->dev, "Failed to setup macvlans\n"); |
|---|
| 7433 | + i40e_free_macvlan_channels(vsi); |
|---|
| 7434 | + |
|---|
| 7435 | + return ret; |
|---|
| 7436 | +} |
|---|
| 7437 | + |
|---|
| 7438 | +/** |
|---|
| 7439 | + * i40e_fwd_add - configure macvlans |
|---|
| 7440 | + * @netdev: net device to configure |
|---|
| 7441 | + * @vdev: macvlan netdevice |
|---|
| 7442 | + **/ |
|---|
| 7443 | +static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev) |
|---|
| 7444 | +{ |
|---|
| 7445 | + struct i40e_netdev_priv *np = netdev_priv(netdev); |
|---|
| 7446 | + u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors; |
|---|
| 7447 | + struct i40e_vsi *vsi = np->vsi; |
|---|
| 7448 | + struct i40e_pf *pf = vsi->back; |
|---|
| 7449 | + struct i40e_fwd_adapter *fwd; |
|---|
| 7450 | + int avail_macvlan, ret; |
|---|
| 7451 | + |
|---|
| 7452 | + if ((pf->flags & I40E_FLAG_DCB_ENABLED)) { |
|---|
| 7453 | + netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n"); |
|---|
| 7454 | + return ERR_PTR(-EINVAL); |
|---|
| 7455 | + } |
|---|
| 7456 | + if ((pf->flags & I40E_FLAG_TC_MQPRIO)) { |
|---|
| 7457 | + netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n"); |
|---|
| 7458 | + return ERR_PTR(-EINVAL); |
|---|
| 7459 | + } |
|---|
| 7460 | + if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) { |
|---|
| 7461 | + netdev_info(netdev, "Not enough vectors available to support macvlans\n"); |
|---|
| 7462 | + return ERR_PTR(-EINVAL); |
|---|
| 7463 | + } |
|---|
| 7464 | + |
|---|
| 7465 | + /* The macvlan device has to be a single Q device so that the |
|---|
| 7466 | + * tc_to_txq field can be reused to pick the tx queue. |
|---|
| 7467 | + */ |
|---|
| 7468 | + if (netif_is_multiqueue(vdev)) |
|---|
| 7469 | + return ERR_PTR(-ERANGE); |
|---|
| 7470 | + |
|---|
| 7471 | + if (!vsi->macvlan_cnt) { |
|---|
| 7472 | + /* reserve bit 0 for the pf device */ |
|---|
| 7473 | + set_bit(0, vsi->fwd_bitmask); |
|---|
| 7474 | + |
|---|
| 7475 | + /* Try to reserve as many queues as possible for macvlans. First |
|---|
| 7476 | + * reserve 3/4th of max vectors, then half, then quarter and |
|---|
| 7477 | + * calculate Qs per macvlan as you go |
|---|
| 7478 | + */ |
|---|
| 7479 | + vectors = pf->num_lan_msix; |
|---|
| 7480 | + if (vectors <= I40E_MAX_MACVLANS && vectors > 64) { |
|---|
| 7481 | + /* allocate 4 Qs per macvlan and 32 Qs to the PF*/ |
|---|
| 7482 | + q_per_macvlan = 4; |
|---|
| 7483 | + macvlan_cnt = (vectors - 32) / 4; |
|---|
| 7484 | + } else if (vectors <= 64 && vectors > 32) { |
|---|
| 7485 | + /* allocate 2 Qs per macvlan and 16 Qs to the PF*/ |
|---|
| 7486 | + q_per_macvlan = 2; |
|---|
| 7487 | + macvlan_cnt = (vectors - 16) / 2; |
|---|
| 7488 | + } else if (vectors <= 32 && vectors > 16) { |
|---|
| 7489 | + /* allocate 1 Q per macvlan and 16 Qs to the PF*/ |
|---|
| 7490 | + q_per_macvlan = 1; |
|---|
| 7491 | + macvlan_cnt = vectors - 16; |
|---|
| 7492 | + } else if (vectors <= 16 && vectors > 8) { |
|---|
| 7493 | + /* allocate 1 Q per macvlan and 8 Qs to the PF */ |
|---|
| 7494 | + q_per_macvlan = 1; |
|---|
| 7495 | + macvlan_cnt = vectors - 8; |
|---|
| 7496 | + } else { |
|---|
| 7497 | + /* allocate 1 Q per macvlan and 1 Q to the PF */ |
|---|
| 7498 | + q_per_macvlan = 1; |
|---|
| 7499 | + macvlan_cnt = vectors - 1; |
|---|
| 7500 | + } |
|---|
| 7501 | + |
|---|
| 7502 | + if (macvlan_cnt == 0) |
|---|
| 7503 | + return ERR_PTR(-EBUSY); |
|---|
| 7504 | + |
|---|
| 7505 | + /* Quiesce VSI queues */ |
|---|
| 7506 | + i40e_quiesce_vsi(vsi); |
|---|
| 7507 | + |
|---|
| 7508 | + /* sets up the macvlans but does not "enable" them */ |
|---|
| 7509 | + ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan, |
|---|
| 7510 | + vdev); |
|---|
| 7511 | + if (ret) |
|---|
| 7512 | + return ERR_PTR(ret); |
|---|
| 7513 | + |
|---|
| 7514 | + /* Unquiesce VSI */ |
|---|
| 7515 | + i40e_unquiesce_vsi(vsi); |
|---|
| 7516 | + } |
|---|
| 7517 | + avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask, |
|---|
| 7518 | + vsi->macvlan_cnt); |
|---|
| 7519 | + if (avail_macvlan >= I40E_MAX_MACVLANS) |
|---|
| 7520 | + return ERR_PTR(-EBUSY); |
|---|
| 7521 | + |
|---|
| 7522 | + /* create the fwd struct */ |
|---|
| 7523 | + fwd = kzalloc(sizeof(*fwd), GFP_KERNEL); |
|---|
| 7524 | + if (!fwd) |
|---|
| 7525 | + return ERR_PTR(-ENOMEM); |
|---|
| 7526 | + |
|---|
| 7527 | + set_bit(avail_macvlan, vsi->fwd_bitmask); |
|---|
| 7528 | + fwd->bit_no = avail_macvlan; |
|---|
| 7529 | + netdev_set_sb_channel(vdev, avail_macvlan); |
|---|
| 7530 | + fwd->netdev = vdev; |
|---|
| 7531 | + |
|---|
| 7532 | + if (!netif_running(netdev)) |
|---|
| 7533 | + return fwd; |
|---|
| 7534 | + |
|---|
| 7535 | + /* Set fwd ring up */ |
|---|
| 7536 | + ret = i40e_fwd_ring_up(vsi, vdev, fwd); |
|---|
| 7537 | + if (ret) { |
|---|
| 7538 | + /* unbind the queues and drop the subordinate channel config */ |
|---|
| 7539 | + netdev_unbind_sb_channel(netdev, vdev); |
|---|
| 7540 | + netdev_set_sb_channel(vdev, 0); |
|---|
| 7541 | + |
|---|
| 7542 | + kfree(fwd); |
|---|
| 7543 | + return ERR_PTR(-EINVAL); |
|---|
| 7544 | + } |
|---|
| 7545 | + |
|---|
| 7546 | + return fwd; |
|---|
| 7547 | +} |
|---|
| 7548 | + |
|---|
| 7549 | +/** |
|---|
| 7550 | + * i40e_del_all_macvlans - Delete all the mac filters on the channels |
|---|
| 7551 | + * @vsi: the VSI we want to access |
|---|
| 7552 | + */ |
|---|
| 7553 | +static void i40e_del_all_macvlans(struct i40e_vsi *vsi) |
|---|
| 7554 | +{ |
|---|
| 7555 | + struct i40e_channel *ch, *ch_tmp; |
|---|
| 7556 | + struct i40e_pf *pf = vsi->back; |
|---|
| 7557 | + struct i40e_hw *hw = &pf->hw; |
|---|
| 7558 | + int aq_err, ret = 0; |
|---|
| 7559 | + |
|---|
| 7560 | + if (list_empty(&vsi->macvlan_list)) |
|---|
| 7561 | + return; |
|---|
| 7562 | + |
|---|
| 7563 | + list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { |
|---|
| 7564 | + if (i40e_is_channel_macvlan(ch)) { |
|---|
| 7565 | + ret = i40e_del_macvlan_filter(hw, ch->seid, |
|---|
| 7566 | + i40e_channel_mac(ch), |
|---|
| 7567 | + &aq_err); |
|---|
| 7568 | + if (!ret) { |
|---|
| 7569 | + /* Reset queue contexts */ |
|---|
| 7570 | + i40e_reset_ch_rings(vsi, ch); |
|---|
| 7571 | + clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); |
|---|
| 7572 | + netdev_unbind_sb_channel(vsi->netdev, |
|---|
| 7573 | + ch->fwd->netdev); |
|---|
| 7574 | + netdev_set_sb_channel(ch->fwd->netdev, 0); |
|---|
| 7575 | + kfree(ch->fwd); |
|---|
| 7576 | + ch->fwd = NULL; |
|---|
| 7577 | + } |
|---|
| 7578 | + } |
|---|
| 7579 | + } |
|---|
| 7580 | +} |
|---|
| 7581 | + |
|---|
| 7582 | +/** |
|---|
| 7583 | + * i40e_fwd_del - delete macvlan interfaces |
|---|
| 7584 | + * @netdev: net device to configure |
|---|
| 7585 | + * @vdev: macvlan netdevice |
|---|
| 7586 | + */ |
|---|
| 7587 | +static void i40e_fwd_del(struct net_device *netdev, void *vdev) |
|---|
| 7588 | +{ |
|---|
| 7589 | + struct i40e_netdev_priv *np = netdev_priv(netdev); |
|---|
| 7590 | + struct i40e_fwd_adapter *fwd = vdev; |
|---|
| 7591 | + struct i40e_channel *ch, *ch_tmp; |
|---|
| 7592 | + struct i40e_vsi *vsi = np->vsi; |
|---|
| 7593 | + struct i40e_pf *pf = vsi->back; |
|---|
| 7594 | + struct i40e_hw *hw = &pf->hw; |
|---|
| 7595 | + int aq_err, ret = 0; |
|---|
| 7596 | + |
|---|
| 7597 | + /* Find the channel associated with the macvlan and del mac filter */ |
|---|
| 7598 | + list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { |
|---|
| 7599 | + if (i40e_is_channel_macvlan(ch) && |
|---|
| 7600 | + ether_addr_equal(i40e_channel_mac(ch), |
|---|
| 7601 | + fwd->netdev->dev_addr)) { |
|---|
| 7602 | + ret = i40e_del_macvlan_filter(hw, ch->seid, |
|---|
| 7603 | + i40e_channel_mac(ch), |
|---|
| 7604 | + &aq_err); |
|---|
| 7605 | + if (!ret) { |
|---|
| 7606 | + /* Reset queue contexts */ |
|---|
| 7607 | + i40e_reset_ch_rings(vsi, ch); |
|---|
| 7608 | + clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); |
|---|
| 7609 | + netdev_unbind_sb_channel(netdev, fwd->netdev); |
|---|
| 7610 | + netdev_set_sb_channel(fwd->netdev, 0); |
|---|
| 7611 | + kfree(ch->fwd); |
|---|
| 7612 | + ch->fwd = NULL; |
|---|
| 7613 | + } else { |
|---|
| 7614 | + dev_info(&pf->pdev->dev, |
|---|
| 7615 | + "Error deleting mac filter on macvlan err %s, aq_err %s\n", |
|---|
| 7616 | + i40e_stat_str(hw, ret), |
|---|
| 7617 | + i40e_aq_str(hw, aq_err)); |
|---|
| 7618 | + } |
|---|
| 7619 | + break; |
|---|
| 7620 | + } |
|---|
| 6844 | 7621 | } |
|---|
| 6845 | 7622 | } |
|---|
| 6846 | 7623 | |
|---|
| .. | .. |
|---|
| 6939 | 7716 | vsi->seid); |
|---|
| 6940 | 7717 | need_reset = true; |
|---|
| 6941 | 7718 | goto exit; |
|---|
| 7719 | + } else if (enabled_tc && |
|---|
| 7720 | + (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) { |
|---|
| 7721 | + netdev_info(netdev, |
|---|
| 7722 | + "Failed to create channel. Override queues (%u) not power of 2\n", |
|---|
| 7723 | + vsi->tc_config.tc_info[0].qcount); |
|---|
| 7724 | + ret = -EINVAL; |
|---|
| 7725 | + need_reset = true; |
|---|
| 7726 | + goto exit; |
|---|
| 6942 | 7727 | } |
|---|
| 7728 | + |
|---|
| 7729 | + dev_info(&vsi->back->pdev->dev, |
|---|
| 7730 | + "Setup channel (id:%u) utilizing num_queues %d\n", |
|---|
| 7731 | + vsi->seid, vsi->tc_config.tc_info[0].qcount); |
|---|
| 6943 | 7732 | |
|---|
| 6944 | 7733 | if (pf->flags & I40E_FLAG_TC_MQPRIO) { |
|---|
| 6945 | 7734 | if (vsi->mqprio_qopt.max_rate[0]) { |
|---|
| 6946 | | - u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; |
|---|
| 7735 | + u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi, |
|---|
| 7736 | + vsi->mqprio_qopt.max_rate[0]); |
|---|
| 6947 | 7737 | |
|---|
| 6948 | | - do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); |
|---|
| 6949 | 7738 | ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); |
|---|
| 6950 | 7739 | if (!ret) { |
|---|
| 6951 | 7740 | u64 credits = max_tx_rate; |
|---|
| .. | .. |
|---|
| 6994 | 7783 | i40e_set_cld_element(struct i40e_cloud_filter *filter, |
|---|
| 6995 | 7784 | struct i40e_aqc_cloud_filters_element_data *cld) |
|---|
| 6996 | 7785 | { |
|---|
| 6997 | | - int i, j; |
|---|
| 6998 | 7786 | u32 ipa; |
|---|
| 7787 | + int i; |
|---|
| 6999 | 7788 | |
|---|
| 7000 | 7789 | memset(cld, 0, sizeof(*cld)); |
|---|
| 7001 | 7790 | ether_addr_copy(cld->outer_mac, filter->dst_mac); |
|---|
| .. | .. |
|---|
| 7006 | 7795 | |
|---|
| 7007 | 7796 | if (filter->n_proto == ETH_P_IPV6) { |
|---|
| 7008 | 7797 | #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1) |
|---|
| 7009 | | - for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6); |
|---|
| 7010 | | - i++, j += 2) { |
|---|
| 7798 | + for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) { |
|---|
| 7011 | 7799 | ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]); |
|---|
| 7012 | | - ipa = cpu_to_le32(ipa); |
|---|
| 7013 | | - memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa)); |
|---|
| 7800 | + |
|---|
| 7801 | + *(__le32 *)&cld->ipaddr.raw_v6.data[i * 2] = cpu_to_le32(ipa); |
|---|
| 7014 | 7802 | } |
|---|
| 7015 | 7803 | } else { |
|---|
| 7016 | 7804 | ipa = be32_to_cpu(filter->dst_ipv4); |
|---|
| 7805 | + |
|---|
| 7017 | 7806 | memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa)); |
|---|
| 7018 | 7807 | } |
|---|
| 7019 | 7808 | |
|---|
| .. | .. |
|---|
| 7208 | 7997 | /** |
|---|
| 7209 | 7998 | * i40e_parse_cls_flower - Parse tc flower filters provided by kernel |
|---|
| 7210 | 7999 | * @vsi: Pointer to VSI |
|---|
| 7211 | | - * @cls_flower: Pointer to struct tc_cls_flower_offload |
|---|
| 8000 | + * @f: Pointer to struct flow_cls_offload |
|---|
| 7212 | 8001 | * @filter: Pointer to cloud filter structure |
|---|
| 7213 | 8002 | * |
|---|
| 7214 | 8003 | **/ |
|---|
| 7215 | 8004 | static int i40e_parse_cls_flower(struct i40e_vsi *vsi, |
|---|
| 7216 | | - struct tc_cls_flower_offload *f, |
|---|
| 8005 | + struct flow_cls_offload *f, |
|---|
| 7217 | 8006 | struct i40e_cloud_filter *filter) |
|---|
| 7218 | 8007 | { |
|---|
| 8008 | + struct flow_rule *rule = flow_cls_offload_flow_rule(f); |
|---|
| 8009 | + struct flow_dissector *dissector = rule->match.dissector; |
|---|
| 7219 | 8010 | u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0; |
|---|
| 7220 | 8011 | struct i40e_pf *pf = vsi->back; |
|---|
| 7221 | 8012 | u8 field_flags = 0; |
|---|
| 7222 | 8013 | |
|---|
| 7223 | | - if (f->dissector->used_keys & |
|---|
| 8014 | + if (dissector->used_keys & |
|---|
| 7224 | 8015 | ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | |
|---|
| 7225 | 8016 | BIT(FLOW_DISSECTOR_KEY_BASIC) | |
|---|
| 7226 | 8017 | BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | |
|---|
| .. | .. |
|---|
| 7230 | 8021 | BIT(FLOW_DISSECTOR_KEY_PORTS) | |
|---|
| 7231 | 8022 | BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { |
|---|
| 7232 | 8023 | dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n", |
|---|
| 7233 | | - f->dissector->used_keys); |
|---|
| 8024 | + dissector->used_keys); |
|---|
| 7234 | 8025 | return -EOPNOTSUPP; |
|---|
| 7235 | 8026 | } |
|---|
| 7236 | 8027 | |
|---|
| 7237 | | - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { |
|---|
| 7238 | | - struct flow_dissector_key_keyid *key = |
|---|
| 7239 | | - skb_flow_dissector_target(f->dissector, |
|---|
| 7240 | | - FLOW_DISSECTOR_KEY_ENC_KEYID, |
|---|
| 7241 | | - f->key); |
|---|
| 8028 | + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { |
|---|
| 8029 | + struct flow_match_enc_keyid match; |
|---|
| 7242 | 8030 | |
|---|
| 7243 | | - struct flow_dissector_key_keyid *mask = |
|---|
| 7244 | | - skb_flow_dissector_target(f->dissector, |
|---|
| 7245 | | - FLOW_DISSECTOR_KEY_ENC_KEYID, |
|---|
| 7246 | | - f->mask); |
|---|
| 7247 | | - |
|---|
| 7248 | | - if (mask->keyid != 0) |
|---|
| 8031 | + flow_rule_match_enc_keyid(rule, &match); |
|---|
| 8032 | + if (match.mask->keyid != 0) |
|---|
| 7249 | 8033 | field_flags |= I40E_CLOUD_FIELD_TEN_ID; |
|---|
| 7250 | 8034 | |
|---|
| 7251 | | - filter->tenant_id = be32_to_cpu(key->keyid); |
|---|
| 8035 | + filter->tenant_id = be32_to_cpu(match.key->keyid); |
|---|
| 7252 | 8036 | } |
|---|
| 7253 | 8037 | |
|---|
| 7254 | | - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { |
|---|
| 7255 | | - struct flow_dissector_key_basic *key = |
|---|
| 7256 | | - skb_flow_dissector_target(f->dissector, |
|---|
| 7257 | | - FLOW_DISSECTOR_KEY_BASIC, |
|---|
| 7258 | | - f->key); |
|---|
| 8038 | + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { |
|---|
| 8039 | + struct flow_match_basic match; |
|---|
| 7259 | 8040 | |
|---|
| 7260 | | - struct flow_dissector_key_basic *mask = |
|---|
| 7261 | | - skb_flow_dissector_target(f->dissector, |
|---|
| 7262 | | - FLOW_DISSECTOR_KEY_BASIC, |
|---|
| 7263 | | - f->mask); |
|---|
| 7264 | | - |
|---|
| 7265 | | - n_proto_key = ntohs(key->n_proto); |
|---|
| 7266 | | - n_proto_mask = ntohs(mask->n_proto); |
|---|
| 8041 | + flow_rule_match_basic(rule, &match); |
|---|
| 8042 | + n_proto_key = ntohs(match.key->n_proto); |
|---|
| 8043 | + n_proto_mask = ntohs(match.mask->n_proto); |
|---|
| 7267 | 8044 | |
|---|
| 7268 | 8045 | if (n_proto_key == ETH_P_ALL) { |
|---|
| 7269 | 8046 | n_proto_key = 0; |
|---|
| 7270 | 8047 | n_proto_mask = 0; |
|---|
| 7271 | 8048 | } |
|---|
| 7272 | 8049 | filter->n_proto = n_proto_key & n_proto_mask; |
|---|
| 7273 | | - filter->ip_proto = key->ip_proto; |
|---|
| 8050 | + filter->ip_proto = match.key->ip_proto; |
|---|
| 7274 | 8051 | } |
|---|
| 7275 | 8052 | |
|---|
| 7276 | | - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { |
|---|
| 7277 | | - struct flow_dissector_key_eth_addrs *key = |
|---|
| 7278 | | - skb_flow_dissector_target(f->dissector, |
|---|
| 7279 | | - FLOW_DISSECTOR_KEY_ETH_ADDRS, |
|---|
| 7280 | | - f->key); |
|---|
| 8053 | + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { |
|---|
| 8054 | + struct flow_match_eth_addrs match; |
|---|
| 7281 | 8055 | |
|---|
| 7282 | | - struct flow_dissector_key_eth_addrs *mask = |
|---|
| 7283 | | - skb_flow_dissector_target(f->dissector, |
|---|
| 7284 | | - FLOW_DISSECTOR_KEY_ETH_ADDRS, |
|---|
| 7285 | | - f->mask); |
|---|
| 8056 | + flow_rule_match_eth_addrs(rule, &match); |
|---|
| 7286 | 8057 | |
|---|
| 7287 | 8058 | /* use is_broadcast and is_zero to check for all 0xf or 0 */ |
|---|
| 7288 | | - if (!is_zero_ether_addr(mask->dst)) { |
|---|
| 7289 | | - if (is_broadcast_ether_addr(mask->dst)) { |
|---|
| 8059 | + if (!is_zero_ether_addr(match.mask->dst)) { |
|---|
| 8060 | + if (is_broadcast_ether_addr(match.mask->dst)) { |
|---|
| 7290 | 8061 | field_flags |= I40E_CLOUD_FIELD_OMAC; |
|---|
| 7291 | 8062 | } else { |
|---|
| 7292 | 8063 | dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n", |
|---|
| 7293 | | - mask->dst); |
|---|
| 8064 | + match.mask->dst); |
|---|
| 7294 | 8065 | return I40E_ERR_CONFIG; |
|---|
| 7295 | 8066 | } |
|---|
| 7296 | 8067 | } |
|---|
| 7297 | 8068 | |
|---|
| 7298 | | - if (!is_zero_ether_addr(mask->src)) { |
|---|
| 7299 | | - if (is_broadcast_ether_addr(mask->src)) { |
|---|
| 8069 | + if (!is_zero_ether_addr(match.mask->src)) { |
|---|
| 8070 | + if (is_broadcast_ether_addr(match.mask->src)) { |
|---|
| 7300 | 8071 | field_flags |= I40E_CLOUD_FIELD_IMAC; |
|---|
| 7301 | 8072 | } else { |
|---|
| 7302 | 8073 | dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n", |
|---|
| 7303 | | - mask->src); |
|---|
| 8074 | + match.mask->src); |
|---|
| 7304 | 8075 | return I40E_ERR_CONFIG; |
|---|
| 7305 | 8076 | } |
|---|
| 7306 | 8077 | } |
|---|
| 7307 | | - ether_addr_copy(filter->dst_mac, key->dst); |
|---|
| 7308 | | - ether_addr_copy(filter->src_mac, key->src); |
|---|
| 8078 | + ether_addr_copy(filter->dst_mac, match.key->dst); |
|---|
| 8079 | + ether_addr_copy(filter->src_mac, match.key->src); |
|---|
| 7309 | 8080 | } |
|---|
| 7310 | 8081 | |
|---|
| 7311 | | - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { |
|---|
| 7312 | | - struct flow_dissector_key_vlan *key = |
|---|
| 7313 | | - skb_flow_dissector_target(f->dissector, |
|---|
| 7314 | | - FLOW_DISSECTOR_KEY_VLAN, |
|---|
| 7315 | | - f->key); |
|---|
| 7316 | | - struct flow_dissector_key_vlan *mask = |
|---|
| 7317 | | - skb_flow_dissector_target(f->dissector, |
|---|
| 7318 | | - FLOW_DISSECTOR_KEY_VLAN, |
|---|
| 7319 | | - f->mask); |
|---|
| 8082 | + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { |
|---|
| 8083 | + struct flow_match_vlan match; |
|---|
| 7320 | 8084 | |
|---|
| 7321 | | - if (mask->vlan_id) { |
|---|
| 7322 | | - if (mask->vlan_id == VLAN_VID_MASK) { |
|---|
| 8085 | + flow_rule_match_vlan(rule, &match); |
|---|
| 8086 | + if (match.mask->vlan_id) { |
|---|
| 8087 | + if (match.mask->vlan_id == VLAN_VID_MASK) { |
|---|
| 7323 | 8088 | field_flags |= I40E_CLOUD_FIELD_IVLAN; |
|---|
| 7324 | 8089 | |
|---|
| 7325 | 8090 | } else { |
|---|
| 7326 | 8091 | dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n", |
|---|
| 7327 | | - mask->vlan_id); |
|---|
| 8092 | + match.mask->vlan_id); |
|---|
| 7328 | 8093 | return I40E_ERR_CONFIG; |
|---|
| 7329 | 8094 | } |
|---|
| 7330 | 8095 | } |
|---|
| 7331 | 8096 | |
|---|
| 7332 | | - filter->vlan_id = cpu_to_be16(key->vlan_id); |
|---|
| 8097 | + filter->vlan_id = cpu_to_be16(match.key->vlan_id); |
|---|
| 7333 | 8098 | } |
|---|
| 7334 | 8099 | |
|---|
| 7335 | | - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { |
|---|
| 7336 | | - struct flow_dissector_key_control *key = |
|---|
| 7337 | | - skb_flow_dissector_target(f->dissector, |
|---|
| 7338 | | - FLOW_DISSECTOR_KEY_CONTROL, |
|---|
| 7339 | | - f->key); |
|---|
| 8100 | + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { |
|---|
| 8101 | + struct flow_match_control match; |
|---|
| 7340 | 8102 | |
|---|
| 7341 | | - addr_type = key->addr_type; |
|---|
| 8103 | + flow_rule_match_control(rule, &match); |
|---|
| 8104 | + addr_type = match.key->addr_type; |
|---|
| 7342 | 8105 | } |
|---|
| 7343 | 8106 | |
|---|
| 7344 | 8107 | if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { |
|---|
| 7345 | | - struct flow_dissector_key_ipv4_addrs *key = |
|---|
| 7346 | | - skb_flow_dissector_target(f->dissector, |
|---|
| 7347 | | - FLOW_DISSECTOR_KEY_IPV4_ADDRS, |
|---|
| 7348 | | - f->key); |
|---|
| 7349 | | - struct flow_dissector_key_ipv4_addrs *mask = |
|---|
| 7350 | | - skb_flow_dissector_target(f->dissector, |
|---|
| 7351 | | - FLOW_DISSECTOR_KEY_IPV4_ADDRS, |
|---|
| 7352 | | - f->mask); |
|---|
| 8108 | + struct flow_match_ipv4_addrs match; |
|---|
| 7353 | 8109 | |
|---|
| 7354 | | - if (mask->dst) { |
|---|
| 7355 | | - if (mask->dst == cpu_to_be32(0xffffffff)) { |
|---|
| 8110 | + flow_rule_match_ipv4_addrs(rule, &match); |
|---|
| 8111 | + if (match.mask->dst) { |
|---|
| 8112 | + if (match.mask->dst == cpu_to_be32(0xffffffff)) { |
|---|
| 7356 | 8113 | field_flags |= I40E_CLOUD_FIELD_IIP; |
|---|
| 7357 | 8114 | } else { |
|---|
| 7358 | 8115 | dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n", |
|---|
| 7359 | | - &mask->dst); |
|---|
| 8116 | + &match.mask->dst); |
|---|
| 7360 | 8117 | return I40E_ERR_CONFIG; |
|---|
| 7361 | 8118 | } |
|---|
| 7362 | 8119 | } |
|---|
| 7363 | 8120 | |
|---|
| 7364 | | - if (mask->src) { |
|---|
| 7365 | | - if (mask->src == cpu_to_be32(0xffffffff)) { |
|---|
| 8121 | + if (match.mask->src) { |
|---|
| 8122 | + if (match.mask->src == cpu_to_be32(0xffffffff)) { |
|---|
| 7366 | 8123 | field_flags |= I40E_CLOUD_FIELD_IIP; |
|---|
| 7367 | 8124 | } else { |
|---|
| 7368 | 8125 | dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n", |
|---|
| 7369 | | - &mask->src); |
|---|
| 8126 | + &match.mask->src); |
|---|
| 7370 | 8127 | return I40E_ERR_CONFIG; |
|---|
| 7371 | 8128 | } |
|---|
| 7372 | 8129 | } |
|---|
| .. | .. |
|---|
| 7375 | 8132 | dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n"); |
|---|
| 7376 | 8133 | return I40E_ERR_CONFIG; |
|---|
| 7377 | 8134 | } |
|---|
| 7378 | | - filter->dst_ipv4 = key->dst; |
|---|
| 7379 | | - filter->src_ipv4 = key->src; |
|---|
| 8135 | + filter->dst_ipv4 = match.key->dst; |
|---|
| 8136 | + filter->src_ipv4 = match.key->src; |
|---|
| 7380 | 8137 | } |
|---|
| 7381 | 8138 | |
|---|
| 7382 | 8139 | if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { |
|---|
| 7383 | | - struct flow_dissector_key_ipv6_addrs *key = |
|---|
| 7384 | | - skb_flow_dissector_target(f->dissector, |
|---|
| 7385 | | - FLOW_DISSECTOR_KEY_IPV6_ADDRS, |
|---|
| 7386 | | - f->key); |
|---|
| 7387 | | - struct flow_dissector_key_ipv6_addrs *mask = |
|---|
| 7388 | | - skb_flow_dissector_target(f->dissector, |
|---|
| 7389 | | - FLOW_DISSECTOR_KEY_IPV6_ADDRS, |
|---|
| 7390 | | - f->mask); |
|---|
| 8140 | + struct flow_match_ipv6_addrs match; |
|---|
| 8141 | + |
|---|
| 8142 | + flow_rule_match_ipv6_addrs(rule, &match); |
|---|
| 7391 | 8143 | |
|---|
| 7392 | 8144 | /* src and dest IPV6 address should not be LOOPBACK |
|---|
| 7393 | 8145 | * (0:0:0:0:0:0:0:1), which can be represented as ::1 |
|---|
| 7394 | 8146 | */ |
|---|
| 7395 | | - if (ipv6_addr_loopback(&key->dst) || |
|---|
| 7396 | | - ipv6_addr_loopback(&key->src)) { |
|---|
| 8147 | + if (ipv6_addr_loopback(&match.key->dst) || |
|---|
| 8148 | + ipv6_addr_loopback(&match.key->src)) { |
|---|
| 7397 | 8149 | dev_err(&pf->pdev->dev, |
|---|
| 7398 | 8150 | "Bad ipv6, addr is LOOPBACK\n"); |
|---|
| 7399 | 8151 | return I40E_ERR_CONFIG; |
|---|
| 7400 | 8152 | } |
|---|
| 7401 | | - if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src)) |
|---|
| 8153 | + if (!ipv6_addr_any(&match.mask->dst) || |
|---|
| 8154 | + !ipv6_addr_any(&match.mask->src)) |
|---|
| 7402 | 8155 | field_flags |= I40E_CLOUD_FIELD_IIP; |
|---|
| 7403 | 8156 | |
|---|
| 7404 | | - memcpy(&filter->src_ipv6, &key->src.s6_addr32, |
|---|
| 8157 | + memcpy(&filter->src_ipv6, &match.key->src.s6_addr32, |
|---|
| 7405 | 8158 | sizeof(filter->src_ipv6)); |
|---|
| 7406 | | - memcpy(&filter->dst_ipv6, &key->dst.s6_addr32, |
|---|
| 8159 | + memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32, |
|---|
| 7407 | 8160 | sizeof(filter->dst_ipv6)); |
|---|
| 7408 | 8161 | } |
|---|
| 7409 | 8162 | |
|---|
| 7410 | | - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) { |
|---|
| 7411 | | - struct flow_dissector_key_ports *key = |
|---|
| 7412 | | - skb_flow_dissector_target(f->dissector, |
|---|
| 7413 | | - FLOW_DISSECTOR_KEY_PORTS, |
|---|
| 7414 | | - f->key); |
|---|
| 7415 | | - struct flow_dissector_key_ports *mask = |
|---|
| 7416 | | - skb_flow_dissector_target(f->dissector, |
|---|
| 7417 | | - FLOW_DISSECTOR_KEY_PORTS, |
|---|
| 7418 | | - f->mask); |
|---|
| 8163 | + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { |
|---|
| 8164 | + struct flow_match_ports match; |
|---|
| 7419 | 8165 | |
|---|
| 7420 | | - if (mask->src) { |
|---|
| 7421 | | - if (mask->src == cpu_to_be16(0xffff)) { |
|---|
| 8166 | + flow_rule_match_ports(rule, &match); |
|---|
| 8167 | + if (match.mask->src) { |
|---|
| 8168 | + if (match.mask->src == cpu_to_be16(0xffff)) { |
|---|
| 7422 | 8169 | field_flags |= I40E_CLOUD_FIELD_IIP; |
|---|
| 7423 | 8170 | } else { |
|---|
| 7424 | 8171 | dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n", |
|---|
| 7425 | | - be16_to_cpu(mask->src)); |
|---|
| 8172 | + be16_to_cpu(match.mask->src)); |
|---|
| 7426 | 8173 | return I40E_ERR_CONFIG; |
|---|
| 7427 | 8174 | } |
|---|
| 7428 | 8175 | } |
|---|
| 7429 | 8176 | |
|---|
| 7430 | | - if (mask->dst) { |
|---|
| 7431 | | - if (mask->dst == cpu_to_be16(0xffff)) { |
|---|
| 8177 | + if (match.mask->dst) { |
|---|
| 8178 | + if (match.mask->dst == cpu_to_be16(0xffff)) { |
|---|
| 7432 | 8179 | field_flags |= I40E_CLOUD_FIELD_IIP; |
|---|
| 7433 | 8180 | } else { |
|---|
| 7434 | 8181 | dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n", |
|---|
| 7435 | | - be16_to_cpu(mask->dst)); |
|---|
| 8182 | + be16_to_cpu(match.mask->dst)); |
|---|
| 7436 | 8183 | return I40E_ERR_CONFIG; |
|---|
| 7437 | 8184 | } |
|---|
| 7438 | 8185 | } |
|---|
| 7439 | 8186 | |
|---|
| 7440 | | - filter->dst_port = key->dst; |
|---|
| 7441 | | - filter->src_port = key->src; |
|---|
| 8187 | + filter->dst_port = match.key->dst; |
|---|
| 8188 | + filter->src_port = match.key->src; |
|---|
| 7442 | 8189 | |
|---|
| 7443 | 8190 | switch (filter->ip_proto) { |
|---|
| 7444 | 8191 | case IPPROTO_TCP: |
|---|
| .. | .. |
|---|
| 7492 | 8239 | /** |
|---|
| 7493 | 8240 | * i40e_configure_clsflower - Configure tc flower filters |
|---|
| 7494 | 8241 | * @vsi: Pointer to VSI |
|---|
| 7495 | | - * @cls_flower: Pointer to struct tc_cls_flower_offload |
|---|
| 8242 | + * @cls_flower: Pointer to struct flow_cls_offload |
|---|
| 7496 | 8243 | * |
|---|
| 7497 | 8244 | **/ |
|---|
| 7498 | 8245 | static int i40e_configure_clsflower(struct i40e_vsi *vsi, |
|---|
| 7499 | | - struct tc_cls_flower_offload *cls_flower) |
|---|
| 8246 | + struct flow_cls_offload *cls_flower) |
|---|
| 7500 | 8247 | { |
|---|
| 7501 | 8248 | int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid); |
|---|
| 7502 | 8249 | struct i40e_cloud_filter *filter = NULL; |
|---|
| .. | .. |
|---|
| 7506 | 8253 | if (tc < 0) { |
|---|
| 7507 | 8254 | dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n"); |
|---|
| 7508 | 8255 | return -EOPNOTSUPP; |
|---|
| 8256 | + } |
|---|
| 8257 | + |
|---|
| 8258 | + if (!tc) { |
|---|
| 8259 | + dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination"); |
|---|
| 8260 | + return -EINVAL; |
|---|
| 7509 | 8261 | } |
|---|
| 7510 | 8262 | |
|---|
| 7511 | 8263 | if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || |
|---|
| .. | .. |
|---|
| 7587 | 8339 | /** |
|---|
| 7588 | 8340 | * i40e_delete_clsflower - Remove tc flower filters |
|---|
| 7589 | 8341 | * @vsi: Pointer to VSI |
|---|
| 7590 | | - * @cls_flower: Pointer to struct tc_cls_flower_offload |
|---|
| 8342 | + * @cls_flower: Pointer to struct flow_cls_offload |
|---|
| 7591 | 8343 | * |
|---|
| 7592 | 8344 | **/ |
|---|
| 7593 | 8345 | static int i40e_delete_clsflower(struct i40e_vsi *vsi, |
|---|
| 7594 | | - struct tc_cls_flower_offload *cls_flower) |
|---|
| 8346 | + struct flow_cls_offload *cls_flower) |
|---|
| 7595 | 8347 | { |
|---|
| 7596 | 8348 | struct i40e_cloud_filter *filter = NULL; |
|---|
| 7597 | 8349 | struct i40e_pf *pf = vsi->back; |
|---|
| .. | .. |
|---|
| 7630 | 8382 | |
|---|
| 7631 | 8383 | /** |
|---|
| 7632 | 8384 | * i40e_setup_tc_cls_flower - flower classifier offloads |
|---|
| 7633 | | - * @netdev: net device to configure |
|---|
| 7634 | | - * @type_data: offload data |
|---|
| 8385 | + * @np: net device to configure |
|---|
| 8386 | + * @cls_flower: offload data |
|---|
| 7635 | 8387 | **/ |
|---|
| 7636 | 8388 | static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np, |
|---|
| 7637 | | - struct tc_cls_flower_offload *cls_flower) |
|---|
| 8389 | + struct flow_cls_offload *cls_flower) |
|---|
| 7638 | 8390 | { |
|---|
| 7639 | 8391 | struct i40e_vsi *vsi = np->vsi; |
|---|
| 7640 | 8392 | |
|---|
| 7641 | 8393 | switch (cls_flower->command) { |
|---|
| 7642 | | - case TC_CLSFLOWER_REPLACE: |
|---|
| 8394 | + case FLOW_CLS_REPLACE: |
|---|
| 7643 | 8395 | return i40e_configure_clsflower(vsi, cls_flower); |
|---|
| 7644 | | - case TC_CLSFLOWER_DESTROY: |
|---|
| 8396 | + case FLOW_CLS_DESTROY: |
|---|
| 7645 | 8397 | return i40e_delete_clsflower(vsi, cls_flower); |
|---|
| 7646 | | - case TC_CLSFLOWER_STATS: |
|---|
| 8398 | + case FLOW_CLS_STATS: |
|---|
| 7647 | 8399 | return -EOPNOTSUPP; |
|---|
| 7648 | 8400 | default: |
|---|
| 7649 | 8401 | return -EOPNOTSUPP; |
|---|
| .. | .. |
|---|
| 7667 | 8419 | } |
|---|
| 7668 | 8420 | } |
|---|
| 7669 | 8421 | |
|---|
| 7670 | | -static int i40e_setup_tc_block(struct net_device *dev, |
|---|
| 7671 | | - struct tc_block_offload *f) |
|---|
| 7672 | | -{ |
|---|
| 7673 | | - struct i40e_netdev_priv *np = netdev_priv(dev); |
|---|
| 7674 | | - |
|---|
| 7675 | | - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) |
|---|
| 7676 | | - return -EOPNOTSUPP; |
|---|
| 7677 | | - |
|---|
| 7678 | | - switch (f->command) { |
|---|
| 7679 | | - case TC_BLOCK_BIND: |
|---|
| 7680 | | - return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb, |
|---|
| 7681 | | - np, np, f->extack); |
|---|
| 7682 | | - case TC_BLOCK_UNBIND: |
|---|
| 7683 | | - tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np); |
|---|
| 7684 | | - return 0; |
|---|
| 7685 | | - default: |
|---|
| 7686 | | - return -EOPNOTSUPP; |
|---|
| 7687 | | - } |
|---|
| 7688 | | -} |
|---|
| 8422 | +static LIST_HEAD(i40e_block_cb_list); |
|---|
| 7689 | 8423 | |
|---|
| 7690 | 8424 | static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type, |
|---|
| 7691 | 8425 | void *type_data) |
|---|
| 7692 | 8426 | { |
|---|
| 8427 | + struct i40e_netdev_priv *np = netdev_priv(netdev); |
|---|
| 8428 | + |
|---|
| 7693 | 8429 | switch (type) { |
|---|
| 7694 | 8430 | case TC_SETUP_QDISC_MQPRIO: |
|---|
| 7695 | 8431 | return i40e_setup_tc(netdev, type_data); |
|---|
| 7696 | 8432 | case TC_SETUP_BLOCK: |
|---|
| 7697 | | - return i40e_setup_tc_block(netdev, type_data); |
|---|
| 8433 | + return flow_block_cb_setup_simple(type_data, |
|---|
| 8434 | + &i40e_block_cb_list, |
|---|
| 8435 | + i40e_setup_tc_block_cb, |
|---|
| 8436 | + np, np, true); |
|---|
| 7698 | 8437 | default: |
|---|
| 7699 | 8438 | return -EOPNOTSUPP; |
|---|
| 7700 | 8439 | } |
|---|
| .. | .. |
|---|
| 7962 | 8701 | { |
|---|
| 7963 | 8702 | u32 val; |
|---|
| 7964 | 8703 | |
|---|
| 7965 | | - WARN_ON(in_interrupt()); |
|---|
| 7966 | | - |
|---|
| 7967 | | - |
|---|
| 7968 | 8704 | /* do the biggest reset indicated */ |
|---|
| 7969 | 8705 | if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) { |
|---|
| 7970 | 8706 | |
|---|
| .. | .. |
|---|
| 8013 | 8749 | */ |
|---|
| 8014 | 8750 | i40e_prep_for_reset(pf, lock_acquired); |
|---|
| 8015 | 8751 | i40e_reset_and_rebuild(pf, true, lock_acquired); |
|---|
| 8752 | + dev_info(&pf->pdev->dev, |
|---|
| 8753 | + pf->flags & I40E_FLAG_DISABLE_FW_LLDP ? |
|---|
| 8754 | + "FW LLDP is disabled\n" : |
|---|
| 8755 | + "FW LLDP is enabled\n"); |
|---|
| 8016 | 8756 | |
|---|
| 8017 | 8757 | } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) { |
|---|
| 8018 | 8758 | int v; |
|---|
| .. | .. |
|---|
| 8202 | 8942 | i40e_service_event_schedule(pf); |
|---|
| 8203 | 8943 | } else { |
|---|
| 8204 | 8944 | i40e_pf_unquiesce_all_vsi(pf); |
|---|
| 8205 | | - set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); |
|---|
| 8206 | | - set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); |
|---|
| 8945 | + set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); |
|---|
| 8946 | + set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); |
|---|
| 8207 | 8947 | } |
|---|
| 8208 | 8948 | |
|---|
| 8209 | 8949 | exit: |
|---|
| .. | .. |
|---|
| 8489 | 9229 | return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters; |
|---|
| 8490 | 9230 | } |
|---|
| 8491 | 9231 | |
|---|
| 8492 | | -/* We can see up to 256 filter programming desc in transit if the filters are |
|---|
| 8493 | | - * being applied really fast; before we see the first |
|---|
| 8494 | | - * filter miss error on Rx queue 0. Accumulating enough error messages before |
|---|
| 8495 | | - * reacting will make sure we don't cause flush too often. |
|---|
| 8496 | | - */ |
|---|
| 8497 | | -#define I40E_MAX_FD_PROGRAM_ERROR 256 |
|---|
| 8498 | | - |
|---|
| 8499 | 9232 | /** |
|---|
| 8500 | 9233 | * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table |
|---|
| 8501 | 9234 | * @pf: board private structure |
|---|
| .. | .. |
|---|
| 8585 | 9318 | i40e_status status; |
|---|
| 8586 | 9319 | bool new_link, old_link; |
|---|
| 8587 | 9320 | |
|---|
| 8588 | | - /* save off old link status information */ |
|---|
| 8589 | | - pf->hw.phy.link_info_old = pf->hw.phy.link_info; |
|---|
| 8590 | | - |
|---|
| 8591 | 9321 | /* set this to force the get_link_status call to refresh state */ |
|---|
| 8592 | 9322 | pf->hw.phy.get_link_info = true; |
|---|
| 8593 | | - |
|---|
| 8594 | 9323 | old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); |
|---|
| 8595 | | - |
|---|
| 8596 | 9324 | status = i40e_get_link_status(&pf->hw, &new_link); |
|---|
| 8597 | 9325 | |
|---|
| 8598 | 9326 | /* On success, disable temp link polling */ |
|---|
| .. | .. |
|---|
| 8622 | 9350 | /* Notify the base of the switch tree connected to |
|---|
| 8623 | 9351 | * the link. Floating VEBs are not notified. |
|---|
| 8624 | 9352 | */ |
|---|
| 8625 | | - if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) |
|---|
| 9353 | + if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) |
|---|
| 8626 | 9354 | i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); |
|---|
| 8627 | 9355 | else |
|---|
| 8628 | 9356 | i40e_vsi_link_event(vsi, new_link); |
|---|
| .. | .. |
|---|
| 9100 | 9828 | /** |
|---|
| 9101 | 9829 | * i40e_get_capabilities - get info about the HW |
|---|
| 9102 | 9830 | * @pf: the PF struct |
|---|
| 9831 | + * @list_type: AQ capability to be queried |
|---|
| 9103 | 9832 | **/ |
|---|
| 9104 | 9833 | static int i40e_get_capabilities(struct i40e_pf *pf, |
|---|
| 9105 | 9834 | enum i40e_admin_queue_opc list_type) |
|---|
| .. | .. |
|---|
| 9337 | 10066 | } |
|---|
| 9338 | 10067 | |
|---|
| 9339 | 10068 | /** |
|---|
| 10069 | + * i40e_clean_xps_state - clean xps state for every tx_ring |
|---|
| 10070 | + * @vsi: ptr to the VSI |
|---|
| 10071 | + **/ |
|---|
| 10072 | +static void i40e_clean_xps_state(struct i40e_vsi *vsi) |
|---|
| 10073 | +{ |
|---|
| 10074 | + int i; |
|---|
| 10075 | + |
|---|
| 10076 | + if (vsi->tx_rings) |
|---|
| 10077 | + for (i = 0; i < vsi->num_queue_pairs; i++) |
|---|
| 10078 | + if (vsi->tx_rings[i]) |
|---|
| 10079 | + clear_bit(__I40E_TX_XPS_INIT_DONE, |
|---|
| 10080 | + vsi->tx_rings[i]->state); |
|---|
| 10081 | +} |
|---|
| 10082 | + |
|---|
| 10083 | +/** |
|---|
| 9340 | 10084 | * i40e_prep_for_reset - prep for the core to reset |
|---|
| 9341 | 10085 | * @pf: board private structure |
|---|
| 9342 | 10086 | * @lock_acquired: indicates whether or not the lock has been acquired |
|---|
| .. | .. |
|---|
| 9367 | 10111 | rtnl_unlock(); |
|---|
| 9368 | 10112 | |
|---|
| 9369 | 10113 | for (v = 0; v < pf->num_alloc_vsi; v++) { |
|---|
| 9370 | | - if (pf->vsi[v]) |
|---|
| 10114 | + if (pf->vsi[v]) { |
|---|
| 10115 | + i40e_clean_xps_state(pf->vsi[v]); |
|---|
| 9371 | 10116 | pf->vsi[v]->seid = 0; |
|---|
| 10117 | + } |
|---|
| 9372 | 10118 | } |
|---|
| 9373 | 10119 | |
|---|
| 9374 | 10120 | i40e_shutdown_adminq(&pf->hw); |
|---|
| .. | .. |
|---|
| 9380 | 10126 | dev_warn(&pf->pdev->dev, |
|---|
| 9381 | 10127 | "shutdown_lan_hmc failed: %d\n", ret); |
|---|
| 9382 | 10128 | } |
|---|
| 10129 | + |
|---|
| 10130 | + /* Save the current PTP time so that we can restore the time after the |
|---|
| 10131 | + * reset completes. |
|---|
| 10132 | + */ |
|---|
| 10133 | + i40e_ptp_save_hw_time(pf); |
|---|
| 9383 | 10134 | } |
|---|
| 9384 | 10135 | |
|---|
| 9385 | 10136 | /** |
|---|
| .. | .. |
|---|
| 9390 | 10141 | { |
|---|
| 9391 | 10142 | struct i40e_driver_version dv; |
|---|
| 9392 | 10143 | |
|---|
| 9393 | | - dv.major_version = DRV_VERSION_MAJOR; |
|---|
| 9394 | | - dv.minor_version = DRV_VERSION_MINOR; |
|---|
| 9395 | | - dv.build_version = DRV_VERSION_BUILD; |
|---|
| 10144 | + dv.major_version = 0xff; |
|---|
| 10145 | + dv.minor_version = 0xff; |
|---|
| 10146 | + dv.build_version = 0xff; |
|---|
| 9396 | 10147 | dv.subbuild_version = 0; |
|---|
| 9397 | | - strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string)); |
|---|
| 10148 | + strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string)); |
|---|
| 9398 | 10149 | i40e_aq_send_driver_version(&pf->hw, &dv, NULL); |
|---|
| 9399 | 10150 | } |
|---|
| 9400 | 10151 | |
|---|
| .. | .. |
|---|
| 9472 | 10223 | **/ |
|---|
| 9473 | 10224 | static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) |
|---|
| 9474 | 10225 | { |
|---|
| 10226 | + const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf); |
|---|
| 9475 | 10227 | struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; |
|---|
| 9476 | 10228 | struct i40e_hw *hw = &pf->hw; |
|---|
| 9477 | 10229 | i40e_status ret; |
|---|
| 9478 | 10230 | u32 val; |
|---|
| 9479 | 10231 | int v; |
|---|
| 9480 | 10232 | |
|---|
| 9481 | | - if (test_bit(__I40E_DOWN, pf->state)) |
|---|
| 10233 | + if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && |
|---|
| 10234 | + is_recovery_mode_reported) |
|---|
| 10235 | + i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev); |
|---|
| 10236 | + |
|---|
| 10237 | + if (test_bit(__I40E_DOWN, pf->state) && |
|---|
| 10238 | + !test_bit(__I40E_RECOVERY_MODE, pf->state)) |
|---|
| 9482 | 10239 | goto clear_recovery; |
|---|
| 9483 | 10240 | dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); |
|---|
| 9484 | 10241 | |
|---|
| .. | .. |
|---|
| 9500 | 10257 | /* re-verify the eeprom if we just had an EMP reset */ |
|---|
| 9501 | 10258 | if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) |
|---|
| 9502 | 10259 | i40e_verify_eeprom(pf); |
|---|
| 10260 | + |
|---|
| 10261 | + /* if we are going out of or into recovery mode we have to act |
|---|
| 10262 | + * accordingly with regard to resources initialization |
|---|
| 10263 | + * and deinitialization |
|---|
| 10264 | + */ |
|---|
| 10265 | + if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { |
|---|
| 10266 | + if (i40e_get_capabilities(pf, |
|---|
| 10267 | + i40e_aqc_opc_list_func_capabilities)) |
|---|
| 10268 | + goto end_unlock; |
|---|
| 10269 | + |
|---|
| 10270 | + if (is_recovery_mode_reported) { |
|---|
| 10271 | + /* we're staying in recovery mode so we'll reinitialize |
|---|
| 10272 | + * misc vector here |
|---|
| 10273 | + */ |
|---|
| 10274 | + if (i40e_setup_misc_vector_for_recovery_mode(pf)) |
|---|
| 10275 | + goto end_unlock; |
|---|
| 10276 | + } else { |
|---|
| 10277 | + if (!lock_acquired) |
|---|
| 10278 | + rtnl_lock(); |
|---|
| 10279 | + /* we're going out of recovery mode so we'll free |
|---|
| 10280 | + * the IRQ allocated specifically for recovery mode |
|---|
| 10281 | + * and restore the interrupt scheme |
|---|
| 10282 | + */ |
|---|
| 10283 | + free_irq(pf->pdev->irq, pf); |
|---|
| 10284 | + i40e_clear_interrupt_scheme(pf); |
|---|
| 10285 | + if (i40e_restore_interrupt_scheme(pf)) |
|---|
| 10286 | + goto end_unlock; |
|---|
| 10287 | + } |
|---|
| 10288 | + |
|---|
| 10289 | + /* tell the firmware that we're starting */ |
|---|
| 10290 | + i40e_send_version(pf); |
|---|
| 10291 | + |
|---|
| 10292 | + /* bail out in case recovery mode was detected, as there is |
|---|
| 10293 | + * no need for further configuration. |
|---|
| 10294 | + */ |
|---|
| 10295 | + goto end_unlock; |
|---|
| 10296 | + } |
|---|
| 9503 | 10297 | |
|---|
| 9504 | 10298 | i40e_clear_pxe_mode(hw); |
|---|
| 9505 | 10299 | ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities); |
|---|
| .. | .. |
|---|
| 9532 | 10326 | /* do basic switch setup */ |
|---|
| 9533 | 10327 | if (!lock_acquired) |
|---|
| 9534 | 10328 | rtnl_lock(); |
|---|
| 9535 | | - ret = i40e_setup_pf_switch(pf, reinit); |
|---|
| 10329 | + ret = i40e_setup_pf_switch(pf, reinit, true); |
|---|
| 9536 | 10330 | if (ret) |
|---|
| 9537 | 10331 | goto end_unlock; |
|---|
| 9538 | 10332 | |
|---|
| .. | .. |
|---|
| 9602 | 10396 | } |
|---|
| 9603 | 10397 | |
|---|
| 9604 | 10398 | if (vsi->mqprio_qopt.max_rate[0]) { |
|---|
| 9605 | | - u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; |
|---|
| 10399 | + u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi, |
|---|
| 10400 | + vsi->mqprio_qopt.max_rate[0]); |
|---|
| 9606 | 10401 | u64 credits = 0; |
|---|
| 9607 | 10402 | |
|---|
| 9608 | | - do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); |
|---|
| 9609 | 10403 | ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); |
|---|
| 9610 | 10404 | if (ret) |
|---|
| 9611 | 10405 | goto end_unlock; |
|---|
| .. | .. |
|---|
| 9654 | 10448 | pf->hw.aq.asq_last_status)); |
|---|
| 9655 | 10449 | } |
|---|
| 9656 | 10450 | /* reinit the misc interrupt */ |
|---|
| 9657 | | - if (pf->flags & I40E_FLAG_MSIX_ENABLED) |
|---|
| 10451 | + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { |
|---|
| 9658 | 10452 | ret = i40e_setup_misc_vector(pf); |
|---|
| 10453 | + if (ret) |
|---|
| 10454 | + goto end_unlock; |
|---|
| 10455 | + } |
|---|
| 9659 | 10456 | |
|---|
| 9660 | 10457 | /* Add a filter to drop all Flow control frames from any VSI from being |
|---|
| 9661 | 10458 | * transmitted. By doing so we stop a malicious VF from sending out |
|---|
| .. | .. |
|---|
| 9745 | 10542 | { |
|---|
| 9746 | 10543 | struct i40e_hw *hw = &pf->hw; |
|---|
| 9747 | 10544 | bool mdd_detected = false; |
|---|
| 9748 | | - bool pf_mdd_detected = false; |
|---|
| 9749 | 10545 | struct i40e_vf *vf; |
|---|
| 9750 | 10546 | u32 reg; |
|---|
| 9751 | 10547 | int i; |
|---|
| .. | .. |
|---|
| 9791 | 10587 | reg = rd32(hw, I40E_PF_MDET_TX); |
|---|
| 9792 | 10588 | if (reg & I40E_PF_MDET_TX_VALID_MASK) { |
|---|
| 9793 | 10589 | wr32(hw, I40E_PF_MDET_TX, 0xFFFF); |
|---|
| 9794 | | - dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); |
|---|
| 9795 | | - pf_mdd_detected = true; |
|---|
| 10590 | + dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n"); |
|---|
| 9796 | 10591 | } |
|---|
| 9797 | 10592 | reg = rd32(hw, I40E_PF_MDET_RX); |
|---|
| 9798 | 10593 | if (reg & I40E_PF_MDET_RX_VALID_MASK) { |
|---|
| 9799 | 10594 | wr32(hw, I40E_PF_MDET_RX, 0xFFFF); |
|---|
| 9800 | | - dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n"); |
|---|
| 9801 | | - pf_mdd_detected = true; |
|---|
| 9802 | | - } |
|---|
| 9803 | | - /* Queue belongs to the PF, initiate a reset */ |
|---|
| 9804 | | - if (pf_mdd_detected) { |
|---|
| 9805 | | - set_bit(__I40E_PF_RESET_REQUESTED, pf->state); |
|---|
| 9806 | | - i40e_service_event_schedule(pf); |
|---|
| 10595 | + dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n"); |
|---|
| 9807 | 10596 | } |
|---|
| 9808 | 10597 | } |
|---|
| 9809 | 10598 | |
|---|
| .. | .. |
|---|
| 9816 | 10605 | vf->num_mdd_events++; |
|---|
| 9817 | 10606 | dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", |
|---|
| 9818 | 10607 | i); |
|---|
| 10608 | + dev_info(&pf->pdev->dev, |
|---|
| 10609 | + "Use PF Control I/F to re-enable the VF\n"); |
|---|
| 10610 | + set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); |
|---|
| 9819 | 10611 | } |
|---|
| 9820 | 10612 | |
|---|
| 9821 | 10613 | reg = rd32(hw, I40E_VP_MDET_RX(i)); |
|---|
| .. | .. |
|---|
| 9824 | 10616 | vf->num_mdd_events++; |
|---|
| 9825 | 10617 | dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", |
|---|
| 9826 | 10618 | i); |
|---|
| 9827 | | - } |
|---|
| 9828 | | - |
|---|
| 9829 | | - if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) { |
|---|
| 9830 | | - dev_info(&pf->pdev->dev, |
|---|
| 9831 | | - "Too many MDD events on VF %d, disabled\n", i); |
|---|
| 9832 | 10619 | dev_info(&pf->pdev->dev, |
|---|
| 9833 | 10620 | "Use PF Control I/F to re-enable the VF\n"); |
|---|
| 9834 | 10621 | set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); |
|---|
| .. | .. |
|---|
| 9843 | 10630 | i40e_flush(hw); |
|---|
| 9844 | 10631 | } |
|---|
| 9845 | 10632 | |
|---|
| 9846 | | -static const char *i40e_tunnel_name(u8 type) |
|---|
| 9847 | | -{ |
|---|
| 9848 | | - switch (type) { |
|---|
| 9849 | | - case UDP_TUNNEL_TYPE_VXLAN: |
|---|
| 9850 | | - return "vxlan"; |
|---|
| 9851 | | - case UDP_TUNNEL_TYPE_GENEVE: |
|---|
| 9852 | | - return "geneve"; |
|---|
| 9853 | | - default: |
|---|
| 9854 | | - return "unknown"; |
|---|
| 9855 | | - } |
|---|
| 9856 | | -} |
|---|
| 9857 | | - |
|---|
| 9858 | | -/** |
|---|
| 9859 | | - * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters |
|---|
| 9860 | | - * @pf: board private structure |
|---|
| 9861 | | - **/ |
|---|
| 9862 | | -static void i40e_sync_udp_filters(struct i40e_pf *pf) |
|---|
| 9863 | | -{ |
|---|
| 9864 | | - int i; |
|---|
| 9865 | | - |
|---|
| 9866 | | - /* loop through and set pending bit for all active UDP filters */ |
|---|
| 9867 | | - for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { |
|---|
| 9868 | | - if (pf->udp_ports[i].port) |
|---|
| 9869 | | - pf->pending_udp_bitmap |= BIT_ULL(i); |
|---|
| 9870 | | - } |
|---|
| 9871 | | - |
|---|
| 9872 | | - set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state); |
|---|
| 9873 | | -} |
|---|
| 9874 | | - |
|---|
| 9875 | | -/** |
|---|
| 9876 | | - * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW |
|---|
| 9877 | | - * @pf: board private structure |
|---|
| 9878 | | - **/ |
|---|
| 9879 | | -static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) |
|---|
| 9880 | | -{ |
|---|
| 9881 | | - struct i40e_hw *hw = &pf->hw; |
|---|
| 9882 | | - u8 filter_index, type; |
|---|
| 9883 | | - u16 port; |
|---|
| 9884 | | - int i; |
|---|
| 9885 | | - |
|---|
| 9886 | | - if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state)) |
|---|
| 9887 | | - return; |
|---|
| 9888 | | - |
|---|
| 9889 | | - /* acquire RTNL to maintain state of flags and port requests */ |
|---|
| 9890 | | - rtnl_lock(); |
|---|
| 9891 | | - |
|---|
| 9892 | | - for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { |
|---|
| 9893 | | - if (pf->pending_udp_bitmap & BIT_ULL(i)) { |
|---|
| 9894 | | - struct i40e_udp_port_config *udp_port; |
|---|
| 9895 | | - i40e_status ret = 0; |
|---|
| 9896 | | - |
|---|
| 9897 | | - udp_port = &pf->udp_ports[i]; |
|---|
| 9898 | | - pf->pending_udp_bitmap &= ~BIT_ULL(i); |
|---|
| 9899 | | - |
|---|
| 9900 | | - port = READ_ONCE(udp_port->port); |
|---|
| 9901 | | - type = READ_ONCE(udp_port->type); |
|---|
| 9902 | | - filter_index = READ_ONCE(udp_port->filter_index); |
|---|
| 9903 | | - |
|---|
| 9904 | | - /* release RTNL while we wait on AQ command */ |
|---|
| 9905 | | - rtnl_unlock(); |
|---|
| 9906 | | - |
|---|
| 9907 | | - if (port) |
|---|
| 9908 | | - ret = i40e_aq_add_udp_tunnel(hw, port, |
|---|
| 9909 | | - type, |
|---|
| 9910 | | - &filter_index, |
|---|
| 9911 | | - NULL); |
|---|
| 9912 | | - else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED) |
|---|
| 9913 | | - ret = i40e_aq_del_udp_tunnel(hw, filter_index, |
|---|
| 9914 | | - NULL); |
|---|
| 9915 | | - |
|---|
| 9916 | | - /* reacquire RTNL so we can update filter_index */ |
|---|
| 9917 | | - rtnl_lock(); |
|---|
| 9918 | | - |
|---|
| 9919 | | - if (ret) { |
|---|
| 9920 | | - dev_info(&pf->pdev->dev, |
|---|
| 9921 | | - "%s %s port %d, index %d failed, err %s aq_err %s\n", |
|---|
| 9922 | | - i40e_tunnel_name(type), |
|---|
| 9923 | | - port ? "add" : "delete", |
|---|
| 9924 | | - port, |
|---|
| 9925 | | - filter_index, |
|---|
| 9926 | | - i40e_stat_str(&pf->hw, ret), |
|---|
| 9927 | | - i40e_aq_str(&pf->hw, |
|---|
| 9928 | | - pf->hw.aq.asq_last_status)); |
|---|
| 9929 | | - if (port) { |
|---|
| 9930 | | - /* failed to add, just reset port, |
|---|
| 9931 | | - * drop pending bit for any deletion |
|---|
| 9932 | | - */ |
|---|
| 9933 | | - udp_port->port = 0; |
|---|
| 9934 | | - pf->pending_udp_bitmap &= ~BIT_ULL(i); |
|---|
| 9935 | | - } |
|---|
| 9936 | | - } else if (port) { |
|---|
| 9937 | | - /* record filter index on success */ |
|---|
| 9938 | | - udp_port->filter_index = filter_index; |
|---|
| 9939 | | - } |
|---|
| 9940 | | - } |
|---|
| 9941 | | - } |
|---|
| 9942 | | - |
|---|
| 9943 | | - rtnl_unlock(); |
|---|
| 9944 | | -} |
|---|
| 9945 | | - |
|---|
| 9946 | 10633 | /** |
|---|
| 9947 | 10634 | * i40e_service_task - Run the driver's async subtasks |
|---|
| 9948 | 10635 | * @work: pointer to work_struct containing our data |
|---|
| .. | .. |
|---|
| 9955 | 10642 | unsigned long start_time = jiffies; |
|---|
| 9956 | 10643 | |
|---|
| 9957 | 10644 | /* don't bother with service tasks if a reset is in progress */ |
|---|
| 9958 | | - if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) |
|---|
| 10645 | + if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || |
|---|
| 10646 | + test_bit(__I40E_SUSPENDED, pf->state)) |
|---|
| 9959 | 10647 | return; |
|---|
| 9960 | 10648 | |
|---|
| 9961 | 10649 | if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state)) |
|---|
| 9962 | 10650 | return; |
|---|
| 9963 | 10651 | |
|---|
| 9964 | | - i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]); |
|---|
| 9965 | | - i40e_sync_filters_subtask(pf); |
|---|
| 9966 | | - i40e_reset_subtask(pf); |
|---|
| 9967 | | - i40e_handle_mdd_event(pf); |
|---|
| 9968 | | - i40e_vc_process_vflr_event(pf); |
|---|
| 9969 | | - i40e_watchdog_subtask(pf); |
|---|
| 9970 | | - i40e_fdir_reinit_subtask(pf); |
|---|
| 9971 | | - if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) { |
|---|
| 9972 | | - /* Client subtask will reopen next time through. */ |
|---|
| 9973 | | - i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true); |
|---|
| 10652 | + if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) { |
|---|
| 10653 | + i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]); |
|---|
| 10654 | + i40e_sync_filters_subtask(pf); |
|---|
| 10655 | + i40e_reset_subtask(pf); |
|---|
| 10656 | + i40e_handle_mdd_event(pf); |
|---|
| 10657 | + i40e_vc_process_vflr_event(pf); |
|---|
| 10658 | + i40e_watchdog_subtask(pf); |
|---|
| 10659 | + i40e_fdir_reinit_subtask(pf); |
|---|
| 10660 | + if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) { |
|---|
| 10661 | + /* Client subtask will reopen next time through. */ |
|---|
| 10662 | + i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], |
|---|
| 10663 | + true); |
|---|
| 10664 | + } else { |
|---|
| 10665 | + i40e_client_subtask(pf); |
|---|
| 10666 | + if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE, |
|---|
| 10667 | + pf->state)) |
|---|
| 10668 | + i40e_notify_client_of_l2_param_changes( |
|---|
| 10669 | + pf->vsi[pf->lan_vsi]); |
|---|
| 10670 | + } |
|---|
| 10671 | + i40e_sync_filters_subtask(pf); |
|---|
| 9974 | 10672 | } else { |
|---|
| 9975 | | - i40e_client_subtask(pf); |
|---|
| 9976 | | - if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE, |
|---|
| 9977 | | - pf->state)) |
|---|
| 9978 | | - i40e_notify_client_of_l2_param_changes( |
|---|
| 9979 | | - pf->vsi[pf->lan_vsi]); |
|---|
| 10673 | + i40e_reset_subtask(pf); |
|---|
| 9980 | 10674 | } |
|---|
| 9981 | | - i40e_sync_filters_subtask(pf); |
|---|
| 9982 | | - i40e_sync_udp_filters_subtask(pf); |
|---|
| 10675 | + |
|---|
| 9983 | 10676 | i40e_clean_adminq_subtask(pf); |
|---|
| 9984 | 10677 | |
|---|
| 9985 | 10678 | /* flush memory to make sure state is correct before next watchdog */ |
|---|
| .. | .. |
|---|
| 9999 | 10692 | |
|---|
| 10000 | 10693 | /** |
|---|
| 10001 | 10694 | * i40e_service_timer - timer callback |
|---|
| 10002 | | - * @data: pointer to PF struct |
|---|
| 10695 | + * @t: timer list pointer |
|---|
| 10003 | 10696 | **/ |
|---|
| 10004 | 10697 | static void i40e_service_timer(struct timer_list *t) |
|---|
| 10005 | 10698 | { |
|---|
| .. | .. |
|---|
| 10021 | 10714 | switch (vsi->type) { |
|---|
| 10022 | 10715 | case I40E_VSI_MAIN: |
|---|
| 10023 | 10716 | vsi->alloc_queue_pairs = pf->num_lan_qps; |
|---|
| 10024 | | - vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, |
|---|
| 10025 | | - I40E_REQ_DESCRIPTOR_MULTIPLE); |
|---|
| 10717 | + if (!vsi->num_tx_desc) |
|---|
| 10718 | + vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, |
|---|
| 10719 | + I40E_REQ_DESCRIPTOR_MULTIPLE); |
|---|
| 10720 | + if (!vsi->num_rx_desc) |
|---|
| 10721 | + vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, |
|---|
| 10722 | + I40E_REQ_DESCRIPTOR_MULTIPLE); |
|---|
| 10026 | 10723 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) |
|---|
| 10027 | 10724 | vsi->num_q_vectors = pf->num_lan_msix; |
|---|
| 10028 | 10725 | else |
|---|
| .. | .. |
|---|
| 10032 | 10729 | |
|---|
| 10033 | 10730 | case I40E_VSI_FDIR: |
|---|
| 10034 | 10731 | vsi->alloc_queue_pairs = 1; |
|---|
| 10035 | | - vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT, |
|---|
| 10036 | | - I40E_REQ_DESCRIPTOR_MULTIPLE); |
|---|
| 10732 | + vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT, |
|---|
| 10733 | + I40E_REQ_DESCRIPTOR_MULTIPLE); |
|---|
| 10734 | + vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT, |
|---|
| 10735 | + I40E_REQ_DESCRIPTOR_MULTIPLE); |
|---|
| 10037 | 10736 | vsi->num_q_vectors = pf->num_fdsb_msix; |
|---|
| 10038 | 10737 | break; |
|---|
| 10039 | 10738 | |
|---|
| 10040 | 10739 | case I40E_VSI_VMDQ2: |
|---|
| 10041 | 10740 | vsi->alloc_queue_pairs = pf->num_vmdq_qps; |
|---|
| 10042 | | - vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, |
|---|
| 10043 | | - I40E_REQ_DESCRIPTOR_MULTIPLE); |
|---|
| 10741 | + if (!vsi->num_tx_desc) |
|---|
| 10742 | + vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, |
|---|
| 10743 | + I40E_REQ_DESCRIPTOR_MULTIPLE); |
|---|
| 10744 | + if (!vsi->num_rx_desc) |
|---|
| 10745 | + vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, |
|---|
| 10746 | + I40E_REQ_DESCRIPTOR_MULTIPLE); |
|---|
| 10044 | 10747 | vsi->num_q_vectors = pf->num_vmdq_msix; |
|---|
| 10045 | 10748 | break; |
|---|
| 10046 | 10749 | |
|---|
| 10047 | 10750 | case I40E_VSI_SRIOV: |
|---|
| 10048 | 10751 | vsi->alloc_queue_pairs = pf->num_vf_qps; |
|---|
| 10049 | | - vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, |
|---|
| 10050 | | - I40E_REQ_DESCRIPTOR_MULTIPLE); |
|---|
| 10752 | + if (!vsi->num_tx_desc) |
|---|
| 10753 | + vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, |
|---|
| 10754 | + I40E_REQ_DESCRIPTOR_MULTIPLE); |
|---|
| 10755 | + if (!vsi->num_rx_desc) |
|---|
| 10756 | + vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, |
|---|
| 10757 | + I40E_REQ_DESCRIPTOR_MULTIPLE); |
|---|
| 10051 | 10758 | break; |
|---|
| 10052 | 10759 | |
|---|
| 10053 | 10760 | default: |
|---|
| .. | .. |
|---|
| 10160 | 10867 | hash_init(vsi->mac_filter_hash); |
|---|
| 10161 | 10868 | vsi->irqs_ready = false; |
|---|
| 10162 | 10869 | |
|---|
| 10870 | + if (type == I40E_VSI_MAIN) { |
|---|
| 10871 | + vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL); |
|---|
| 10872 | + if (!vsi->af_xdp_zc_qps) |
|---|
| 10873 | + goto err_rings; |
|---|
| 10874 | + } |
|---|
| 10875 | + |
|---|
| 10163 | 10876 | ret = i40e_set_num_rings_in_vsi(vsi); |
|---|
| 10164 | 10877 | if (ret) |
|---|
| 10165 | 10878 | goto err_rings; |
|---|
| .. | .. |
|---|
| 10178 | 10891 | goto unlock_pf; |
|---|
| 10179 | 10892 | |
|---|
| 10180 | 10893 | err_rings: |
|---|
| 10894 | + bitmap_free(vsi->af_xdp_zc_qps); |
|---|
| 10181 | 10895 | pf->next_vsi = i - 1; |
|---|
| 10182 | 10896 | kfree(vsi); |
|---|
| 10183 | 10897 | unlock_pf: |
|---|
| .. | .. |
|---|
| 10258 | 10972 | i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); |
|---|
| 10259 | 10973 | i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); |
|---|
| 10260 | 10974 | |
|---|
| 10975 | + bitmap_free(vsi->af_xdp_zc_qps); |
|---|
| 10261 | 10976 | i40e_vsi_free_arrays(vsi, true); |
|---|
| 10262 | 10977 | i40e_clear_rss_config_user(vsi); |
|---|
| 10263 | 10978 | |
|---|
| .. | .. |
|---|
| 10315 | 11030 | ring->vsi = vsi; |
|---|
| 10316 | 11031 | ring->netdev = vsi->netdev; |
|---|
| 10317 | 11032 | ring->dev = &pf->pdev->dev; |
|---|
| 10318 | | - ring->count = vsi->num_desc; |
|---|
| 11033 | + ring->count = vsi->num_tx_desc; |
|---|
| 10319 | 11034 | ring->size = 0; |
|---|
| 10320 | 11035 | ring->dcb_tc = 0; |
|---|
| 10321 | 11036 | if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) |
|---|
| .. | .. |
|---|
| 10332 | 11047 | ring->vsi = vsi; |
|---|
| 10333 | 11048 | ring->netdev = NULL; |
|---|
| 10334 | 11049 | ring->dev = &pf->pdev->dev; |
|---|
| 10335 | | - ring->count = vsi->num_desc; |
|---|
| 11050 | + ring->count = vsi->num_tx_desc; |
|---|
| 10336 | 11051 | ring->size = 0; |
|---|
| 10337 | 11052 | ring->dcb_tc = 0; |
|---|
| 10338 | 11053 | if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) |
|---|
| .. | .. |
|---|
| 10348 | 11063 | ring->vsi = vsi; |
|---|
| 10349 | 11064 | ring->netdev = vsi->netdev; |
|---|
| 10350 | 11065 | ring->dev = &pf->pdev->dev; |
|---|
| 10351 | | - ring->count = vsi->num_desc; |
|---|
| 11066 | + ring->count = vsi->num_rx_desc; |
|---|
| 10352 | 11067 | ring->size = 0; |
|---|
| 10353 | 11068 | ring->dcb_tc = 0; |
|---|
| 10354 | 11069 | ring->itr_setting = pf->rx_itr_default; |
|---|
| .. | .. |
|---|
| 10616 | 11331 | * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector |
|---|
| 10617 | 11332 | * @vsi: the VSI being configured |
|---|
| 10618 | 11333 | * @v_idx: index of the vector in the vsi struct |
|---|
| 10619 | | - * @cpu: cpu to be used on affinity_mask |
|---|
| 10620 | 11334 | * |
|---|
| 10621 | 11335 | * We allocate one q_vector. If allocation fails we return -ENOMEM. |
|---|
| 10622 | 11336 | **/ |
|---|
| 10623 | | -static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu) |
|---|
| 11337 | +static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) |
|---|
| 10624 | 11338 | { |
|---|
| 10625 | 11339 | struct i40e_q_vector *q_vector; |
|---|
| 10626 | 11340 | |
|---|
| .. | .. |
|---|
| 10653 | 11367 | static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) |
|---|
| 10654 | 11368 | { |
|---|
| 10655 | 11369 | struct i40e_pf *pf = vsi->back; |
|---|
| 10656 | | - int err, v_idx, num_q_vectors, current_cpu; |
|---|
| 11370 | + int err, v_idx, num_q_vectors; |
|---|
| 10657 | 11371 | |
|---|
| 10658 | 11372 | /* if not MSIX, give the one vector only to the LAN VSI */ |
|---|
| 10659 | 11373 | if (pf->flags & I40E_FLAG_MSIX_ENABLED) |
|---|
| .. | .. |
|---|
| 10663 | 11377 | else |
|---|
| 10664 | 11378 | return -EINVAL; |
|---|
| 10665 | 11379 | |
|---|
| 10666 | | - current_cpu = cpumask_first(cpu_online_mask); |
|---|
| 10667 | | - |
|---|
| 10668 | 11380 | for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { |
|---|
| 10669 | | - err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu); |
|---|
| 11381 | + err = i40e_vsi_alloc_q_vector(vsi, v_idx); |
|---|
| 10670 | 11382 | if (err) |
|---|
| 10671 | 11383 | goto err_out; |
|---|
| 10672 | | - current_cpu = cpumask_next(current_cpu, cpu_online_mask); |
|---|
| 10673 | | - if (unlikely(current_cpu >= nr_cpu_ids)) |
|---|
| 10674 | | - current_cpu = cpumask_first(cpu_online_mask); |
|---|
| 10675 | 11384 | } |
|---|
| 10676 | 11385 | |
|---|
| 10677 | 11386 | return 0; |
|---|
| .. | .. |
|---|
| 10793 | 11502 | } |
|---|
| 10794 | 11503 | |
|---|
| 10795 | 11504 | /** |
|---|
| 11505 | + * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle |
|---|
| 11506 | + * non queue events in recovery mode |
|---|
| 11507 | + * @pf: board private structure |
|---|
| 11508 | + * |
|---|
| 11509 | + * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage |
|---|
| 11510 | + * the non-queue interrupts, e.g. AdminQ and errors in recovery mode. |
|---|
| 11511 | + * This is handled differently than in recovery mode since no Tx/Rx resources |
|---|
| 11512 | + * are being allocated. |
|---|
| 11513 | + **/ |
|---|
| 11514 | +static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf) |
|---|
| 11515 | +{ |
|---|
| 11516 | + int err; |
|---|
| 11517 | + |
|---|
| 11518 | + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { |
|---|
| 11519 | + err = i40e_setup_misc_vector(pf); |
|---|
| 11520 | + |
|---|
| 11521 | + if (err) { |
|---|
| 11522 | + dev_info(&pf->pdev->dev, |
|---|
| 11523 | + "MSI-X misc vector request failed, error %d\n", |
|---|
| 11524 | + err); |
|---|
| 11525 | + return err; |
|---|
| 11526 | + } |
|---|
| 11527 | + } else { |
|---|
| 11528 | + u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED; |
|---|
| 11529 | + |
|---|
| 11530 | + err = request_irq(pf->pdev->irq, i40e_intr, flags, |
|---|
| 11531 | + pf->int_name, pf); |
|---|
| 11532 | + |
|---|
| 11533 | + if (err) { |
|---|
| 11534 | + dev_info(&pf->pdev->dev, |
|---|
| 11535 | + "MSI/legacy misc vector request failed, error %d\n", |
|---|
| 11536 | + err); |
|---|
| 11537 | + return err; |
|---|
| 11538 | + } |
|---|
| 11539 | + i40e_enable_misc_int_causes(pf); |
|---|
| 11540 | + i40e_irq_dynamic_enable_icr0(pf); |
|---|
| 11541 | + } |
|---|
| 11542 | + |
|---|
| 11543 | + return 0; |
|---|
| 11544 | +} |
|---|
| 11545 | + |
|---|
| 11546 | +/** |
|---|
| 10796 | 11547 | * i40e_setup_misc_vector - Setup the misc vector to handle non queue events |
|---|
| 10797 | 11548 | * @pf: board private structure |
|---|
| 10798 | 11549 | * |
|---|
| .. | .. |
|---|
| 10861 | 11612 | } |
|---|
| 10862 | 11613 | |
|---|
| 10863 | 11614 | if (lut) { |
|---|
| 10864 | | - bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false; |
|---|
| 11615 | + bool pf_lut = vsi->type == I40E_VSI_MAIN; |
|---|
| 10865 | 11616 | |
|---|
| 10866 | 11617 | ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); |
|---|
| 10867 | 11618 | if (ret) { |
|---|
| .. | .. |
|---|
| 11103 | 11854 | if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) |
|---|
| 11104 | 11855 | return 0; |
|---|
| 11105 | 11856 | |
|---|
| 11857 | + queue_count = min_t(int, queue_count, num_online_cpus()); |
|---|
| 11106 | 11858 | new_rss_size = min_t(int, queue_count, pf->rss_size_max); |
|---|
| 11107 | 11859 | |
|---|
| 11108 | 11860 | if (queue_count != vsi->num_queue_pairs) { |
|---|
| .. | .. |
|---|
| 11264 | 12016 | } |
|---|
| 11265 | 12017 | |
|---|
| 11266 | 12018 | /** |
|---|
| 12019 | + * i40e_is_total_port_shutdown_enabled - read NVM and return value |
|---|
| 12020 | + * if total port shutdown feature is enabled for this PF |
|---|
| 12021 | + * @pf: board private structure |
|---|
| 12022 | + **/ |
|---|
| 12023 | +static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf) |
|---|
| 12024 | +{ |
|---|
| 12025 | +#define I40E_TOTAL_PORT_SHUTDOWN_ENABLED BIT(4) |
|---|
| 12026 | +#define I40E_FEATURES_ENABLE_PTR 0x2A |
|---|
| 12027 | +#define I40E_CURRENT_SETTING_PTR 0x2B |
|---|
| 12028 | +#define I40E_LINK_BEHAVIOR_WORD_OFFSET 0x2D |
|---|
| 12029 | +#define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1 |
|---|
| 12030 | +#define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0) |
|---|
| 12031 | +#define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4 |
|---|
| 12032 | + i40e_status read_status = I40E_SUCCESS; |
|---|
| 12033 | + u16 sr_emp_sr_settings_ptr = 0; |
|---|
| 12034 | + u16 features_enable = 0; |
|---|
| 12035 | + u16 link_behavior = 0; |
|---|
| 12036 | + bool ret = false; |
|---|
| 12037 | + |
|---|
| 12038 | + read_status = i40e_read_nvm_word(&pf->hw, |
|---|
| 12039 | + I40E_SR_EMP_SR_SETTINGS_PTR, |
|---|
| 12040 | + &sr_emp_sr_settings_ptr); |
|---|
| 12041 | + if (read_status) |
|---|
| 12042 | + goto err_nvm; |
|---|
| 12043 | + read_status = i40e_read_nvm_word(&pf->hw, |
|---|
| 12044 | + sr_emp_sr_settings_ptr + |
|---|
| 12045 | + I40E_FEATURES_ENABLE_PTR, |
|---|
| 12046 | + &features_enable); |
|---|
| 12047 | + if (read_status) |
|---|
| 12048 | + goto err_nvm; |
|---|
| 12049 | + if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) { |
|---|
| 12050 | + read_status = i40e_read_nvm_module_data(&pf->hw, |
|---|
| 12051 | + I40E_SR_EMP_SR_SETTINGS_PTR, |
|---|
| 12052 | + I40E_CURRENT_SETTING_PTR, |
|---|
| 12053 | + I40E_LINK_BEHAVIOR_WORD_OFFSET, |
|---|
| 12054 | + I40E_LINK_BEHAVIOR_WORD_LENGTH, |
|---|
| 12055 | + &link_behavior); |
|---|
| 12056 | + if (read_status) |
|---|
| 12057 | + goto err_nvm; |
|---|
| 12058 | + link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH); |
|---|
| 12059 | + ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior; |
|---|
| 12060 | + } |
|---|
| 12061 | + return ret; |
|---|
| 12062 | + |
|---|
| 12063 | +err_nvm: |
|---|
| 12064 | + dev_warn(&pf->pdev->dev, |
|---|
| 12065 | + "total-port-shutdown feature is off due to read nvm error: %s\n", |
|---|
| 12066 | + i40e_stat_str(&pf->hw, read_status)); |
|---|
| 12067 | + return ret; |
|---|
| 12068 | +} |
|---|
| 12069 | + |
|---|
| 12070 | +/** |
|---|
| 11267 | 12071 | * i40e_sw_init - Initialize general software structures (struct i40e_pf) |
|---|
| 11268 | 12072 | * @pf: board private structure to initialize |
|---|
| 11269 | 12073 | * |
|---|
| .. | .. |
|---|
| 11405 | 12209 | /* IWARP needs one extra vector for CQP just like MISC.*/ |
|---|
| 11406 | 12210 | pf->num_iwarp_msix = (int)num_online_cpus() + 1; |
|---|
| 11407 | 12211 | } |
|---|
| 11408 | | - /* Stopping the FW LLDP engine is only supported on the |
|---|
| 11409 | | - * XL710 with a FW ver >= 1.7. Also, stopping FW LLDP |
|---|
| 11410 | | - * engine is not supported if NPAR is functioning on this |
|---|
| 11411 | | - * part |
|---|
| 12212 | + /* Stopping FW LLDP engine is supported on XL710 and X722 |
|---|
| 12213 | + * starting from FW versions determined in i40e_init_adminq. |
|---|
| 12214 | + * Stopping the FW LLDP engine is not supported on XL710 |
|---|
| 12215 | + * if NPAR is functioning so unset this hw flag in this case. |
|---|
| 11412 | 12216 | */ |
|---|
| 11413 | 12217 | if (pf->hw.mac.type == I40E_MAC_XL710 && |
|---|
| 11414 | | - !pf->hw.func_caps.npar_enable && |
|---|
| 11415 | | - (pf->hw.aq.api_maj_ver > 1 || |
|---|
| 11416 | | - (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver > 6))) |
|---|
| 11417 | | - pf->hw_features |= I40E_HW_STOPPABLE_FW_LLDP; |
|---|
| 12218 | + pf->hw.func_caps.npar_enable && |
|---|
| 12219 | + (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) |
|---|
| 12220 | + pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE; |
|---|
| 11418 | 12221 | |
|---|
| 11419 | 12222 | #ifdef CONFIG_PCI_IOV |
|---|
| 11420 | 12223 | if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { |
|---|
| .. | .. |
|---|
| 11444 | 12247 | |
|---|
| 11445 | 12248 | pf->tx_timeout_recovery_level = 1; |
|---|
| 11446 | 12249 | |
|---|
| 12250 | + if (pf->hw.mac.type != I40E_MAC_X722 && |
|---|
| 12251 | + i40e_is_total_port_shutdown_enabled(pf)) { |
|---|
| 12252 | + /* Link down on close must be on when total port shutdown |
|---|
| 12253 | + * is enabled for a given port |
|---|
| 12254 | + */ |
|---|
| 12255 | + pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED | |
|---|
| 12256 | + I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED); |
|---|
| 12257 | + dev_info(&pf->pdev->dev, |
|---|
| 12258 | + "total-port-shutdown was enabled, link-down-on-close is forced on\n"); |
|---|
| 12259 | + } |
|---|
| 11447 | 12260 | mutex_init(&pf->switch_mutex); |
|---|
| 11448 | 12261 | |
|---|
| 11449 | 12262 | sw_init_done: |
|---|
| .. | .. |
|---|
| 11550 | 12363 | return -EINVAL; |
|---|
| 11551 | 12364 | } |
|---|
| 11552 | 12365 | |
|---|
| 12366 | + if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt) |
|---|
| 12367 | + i40e_del_all_macvlans(vsi); |
|---|
| 12368 | + |
|---|
| 11553 | 12369 | need_reset = i40e_set_ntuple(pf, features); |
|---|
| 11554 | 12370 | |
|---|
| 11555 | 12371 | if (need_reset) |
|---|
| .. | .. |
|---|
| 11558 | 12374 | return 0; |
|---|
| 11559 | 12375 | } |
|---|
| 11560 | 12376 | |
|---|
| 11561 | | -/** |
|---|
| 11562 | | - * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port |
|---|
| 11563 | | - * @pf: board private structure |
|---|
| 11564 | | - * @port: The UDP port to look up |
|---|
| 11565 | | - * |
|---|
| 11566 | | - * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found |
|---|
| 11567 | | - **/ |
|---|
| 11568 | | -static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port) |
|---|
| 11569 | | -{ |
|---|
| 11570 | | - u8 i; |
|---|
| 11571 | | - |
|---|
| 11572 | | - for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { |
|---|
| 11573 | | - /* Do not report ports with pending deletions as |
|---|
| 11574 | | - * being available. |
|---|
| 11575 | | - */ |
|---|
| 11576 | | - if (!port && (pf->pending_udp_bitmap & BIT_ULL(i))) |
|---|
| 11577 | | - continue; |
|---|
| 11578 | | - if (pf->udp_ports[i].port == port) |
|---|
| 11579 | | - return i; |
|---|
| 11580 | | - } |
|---|
| 11581 | | - |
|---|
| 11582 | | - return i; |
|---|
| 11583 | | -} |
|---|
| 11584 | | - |
|---|
| 11585 | | -/** |
|---|
| 11586 | | - * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up |
|---|
| 11587 | | - * @netdev: This physical port's netdev |
|---|
| 11588 | | - * @ti: Tunnel endpoint information |
|---|
| 11589 | | - **/ |
|---|
| 11590 | | -static void i40e_udp_tunnel_add(struct net_device *netdev, |
|---|
| 11591 | | - struct udp_tunnel_info *ti) |
|---|
| 12377 | +static int i40e_udp_tunnel_set_port(struct net_device *netdev, |
|---|
| 12378 | + unsigned int table, unsigned int idx, |
|---|
| 12379 | + struct udp_tunnel_info *ti) |
|---|
| 11592 | 12380 | { |
|---|
| 11593 | 12381 | struct i40e_netdev_priv *np = netdev_priv(netdev); |
|---|
| 11594 | | - struct i40e_vsi *vsi = np->vsi; |
|---|
| 11595 | | - struct i40e_pf *pf = vsi->back; |
|---|
| 11596 | | - u16 port = ntohs(ti->port); |
|---|
| 11597 | | - u8 next_idx; |
|---|
| 11598 | | - u8 idx; |
|---|
| 12382 | + struct i40e_hw *hw = &np->vsi->back->hw; |
|---|
| 12383 | + u8 type, filter_index; |
|---|
| 12384 | + i40e_status ret; |
|---|
| 11599 | 12385 | |
|---|
| 11600 | | - idx = i40e_get_udp_port_idx(pf, port); |
|---|
| 12386 | + type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN : |
|---|
| 12387 | + I40E_AQC_TUNNEL_TYPE_NGE; |
|---|
| 11601 | 12388 | |
|---|
| 11602 | | - /* Check if port already exists */ |
|---|
| 11603 | | - if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { |
|---|
| 11604 | | - netdev_info(netdev, "port %d already offloaded\n", port); |
|---|
| 11605 | | - return; |
|---|
| 12389 | + ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index, |
|---|
| 12390 | + NULL); |
|---|
| 12391 | + if (ret) { |
|---|
| 12392 | + netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n", |
|---|
| 12393 | + i40e_stat_str(hw, ret), |
|---|
| 12394 | + i40e_aq_str(hw, hw->aq.asq_last_status)); |
|---|
| 12395 | + return -EIO; |
|---|
| 11606 | 12396 | } |
|---|
| 11607 | 12397 | |
|---|
| 11608 | | - /* Now check if there is space to add the new port */ |
|---|
| 11609 | | - next_idx = i40e_get_udp_port_idx(pf, 0); |
|---|
| 11610 | | - |
|---|
| 11611 | | - if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { |
|---|
| 11612 | | - netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n", |
|---|
| 11613 | | - port); |
|---|
| 11614 | | - return; |
|---|
| 11615 | | - } |
|---|
| 11616 | | - |
|---|
| 11617 | | - switch (ti->type) { |
|---|
| 11618 | | - case UDP_TUNNEL_TYPE_VXLAN: |
|---|
| 11619 | | - pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; |
|---|
| 11620 | | - break; |
|---|
| 11621 | | - case UDP_TUNNEL_TYPE_GENEVE: |
|---|
| 11622 | | - if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE)) |
|---|
| 11623 | | - return; |
|---|
| 11624 | | - pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE; |
|---|
| 11625 | | - break; |
|---|
| 11626 | | - default: |
|---|
| 11627 | | - return; |
|---|
| 11628 | | - } |
|---|
| 11629 | | - |
|---|
| 11630 | | - /* New port: add it and mark its index in the bitmap */ |
|---|
| 11631 | | - pf->udp_ports[next_idx].port = port; |
|---|
| 11632 | | - pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED; |
|---|
| 11633 | | - pf->pending_udp_bitmap |= BIT_ULL(next_idx); |
|---|
| 11634 | | - set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state); |
|---|
| 12398 | + udp_tunnel_nic_set_port_priv(netdev, table, idx, filter_index); |
|---|
| 12399 | + return 0; |
|---|
| 11635 | 12400 | } |
|---|
| 11636 | 12401 | |
|---|
| 11637 | | -/** |
|---|
| 11638 | | - * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away |
|---|
| 11639 | | - * @netdev: This physical port's netdev |
|---|
| 11640 | | - * @ti: Tunnel endpoint information |
|---|
| 11641 | | - **/ |
|---|
| 11642 | | -static void i40e_udp_tunnel_del(struct net_device *netdev, |
|---|
| 11643 | | - struct udp_tunnel_info *ti) |
|---|
| 12402 | +static int i40e_udp_tunnel_unset_port(struct net_device *netdev, |
|---|
| 12403 | + unsigned int table, unsigned int idx, |
|---|
| 12404 | + struct udp_tunnel_info *ti) |
|---|
| 11644 | 12405 | { |
|---|
| 11645 | 12406 | struct i40e_netdev_priv *np = netdev_priv(netdev); |
|---|
| 11646 | | - struct i40e_vsi *vsi = np->vsi; |
|---|
| 11647 | | - struct i40e_pf *pf = vsi->back; |
|---|
| 11648 | | - u16 port = ntohs(ti->port); |
|---|
| 11649 | | - u8 idx; |
|---|
| 12407 | + struct i40e_hw *hw = &np->vsi->back->hw; |
|---|
| 12408 | + i40e_status ret; |
|---|
| 11650 | 12409 | |
|---|
| 11651 | | - idx = i40e_get_udp_port_idx(pf, port); |
|---|
| 11652 | | - |
|---|
| 11653 | | - /* Check if port already exists */ |
|---|
| 11654 | | - if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS) |
|---|
| 11655 | | - goto not_found; |
|---|
| 11656 | | - |
|---|
| 11657 | | - switch (ti->type) { |
|---|
| 11658 | | - case UDP_TUNNEL_TYPE_VXLAN: |
|---|
| 11659 | | - if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN) |
|---|
| 11660 | | - goto not_found; |
|---|
| 11661 | | - break; |
|---|
| 11662 | | - case UDP_TUNNEL_TYPE_GENEVE: |
|---|
| 11663 | | - if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE) |
|---|
| 11664 | | - goto not_found; |
|---|
| 11665 | | - break; |
|---|
| 11666 | | - default: |
|---|
| 11667 | | - goto not_found; |
|---|
| 12410 | + ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL); |
|---|
| 12411 | + if (ret) { |
|---|
| 12412 | + netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n", |
|---|
| 12413 | + i40e_stat_str(hw, ret), |
|---|
| 12414 | + i40e_aq_str(hw, hw->aq.asq_last_status)); |
|---|
| 12415 | + return -EIO; |
|---|
| 11668 | 12416 | } |
|---|
| 11669 | 12417 | |
|---|
| 11670 | | - /* if port exists, set it to 0 (mark for deletion) |
|---|
| 11671 | | - * and make it pending |
|---|
| 11672 | | - */ |
|---|
| 11673 | | - pf->udp_ports[idx].port = 0; |
|---|
| 11674 | | - |
|---|
| 11675 | | - /* Toggle pending bit instead of setting it. This way if we are |
|---|
| 11676 | | - * deleting a port that has yet to be added we just clear the pending |
|---|
| 11677 | | - * bit and don't have to worry about it. |
|---|
| 11678 | | - */ |
|---|
| 11679 | | - pf->pending_udp_bitmap ^= BIT_ULL(idx); |
|---|
| 11680 | | - set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state); |
|---|
| 11681 | | - |
|---|
| 11682 | | - return; |
|---|
| 11683 | | -not_found: |
|---|
| 11684 | | - netdev_warn(netdev, "UDP port %d was not found, not deleting\n", |
|---|
| 11685 | | - port); |
|---|
| 12418 | + return 0; |
|---|
| 11686 | 12419 | } |
|---|
| 11687 | 12420 | |
|---|
| 11688 | 12421 | static int i40e_get_phys_port_id(struct net_device *netdev, |
|---|
| .. | .. |
|---|
| 11709 | 12442 | * @addr: the MAC address entry being added |
|---|
| 11710 | 12443 | * @vid: VLAN ID |
|---|
| 11711 | 12444 | * @flags: instructions from stack about fdb operation |
|---|
| 12445 | + * @extack: netlink extended ack, unused currently |
|---|
| 11712 | 12446 | */ |
|---|
| 11713 | 12447 | static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], |
|---|
| 11714 | 12448 | struct net_device *dev, |
|---|
| 11715 | 12449 | const unsigned char *addr, u16 vid, |
|---|
| 11716 | | - u16 flags) |
|---|
| 12450 | + u16 flags, |
|---|
| 12451 | + struct netlink_ext_ack *extack) |
|---|
| 11717 | 12452 | { |
|---|
| 11718 | 12453 | struct i40e_netdev_priv *np = netdev_priv(dev); |
|---|
| 11719 | 12454 | struct i40e_pf *pf = np->vsi->back; |
|---|
| .. | .. |
|---|
| 11754 | 12489 | * @dev: the netdev being configured |
|---|
| 11755 | 12490 | * @nlh: RTNL message |
|---|
| 11756 | 12491 | * @flags: bridge flags |
|---|
| 12492 | + * @extack: netlink extended ack |
|---|
| 11757 | 12493 | * |
|---|
| 11758 | 12494 | * Inserts a new hardware bridge if not already created and |
|---|
| 11759 | 12495 | * enables the bridging mode requested (VEB or VEPA). If the |
|---|
| .. | .. |
|---|
| 11766 | 12502 | **/ |
|---|
| 11767 | 12503 | static int i40e_ndo_bridge_setlink(struct net_device *dev, |
|---|
| 11768 | 12504 | struct nlmsghdr *nlh, |
|---|
| 11769 | | - u16 flags) |
|---|
| 12505 | + u16 flags, |
|---|
| 12506 | + struct netlink_ext_ack *extack) |
|---|
| 11770 | 12507 | { |
|---|
| 11771 | 12508 | struct i40e_netdev_priv *np = netdev_priv(dev); |
|---|
| 11772 | 12509 | struct i40e_vsi *vsi = np->vsi; |
|---|
| .. | .. |
|---|
| 11786 | 12523 | } |
|---|
| 11787 | 12524 | |
|---|
| 11788 | 12525 | br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); |
|---|
| 12526 | + if (!br_spec) |
|---|
| 12527 | + return -EINVAL; |
|---|
| 11789 | 12528 | |
|---|
| 11790 | 12529 | nla_for_each_nested(attr, br_spec, rem) { |
|---|
| 11791 | 12530 | __u16 mode; |
|---|
| .. | .. |
|---|
| 11953 | 12692 | |
|---|
| 11954 | 12693 | old_prog = xchg(&vsi->xdp_prog, prog); |
|---|
| 11955 | 12694 | |
|---|
| 11956 | | - if (need_reset) |
|---|
| 12695 | + if (need_reset) { |
|---|
| 12696 | + if (!prog) |
|---|
| 12697 | + /* Wait until ndo_xsk_wakeup completes. */ |
|---|
| 12698 | + synchronize_rcu(); |
|---|
| 11957 | 12699 | i40e_reset_and_rebuild(pf, true, true); |
|---|
| 12700 | + } |
|---|
| 12701 | + |
|---|
| 12702 | + if (!i40e_enabled_xdp_vsi(vsi) && prog) { |
|---|
| 12703 | + if (i40e_realloc_rx_bi_zc(vsi, true)) |
|---|
| 12704 | + return -ENOMEM; |
|---|
| 12705 | + } else if (i40e_enabled_xdp_vsi(vsi) && !prog) { |
|---|
| 12706 | + if (i40e_realloc_rx_bi_zc(vsi, false)) |
|---|
| 12707 | + return -ENOMEM; |
|---|
| 12708 | + } |
|---|
| 11958 | 12709 | |
|---|
| 11959 | 12710 | for (i = 0; i < vsi->num_queue_pairs; i++) |
|---|
| 11960 | 12711 | WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); |
|---|
| .. | .. |
|---|
| 11962 | 12713 | if (old_prog) |
|---|
| 11963 | 12714 | bpf_prog_put(old_prog); |
|---|
| 11964 | 12715 | |
|---|
| 12716 | + /* Kick start the NAPI context if there is an AF_XDP socket open |
|---|
| 12717 | + * on that queue id. This so that receiving will start. |
|---|
| 12718 | + */ |
|---|
| 12719 | + if (need_reset && prog) |
|---|
| 12720 | + for (i = 0; i < vsi->num_queue_pairs; i++) |
|---|
| 12721 | + if (vsi->xdp_rings[i]->xsk_pool) |
|---|
| 12722 | + (void)i40e_xsk_wakeup(vsi->netdev, i, |
|---|
| 12723 | + XDP_WAKEUP_RX); |
|---|
| 12724 | + |
|---|
| 11965 | 12725 | return 0; |
|---|
| 12726 | +} |
|---|
| 12727 | + |
|---|
| 12728 | +/** |
|---|
| 12729 | + * i40e_enter_busy_conf - Enters busy config state |
|---|
| 12730 | + * @vsi: vsi |
|---|
| 12731 | + * |
|---|
| 12732 | + * Returns 0 on success, <0 for failure. |
|---|
| 12733 | + **/ |
|---|
| 12734 | +static int i40e_enter_busy_conf(struct i40e_vsi *vsi) |
|---|
| 12735 | +{ |
|---|
| 12736 | + struct i40e_pf *pf = vsi->back; |
|---|
| 12737 | + int timeout = 50; |
|---|
| 12738 | + |
|---|
| 12739 | + while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { |
|---|
| 12740 | + timeout--; |
|---|
| 12741 | + if (!timeout) |
|---|
| 12742 | + return -EBUSY; |
|---|
| 12743 | + usleep_range(1000, 2000); |
|---|
| 12744 | + } |
|---|
| 12745 | + |
|---|
| 12746 | + return 0; |
|---|
| 12747 | +} |
|---|
| 12748 | + |
|---|
| 12749 | +/** |
|---|
| 12750 | + * i40e_exit_busy_conf - Exits busy config state |
|---|
| 12751 | + * @vsi: vsi |
|---|
| 12752 | + **/ |
|---|
| 12753 | +static void i40e_exit_busy_conf(struct i40e_vsi *vsi) |
|---|
| 12754 | +{ |
|---|
| 12755 | + struct i40e_pf *pf = vsi->back; |
|---|
| 12756 | + |
|---|
| 12757 | + clear_bit(__I40E_CONFIG_BUSY, pf->state); |
|---|
| 12758 | +} |
|---|
| 12759 | + |
|---|
| 12760 | +/** |
|---|
| 12761 | + * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair |
|---|
| 12762 | + * @vsi: vsi |
|---|
| 12763 | + * @queue_pair: queue pair |
|---|
| 12764 | + **/ |
|---|
| 12765 | +static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair) |
|---|
| 12766 | +{ |
|---|
| 12767 | + memset(&vsi->rx_rings[queue_pair]->rx_stats, 0, |
|---|
| 12768 | + sizeof(vsi->rx_rings[queue_pair]->rx_stats)); |
|---|
| 12769 | + memset(&vsi->tx_rings[queue_pair]->stats, 0, |
|---|
| 12770 | + sizeof(vsi->tx_rings[queue_pair]->stats)); |
|---|
| 12771 | + if (i40e_enabled_xdp_vsi(vsi)) { |
|---|
| 12772 | + memset(&vsi->xdp_rings[queue_pair]->stats, 0, |
|---|
| 12773 | + sizeof(vsi->xdp_rings[queue_pair]->stats)); |
|---|
| 12774 | + } |
|---|
| 12775 | +} |
|---|
| 12776 | + |
|---|
| 12777 | +/** |
|---|
| 12778 | + * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair |
|---|
| 12779 | + * @vsi: vsi |
|---|
| 12780 | + * @queue_pair: queue pair |
|---|
| 12781 | + **/ |
|---|
| 12782 | +static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair) |
|---|
| 12783 | +{ |
|---|
| 12784 | + i40e_clean_tx_ring(vsi->tx_rings[queue_pair]); |
|---|
| 12785 | + if (i40e_enabled_xdp_vsi(vsi)) { |
|---|
| 12786 | + /* Make sure that in-progress ndo_xdp_xmit calls are |
|---|
| 12787 | + * completed. |
|---|
| 12788 | + */ |
|---|
| 12789 | + synchronize_rcu(); |
|---|
| 12790 | + i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]); |
|---|
| 12791 | + } |
|---|
| 12792 | + i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); |
|---|
| 12793 | +} |
|---|
| 12794 | + |
|---|
| 12795 | +/** |
|---|
| 12796 | + * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair |
|---|
| 12797 | + * @vsi: vsi |
|---|
| 12798 | + * @queue_pair: queue pair |
|---|
| 12799 | + * @enable: true for enable, false for disable |
|---|
| 12800 | + **/ |
|---|
| 12801 | +static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair, |
|---|
| 12802 | + bool enable) |
|---|
| 12803 | +{ |
|---|
| 12804 | + struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; |
|---|
| 12805 | + struct i40e_q_vector *q_vector = rxr->q_vector; |
|---|
| 12806 | + |
|---|
| 12807 | + if (!vsi->netdev) |
|---|
| 12808 | + return; |
|---|
| 12809 | + |
|---|
| 12810 | + /* All rings in a qp belong to the same qvector. */ |
|---|
| 12811 | + if (q_vector->rx.ring || q_vector->tx.ring) { |
|---|
| 12812 | + if (enable) |
|---|
| 12813 | + napi_enable(&q_vector->napi); |
|---|
| 12814 | + else |
|---|
| 12815 | + napi_disable(&q_vector->napi); |
|---|
| 12816 | + } |
|---|
| 12817 | +} |
|---|
| 12818 | + |
|---|
| 12819 | +/** |
|---|
| 12820 | + * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair |
|---|
| 12821 | + * @vsi: vsi |
|---|
| 12822 | + * @queue_pair: queue pair |
|---|
| 12823 | + * @enable: true for enable, false for disable |
|---|
| 12824 | + * |
|---|
| 12825 | + * Returns 0 on success, <0 on failure. |
|---|
| 12826 | + **/ |
|---|
| 12827 | +static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair, |
|---|
| 12828 | + bool enable) |
|---|
| 12829 | +{ |
|---|
| 12830 | + struct i40e_pf *pf = vsi->back; |
|---|
| 12831 | + int pf_q, ret = 0; |
|---|
| 12832 | + |
|---|
| 12833 | + pf_q = vsi->base_queue + queue_pair; |
|---|
| 12834 | + ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q, |
|---|
| 12835 | + false /*is xdp*/, enable); |
|---|
| 12836 | + if (ret) { |
|---|
| 12837 | + dev_info(&pf->pdev->dev, |
|---|
| 12838 | + "VSI seid %d Tx ring %d %sable timeout\n", |
|---|
| 12839 | + vsi->seid, pf_q, (enable ? "en" : "dis")); |
|---|
| 12840 | + return ret; |
|---|
| 12841 | + } |
|---|
| 12842 | + |
|---|
| 12843 | + i40e_control_rx_q(pf, pf_q, enable); |
|---|
| 12844 | + ret = i40e_pf_rxq_wait(pf, pf_q, enable); |
|---|
| 12845 | + if (ret) { |
|---|
| 12846 | + dev_info(&pf->pdev->dev, |
|---|
| 12847 | + "VSI seid %d Rx ring %d %sable timeout\n", |
|---|
| 12848 | + vsi->seid, pf_q, (enable ? "en" : "dis")); |
|---|
| 12849 | + return ret; |
|---|
| 12850 | + } |
|---|
| 12851 | + |
|---|
| 12852 | + /* Due to HW errata, on Rx disable only, the register can |
|---|
| 12853 | + * indicate done before it really is. Needs 50ms to be sure |
|---|
| 12854 | + */ |
|---|
| 12855 | + if (!enable) |
|---|
| 12856 | + mdelay(50); |
|---|
| 12857 | + |
|---|
| 12858 | + if (!i40e_enabled_xdp_vsi(vsi)) |
|---|
| 12859 | + return ret; |
|---|
| 12860 | + |
|---|
| 12861 | + ret = i40e_control_wait_tx_q(vsi->seid, pf, |
|---|
| 12862 | + pf_q + vsi->alloc_queue_pairs, |
|---|
| 12863 | + true /*is xdp*/, enable); |
|---|
| 12864 | + if (ret) { |
|---|
| 12865 | + dev_info(&pf->pdev->dev, |
|---|
| 12866 | + "VSI seid %d XDP Tx ring %d %sable timeout\n", |
|---|
| 12867 | + vsi->seid, pf_q, (enable ? "en" : "dis")); |
|---|
| 12868 | + } |
|---|
| 12869 | + |
|---|
| 12870 | + return ret; |
|---|
| 12871 | +} |
|---|
| 12872 | + |
|---|
| 12873 | +/** |
|---|
| 12874 | + * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair |
|---|
| 12875 | + * @vsi: vsi |
|---|
| 12876 | + * @queue_pair: queue_pair |
|---|
| 12877 | + **/ |
|---|
| 12878 | +static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair) |
|---|
| 12879 | +{ |
|---|
| 12880 | + struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; |
|---|
| 12881 | + struct i40e_pf *pf = vsi->back; |
|---|
| 12882 | + struct i40e_hw *hw = &pf->hw; |
|---|
| 12883 | + |
|---|
| 12884 | + /* All rings in a qp belong to the same qvector. */ |
|---|
| 12885 | + if (pf->flags & I40E_FLAG_MSIX_ENABLED) |
|---|
| 12886 | + i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx); |
|---|
| 12887 | + else |
|---|
| 12888 | + i40e_irq_dynamic_enable_icr0(pf); |
|---|
| 12889 | + |
|---|
| 12890 | + i40e_flush(hw); |
|---|
| 12891 | +} |
|---|
| 12892 | + |
|---|
| 12893 | +/** |
|---|
| 12894 | + * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair |
|---|
| 12895 | + * @vsi: vsi |
|---|
| 12896 | + * @queue_pair: queue_pair |
|---|
| 12897 | + **/ |
|---|
| 12898 | +static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair) |
|---|
| 12899 | +{ |
|---|
| 12900 | + struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; |
|---|
| 12901 | + struct i40e_pf *pf = vsi->back; |
|---|
| 12902 | + struct i40e_hw *hw = &pf->hw; |
|---|
| 12903 | + |
|---|
| 12904 | + /* For simplicity, instead of removing the qp interrupt causes |
|---|
| 12905 | + * from the interrupt linked list, we simply disable the interrupt, and |
|---|
| 12906 | + * leave the list intact. |
|---|
| 12907 | + * |
|---|
| 12908 | + * All rings in a qp belong to the same qvector. |
|---|
| 12909 | + */ |
|---|
| 12910 | + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { |
|---|
| 12911 | + u32 intpf = vsi->base_vector + rxr->q_vector->v_idx; |
|---|
| 12912 | + |
|---|
| 12913 | + wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0); |
|---|
| 12914 | + i40e_flush(hw); |
|---|
| 12915 | + synchronize_irq(pf->msix_entries[intpf].vector); |
|---|
| 12916 | + } else { |
|---|
| 12917 | + /* Legacy and MSI mode - this stops all interrupt handling */ |
|---|
| 12918 | + wr32(hw, I40E_PFINT_ICR0_ENA, 0); |
|---|
| 12919 | + wr32(hw, I40E_PFINT_DYN_CTL0, 0); |
|---|
| 12920 | + i40e_flush(hw); |
|---|
| 12921 | + synchronize_irq(pf->pdev->irq); |
|---|
| 12922 | + } |
|---|
| 12923 | +} |
|---|
| 12924 | + |
|---|
| 12925 | +/** |
|---|
| 12926 | + * i40e_queue_pair_disable - Disables a queue pair |
|---|
| 12927 | + * @vsi: vsi |
|---|
| 12928 | + * @queue_pair: queue pair |
|---|
| 12929 | + * |
|---|
| 12930 | + * Returns 0 on success, <0 on failure. |
|---|
| 12931 | + **/ |
|---|
| 12932 | +int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair) |
|---|
| 12933 | +{ |
|---|
| 12934 | + int err; |
|---|
| 12935 | + |
|---|
| 12936 | + err = i40e_enter_busy_conf(vsi); |
|---|
| 12937 | + if (err) |
|---|
| 12938 | + return err; |
|---|
| 12939 | + |
|---|
| 12940 | + i40e_queue_pair_disable_irq(vsi, queue_pair); |
|---|
| 12941 | + err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */); |
|---|
| 12942 | + i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); |
|---|
| 12943 | + i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */); |
|---|
| 12944 | + i40e_queue_pair_clean_rings(vsi, queue_pair); |
|---|
| 12945 | + i40e_queue_pair_reset_stats(vsi, queue_pair); |
|---|
| 12946 | + |
|---|
| 12947 | + return err; |
|---|
| 12948 | +} |
|---|
| 12949 | + |
|---|
| 12950 | +/** |
|---|
| 12951 | + * i40e_queue_pair_enable - Enables a queue pair |
|---|
| 12952 | + * @vsi: vsi |
|---|
| 12953 | + * @queue_pair: queue pair |
|---|
| 12954 | + * |
|---|
| 12955 | + * Returns 0 on success, <0 on failure. |
|---|
| 12956 | + **/ |
|---|
| 12957 | +int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair) |
|---|
| 12958 | +{ |
|---|
| 12959 | + int err; |
|---|
| 12960 | + |
|---|
| 12961 | + err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]); |
|---|
| 12962 | + if (err) |
|---|
| 12963 | + return err; |
|---|
| 12964 | + |
|---|
| 12965 | + if (i40e_enabled_xdp_vsi(vsi)) { |
|---|
| 12966 | + err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]); |
|---|
| 12967 | + if (err) |
|---|
| 12968 | + return err; |
|---|
| 12969 | + } |
|---|
| 12970 | + |
|---|
| 12971 | + err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]); |
|---|
| 12972 | + if (err) |
|---|
| 12973 | + return err; |
|---|
| 12974 | + |
|---|
| 12975 | + err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */); |
|---|
| 12976 | + i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */); |
|---|
| 12977 | + i40e_queue_pair_enable_irq(vsi, queue_pair); |
|---|
| 12978 | + |
|---|
| 12979 | + i40e_exit_busy_conf(vsi); |
|---|
| 12980 | + |
|---|
| 12981 | + return err; |
|---|
| 11966 | 12982 | } |
|---|
| 11967 | 12983 | |
|---|
| 11968 | 12984 | /** |
|---|
| .. | .. |
|---|
| 11982 | 12998 | switch (xdp->command) { |
|---|
| 11983 | 12999 | case XDP_SETUP_PROG: |
|---|
| 11984 | 13000 | return i40e_xdp_setup(vsi, xdp->prog); |
|---|
| 11985 | | - case XDP_QUERY_PROG: |
|---|
| 11986 | | - xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0; |
|---|
| 11987 | | - return 0; |
|---|
| 13001 | + case XDP_SETUP_XSK_POOL: |
|---|
| 13002 | + return i40e_xsk_pool_setup(vsi, xdp->xsk.pool, |
|---|
| 13003 | + xdp->xsk.queue_id); |
|---|
| 11988 | 13004 | default: |
|---|
| 11989 | 13005 | return -EINVAL; |
|---|
| 11990 | 13006 | } |
|---|
| .. | .. |
|---|
| 12007 | 13023 | .ndo_poll_controller = i40e_netpoll, |
|---|
| 12008 | 13024 | #endif |
|---|
| 12009 | 13025 | .ndo_setup_tc = __i40e_setup_tc, |
|---|
| 13026 | + .ndo_select_queue = i40e_lan_select_queue, |
|---|
| 12010 | 13027 | .ndo_set_features = i40e_set_features, |
|---|
| 12011 | 13028 | .ndo_set_vf_mac = i40e_ndo_set_vf_mac, |
|---|
| 12012 | 13029 | .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, |
|---|
| 13030 | + .ndo_get_vf_stats = i40e_get_vf_stats, |
|---|
| 12013 | 13031 | .ndo_set_vf_rate = i40e_ndo_set_vf_bw, |
|---|
| 12014 | 13032 | .ndo_get_vf_config = i40e_ndo_get_vf_config, |
|---|
| 12015 | 13033 | .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, |
|---|
| 12016 | 13034 | .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, |
|---|
| 12017 | 13035 | .ndo_set_vf_trust = i40e_ndo_set_vf_trust, |
|---|
| 12018 | | - .ndo_udp_tunnel_add = i40e_udp_tunnel_add, |
|---|
| 12019 | | - .ndo_udp_tunnel_del = i40e_udp_tunnel_del, |
|---|
| 13036 | + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, |
|---|
| 13037 | + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, |
|---|
| 12020 | 13038 | .ndo_get_phys_port_id = i40e_get_phys_port_id, |
|---|
| 12021 | 13039 | .ndo_fdb_add = i40e_ndo_fdb_add, |
|---|
| 12022 | 13040 | .ndo_features_check = i40e_features_check, |
|---|
| .. | .. |
|---|
| 12024 | 13042 | .ndo_bridge_setlink = i40e_ndo_bridge_setlink, |
|---|
| 12025 | 13043 | .ndo_bpf = i40e_xdp, |
|---|
| 12026 | 13044 | .ndo_xdp_xmit = i40e_xdp_xmit, |
|---|
| 13045 | + .ndo_xsk_wakeup = i40e_xsk_wakeup, |
|---|
| 13046 | + .ndo_dfwd_add_station = i40e_fwd_add, |
|---|
| 13047 | + .ndo_dfwd_del_station = i40e_fwd_del, |
|---|
| 12027 | 13048 | }; |
|---|
| 12028 | 13049 | |
|---|
| 12029 | 13050 | /** |
|---|
| .. | .. |
|---|
| 12068 | 13089 | NETIF_F_GSO_IPXIP6 | |
|---|
| 12069 | 13090 | NETIF_F_GSO_UDP_TUNNEL | |
|---|
| 12070 | 13091 | NETIF_F_GSO_UDP_TUNNEL_CSUM | |
|---|
| 13092 | + NETIF_F_GSO_UDP_L4 | |
|---|
| 12071 | 13093 | NETIF_F_SCTP_CRC | |
|---|
| 12072 | 13094 | NETIF_F_RXHASH | |
|---|
| 12073 | 13095 | NETIF_F_RXCSUM | |
|---|
| .. | .. |
|---|
| 12076 | 13098 | if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE)) |
|---|
| 12077 | 13099 | netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; |
|---|
| 12078 | 13100 | |
|---|
| 13101 | + netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic; |
|---|
| 13102 | + |
|---|
| 12079 | 13103 | netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; |
|---|
| 12080 | 13104 | |
|---|
| 12081 | 13105 | netdev->hw_enc_features |= hw_enc_features; |
|---|
| .. | .. |
|---|
| 12083 | 13107 | /* record features VLANs can make use of */ |
|---|
| 12084 | 13108 | netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; |
|---|
| 12085 | 13109 | |
|---|
| 12086 | | - if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) |
|---|
| 12087 | | - netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; |
|---|
| 13110 | + /* enable macvlan offloads */ |
|---|
| 13111 | + netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; |
|---|
| 12088 | 13112 | |
|---|
| 12089 | 13113 | hw_features = hw_enc_features | |
|---|
| 12090 | 13114 | NETIF_F_HW_VLAN_CTAG_TX | |
|---|
| 12091 | 13115 | NETIF_F_HW_VLAN_CTAG_RX; |
|---|
| 13116 | + |
|---|
| 13117 | + if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) |
|---|
| 13118 | + hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; |
|---|
| 12092 | 13119 | |
|---|
| 12093 | 13120 | netdev->hw_features |= hw_features; |
|---|
| 12094 | 13121 | |
|---|
| .. | .. |
|---|
| 12195 | 13222 | struct i40e_pf *pf = vsi->back; |
|---|
| 12196 | 13223 | |
|---|
| 12197 | 13224 | /* Uplink is not a bridge so default to VEB */ |
|---|
| 12198 | | - if (vsi->veb_idx == I40E_NO_VEB) |
|---|
| 13225 | + if (vsi->veb_idx >= I40E_MAX_VEB) |
|---|
| 12199 | 13226 | return 1; |
|---|
| 12200 | 13227 | |
|---|
| 12201 | 13228 | veb = pf->veb[vsi->veb_idx]; |
|---|
| .. | .. |
|---|
| 12434 | 13461 | vsi->id = ctxt.vsi_number; |
|---|
| 12435 | 13462 | } |
|---|
| 12436 | 13463 | |
|---|
| 12437 | | - vsi->active_filters = 0; |
|---|
| 12438 | | - clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); |
|---|
| 12439 | 13464 | spin_lock_bh(&vsi->mac_filter_hash_lock); |
|---|
| 13465 | + vsi->active_filters = 0; |
|---|
| 12440 | 13466 | /* If macvlan filters already exist, force them to get loaded */ |
|---|
| 12441 | 13467 | hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { |
|---|
| 12442 | 13468 | f->state = I40E_FILTER_NEW; |
|---|
| 12443 | 13469 | f_count++; |
|---|
| 12444 | 13470 | } |
|---|
| 12445 | 13471 | spin_unlock_bh(&vsi->mac_filter_hash_lock); |
|---|
| 13472 | + clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); |
|---|
| 12446 | 13473 | |
|---|
| 12447 | 13474 | if (f_count) { |
|---|
| 12448 | 13475 | vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; |
|---|
| .. | .. |
|---|
| 12836 | 13863 | /* Setup DCB netlink interface */ |
|---|
| 12837 | 13864 | i40e_dcbnl_setup(vsi); |
|---|
| 12838 | 13865 | #endif /* CONFIG_I40E_DCB */ |
|---|
| 12839 | | - /* fall through */ |
|---|
| 12840 | | - |
|---|
| 13866 | + fallthrough; |
|---|
| 12841 | 13867 | case I40E_VSI_FDIR: |
|---|
| 12842 | 13868 | /* set up vectors and rings if needed */ |
|---|
| 12843 | 13869 | ret = i40e_vsi_setup_vectors(vsi); |
|---|
| .. | .. |
|---|
| 12853 | 13879 | |
|---|
| 12854 | 13880 | i40e_vsi_reset_stats(vsi); |
|---|
| 12855 | 13881 | break; |
|---|
| 12856 | | - |
|---|
| 12857 | 13882 | default: |
|---|
| 12858 | 13883 | /* no netdev or rings for the other VSI types */ |
|---|
| 12859 | 13884 | break; |
|---|
| .. | .. |
|---|
| 13179 | 14204 | for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) |
|---|
| 13180 | 14205 | if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) |
|---|
| 13181 | 14206 | break; |
|---|
| 13182 | | - if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) { |
|---|
| 14207 | + if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) { |
|---|
| 13183 | 14208 | dev_info(&pf->pdev->dev, "vsi seid %d not found\n", |
|---|
| 13184 | 14209 | vsi_seid); |
|---|
| 13185 | 14210 | return NULL; |
|---|
| .. | .. |
|---|
| 13256 | 14281 | /* Main VEB? */ |
|---|
| 13257 | 14282 | if (uplink_seid != pf->mac_seid) |
|---|
| 13258 | 14283 | break; |
|---|
| 13259 | | - if (pf->lan_veb == I40E_NO_VEB) { |
|---|
| 14284 | + if (pf->lan_veb >= I40E_MAX_VEB) { |
|---|
| 13260 | 14285 | int v; |
|---|
| 13261 | 14286 | |
|---|
| 13262 | 14287 | /* find existing or else empty VEB */ |
|---|
| .. | .. |
|---|
| 13266 | 14291 | break; |
|---|
| 13267 | 14292 | } |
|---|
| 13268 | 14293 | } |
|---|
| 13269 | | - if (pf->lan_veb == I40E_NO_VEB) { |
|---|
| 14294 | + if (pf->lan_veb >= I40E_MAX_VEB) { |
|---|
| 13270 | 14295 | v = i40e_veb_mem_alloc(pf); |
|---|
| 13271 | 14296 | if (v < 0) |
|---|
| 13272 | 14297 | break; |
|---|
| 13273 | 14298 | pf->lan_veb = v; |
|---|
| 13274 | 14299 | } |
|---|
| 13275 | 14300 | } |
|---|
| 14301 | + if (pf->lan_veb >= I40E_MAX_VEB) |
|---|
| 14302 | + break; |
|---|
| 13276 | 14303 | |
|---|
| 13277 | 14304 | pf->veb[pf->lan_veb]->seid = seid; |
|---|
| 13278 | 14305 | pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; |
|---|
| .. | .. |
|---|
| 13370 | 14397 | * i40e_setup_pf_switch - Setup the HW switch on startup or after reset |
|---|
| 13371 | 14398 | * @pf: board private structure |
|---|
| 13372 | 14399 | * @reinit: if the Main VSI needs to be re-initialized. |
|---|
| 14400 | + * @lock_acquired: indicates whether or not the RTNL lock has already been acquired by the caller |
|---|
| 13373 | 14401 | * |
|---|
| 13374 | 14402 | * Returns 0 on success, negative value on failure |
|---|
| 13375 | 14403 | **/ |
|---|
| 13376 | | -static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) |
|---|
| 14404 | +static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired) |
|---|
| 13377 | 14405 | { |
|---|
| 13378 | 14406 | u16 flags = 0; |
|---|
| 13379 | 14407 | int ret; |
|---|
| .. | .. |
|---|
| 13426 | 14454 | /* Set up the PF VSI associated with the PF's main VSI |
|---|
| 13427 | 14455 | * that is already in the HW switch |
|---|
| 13428 | 14456 | */ |
|---|
| 13429 | | - if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) |
|---|
| 14457 | + if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) |
|---|
| 13430 | 14458 | uplink_seid = pf->veb[pf->lan_veb]->seid; |
|---|
| 13431 | 14459 | else |
|---|
| 13432 | 14460 | uplink_seid = pf->mac_seid; |
|---|
| .. | .. |
|---|
| 13475 | 14503 | |
|---|
| 13476 | 14504 | i40e_ptp_init(pf); |
|---|
| 13477 | 14505 | |
|---|
| 14506 | + if (!lock_acquired) |
|---|
| 14507 | + rtnl_lock(); |
|---|
| 14508 | + |
|---|
| 13478 | 14509 | /* repopulate tunnel port filters */ |
|---|
| 13479 | | - i40e_sync_udp_filters(pf); |
|---|
| 14510 | + udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev); |
|---|
| 14511 | + |
|---|
| 14512 | + if (!lock_acquired) |
|---|
| 14513 | + rtnl_unlock(); |
|---|
| 13480 | 14514 | |
|---|
| 13481 | 14515 | return ret; |
|---|
| 13482 | 14516 | } |
|---|
| .. | .. |
|---|
| 13626 | 14660 | |
|---|
| 13627 | 14661 | i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id); |
|---|
| 13628 | 14662 | #ifdef CONFIG_PCI_IOV |
|---|
| 13629 | | - i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); |
|---|
| 14663 | + i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); |
|---|
| 13630 | 14664 | #endif |
|---|
| 13631 | | - i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d", |
|---|
| 14665 | + i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d", |
|---|
| 13632 | 14666 | pf->hw.func_caps.num_vsis, |
|---|
| 13633 | 14667 | pf->vsi[pf->lan_vsi]->num_queue_pairs); |
|---|
| 13634 | 14668 | if (pf->flags & I40E_FLAG_RSS_ENABLED) |
|---|
| 13635 | | - i += snprintf(&buf[i], REMAIN(i), " RSS"); |
|---|
| 14669 | + i += scnprintf(&buf[i], REMAIN(i), " RSS"); |
|---|
| 13636 | 14670 | if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) |
|---|
| 13637 | | - i += snprintf(&buf[i], REMAIN(i), " FD_ATR"); |
|---|
| 14671 | + i += scnprintf(&buf[i], REMAIN(i), " FD_ATR"); |
|---|
| 13638 | 14672 | if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { |
|---|
| 13639 | | - i += snprintf(&buf[i], REMAIN(i), " FD_SB"); |
|---|
| 13640 | | - i += snprintf(&buf[i], REMAIN(i), " NTUPLE"); |
|---|
| 14673 | + i += scnprintf(&buf[i], REMAIN(i), " FD_SB"); |
|---|
| 14674 | + i += scnprintf(&buf[i], REMAIN(i), " NTUPLE"); |
|---|
| 13641 | 14675 | } |
|---|
| 13642 | 14676 | if (pf->flags & I40E_FLAG_DCB_CAPABLE) |
|---|
| 13643 | | - i += snprintf(&buf[i], REMAIN(i), " DCB"); |
|---|
| 13644 | | - i += snprintf(&buf[i], REMAIN(i), " VxLAN"); |
|---|
| 13645 | | - i += snprintf(&buf[i], REMAIN(i), " Geneve"); |
|---|
| 14677 | + i += scnprintf(&buf[i], REMAIN(i), " DCB"); |
|---|
| 14678 | + i += scnprintf(&buf[i], REMAIN(i), " VxLAN"); |
|---|
| 14679 | + i += scnprintf(&buf[i], REMAIN(i), " Geneve"); |
|---|
| 13646 | 14680 | if (pf->flags & I40E_FLAG_PTP) |
|---|
| 13647 | | - i += snprintf(&buf[i], REMAIN(i), " PTP"); |
|---|
| 14681 | + i += scnprintf(&buf[i], REMAIN(i), " PTP"); |
|---|
| 13648 | 14682 | if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) |
|---|
| 13649 | | - i += snprintf(&buf[i], REMAIN(i), " VEB"); |
|---|
| 14683 | + i += scnprintf(&buf[i], REMAIN(i), " VEB"); |
|---|
| 13650 | 14684 | else |
|---|
| 13651 | | - i += snprintf(&buf[i], REMAIN(i), " VEPA"); |
|---|
| 14685 | + i += scnprintf(&buf[i], REMAIN(i), " VEPA"); |
|---|
| 13652 | 14686 | |
|---|
| 13653 | 14687 | dev_info(&pf->pdev->dev, "%s\n", buf); |
|---|
| 13654 | 14688 | kfree(buf); |
|---|
| .. | .. |
|---|
| 13669 | 14703 | { |
|---|
| 13670 | 14704 | if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr)) |
|---|
| 13671 | 14705 | i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr); |
|---|
| 14706 | +} |
|---|
| 14707 | + |
|---|
| 14708 | +/** |
|---|
| 14709 | + * i40e_set_fec_in_flags - helper function for setting FEC options in flags |
|---|
| 14710 | + * @fec_cfg: FEC option to set in flags |
|---|
| 14711 | + * @flags: ptr to flags in which we set FEC option |
|---|
| 14712 | + **/ |
|---|
| 14713 | +void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags) |
|---|
| 14714 | +{ |
|---|
| 14715 | + if (fec_cfg & I40E_AQ_SET_FEC_AUTO) |
|---|
| 14716 | + *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC; |
|---|
| 14717 | + if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) || |
|---|
| 14718 | + (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) { |
|---|
| 14719 | + *flags |= I40E_FLAG_RS_FEC; |
|---|
| 14720 | + *flags &= ~I40E_FLAG_BASE_R_FEC; |
|---|
| 14721 | + } |
|---|
| 14722 | + if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) || |
|---|
| 14723 | + (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) { |
|---|
| 14724 | + *flags |= I40E_FLAG_BASE_R_FEC; |
|---|
| 14725 | + *flags &= ~I40E_FLAG_RS_FEC; |
|---|
| 14726 | + } |
|---|
| 14727 | + if (fec_cfg == 0) |
|---|
| 14728 | + *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC); |
|---|
| 14729 | +} |
|---|
| 14730 | + |
|---|
| 14731 | +/** |
|---|
| 14732 | + * i40e_check_recovery_mode - check if we are running transition firmware |
|---|
| 14733 | + * @pf: board private structure |
|---|
| 14734 | + * |
|---|
| 14735 | + * Check registers indicating the firmware runs in recovery mode. Sets the |
|---|
| 14736 | + * appropriate driver state. |
|---|
| 14737 | + * |
|---|
| 14738 | + * Returns true if the recovery mode was detected, false otherwise |
|---|
| 14739 | + **/ |
|---|
| 14740 | +static bool i40e_check_recovery_mode(struct i40e_pf *pf) |
|---|
| 14741 | +{ |
|---|
| 14742 | + u32 val = rd32(&pf->hw, I40E_GL_FWSTS); |
|---|
| 14743 | + |
|---|
| 14744 | + if (val & I40E_GL_FWSTS_FWS1B_MASK) { |
|---|
| 14745 | + dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n"); |
|---|
| 14746 | + dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); |
|---|
| 14747 | + set_bit(__I40E_RECOVERY_MODE, pf->state); |
|---|
| 14748 | + |
|---|
| 14749 | + return true; |
|---|
| 14750 | + } |
|---|
| 14751 | + if (test_bit(__I40E_RECOVERY_MODE, pf->state)) |
|---|
| 14752 | + dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n"); |
|---|
| 14753 | + |
|---|
| 14754 | + return false; |
|---|
| 14755 | +} |
|---|
| 14756 | + |
|---|
| 14757 | +/** |
|---|
| 14758 | + * i40e_pf_loop_reset - perform reset in a loop. |
|---|
| 14759 | + * @pf: board private structure |
|---|
| 14760 | + * |
|---|
| 14761 | + * This function is useful when a NIC is about to enter recovery mode. |
|---|
| 14762 | + * When a NIC's internal data structures are corrupted the NIC's |
|---|
| 14763 | + * firmware is going to enter recovery mode. |
|---|
| 14764 | + * Right after a POR it takes about 7 minutes for firmware to enter |
|---|
| 14765 | + * recovery mode. Until that time a NIC is in some kind of intermediate |
|---|
| 14766 | + * state. After that time period the NIC almost surely enters |
|---|
| 14767 | + * recovery mode. The only way for a driver to detect intermediate |
|---|
| 14768 | + * state is to issue a series of pf-resets and check a return value. |
|---|
| 14769 | + * If a PF reset returns success then the firmware could be in recovery |
|---|
| 14770 | + * mode so the caller of this code needs to check for recovery mode |
|---|
| 14771 | + * if this function returns success. There is a little chance that |
|---|
| 14772 | + * firmware will hang in intermediate state forever. |
|---|
| 14773 | + * Since waiting 7 minutes is quite a lot of time, this function waits |
|---|
| 14774 | + * 10 seconds and then gives up by returning an error. |
|---|
| 14775 | + * |
|---|
| 14776 | + * Return 0 on success, negative on failure. |
|---|
| 14777 | + **/ |
|---|
| 14778 | +static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf) |
|---|
| 14779 | +{ |
|---|
| 14780 | + /* wait max 10 seconds for PF reset to succeed */ |
|---|
| 14781 | + const unsigned long time_end = jiffies + 10 * HZ; |
|---|
| 14782 | + |
|---|
| 14783 | + struct i40e_hw *hw = &pf->hw; |
|---|
| 14784 | + i40e_status ret; |
|---|
| 14785 | + |
|---|
| 14786 | + ret = i40e_pf_reset(hw); |
|---|
| 14787 | + while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) { |
|---|
| 14788 | + usleep_range(10000, 20000); |
|---|
| 14789 | + ret = i40e_pf_reset(hw); |
|---|
| 14790 | + } |
|---|
| 14791 | + |
|---|
| 14792 | + if (ret == I40E_SUCCESS) |
|---|
| 14793 | + pf->pfr_count++; |
|---|
| 14794 | + else |
|---|
| 14795 | + dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret); |
|---|
| 14796 | + |
|---|
| 14797 | + return ret; |
|---|
| 14798 | +} |
|---|
| 14799 | + |
|---|
| 14800 | +/** |
|---|
| 14801 | + * i40e_check_fw_empr - check if FW issued unexpected EMP Reset |
|---|
| 14802 | + * @pf: board private structure |
|---|
| 14803 | + * |
|---|
| 14804 | + * Check FW registers to determine if FW issued unexpected EMP Reset. |
|---|
| 14805 | + * Every time when unexpected EMP Reset occurs the FW increments |
|---|
| 14806 | + * a counter of unexpected EMP Resets. When the counter reaches 10 |
|---|
| 14807 | + * the FW should enter the Recovery mode |
|---|
| 14808 | + * |
|---|
| 14809 | + * Returns true if FW issued unexpected EMP Reset |
|---|
| 14810 | + **/ |
|---|
| 14811 | +static bool i40e_check_fw_empr(struct i40e_pf *pf) |
|---|
| 14812 | +{ |
|---|
| 14813 | + const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) & |
|---|
| 14814 | + I40E_GL_FWSTS_FWS1B_MASK; |
|---|
| 14815 | + return (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) && |
|---|
| 14816 | + (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10); |
|---|
| 14817 | +} |
|---|
| 14818 | + |
|---|
| 14819 | +/** |
|---|
| 14820 | + * i40e_handle_resets - handle EMP resets and PF resets |
|---|
| 14821 | + * @pf: board private structure |
|---|
| 14822 | + * |
|---|
| 14823 | + * Handle both EMP resets and PF resets and conclude whether there are |
|---|
| 14824 | + * any issues regarding these resets. If there are any issues then |
|---|
| 14825 | + * generate a log entry. |
|---|
| 14826 | + * |
|---|
| 14827 | + * Return 0 if NIC is healthy or negative value when there are issues |
|---|
| 14828 | + * with resets |
|---|
| 14829 | + **/ |
|---|
| 14830 | +static i40e_status i40e_handle_resets(struct i40e_pf *pf) |
|---|
| 14831 | +{ |
|---|
| 14832 | + const i40e_status pfr = i40e_pf_loop_reset(pf); |
|---|
| 14833 | + const bool is_empr = i40e_check_fw_empr(pf); |
|---|
| 14834 | + |
|---|
| 14835 | + if (is_empr || pfr != I40E_SUCCESS) |
|---|
| 14836 | + dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n"); |
|---|
| 14837 | + |
|---|
| 14838 | + return is_empr ? I40E_ERR_RESET_FAILED : pfr; |
|---|
| 14839 | +} |
|---|
| 14840 | + |
|---|
| 14841 | +/** |
|---|
| 14842 | + * i40e_init_recovery_mode - initialize subsystems needed in recovery mode |
|---|
| 14843 | + * @pf: board private structure |
|---|
| 14844 | + * @hw: ptr to the hardware info |
|---|
| 14845 | + * |
|---|
| 14846 | + * This function does a minimal setup of all subsystems needed for running |
|---|
| 14847 | + * recovery mode. |
|---|
| 14848 | + * |
|---|
| 14849 | + * Returns 0 on success, negative on failure |
|---|
| 14850 | + **/ |
|---|
| 14851 | +static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw) |
|---|
| 14852 | +{ |
|---|
| 14853 | + struct i40e_vsi *vsi; |
|---|
| 14854 | + int err; |
|---|
| 14855 | + int v_idx; |
|---|
| 14856 | + |
|---|
| 14857 | + pci_set_drvdata(pf->pdev, pf); |
|---|
| 14858 | + pci_save_state(pf->pdev); |
|---|
| 14859 | + |
|---|
| 14860 | + /* set up periodic task facility */ |
|---|
| 14861 | + timer_setup(&pf->service_timer, i40e_service_timer, 0); |
|---|
| 14862 | + pf->service_timer_period = HZ; |
|---|
| 14863 | + |
|---|
| 14864 | + INIT_WORK(&pf->service_task, i40e_service_task); |
|---|
| 14865 | + clear_bit(__I40E_SERVICE_SCHED, pf->state); |
|---|
| 14866 | + |
|---|
| 14867 | + err = i40e_init_interrupt_scheme(pf); |
|---|
| 14868 | + if (err) |
|---|
| 14869 | + goto err_switch_setup; |
|---|
| 14870 | + |
|---|
| 14871 | + /* The number of VSIs reported by the FW is the minimum guaranteed |
|---|
| 14872 | + * to us; HW supports far more and we share the remaining pool with |
|---|
| 14873 | + * the other PFs. We allocate space for more than the guarantee with |
|---|
| 14874 | + * the understanding that we might not get them all later. |
|---|
| 14875 | + */ |
|---|
| 14876 | + if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) |
|---|
| 14877 | + pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; |
|---|
| 14878 | + else |
|---|
| 14879 | + pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; |
|---|
| 14880 | + |
|---|
| 14881 | + /* Set up the vsi struct and our local tracking of the MAIN PF vsi. */ |
|---|
| 14882 | + pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), |
|---|
| 14883 | + GFP_KERNEL); |
|---|
| 14884 | + if (!pf->vsi) { |
|---|
| 14885 | + err = -ENOMEM; |
|---|
| 14886 | + goto err_switch_setup; |
|---|
| 14887 | + } |
|---|
| 14888 | + |
|---|
| 14889 | + /* We allocate one VSI which is needed as absolute minimum |
|---|
| 14890 | + * in order to register the netdev |
|---|
| 14891 | + */ |
|---|
| 14892 | + v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN); |
|---|
| 14893 | + if (v_idx < 0) { |
|---|
| 14894 | + err = v_idx; |
|---|
| 14895 | + goto err_switch_setup; |
|---|
| 14896 | + } |
|---|
| 14897 | + pf->lan_vsi = v_idx; |
|---|
| 14898 | + vsi = pf->vsi[v_idx]; |
|---|
| 14899 | + if (!vsi) { |
|---|
| 14900 | + err = -EFAULT; |
|---|
| 14901 | + goto err_switch_setup; |
|---|
| 14902 | + } |
|---|
| 14903 | + vsi->alloc_queue_pairs = 1; |
|---|
| 14904 | + err = i40e_config_netdev(vsi); |
|---|
| 14905 | + if (err) |
|---|
| 14906 | + goto err_switch_setup; |
|---|
| 14907 | + err = register_netdev(vsi->netdev); |
|---|
| 14908 | + if (err) |
|---|
| 14909 | + goto err_switch_setup; |
|---|
| 14910 | + vsi->netdev_registered = true; |
|---|
| 14911 | + i40e_dbg_pf_init(pf); |
|---|
| 14912 | + |
|---|
| 14913 | + err = i40e_setup_misc_vector_for_recovery_mode(pf); |
|---|
| 14914 | + if (err) |
|---|
| 14915 | + goto err_switch_setup; |
|---|
| 14916 | + |
|---|
| 14917 | + /* tell the firmware that we're starting */ |
|---|
| 14918 | + i40e_send_version(pf); |
|---|
| 14919 | + |
|---|
| 14920 | + /* since everything's happy, start the service_task timer */ |
|---|
| 14921 | + mod_timer(&pf->service_timer, |
|---|
| 14922 | + round_jiffies(jiffies + pf->service_timer_period)); |
|---|
| 14923 | + |
|---|
| 14924 | + return 0; |
|---|
| 14925 | + |
|---|
| 14926 | +err_switch_setup: |
|---|
| 14927 | + i40e_reset_interrupt_capability(pf); |
|---|
| 14928 | + del_timer_sync(&pf->service_timer); |
|---|
| 14929 | + i40e_shutdown_adminq(hw); |
|---|
| 14930 | + iounmap(hw->hw_addr); |
|---|
| 14931 | + pci_disable_pcie_error_reporting(pf->pdev); |
|---|
| 14932 | + pci_release_mem_regions(pf->pdev); |
|---|
| 14933 | + pci_disable_device(pf->pdev); |
|---|
| 14934 | + kfree(pf); |
|---|
| 14935 | + |
|---|
| 14936 | + return err; |
|---|
| 13672 | 14937 | } |
|---|
| 13673 | 14938 | |
|---|
| 13674 | 14939 | /** |
|---|
| .. | .. |
|---|
| 13739 | 15004 | |
|---|
| 13740 | 15005 | pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0), |
|---|
| 13741 | 15006 | I40E_MAX_CSR_SPACE); |
|---|
| 13742 | | - |
|---|
| 15007 | + /* We believe that the highest register to read is |
|---|
| 15008 | + * I40E_GLGEN_STAT_CLEAR, so we check if the BAR size |
|---|
| 15009 | + * is not less than that before mapping to prevent a |
|---|
| 15010 | + * kernel panic. |
|---|
| 15011 | + */ |
|---|
| 15012 | + if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) { |
|---|
| 15013 | + dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n", |
|---|
| 15014 | + pf->ioremap_len); |
|---|
| 15015 | + err = -ENOMEM; |
|---|
| 15016 | + goto err_ioremap; |
|---|
| 15017 | + } |
|---|
| 13743 | 15018 | hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len); |
|---|
| 13744 | 15019 | if (!hw->hw_addr) { |
|---|
| 13745 | 15020 | err = -EIO; |
|---|
| .. | .. |
|---|
| 13767 | 15042 | |
|---|
| 13768 | 15043 | INIT_LIST_HEAD(&pf->l3_flex_pit_list); |
|---|
| 13769 | 15044 | INIT_LIST_HEAD(&pf->l4_flex_pit_list); |
|---|
| 15045 | + INIT_LIST_HEAD(&pf->ddp_old_prof); |
|---|
| 13770 | 15046 | |
|---|
| 13771 | 15047 | /* set up the locks for the AQ, do this only once in probe |
|---|
| 13772 | 15048 | * and destroy them only once in remove |
|---|
| .. | .. |
|---|
| 13794 | 15070 | |
|---|
| 13795 | 15071 | /* Reset here to make sure all is clean and to define PF 'n' */ |
|---|
| 13796 | 15072 | i40e_clear_hw(hw); |
|---|
| 13797 | | - err = i40e_pf_reset(hw); |
|---|
| 15073 | + |
|---|
| 15074 | + err = i40e_set_mac_type(hw); |
|---|
| 13798 | 15075 | if (err) { |
|---|
| 13799 | | - dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err); |
|---|
| 15076 | + dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", |
|---|
| 15077 | + err); |
|---|
| 13800 | 15078 | goto err_pf_reset; |
|---|
| 13801 | 15079 | } |
|---|
| 13802 | | - pf->pfr_count++; |
|---|
| 15080 | + |
|---|
| 15081 | + err = i40e_handle_resets(pf); |
|---|
| 15082 | + if (err) |
|---|
| 15083 | + goto err_pf_reset; |
|---|
| 15084 | + |
|---|
| 15085 | + i40e_check_recovery_mode(pf); |
|---|
| 13803 | 15086 | |
|---|
| 13804 | 15087 | hw->aq.num_arq_entries = I40E_AQ_LEN; |
|---|
| 13805 | 15088 | hw->aq.num_asq_entries = I40E_AQ_LEN; |
|---|
| .. | .. |
|---|
| 13825 | 15108 | if (err) { |
|---|
| 13826 | 15109 | if (err == I40E_ERR_FIRMWARE_API_VERSION) |
|---|
| 13827 | 15110 | dev_info(&pdev->dev, |
|---|
| 13828 | | - "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); |
|---|
| 15111 | + "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n", |
|---|
| 15112 | + hw->aq.api_maj_ver, |
|---|
| 15113 | + hw->aq.api_min_ver, |
|---|
| 15114 | + I40E_FW_API_VERSION_MAJOR, |
|---|
| 15115 | + I40E_FW_MINOR_VERSION(hw)); |
|---|
| 13829 | 15116 | else |
|---|
| 13830 | 15117 | dev_info(&pdev->dev, |
|---|
| 13831 | 15118 | "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n"); |
|---|
| .. | .. |
|---|
| 13834 | 15121 | } |
|---|
| 13835 | 15122 | i40e_get_oem_version(hw); |
|---|
| 13836 | 15123 | |
|---|
| 13837 | | - /* provide nvm, fw, api versions */ |
|---|
| 13838 | | - dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n", |
|---|
| 15124 | + /* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */ |
|---|
| 15125 | + dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n", |
|---|
| 13839 | 15126 | hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, |
|---|
| 13840 | 15127 | hw->aq.api_maj_ver, hw->aq.api_min_ver, |
|---|
| 13841 | | - i40e_nvm_version_str(hw)); |
|---|
| 15128 | + i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id, |
|---|
| 15129 | + hw->subsystem_vendor_id, hw->subsystem_device_id); |
|---|
| 13842 | 15130 | |
|---|
| 13843 | 15131 | if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && |
|---|
| 13844 | 15132 | hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) |
|---|
| 13845 | | - dev_info(&pdev->dev, |
|---|
| 13846 | | - "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); |
|---|
| 15133 | + dev_dbg(&pdev->dev, |
|---|
| 15134 | + "The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n", |
|---|
| 15135 | + hw->aq.api_maj_ver, |
|---|
| 15136 | + hw->aq.api_min_ver, |
|---|
| 15137 | + I40E_FW_API_VERSION_MAJOR, |
|---|
| 15138 | + I40E_FW_MINOR_VERSION(hw)); |
|---|
| 13847 | 15139 | else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) |
|---|
| 13848 | 15140 | dev_info(&pdev->dev, |
|---|
| 13849 | | - "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); |
|---|
| 15141 | + "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n", |
|---|
| 15142 | + hw->aq.api_maj_ver, |
|---|
| 15143 | + hw->aq.api_min_ver, |
|---|
| 15144 | + I40E_FW_API_VERSION_MAJOR, |
|---|
| 15145 | + I40E_FW_MINOR_VERSION(hw)); |
|---|
| 13850 | 15146 | |
|---|
| 13851 | 15147 | i40e_verify_eeprom(pf); |
|---|
| 13852 | 15148 | |
|---|
| .. | .. |
|---|
| 13855 | 15151 | dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); |
|---|
| 13856 | 15152 | |
|---|
| 13857 | 15153 | i40e_clear_pxe_mode(hw); |
|---|
| 15154 | + |
|---|
| 13858 | 15155 | err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities); |
|---|
| 13859 | 15156 | if (err) |
|---|
| 13860 | 15157 | goto err_adminq_setup; |
|---|
| .. | .. |
|---|
| 13864 | 15161 | dev_info(&pdev->dev, "sw_init failed: %d\n", err); |
|---|
| 13865 | 15162 | goto err_sw_init; |
|---|
| 13866 | 15163 | } |
|---|
| 15164 | + |
|---|
| 15165 | + if (test_bit(__I40E_RECOVERY_MODE, pf->state)) |
|---|
| 15166 | + return i40e_init_recovery_mode(pf, hw); |
|---|
| 13867 | 15167 | |
|---|
| 13868 | 15168 | err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, |
|---|
| 13869 | 15169 | hw->func_caps.num_rx_qp, 0, 0); |
|---|
| .. | .. |
|---|
| 13885 | 15185 | */ |
|---|
| 13886 | 15186 | if (pf->hw_features & I40E_HW_STOP_FW_LLDP) { |
|---|
| 13887 | 15187 | dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n"); |
|---|
| 13888 | | - i40e_aq_stop_lldp(hw, true, NULL); |
|---|
| 15188 | + i40e_aq_stop_lldp(hw, true, false, NULL); |
|---|
| 13889 | 15189 | } |
|---|
| 13890 | 15190 | |
|---|
| 13891 | 15191 | /* allow a platform config to override the HW addr */ |
|---|
| .. | .. |
|---|
| 13904 | 15204 | |
|---|
| 13905 | 15205 | pci_set_drvdata(pdev, pf); |
|---|
| 13906 | 15206 | pci_save_state(pdev); |
|---|
| 15207 | + |
|---|
| 15208 | + dev_info(&pdev->dev, |
|---|
| 15209 | + (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ? |
|---|
| 15210 | + "FW LLDP is disabled\n" : |
|---|
| 15211 | + "FW LLDP is enabled\n"); |
|---|
| 13907 | 15212 | |
|---|
| 13908 | 15213 | /* Enable FW to write default DCB config on link-up */ |
|---|
| 13909 | 15214 | i40e_aq_set_dcb_parameters(hw, true, NULL); |
|---|
| .. | .. |
|---|
| 13938 | 15243 | if (err) |
|---|
| 13939 | 15244 | goto err_switch_setup; |
|---|
| 13940 | 15245 | |
|---|
| 15246 | + pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port; |
|---|
| 15247 | + pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port; |
|---|
| 15248 | + pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; |
|---|
| 15249 | + pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared; |
|---|
| 15250 | + pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS; |
|---|
| 15251 | + pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN | |
|---|
| 15252 | + UDP_TUNNEL_TYPE_GENEVE; |
|---|
| 15253 | + |
|---|
| 13941 | 15254 | /* The number of VSIs reported by the FW is the minimum guaranteed |
|---|
| 13942 | 15255 | * to us; HW supports far more and we share the remaining pool with |
|---|
| 13943 | 15256 | * the other PFs. We allocate space for more than the guarantee with |
|---|
| .. | .. |
|---|
| 13947 | 15260 | pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; |
|---|
| 13948 | 15261 | else |
|---|
| 13949 | 15262 | pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; |
|---|
| 15263 | + if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { |
|---|
| 15264 | + dev_warn(&pf->pdev->dev, |
|---|
| 15265 | + "limiting the VSI count due to UDP tunnel limitation %d > %d\n", |
|---|
| 15266 | + pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); |
|---|
| 15267 | + pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; |
|---|
| 15268 | + } |
|---|
| 13950 | 15269 | |
|---|
| 13951 | 15270 | /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */ |
|---|
| 13952 | 15271 | pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), |
|---|
| .. | .. |
|---|
| 13965 | 15284 | pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; |
|---|
| 13966 | 15285 | } |
|---|
| 13967 | 15286 | #endif |
|---|
| 13968 | | - err = i40e_setup_pf_switch(pf, false); |
|---|
| 15287 | + err = i40e_setup_pf_switch(pf, false, false); |
|---|
| 13969 | 15288 | if (err) { |
|---|
| 13970 | 15289 | dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); |
|---|
| 13971 | 15290 | goto err_vsis; |
|---|
| .. | .. |
|---|
| 14106 | 15425 | |
|---|
| 14107 | 15426 | switch (hw->bus.speed) { |
|---|
| 14108 | 15427 | case i40e_bus_speed_8000: |
|---|
| 14109 | | - strncpy(speed, "8.0", PCI_SPEED_SIZE); break; |
|---|
| 15428 | + strlcpy(speed, "8.0", PCI_SPEED_SIZE); break; |
|---|
| 14110 | 15429 | case i40e_bus_speed_5000: |
|---|
| 14111 | | - strncpy(speed, "5.0", PCI_SPEED_SIZE); break; |
|---|
| 15430 | + strlcpy(speed, "5.0", PCI_SPEED_SIZE); break; |
|---|
| 14112 | 15431 | case i40e_bus_speed_2500: |
|---|
| 14113 | | - strncpy(speed, "2.5", PCI_SPEED_SIZE); break; |
|---|
| 15432 | + strlcpy(speed, "2.5", PCI_SPEED_SIZE); break; |
|---|
| 14114 | 15433 | default: |
|---|
| 14115 | 15434 | break; |
|---|
| 14116 | 15435 | } |
|---|
| 14117 | 15436 | switch (hw->bus.width) { |
|---|
| 14118 | 15437 | case i40e_bus_width_pcie_x8: |
|---|
| 14119 | | - strncpy(width, "8", PCI_WIDTH_SIZE); break; |
|---|
| 15438 | + strlcpy(width, "8", PCI_WIDTH_SIZE); break; |
|---|
| 14120 | 15439 | case i40e_bus_width_pcie_x4: |
|---|
| 14121 | | - strncpy(width, "4", PCI_WIDTH_SIZE); break; |
|---|
| 15440 | + strlcpy(width, "4", PCI_WIDTH_SIZE); break; |
|---|
| 14122 | 15441 | case i40e_bus_width_pcie_x2: |
|---|
| 14123 | | - strncpy(width, "2", PCI_WIDTH_SIZE); break; |
|---|
| 15442 | + strlcpy(width, "2", PCI_WIDTH_SIZE); break; |
|---|
| 14124 | 15443 | case i40e_bus_width_pcie_x1: |
|---|
| 14125 | | - strncpy(width, "1", PCI_WIDTH_SIZE); break; |
|---|
| 15444 | + strlcpy(width, "1", PCI_WIDTH_SIZE); break; |
|---|
| 14126 | 15445 | default: |
|---|
| 14127 | 15446 | break; |
|---|
| 14128 | 15447 | } |
|---|
| .. | .. |
|---|
| 14145 | 15464 | i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); |
|---|
| 14146 | 15465 | pf->hw.phy.link_info.requested_speeds = abilities.link_speed; |
|---|
| 14147 | 15466 | |
|---|
| 15467 | + /* set the FEC config due to the board capabilities */ |
|---|
| 15468 | + i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags); |
|---|
| 15469 | + |
|---|
| 14148 | 15470 | /* get the supported phy types from the fw */ |
|---|
| 14149 | 15471 | err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL); |
|---|
| 14150 | 15472 | if (err) |
|---|
| 14151 | 15473 | dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n", |
|---|
| 14152 | 15474 | i40e_stat_str(&pf->hw, err), |
|---|
| 14153 | 15475 | i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); |
|---|
| 15476 | + |
|---|
| 15477 | + /* make sure the MFS hasn't been set lower than the default */ |
|---|
| 15478 | +#define MAX_FRAME_SIZE_DEFAULT 0x2600 |
|---|
| 15479 | + val = (rd32(&pf->hw, I40E_PRTGL_SAH) & |
|---|
| 15480 | + I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT; |
|---|
| 15481 | + if (val < MAX_FRAME_SIZE_DEFAULT) |
|---|
| 15482 | + dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n", |
|---|
| 15483 | + i, val); |
|---|
| 14154 | 15484 | |
|---|
| 14155 | 15485 | /* Add a filter to drop all Flow control frames from any VSI from being |
|---|
| 14156 | 15486 | * transmitted. By doing so we stop a malicious VF from sending out |
|---|
| .. | .. |
|---|
| 14239 | 15569 | if (pf->service_task.func) |
|---|
| 14240 | 15570 | cancel_work_sync(&pf->service_task); |
|---|
| 14241 | 15571 | |
|---|
| 15572 | + if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { |
|---|
| 15573 | + struct i40e_vsi *vsi = pf->vsi[0]; |
|---|
| 15574 | + |
|---|
| 15575 | + /* We know that we have allocated only one vsi for this PF, |
|---|
| 15576 | + * it was just for registering netdevice, so the interface |
|---|
| 15577 | + * could be visible in the 'ifconfig' output |
|---|
| 15578 | + */ |
|---|
| 15579 | + unregister_netdev(vsi->netdev); |
|---|
| 15580 | + free_netdev(vsi->netdev); |
|---|
| 15581 | + |
|---|
| 15582 | + goto unmap; |
|---|
| 15583 | + } |
|---|
| 15584 | + |
|---|
| 14242 | 15585 | /* Client close must be called explicitly here because the timer |
|---|
| 14243 | 15586 | * has been stopped. |
|---|
| 14244 | 15587 | */ |
|---|
| .. | .. |
|---|
| 14283 | 15626 | ret_code); |
|---|
| 14284 | 15627 | } |
|---|
| 14285 | 15628 | |
|---|
| 15629 | +unmap: |
|---|
| 15630 | + /* Free MSI/legacy interrupt 0 when in recovery mode. */ |
|---|
| 15631 | + if (test_bit(__I40E_RECOVERY_MODE, pf->state) && |
|---|
| 15632 | + !(pf->flags & I40E_FLAG_MSIX_ENABLED)) |
|---|
| 15633 | + free_irq(pf->pdev->irq, pf); |
|---|
| 15634 | + |
|---|
| 14286 | 15635 | /* shutdown the adminq */ |
|---|
| 14287 | 15636 | i40e_shutdown_adminq(hw); |
|---|
| 14288 | 15637 | |
|---|
| .. | .. |
|---|
| 14295 | 15644 | i40e_clear_interrupt_scheme(pf); |
|---|
| 14296 | 15645 | for (i = 0; i < pf->num_alloc_vsi; i++) { |
|---|
| 14297 | 15646 | if (pf->vsi[i]) { |
|---|
| 14298 | | - i40e_vsi_clear_rings(pf->vsi[i]); |
|---|
| 15647 | + if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) |
|---|
| 15648 | + i40e_vsi_clear_rings(pf->vsi[i]); |
|---|
| 14299 | 15649 | i40e_vsi_clear(pf->vsi[i]); |
|---|
| 14300 | 15650 | pf->vsi[i] = NULL; |
|---|
| 14301 | 15651 | } |
|---|
| .. | .. |
|---|
| 14328 | 15678 | * remediation. |
|---|
| 14329 | 15679 | **/ |
|---|
| 14330 | 15680 | static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev, |
|---|
| 14331 | | - enum pci_channel_state error) |
|---|
| 15681 | + pci_channel_state_t error) |
|---|
| 14332 | 15682 | { |
|---|
| 14333 | 15683 | struct i40e_pf *pf = pci_get_drvdata(pdev); |
|---|
| 14334 | 15684 | |
|---|
| .. | .. |
|---|
| 14361 | 15711 | { |
|---|
| 14362 | 15712 | struct i40e_pf *pf = pci_get_drvdata(pdev); |
|---|
| 14363 | 15713 | pci_ers_result_t result; |
|---|
| 14364 | | - int err; |
|---|
| 14365 | 15714 | u32 reg; |
|---|
| 14366 | 15715 | |
|---|
| 14367 | 15716 | dev_dbg(&pdev->dev, "%s\n", __func__); |
|---|
| .. | .. |
|---|
| 14380 | 15729 | result = PCI_ERS_RESULT_RECOVERED; |
|---|
| 14381 | 15730 | else |
|---|
| 14382 | 15731 | result = PCI_ERS_RESULT_DISCONNECT; |
|---|
| 14383 | | - } |
|---|
| 14384 | | - |
|---|
| 14385 | | - err = pci_cleanup_aer_uncorrect_error_status(pdev); |
|---|
| 14386 | | - if (err) { |
|---|
| 14387 | | - dev_info(&pdev->dev, |
|---|
| 14388 | | - "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", |
|---|
| 14389 | | - err); |
|---|
| 14390 | | - /* non-fatal, continue */ |
|---|
| 14391 | 15732 | } |
|---|
| 14392 | 15733 | |
|---|
| 14393 | 15734 | return result; |
|---|
| .. | .. |
|---|
| 14512 | 15853 | wr32(hw, I40E_PFPM_WUFC, |
|---|
| 14513 | 15854 | (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); |
|---|
| 14514 | 15855 | |
|---|
| 15856 | + /* Free MSI/legacy interrupt 0 when in recovery mode. */ |
|---|
| 15857 | + if (test_bit(__I40E_RECOVERY_MODE, pf->state) && |
|---|
| 15858 | + !(pf->flags & I40E_FLAG_MSIX_ENABLED)) |
|---|
| 15859 | + free_irq(pf->pdev->irq, pf); |
|---|
| 15860 | + |
|---|
| 14515 | 15861 | /* Since we're going to destroy queues during the |
|---|
| 14516 | 15862 | * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this |
|---|
| 14517 | 15863 | * whole section |
|---|
| .. | .. |
|---|
| 14532 | 15878 | **/ |
|---|
| 14533 | 15879 | static int __maybe_unused i40e_suspend(struct device *dev) |
|---|
| 14534 | 15880 | { |
|---|
| 14535 | | - struct pci_dev *pdev = to_pci_dev(dev); |
|---|
| 14536 | | - struct i40e_pf *pf = pci_get_drvdata(pdev); |
|---|
| 15881 | + struct i40e_pf *pf = dev_get_drvdata(dev); |
|---|
| 14537 | 15882 | struct i40e_hw *hw = &pf->hw; |
|---|
| 14538 | 15883 | |
|---|
| 14539 | 15884 | /* If we're already suspended, then there is nothing to do */ |
|---|
| .. | .. |
|---|
| 14583 | 15928 | **/ |
|---|
| 14584 | 15929 | static int __maybe_unused i40e_resume(struct device *dev) |
|---|
| 14585 | 15930 | { |
|---|
| 14586 | | - struct pci_dev *pdev = to_pci_dev(dev); |
|---|
| 14587 | | - struct i40e_pf *pf = pci_get_drvdata(pdev); |
|---|
| 15931 | + struct i40e_pf *pf = dev_get_drvdata(dev); |
|---|
| 14588 | 15932 | int err; |
|---|
| 14589 | 15933 | |
|---|
| 14590 | 15934 | /* If we're not suspended, then there is nothing to do */ |
|---|
| .. | .. |
|---|
| 14601 | 15945 | */ |
|---|
| 14602 | 15946 | err = i40e_restore_interrupt_scheme(pf); |
|---|
| 14603 | 15947 | if (err) { |
|---|
| 14604 | | - dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n", |
|---|
| 15948 | + dev_err(dev, "Cannot restore interrupt scheme: %d\n", |
|---|
| 14605 | 15949 | err); |
|---|
| 14606 | 15950 | } |
|---|
| 14607 | 15951 | |
|---|
| .. | .. |
|---|
| 14651 | 15995 | **/ |
|---|
| 14652 | 15996 | static int __init i40e_init_module(void) |
|---|
| 14653 | 15997 | { |
|---|
| 14654 | | - pr_info("%s: %s - version %s\n", i40e_driver_name, |
|---|
| 14655 | | - i40e_driver_string, i40e_driver_version_str); |
|---|
| 15998 | + int err; |
|---|
| 15999 | + |
|---|
| 16000 | + pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string); |
|---|
| 14656 | 16001 | pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); |
|---|
| 14657 | 16002 | |
|---|
| 14658 | 16003 | /* There is no need to throttle the number of active tasks because |
|---|
| .. | .. |
|---|
| 14669 | 16014 | } |
|---|
| 14670 | 16015 | |
|---|
| 14671 | 16016 | i40e_dbg_init(); |
|---|
| 14672 | | - return pci_register_driver(&i40e_driver); |
|---|
| 16017 | + err = pci_register_driver(&i40e_driver); |
|---|
| 16018 | + if (err) { |
|---|
| 16019 | + destroy_workqueue(i40e_wq); |
|---|
| 16020 | + i40e_dbg_exit(); |
|---|
| 16021 | + return err; |
|---|
| 16022 | + } |
|---|
| 16023 | + |
|---|
| 16024 | + return 0; |
|---|
| 14673 | 16025 | } |
|---|
| 14674 | 16026 | module_init(i40e_init_module); |
|---|
| 14675 | 16027 | |
|---|