| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) |
|---|
| 1 | 2 | /* QLogic qede NIC Driver |
|---|
| 2 | 3 | * Copyright (c) 2015-2017 QLogic Corporation |
|---|
| 3 | | - * |
|---|
| 4 | | - * This software is available to you under a choice of one of two |
|---|
| 5 | | - * licenses. You may choose to be licensed under the terms of the GNU |
|---|
| 6 | | - * General Public License (GPL) Version 2, available from the file |
|---|
| 7 | | - * COPYING in the main directory of this source tree, or the |
|---|
| 8 | | - * OpenIB.org BSD license below: |
|---|
| 9 | | - * |
|---|
| 10 | | - * Redistribution and use in source and binary forms, with or |
|---|
| 11 | | - * without modification, are permitted provided that the following |
|---|
| 12 | | - * conditions are met: |
|---|
| 13 | | - * |
|---|
| 14 | | - * - Redistributions of source code must retain the above |
|---|
| 15 | | - * copyright notice, this list of conditions and the following |
|---|
| 16 | | - * disclaimer. |
|---|
| 17 | | - * |
|---|
| 18 | | - * - Redistributions in binary form must reproduce the above |
|---|
| 19 | | - * copyright notice, this list of conditions and the following |
|---|
| 20 | | - * disclaimer in the documentation and /or other materials |
|---|
| 21 | | - * provided with the distribution. |
|---|
| 22 | | - * |
|---|
| 23 | | - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
|---|
| 24 | | - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
|---|
| 25 | | - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
|---|
| 26 | | - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
|---|
| 27 | | - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
|---|
| 28 | | - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
|---|
| 29 | | - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
|---|
| 30 | | - * SOFTWARE. |
|---|
| 4 | + * Copyright (c) 2019-2020 Marvell International Ltd. |
|---|
| 31 | 5 | */ |
|---|
| 6 | + |
|---|
| 32 | 7 | #include <linux/crash_dump.h> |
|---|
| 33 | 8 | #include <linux/module.h> |
|---|
| 34 | 9 | #include <linux/pci.h> |
|---|
| .. | .. |
|---|
| 61 | 36 | #include <net/ip6_checksum.h> |
|---|
| 62 | 37 | #include <linux/bitops.h> |
|---|
| 63 | 38 | #include <linux/vmalloc.h> |
|---|
| 39 | +#include <linux/aer.h> |
|---|
| 64 | 40 | #include "qede.h" |
|---|
| 65 | 41 | #include "qede_ptp.h" |
|---|
| 66 | 42 | |
|---|
| .. | .. |
|---|
| 125 | 101 | MODULE_DEVICE_TABLE(pci, qede_pci_tbl); |
|---|
| 126 | 102 | |
|---|
| 127 | 103 | static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id); |
|---|
| 104 | +static pci_ers_result_t |
|---|
| 105 | +qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state); |
|---|
| 128 | 106 | |
|---|
| 129 | 107 | #define TX_TIMEOUT (5 * HZ) |
|---|
| 130 | 108 | |
|---|
| .. | .. |
|---|
| 134 | 112 | static void qede_remove(struct pci_dev *pdev); |
|---|
| 135 | 113 | static void qede_shutdown(struct pci_dev *pdev); |
|---|
| 136 | 114 | static void qede_link_update(void *dev, struct qed_link_output *link); |
|---|
| 115 | +static void qede_schedule_recovery_handler(void *dev); |
|---|
| 116 | +static void qede_recovery_handler(struct qede_dev *edev); |
|---|
| 117 | +static void qede_schedule_hw_err_handler(void *dev, |
|---|
| 118 | + enum qed_hw_err_type err_type); |
|---|
| 137 | 119 | static void qede_get_eth_tlv_data(void *edev, void *data); |
|---|
| 138 | 120 | static void qede_get_generic_tlv_data(void *edev, |
|---|
| 139 | 121 | struct qed_generic_tlvs *data); |
|---|
| 140 | | - |
|---|
| 141 | | -/* The qede lock is used to protect driver state change and driver flows that |
|---|
| 142 | | - * are not reentrant. |
|---|
| 143 | | - */ |
|---|
| 144 | | -void __qede_lock(struct qede_dev *edev) |
|---|
| 145 | | -{ |
|---|
| 146 | | - mutex_lock(&edev->qede_lock); |
|---|
| 147 | | -} |
|---|
| 148 | | - |
|---|
| 149 | | -void __qede_unlock(struct qede_dev *edev) |
|---|
| 150 | | -{ |
|---|
| 151 | | - mutex_unlock(&edev->qede_lock); |
|---|
| 152 | | -} |
|---|
| 153 | | - |
|---|
| 122 | +static void qede_generic_hw_err_handler(struct qede_dev *edev); |
|---|
| 154 | 123 | #ifdef CONFIG_QED_SRIOV |
|---|
| 155 | 124 | static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos, |
|---|
| 156 | 125 | __be16 vlan_proto) |
|---|
| .. | .. |
|---|
| 175 | 144 | { |
|---|
| 176 | 145 | struct qede_dev *edev = netdev_priv(ndev); |
|---|
| 177 | 146 | |
|---|
| 178 | | - DP_VERBOSE(edev, QED_MSG_IOV, |
|---|
| 179 | | - "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n", |
|---|
| 180 | | - mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx); |
|---|
| 147 | + DP_VERBOSE(edev, QED_MSG_IOV, "Setting MAC %pM to VF [%d]\n", mac, vfidx); |
|---|
| 181 | 148 | |
|---|
| 182 | 149 | if (!is_valid_ether_addr(mac)) { |
|---|
| 183 | 150 | DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n"); |
|---|
| .. | .. |
|---|
| 215 | 182 | } |
|---|
| 216 | 183 | #endif |
|---|
| 217 | 184 | |
|---|
| 185 | +static const struct pci_error_handlers qede_err_handler = { |
|---|
| 186 | + .error_detected = qede_io_error_detected, |
|---|
| 187 | +}; |
|---|
| 188 | + |
|---|
| 218 | 189 | static struct pci_driver qede_pci_driver = { |
|---|
| 219 | 190 | .name = "qede", |
|---|
| 220 | 191 | .id_table = qede_pci_tbl, |
|---|
| .. | .. |
|---|
| 224 | 195 | #ifdef CONFIG_QED_SRIOV |
|---|
| 225 | 196 | .sriov_configure = qede_sriov_configure, |
|---|
| 226 | 197 | #endif |
|---|
| 198 | + .err_handler = &qede_err_handler, |
|---|
| 227 | 199 | }; |
|---|
| 228 | 200 | |
|---|
| 229 | 201 | static struct qed_eth_cb_ops qede_ll_ops = { |
|---|
| .. | .. |
|---|
| 232 | 204 | .arfs_filter_op = qede_arfs_filter_op, |
|---|
| 233 | 205 | #endif |
|---|
| 234 | 206 | .link_update = qede_link_update, |
|---|
| 207 | + .schedule_recovery_handler = qede_schedule_recovery_handler, |
|---|
| 208 | + .schedule_hw_err_handler = qede_schedule_hw_err_handler, |
|---|
| 235 | 209 | .get_generic_tlv_data = qede_get_generic_tlv_data, |
|---|
| 236 | 210 | .get_protocol_tlv_data = qede_get_eth_tlv_data, |
|---|
| 237 | 211 | }, |
|---|
| .. | .. |
|---|
| 286 | 260 | int ret; |
|---|
| 287 | 261 | |
|---|
| 288 | 262 | pr_info("qede_init: %s\n", version); |
|---|
| 263 | + |
|---|
| 264 | + qede_forced_speed_maps_init(); |
|---|
| 289 | 265 | |
|---|
| 290 | 266 | qed_ops = qed_get_eth_ops(); |
|---|
| 291 | 267 | if (!qed_ops) { |
|---|
| .. | .. |
|---|
| 401 | 377 | p_common->brb_discards = stats.common.brb_discards; |
|---|
| 402 | 378 | p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames; |
|---|
| 403 | 379 | p_common->link_change_count = stats.common.link_change_count; |
|---|
| 380 | + p_common->ptp_skip_txts = edev->ptp_skip_txts; |
|---|
| 404 | 381 | |
|---|
| 405 | 382 | if (QEDE_IS_BB(edev)) { |
|---|
| 406 | 383 | struct qede_stats_bb *p_bb = &edev->stats.bb; |
|---|
| .. | .. |
|---|
| 537 | 514 | return 0; |
|---|
| 538 | 515 | } |
|---|
| 539 | 516 | |
|---|
| 517 | +static void qede_tx_log_print(struct qede_dev *edev, struct qede_tx_queue *txq) |
|---|
| 518 | +{ |
|---|
| 519 | + DP_NOTICE(edev, |
|---|
| 520 | + "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n", |
|---|
| 521 | + txq->index, le16_to_cpu(*txq->hw_cons_ptr), |
|---|
| 522 | + qed_chain_get_cons_idx(&txq->tx_pbl), |
|---|
| 523 | + qed_chain_get_prod_idx(&txq->tx_pbl), |
|---|
| 524 | + jiffies); |
|---|
| 525 | +} |
|---|
| 526 | + |
|---|
| 527 | +static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue) |
|---|
| 528 | +{ |
|---|
| 529 | + struct qede_dev *edev = netdev_priv(dev); |
|---|
| 530 | + struct qede_tx_queue *txq; |
|---|
| 531 | + int cos; |
|---|
| 532 | + |
|---|
| 533 | + netif_carrier_off(dev); |
|---|
| 534 | + DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue); |
|---|
| 535 | + |
|---|
| 536 | + if (!(edev->fp_array[txqueue].type & QEDE_FASTPATH_TX)) |
|---|
| 537 | + return; |
|---|
| 538 | + |
|---|
| 539 | + for_each_cos_in_txq(edev, cos) { |
|---|
| 540 | + txq = &edev->fp_array[txqueue].txq[cos]; |
|---|
| 541 | + |
|---|
| 542 | + if (qed_chain_get_cons_idx(&txq->tx_pbl) != |
|---|
| 543 | + qed_chain_get_prod_idx(&txq->tx_pbl)) |
|---|
| 544 | + qede_tx_log_print(edev, txq); |
|---|
| 545 | + } |
|---|
| 546 | + |
|---|
| 547 | + if (IS_VF(edev)) |
|---|
| 548 | + return; |
|---|
| 549 | + |
|---|
| 550 | + if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) || |
|---|
| 551 | + edev->state == QEDE_STATE_RECOVERY) { |
|---|
| 552 | + DP_INFO(edev, |
|---|
| 553 | + "Avoid handling a Tx timeout while another HW error is being handled\n"); |
|---|
| 554 | + return; |
|---|
| 555 | + } |
|---|
| 556 | + |
|---|
| 557 | + set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags); |
|---|
| 558 | + set_bit(QEDE_SP_HW_ERR, &edev->sp_flags); |
|---|
| 559 | + schedule_delayed_work(&edev->sp_task, 0); |
|---|
| 560 | +} |
|---|
| 561 | + |
|---|
| 540 | 562 | static int qede_setup_tc(struct net_device *ndev, u8 num_tc) |
|---|
| 541 | 563 | { |
|---|
| 542 | 564 | struct qede_dev *edev = netdev_priv(ndev); |
|---|
| .. | .. |
|---|
| 558 | 580 | } |
|---|
| 559 | 581 | |
|---|
| 560 | 582 | static int |
|---|
| 561 | | -qede_set_flower(struct qede_dev *edev, struct tc_cls_flower_offload *f, |
|---|
| 583 | +qede_set_flower(struct qede_dev *edev, struct flow_cls_offload *f, |
|---|
| 562 | 584 | __be16 proto) |
|---|
| 563 | 585 | { |
|---|
| 564 | 586 | switch (f->command) { |
|---|
| 565 | | - case TC_CLSFLOWER_REPLACE: |
|---|
| 587 | + case FLOW_CLS_REPLACE: |
|---|
| 566 | 588 | return qede_add_tc_flower_fltr(edev, proto, f); |
|---|
| 567 | | - case TC_CLSFLOWER_DESTROY: |
|---|
| 589 | + case FLOW_CLS_DESTROY: |
|---|
| 568 | 590 | return qede_delete_flow_filter(edev, f->cookie); |
|---|
| 569 | 591 | default: |
|---|
| 570 | 592 | return -EOPNOTSUPP; |
|---|
| .. | .. |
|---|
| 574 | 596 | static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data, |
|---|
| 575 | 597 | void *cb_priv) |
|---|
| 576 | 598 | { |
|---|
| 577 | | - struct tc_cls_flower_offload *f; |
|---|
| 599 | + struct flow_cls_offload *f; |
|---|
| 578 | 600 | struct qede_dev *edev = cb_priv; |
|---|
| 579 | 601 | |
|---|
| 580 | 602 | if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data)) |
|---|
| .. | .. |
|---|
| 589 | 611 | } |
|---|
| 590 | 612 | } |
|---|
| 591 | 613 | |
|---|
| 592 | | -static int qede_setup_tc_block(struct qede_dev *edev, |
|---|
| 593 | | - struct tc_block_offload *f) |
|---|
| 594 | | -{ |
|---|
| 595 | | - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) |
|---|
| 596 | | - return -EOPNOTSUPP; |
|---|
| 597 | | - |
|---|
| 598 | | - switch (f->command) { |
|---|
| 599 | | - case TC_BLOCK_BIND: |
|---|
| 600 | | - return tcf_block_cb_register(f->block, |
|---|
| 601 | | - qede_setup_tc_block_cb, |
|---|
| 602 | | - edev, edev, f->extack); |
|---|
| 603 | | - case TC_BLOCK_UNBIND: |
|---|
| 604 | | - tcf_block_cb_unregister(f->block, qede_setup_tc_block_cb, edev); |
|---|
| 605 | | - return 0; |
|---|
| 606 | | - default: |
|---|
| 607 | | - return -EOPNOTSUPP; |
|---|
| 608 | | - } |
|---|
| 609 | | -} |
|---|
| 614 | +static LIST_HEAD(qede_block_cb_list); |
|---|
| 610 | 615 | |
|---|
| 611 | 616 | static int |
|---|
| 612 | 617 | qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type, |
|---|
| .. | .. |
|---|
| 617 | 622 | |
|---|
| 618 | 623 | switch (type) { |
|---|
| 619 | 624 | case TC_SETUP_BLOCK: |
|---|
| 620 | | - return qede_setup_tc_block(edev, type_data); |
|---|
| 625 | + return flow_block_cb_setup_simple(type_data, |
|---|
| 626 | + &qede_block_cb_list, |
|---|
| 627 | + qede_setup_tc_block_cb, |
|---|
| 628 | + edev, edev, true); |
|---|
| 621 | 629 | case TC_SETUP_QDISC_MQPRIO: |
|---|
| 622 | 630 | mqprio = type_data; |
|---|
| 623 | 631 | |
|---|
| .. | .. |
|---|
| 629 | 637 | } |
|---|
| 630 | 638 | |
|---|
| 631 | 639 | static const struct net_device_ops qede_netdev_ops = { |
|---|
| 632 | | - .ndo_open = qede_open, |
|---|
| 633 | | - .ndo_stop = qede_close, |
|---|
| 634 | | - .ndo_start_xmit = qede_start_xmit, |
|---|
| 635 | | - .ndo_select_queue = qede_select_queue, |
|---|
| 636 | | - .ndo_set_rx_mode = qede_set_rx_mode, |
|---|
| 637 | | - .ndo_set_mac_address = qede_set_mac_addr, |
|---|
| 638 | | - .ndo_validate_addr = eth_validate_addr, |
|---|
| 639 | | - .ndo_change_mtu = qede_change_mtu, |
|---|
| 640 | | - .ndo_do_ioctl = qede_ioctl, |
|---|
| 640 | + .ndo_open = qede_open, |
|---|
| 641 | + .ndo_stop = qede_close, |
|---|
| 642 | + .ndo_start_xmit = qede_start_xmit, |
|---|
| 643 | + .ndo_select_queue = qede_select_queue, |
|---|
| 644 | + .ndo_set_rx_mode = qede_set_rx_mode, |
|---|
| 645 | + .ndo_set_mac_address = qede_set_mac_addr, |
|---|
| 646 | + .ndo_validate_addr = eth_validate_addr, |
|---|
| 647 | + .ndo_change_mtu = qede_change_mtu, |
|---|
| 648 | + .ndo_do_ioctl = qede_ioctl, |
|---|
| 649 | + .ndo_tx_timeout = qede_tx_timeout, |
|---|
| 641 | 650 | #ifdef CONFIG_QED_SRIOV |
|---|
| 642 | | - .ndo_set_vf_mac = qede_set_vf_mac, |
|---|
| 643 | | - .ndo_set_vf_vlan = qede_set_vf_vlan, |
|---|
| 644 | | - .ndo_set_vf_trust = qede_set_vf_trust, |
|---|
| 651 | + .ndo_set_vf_mac = qede_set_vf_mac, |
|---|
| 652 | + .ndo_set_vf_vlan = qede_set_vf_vlan, |
|---|
| 653 | + .ndo_set_vf_trust = qede_set_vf_trust, |
|---|
| 645 | 654 | #endif |
|---|
| 646 | | - .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, |
|---|
| 647 | | - .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, |
|---|
| 648 | | - .ndo_fix_features = qede_fix_features, |
|---|
| 649 | | - .ndo_set_features = qede_set_features, |
|---|
| 650 | | - .ndo_get_stats64 = qede_get_stats64, |
|---|
| 655 | + .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, |
|---|
| 656 | + .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, |
|---|
| 657 | + .ndo_fix_features = qede_fix_features, |
|---|
| 658 | + .ndo_set_features = qede_set_features, |
|---|
| 659 | + .ndo_get_stats64 = qede_get_stats64, |
|---|
| 651 | 660 | #ifdef CONFIG_QED_SRIOV |
|---|
| 652 | | - .ndo_set_vf_link_state = qede_set_vf_link_state, |
|---|
| 653 | | - .ndo_set_vf_spoofchk = qede_set_vf_spoofchk, |
|---|
| 654 | | - .ndo_get_vf_config = qede_get_vf_config, |
|---|
| 655 | | - .ndo_set_vf_rate = qede_set_vf_rate, |
|---|
| 661 | + .ndo_set_vf_link_state = qede_set_vf_link_state, |
|---|
| 662 | + .ndo_set_vf_spoofchk = qede_set_vf_spoofchk, |
|---|
| 663 | + .ndo_get_vf_config = qede_get_vf_config, |
|---|
| 664 | + .ndo_set_vf_rate = qede_set_vf_rate, |
|---|
| 656 | 665 | #endif |
|---|
| 657 | | - .ndo_udp_tunnel_add = qede_udp_tunnel_add, |
|---|
| 658 | | - .ndo_udp_tunnel_del = qede_udp_tunnel_del, |
|---|
| 659 | | - .ndo_features_check = qede_features_check, |
|---|
| 660 | | - .ndo_bpf = qede_xdp, |
|---|
| 666 | + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, |
|---|
| 667 | + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, |
|---|
| 668 | + .ndo_features_check = qede_features_check, |
|---|
| 669 | + .ndo_bpf = qede_xdp, |
|---|
| 661 | 670 | #ifdef CONFIG_RFS_ACCEL |
|---|
| 662 | | - .ndo_rx_flow_steer = qede_rx_flow_steer, |
|---|
| 671 | + .ndo_rx_flow_steer = qede_rx_flow_steer, |
|---|
| 663 | 672 | #endif |
|---|
| 664 | | - .ndo_setup_tc = qede_setup_tc_offload, |
|---|
| 673 | + .ndo_xdp_xmit = qede_xdp_transmit, |
|---|
| 674 | + .ndo_setup_tc = qede_setup_tc_offload, |
|---|
| 665 | 675 | }; |
|---|
| 666 | 676 | |
|---|
| 667 | 677 | static const struct net_device_ops qede_netdev_vf_ops = { |
|---|
| 668 | | - .ndo_open = qede_open, |
|---|
| 669 | | - .ndo_stop = qede_close, |
|---|
| 670 | | - .ndo_start_xmit = qede_start_xmit, |
|---|
| 671 | | - .ndo_select_queue = qede_select_queue, |
|---|
| 672 | | - .ndo_set_rx_mode = qede_set_rx_mode, |
|---|
| 673 | | - .ndo_set_mac_address = qede_set_mac_addr, |
|---|
| 674 | | - .ndo_validate_addr = eth_validate_addr, |
|---|
| 675 | | - .ndo_change_mtu = qede_change_mtu, |
|---|
| 676 | | - .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, |
|---|
| 677 | | - .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, |
|---|
| 678 | | - .ndo_fix_features = qede_fix_features, |
|---|
| 679 | | - .ndo_set_features = qede_set_features, |
|---|
| 680 | | - .ndo_get_stats64 = qede_get_stats64, |
|---|
| 681 | | - .ndo_udp_tunnel_add = qede_udp_tunnel_add, |
|---|
| 682 | | - .ndo_udp_tunnel_del = qede_udp_tunnel_del, |
|---|
| 683 | | - .ndo_features_check = qede_features_check, |
|---|
| 678 | + .ndo_open = qede_open, |
|---|
| 679 | + .ndo_stop = qede_close, |
|---|
| 680 | + .ndo_start_xmit = qede_start_xmit, |
|---|
| 681 | + .ndo_select_queue = qede_select_queue, |
|---|
| 682 | + .ndo_set_rx_mode = qede_set_rx_mode, |
|---|
| 683 | + .ndo_set_mac_address = qede_set_mac_addr, |
|---|
| 684 | + .ndo_validate_addr = eth_validate_addr, |
|---|
| 685 | + .ndo_change_mtu = qede_change_mtu, |
|---|
| 686 | + .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, |
|---|
| 687 | + .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, |
|---|
| 688 | + .ndo_fix_features = qede_fix_features, |
|---|
| 689 | + .ndo_set_features = qede_set_features, |
|---|
| 690 | + .ndo_get_stats64 = qede_get_stats64, |
|---|
| 691 | + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, |
|---|
| 692 | + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, |
|---|
| 693 | + .ndo_features_check = qede_features_check, |
|---|
| 684 | 694 | }; |
|---|
| 685 | 695 | |
|---|
| 686 | 696 | static const struct net_device_ops qede_netdev_vf_xdp_ops = { |
|---|
| 687 | | - .ndo_open = qede_open, |
|---|
| 688 | | - .ndo_stop = qede_close, |
|---|
| 689 | | - .ndo_start_xmit = qede_start_xmit, |
|---|
| 690 | | - .ndo_select_queue = qede_select_queue, |
|---|
| 691 | | - .ndo_set_rx_mode = qede_set_rx_mode, |
|---|
| 692 | | - .ndo_set_mac_address = qede_set_mac_addr, |
|---|
| 693 | | - .ndo_validate_addr = eth_validate_addr, |
|---|
| 694 | | - .ndo_change_mtu = qede_change_mtu, |
|---|
| 695 | | - .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, |
|---|
| 696 | | - .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, |
|---|
| 697 | | - .ndo_fix_features = qede_fix_features, |
|---|
| 698 | | - .ndo_set_features = qede_set_features, |
|---|
| 699 | | - .ndo_get_stats64 = qede_get_stats64, |
|---|
| 700 | | - .ndo_udp_tunnel_add = qede_udp_tunnel_add, |
|---|
| 701 | | - .ndo_udp_tunnel_del = qede_udp_tunnel_del, |
|---|
| 702 | | - .ndo_features_check = qede_features_check, |
|---|
| 703 | | - .ndo_bpf = qede_xdp, |
|---|
| 697 | + .ndo_open = qede_open, |
|---|
| 698 | + .ndo_stop = qede_close, |
|---|
| 699 | + .ndo_start_xmit = qede_start_xmit, |
|---|
| 700 | + .ndo_select_queue = qede_select_queue, |
|---|
| 701 | + .ndo_set_rx_mode = qede_set_rx_mode, |
|---|
| 702 | + .ndo_set_mac_address = qede_set_mac_addr, |
|---|
| 703 | + .ndo_validate_addr = eth_validate_addr, |
|---|
| 704 | + .ndo_change_mtu = qede_change_mtu, |
|---|
| 705 | + .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, |
|---|
| 706 | + .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, |
|---|
| 707 | + .ndo_fix_features = qede_fix_features, |
|---|
| 708 | + .ndo_set_features = qede_set_features, |
|---|
| 709 | + .ndo_get_stats64 = qede_get_stats64, |
|---|
| 710 | + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, |
|---|
| 711 | + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, |
|---|
| 712 | + .ndo_features_check = qede_features_check, |
|---|
| 713 | + .ndo_bpf = qede_xdp, |
|---|
| 714 | + .ndo_xdp_xmit = qede_xdp_transmit, |
|---|
| 704 | 715 | }; |
|---|
| 705 | 716 | |
|---|
| 706 | 717 | /* ------------------------------------------------------------------------- |
|---|
| .. | .. |
|---|
| 793 | 804 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
|---|
| 794 | 805 | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC; |
|---|
| 795 | 806 | |
|---|
| 796 | | - if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) |
|---|
| 807 | + if (edev->dev_info.common.b_arfs_capable) |
|---|
| 797 | 808 | hw_features |= NETIF_F_NTUPLE; |
|---|
| 798 | 809 | |
|---|
| 799 | 810 | if (edev->dev_info.common.vxlan_enable || |
|---|
| .. | .. |
|---|
| 813 | 824 | NETIF_F_GSO_UDP_TUNNEL_CSUM); |
|---|
| 814 | 825 | ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL | |
|---|
| 815 | 826 | NETIF_F_GSO_UDP_TUNNEL_CSUM); |
|---|
| 827 | + |
|---|
| 828 | + qede_set_udp_tunnels(edev); |
|---|
| 816 | 829 | } |
|---|
| 817 | 830 | |
|---|
| 818 | 831 | if (edev->dev_info.common.gre_enable) { |
|---|
| .. | .. |
|---|
| 960 | 973 | return -ENOMEM; |
|---|
| 961 | 974 | } |
|---|
| 962 | 975 | |
|---|
| 976 | +/* The qede lock is used to protect driver state change and driver flows that |
|---|
| 977 | + * are not reentrant. |
|---|
| 978 | + */ |
|---|
| 979 | +void __qede_lock(struct qede_dev *edev) |
|---|
| 980 | +{ |
|---|
| 981 | + mutex_lock(&edev->qede_lock); |
|---|
| 982 | +} |
|---|
| 983 | + |
|---|
| 984 | +void __qede_unlock(struct qede_dev *edev) |
|---|
| 985 | +{ |
|---|
| 986 | + mutex_unlock(&edev->qede_lock); |
|---|
| 987 | +} |
|---|
| 988 | + |
|---|
| 989 | +/* This version of the lock should be used when acquiring the RTNL lock is also |
|---|
| 990 | + * needed in addition to the internal qede lock. |
|---|
| 991 | + */ |
|---|
| 992 | +static void qede_lock(struct qede_dev *edev) |
|---|
| 993 | +{ |
|---|
| 994 | + rtnl_lock(); |
|---|
| 995 | + __qede_lock(edev); |
|---|
| 996 | +} |
|---|
| 997 | + |
|---|
| 998 | +static void qede_unlock(struct qede_dev *edev) |
|---|
| 999 | +{ |
|---|
| 1000 | + __qede_unlock(edev); |
|---|
| 1001 | + rtnl_unlock(); |
|---|
| 1002 | +} |
|---|
| 1003 | + |
|---|
| 963 | 1004 | static void qede_sp_task(struct work_struct *work) |
|---|
| 964 | 1005 | { |
|---|
| 965 | 1006 | struct qede_dev *edev = container_of(work, struct qede_dev, |
|---|
| 966 | 1007 | sp_task.work); |
|---|
| 1008 | + |
|---|
| 1009 | + /* Disable execution of this deferred work once |
|---|
| 1010 | + * qede removal is in progress; this stops any future |
|---|
| 1011 | + * scheduling of sp_task. |
|---|
| 1012 | + */ |
|---|
| 1013 | + if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags)) |
|---|
| 1014 | + return; |
|---|
| 1015 | + |
|---|
| 1016 | + /* The locking scheme depends on the specific flag: |
|---|
| 1017 | + * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to |
|---|
| 1018 | + * ensure that ongoing flows are ended and new ones are not started. |
|---|
| 1019 | + * In other cases - only the internal qede lock should be acquired. |
|---|
| 1020 | + */ |
|---|
| 1021 | + |
|---|
| 1022 | + if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) { |
|---|
| 1023 | +#ifdef CONFIG_QED_SRIOV |
|---|
| 1024 | + /* SRIOV must be disabled outside the lock to avoid a deadlock. |
|---|
| 1025 | + * The recovery of the active VFs is currently not supported. |
|---|
| 1026 | + */ |
|---|
| 1027 | + if (pci_num_vf(edev->pdev)) |
|---|
| 1028 | + qede_sriov_configure(edev->pdev, 0); |
|---|
| 1029 | +#endif |
|---|
| 1030 | + qede_lock(edev); |
|---|
| 1031 | + qede_recovery_handler(edev); |
|---|
| 1032 | + qede_unlock(edev); |
|---|
| 1033 | + } |
|---|
| 967 | 1034 | |
|---|
| 968 | 1035 | __qede_lock(edev); |
|---|
| 969 | 1036 | |
|---|
| .. | .. |
|---|
| 977 | 1044 | qede_process_arfs_filters(edev, false); |
|---|
| 978 | 1045 | } |
|---|
| 979 | 1046 | #endif |
|---|
| 1047 | + if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags)) |
|---|
| 1048 | + qede_generic_hw_err_handler(edev); |
|---|
| 980 | 1049 | __qede_unlock(edev); |
|---|
| 1050 | + |
|---|
| 1051 | + if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) { |
|---|
| 1052 | +#ifdef CONFIG_QED_SRIOV |
|---|
| 1053 | + /* SRIOV must be disabled outside the lock to avoid a deadlock. |
|---|
| 1054 | + * The recovery of the active VFs is currently not supported. |
|---|
| 1055 | + */ |
|---|
| 1056 | + if (pci_num_vf(edev->pdev)) |
|---|
| 1057 | + qede_sriov_configure(edev->pdev, 0); |
|---|
| 1058 | +#endif |
|---|
| 1059 | + edev->ops->common->recovery_process(edev->cdev); |
|---|
| 1060 | + } |
|---|
| 981 | 1061 | } |
|---|
| 982 | 1062 | |
|---|
| 983 | 1063 | static void qede_update_pf_params(struct qed_dev *cdev) |
|---|
| .. | .. |
|---|
| 1041 | 1121 | |
|---|
| 1042 | 1122 | enum qede_probe_mode { |
|---|
| 1043 | 1123 | QEDE_PROBE_NORMAL, |
|---|
| 1124 | + QEDE_PROBE_RECOVERY, |
|---|
| 1044 | 1125 | }; |
|---|
| 1045 | 1126 | |
|---|
| 1046 | 1127 | static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, |
|---|
| .. | .. |
|---|
| 1061 | 1142 | probe_params.dp_module = dp_module; |
|---|
| 1062 | 1143 | probe_params.dp_level = dp_level; |
|---|
| 1063 | 1144 | probe_params.is_vf = is_vf; |
|---|
| 1145 | + probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY); |
|---|
| 1064 | 1146 | cdev = qed_ops->common->probe(pdev, &probe_params); |
|---|
| 1065 | 1147 | if (!cdev) { |
|---|
| 1066 | 1148 | rc = -ENODEV; |
|---|
| .. | .. |
|---|
| 1088 | 1170 | if (rc) |
|---|
| 1089 | 1171 | goto err2; |
|---|
| 1090 | 1172 | |
|---|
| 1091 | | - edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module, |
|---|
| 1092 | | - dp_level); |
|---|
| 1093 | | - if (!edev) { |
|---|
| 1094 | | - rc = -ENOMEM; |
|---|
| 1095 | | - goto err2; |
|---|
| 1173 | + if (mode != QEDE_PROBE_RECOVERY) { |
|---|
| 1174 | + edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module, |
|---|
| 1175 | + dp_level); |
|---|
| 1176 | + if (!edev) { |
|---|
| 1177 | + rc = -ENOMEM; |
|---|
| 1178 | + goto err2; |
|---|
| 1179 | + } |
|---|
| 1180 | + |
|---|
| 1181 | + edev->devlink = qed_ops->common->devlink_register(cdev); |
|---|
| 1182 | + if (IS_ERR(edev->devlink)) { |
|---|
| 1183 | + DP_NOTICE(edev, "Cannot register devlink\n"); |
|---|
| 1184 | + edev->devlink = NULL; |
|---|
| 1185 | + /* Go on, we can live without devlink */ |
|---|
| 1186 | + } |
|---|
| 1187 | + } else { |
|---|
| 1188 | + struct net_device *ndev = pci_get_drvdata(pdev); |
|---|
| 1189 | + |
|---|
| 1190 | + edev = netdev_priv(ndev); |
|---|
| 1191 | + |
|---|
| 1192 | + if (edev->devlink) { |
|---|
| 1193 | + struct qed_devlink *qdl = devlink_priv(edev->devlink); |
|---|
| 1194 | + |
|---|
| 1195 | + qdl->cdev = cdev; |
|---|
| 1196 | + } |
|---|
| 1197 | + edev->cdev = cdev; |
|---|
| 1198 | + memset(&edev->stats, 0, sizeof(edev->stats)); |
|---|
| 1199 | + memcpy(&edev->dev_info, &dev_info, sizeof(dev_info)); |
|---|
| 1096 | 1200 | } |
|---|
| 1097 | 1201 | |
|---|
| 1098 | 1202 | if (is_vf) |
|---|
| 1099 | | - edev->flags |= QEDE_FLAG_IS_VF; |
|---|
| 1203 | + set_bit(QEDE_FLAGS_IS_VF, &edev->flags); |
|---|
| 1100 | 1204 | |
|---|
| 1101 | 1205 | qede_init_ndev(edev); |
|---|
| 1102 | 1206 | |
|---|
| 1103 | | - rc = qede_rdma_dev_add(edev); |
|---|
| 1207 | + rc = qede_rdma_dev_add(edev, (mode == QEDE_PROBE_RECOVERY)); |
|---|
| 1104 | 1208 | if (rc) |
|---|
| 1105 | 1209 | goto err3; |
|---|
| 1106 | 1210 | |
|---|
| 1107 | | - /* Prepare the lock prior to the registration of the netdev, |
|---|
| 1108 | | - * as once it's registered we might reach flows requiring it |
|---|
| 1109 | | - * [it's even possible to reach a flow needing it directly |
|---|
| 1110 | | - * from there, although it's unlikely]. |
|---|
| 1111 | | - */ |
|---|
| 1112 | | - INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); |
|---|
| 1113 | | - mutex_init(&edev->qede_lock); |
|---|
| 1114 | | - rc = register_netdev(edev->ndev); |
|---|
| 1115 | | - if (rc) { |
|---|
| 1116 | | - DP_NOTICE(edev, "Cannot register net-device\n"); |
|---|
| 1117 | | - goto err4; |
|---|
| 1211 | + if (mode != QEDE_PROBE_RECOVERY) { |
|---|
| 1212 | + /* Prepare the lock prior to the registration of the netdev, |
|---|
| 1213 | + * as once it's registered we might reach flows requiring it |
|---|
| 1214 | + * [it's even possible to reach a flow needing it directly |
|---|
| 1215 | + * from there, although it's unlikely]. |
|---|
| 1216 | + */ |
|---|
| 1217 | + INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); |
|---|
| 1218 | + mutex_init(&edev->qede_lock); |
|---|
| 1219 | + |
|---|
| 1220 | + rc = register_netdev(edev->ndev); |
|---|
| 1221 | + if (rc) { |
|---|
| 1222 | + DP_NOTICE(edev, "Cannot register net-device\n"); |
|---|
| 1223 | + goto err4; |
|---|
| 1224 | + } |
|---|
| 1118 | 1225 | } |
|---|
| 1119 | 1226 | |
|---|
| 1120 | 1227 | edev->ops->common->set_name(cdev, edev->ndev->name); |
|---|
| 1121 | 1228 | |
|---|
| 1122 | 1229 | /* PTP not supported on VFs */ |
|---|
| 1123 | 1230 | if (!is_vf) |
|---|
| 1124 | | - qede_ptp_enable(edev, true); |
|---|
| 1231 | + qede_ptp_enable(edev); |
|---|
| 1125 | 1232 | |
|---|
| 1126 | 1233 | edev->ops->register_ops(cdev, &qede_ll_ops, edev); |
|---|
| 1127 | 1234 | |
|---|
| .. | .. |
|---|
| 1136 | 1243 | return 0; |
|---|
| 1137 | 1244 | |
|---|
| 1138 | 1245 | err4: |
|---|
| 1139 | | - qede_rdma_dev_remove(edev); |
|---|
| 1246 | + qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY)); |
|---|
| 1140 | 1247 | err3: |
|---|
| 1141 | | - free_netdev(edev->ndev); |
|---|
| 1248 | + if (mode != QEDE_PROBE_RECOVERY) |
|---|
| 1249 | + free_netdev(edev->ndev); |
|---|
| 1250 | + else |
|---|
| 1251 | + edev->cdev = NULL; |
|---|
| 1142 | 1252 | err2: |
|---|
| 1143 | 1253 | qed_ops->common->slowpath_stop(cdev); |
|---|
| 1144 | 1254 | err1: |
|---|
| .. | .. |
|---|
| 1172 | 1282 | |
|---|
| 1173 | 1283 | enum qede_remove_mode { |
|---|
| 1174 | 1284 | QEDE_REMOVE_NORMAL, |
|---|
| 1285 | + QEDE_REMOVE_RECOVERY, |
|---|
| 1175 | 1286 | }; |
|---|
| 1176 | 1287 | |
|---|
| 1177 | 1288 | static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) |
|---|
| .. | .. |
|---|
| 1190 | 1301 | |
|---|
| 1191 | 1302 | DP_INFO(edev, "Starting qede_remove\n"); |
|---|
| 1192 | 1303 | |
|---|
| 1193 | | - qede_rdma_dev_remove(edev); |
|---|
| 1194 | | - unregister_netdev(ndev); |
|---|
| 1195 | | - cancel_delayed_work_sync(&edev->sp_task); |
|---|
| 1304 | + qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY)); |
|---|
| 1305 | + |
|---|
| 1306 | + if (mode != QEDE_REMOVE_RECOVERY) { |
|---|
| 1307 | + set_bit(QEDE_SP_DISABLE, &edev->sp_flags); |
|---|
| 1308 | + unregister_netdev(ndev); |
|---|
| 1309 | + |
|---|
| 1310 | + cancel_delayed_work_sync(&edev->sp_task); |
|---|
| 1311 | + |
|---|
| 1312 | + edev->ops->common->set_power_state(cdev, PCI_D0); |
|---|
| 1313 | + |
|---|
| 1314 | + pci_set_drvdata(pdev, NULL); |
|---|
| 1315 | + } |
|---|
| 1196 | 1316 | |
|---|
| 1197 | 1317 | qede_ptp_disable(edev); |
|---|
| 1198 | | - |
|---|
| 1199 | | - edev->ops->common->set_power_state(cdev, PCI_D0); |
|---|
| 1200 | | - |
|---|
| 1201 | | - pci_set_drvdata(pdev, NULL); |
|---|
| 1202 | 1318 | |
|---|
| 1203 | 1319 | /* Use global ops since we've freed edev */ |
|---|
| 1204 | 1320 | qed_ops->common->slowpath_stop(cdev); |
|---|
| 1205 | 1321 | if (system_state == SYSTEM_POWER_OFF) |
|---|
| 1206 | 1322 | return; |
|---|
| 1323 | + |
|---|
| 1324 | + if (mode != QEDE_REMOVE_RECOVERY && edev->devlink) { |
|---|
| 1325 | + qed_ops->common->devlink_unregister(edev->devlink); |
|---|
| 1326 | + edev->devlink = NULL; |
|---|
| 1327 | + } |
|---|
| 1207 | 1328 | qed_ops->common->remove(cdev); |
|---|
| 1329 | + edev->cdev = NULL; |
|---|
| 1208 | 1330 | |
|---|
| 1209 | 1331 | /* Since this can happen out-of-sync with other flows, |
|---|
| 1210 | 1332 | * don't release the netdevice until after slowpath stop |
|---|
| .. | .. |
|---|
| 1212 | 1334 | * [e.g., QED register callbacks] won't break anything when |
|---|
| 1213 | 1335 | * accessing the netdevice. |
|---|
| 1214 | 1336 | */ |
|---|
| 1215 | | - free_netdev(ndev); |
|---|
| 1337 | + if (mode != QEDE_REMOVE_RECOVERY) |
|---|
| 1338 | + free_netdev(ndev); |
|---|
| 1216 | 1339 | |
|---|
| 1217 | 1340 | dev_info(&pdev->dev, "Ending qede_remove successfully\n"); |
|---|
| 1218 | 1341 | } |
|---|
| .. | .. |
|---|
| 1265 | 1388 | u16 sb_id) |
|---|
| 1266 | 1389 | { |
|---|
| 1267 | 1390 | if (sb_info->sb_virt) { |
|---|
| 1268 | | - edev->ops->common->sb_release(edev->cdev, sb_info, sb_id); |
|---|
| 1391 | + edev->ops->common->sb_release(edev->cdev, sb_info, sb_id, |
|---|
| 1392 | + QED_SB_TYPE_L2_QUEUE); |
|---|
| 1269 | 1393 | dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt), |
|---|
| 1270 | 1394 | (void *)sb_info->sb_virt, sb_info->sb_phys); |
|---|
| 1271 | 1395 | memset(sb_info, 0, sizeof(*sb_info)); |
|---|
| .. | .. |
|---|
| 1347 | 1471 | /* This function allocates all memory needed per Rx queue */ |
|---|
| 1348 | 1472 | static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) |
|---|
| 1349 | 1473 | { |
|---|
| 1474 | + struct qed_chain_init_params params = { |
|---|
| 1475 | + .cnt_type = QED_CHAIN_CNT_TYPE_U16, |
|---|
| 1476 | + .num_elems = RX_RING_SIZE, |
|---|
| 1477 | + }; |
|---|
| 1478 | + struct qed_dev *cdev = edev->cdev; |
|---|
| 1350 | 1479 | int i, rc, size; |
|---|
| 1351 | 1480 | |
|---|
| 1352 | 1481 | rxq->num_rx_buffers = edev->q_num_rx_buffers; |
|---|
| .. | .. |
|---|
| 1361 | 1490 | if (rxq->rx_buf_size + size > PAGE_SIZE) |
|---|
| 1362 | 1491 | rxq->rx_buf_size = PAGE_SIZE - size; |
|---|
| 1363 | 1492 | |
|---|
| 1364 | | - /* Segment size to spilt a page in multiple equal parts , |
|---|
| 1493 | + /* Segment size to split a page in multiple equal parts, |
|---|
| 1365 | 1494 | * unless XDP is used in which case we'd use the entire page. |
|---|
| 1366 | 1495 | */ |
|---|
| 1367 | 1496 | if (!edev->xdp_prog) { |
|---|
| .. | .. |
|---|
| 1382 | 1511 | } |
|---|
| 1383 | 1512 | |
|---|
| 1384 | 1513 | /* Allocate FW Rx ring */ |
|---|
| 1385 | | - rc = edev->ops->common->chain_alloc(edev->cdev, |
|---|
| 1386 | | - QED_CHAIN_USE_TO_CONSUME_PRODUCE, |
|---|
| 1387 | | - QED_CHAIN_MODE_NEXT_PTR, |
|---|
| 1388 | | - QED_CHAIN_CNT_TYPE_U16, |
|---|
| 1389 | | - RX_RING_SIZE, |
|---|
| 1390 | | - sizeof(struct eth_rx_bd), |
|---|
| 1391 | | - &rxq->rx_bd_ring, NULL); |
|---|
| 1514 | + params.mode = QED_CHAIN_MODE_NEXT_PTR; |
|---|
| 1515 | + params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE; |
|---|
| 1516 | + params.elem_size = sizeof(struct eth_rx_bd); |
|---|
| 1517 | + |
|---|
| 1518 | + rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, ¶ms); |
|---|
| 1392 | 1519 | if (rc) |
|---|
| 1393 | 1520 | goto err; |
|---|
| 1394 | 1521 | |
|---|
| 1395 | 1522 | /* Allocate FW completion ring */ |
|---|
| 1396 | | - rc = edev->ops->common->chain_alloc(edev->cdev, |
|---|
| 1397 | | - QED_CHAIN_USE_TO_CONSUME, |
|---|
| 1398 | | - QED_CHAIN_MODE_PBL, |
|---|
| 1399 | | - QED_CHAIN_CNT_TYPE_U16, |
|---|
| 1400 | | - RX_RING_SIZE, |
|---|
| 1401 | | - sizeof(union eth_rx_cqe), |
|---|
| 1402 | | - &rxq->rx_comp_ring, NULL); |
|---|
| 1523 | + params.mode = QED_CHAIN_MODE_PBL; |
|---|
| 1524 | + params.intended_use = QED_CHAIN_USE_TO_CONSUME; |
|---|
| 1525 | + params.elem_size = sizeof(union eth_rx_cqe); |
|---|
| 1526 | + |
|---|
| 1527 | + rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_comp_ring, ¶ms); |
|---|
| 1403 | 1528 | if (rc) |
|---|
| 1404 | 1529 | goto err; |
|---|
| 1405 | 1530 | |
|---|
| .. | .. |
|---|
| 1436 | 1561 | /* This function allocates all memory needed per Tx queue */ |
|---|
| 1437 | 1562 | static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) |
|---|
| 1438 | 1563 | { |
|---|
| 1439 | | - union eth_tx_bd_types *p_virt; |
|---|
| 1564 | + struct qed_chain_init_params params = { |
|---|
| 1565 | + .mode = QED_CHAIN_MODE_PBL, |
|---|
| 1566 | + .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, |
|---|
| 1567 | + .cnt_type = QED_CHAIN_CNT_TYPE_U16, |
|---|
| 1568 | + .num_elems = edev->q_num_tx_buffers, |
|---|
| 1569 | + .elem_size = sizeof(union eth_tx_bd_types), |
|---|
| 1570 | + }; |
|---|
| 1440 | 1571 | int size, rc; |
|---|
| 1441 | 1572 | |
|---|
| 1442 | 1573 | txq->num_tx_buffers = edev->q_num_tx_buffers; |
|---|
| .. | .. |
|---|
| 1454 | 1585 | goto err; |
|---|
| 1455 | 1586 | } |
|---|
| 1456 | 1587 | |
|---|
| 1457 | | - rc = edev->ops->common->chain_alloc(edev->cdev, |
|---|
| 1458 | | - QED_CHAIN_USE_TO_CONSUME_PRODUCE, |
|---|
| 1459 | | - QED_CHAIN_MODE_PBL, |
|---|
| 1460 | | - QED_CHAIN_CNT_TYPE_U16, |
|---|
| 1461 | | - txq->num_tx_buffers, |
|---|
| 1462 | | - sizeof(*p_virt), |
|---|
| 1463 | | - &txq->tx_pbl, NULL); |
|---|
| 1588 | + rc = edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl, ¶ms); |
|---|
| 1464 | 1589 | if (rc) |
|---|
| 1465 | 1590 | goto err; |
|---|
| 1466 | 1591 | |
|---|
| .. | .. |
|---|
| 1559 | 1684 | return 0; |
|---|
| 1560 | 1685 | } |
|---|
| 1561 | 1686 | |
|---|
| 1687 | +static void qede_empty_tx_queue(struct qede_dev *edev, |
|---|
| 1688 | + struct qede_tx_queue *txq) |
|---|
| 1689 | +{ |
|---|
| 1690 | + unsigned int pkts_compl = 0, bytes_compl = 0; |
|---|
| 1691 | + struct netdev_queue *netdev_txq; |
|---|
| 1692 | + int rc, len = 0; |
|---|
| 1693 | + |
|---|
| 1694 | + netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id); |
|---|
| 1695 | + |
|---|
| 1696 | + while (qed_chain_get_cons_idx(&txq->tx_pbl) != |
|---|
| 1697 | + qed_chain_get_prod_idx(&txq->tx_pbl)) { |
|---|
| 1698 | + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, |
|---|
| 1699 | + "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n", |
|---|
| 1700 | + txq->index, qed_chain_get_cons_idx(&txq->tx_pbl), |
|---|
| 1701 | + qed_chain_get_prod_idx(&txq->tx_pbl)); |
|---|
| 1702 | + |
|---|
| 1703 | + rc = qede_free_tx_pkt(edev, txq, &len); |
|---|
| 1704 | + if (rc) { |
|---|
| 1705 | + DP_NOTICE(edev, |
|---|
| 1706 | + "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n", |
|---|
| 1707 | + txq->index, |
|---|
| 1708 | + qed_chain_get_cons_idx(&txq->tx_pbl), |
|---|
| 1709 | + qed_chain_get_prod_idx(&txq->tx_pbl)); |
|---|
| 1710 | + break; |
|---|
| 1711 | + } |
|---|
| 1712 | + |
|---|
| 1713 | + bytes_compl += len; |
|---|
| 1714 | + pkts_compl++; |
|---|
| 1715 | + txq->sw_tx_cons++; |
|---|
| 1716 | + } |
|---|
| 1717 | + |
|---|
| 1718 | + netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl); |
|---|
| 1719 | +} |
|---|
| 1720 | + |
|---|
| 1721 | +static void qede_empty_tx_queues(struct qede_dev *edev) |
|---|
| 1722 | +{ |
|---|
| 1723 | + int i; |
|---|
| 1724 | + |
|---|
| 1725 | + for_each_queue(i) |
|---|
| 1726 | + if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { |
|---|
| 1727 | + int cos; |
|---|
| 1728 | + |
|---|
| 1729 | + for_each_cos_in_txq(edev, cos) { |
|---|
| 1730 | + struct qede_fastpath *fp; |
|---|
| 1731 | + |
|---|
| 1732 | + fp = &edev->fp_array[i]; |
|---|
| 1733 | + qede_empty_tx_queue(edev, |
|---|
| 1734 | + &fp->txq[cos]); |
|---|
| 1735 | + } |
|---|
| 1736 | + } |
|---|
| 1737 | +} |
|---|
| 1738 | + |
|---|
| 1562 | 1739 | /* This function inits fp content and resets the SB, RXQ and TXQ structures */ |
|---|
| 1563 | 1740 | static void qede_init_fp(struct qede_dev *edev) |
|---|
| 1564 | 1741 | { |
|---|
| 1565 | 1742 | int queue_id, rxq_index = 0, txq_index = 0; |
|---|
| 1566 | 1743 | struct qede_fastpath *fp; |
|---|
| 1744 | + bool init_xdp = false; |
|---|
| 1567 | 1745 | |
|---|
| 1568 | 1746 | for_each_queue(queue_id) { |
|---|
| 1569 | 1747 | fp = &edev->fp_array[queue_id]; |
|---|
| .. | .. |
|---|
| 1575 | 1753 | fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev, |
|---|
| 1576 | 1754 | rxq_index); |
|---|
| 1577 | 1755 | fp->xdp_tx->is_xdp = 1; |
|---|
| 1756 | + |
|---|
| 1757 | + spin_lock_init(&fp->xdp_tx->xdp_tx_lock); |
|---|
| 1758 | + init_xdp = true; |
|---|
| 1578 | 1759 | } |
|---|
| 1579 | 1760 | |
|---|
| 1580 | 1761 | if (fp->type & QEDE_FASTPATH_RX) { |
|---|
| .. | .. |
|---|
| 1590 | 1771 | /* Driver have no error path from here */ |
|---|
| 1591 | 1772 | WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev, |
|---|
| 1592 | 1773 | fp->rxq->rxq_id) < 0); |
|---|
| 1774 | + |
|---|
| 1775 | + if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq, |
|---|
| 1776 | + MEM_TYPE_PAGE_ORDER0, |
|---|
| 1777 | + NULL)) { |
|---|
| 1778 | + DP_NOTICE(edev, |
|---|
| 1779 | + "Failed to register XDP memory model\n"); |
|---|
| 1780 | + } |
|---|
| 1593 | 1781 | } |
|---|
| 1594 | 1782 | |
|---|
| 1595 | 1783 | if (fp->type & QEDE_FASTPATH_TX) { |
|---|
| .. | .. |
|---|
| 1605 | 1793 | txq->ndev_txq_id = ndev_tx_id; |
|---|
| 1606 | 1794 | |
|---|
| 1607 | 1795 | if (edev->dev_info.is_legacy) |
|---|
| 1608 | | - txq->is_legacy = 1; |
|---|
| 1796 | + txq->is_legacy = true; |
|---|
| 1609 | 1797 | txq->dev = &edev->pdev->dev; |
|---|
| 1610 | 1798 | } |
|---|
| 1611 | 1799 | |
|---|
| .. | .. |
|---|
| 1614 | 1802 | |
|---|
| 1615 | 1803 | snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", |
|---|
| 1616 | 1804 | edev->ndev->name, queue_id); |
|---|
| 1805 | + } |
|---|
| 1806 | + |
|---|
| 1807 | + if (init_xdp) { |
|---|
| 1808 | + edev->total_xdp_queues = QEDE_RSS_COUNT(edev); |
|---|
| 1809 | + DP_INFO(edev, "Total XDP queues: %u\n", edev->total_xdp_queues); |
|---|
| 1617 | 1810 | } |
|---|
| 1618 | 1811 | } |
|---|
| 1619 | 1812 | |
|---|
| .. | .. |
|---|
| 1793 | 1986 | static int qede_stop_txq(struct qede_dev *edev, |
|---|
| 1794 | 1987 | struct qede_tx_queue *txq, int rss_id) |
|---|
| 1795 | 1988 | { |
|---|
| 1989 | + /* delete doorbell from doorbell recovery mechanism */ |
|---|
| 1990 | + edev->ops->common->db_recovery_del(edev->cdev, txq->doorbell_addr, |
|---|
| 1991 | + &txq->tx_db); |
|---|
| 1992 | + |
|---|
| 1796 | 1993 | return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle); |
|---|
| 1797 | 1994 | } |
|---|
| 1798 | 1995 | |
|---|
| .. | .. |
|---|
| 1929 | 2126 | DQ_XCM_ETH_TX_BD_PROD_CMD); |
|---|
| 1930 | 2127 | txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; |
|---|
| 1931 | 2128 | |
|---|
| 2129 | + /* register doorbell with doorbell recovery mechanism */ |
|---|
| 2130 | + rc = edev->ops->common->db_recovery_add(edev->cdev, txq->doorbell_addr, |
|---|
| 2131 | + &txq->tx_db, DB_REC_WIDTH_32B, |
|---|
| 2132 | + DB_REC_KERNEL); |
|---|
| 2133 | + |
|---|
| 1932 | 2134 | return rc; |
|---|
| 1933 | 2135 | } |
|---|
| 1934 | 2136 | |
|---|
| .. | .. |
|---|
| 2018 | 2220 | if (rc) |
|---|
| 2019 | 2221 | goto out; |
|---|
| 2020 | 2222 | |
|---|
| 2021 | | - fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1); |
|---|
| 2022 | | - if (IS_ERR(fp->rxq->xdp_prog)) { |
|---|
| 2023 | | - rc = PTR_ERR(fp->rxq->xdp_prog); |
|---|
| 2024 | | - fp->rxq->xdp_prog = NULL; |
|---|
| 2025 | | - goto out; |
|---|
| 2026 | | - } |
|---|
| 2223 | + bpf_prog_add(edev->xdp_prog, 1); |
|---|
| 2224 | + fp->rxq->xdp_prog = edev->xdp_prog; |
|---|
| 2027 | 2225 | } |
|---|
| 2028 | 2226 | |
|---|
| 2029 | 2227 | if (fp->type & QEDE_FASTPATH_TX) { |
|---|
| .. | .. |
|---|
| 2063 | 2261 | |
|---|
| 2064 | 2262 | enum qede_unload_mode { |
|---|
| 2065 | 2263 | QEDE_UNLOAD_NORMAL, |
|---|
| 2264 | + QEDE_UNLOAD_RECOVERY, |
|---|
| 2066 | 2265 | }; |
|---|
| 2067 | 2266 | |
|---|
| 2068 | 2267 | static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, |
|---|
| .. | .. |
|---|
| 2076 | 2275 | if (!is_locked) |
|---|
| 2077 | 2276 | __qede_lock(edev); |
|---|
| 2078 | 2277 | |
|---|
| 2079 | | - edev->state = QEDE_STATE_CLOSED; |
|---|
| 2278 | + clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags); |
|---|
| 2279 | + |
|---|
| 2280 | + if (mode != QEDE_UNLOAD_RECOVERY) |
|---|
| 2281 | + edev->state = QEDE_STATE_CLOSED; |
|---|
| 2080 | 2282 | |
|---|
| 2081 | 2283 | qede_rdma_dev_event_close(edev); |
|---|
| 2082 | 2284 | |
|---|
| .. | .. |
|---|
| 2084 | 2286 | netif_tx_disable(edev->ndev); |
|---|
| 2085 | 2287 | netif_carrier_off(edev->ndev); |
|---|
| 2086 | 2288 | |
|---|
| 2087 | | - /* Reset the link */ |
|---|
| 2088 | | - memset(&link_params, 0, sizeof(link_params)); |
|---|
| 2089 | | - link_params.link_up = false; |
|---|
| 2090 | | - edev->ops->common->set_link(edev->cdev, &link_params); |
|---|
| 2091 | | - rc = qede_stop_queues(edev); |
|---|
| 2092 | | - if (rc) { |
|---|
| 2093 | | - qede_sync_free_irqs(edev); |
|---|
| 2094 | | - goto out; |
|---|
| 2095 | | - } |
|---|
| 2289 | + if (mode != QEDE_UNLOAD_RECOVERY) { |
|---|
| 2290 | + /* Reset the link */ |
|---|
| 2291 | + memset(&link_params, 0, sizeof(link_params)); |
|---|
| 2292 | + link_params.link_up = false; |
|---|
| 2293 | + edev->ops->common->set_link(edev->cdev, &link_params); |
|---|
| 2096 | 2294 | |
|---|
| 2097 | | - DP_INFO(edev, "Stopped Queues\n"); |
|---|
| 2295 | + rc = qede_stop_queues(edev); |
|---|
| 2296 | + if (rc) { |
|---|
| 2297 | + qede_sync_free_irqs(edev); |
|---|
| 2298 | + goto out; |
|---|
| 2299 | + } |
|---|
| 2300 | + |
|---|
| 2301 | + DP_INFO(edev, "Stopped Queues\n"); |
|---|
| 2302 | + } |
|---|
| 2098 | 2303 | |
|---|
| 2099 | 2304 | qede_vlan_mark_nonconfigured(edev); |
|---|
| 2100 | 2305 | edev->ops->fastpath_stop(edev->cdev); |
|---|
| 2101 | 2306 | |
|---|
| 2102 | | - if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) { |
|---|
| 2307 | + if (edev->dev_info.common.b_arfs_capable) { |
|---|
| 2103 | 2308 | qede_poll_for_freeing_arfs_filters(edev); |
|---|
| 2104 | 2309 | qede_free_arfs(edev); |
|---|
| 2105 | 2310 | } |
|---|
| .. | .. |
|---|
| 2110 | 2315 | |
|---|
| 2111 | 2316 | qede_napi_disable_remove(edev); |
|---|
| 2112 | 2317 | |
|---|
| 2318 | + if (mode == QEDE_UNLOAD_RECOVERY) |
|---|
| 2319 | + qede_empty_tx_queues(edev); |
|---|
| 2320 | + |
|---|
| 2113 | 2321 | qede_free_mem_load(edev); |
|---|
| 2114 | 2322 | qede_free_fp_array(edev); |
|---|
| 2115 | 2323 | |
|---|
| 2116 | 2324 | out: |
|---|
| 2117 | 2325 | if (!is_locked) |
|---|
| 2118 | 2326 | __qede_unlock(edev); |
|---|
| 2327 | + |
|---|
| 2328 | + if (mode != QEDE_UNLOAD_RECOVERY) |
|---|
| 2329 | + DP_NOTICE(edev, "Link is down\n"); |
|---|
| 2330 | + |
|---|
| 2331 | + edev->ptp_skip_txts = 0; |
|---|
| 2332 | + |
|---|
| 2119 | 2333 | DP_INFO(edev, "Ending qede unload\n"); |
|---|
| 2120 | 2334 | } |
|---|
| 2121 | 2335 | |
|---|
| 2122 | 2336 | enum qede_load_mode { |
|---|
| 2123 | 2337 | QEDE_LOAD_NORMAL, |
|---|
| 2124 | 2338 | QEDE_LOAD_RELOAD, |
|---|
| 2339 | + QEDE_LOAD_RECOVERY, |
|---|
| 2125 | 2340 | }; |
|---|
| 2126 | 2341 | |
|---|
| 2127 | 2342 | static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, |
|---|
| .. | .. |
|---|
| 2156 | 2371 | if (rc) |
|---|
| 2157 | 2372 | goto err2; |
|---|
| 2158 | 2373 | |
|---|
| 2159 | | - if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) { |
|---|
| 2160 | | - rc = qede_alloc_arfs(edev); |
|---|
| 2161 | | - if (rc) |
|---|
| 2162 | | - DP_NOTICE(edev, "aRFS memory allocation failed\n"); |
|---|
| 2374 | + if (qede_alloc_arfs(edev)) { |
|---|
| 2375 | + edev->ndev->features &= ~NETIF_F_NTUPLE; |
|---|
| 2376 | + edev->dev_info.common.b_arfs_capable = false; |
|---|
| 2163 | 2377 | } |
|---|
| 2164 | 2378 | |
|---|
| 2165 | 2379 | qede_napi_add_enable(edev); |
|---|
| .. | .. |
|---|
| 2181 | 2395 | |
|---|
| 2182 | 2396 | /* Program un-configured VLANs */ |
|---|
| 2183 | 2397 | qede_configure_vlan_filters(edev); |
|---|
| 2398 | + |
|---|
| 2399 | + set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags); |
|---|
| 2184 | 2400 | |
|---|
| 2185 | 2401 | /* Ask for link-up using current configuration */ |
|---|
| 2186 | 2402 | memset(&link_params, 0, sizeof(link_params)); |
|---|
| .. | .. |
|---|
| 2254 | 2470 | if (rc) |
|---|
| 2255 | 2471 | return rc; |
|---|
| 2256 | 2472 | |
|---|
| 2257 | | - udp_tunnel_get_rx_info(ndev); |
|---|
| 2473 | + udp_tunnel_nic_reset_ntf(ndev); |
|---|
| 2258 | 2474 | |
|---|
| 2259 | 2475 | edev->ops->common->update_drv_state(edev->cdev, true); |
|---|
| 2260 | 2476 | |
|---|
| .. | .. |
|---|
| 2267 | 2483 | |
|---|
| 2268 | 2484 | qede_unload(edev, QEDE_UNLOAD_NORMAL, false); |
|---|
| 2269 | 2485 | |
|---|
| 2270 | | - edev->ops->common->update_drv_state(edev->cdev, false); |
|---|
| 2486 | + if (edev->cdev) |
|---|
| 2487 | + edev->ops->common->update_drv_state(edev->cdev, false); |
|---|
| 2271 | 2488 | |
|---|
| 2272 | 2489 | return 0; |
|---|
| 2273 | 2490 | } |
|---|
| .. | .. |
|---|
| 2276 | 2493 | { |
|---|
| 2277 | 2494 | struct qede_dev *edev = dev; |
|---|
| 2278 | 2495 | |
|---|
| 2279 | | - if (!netif_running(edev->ndev)) { |
|---|
| 2280 | | - DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n"); |
|---|
| 2496 | + if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) { |
|---|
| 2497 | + DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n"); |
|---|
| 2281 | 2498 | return; |
|---|
| 2282 | 2499 | } |
|---|
| 2283 | 2500 | |
|---|
| .. | .. |
|---|
| 2296 | 2513 | qede_rdma_dev_event_close(edev); |
|---|
| 2297 | 2514 | } |
|---|
| 2298 | 2515 | } |
|---|
| 2516 | +} |
|---|
| 2517 | + |
|---|
| 2518 | +static void qede_schedule_recovery_handler(void *dev) |
|---|
| 2519 | +{ |
|---|
| 2520 | + struct qede_dev *edev = dev; |
|---|
| 2521 | + |
|---|
| 2522 | + if (edev->state == QEDE_STATE_RECOVERY) { |
|---|
| 2523 | + DP_NOTICE(edev, |
|---|
| 2524 | + "Avoid scheduling a recovery handling since already in recovery state\n"); |
|---|
| 2525 | + return; |
|---|
| 2526 | + } |
|---|
| 2527 | + |
|---|
| 2528 | + set_bit(QEDE_SP_RECOVERY, &edev->sp_flags); |
|---|
| 2529 | + schedule_delayed_work(&edev->sp_task, 0); |
|---|
| 2530 | + |
|---|
| 2531 | + DP_INFO(edev, "Scheduled a recovery handler\n"); |
|---|
| 2532 | +} |
|---|
| 2533 | + |
|---|
| 2534 | +static void qede_recovery_failed(struct qede_dev *edev) |
|---|
| 2535 | +{ |
|---|
| 2536 | + netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n"); |
|---|
| 2537 | + |
|---|
| 2538 | + netif_device_detach(edev->ndev); |
|---|
| 2539 | + |
|---|
| 2540 | + if (edev->cdev) |
|---|
| 2541 | + edev->ops->common->set_power_state(edev->cdev, PCI_D3hot); |
|---|
| 2542 | +} |
|---|
| 2543 | + |
|---|
| 2544 | +static void qede_recovery_handler(struct qede_dev *edev) |
|---|
| 2545 | +{ |
|---|
| 2546 | + u32 curr_state = edev->state; |
|---|
| 2547 | + int rc; |
|---|
| 2548 | + |
|---|
| 2549 | + DP_NOTICE(edev, "Starting a recovery process\n"); |
|---|
| 2550 | + |
|---|
| 2551 | + /* No need to acquire first the qede_lock since is done by qede_sp_task |
|---|
| 2552 | + * before calling this function. |
|---|
| 2553 | + */ |
|---|
| 2554 | + edev->state = QEDE_STATE_RECOVERY; |
|---|
| 2555 | + |
|---|
| 2556 | + edev->ops->common->recovery_prolog(edev->cdev); |
|---|
| 2557 | + |
|---|
| 2558 | + if (curr_state == QEDE_STATE_OPEN) |
|---|
| 2559 | + qede_unload(edev, QEDE_UNLOAD_RECOVERY, true); |
|---|
| 2560 | + |
|---|
| 2561 | + __qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY); |
|---|
| 2562 | + |
|---|
| 2563 | + rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level, |
|---|
| 2564 | + IS_VF(edev), QEDE_PROBE_RECOVERY); |
|---|
| 2565 | + if (rc) { |
|---|
| 2566 | + edev->cdev = NULL; |
|---|
| 2567 | + goto err; |
|---|
| 2568 | + } |
|---|
| 2569 | + |
|---|
| 2570 | + if (curr_state == QEDE_STATE_OPEN) { |
|---|
| 2571 | + rc = qede_load(edev, QEDE_LOAD_RECOVERY, true); |
|---|
| 2572 | + if (rc) |
|---|
| 2573 | + goto err; |
|---|
| 2574 | + |
|---|
| 2575 | + qede_config_rx_mode(edev->ndev); |
|---|
| 2576 | + udp_tunnel_nic_reset_ntf(edev->ndev); |
|---|
| 2577 | + } |
|---|
| 2578 | + |
|---|
| 2579 | + edev->state = curr_state; |
|---|
| 2580 | + |
|---|
| 2581 | + DP_NOTICE(edev, "Recovery handling is done\n"); |
|---|
| 2582 | + |
|---|
| 2583 | + return; |
|---|
| 2584 | + |
|---|
| 2585 | +err: |
|---|
| 2586 | + qede_recovery_failed(edev); |
|---|
| 2587 | +} |
|---|
| 2588 | + |
|---|
| 2589 | +static void qede_atomic_hw_err_handler(struct qede_dev *edev) |
|---|
| 2590 | +{ |
|---|
| 2591 | + struct qed_dev *cdev = edev->cdev; |
|---|
| 2592 | + |
|---|
| 2593 | + DP_NOTICE(edev, |
|---|
| 2594 | + "Generic non-sleepable HW error handling started - err_flags 0x%lx\n", |
|---|
| 2595 | + edev->err_flags); |
|---|
| 2596 | + |
|---|
| 2597 | + /* Get a call trace of the flow that led to the error */ |
|---|
| 2598 | + WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags)); |
|---|
| 2599 | + |
|---|
| 2600 | + /* Prevent HW attentions from being reasserted */ |
|---|
| 2601 | + if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags)) |
|---|
| 2602 | + edev->ops->common->attn_clr_enable(cdev, true); |
|---|
| 2603 | + |
|---|
| 2604 | + DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n"); |
|---|
| 2605 | +} |
|---|
| 2606 | + |
|---|
| 2607 | +static void qede_generic_hw_err_handler(struct qede_dev *edev) |
|---|
| 2608 | +{ |
|---|
| 2609 | + DP_NOTICE(edev, |
|---|
| 2610 | + "Generic sleepable HW error handling started - err_flags 0x%lx\n", |
|---|
| 2611 | + edev->err_flags); |
|---|
| 2612 | + |
|---|
| 2613 | + if (edev->devlink) |
|---|
| 2614 | + edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type); |
|---|
| 2615 | + |
|---|
| 2616 | + clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags); |
|---|
| 2617 | + |
|---|
| 2618 | + DP_NOTICE(edev, "Generic sleepable HW error handling is done\n"); |
|---|
| 2619 | +} |
|---|
| 2620 | + |
|---|
| 2621 | +static void qede_set_hw_err_flags(struct qede_dev *edev, |
|---|
| 2622 | + enum qed_hw_err_type err_type) |
|---|
| 2623 | +{ |
|---|
| 2624 | + unsigned long err_flags = 0; |
|---|
| 2625 | + |
|---|
| 2626 | + switch (err_type) { |
|---|
| 2627 | + case QED_HW_ERR_DMAE_FAIL: |
|---|
| 2628 | + set_bit(QEDE_ERR_WARN, &err_flags); |
|---|
| 2629 | + fallthrough; |
|---|
| 2630 | + case QED_HW_ERR_MFW_RESP_FAIL: |
|---|
| 2631 | + case QED_HW_ERR_HW_ATTN: |
|---|
| 2632 | + case QED_HW_ERR_RAMROD_FAIL: |
|---|
| 2633 | + case QED_HW_ERR_FW_ASSERT: |
|---|
| 2634 | + set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags); |
|---|
| 2635 | + set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags); |
|---|
| 2636 | + break; |
|---|
| 2637 | + |
|---|
| 2638 | + default: |
|---|
| 2639 | + DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type); |
|---|
| 2640 | + break; |
|---|
| 2641 | + } |
|---|
| 2642 | + |
|---|
| 2643 | + edev->err_flags |= err_flags; |
|---|
| 2644 | +} |
|---|
| 2645 | + |
|---|
| 2646 | +static void qede_schedule_hw_err_handler(void *dev, |
|---|
| 2647 | + enum qed_hw_err_type err_type) |
|---|
| 2648 | +{ |
|---|
| 2649 | + struct qede_dev *edev = dev; |
|---|
| 2650 | + |
|---|
| 2651 | + /* Fan failure cannot be masked by handling of another HW error or by a |
|---|
| 2652 | + * concurrent recovery process. |
|---|
| 2653 | + */ |
|---|
| 2654 | + if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) || |
|---|
| 2655 | + edev->state == QEDE_STATE_RECOVERY) && |
|---|
| 2656 | + err_type != QED_HW_ERR_FAN_FAIL) { |
|---|
| 2657 | + DP_INFO(edev, |
|---|
| 2658 | + "Avoid scheduling an error handling while another HW error is being handled\n"); |
|---|
| 2659 | + return; |
|---|
| 2660 | + } |
|---|
| 2661 | + |
|---|
| 2662 | + if (err_type >= QED_HW_ERR_LAST) { |
|---|
| 2663 | + DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type); |
|---|
| 2664 | + clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags); |
|---|
| 2665 | + return; |
|---|
| 2666 | + } |
|---|
| 2667 | + |
|---|
| 2668 | + edev->last_err_type = err_type; |
|---|
| 2669 | + qede_set_hw_err_flags(edev, err_type); |
|---|
| 2670 | + qede_atomic_hw_err_handler(edev); |
|---|
| 2671 | + set_bit(QEDE_SP_HW_ERR, &edev->sp_flags); |
|---|
| 2672 | + schedule_delayed_work(&edev->sp_task, 0); |
|---|
| 2673 | + |
|---|
| 2674 | + DP_INFO(edev, "Scheduled a error handler [err_type %d]\n", err_type); |
|---|
| 2299 | 2675 | } |
|---|
| 2300 | 2676 | |
|---|
| 2301 | 2677 | static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq) |
|---|
| .. | .. |
|---|
| 2321 | 2697 | data->feat_flags |= QED_TLV_LSO; |
|---|
| 2322 | 2698 | |
|---|
| 2323 | 2699 | ether_addr_copy(data->mac[0], edev->ndev->dev_addr); |
|---|
| 2324 | | - memset(data->mac[1], 0, ETH_ALEN); |
|---|
| 2325 | | - memset(data->mac[2], 0, ETH_ALEN); |
|---|
| 2700 | + eth_zero_addr(data->mac[1]); |
|---|
| 2701 | + eth_zero_addr(data->mac[2]); |
|---|
| 2326 | 2702 | /* Copy the first two UC macs */ |
|---|
| 2327 | 2703 | netif_addr_lock_bh(edev->ndev); |
|---|
| 2328 | 2704 | i = 1; |
|---|
| .. | .. |
|---|
| 2395 | 2771 | etlv->num_txqs_full_set = true; |
|---|
| 2396 | 2772 | etlv->num_rxqs_full_set = true; |
|---|
| 2397 | 2773 | } |
|---|
| 2774 | + |
|---|
| 2775 | +/** |
|---|
| 2776 | + * qede_io_error_detected - called when PCI error is detected |
|---|
| 2777 | + * @pdev: Pointer to PCI device |
|---|
| 2778 | + * @state: The current pci connection state |
|---|
| 2779 | + * |
|---|
| 2780 | + * This function is called after a PCI bus error affecting |
|---|
| 2781 | + * this device has been detected. |
|---|
| 2782 | + */ |
|---|
| 2783 | +static pci_ers_result_t |
|---|
| 2784 | +qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) |
|---|
| 2785 | +{ |
|---|
| 2786 | + struct net_device *dev = pci_get_drvdata(pdev); |
|---|
| 2787 | + struct qede_dev *edev = netdev_priv(dev); |
|---|
| 2788 | + |
|---|
| 2789 | + if (!edev) |
|---|
| 2790 | + return PCI_ERS_RESULT_NONE; |
|---|
| 2791 | + |
|---|
| 2792 | + DP_NOTICE(edev, "IO error detected [%d]\n", state); |
|---|
| 2793 | + |
|---|
| 2794 | + __qede_lock(edev); |
|---|
| 2795 | + if (edev->state == QEDE_STATE_RECOVERY) { |
|---|
| 2796 | + DP_NOTICE(edev, "Device already in the recovery state\n"); |
|---|
| 2797 | + __qede_unlock(edev); |
|---|
| 2798 | + return PCI_ERS_RESULT_NONE; |
|---|
| 2799 | + } |
|---|
| 2800 | + |
|---|
| 2801 | + /* PF handles the recovery of its VFs */ |
|---|
| 2802 | + if (IS_VF(edev)) { |
|---|
| 2803 | + DP_VERBOSE(edev, QED_MSG_IOV, |
|---|
| 2804 | + "VF recovery is handled by its PF\n"); |
|---|
| 2805 | + __qede_unlock(edev); |
|---|
| 2806 | + return PCI_ERS_RESULT_RECOVERED; |
|---|
| 2807 | + } |
|---|
| 2808 | + |
|---|
| 2809 | + /* Close OS Tx */ |
|---|
| 2810 | + netif_tx_disable(edev->ndev); |
|---|
| 2811 | + netif_carrier_off(edev->ndev); |
|---|
| 2812 | + |
|---|
| 2813 | + set_bit(QEDE_SP_AER, &edev->sp_flags); |
|---|
| 2814 | + schedule_delayed_work(&edev->sp_task, 0); |
|---|
| 2815 | + |
|---|
| 2816 | + __qede_unlock(edev); |
|---|
| 2817 | + |
|---|
| 2818 | + return PCI_ERS_RESULT_CAN_RECOVER; |
|---|
| 2819 | +} |
|---|