From 1543e317f1da31b75942316931e8f491a8920811 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 04 Jan 2024 10:08:02 +0000
Subject: [PATCH] disable FB
---
kernel/drivers/net/ethernet/qlogic/qede/qede_main.c | 878 ++++++++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 666 insertions(+), 212 deletions(-)
diff --git a/kernel/drivers/net/ethernet/qlogic/qede/qede_main.c b/kernel/drivers/net/ethernet/qlogic/qede/qede_main.c
index 756c594..681ec14 100644
--- a/kernel/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/kernel/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1,34 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
+
#include <linux/crash_dump.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -61,6 +36,7 @@
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
+#include <linux/aer.h>
#include "qede.h"
#include "qede_ptp.h"
@@ -125,6 +101,8 @@
MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+static pci_ers_result_t
+qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state);
#define TX_TIMEOUT (5 * HZ)
@@ -134,23 +112,14 @@
static void qede_remove(struct pci_dev *pdev);
static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
+static void qede_schedule_recovery_handler(void *dev);
+static void qede_recovery_handler(struct qede_dev *edev);
+static void qede_schedule_hw_err_handler(void *dev,
+ enum qed_hw_err_type err_type);
static void qede_get_eth_tlv_data(void *edev, void *data);
static void qede_get_generic_tlv_data(void *edev,
struct qed_generic_tlvs *data);
-
-/* The qede lock is used to protect driver state change and driver flows that
- * are not reentrant.
- */
-void __qede_lock(struct qede_dev *edev)
-{
- mutex_lock(&edev->qede_lock);
-}
-
-void __qede_unlock(struct qede_dev *edev)
-{
- mutex_unlock(&edev->qede_lock);
-}
-
+static void qede_generic_hw_err_handler(struct qede_dev *edev);
#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto)
@@ -175,9 +144,7 @@
{
struct qede_dev *edev = netdev_priv(ndev);
- DP_VERBOSE(edev, QED_MSG_IOV,
- "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
- mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);
+ DP_VERBOSE(edev, QED_MSG_IOV, "Setting MAC %pM to VF [%d]\n", mac, vfidx);
if (!is_valid_ether_addr(mac)) {
DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
@@ -215,6 +182,10 @@
}
#endif
+static const struct pci_error_handlers qede_err_handler = {
+ .error_detected = qede_io_error_detected,
+};
+
static struct pci_driver qede_pci_driver = {
.name = "qede",
.id_table = qede_pci_tbl,
@@ -224,6 +195,7 @@
#ifdef CONFIG_QED_SRIOV
.sriov_configure = qede_sriov_configure,
#endif
+ .err_handler = &qede_err_handler,
};
static struct qed_eth_cb_ops qede_ll_ops = {
@@ -232,6 +204,8 @@
.arfs_filter_op = qede_arfs_filter_op,
#endif
.link_update = qede_link_update,
+ .schedule_recovery_handler = qede_schedule_recovery_handler,
+ .schedule_hw_err_handler = qede_schedule_hw_err_handler,
.get_generic_tlv_data = qede_get_generic_tlv_data,
.get_protocol_tlv_data = qede_get_eth_tlv_data,
},
@@ -287,6 +261,8 @@
pr_info("qede_init: %s\n", version);
+ qede_forced_speed_maps_init();
+
qed_ops = qed_get_eth_ops();
if (!qed_ops) {
pr_notice("Failed to get qed ethtool operations\n");
@@ -336,6 +312,8 @@
struct qed_eth_stats stats;
edev->ops->get_vport_stats(edev->cdev, &stats);
+
+ spin_lock(&edev->stats_lock);
p_common->no_buff_discards = stats.common.no_buff_discards;
p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
@@ -401,6 +379,7 @@
p_common->brb_discards = stats.common.brb_discards;
p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
p_common->link_change_count = stats.common.link_change_count;
+ p_common->ptp_skip_txts = edev->ptp_skip_txts;
if (QEDE_IS_BB(edev)) {
struct qede_stats_bb *p_bb = &edev->stats.bb;
@@ -433,6 +412,8 @@
p_ah->tx_1519_to_max_byte_packets =
stats.ah.tx_1519_to_max_byte_packets;
}
+
+ spin_unlock(&edev->stats_lock);
}
static void qede_get_stats64(struct net_device *dev,
@@ -441,8 +422,9 @@
struct qede_dev *edev = netdev_priv(dev);
struct qede_stats_common *p_common;
- qede_fill_by_demand_stats(edev);
p_common = &edev->stats.common;
+
+ spin_lock(&edev->stats_lock);
stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
p_common->rx_bcast_pkts;
@@ -463,6 +445,8 @@
stats->collisions = edev->stats.bb.tx_total_collisions;
stats->rx_crc_errors = p_common->rx_crc_errors;
stats->rx_frame_errors = p_common->rx_align_errors;
+
+ spin_unlock(&edev->stats_lock);
}
#ifdef CONFIG_QED_SRIOV
@@ -537,6 +521,51 @@
return 0;
}
+static void qede_tx_log_print(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+ DP_NOTICE(edev,
+ "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
+ txq->index, le16_to_cpu(*txq->hw_cons_ptr),
+ qed_chain_get_cons_idx(&txq->tx_pbl),
+ qed_chain_get_prod_idx(&txq->tx_pbl),
+ jiffies);
+}
+
+static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_tx_queue *txq;
+ int cos;
+
+ netif_carrier_off(dev);
+ DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);
+
+ if (!(edev->fp_array[txqueue].type & QEDE_FASTPATH_TX))
+ return;
+
+ for_each_cos_in_txq(edev, cos) {
+ txq = &edev->fp_array[txqueue].txq[cos];
+
+ if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
+ qed_chain_get_prod_idx(&txq->tx_pbl))
+ qede_tx_log_print(edev, txq);
+ }
+
+ if (IS_VF(edev))
+ return;
+
+ if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
+ edev->state == QEDE_STATE_RECOVERY) {
+ DP_INFO(edev,
+ "Avoid handling a Tx timeout while another HW error is being handled\n");
+ return;
+ }
+
+ set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags);
+ set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+
static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
{
struct qede_dev *edev = netdev_priv(ndev);
@@ -558,13 +587,13 @@
}
static int
-qede_set_flower(struct qede_dev *edev, struct tc_cls_flower_offload *f,
+qede_set_flower(struct qede_dev *edev, struct flow_cls_offload *f,
__be16 proto)
{
switch (f->command) {
- case TC_CLSFLOWER_REPLACE:
+ case FLOW_CLS_REPLACE:
return qede_add_tc_flower_fltr(edev, proto, f);
- case TC_CLSFLOWER_DESTROY:
+ case FLOW_CLS_DESTROY:
return qede_delete_flow_filter(edev, f->cookie);
default:
return -EOPNOTSUPP;
@@ -574,7 +603,7 @@
static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
- struct tc_cls_flower_offload *f;
+ struct flow_cls_offload *f;
struct qede_dev *edev = cb_priv;
if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
@@ -589,24 +618,7 @@
}
}
-static int qede_setup_tc_block(struct qede_dev *edev,
- struct tc_block_offload *f)
-{
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
- switch (f->command) {
- case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block,
- qede_setup_tc_block_cb,
- edev, edev, f->extack);
- case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block, qede_setup_tc_block_cb, edev);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
+static LIST_HEAD(qede_block_cb_list);
static int
qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
@@ -617,7 +629,10 @@
switch (type) {
case TC_SETUP_BLOCK:
- return qede_setup_tc_block(edev, type_data);
+ return flow_block_cb_setup_simple(type_data,
+ &qede_block_cb_list,
+ qede_setup_tc_block_cb,
+ edev, edev, true);
case TC_SETUP_QDISC_MQPRIO:
mqprio = type_data;
@@ -629,78 +644,81 @@
}
static const struct net_device_ops qede_netdev_ops = {
- .ndo_open = qede_open,
- .ndo_stop = qede_close,
- .ndo_start_xmit = qede_start_xmit,
- .ndo_select_queue = qede_select_queue,
- .ndo_set_rx_mode = qede_set_rx_mode,
- .ndo_set_mac_address = qede_set_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = qede_change_mtu,
- .ndo_do_ioctl = qede_ioctl,
+ .ndo_open = qede_open,
+ .ndo_stop = qede_close,
+ .ndo_start_xmit = qede_start_xmit,
+ .ndo_select_queue = qede_select_queue,
+ .ndo_set_rx_mode = qede_set_rx_mode,
+ .ndo_set_mac_address = qede_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = qede_change_mtu,
+ .ndo_do_ioctl = qede_ioctl,
+ .ndo_tx_timeout = qede_tx_timeout,
#ifdef CONFIG_QED_SRIOV
- .ndo_set_vf_mac = qede_set_vf_mac,
- .ndo_set_vf_vlan = qede_set_vf_vlan,
- .ndo_set_vf_trust = qede_set_vf_trust,
+ .ndo_set_vf_mac = qede_set_vf_mac,
+ .ndo_set_vf_vlan = qede_set_vf_vlan,
+ .ndo_set_vf_trust = qede_set_vf_trust,
#endif
- .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
- .ndo_fix_features = qede_fix_features,
- .ndo_set_features = qede_set_features,
- .ndo_get_stats64 = qede_get_stats64,
+ .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_fix_features = qede_fix_features,
+ .ndo_set_features = qede_set_features,
+ .ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
- .ndo_set_vf_link_state = qede_set_vf_link_state,
- .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
- .ndo_get_vf_config = qede_get_vf_config,
- .ndo_set_vf_rate = qede_set_vf_rate,
+ .ndo_set_vf_link_state = qede_set_vf_link_state,
+ .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
+ .ndo_get_vf_config = qede_get_vf_config,
+ .ndo_set_vf_rate = qede_set_vf_rate,
#endif
- .ndo_udp_tunnel_add = qede_udp_tunnel_add,
- .ndo_udp_tunnel_del = qede_udp_tunnel_del,
- .ndo_features_check = qede_features_check,
- .ndo_bpf = qede_xdp,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
+ .ndo_features_check = qede_features_check,
+ .ndo_bpf = qede_xdp,
#ifdef CONFIG_RFS_ACCEL
- .ndo_rx_flow_steer = qede_rx_flow_steer,
+ .ndo_rx_flow_steer = qede_rx_flow_steer,
#endif
- .ndo_setup_tc = qede_setup_tc_offload,
+ .ndo_xdp_xmit = qede_xdp_transmit,
+ .ndo_setup_tc = qede_setup_tc_offload,
};
static const struct net_device_ops qede_netdev_vf_ops = {
- .ndo_open = qede_open,
- .ndo_stop = qede_close,
- .ndo_start_xmit = qede_start_xmit,
- .ndo_select_queue = qede_select_queue,
- .ndo_set_rx_mode = qede_set_rx_mode,
- .ndo_set_mac_address = qede_set_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = qede_change_mtu,
- .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
- .ndo_fix_features = qede_fix_features,
- .ndo_set_features = qede_set_features,
- .ndo_get_stats64 = qede_get_stats64,
- .ndo_udp_tunnel_add = qede_udp_tunnel_add,
- .ndo_udp_tunnel_del = qede_udp_tunnel_del,
- .ndo_features_check = qede_features_check,
+ .ndo_open = qede_open,
+ .ndo_stop = qede_close,
+ .ndo_start_xmit = qede_start_xmit,
+ .ndo_select_queue = qede_select_queue,
+ .ndo_set_rx_mode = qede_set_rx_mode,
+ .ndo_set_mac_address = qede_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = qede_change_mtu,
+ .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_fix_features = qede_fix_features,
+ .ndo_set_features = qede_set_features,
+ .ndo_get_stats64 = qede_get_stats64,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
+ .ndo_features_check = qede_features_check,
};
static const struct net_device_ops qede_netdev_vf_xdp_ops = {
- .ndo_open = qede_open,
- .ndo_stop = qede_close,
- .ndo_start_xmit = qede_start_xmit,
- .ndo_select_queue = qede_select_queue,
- .ndo_set_rx_mode = qede_set_rx_mode,
- .ndo_set_mac_address = qede_set_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = qede_change_mtu,
- .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
- .ndo_fix_features = qede_fix_features,
- .ndo_set_features = qede_set_features,
- .ndo_get_stats64 = qede_get_stats64,
- .ndo_udp_tunnel_add = qede_udp_tunnel_add,
- .ndo_udp_tunnel_del = qede_udp_tunnel_del,
- .ndo_features_check = qede_features_check,
- .ndo_bpf = qede_xdp,
+ .ndo_open = qede_open,
+ .ndo_stop = qede_close,
+ .ndo_start_xmit = qede_start_xmit,
+ .ndo_select_queue = qede_select_queue,
+ .ndo_set_rx_mode = qede_set_rx_mode,
+ .ndo_set_mac_address = qede_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = qede_change_mtu,
+ .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_fix_features = qede_fix_features,
+ .ndo_set_features = qede_set_features,
+ .ndo_get_stats64 = qede_get_stats64,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
+ .ndo_features_check = qede_features_check,
+ .ndo_bpf = qede_xdp,
+ .ndo_xdp_xmit = qede_xdp_transmit,
};
/* -------------------------------------------------------------------------
@@ -793,7 +811,7 @@
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;
- if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
+ if (edev->dev_info.common.b_arfs_capable)
hw_features |= NETIF_F_NTUPLE;
if (edev->dev_info.common.vxlan_enable ||
@@ -813,6 +831,8 @@
NETIF_F_GSO_UDP_TUNNEL_CSUM);
ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM);
+
+ qede_set_udp_tunnels(edev);
}
if (edev->dev_info.common.gre_enable) {
@@ -960,10 +980,82 @@
return -ENOMEM;
}
+/* The qede lock is used to protect driver state change and driver flows that
+ * are not reentrant.
+ */
+void __qede_lock(struct qede_dev *edev)
+{
+ mutex_lock(&edev->qede_lock);
+}
+
+void __qede_unlock(struct qede_dev *edev)
+{
+ mutex_unlock(&edev->qede_lock);
+}
+
+/* This version of the lock should be used when acquiring the RTNL lock is also
+ * needed in addition to the internal qede lock.
+ */
+static void qede_lock(struct qede_dev *edev)
+{
+ rtnl_lock();
+ __qede_lock(edev);
+}
+
+static void qede_unlock(struct qede_dev *edev)
+{
+ __qede_unlock(edev);
+ rtnl_unlock();
+}
+
+static void qede_periodic_task(struct work_struct *work)
+{
+ struct qede_dev *edev = container_of(work, struct qede_dev,
+ periodic_task.work);
+
+ qede_fill_by_demand_stats(edev);
+ schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks);
+}
+
+static void qede_init_periodic_task(struct qede_dev *edev)
+{
+ INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task);
+ spin_lock_init(&edev->stats_lock);
+ edev->stats_coal_usecs = USEC_PER_SEC;
+ edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC);
+}
+
static void qede_sp_task(struct work_struct *work)
{
struct qede_dev *edev = container_of(work, struct qede_dev,
sp_task.work);
+
+ /* Disable execution of this deferred work once
+	 * qede removal is in progress; this stops any future
+ * scheduling of sp_task.
+ */
+ if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
+ return;
+
+ /* The locking scheme depends on the specific flag:
+ * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
+ * ensure that ongoing flows are ended and new ones are not started.
+ * In other cases - only the internal qede lock should be acquired.
+ */
+
+ if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
+ cancel_delayed_work_sync(&edev->periodic_task);
+#ifdef CONFIG_QED_SRIOV
+ /* SRIOV must be disabled outside the lock to avoid a deadlock.
+ * The recovery of the active VFs is currently not supported.
+ */
+ if (pci_num_vf(edev->pdev))
+ qede_sriov_configure(edev->pdev, 0);
+#endif
+ qede_lock(edev);
+ qede_recovery_handler(edev);
+ qede_unlock(edev);
+ }
__qede_lock(edev);
@@ -977,7 +1069,20 @@
qede_process_arfs_filters(edev, false);
}
#endif
+ if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags))
+ qede_generic_hw_err_handler(edev);
__qede_unlock(edev);
+
+ if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) {
+#ifdef CONFIG_QED_SRIOV
+ /* SRIOV must be disabled outside the lock to avoid a deadlock.
+ * The recovery of the active VFs is currently not supported.
+ */
+ if (pci_num_vf(edev->pdev))
+ qede_sriov_configure(edev->pdev, 0);
+#endif
+ edev->ops->common->recovery_process(edev->cdev);
+ }
}
static void qede_update_pf_params(struct qed_dev *cdev)
@@ -1041,6 +1146,7 @@
enum qede_probe_mode {
QEDE_PROBE_NORMAL,
+ QEDE_PROBE_RECOVERY,
};
static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
@@ -1061,6 +1167,7 @@
probe_params.dp_module = dp_module;
probe_params.dp_level = dp_level;
probe_params.is_vf = is_vf;
+ probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY);
cdev = qed_ops->common->probe(pdev, &probe_params);
if (!cdev) {
rc = -ENODEV;
@@ -1088,40 +1195,66 @@
if (rc)
goto err2;
- edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
- dp_level);
- if (!edev) {
- rc = -ENOMEM;
- goto err2;
+ if (mode != QEDE_PROBE_RECOVERY) {
+ edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
+ dp_level);
+ if (!edev) {
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ edev->devlink = qed_ops->common->devlink_register(cdev);
+ if (IS_ERR(edev->devlink)) {
+ DP_NOTICE(edev, "Cannot register devlink\n");
+ edev->devlink = NULL;
+ /* Go on, we can live without devlink */
+ }
+ } else {
+ struct net_device *ndev = pci_get_drvdata(pdev);
+
+ edev = netdev_priv(ndev);
+
+ if (edev->devlink) {
+ struct qed_devlink *qdl = devlink_priv(edev->devlink);
+
+ qdl->cdev = cdev;
+ }
+ edev->cdev = cdev;
+ memset(&edev->stats, 0, sizeof(edev->stats));
+ memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
}
if (is_vf)
- edev->flags |= QEDE_FLAG_IS_VF;
+ set_bit(QEDE_FLAGS_IS_VF, &edev->flags);
qede_init_ndev(edev);
- rc = qede_rdma_dev_add(edev);
+ rc = qede_rdma_dev_add(edev, (mode == QEDE_PROBE_RECOVERY));
if (rc)
goto err3;
- /* Prepare the lock prior to the registration of the netdev,
- * as once it's registered we might reach flows requiring it
- * [it's even possible to reach a flow needing it directly
- * from there, although it's unlikely].
- */
- INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
- mutex_init(&edev->qede_lock);
- rc = register_netdev(edev->ndev);
- if (rc) {
- DP_NOTICE(edev, "Cannot register net-device\n");
- goto err4;
+ if (mode != QEDE_PROBE_RECOVERY) {
+ /* Prepare the lock prior to the registration of the netdev,
+ * as once it's registered we might reach flows requiring it
+ * [it's even possible to reach a flow needing it directly
+ * from there, although it's unlikely].
+ */
+ INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
+ mutex_init(&edev->qede_lock);
+ qede_init_periodic_task(edev);
+
+ rc = register_netdev(edev->ndev);
+ if (rc) {
+ DP_NOTICE(edev, "Cannot register net-device\n");
+ goto err4;
+ }
}
edev->ops->common->set_name(cdev, edev->ndev->name);
/* PTP not supported on VFs */
if (!is_vf)
- qede_ptp_enable(edev, true);
+ qede_ptp_enable(edev);
edev->ops->register_ops(cdev, &qede_ll_ops, edev);
@@ -1133,12 +1266,20 @@
edev->rx_copybreak = QEDE_RX_HDR_SIZE;
qede_log_probe(edev);
+
+ /* retain user config (for example - after recovery) */
+ if (edev->stats_coal_usecs)
+ schedule_delayed_work(&edev->periodic_task, 0);
+
return 0;
err4:
- qede_rdma_dev_remove(edev);
+ qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY));
err3:
- free_netdev(edev->ndev);
+ if (mode != QEDE_PROBE_RECOVERY)
+ free_netdev(edev->ndev);
+ else
+ edev->cdev = NULL;
err2:
qed_ops->common->slowpath_stop(cdev);
err1:
@@ -1172,6 +1313,7 @@
enum qede_remove_mode {
QEDE_REMOVE_NORMAL,
+ QEDE_REMOVE_RECOVERY,
};
static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
@@ -1190,21 +1332,33 @@
DP_INFO(edev, "Starting qede_remove\n");
- qede_rdma_dev_remove(edev);
- unregister_netdev(ndev);
- cancel_delayed_work_sync(&edev->sp_task);
+ qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));
+
+ if (mode != QEDE_REMOVE_RECOVERY) {
+ set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
+ unregister_netdev(ndev);
+
+ cancel_delayed_work_sync(&edev->sp_task);
+ cancel_delayed_work_sync(&edev->periodic_task);
+
+ edev->ops->common->set_power_state(cdev, PCI_D0);
+
+ pci_set_drvdata(pdev, NULL);
+ }
qede_ptp_disable(edev);
-
- edev->ops->common->set_power_state(cdev, PCI_D0);
-
- pci_set_drvdata(pdev, NULL);
/* Use global ops since we've freed edev */
qed_ops->common->slowpath_stop(cdev);
if (system_state == SYSTEM_POWER_OFF)
return;
+
+ if (mode != QEDE_REMOVE_RECOVERY && edev->devlink) {
+ qed_ops->common->devlink_unregister(edev->devlink);
+ edev->devlink = NULL;
+ }
qed_ops->common->remove(cdev);
+ edev->cdev = NULL;
/* Since this can happen out-of-sync with other flows,
* don't release the netdevice until after slowpath stop
@@ -1212,7 +1366,8 @@
* [e.g., QED register callbacks] won't break anything when
* accessing the netdevice.
*/
- free_netdev(ndev);
+ if (mode != QEDE_REMOVE_RECOVERY)
+ free_netdev(ndev);
dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}
@@ -1265,7 +1420,8 @@
u16 sb_id)
{
if (sb_info->sb_virt) {
- edev->ops->common->sb_release(edev->cdev, sb_info, sb_id);
+ edev->ops->common->sb_release(edev->cdev, sb_info, sb_id,
+ QED_SB_TYPE_L2_QUEUE);
dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
(void *)sb_info->sb_virt, sb_info->sb_phys);
memset(sb_info, 0, sizeof(*sb_info));
@@ -1347,6 +1503,11 @@
/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
+ struct qed_chain_init_params params = {
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ .num_elems = RX_RING_SIZE,
+ };
+ struct qed_dev *cdev = edev->cdev;
int i, rc, size;
rxq->num_rx_buffers = edev->q_num_rx_buffers;
@@ -1361,7 +1522,7 @@
if (rxq->rx_buf_size + size > PAGE_SIZE)
rxq->rx_buf_size = PAGE_SIZE - size;
- /* Segment size to spilt a page in multiple equal parts ,
+ /* Segment size to split a page in multiple equal parts,
* unless XDP is used in which case we'd use the entire page.
*/
if (!edev->xdp_prog) {
@@ -1382,24 +1543,20 @@
}
/* Allocate FW Rx ring */
- rc = edev->ops->common->chain_alloc(edev->cdev,
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
- QED_CHAIN_MODE_NEXT_PTR,
- QED_CHAIN_CNT_TYPE_U16,
- RX_RING_SIZE,
- sizeof(struct eth_rx_bd),
- &rxq->rx_bd_ring, NULL);
+ params.mode = QED_CHAIN_MODE_NEXT_PTR;
+ params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
+ params.elem_size = sizeof(struct eth_rx_bd);
+
+	rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, &params);
if (rc)
goto err;
/* Allocate FW completion ring */
- rc = edev->ops->common->chain_alloc(edev->cdev,
- QED_CHAIN_USE_TO_CONSUME,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U16,
- RX_RING_SIZE,
- sizeof(union eth_rx_cqe),
- &rxq->rx_comp_ring, NULL);
+ params.mode = QED_CHAIN_MODE_PBL;
+ params.intended_use = QED_CHAIN_USE_TO_CONSUME;
+ params.elem_size = sizeof(union eth_rx_cqe);
+
+	rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_comp_ring, &params);
if (rc)
goto err;
@@ -1436,7 +1593,13 @@
/* This function allocates all memory needed per Tx queue */
static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
- union eth_tx_bd_types *p_virt;
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ .num_elems = edev->q_num_tx_buffers,
+ .elem_size = sizeof(union eth_tx_bd_types),
+ };
int size, rc;
txq->num_tx_buffers = edev->q_num_tx_buffers;
@@ -1454,13 +1617,7 @@
goto err;
}
- rc = edev->ops->common->chain_alloc(edev->cdev,
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U16,
- txq->num_tx_buffers,
- sizeof(*p_virt),
- &txq->tx_pbl, NULL);
+	rc = edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl, &params);
if (rc)
goto err;
@@ -1559,11 +1716,64 @@
return 0;
}
+static void qede_empty_tx_queue(struct qede_dev *edev,
+ struct qede_tx_queue *txq)
+{
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ struct netdev_queue *netdev_txq;
+ int rc, len = 0;
+
+ netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
+
+ while (qed_chain_get_cons_idx(&txq->tx_pbl) !=
+ qed_chain_get_prod_idx(&txq->tx_pbl)) {
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+ "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
+ txq->index, qed_chain_get_cons_idx(&txq->tx_pbl),
+ qed_chain_get_prod_idx(&txq->tx_pbl));
+
+ rc = qede_free_tx_pkt(edev, txq, &len);
+ if (rc) {
+ DP_NOTICE(edev,
+ "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
+ txq->index,
+ qed_chain_get_cons_idx(&txq->tx_pbl),
+ qed_chain_get_prod_idx(&txq->tx_pbl));
+ break;
+ }
+
+ bytes_compl += len;
+ pkts_compl++;
+ txq->sw_tx_cons++;
+ }
+
+ netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
+}
+
+static void qede_empty_tx_queues(struct qede_dev *edev)
+{
+ int i;
+
+ for_each_queue(i)
+ if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ struct qede_fastpath *fp;
+
+ fp = &edev->fp_array[i];
+ qede_empty_tx_queue(edev,
+ &fp->txq[cos]);
+ }
+ }
+}
+
/* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev)
{
int queue_id, rxq_index = 0, txq_index = 0;
struct qede_fastpath *fp;
+ bool init_xdp = false;
for_each_queue(queue_id) {
fp = &edev->fp_array[queue_id];
@@ -1575,6 +1785,9 @@
fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
rxq_index);
fp->xdp_tx->is_xdp = 1;
+
+ spin_lock_init(&fp->xdp_tx->xdp_tx_lock);
+ init_xdp = true;
}
if (fp->type & QEDE_FASTPATH_RX) {
@@ -1590,6 +1803,13 @@
/* Driver have no error path from here */
WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
fp->rxq->rxq_id) < 0);
+
+ if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq,
+ MEM_TYPE_PAGE_ORDER0,
+ NULL)) {
+ DP_NOTICE(edev,
+ "Failed to register XDP memory model\n");
+ }
}
if (fp->type & QEDE_FASTPATH_TX) {
@@ -1605,7 +1825,7 @@
txq->ndev_txq_id = ndev_tx_id;
if (edev->dev_info.is_legacy)
- txq->is_legacy = 1;
+ txq->is_legacy = true;
txq->dev = &edev->pdev->dev;
}
@@ -1614,6 +1834,11 @@
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
edev->ndev->name, queue_id);
+ }
+
+ if (init_xdp) {
+ edev->total_xdp_queues = QEDE_RSS_COUNT(edev);
+ DP_INFO(edev, "Total XDP queues: %u\n", edev->total_xdp_queues);
}
}
@@ -1793,6 +2018,10 @@
static int qede_stop_txq(struct qede_dev *edev,
struct qede_tx_queue *txq, int rss_id)
{
+ /* delete doorbell from doorbell recovery mechanism */
+ edev->ops->common->db_recovery_del(edev->cdev, txq->doorbell_addr,
+ &txq->tx_db);
+
return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
}
@@ -1929,6 +2158,11 @@
DQ_XCM_ETH_TX_BD_PROD_CMD);
txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+ /* register doorbell with doorbell recovery mechanism */
+ rc = edev->ops->common->db_recovery_add(edev->cdev, txq->doorbell_addr,
+ &txq->tx_db, DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+
return rc;
}
@@ -2018,12 +2252,8 @@
if (rc)
goto out;
- fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
- if (IS_ERR(fp->rxq->xdp_prog)) {
- rc = PTR_ERR(fp->rxq->xdp_prog);
- fp->rxq->xdp_prog = NULL;
- goto out;
- }
+ bpf_prog_add(edev->xdp_prog, 1);
+ fp->rxq->xdp_prog = edev->xdp_prog;
}
if (fp->type & QEDE_FASTPATH_TX) {
@@ -2063,6 +2293,7 @@
enum qede_unload_mode {
QEDE_UNLOAD_NORMAL,
+ QEDE_UNLOAD_RECOVERY,
};
static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
@@ -2076,7 +2307,10 @@
if (!is_locked)
__qede_lock(edev);
- edev->state = QEDE_STATE_CLOSED;
+ clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
+
+ if (mode != QEDE_UNLOAD_RECOVERY)
+ edev->state = QEDE_STATE_CLOSED;
qede_rdma_dev_event_close(edev);
@@ -2084,22 +2318,25 @@
netif_tx_disable(edev->ndev);
netif_carrier_off(edev->ndev);
- /* Reset the link */
- memset(&link_params, 0, sizeof(link_params));
- link_params.link_up = false;
- edev->ops->common->set_link(edev->cdev, &link_params);
- rc = qede_stop_queues(edev);
- if (rc) {
- qede_sync_free_irqs(edev);
- goto out;
- }
+ if (mode != QEDE_UNLOAD_RECOVERY) {
+ /* Reset the link */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = false;
+ edev->ops->common->set_link(edev->cdev, &link_params);
- DP_INFO(edev, "Stopped Queues\n");
+ rc = qede_stop_queues(edev);
+ if (rc) {
+ qede_sync_free_irqs(edev);
+ goto out;
+ }
+
+ DP_INFO(edev, "Stopped Queues\n");
+ }
qede_vlan_mark_nonconfigured(edev);
edev->ops->fastpath_stop(edev->cdev);
- if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
+ if (edev->dev_info.common.b_arfs_capable) {
qede_poll_for_freeing_arfs_filters(edev);
qede_free_arfs(edev);
}
@@ -2110,18 +2347,28 @@
qede_napi_disable_remove(edev);
+ if (mode == QEDE_UNLOAD_RECOVERY)
+ qede_empty_tx_queues(edev);
+
qede_free_mem_load(edev);
qede_free_fp_array(edev);
out:
if (!is_locked)
__qede_unlock(edev);
+
+ if (mode != QEDE_UNLOAD_RECOVERY)
+ DP_NOTICE(edev, "Link is down\n");
+
+ edev->ptp_skip_txts = 0;
+
DP_INFO(edev, "Ending qede unload\n");
}
enum qede_load_mode {
QEDE_LOAD_NORMAL,
QEDE_LOAD_RELOAD,
+ QEDE_LOAD_RECOVERY,
};
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
@@ -2156,10 +2403,9 @@
if (rc)
goto err2;
- if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
- rc = qede_alloc_arfs(edev);
- if (rc)
- DP_NOTICE(edev, "aRFS memory allocation failed\n");
+ if (qede_alloc_arfs(edev)) {
+ edev->ndev->features &= ~NETIF_F_NTUPLE;
+ edev->dev_info.common.b_arfs_capable = false;
}
qede_napi_add_enable(edev);
@@ -2181,6 +2427,8 @@
/* Program un-configured VLANs */
qede_configure_vlan_filters(edev);
+
+ set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
/* Ask for link-up using current configuration */
memset(&link_params, 0, sizeof(link_params));
@@ -2254,7 +2502,7 @@
if (rc)
return rc;
- udp_tunnel_get_rx_info(ndev);
+ udp_tunnel_nic_reset_ntf(ndev);
edev->ops->common->update_drv_state(edev->cdev, true);
@@ -2267,7 +2515,8 @@
qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
- edev->ops->common->update_drv_state(edev->cdev, false);
+ if (edev->cdev)
+ edev->ops->common->update_drv_state(edev->cdev, false);
return 0;
}
@@ -2276,8 +2525,8 @@
{
struct qede_dev *edev = dev;
- if (!netif_running(edev->ndev)) {
- DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
+ if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) {
+ DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n");
return;
}
@@ -2296,6 +2545,165 @@
qede_rdma_dev_event_close(edev);
}
}
+}
+
+static void qede_schedule_recovery_handler(void *dev)
+{
+ struct qede_dev *edev = dev;
+
+ if (edev->state == QEDE_STATE_RECOVERY) {
+ DP_NOTICE(edev,
+ "Avoid scheduling a recovery handling since already in recovery state\n");
+ return;
+ }
+
+ set_bit(QEDE_SP_RECOVERY, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+
+ DP_INFO(edev, "Scheduled a recovery handler\n");
+}
+
+static void qede_recovery_failed(struct qede_dev *edev)
+{
+ netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n");
+
+ netif_device_detach(edev->ndev);
+
+ if (edev->cdev)
+ edev->ops->common->set_power_state(edev->cdev, PCI_D3hot);
+}
+
+static void qede_recovery_handler(struct qede_dev *edev)
+{
+ u32 curr_state = edev->state;
+ int rc;
+
+ DP_NOTICE(edev, "Starting a recovery process\n");
+
+	/* No need to acquire the qede_lock first, since it is done by qede_sp_task
+ * before calling this function.
+ */
+ edev->state = QEDE_STATE_RECOVERY;
+
+ edev->ops->common->recovery_prolog(edev->cdev);
+
+ if (curr_state == QEDE_STATE_OPEN)
+ qede_unload(edev, QEDE_UNLOAD_RECOVERY, true);
+
+ __qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY);
+
+ rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level,
+ IS_VF(edev), QEDE_PROBE_RECOVERY);
+ if (rc) {
+ edev->cdev = NULL;
+ goto err;
+ }
+
+ if (curr_state == QEDE_STATE_OPEN) {
+ rc = qede_load(edev, QEDE_LOAD_RECOVERY, true);
+ if (rc)
+ goto err;
+
+ qede_config_rx_mode(edev->ndev);
+ udp_tunnel_nic_reset_ntf(edev->ndev);
+ }
+
+ edev->state = curr_state;
+
+ DP_NOTICE(edev, "Recovery handling is done\n");
+
+ return;
+
+err:
+ qede_recovery_failed(edev);
+}
+
+static void qede_atomic_hw_err_handler(struct qede_dev *edev)
+{
+ struct qed_dev *cdev = edev->cdev;
+
+ DP_NOTICE(edev,
+ "Generic non-sleepable HW error handling started - err_flags 0x%lx\n",
+ edev->err_flags);
+
+ /* Get a call trace of the flow that led to the error */
+ WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags));
+
+ /* Prevent HW attentions from being reasserted */
+ if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags))
+ edev->ops->common->attn_clr_enable(cdev, true);
+
+ DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n");
+}
+
+static void qede_generic_hw_err_handler(struct qede_dev *edev)
+{
+ DP_NOTICE(edev,
+ "Generic sleepable HW error handling started - err_flags 0x%lx\n",
+ edev->err_flags);
+
+ if (edev->devlink)
+ edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type);
+
+ clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
+
+ DP_NOTICE(edev, "Generic sleepable HW error handling is done\n");
+}
+
+static void qede_set_hw_err_flags(struct qede_dev *edev,
+ enum qed_hw_err_type err_type)
+{
+ unsigned long err_flags = 0;
+
+ switch (err_type) {
+ case QED_HW_ERR_DMAE_FAIL:
+ set_bit(QEDE_ERR_WARN, &err_flags);
+ fallthrough;
+ case QED_HW_ERR_MFW_RESP_FAIL:
+ case QED_HW_ERR_HW_ATTN:
+ case QED_HW_ERR_RAMROD_FAIL:
+ case QED_HW_ERR_FW_ASSERT:
+ set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags);
+ set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags);
+ break;
+
+ default:
+ DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type);
+ break;
+ }
+
+ edev->err_flags |= err_flags;
+}
+
+static void qede_schedule_hw_err_handler(void *dev,
+ enum qed_hw_err_type err_type)
+{
+ struct qede_dev *edev = dev;
+
+ /* Fan failure cannot be masked by handling of another HW error or by a
+ * concurrent recovery process.
+ */
+ if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
+ edev->state == QEDE_STATE_RECOVERY) &&
+ err_type != QED_HW_ERR_FAN_FAIL) {
+ DP_INFO(edev,
+ "Avoid scheduling an error handling while another HW error is being handled\n");
+ return;
+ }
+
+ if (err_type >= QED_HW_ERR_LAST) {
+ DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type);
+ clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
+ return;
+ }
+
+ edev->last_err_type = err_type;
+ qede_set_hw_err_flags(edev, err_type);
+ qede_atomic_hw_err_handler(edev);
+ set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+
+ DP_INFO(edev, "Scheduled a error handler [err_type %d]\n", err_type);
}
static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
@@ -2321,8 +2729,8 @@
data->feat_flags |= QED_TLV_LSO;
ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
- memset(data->mac[1], 0, ETH_ALEN);
- memset(data->mac[2], 0, ETH_ALEN);
+ eth_zero_addr(data->mac[1]);
+ eth_zero_addr(data->mac[2]);
/* Copy the first two UC macs */
netif_addr_lock_bh(edev->ndev);
i = 1;
@@ -2395,3 +2803,49 @@
etlv->num_txqs_full_set = true;
etlv->num_rxqs_full_set = true;
}
+
+/**
+ * qede_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t
+qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev)
+ return PCI_ERS_RESULT_NONE;
+
+ DP_NOTICE(edev, "IO error detected [%d]\n", state);
+
+ __qede_lock(edev);
+ if (edev->state == QEDE_STATE_RECOVERY) {
+ DP_NOTICE(edev, "Device already in the recovery state\n");
+ __qede_unlock(edev);
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ /* PF handles the recovery of its VFs */
+ if (IS_VF(edev)) {
+ DP_VERBOSE(edev, QED_MSG_IOV,
+ "VF recovery is handled by its PF\n");
+ __qede_unlock(edev);
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+ /* Close OS Tx */
+ netif_tx_disable(edev->ndev);
+ netif_carrier_off(edev->ndev);
+
+ set_bit(QEDE_SP_AER, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+
+ __qede_unlock(edev);
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
--
Gitblit v1.6.2