.. | .. |
---|
| 1 | +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) |
---|
1 | 2 | /* QLogic qede NIC Driver |
---|
2 | 3 | * Copyright (c) 2015-2017 QLogic Corporation |
---|
3 | | - * |
---|
4 | | - * This software is available to you under a choice of one of two |
---|
5 | | - * licenses. You may choose to be licensed under the terms of the GNU |
---|
6 | | - * General Public License (GPL) Version 2, available from the file |
---|
7 | | - * COPYING in the main directory of this source tree, or the |
---|
8 | | - * OpenIB.org BSD license below: |
---|
9 | | - * |
---|
10 | | - * Redistribution and use in source and binary forms, with or |
---|
11 | | - * without modification, are permitted provided that the following |
---|
12 | | - * conditions are met: |
---|
13 | | - * |
---|
14 | | - * - Redistributions of source code must retain the above |
---|
15 | | - * copyright notice, this list of conditions and the following |
---|
16 | | - * disclaimer. |
---|
17 | | - * |
---|
18 | | - * - Redistributions in binary form must reproduce the above |
---|
19 | | - * copyright notice, this list of conditions and the following |
---|
20 | | - * disclaimer in the documentation and /or other materials |
---|
21 | | - * provided with the distribution. |
---|
22 | | - * |
---|
23 | | - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
---|
24 | | - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
---|
25 | | - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
---|
26 | | - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
---|
27 | | - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
---|
28 | | - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
---|
29 | | - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
---|
30 | | - * SOFTWARE. |
---|
| 4 | + * Copyright (c) 2019-2020 Marvell International Ltd. |
---|
31 | 5 | */ |
---|
| 6 | + |
---|
32 | 7 | #include <linux/crash_dump.h> |
---|
33 | 8 | #include <linux/module.h> |
---|
34 | 9 | #include <linux/pci.h> |
---|
.. | .. |
---|
61 | 36 | #include <net/ip6_checksum.h> |
---|
62 | 37 | #include <linux/bitops.h> |
---|
63 | 38 | #include <linux/vmalloc.h> |
---|
| 39 | +#include <linux/aer.h> |
---|
64 | 40 | #include "qede.h" |
---|
65 | 41 | #include "qede_ptp.h" |
---|
66 | 42 | |
---|
.. | .. |
---|
125 | 101 | MODULE_DEVICE_TABLE(pci, qede_pci_tbl); |
---|
126 | 102 | |
---|
127 | 103 | static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id); |
---|
| 104 | +static pci_ers_result_t |
---|
| 105 | +qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state); |
---|
128 | 106 | |
---|
129 | 107 | #define TX_TIMEOUT (5 * HZ) |
---|
130 | 108 | |
---|
.. | .. |
---|
134 | 112 | static void qede_remove(struct pci_dev *pdev); |
---|
135 | 113 | static void qede_shutdown(struct pci_dev *pdev); |
---|
136 | 114 | static void qede_link_update(void *dev, struct qed_link_output *link); |
---|
| 115 | +static void qede_schedule_recovery_handler(void *dev); |
---|
| 116 | +static void qede_recovery_handler(struct qede_dev *edev); |
---|
| 117 | +static void qede_schedule_hw_err_handler(void *dev, |
---|
| 118 | + enum qed_hw_err_type err_type); |
---|
137 | 119 | static void qede_get_eth_tlv_data(void *edev, void *data); |
---|
138 | 120 | static void qede_get_generic_tlv_data(void *edev, |
---|
139 | 121 | struct qed_generic_tlvs *data); |
---|
140 | | - |
---|
141 | | -/* The qede lock is used to protect driver state change and driver flows that |
---|
142 | | - * are not reentrant. |
---|
143 | | - */ |
---|
144 | | -void __qede_lock(struct qede_dev *edev) |
---|
145 | | -{ |
---|
146 | | - mutex_lock(&edev->qede_lock); |
---|
147 | | -} |
---|
148 | | - |
---|
149 | | -void __qede_unlock(struct qede_dev *edev) |
---|
150 | | -{ |
---|
151 | | - mutex_unlock(&edev->qede_lock); |
---|
152 | | -} |
---|
153 | | - |
---|
| 122 | +static void qede_generic_hw_err_handler(struct qede_dev *edev); |
---|
154 | 123 | #ifdef CONFIG_QED_SRIOV |
---|
155 | 124 | static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos, |
---|
156 | 125 | __be16 vlan_proto) |
---|
.. | .. |
---|
175 | 144 | { |
---|
176 | 145 | struct qede_dev *edev = netdev_priv(ndev); |
---|
177 | 146 | |
---|
178 | | - DP_VERBOSE(edev, QED_MSG_IOV, |
---|
179 | | - "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n", |
---|
180 | | - mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx); |
---|
| 147 | + DP_VERBOSE(edev, QED_MSG_IOV, "Setting MAC %pM to VF [%d]\n", mac, vfidx); |
---|
181 | 148 | |
---|
182 | 149 | if (!is_valid_ether_addr(mac)) { |
---|
183 | 150 | DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n"); |
---|
.. | .. |
---|
215 | 182 | } |
---|
216 | 183 | #endif |
---|
217 | 184 | |
---|
| 185 | +static const struct pci_error_handlers qede_err_handler = { |
---|
| 186 | + .error_detected = qede_io_error_detected, |
---|
| 187 | +}; |
---|
| 188 | + |
---|
218 | 189 | static struct pci_driver qede_pci_driver = { |
---|
219 | 190 | .name = "qede", |
---|
220 | 191 | .id_table = qede_pci_tbl, |
---|
.. | .. |
---|
224 | 195 | #ifdef CONFIG_QED_SRIOV |
---|
225 | 196 | .sriov_configure = qede_sriov_configure, |
---|
226 | 197 | #endif |
---|
| 198 | + .err_handler = &qede_err_handler, |
---|
227 | 199 | }; |
---|
228 | 200 | |
---|
229 | 201 | static struct qed_eth_cb_ops qede_ll_ops = { |
---|
.. | .. |
---|
232 | 204 | .arfs_filter_op = qede_arfs_filter_op, |
---|
233 | 205 | #endif |
---|
234 | 206 | .link_update = qede_link_update, |
---|
| 207 | + .schedule_recovery_handler = qede_schedule_recovery_handler, |
---|
| 208 | + .schedule_hw_err_handler = qede_schedule_hw_err_handler, |
---|
235 | 209 | .get_generic_tlv_data = qede_get_generic_tlv_data, |
---|
236 | 210 | .get_protocol_tlv_data = qede_get_eth_tlv_data, |
---|
237 | 211 | }, |
---|
.. | .. |
---|
287 | 261 | |
---|
288 | 262 | pr_info("qede_init: %s\n", version); |
---|
289 | 263 | |
---|
| 264 | + qede_forced_speed_maps_init(); |
---|
| 265 | + |
---|
290 | 266 | qed_ops = qed_get_eth_ops(); |
---|
291 | 267 | if (!qed_ops) { |
---|
292 | 268 | pr_notice("Failed to get qed ethtool operations\n"); |
---|
.. | .. |
---|
336 | 312 | struct qed_eth_stats stats; |
---|
337 | 313 | |
---|
338 | 314 | edev->ops->get_vport_stats(edev->cdev, &stats); |
---|
| 315 | + |
---|
| 316 | + spin_lock(&edev->stats_lock); |
---|
339 | 317 | |
---|
340 | 318 | p_common->no_buff_discards = stats.common.no_buff_discards; |
---|
341 | 319 | p_common->packet_too_big_discard = stats.common.packet_too_big_discard; |
---|
.. | .. |
---|
401 | 379 | p_common->brb_discards = stats.common.brb_discards; |
---|
402 | 380 | p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames; |
---|
403 | 381 | p_common->link_change_count = stats.common.link_change_count; |
---|
| 382 | + p_common->ptp_skip_txts = edev->ptp_skip_txts; |
---|
404 | 383 | |
---|
405 | 384 | if (QEDE_IS_BB(edev)) { |
---|
406 | 385 | struct qede_stats_bb *p_bb = &edev->stats.bb; |
---|
.. | .. |
---|
433 | 412 | p_ah->tx_1519_to_max_byte_packets = |
---|
434 | 413 | stats.ah.tx_1519_to_max_byte_packets; |
---|
435 | 414 | } |
---|
| 415 | + |
---|
| 416 | + spin_unlock(&edev->stats_lock); |
---|
436 | 417 | } |
---|
437 | 418 | |
---|
438 | 419 | static void qede_get_stats64(struct net_device *dev, |
---|
.. | .. |
---|
441 | 422 | struct qede_dev *edev = netdev_priv(dev); |
---|
442 | 423 | struct qede_stats_common *p_common; |
---|
443 | 424 | |
---|
444 | | - qede_fill_by_demand_stats(edev); |
---|
445 | 425 | p_common = &edev->stats.common; |
---|
| 426 | + |
---|
| 427 | + spin_lock(&edev->stats_lock); |
---|
446 | 428 | |
---|
447 | 429 | stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts + |
---|
448 | 430 | p_common->rx_bcast_pkts; |
---|
.. | .. |
---|
463 | 445 | stats->collisions = edev->stats.bb.tx_total_collisions; |
---|
464 | 446 | stats->rx_crc_errors = p_common->rx_crc_errors; |
---|
465 | 447 | stats->rx_frame_errors = p_common->rx_align_errors; |
---|
| 448 | + |
---|
| 449 | + spin_unlock(&edev->stats_lock); |
---|
466 | 450 | } |
---|
467 | 451 | |
---|
468 | 452 | #ifdef CONFIG_QED_SRIOV |
---|
.. | .. |
---|
537 | 521 | return 0; |
---|
538 | 522 | } |
---|
539 | 523 | |
---|
| 524 | +static void qede_tx_log_print(struct qede_dev *edev, struct qede_tx_queue *txq) |
---|
| 525 | +{ |
---|
| 526 | + DP_NOTICE(edev, |
---|
| 527 | + "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n", |
---|
| 528 | + txq->index, le16_to_cpu(*txq->hw_cons_ptr), |
---|
| 529 | + qed_chain_get_cons_idx(&txq->tx_pbl), |
---|
| 530 | + qed_chain_get_prod_idx(&txq->tx_pbl), |
---|
| 531 | + jiffies); |
---|
| 532 | +} |
---|
| 533 | + |
---|
| 534 | +static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue) |
---|
| 535 | +{ |
---|
| 536 | + struct qede_dev *edev = netdev_priv(dev); |
---|
| 537 | + struct qede_tx_queue *txq; |
---|
| 538 | + int cos; |
---|
| 539 | + |
---|
| 540 | + netif_carrier_off(dev); |
---|
| 541 | + DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue); |
---|
| 542 | + |
---|
| 543 | + if (!(edev->fp_array[txqueue].type & QEDE_FASTPATH_TX)) |
---|
| 544 | + return; |
---|
| 545 | + |
---|
| 546 | + for_each_cos_in_txq(edev, cos) { |
---|
| 547 | + txq = &edev->fp_array[txqueue].txq[cos]; |
---|
| 548 | + |
---|
| 549 | + if (qed_chain_get_cons_idx(&txq->tx_pbl) != |
---|
| 550 | + qed_chain_get_prod_idx(&txq->tx_pbl)) |
---|
| 551 | + qede_tx_log_print(edev, txq); |
---|
| 552 | + } |
---|
| 553 | + |
---|
| 554 | + if (IS_VF(edev)) |
---|
| 555 | + return; |
---|
| 556 | + |
---|
| 557 | + if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) || |
---|
| 558 | + edev->state == QEDE_STATE_RECOVERY) { |
---|
| 559 | + DP_INFO(edev, |
---|
| 560 | + "Avoid handling a Tx timeout while another HW error is being handled\n"); |
---|
| 561 | + return; |
---|
| 562 | + } |
---|
| 563 | + |
---|
| 564 | + set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags); |
---|
| 565 | + set_bit(QEDE_SP_HW_ERR, &edev->sp_flags); |
---|
| 566 | + schedule_delayed_work(&edev->sp_task, 0); |
---|
| 567 | +} |
---|
| 568 | + |
---|
540 | 569 | static int qede_setup_tc(struct net_device *ndev, u8 num_tc) |
---|
541 | 570 | { |
---|
542 | 571 | struct qede_dev *edev = netdev_priv(ndev); |
---|
.. | .. |
---|
558 | 587 | } |
---|
559 | 588 | |
---|
560 | 589 | static int |
---|
561 | | -qede_set_flower(struct qede_dev *edev, struct tc_cls_flower_offload *f, |
---|
| 590 | +qede_set_flower(struct qede_dev *edev, struct flow_cls_offload *f, |
---|
562 | 591 | __be16 proto) |
---|
563 | 592 | { |
---|
564 | 593 | switch (f->command) { |
---|
565 | | - case TC_CLSFLOWER_REPLACE: |
---|
| 594 | + case FLOW_CLS_REPLACE: |
---|
566 | 595 | return qede_add_tc_flower_fltr(edev, proto, f); |
---|
567 | | - case TC_CLSFLOWER_DESTROY: |
---|
| 596 | + case FLOW_CLS_DESTROY: |
---|
568 | 597 | return qede_delete_flow_filter(edev, f->cookie); |
---|
569 | 598 | default: |
---|
570 | 599 | return -EOPNOTSUPP; |
---|
.. | .. |
---|
574 | 603 | static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data, |
---|
575 | 604 | void *cb_priv) |
---|
576 | 605 | { |
---|
577 | | - struct tc_cls_flower_offload *f; |
---|
| 606 | + struct flow_cls_offload *f; |
---|
578 | 607 | struct qede_dev *edev = cb_priv; |
---|
579 | 608 | |
---|
580 | 609 | if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data)) |
---|
.. | .. |
---|
589 | 618 | } |
---|
590 | 619 | } |
---|
591 | 620 | |
---|
592 | | -static int qede_setup_tc_block(struct qede_dev *edev, |
---|
593 | | - struct tc_block_offload *f) |
---|
594 | | -{ |
---|
595 | | - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) |
---|
596 | | - return -EOPNOTSUPP; |
---|
597 | | - |
---|
598 | | - switch (f->command) { |
---|
599 | | - case TC_BLOCK_BIND: |
---|
600 | | - return tcf_block_cb_register(f->block, |
---|
601 | | - qede_setup_tc_block_cb, |
---|
602 | | - edev, edev, f->extack); |
---|
603 | | - case TC_BLOCK_UNBIND: |
---|
604 | | - tcf_block_cb_unregister(f->block, qede_setup_tc_block_cb, edev); |
---|
605 | | - return 0; |
---|
606 | | - default: |
---|
607 | | - return -EOPNOTSUPP; |
---|
608 | | - } |
---|
609 | | -} |
---|
| 621 | +static LIST_HEAD(qede_block_cb_list); |
---|
610 | 622 | |
---|
611 | 623 | static int |
---|
612 | 624 | qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type, |
---|
.. | .. |
---|
617 | 629 | |
---|
618 | 630 | switch (type) { |
---|
619 | 631 | case TC_SETUP_BLOCK: |
---|
620 | | - return qede_setup_tc_block(edev, type_data); |
---|
| 632 | + return flow_block_cb_setup_simple(type_data, |
---|
| 633 | + &qede_block_cb_list, |
---|
| 634 | + qede_setup_tc_block_cb, |
---|
| 635 | + edev, edev, true); |
---|
621 | 636 | case TC_SETUP_QDISC_MQPRIO: |
---|
622 | 637 | mqprio = type_data; |
---|
623 | 638 | |
---|
.. | .. |
---|
629 | 644 | } |
---|
630 | 645 | |
---|
631 | 646 | static const struct net_device_ops qede_netdev_ops = { |
---|
632 | | - .ndo_open = qede_open, |
---|
633 | | - .ndo_stop = qede_close, |
---|
634 | | - .ndo_start_xmit = qede_start_xmit, |
---|
635 | | - .ndo_select_queue = qede_select_queue, |
---|
636 | | - .ndo_set_rx_mode = qede_set_rx_mode, |
---|
637 | | - .ndo_set_mac_address = qede_set_mac_addr, |
---|
638 | | - .ndo_validate_addr = eth_validate_addr, |
---|
639 | | - .ndo_change_mtu = qede_change_mtu, |
---|
640 | | - .ndo_do_ioctl = qede_ioctl, |
---|
| 647 | + .ndo_open = qede_open, |
---|
| 648 | + .ndo_stop = qede_close, |
---|
| 649 | + .ndo_start_xmit = qede_start_xmit, |
---|
| 650 | + .ndo_select_queue = qede_select_queue, |
---|
| 651 | + .ndo_set_rx_mode = qede_set_rx_mode, |
---|
| 652 | + .ndo_set_mac_address = qede_set_mac_addr, |
---|
| 653 | + .ndo_validate_addr = eth_validate_addr, |
---|
| 654 | + .ndo_change_mtu = qede_change_mtu, |
---|
| 655 | + .ndo_do_ioctl = qede_ioctl, |
---|
| 656 | + .ndo_tx_timeout = qede_tx_timeout, |
---|
641 | 657 | #ifdef CONFIG_QED_SRIOV |
---|
642 | | - .ndo_set_vf_mac = qede_set_vf_mac, |
---|
643 | | - .ndo_set_vf_vlan = qede_set_vf_vlan, |
---|
644 | | - .ndo_set_vf_trust = qede_set_vf_trust, |
---|
| 658 | + .ndo_set_vf_mac = qede_set_vf_mac, |
---|
| 659 | + .ndo_set_vf_vlan = qede_set_vf_vlan, |
---|
| 660 | + .ndo_set_vf_trust = qede_set_vf_trust, |
---|
645 | 661 | #endif |
---|
646 | | - .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, |
---|
647 | | - .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, |
---|
648 | | - .ndo_fix_features = qede_fix_features, |
---|
649 | | - .ndo_set_features = qede_set_features, |
---|
650 | | - .ndo_get_stats64 = qede_get_stats64, |
---|
| 662 | + .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, |
---|
| 663 | + .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, |
---|
| 664 | + .ndo_fix_features = qede_fix_features, |
---|
| 665 | + .ndo_set_features = qede_set_features, |
---|
| 666 | + .ndo_get_stats64 = qede_get_stats64, |
---|
651 | 667 | #ifdef CONFIG_QED_SRIOV |
---|
652 | | - .ndo_set_vf_link_state = qede_set_vf_link_state, |
---|
653 | | - .ndo_set_vf_spoofchk = qede_set_vf_spoofchk, |
---|
654 | | - .ndo_get_vf_config = qede_get_vf_config, |
---|
655 | | - .ndo_set_vf_rate = qede_set_vf_rate, |
---|
| 668 | + .ndo_set_vf_link_state = qede_set_vf_link_state, |
---|
| 669 | + .ndo_set_vf_spoofchk = qede_set_vf_spoofchk, |
---|
| 670 | + .ndo_get_vf_config = qede_get_vf_config, |
---|
| 671 | + .ndo_set_vf_rate = qede_set_vf_rate, |
---|
656 | 672 | #endif |
---|
657 | | - .ndo_udp_tunnel_add = qede_udp_tunnel_add, |
---|
658 | | - .ndo_udp_tunnel_del = qede_udp_tunnel_del, |
---|
659 | | - .ndo_features_check = qede_features_check, |
---|
660 | | - .ndo_bpf = qede_xdp, |
---|
| 673 | + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, |
---|
| 674 | + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, |
---|
| 675 | + .ndo_features_check = qede_features_check, |
---|
| 676 | + .ndo_bpf = qede_xdp, |
---|
661 | 677 | #ifdef CONFIG_RFS_ACCEL |
---|
662 | | - .ndo_rx_flow_steer = qede_rx_flow_steer, |
---|
| 678 | + .ndo_rx_flow_steer = qede_rx_flow_steer, |
---|
663 | 679 | #endif |
---|
664 | | - .ndo_setup_tc = qede_setup_tc_offload, |
---|
| 680 | + .ndo_xdp_xmit = qede_xdp_transmit, |
---|
| 681 | + .ndo_setup_tc = qede_setup_tc_offload, |
---|
665 | 682 | }; |
---|
666 | 683 | |
---|
667 | 684 | static const struct net_device_ops qede_netdev_vf_ops = { |
---|
668 | | - .ndo_open = qede_open, |
---|
669 | | - .ndo_stop = qede_close, |
---|
670 | | - .ndo_start_xmit = qede_start_xmit, |
---|
671 | | - .ndo_select_queue = qede_select_queue, |
---|
672 | | - .ndo_set_rx_mode = qede_set_rx_mode, |
---|
673 | | - .ndo_set_mac_address = qede_set_mac_addr, |
---|
674 | | - .ndo_validate_addr = eth_validate_addr, |
---|
675 | | - .ndo_change_mtu = qede_change_mtu, |
---|
676 | | - .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, |
---|
677 | | - .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, |
---|
678 | | - .ndo_fix_features = qede_fix_features, |
---|
679 | | - .ndo_set_features = qede_set_features, |
---|
680 | | - .ndo_get_stats64 = qede_get_stats64, |
---|
681 | | - .ndo_udp_tunnel_add = qede_udp_tunnel_add, |
---|
682 | | - .ndo_udp_tunnel_del = qede_udp_tunnel_del, |
---|
683 | | - .ndo_features_check = qede_features_check, |
---|
| 685 | + .ndo_open = qede_open, |
---|
| 686 | + .ndo_stop = qede_close, |
---|
| 687 | + .ndo_start_xmit = qede_start_xmit, |
---|
| 688 | + .ndo_select_queue = qede_select_queue, |
---|
| 689 | + .ndo_set_rx_mode = qede_set_rx_mode, |
---|
| 690 | + .ndo_set_mac_address = qede_set_mac_addr, |
---|
| 691 | + .ndo_validate_addr = eth_validate_addr, |
---|
| 692 | + .ndo_change_mtu = qede_change_mtu, |
---|
| 693 | + .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, |
---|
| 694 | + .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, |
---|
| 695 | + .ndo_fix_features = qede_fix_features, |
---|
| 696 | + .ndo_set_features = qede_set_features, |
---|
| 697 | + .ndo_get_stats64 = qede_get_stats64, |
---|
| 698 | + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, |
---|
| 699 | + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, |
---|
| 700 | + .ndo_features_check = qede_features_check, |
---|
684 | 701 | }; |
---|
685 | 702 | |
---|
686 | 703 | static const struct net_device_ops qede_netdev_vf_xdp_ops = { |
---|
687 | | - .ndo_open = qede_open, |
---|
688 | | - .ndo_stop = qede_close, |
---|
689 | | - .ndo_start_xmit = qede_start_xmit, |
---|
690 | | - .ndo_select_queue = qede_select_queue, |
---|
691 | | - .ndo_set_rx_mode = qede_set_rx_mode, |
---|
692 | | - .ndo_set_mac_address = qede_set_mac_addr, |
---|
693 | | - .ndo_validate_addr = eth_validate_addr, |
---|
694 | | - .ndo_change_mtu = qede_change_mtu, |
---|
695 | | - .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, |
---|
696 | | - .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, |
---|
697 | | - .ndo_fix_features = qede_fix_features, |
---|
698 | | - .ndo_set_features = qede_set_features, |
---|
699 | | - .ndo_get_stats64 = qede_get_stats64, |
---|
700 | | - .ndo_udp_tunnel_add = qede_udp_tunnel_add, |
---|
701 | | - .ndo_udp_tunnel_del = qede_udp_tunnel_del, |
---|
702 | | - .ndo_features_check = qede_features_check, |
---|
703 | | - .ndo_bpf = qede_xdp, |
---|
| 704 | + .ndo_open = qede_open, |
---|
| 705 | + .ndo_stop = qede_close, |
---|
| 706 | + .ndo_start_xmit = qede_start_xmit, |
---|
| 707 | + .ndo_select_queue = qede_select_queue, |
---|
| 708 | + .ndo_set_rx_mode = qede_set_rx_mode, |
---|
| 709 | + .ndo_set_mac_address = qede_set_mac_addr, |
---|
| 710 | + .ndo_validate_addr = eth_validate_addr, |
---|
| 711 | + .ndo_change_mtu = qede_change_mtu, |
---|
| 712 | + .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, |
---|
| 713 | + .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, |
---|
| 714 | + .ndo_fix_features = qede_fix_features, |
---|
| 715 | + .ndo_set_features = qede_set_features, |
---|
| 716 | + .ndo_get_stats64 = qede_get_stats64, |
---|
| 717 | + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, |
---|
| 718 | + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, |
---|
| 719 | + .ndo_features_check = qede_features_check, |
---|
| 720 | + .ndo_bpf = qede_xdp, |
---|
| 721 | + .ndo_xdp_xmit = qede_xdp_transmit, |
---|
704 | 722 | }; |
---|
705 | 723 | |
---|
706 | 724 | /* ------------------------------------------------------------------------- |
---|
.. | .. |
---|
793 | 811 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
---|
794 | 812 | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC; |
---|
795 | 813 | |
---|
796 | | - if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) |
---|
| 814 | + if (edev->dev_info.common.b_arfs_capable) |
---|
797 | 815 | hw_features |= NETIF_F_NTUPLE; |
---|
798 | 816 | |
---|
799 | 817 | if (edev->dev_info.common.vxlan_enable || |
---|
.. | .. |
---|
813 | 831 | NETIF_F_GSO_UDP_TUNNEL_CSUM); |
---|
814 | 832 | ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL | |
---|
815 | 833 | NETIF_F_GSO_UDP_TUNNEL_CSUM); |
---|
| 834 | + |
---|
| 835 | + qede_set_udp_tunnels(edev); |
---|
816 | 836 | } |
---|
817 | 837 | |
---|
818 | 838 | if (edev->dev_info.common.gre_enable) { |
---|
.. | .. |
---|
960 | 980 | return -ENOMEM; |
---|
961 | 981 | } |
---|
962 | 982 | |
---|
| 983 | +/* The qede lock is used to protect driver state change and driver flows that |
---|
| 984 | + * are not reentrant. |
---|
| 985 | + */ |
---|
| 986 | +void __qede_lock(struct qede_dev *edev) |
---|
| 987 | +{ |
---|
| 988 | + mutex_lock(&edev->qede_lock); |
---|
| 989 | +} |
---|
| 990 | + |
---|
| 991 | +void __qede_unlock(struct qede_dev *edev) |
---|
| 992 | +{ |
---|
| 993 | + mutex_unlock(&edev->qede_lock); |
---|
| 994 | +} |
---|
| 995 | + |
---|
| 996 | +/* This version of the lock should be used when acquiring the RTNL lock is also |
---|
| 997 | + * needed in addition to the internal qede lock. |
---|
| 998 | + */ |
---|
| 999 | +static void qede_lock(struct qede_dev *edev) |
---|
| 1000 | +{ |
---|
| 1001 | + rtnl_lock(); |
---|
| 1002 | + __qede_lock(edev); |
---|
| 1003 | +} |
---|
| 1004 | + |
---|
| 1005 | +static void qede_unlock(struct qede_dev *edev) |
---|
| 1006 | +{ |
---|
| 1007 | + __qede_unlock(edev); |
---|
| 1008 | + rtnl_unlock(); |
---|
| 1009 | +} |
---|
| 1010 | + |
---|
| 1011 | +static void qede_periodic_task(struct work_struct *work) |
---|
| 1012 | +{ |
---|
| 1013 | + struct qede_dev *edev = container_of(work, struct qede_dev, |
---|
| 1014 | + periodic_task.work); |
---|
| 1015 | + |
---|
| 1016 | + qede_fill_by_demand_stats(edev); |
---|
| 1017 | + schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks); |
---|
| 1018 | +} |
---|
| 1019 | + |
---|
| 1020 | +static void qede_init_periodic_task(struct qede_dev *edev) |
---|
| 1021 | +{ |
---|
| 1022 | + INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task); |
---|
| 1023 | + spin_lock_init(&edev->stats_lock); |
---|
| 1024 | + edev->stats_coal_usecs = USEC_PER_SEC; |
---|
| 1025 | + edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC); |
---|
| 1026 | +} |
---|
| 1027 | + |
---|
963 | 1028 | static void qede_sp_task(struct work_struct *work) |
---|
964 | 1029 | { |
---|
965 | 1030 | struct qede_dev *edev = container_of(work, struct qede_dev, |
---|
966 | 1031 | sp_task.work); |
---|
| 1032 | + |
---|
| 1033 | + /* Disable execution of this deferred work once |
---|
| 1034 | + * qede removal is in progress, this stop any future |
---|
| 1035 | + * scheduling of sp_task. |
---|
| 1036 | + */ |
---|
| 1037 | + if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags)) |
---|
| 1038 | + return; |
---|
| 1039 | + |
---|
| 1040 | + /* The locking scheme depends on the specific flag: |
---|
| 1041 | + * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to |
---|
| 1042 | + * ensure that ongoing flows are ended and new ones are not started. |
---|
| 1043 | + * In other cases - only the internal qede lock should be acquired. |
---|
| 1044 | + */ |
---|
| 1045 | + |
---|
| 1046 | + if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) { |
---|
| 1047 | + cancel_delayed_work_sync(&edev->periodic_task); |
---|
| 1048 | +#ifdef CONFIG_QED_SRIOV |
---|
| 1049 | + /* SRIOV must be disabled outside the lock to avoid a deadlock. |
---|
| 1050 | + * The recovery of the active VFs is currently not supported. |
---|
| 1051 | + */ |
---|
| 1052 | + if (pci_num_vf(edev->pdev)) |
---|
| 1053 | + qede_sriov_configure(edev->pdev, 0); |
---|
| 1054 | +#endif |
---|
| 1055 | + qede_lock(edev); |
---|
| 1056 | + qede_recovery_handler(edev); |
---|
| 1057 | + qede_unlock(edev); |
---|
| 1058 | + } |
---|
967 | 1059 | |
---|
968 | 1060 | __qede_lock(edev); |
---|
969 | 1061 | |
---|
.. | .. |
---|
977 | 1069 | qede_process_arfs_filters(edev, false); |
---|
978 | 1070 | } |
---|
979 | 1071 | #endif |
---|
| 1072 | + if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags)) |
---|
| 1073 | + qede_generic_hw_err_handler(edev); |
---|
980 | 1074 | __qede_unlock(edev); |
---|
| 1075 | + |
---|
| 1076 | + if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) { |
---|
| 1077 | +#ifdef CONFIG_QED_SRIOV |
---|
| 1078 | + /* SRIOV must be disabled outside the lock to avoid a deadlock. |
---|
| 1079 | + * The recovery of the active VFs is currently not supported. |
---|
| 1080 | + */ |
---|
| 1081 | + if (pci_num_vf(edev->pdev)) |
---|
| 1082 | + qede_sriov_configure(edev->pdev, 0); |
---|
| 1083 | +#endif |
---|
| 1084 | + edev->ops->common->recovery_process(edev->cdev); |
---|
| 1085 | + } |
---|
981 | 1086 | } |
---|
982 | 1087 | |
---|
983 | 1088 | static void qede_update_pf_params(struct qed_dev *cdev) |
---|
.. | .. |
---|
1041 | 1146 | |
---|
1042 | 1147 | enum qede_probe_mode { |
---|
1043 | 1148 | QEDE_PROBE_NORMAL, |
---|
| 1149 | + QEDE_PROBE_RECOVERY, |
---|
1044 | 1150 | }; |
---|
1045 | 1151 | |
---|
1046 | 1152 | static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, |
---|
.. | .. |
---|
1061 | 1167 | probe_params.dp_module = dp_module; |
---|
1062 | 1168 | probe_params.dp_level = dp_level; |
---|
1063 | 1169 | probe_params.is_vf = is_vf; |
---|
| 1170 | + probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY); |
---|
1064 | 1171 | cdev = qed_ops->common->probe(pdev, &probe_params); |
---|
1065 | 1172 | if (!cdev) { |
---|
1066 | 1173 | rc = -ENODEV; |
---|
.. | .. |
---|
1088 | 1195 | if (rc) |
---|
1089 | 1196 | goto err2; |
---|
1090 | 1197 | |
---|
1091 | | - edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module, |
---|
1092 | | - dp_level); |
---|
1093 | | - if (!edev) { |
---|
1094 | | - rc = -ENOMEM; |
---|
1095 | | - goto err2; |
---|
| 1198 | + if (mode != QEDE_PROBE_RECOVERY) { |
---|
| 1199 | + edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module, |
---|
| 1200 | + dp_level); |
---|
| 1201 | + if (!edev) { |
---|
| 1202 | + rc = -ENOMEM; |
---|
| 1203 | + goto err2; |
---|
| 1204 | + } |
---|
| 1205 | + |
---|
| 1206 | + edev->devlink = qed_ops->common->devlink_register(cdev); |
---|
| 1207 | + if (IS_ERR(edev->devlink)) { |
---|
| 1208 | + DP_NOTICE(edev, "Cannot register devlink\n"); |
---|
| 1209 | + edev->devlink = NULL; |
---|
| 1210 | + /* Go on, we can live without devlink */ |
---|
| 1211 | + } |
---|
| 1212 | + } else { |
---|
| 1213 | + struct net_device *ndev = pci_get_drvdata(pdev); |
---|
| 1214 | + |
---|
| 1215 | + edev = netdev_priv(ndev); |
---|
| 1216 | + |
---|
| 1217 | + if (edev->devlink) { |
---|
| 1218 | + struct qed_devlink *qdl = devlink_priv(edev->devlink); |
---|
| 1219 | + |
---|
| 1220 | + qdl->cdev = cdev; |
---|
| 1221 | + } |
---|
| 1222 | + edev->cdev = cdev; |
---|
| 1223 | + memset(&edev->stats, 0, sizeof(edev->stats)); |
---|
| 1224 | + memcpy(&edev->dev_info, &dev_info, sizeof(dev_info)); |
---|
1096 | 1225 | } |
---|
1097 | 1226 | |
---|
1098 | 1227 | if (is_vf) |
---|
1099 | | - edev->flags |= QEDE_FLAG_IS_VF; |
---|
| 1228 | + set_bit(QEDE_FLAGS_IS_VF, &edev->flags); |
---|
1100 | 1229 | |
---|
1101 | 1230 | qede_init_ndev(edev); |
---|
1102 | 1231 | |
---|
1103 | | - rc = qede_rdma_dev_add(edev); |
---|
| 1232 | + rc = qede_rdma_dev_add(edev, (mode == QEDE_PROBE_RECOVERY)); |
---|
1104 | 1233 | if (rc) |
---|
1105 | 1234 | goto err3; |
---|
1106 | 1235 | |
---|
1107 | | - /* Prepare the lock prior to the registration of the netdev, |
---|
1108 | | - * as once it's registered we might reach flows requiring it |
---|
1109 | | - * [it's even possible to reach a flow needing it directly |
---|
1110 | | - * from there, although it's unlikely]. |
---|
1111 | | - */ |
---|
1112 | | - INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); |
---|
1113 | | - mutex_init(&edev->qede_lock); |
---|
1114 | | - rc = register_netdev(edev->ndev); |
---|
1115 | | - if (rc) { |
---|
1116 | | - DP_NOTICE(edev, "Cannot register net-device\n"); |
---|
1117 | | - goto err4; |
---|
| 1236 | + if (mode != QEDE_PROBE_RECOVERY) { |
---|
| 1237 | + /* Prepare the lock prior to the registration of the netdev, |
---|
| 1238 | + * as once it's registered we might reach flows requiring it |
---|
| 1239 | + * [it's even possible to reach a flow needing it directly |
---|
| 1240 | + * from there, although it's unlikely]. |
---|
| 1241 | + */ |
---|
| 1242 | + INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); |
---|
| 1243 | + mutex_init(&edev->qede_lock); |
---|
| 1244 | + qede_init_periodic_task(edev); |
---|
| 1245 | + |
---|
| 1246 | + rc = register_netdev(edev->ndev); |
---|
| 1247 | + if (rc) { |
---|
| 1248 | + DP_NOTICE(edev, "Cannot register net-device\n"); |
---|
| 1249 | + goto err4; |
---|
| 1250 | + } |
---|
1118 | 1251 | } |
---|
1119 | 1252 | |
---|
1120 | 1253 | edev->ops->common->set_name(cdev, edev->ndev->name); |
---|
1121 | 1254 | |
---|
1122 | 1255 | /* PTP not supported on VFs */ |
---|
1123 | 1256 | if (!is_vf) |
---|
1124 | | - qede_ptp_enable(edev, true); |
---|
| 1257 | + qede_ptp_enable(edev); |
---|
1125 | 1258 | |
---|
1126 | 1259 | edev->ops->register_ops(cdev, &qede_ll_ops, edev); |
---|
1127 | 1260 | |
---|
.. | .. |
---|
1133 | 1266 | edev->rx_copybreak = QEDE_RX_HDR_SIZE; |
---|
1134 | 1267 | |
---|
1135 | 1268 | qede_log_probe(edev); |
---|
| 1269 | + |
---|
| 1270 | + /* retain user config (for example - after recovery) */ |
---|
| 1271 | + if (edev->stats_coal_usecs) |
---|
| 1272 | + schedule_delayed_work(&edev->periodic_task, 0); |
---|
| 1273 | + |
---|
1136 | 1274 | return 0; |
---|
1137 | 1275 | |
---|
1138 | 1276 | err4: |
---|
1139 | | - qede_rdma_dev_remove(edev); |
---|
| 1277 | + qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY)); |
---|
1140 | 1278 | err3: |
---|
1141 | | - free_netdev(edev->ndev); |
---|
| 1279 | + if (mode != QEDE_PROBE_RECOVERY) |
---|
| 1280 | + free_netdev(edev->ndev); |
---|
| 1281 | + else |
---|
| 1282 | + edev->cdev = NULL; |
---|
1142 | 1283 | err2: |
---|
1143 | 1284 | qed_ops->common->slowpath_stop(cdev); |
---|
1144 | 1285 | err1: |
---|
.. | .. |
---|
1172 | 1313 | |
---|
1173 | 1314 | enum qede_remove_mode { |
---|
1174 | 1315 | QEDE_REMOVE_NORMAL, |
---|
| 1316 | + QEDE_REMOVE_RECOVERY, |
---|
1175 | 1317 | }; |
---|
1176 | 1318 | |
---|
1177 | 1319 | static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) |
---|
.. | .. |
---|
1190 | 1332 | |
---|
1191 | 1333 | DP_INFO(edev, "Starting qede_remove\n"); |
---|
1192 | 1334 | |
---|
1193 | | - qede_rdma_dev_remove(edev); |
---|
1194 | | - unregister_netdev(ndev); |
---|
1195 | | - cancel_delayed_work_sync(&edev->sp_task); |
---|
| 1335 | + qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY)); |
---|
| 1336 | + |
---|
| 1337 | + if (mode != QEDE_REMOVE_RECOVERY) { |
---|
| 1338 | + set_bit(QEDE_SP_DISABLE, &edev->sp_flags); |
---|
| 1339 | + unregister_netdev(ndev); |
---|
| 1340 | + |
---|
| 1341 | + cancel_delayed_work_sync(&edev->sp_task); |
---|
| 1342 | + cancel_delayed_work_sync(&edev->periodic_task); |
---|
| 1343 | + |
---|
| 1344 | + edev->ops->common->set_power_state(cdev, PCI_D0); |
---|
| 1345 | + |
---|
| 1346 | + pci_set_drvdata(pdev, NULL); |
---|
| 1347 | + } |
---|
1196 | 1348 | |
---|
1197 | 1349 | qede_ptp_disable(edev); |
---|
1198 | | - |
---|
1199 | | - edev->ops->common->set_power_state(cdev, PCI_D0); |
---|
1200 | | - |
---|
1201 | | - pci_set_drvdata(pdev, NULL); |
---|
1202 | 1350 | |
---|
1203 | 1351 | /* Use global ops since we've freed edev */ |
---|
1204 | 1352 | qed_ops->common->slowpath_stop(cdev); |
---|
1205 | 1353 | if (system_state == SYSTEM_POWER_OFF) |
---|
1206 | 1354 | return; |
---|
| 1355 | + |
---|
| 1356 | + if (mode != QEDE_REMOVE_RECOVERY && edev->devlink) { |
---|
| 1357 | + qed_ops->common->devlink_unregister(edev->devlink); |
---|
| 1358 | + edev->devlink = NULL; |
---|
| 1359 | + } |
---|
1207 | 1360 | qed_ops->common->remove(cdev); |
---|
| 1361 | + edev->cdev = NULL; |
---|
1208 | 1362 | |
---|
1209 | 1363 | /* Since this can happen out-of-sync with other flows, |
---|
1210 | 1364 | * don't release the netdevice until after slowpath stop |
---|
.. | .. |
---|
1212 | 1366 | * [e.g., QED register callbacks] won't break anything when |
---|
1213 | 1367 | * accessing the netdevice. |
---|
1214 | 1368 | */ |
---|
1215 | | - free_netdev(ndev); |
---|
| 1369 | + if (mode != QEDE_REMOVE_RECOVERY) |
---|
| 1370 | + free_netdev(ndev); |
---|
1216 | 1371 | |
---|
1217 | 1372 | dev_info(&pdev->dev, "Ending qede_remove successfully\n"); |
---|
1218 | 1373 | } |
---|
.. | .. |
---|
1265 | 1420 | u16 sb_id) |
---|
1266 | 1421 | { |
---|
1267 | 1422 | if (sb_info->sb_virt) { |
---|
1268 | | - edev->ops->common->sb_release(edev->cdev, sb_info, sb_id); |
---|
| 1423 | + edev->ops->common->sb_release(edev->cdev, sb_info, sb_id, |
---|
| 1424 | + QED_SB_TYPE_L2_QUEUE); |
---|
1269 | 1425 | dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt), |
---|
1270 | 1426 | (void *)sb_info->sb_virt, sb_info->sb_phys); |
---|
1271 | 1427 | memset(sb_info, 0, sizeof(*sb_info)); |
---|
.. | .. |
---|
1347 | 1503 | /* This function allocates all memory needed per Rx queue */ |
---|
1348 | 1504 | static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) |
---|
1349 | 1505 | { |
---|
| 1506 | + struct qed_chain_init_params params = { |
---|
| 1507 | + .cnt_type = QED_CHAIN_CNT_TYPE_U16, |
---|
| 1508 | + .num_elems = RX_RING_SIZE, |
---|
| 1509 | + }; |
---|
| 1510 | + struct qed_dev *cdev = edev->cdev; |
---|
1350 | 1511 | int i, rc, size; |
---|
1351 | 1512 | |
---|
1352 | 1513 | rxq->num_rx_buffers = edev->q_num_rx_buffers; |
---|
.. | .. |
---|
1361 | 1522 | if (rxq->rx_buf_size + size > PAGE_SIZE) |
---|
1362 | 1523 | rxq->rx_buf_size = PAGE_SIZE - size; |
---|
1363 | 1524 | |
---|
1364 | | - /* Segment size to spilt a page in multiple equal parts , |
---|
| 1525 | + /* Segment size to split a page in multiple equal parts, |
---|
1365 | 1526 | * unless XDP is used in which case we'd use the entire page. |
---|
1366 | 1527 | */ |
---|
1367 | 1528 | if (!edev->xdp_prog) { |
---|
.. | .. |
---|
1382 | 1543 | } |
---|
1383 | 1544 | |
---|
1384 | 1545 | /* Allocate FW Rx ring */ |
---|
1385 | | - rc = edev->ops->common->chain_alloc(edev->cdev, |
---|
1386 | | - QED_CHAIN_USE_TO_CONSUME_PRODUCE, |
---|
1387 | | - QED_CHAIN_MODE_NEXT_PTR, |
---|
1388 | | - QED_CHAIN_CNT_TYPE_U16, |
---|
1389 | | - RX_RING_SIZE, |
---|
1390 | | - sizeof(struct eth_rx_bd), |
---|
1391 | | - &rxq->rx_bd_ring, NULL); |
---|
| 1546 | + params.mode = QED_CHAIN_MODE_NEXT_PTR; |
---|
| 1547 | + params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE; |
---|
| 1548 | + params.elem_size = sizeof(struct eth_rx_bd); |
---|
| 1549 | + |
---|
| 1550 | + rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, ¶ms); |
---|
1392 | 1551 | if (rc) |
---|
1393 | 1552 | goto err; |
---|
1394 | 1553 | |
---|
1395 | 1554 | /* Allocate FW completion ring */ |
---|
1396 | | - rc = edev->ops->common->chain_alloc(edev->cdev, |
---|
1397 | | - QED_CHAIN_USE_TO_CONSUME, |
---|
1398 | | - QED_CHAIN_MODE_PBL, |
---|
1399 | | - QED_CHAIN_CNT_TYPE_U16, |
---|
1400 | | - RX_RING_SIZE, |
---|
1401 | | - sizeof(union eth_rx_cqe), |
---|
1402 | | - &rxq->rx_comp_ring, NULL); |
---|
| 1555 | + params.mode = QED_CHAIN_MODE_PBL; |
---|
| 1556 | + params.intended_use = QED_CHAIN_USE_TO_CONSUME; |
---|
| 1557 | + params.elem_size = sizeof(union eth_rx_cqe); |
---|
| 1558 | + |
---|
| 1559 | + rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_comp_ring, ¶ms); |
---|
1403 | 1560 | if (rc) |
---|
1404 | 1561 | goto err; |
---|
1405 | 1562 | |
---|
.. | .. |
---|
1436 | 1593 | /* This function allocates all memory needed per Tx queue */ |
---|
1437 | 1594 | static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) |
---|
1438 | 1595 | { |
---|
1439 | | - union eth_tx_bd_types *p_virt; |
---|
| 1596 | + struct qed_chain_init_params params = { |
---|
| 1597 | + .mode = QED_CHAIN_MODE_PBL, |
---|
| 1598 | + .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, |
---|
| 1599 | + .cnt_type = QED_CHAIN_CNT_TYPE_U16, |
---|
| 1600 | + .num_elems = edev->q_num_tx_buffers, |
---|
| 1601 | + .elem_size = sizeof(union eth_tx_bd_types), |
---|
| 1602 | + }; |
---|
1440 | 1603 | int size, rc; |
---|
1441 | 1604 | |
---|
1442 | 1605 | txq->num_tx_buffers = edev->q_num_tx_buffers; |
---|
.. | .. |
---|
1454 | 1617 | goto err; |
---|
1455 | 1618 | } |
---|
1456 | 1619 | |
---|
1457 | | - rc = edev->ops->common->chain_alloc(edev->cdev, |
---|
1458 | | - QED_CHAIN_USE_TO_CONSUME_PRODUCE, |
---|
1459 | | - QED_CHAIN_MODE_PBL, |
---|
1460 | | - QED_CHAIN_CNT_TYPE_U16, |
---|
1461 | | - txq->num_tx_buffers, |
---|
1462 | | - sizeof(*p_virt), |
---|
1463 | | - &txq->tx_pbl, NULL); |
---|
| 1620 | + rc = edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl, ¶ms); |
---|
1464 | 1621 | if (rc) |
---|
1465 | 1622 | goto err; |
---|
1466 | 1623 | |
---|
.. | .. |
---|
1559 | 1716 | return 0; |
---|
1560 | 1717 | } |
---|
1561 | 1718 | |
---|
/* Drain every pending packet from a Tx queue by software, used on the
 * recovery path where no further hardware completions will arrive.
 * Frees each outstanding packet and finally reports the totals to the
 * stack's byte-queue-limit accounting so the netdev queue state stays
 * consistent.
 */
static void qede_empty_tx_queue(struct qede_dev *edev,
				struct qede_tx_queue *txq)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct netdev_queue *netdev_txq;
	int rc, len = 0;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);

	/* Walk the chain until the consumer catches up with the producer,
	 * i.e. until no un-completed BDs remain.
	 */
	while (qed_chain_get_cons_idx(&txq->tx_pbl) !=
	       qed_chain_get_prod_idx(&txq->tx_pbl)) {
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
			   txq->index, qed_chain_get_cons_idx(&txq->tx_pbl),
			   qed_chain_get_prod_idx(&txq->tx_pbl));

		/* qede_free_tx_pkt() also advances the chain consumer;
		 * on failure stop draining rather than risk looping forever.
		 */
		rc = qede_free_tx_pkt(edev, txq, &len);
		if (rc) {
			DP_NOTICE(edev,
				  "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
				  txq->index,
				  qed_chain_get_cons_idx(&txq->tx_pbl),
				  qed_chain_get_prod_idx(&txq->tx_pbl));
			break;
		}

		bytes_compl += len;
		pkts_compl++;
		txq->sw_tx_cons++;
	}

	/* Single BQL update for the whole drain */
	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
}
---|
| 1752 | + |
---|
| 1753 | +static void qede_empty_tx_queues(struct qede_dev *edev) |
---|
| 1754 | +{ |
---|
| 1755 | + int i; |
---|
| 1756 | + |
---|
| 1757 | + for_each_queue(i) |
---|
| 1758 | + if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { |
---|
| 1759 | + int cos; |
---|
| 1760 | + |
---|
| 1761 | + for_each_cos_in_txq(edev, cos) { |
---|
| 1762 | + struct qede_fastpath *fp; |
---|
| 1763 | + |
---|
| 1764 | + fp = &edev->fp_array[i]; |
---|
| 1765 | + qede_empty_tx_queue(edev, |
---|
| 1766 | + &fp->txq[cos]); |
---|
| 1767 | + } |
---|
| 1768 | + } |
---|
| 1769 | +} |
---|
| 1770 | + |
---|
1562 | 1771 | /* This function inits fp content and resets the SB, RXQ and TXQ structures */ |
---|
1563 | 1772 | static void qede_init_fp(struct qede_dev *edev) |
---|
1564 | 1773 | { |
---|
1565 | 1774 | int queue_id, rxq_index = 0, txq_index = 0; |
---|
1566 | 1775 | struct qede_fastpath *fp; |
---|
| 1776 | + bool init_xdp = false; |
---|
1567 | 1777 | |
---|
1568 | 1778 | for_each_queue(queue_id) { |
---|
1569 | 1779 | fp = &edev->fp_array[queue_id]; |
---|
.. | .. |
---|
1575 | 1785 | fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev, |
---|
1576 | 1786 | rxq_index); |
---|
1577 | 1787 | fp->xdp_tx->is_xdp = 1; |
---|
| 1788 | + |
---|
| 1789 | + spin_lock_init(&fp->xdp_tx->xdp_tx_lock); |
---|
| 1790 | + init_xdp = true; |
---|
1578 | 1791 | } |
---|
1579 | 1792 | |
---|
1580 | 1793 | if (fp->type & QEDE_FASTPATH_RX) { |
---|
.. | .. |
---|
1590 | 1803 | /* Driver have no error path from here */ |
---|
1591 | 1804 | WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev, |
---|
1592 | 1805 | fp->rxq->rxq_id) < 0); |
---|
| 1806 | + |
---|
| 1807 | + if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq, |
---|
| 1808 | + MEM_TYPE_PAGE_ORDER0, |
---|
| 1809 | + NULL)) { |
---|
| 1810 | + DP_NOTICE(edev, |
---|
| 1811 | + "Failed to register XDP memory model\n"); |
---|
| 1812 | + } |
---|
1593 | 1813 | } |
---|
1594 | 1814 | |
---|
1595 | 1815 | if (fp->type & QEDE_FASTPATH_TX) { |
---|
.. | .. |
---|
1605 | 1825 | txq->ndev_txq_id = ndev_tx_id; |
---|
1606 | 1826 | |
---|
1607 | 1827 | if (edev->dev_info.is_legacy) |
---|
1608 | | - txq->is_legacy = 1; |
---|
| 1828 | + txq->is_legacy = true; |
---|
1609 | 1829 | txq->dev = &edev->pdev->dev; |
---|
1610 | 1830 | } |
---|
1611 | 1831 | |
---|
.. | .. |
---|
1614 | 1834 | |
---|
1615 | 1835 | snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", |
---|
1616 | 1836 | edev->ndev->name, queue_id); |
---|
| 1837 | + } |
---|
| 1838 | + |
---|
| 1839 | + if (init_xdp) { |
---|
| 1840 | + edev->total_xdp_queues = QEDE_RSS_COUNT(edev); |
---|
| 1841 | + DP_INFO(edev, "Total XDP queues: %u\n", edev->total_xdp_queues); |
---|
1617 | 1842 | } |
---|
1618 | 1843 | } |
---|
1619 | 1844 | |
---|
.. | .. |
---|
/* Stop a single Tx queue: first unhook its doorbell from the doorbell
 * recovery mechanism (must happen before the queue is torn down so a
 * doorbell replay cannot target a dead queue), then ask qed to stop it.
 * Returns the qed status of the queue-stop ramrod.
 */
static int qede_stop_txq(struct qede_dev *edev,
			 struct qede_tx_queue *txq, int rss_id)
{
	/* delete doorbell from doorbell recovery mechanism */
	edev->ops->common->db_recovery_del(edev->cdev, txq->doorbell_addr,
					   &txq->tx_db);

	return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
}
---|
1798 | 2027 | |
---|
.. | .. |
---|
1929 | 2158 | DQ_XCM_ETH_TX_BD_PROD_CMD); |
---|
1930 | 2159 | txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; |
---|
1931 | 2160 | |
---|
| 2161 | + /* register doorbell with doorbell recovery mechanism */ |
---|
| 2162 | + rc = edev->ops->common->db_recovery_add(edev->cdev, txq->doorbell_addr, |
---|
| 2163 | + &txq->tx_db, DB_REC_WIDTH_32B, |
---|
| 2164 | + DB_REC_KERNEL); |
---|
| 2165 | + |
---|
1932 | 2166 | return rc; |
---|
1933 | 2167 | } |
---|
1934 | 2168 | |
---|
.. | .. |
---|
2018 | 2252 | if (rc) |
---|
2019 | 2253 | goto out; |
---|
2020 | 2254 | |
---|
2021 | | - fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1); |
---|
2022 | | - if (IS_ERR(fp->rxq->xdp_prog)) { |
---|
2023 | | - rc = PTR_ERR(fp->rxq->xdp_prog); |
---|
2024 | | - fp->rxq->xdp_prog = NULL; |
---|
2025 | | - goto out; |
---|
2026 | | - } |
---|
| 2255 | + bpf_prog_add(edev->xdp_prog, 1); |
---|
| 2256 | + fp->rxq->xdp_prog = edev->xdp_prog; |
---|
2027 | 2257 | } |
---|
2028 | 2258 | |
---|
2029 | 2259 | if (fp->type & QEDE_FASTPATH_TX) { |
---|
.. | .. |
---|
2063 | 2293 | |
---|
/* Teardown flavors for qede_unload(). */
enum qede_unload_mode {
	QEDE_UNLOAD_NORMAL,	/* regular ifdown: reset link, stop queues */
	QEDE_UNLOAD_RECOVERY,	/* HW error recovery: skip device interaction
				 * that is unsafe while the HW is faulted and
				 * drain Tx queues in software instead
				 */
};
---|
2067 | 2298 | |
---|
2068 | 2299 | static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, |
---|
.. | .. |
---|
2076 | 2307 | if (!is_locked) |
---|
2077 | 2308 | __qede_lock(edev); |
---|
2078 | 2309 | |
---|
2079 | | - edev->state = QEDE_STATE_CLOSED; |
---|
| 2310 | + clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags); |
---|
| 2311 | + |
---|
| 2312 | + if (mode != QEDE_UNLOAD_RECOVERY) |
---|
| 2313 | + edev->state = QEDE_STATE_CLOSED; |
---|
2080 | 2314 | |
---|
2081 | 2315 | qede_rdma_dev_event_close(edev); |
---|
2082 | 2316 | |
---|
.. | .. |
---|
2084 | 2318 | netif_tx_disable(edev->ndev); |
---|
2085 | 2319 | netif_carrier_off(edev->ndev); |
---|
2086 | 2320 | |
---|
2087 | | - /* Reset the link */ |
---|
2088 | | - memset(&link_params, 0, sizeof(link_params)); |
---|
2089 | | - link_params.link_up = false; |
---|
2090 | | - edev->ops->common->set_link(edev->cdev, &link_params); |
---|
2091 | | - rc = qede_stop_queues(edev); |
---|
2092 | | - if (rc) { |
---|
2093 | | - qede_sync_free_irqs(edev); |
---|
2094 | | - goto out; |
---|
2095 | | - } |
---|
| 2321 | + if (mode != QEDE_UNLOAD_RECOVERY) { |
---|
| 2322 | + /* Reset the link */ |
---|
| 2323 | + memset(&link_params, 0, sizeof(link_params)); |
---|
| 2324 | + link_params.link_up = false; |
---|
| 2325 | + edev->ops->common->set_link(edev->cdev, &link_params); |
---|
2096 | 2326 | |
---|
2097 | | - DP_INFO(edev, "Stopped Queues\n"); |
---|
| 2327 | + rc = qede_stop_queues(edev); |
---|
| 2328 | + if (rc) { |
---|
| 2329 | + qede_sync_free_irqs(edev); |
---|
| 2330 | + goto out; |
---|
| 2331 | + } |
---|
| 2332 | + |
---|
| 2333 | + DP_INFO(edev, "Stopped Queues\n"); |
---|
| 2334 | + } |
---|
2098 | 2335 | |
---|
2099 | 2336 | qede_vlan_mark_nonconfigured(edev); |
---|
2100 | 2337 | edev->ops->fastpath_stop(edev->cdev); |
---|
2101 | 2338 | |
---|
2102 | | - if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) { |
---|
| 2339 | + if (edev->dev_info.common.b_arfs_capable) { |
---|
2103 | 2340 | qede_poll_for_freeing_arfs_filters(edev); |
---|
2104 | 2341 | qede_free_arfs(edev); |
---|
2105 | 2342 | } |
---|
.. | .. |
---|
2110 | 2347 | |
---|
2111 | 2348 | qede_napi_disable_remove(edev); |
---|
2112 | 2349 | |
---|
| 2350 | + if (mode == QEDE_UNLOAD_RECOVERY) |
---|
| 2351 | + qede_empty_tx_queues(edev); |
---|
| 2352 | + |
---|
2113 | 2353 | qede_free_mem_load(edev); |
---|
2114 | 2354 | qede_free_fp_array(edev); |
---|
2115 | 2355 | |
---|
2116 | 2356 | out: |
---|
2117 | 2357 | if (!is_locked) |
---|
2118 | 2358 | __qede_unlock(edev); |
---|
| 2359 | + |
---|
| 2360 | + if (mode != QEDE_UNLOAD_RECOVERY) |
---|
| 2361 | + DP_NOTICE(edev, "Link is down\n"); |
---|
| 2362 | + |
---|
| 2363 | + edev->ptp_skip_txts = 0; |
---|
| 2364 | + |
---|
2119 | 2365 | DP_INFO(edev, "Ending qede unload\n"); |
---|
2120 | 2366 | } |
---|
2121 | 2367 | |
---|
/* Bring-up flavors for qede_load(). */
enum qede_load_mode {
	QEDE_LOAD_NORMAL,	/* regular ifup */
	QEDE_LOAD_RELOAD,	/* re-load after a configuration change */
	QEDE_LOAD_RECOVERY,	/* re-load as part of HW error recovery */
};
---|
2126 | 2373 | |
---|
2127 | 2374 | static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, |
---|
.. | .. |
---|
2156 | 2403 | if (rc) |
---|
2157 | 2404 | goto err2; |
---|
2158 | 2405 | |
---|
2159 | | - if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) { |
---|
2160 | | - rc = qede_alloc_arfs(edev); |
---|
2161 | | - if (rc) |
---|
2162 | | - DP_NOTICE(edev, "aRFS memory allocation failed\n"); |
---|
| 2406 | + if (qede_alloc_arfs(edev)) { |
---|
| 2407 | + edev->ndev->features &= ~NETIF_F_NTUPLE; |
---|
| 2408 | + edev->dev_info.common.b_arfs_capable = false; |
---|
2163 | 2409 | } |
---|
2164 | 2410 | |
---|
2165 | 2411 | qede_napi_add_enable(edev); |
---|
.. | .. |
---|
2181 | 2427 | |
---|
2182 | 2428 | /* Program un-configured VLANs */ |
---|
2183 | 2429 | qede_configure_vlan_filters(edev); |
---|
| 2430 | + |
---|
| 2431 | + set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags); |
---|
2184 | 2432 | |
---|
2185 | 2433 | /* Ask for link-up using current configuration */ |
---|
2186 | 2434 | memset(&link_params, 0, sizeof(link_params)); |
---|
.. | .. |
---|
2254 | 2502 | if (rc) |
---|
2255 | 2503 | return rc; |
---|
2256 | 2504 | |
---|
2257 | | - udp_tunnel_get_rx_info(ndev); |
---|
| 2505 | + udp_tunnel_nic_reset_ntf(ndev); |
---|
2258 | 2506 | |
---|
2259 | 2507 | edev->ops->common->update_drv_state(edev->cdev, true); |
---|
2260 | 2508 | |
---|
.. | .. |
---|
2267 | 2515 | |
---|
2268 | 2516 | qede_unload(edev, QEDE_UNLOAD_NORMAL, false); |
---|
2269 | 2517 | |
---|
2270 | | - edev->ops->common->update_drv_state(edev->cdev, false); |
---|
| 2518 | + if (edev->cdev) |
---|
| 2519 | + edev->ops->common->update_drv_state(edev->cdev, false); |
---|
2271 | 2520 | |
---|
2272 | 2521 | return 0; |
---|
2273 | 2522 | } |
---|
.. | .. |
---|
2276 | 2525 | { |
---|
2277 | 2526 | struct qede_dev *edev = dev; |
---|
2278 | 2527 | |
---|
2279 | | - if (!netif_running(edev->ndev)) { |
---|
2280 | | - DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n"); |
---|
| 2528 | + if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) { |
---|
| 2529 | + DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n"); |
---|
2281 | 2530 | return; |
---|
2282 | 2531 | } |
---|
2283 | 2532 | |
---|
.. | .. |
---|
2296 | 2545 | qede_rdma_dev_event_close(edev); |
---|
2297 | 2546 | } |
---|
2298 | 2547 | } |
---|
| 2548 | +} |
---|
| 2549 | + |
---|
/* qed callback requesting a device recovery. Defers the actual handling
 * to the slowpath task (process context); does nothing if a recovery is
 * already in progress.
 *
 * @dev: opaque qed cookie, actually the qede_dev registered in probe.
 */
static void qede_schedule_recovery_handler(void *dev)
{
	struct qede_dev *edev = dev;

	if (edev->state == QEDE_STATE_RECOVERY) {
		DP_NOTICE(edev,
			  "Avoid scheduling a recovery handling since already in recovery state\n");
		return;
	}

	/* qede_sp_task() picks up QEDE_SP_RECOVERY and runs
	 * qede_recovery_handler() under the qede lock.
	 */
	set_bit(QEDE_SP_RECOVERY, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	DP_INFO(edev, "Scheduled a recovery handler\n");
}
---|
| 2565 | + |
---|
/* Terminal failure path of the recovery flow: detach the netdev from the
 * stack and power the device down - only a power cycle can revive it.
 * edev->cdev may already be NULL if the recovery re-probe failed, hence
 * the guard before touching the qed common ops.
 */
static void qede_recovery_failed(struct qede_dev *edev)
{
	netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n");

	netif_device_detach(edev->ndev);

	if (edev->cdev)
		edev->ops->common->set_power_state(edev->cdev, PCI_D3hot);
}
---|
| 2575 | + |
---|
/* Execute a full device recovery: tear the device down in recovery mode
 * (preserving the netdev and user configuration) and then probe it again.
 * If the interface was open before the error, it is re-loaded and its Rx
 * mode / UDP tunnel configuration re-applied. Any failure funnels into
 * qede_recovery_failed(), which detaches the device permanently.
 *
 * Runs from qede_sp_task(), which already holds the qede lock.
 */
static void qede_recovery_handler(struct qede_dev *edev)
{
	/* Saved so the pre-error state (e.g. QEDE_STATE_OPEN) can be
	 * restored once recovery completes.
	 */
	u32 curr_state = edev->state;
	int rc;

	DP_NOTICE(edev, "Starting a recovery process\n");

	/* No need to acquire first the qede_lock since is done by qede_sp_task
	 * before calling this function.
	 */
	edev->state = QEDE_STATE_RECOVERY;

	edev->ops->common->recovery_prolog(edev->cdev);

	if (curr_state == QEDE_STATE_OPEN)
		qede_unload(edev, QEDE_UNLOAD_RECOVERY, true);

	/* Recovery-mode remove/probe keep the netdev and edev alive */
	__qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY);

	rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level,
			  IS_VF(edev), QEDE_PROBE_RECOVERY);
	if (rc) {
		edev->cdev = NULL;
		goto err;
	}

	if (curr_state == QEDE_STATE_OPEN) {
		rc = qede_load(edev, QEDE_LOAD_RECOVERY, true);
		if (rc)
			goto err;

		/* Restore filtering and tunnel-port config lost in the
		 * remove/probe cycle.
		 */
		qede_config_rx_mode(edev->ndev);
		udp_tunnel_nic_reset_ntf(edev->ndev);
	}

	edev->state = curr_state;

	DP_NOTICE(edev, "Recovery handling is done\n");

	return;

err:
	qede_recovery_failed(edev);
}
---|
| 2620 | + |
---|
/* Immediate, non-sleeping part of HW error handling. May be invoked from
 * atomic context (e.g. directly from the error-notification path), so it
 * is restricted to operations that cannot sleep; the sleepable part runs
 * later from the slowpath task. Acts according to the err_flags set by
 * qede_set_hw_err_flags().
 */
static void qede_atomic_hw_err_handler(struct qede_dev *edev)
{
	struct qed_dev *cdev = edev->cdev;

	DP_NOTICE(edev,
		  "Generic non-sleepable HW error handling started - err_flags 0x%lx\n",
		  edev->err_flags);

	/* Get a call trace of the flow that led to the error */
	WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags));

	/* Prevent HW attentions from being reasserted */
	if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags))
		edev->ops->common->attn_clr_enable(cdev, true);

	DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n");
}
---|
| 2638 | + |
---|
/* Sleepable part of HW error handling, run from process context (slowpath
 * task). Reports the fatal error through devlink health (when available)
 * and then clears QEDE_ERR_IS_HANDLED so subsequent errors can be
 * scheduled again.
 */
static void qede_generic_hw_err_handler(struct qede_dev *edev)
{
	DP_NOTICE(edev,
		  "Generic sleepable HW error handling started - err_flags 0x%lx\n",
		  edev->err_flags);

	if (edev->devlink)
		edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type);

	clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);

	DP_NOTICE(edev, "Generic sleepable HW error handling is done\n");
}
---|
| 2652 | + |
---|
| 2653 | +static void qede_set_hw_err_flags(struct qede_dev *edev, |
---|
| 2654 | + enum qed_hw_err_type err_type) |
---|
| 2655 | +{ |
---|
| 2656 | + unsigned long err_flags = 0; |
---|
| 2657 | + |
---|
| 2658 | + switch (err_type) { |
---|
| 2659 | + case QED_HW_ERR_DMAE_FAIL: |
---|
| 2660 | + set_bit(QEDE_ERR_WARN, &err_flags); |
---|
| 2661 | + fallthrough; |
---|
| 2662 | + case QED_HW_ERR_MFW_RESP_FAIL: |
---|
| 2663 | + case QED_HW_ERR_HW_ATTN: |
---|
| 2664 | + case QED_HW_ERR_RAMROD_FAIL: |
---|
| 2665 | + case QED_HW_ERR_FW_ASSERT: |
---|
| 2666 | + set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags); |
---|
| 2667 | + set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags); |
---|
| 2668 | + break; |
---|
| 2669 | + |
---|
| 2670 | + default: |
---|
| 2671 | + DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type); |
---|
| 2672 | + break; |
---|
| 2673 | + } |
---|
| 2674 | + |
---|
| 2675 | + edev->err_flags |= err_flags; |
---|
| 2676 | +} |
---|
| 2677 | + |
---|
/* qed callback notifying of a HW error. Performs the atomic part of the
 * handling inline and defers the sleepable part to the slowpath task.
 *
 * @dev:      opaque qed cookie, actually the qede_dev registered in probe.
 * @err_type: the qed-reported error classification.
 *
 * NOTE: test_and_set_bit(QEDE_ERR_IS_HANDLED) both checks and claims the
 * "an error is being handled" state as a side effect of evaluating the
 * condition; the bit is cleared again either here (unknown err_type) or
 * by qede_generic_hw_err_handler() once handling completes.
 */
static void qede_schedule_hw_err_handler(void *dev,
					 enum qed_hw_err_type err_type)
{
	struct qede_dev *edev = dev;

	/* Fan failure cannot be masked by handling of another HW error or by a
	 * concurrent recovery process.
	 */
	if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
	     edev->state == QEDE_STATE_RECOVERY) &&
	     err_type != QED_HW_ERR_FAN_FAIL) {
		DP_INFO(edev,
			"Avoid scheduling an error handling while another HW error is being handled\n");
		return;
	}

	if (err_type >= QED_HW_ERR_LAST) {
		DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type);
		/* Release the claim taken by test_and_set_bit() above */
		clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
		return;
	}

	edev->last_err_type = err_type;
	qede_set_hw_err_flags(edev, err_type);
	/* Non-sleeping handling runs immediately; the sleepable remainder
	 * is picked up by qede_sp_task() via QEDE_SP_HW_ERR.
	 */
	qede_atomic_hw_err_handler(edev);
	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	DP_INFO(edev, "Scheduled a error handler [err_type %d]\n", err_type);
}
---|
2300 | 2708 | |
---|
2301 | 2709 | static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq) |
---|
.. | .. |
---|
2321 | 2729 | data->feat_flags |= QED_TLV_LSO; |
---|
2322 | 2730 | |
---|
2323 | 2731 | ether_addr_copy(data->mac[0], edev->ndev->dev_addr); |
---|
2324 | | - memset(data->mac[1], 0, ETH_ALEN); |
---|
2325 | | - memset(data->mac[2], 0, ETH_ALEN); |
---|
| 2732 | + eth_zero_addr(data->mac[1]); |
---|
| 2733 | + eth_zero_addr(data->mac[2]); |
---|
2326 | 2734 | /* Copy the first two UC macs */ |
---|
2327 | 2735 | netif_addr_lock_bh(edev->ndev); |
---|
2328 | 2736 | i = 1; |
---|
.. | .. |
---|
2395 | 2803 | etlv->num_txqs_full_set = true; |
---|
2396 | 2804 | etlv->num_rxqs_full_set = true; |
---|
2397 | 2805 | } |
---|
| 2806 | + |
---|
/**
 * qede_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected. It quiesces Tx and defers the actual
 * AER handling to the slowpath task.
 *
 * Return: PCI_ERS_RESULT_NONE if there is nothing to do (no edev, or a
 * recovery is already running), PCI_ERS_RESULT_RECOVERED for a VF (its
 * PF performs the recovery), PCI_ERS_RESULT_CAN_RECOVER otherwise.
 */
static pci_ers_result_t
qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev)
		return PCI_ERS_RESULT_NONE;

	DP_NOTICE(edev, "IO error detected [%d]\n", state);

	/* Serialize against load/unload and other state transitions */
	__qede_lock(edev);
	if (edev->state == QEDE_STATE_RECOVERY) {
		DP_NOTICE(edev, "Device already in the recovery state\n");
		__qede_unlock(edev);
		return PCI_ERS_RESULT_NONE;
	}

	/* PF handles the recovery of its VFs */
	if (IS_VF(edev)) {
		DP_VERBOSE(edev, QED_MSG_IOV,
			   "VF recovery is handled by its PF\n");
		__qede_unlock(edev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	/* Close OS Tx */
	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	/* Actual AER handling is done asynchronously by qede_sp_task() */
	set_bit(QEDE_SP_AER, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	__qede_unlock(edev);

	return PCI_ERS_RESULT_CAN_RECOVER;
}
---|