From 297b60346df8beafee954a0fd7c2d64f33f3b9bc Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 11 May 2024 01:44:05 +0000
Subject: [PATCH] qed: update MCP/MFW interface code in qed_mcp.c
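
Replace the dual-license boilerplate in qed_mcp.c with an SPDX
identifier and pull in the Marvell-era MCP/MFW interface updates:
LOAD_DONE and virtual-link (VLINK) handling, FEC and extended-speed
link configuration, process-kill/error-recovery support, mdump,
fan-failure and critical-error notifications, transceiver and board
configuration queries, engine-config and PPFID bitmap discovery,
NVM config get/set, and MFW debug-data transfer.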
---
kernel/drivers/net/ethernet/qlogic/qed/qed_mcp.c | 1055 +++++++++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 929 insertions(+), 126 deletions(-)
diff --git a/kernel/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/kernel/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index d16cadc..2cd14ee 100644
--- a/kernel/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/kernel/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
*/
#include <linux/types.h>
@@ -47,6 +21,8 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
+
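+/* GRC base address of the MCP (management firmware CPU) block */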
+#define GRCBASE_MCP 0xe00000
#define QED_MCP_RESP_ITER_US 10
@@ -583,6 +559,8 @@
if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
qed_mcp_cmd_set_blocking(p_hwfn, true);
+ qed_hw_err_notify(p_hwfn, p_ptt,
+ QED_HW_ERR_MFW_RESP_FAIL, NULL);
return -EAGAIN;
}
@@ -1081,6 +1059,27 @@
return 0;
}
+int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
+ ¶m);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send a LOAD_DONE command, rc = %d\n", rc);
+ return rc;
+ }
+
+ /* Check if there is a DID mismatch between nvm-cfg/efuse */
+ if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
+ DP_NOTICE(p_hwfn,
+ "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
+
+ return 0;
+}
+
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_mb_params mb_params;
@@ -1097,7 +1096,7 @@
DP_NOTICE(p_hwfn,
"Unknown WoL configuration %02x\n",
p_hwfn->cdev->wol_config);
- /* Fallthrough */
+ fallthrough;
case QED_OV_WOL_DEFAULT:
wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
}
@@ -1258,6 +1257,52 @@
p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
}
+static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct public_func *p_data, int pfid)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr;
+ u32 i, size;
+
+ func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+ memset(p_data, 0, sizeof(*p_data));
+
+ size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
+ for (i = 0; i < size / sizeof(u32); i++)
+ ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
+ func_addr + (i << 2));
+ return size;
+}
+
+static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
+ struct public_func *p_shmem_info)
+{
+ struct qed_mcp_function_info *p_info;
+
+ p_info = &p_hwfn->mcp_info->func_info;
+
+ p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config,
+ FUNC_MF_CFG_MIN_BW);
+ if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+ p_info->bandwidth_min);
+ p_info->bandwidth_min = 1;
+ }
+
+ p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config,
+ FUNC_MF_CFG_MAX_BW);
+ if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+ p_info->bandwidth_max);
+ p_info->bandwidth_max = 100;
+ }
+}
+
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool b_reset)
{
@@ -1285,10 +1330,29 @@
goto out;
}
- if (p_hwfn->b_drv_link_init)
- p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
- else
+ if (p_hwfn->b_drv_link_init) {
+ /* Link indication with modern MFW arrives as per-PF
+ * indication.
+ */
+ if (p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
+ struct public_func shmem_info;
+
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+ p_link->link_up = !!(shmem_info.status &
+ FUNC_STATUS_VIRTUAL_LINK_UP);
+ qed_read_pf_bandwidth(p_hwfn, &shmem_info);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Virtual link_up = %d\n", p_link->link_up);
+ } else {
+ p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Physical link_up = %d\n", p_link->link_up);
+ }
+ } else {
p_link->link_up = false;
+ }
p_link->full_duplex = true;
switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
@@ -1312,7 +1376,7 @@
break;
case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
p_link->full_duplex = false;
- /* Fall-through */
+ fallthrough;
case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
p_link->speed = 1000;
break;
@@ -1393,6 +1457,25 @@
if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
+ if (p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
+ switch (status & LINK_STATUS_FEC_MODE_MASK) {
+ case LINK_STATUS_FEC_MODE_NONE:
+ p_link->fec_active = QED_FEC_MODE_NONE;
+ break;
+ case LINK_STATUS_FEC_MODE_FIRECODE_CL74:
+ p_link->fec_active = QED_FEC_MODE_FIRECODE;
+ break;
+ case LINK_STATUS_FEC_MODE_RS_CL91:
+ p_link->fec_active = QED_FEC_MODE_RS;
+ break;
+ default:
+ p_link->fec_active = QED_FEC_MODE_AUTO;
+ }
+ } else {
+ p_link->fec_active = QED_FEC_MODE_UNSUPPORTED;
+ }
+
qed_link_update(p_hwfn, p_ptt);
out:
spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
@@ -1403,8 +1486,9 @@
struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
struct qed_mcp_mb_params mb_params;
struct eth_phy_cfg phy_cfg;
+ u32 cmd, fec_bit = 0;
+ u32 val, ext_speed;
int rc = 0;
- u32 cmd;
/* Set the shmem configuration according to params */
memset(&phy_cfg, 0, sizeof(phy_cfg));
@@ -1436,19 +1520,91 @@
EEE_TX_TIMER_USEC_MASK;
}
+ if (p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
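+ /* Map the requested QED FEC mode onto the MFW forced-FEC field */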
+ if (params->fec & QED_FEC_MODE_NONE)
+ fec_bit |= FEC_FORCE_MODE_NONE;
+ else if (params->fec & QED_FEC_MODE_FIRECODE)
+ fec_bit |= FEC_FORCE_MODE_FIRECODE;
+ else if (params->fec & QED_FEC_MODE_RS)
+ fec_bit |= FEC_FORCE_MODE_RS;
+ else if (params->fec & QED_FEC_MODE_AUTO)
+ fec_bit |= FEC_FORCE_MODE_AUTO;
+
+ SET_MFW_FIELD(phy_cfg.fec_mode, FEC_FORCE_MODE, fec_bit);
+ }
+
+ if (p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) {
+ ext_speed = 0;
+ if (params->ext_speed.autoneg)
+ ext_speed |= ETH_EXT_SPEED_AN;
+
+ val = params->ext_speed.forced_speed;
+ if (val & QED_EXT_SPEED_1G)
+ ext_speed |= ETH_EXT_SPEED_1G;
+ if (val & QED_EXT_SPEED_10G)
+ ext_speed |= ETH_EXT_SPEED_10G;
+ if (val & QED_EXT_SPEED_20G)
+ ext_speed |= ETH_EXT_SPEED_20G;
+ if (val & QED_EXT_SPEED_25G)
+ ext_speed |= ETH_EXT_SPEED_25G;
+ if (val & QED_EXT_SPEED_40G)
+ ext_speed |= ETH_EXT_SPEED_40G;
+ if (val & QED_EXT_SPEED_50G_R)
+ ext_speed |= ETH_EXT_SPEED_50G_BASE_R;
+ if (val & QED_EXT_SPEED_50G_R2)
+ ext_speed |= ETH_EXT_SPEED_50G_BASE_R2;
+ if (val & QED_EXT_SPEED_100G_R2)
+ ext_speed |= ETH_EXT_SPEED_100G_BASE_R2;
+ if (val & QED_EXT_SPEED_100G_R4)
+ ext_speed |= ETH_EXT_SPEED_100G_BASE_R4;
+ if (val & QED_EXT_SPEED_100G_P4)
+ ext_speed |= ETH_EXT_SPEED_100G_BASE_P4;
+
+ SET_MFW_FIELD(phy_cfg.extended_speed, ETH_EXT_SPEED,
+ ext_speed);
+
+ ext_speed = 0;
+
+ val = params->ext_speed.advertised_speeds;
+ if (val & QED_EXT_SPEED_MASK_1G)
+ ext_speed |= ETH_EXT_ADV_SPEED_1G;
+ if (val & QED_EXT_SPEED_MASK_10G)
+ ext_speed |= ETH_EXT_ADV_SPEED_10G;
+ if (val & QED_EXT_SPEED_MASK_20G)
+ ext_speed |= ETH_EXT_ADV_SPEED_20G;
+ if (val & QED_EXT_SPEED_MASK_25G)
+ ext_speed |= ETH_EXT_ADV_SPEED_25G;
+ if (val & QED_EXT_SPEED_MASK_40G)
+ ext_speed |= ETH_EXT_ADV_SPEED_40G;
+ if (val & QED_EXT_SPEED_MASK_50G_R)
+ ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R;
+ if (val & QED_EXT_SPEED_MASK_50G_R2)
+ ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R2;
+ if (val & QED_EXT_SPEED_MASK_100G_R2)
+ ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R2;
+ if (val & QED_EXT_SPEED_MASK_100G_R4)
+ ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R4;
+ if (val & QED_EXT_SPEED_MASK_100G_P4)
+ ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_P4;
+
+ phy_cfg.extended_speed |= ext_speed;
+
+ SET_MFW_FIELD(phy_cfg.fec_mode, FEC_EXTENDED_MODE,
+ params->ext_fec_mode);
+ }
+
p_hwfn->b_drv_link_init = b_up;
if (b_up) {
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
- "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
- phy_cfg.speed,
- phy_cfg.pause,
- phy_cfg.adv_speed,
- phy_cfg.loopback_mode,
- phy_cfg.feature_config_flags);
+ "Configuring Link: Speed 0x%08x, Pause 0x%08x, Adv. Speed 0x%08x, Loopback 0x%08x, FEC 0x%08x, Ext. Speed 0x%08x\n",
+ phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
+ phy_cfg.loopback_mode, phy_cfg.fec_mode,
+ phy_cfg.extended_speed);
} else {
- DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
- "Resetting link\n");
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link\n");
}
memset(&mb_params, 0, sizeof(mb_params));
@@ -1472,6 +1628,60 @@
qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
return 0;
+}
+
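+/* Read the per-path process-kill counter from the MFW 'public_path' shmem */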
+u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_PATH);
+ path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr);
+ path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn));
+
+ proc_kill_cnt = qed_rd(p_hwfn, p_ptt,
+ path_addr +
+ offsetof(struct public_path, process_kill)) &
+ PROCESS_KILL_COUNTER_MASK;
+
+ return proc_kill_cnt;
+}
+
+static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u32 proc_kill_cnt;
+
+ /* Prevent possible attentions/interrupts during the recovery handling
+ * and until its load phase, during which they will be re-enabled.
+ */
+ qed_int_igu_disable_int(p_hwfn, p_ptt);
+
+ DP_NOTICE(p_hwfn, "Received a process kill indication\n");
+
+ /* The following operations should be done once, and thus in CMT mode
+ * are carried out by only the first HW function.
+ */
+ if (p_hwfn != QED_LEADING_HWFN(cdev))
+ return;
+
+ if (cdev->recov_in_prog) {
+ DP_NOTICE(p_hwfn,
+ "Ignoring the indication since a recovery process is already in progress\n");
+ return;
+ }
+
+ cdev->recov_in_prog = true;
+
+ proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt);
+ DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt);
+
+ qed_schedule_recovery_handler(p_hwfn);
}
static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
@@ -1513,53 +1723,6 @@
mb_params.p_data_src = &stats;
mb_params.data_src_size = sizeof(stats);
qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
-}
-
-static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
- struct public_func *p_shmem_info)
-{
- struct qed_mcp_function_info *p_info;
-
- p_info = &p_hwfn->mcp_info->func_info;
-
- p_info->bandwidth_min = (p_shmem_info->config &
- FUNC_MF_CFG_MIN_BW_MASK) >>
- FUNC_MF_CFG_MIN_BW_SHIFT;
- if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
- DP_INFO(p_hwfn,
- "bandwidth minimum out of bounds [%02x]. Set to 1\n",
- p_info->bandwidth_min);
- p_info->bandwidth_min = 1;
- }
-
- p_info->bandwidth_max = (p_shmem_info->config &
- FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT;
- if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
- DP_INFO(p_hwfn,
- "bandwidth maximum out of bounds [%02x]. Set to 100\n",
- p_info->bandwidth_max);
- p_info->bandwidth_max = 100;
- }
-}
-
-static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct public_func *p_data, int pfid)
-{
- u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
- PUBLIC_FUNC);
- u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
- u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
- u32 i, size;
-
- memset(p_data, 0, sizeof(*p_data));
-
- size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
- for (i = 0; i < size / sizeof(u32); i++)
- ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
- func_addr + (i << 2));
- return size;
}
static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -1612,12 +1775,133 @@
qed_sp_pf_update_stag(p_hwfn);
}
- DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
/* Acknowledge the MFW */
qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
&resp, ¶m);
+}
+
+static void qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ /* A single notification should be sent to upper driver in CMT mode */
+ if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
+ return;
+
+ qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_FAN_FAIL,
+ "Fan failure was detected on the network interface card and it's going to be shut down.\n");
+}
+
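+/* Parameters for a single DRV_MSG_CODE_MDUMP_CMD mailbox exchange */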
+struct qed_mdump_cmd_params {
+ u32 cmd;
+ void *p_data_src;
+ u8 data_src_size;
+ void *p_data_dst;
+ u8 data_dst_size;
+ u32 mcp_resp;
+};
+
+static int
+qed_mcp_mdump_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mdump_cmd_params *p_mdump_cmd_params)
+{
+ struct qed_mcp_mb_params mb_params;
+ int rc;
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
+ mb_params.param = p_mdump_cmd_params->cmd;
+ mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
+ mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
+ mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
+ mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
+
+ if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
+ DP_INFO(p_hwfn,
+ "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
+ p_mdump_cmd_params->cmd);
+ rc = -EOPNOTSUPP;
+ } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The mdump command is not supported by the MFW\n");
+ rc = -EOPNOTSUPP;
+ }
+
+ return rc;
+}
+
+static int qed_mcp_mdump_ack(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mdump_cmd_params mdump_cmd_params;
+
+ memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
+
+ return qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+}
+
+int
+qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct mdump_retain_data_stc *p_mdump_retain)
+{
+ struct qed_mdump_cmd_params mdump_cmd_params;
+ int rc;
+
+ memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
+ mdump_cmd_params.p_data_dst = p_mdump_retain;
+ mdump_cmd_params.data_dst_size = sizeof(*p_mdump_retain);
+
+ rc = qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+ if (rc)
+ return rc;
+
+ if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
+ DP_INFO(p_hwfn,
+ "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
+ mdump_cmd_params.mcp_resp);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void qed_mcp_handle_critical_error(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct mdump_retain_data_stc mdump_retain;
+ int rc;
+
+ /* In CMT mode - no need for more than a single acknowledgment to the
+ * MFW, and no more than a single notification to the upper driver.
+ */
+ if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
+ return;
+
+ rc = qed_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
+ if (rc == 0 && mdump_retain.valid)
+ DP_NOTICE(p_hwfn,
+ "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
+ mdump_retain.epoch,
+ mdump_retain.pf, mdump_retain.status);
+ else
+ DP_NOTICE(p_hwfn,
+ "The MFW notified that a critical error occurred in the device\n");
+
+ DP_NOTICE(p_hwfn,
+ "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
+ qed_mcp_mdump_ack(p_hwfn, p_ptt);
+
+ qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_HW_ATTN, NULL);
}
void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -1634,7 +1918,9 @@
val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
OEM_CFG_CHANNEL_TYPE_OFFSET;
if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
- DP_NOTICE(p_hwfn, "Incorrect UFP Channel type %d\n", val);
+ DP_NOTICE(p_hwfn,
+ "Incorrect UFP Channel type %d port_id 0x%02x\n",
+ val, MFW_PORT(p_hwfn));
val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
if (val == OEM_CFG_SCHED_TYPE_ETS) {
@@ -1643,7 +1929,9 @@
p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
} else {
p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
- DP_NOTICE(p_hwfn, "Unknown UFP scheduling mode %d\n", val);
+ DP_NOTICE(p_hwfn,
+ "Unknown UFP scheduling mode %d port_id 0x%02x\n",
+ val, MFW_PORT(p_hwfn));
}
qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
@@ -1658,13 +1946,15 @@
p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
} else {
p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
- DP_NOTICE(p_hwfn, "Unknown Host priority control %d\n", val);
+ DP_NOTICE(p_hwfn,
+ "Unknown Host priority control %d port_id 0x%02x\n",
+ val, MFW_PORT(p_hwfn));
}
DP_NOTICE(p_hwfn,
- "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
- p_hwfn->ufp_info.mode,
- p_hwfn->ufp_info.tc, p_hwfn->ufp_info.pri_type);
+ "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n",
+ p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
+ p_hwfn->ufp_info.pri_type, MFW_PORT(p_hwfn));
}
static int
@@ -1745,6 +2035,9 @@
case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_ERROR_RECOVERY:
+ qed_mcp_handle_process_kill(p_hwfn, p_ptt);
+ break;
case MFW_DRV_MSG_GET_LAN_STATS:
case MFW_DRV_MSG_GET_FCOE_STATS:
case MFW_DRV_MSG_GET_ISCSI_STATS:
@@ -1756,6 +2049,12 @@
break;
case MFW_DRV_MSG_S_TAG_UPDATE:
qed_mcp_update_stag(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_FAILURE_DETECTED:
+ qed_mcp_handle_fan_failure(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
+ qed_mcp_handle_critical_error(p_hwfn, p_ptt);
break;
case MFW_DRV_MSG_GET_TLV_REQ:
qed_mfw_tlv_req(p_hwfn);
@@ -1863,6 +2162,8 @@
int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *p_media_type)
{
+ *p_media_type = MEDIA_UNSPECIFIED;
+
if (IS_VF(p_hwfn->cdev))
return -EINVAL;
@@ -1880,6 +2181,196 @@
p_hwfn->mcp_info->port_addr +
offsetof(struct public_port,
media_type));
+
+ return 0;
+}
+
+int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_transceiver_state,
+ u32 *p_transceiver_type)
+{
+ u32 transceiver_info;
+
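+ /* Set fail-safe defaults so the outputs are defined on early return */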
+ *p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
+ *p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
+ return -EBUSY;
+ }
+
+ transceiver_info = qed_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ transceiver_data));
+
+ *p_transceiver_state = (transceiver_info &
+ ETH_TRANSCEIVER_STATE_MASK) >>
+ ETH_TRANSCEIVER_STATE_OFFSET;
+
+ if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
+ *p_transceiver_type = (transceiver_info &
+ ETH_TRANSCEIVER_TYPE_MASK) >>
+ ETH_TRANSCEIVER_TYPE_OFFSET;
+ else
+ *p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;
+
+ return 0;
+}
+
+static bool qed_is_transceiver_ready(u32 transceiver_state,
+ u32 transceiver_type)
+{
+ if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
+ ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
+ (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
+ return true;
+
+ return false;
+}
+
+int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_speed_mask)
+{
+ u32 transceiver_type, transceiver_state;
+ int ret;
+
+ ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
+ &transceiver_type);
+ if (ret)
+ return ret;
+
+ if (!qed_is_transceiver_ready(transceiver_state, transceiver_type))
+ return -EINVAL;
+
+ switch (transceiver_type) {
+ case ETH_TRANSCEIVER_TYPE_1G_LX:
+ case ETH_TRANSCEIVER_TYPE_1G_SX:
+ case ETH_TRANSCEIVER_TYPE_1G_PCC:
+ case ETH_TRANSCEIVER_TYPE_1G_ACC:
+ case ETH_TRANSCEIVER_TYPE_1000BASET:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_10G_SR:
+ case ETH_TRANSCEIVER_TYPE_10G_LR:
+ case ETH_TRANSCEIVER_TYPE_10G_LRM:
+ case ETH_TRANSCEIVER_TYPE_10G_ER:
+ case ETH_TRANSCEIVER_TYPE_10G_PCC:
+ case ETH_TRANSCEIVER_TYPE_10G_ACC:
+ case ETH_TRANSCEIVER_TYPE_4x10G:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_40G_LR4:
+ case ETH_TRANSCEIVER_TYPE_40G_SR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_100G_AOC:
+ case ETH_TRANSCEIVER_TYPE_100G_SR4:
+ case ETH_TRANSCEIVER_TYPE_100G_LR4:
+ case ETH_TRANSCEIVER_TYPE_100G_ER4:
+ case ETH_TRANSCEIVER_TYPE_100G_ACC:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_25G_SR:
+ case ETH_TRANSCEIVER_TYPE_25G_LR:
+ case ETH_TRANSCEIVER_TYPE_25G_AOC:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_25G_CA_N:
+ case ETH_TRANSCEIVER_TYPE_25G_CA_S:
+ case ETH_TRANSCEIVER_TYPE_25G_CA_L:
+ case ETH_TRANSCEIVER_TYPE_4x25G_CR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_40G_CR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_100G_CR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_XLPPI:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_10G_BASET:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+ default:
+ DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
+ transceiver_type);
+ *p_speed_mask = 0xff;
+ break;
+ }
+
+ return 0;
+}
+
+int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_board_config)
+{
+ u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
+ return -EBUSY;
+ }
+
+ if (!p_ptt) {
+ *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
+ return -EINVAL;
+ }
+
+ nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+ nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+ port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+ *p_board_config = qed_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ offsetof(struct nvm_cfg1_port,
+ board_cfg));
return 0;
}
@@ -1971,7 +2462,7 @@
break;
case FUNC_MF_CFG_PROTOCOL_ROCE:
DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
- /* Fallthrough */
+ fallthrough;
default:
rc = -EINVAL;
}
@@ -2038,11 +2529,10 @@
}
DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
- "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
+ "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %pM wwn port %llx node %llx ovlan %04x wol %02x\n",
info->pause_on_host, info->protocol,
info->bandwidth_min, info->bandwidth_max,
- info->mac[0], info->mac[1], info->mac[2],
- info->mac[3], info->mac[4], info->mac[5],
+ info->mac,
info->wwn_port, info->wwn_node,
info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);
@@ -2103,6 +2593,43 @@
*p_flash_size = flash_size;
return 0;
+}
+
+int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+
+ if (cdev->recov_in_prog) {
+ DP_NOTICE(p_hwfn,
+ "Avoid triggering a recovery since such a process is already in progress\n");
+ return -EAGAIN;
+ }
+
+ DP_NOTICE(p_hwfn, "Triggering a recovery process\n");
+ qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
+
+ return 0;
+}
+
+#define QED_RECOVERY_PROLOG_SLEEP_MS 100
+
+int qed_recovery_prolog(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+ int rc;
+
+ /* Allow ongoing PCIe transactions to complete */
+ msleep(QED_RECOVERY_PROLOG_SLEEP_MS);
+
+ /* Clear the PF's internal FID_enable in the PXP */
+ rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
+ rc);
+
+ return rc;
}
static int
@@ -2547,24 +3074,6 @@
return 0;
}
-int qed_mcp_nvm_put_file_begin(struct qed_dev *cdev, u32 addr)
-{
- struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
- struct qed_ptt *p_ptt;
- u32 resp, param;
- int rc;
-
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt)
- return -EBUSY;
- rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
- &resp, ¶m);
- cdev->mcp_nvm_resp = resp;
- qed_ptt_release(p_hwfn, p_ptt);
-
- return rc;
-}
-
int qed_mcp_nvm_write(struct qed_dev *cdev,
u32 cmd, u32 addr, u8 *p_buf, u32 len)
{
@@ -2578,6 +3087,9 @@
return -EBUSY;
switch (cmd) {
+ case QED_PUT_FILE_BEGIN:
+ nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
+ break;
case QED_PUT_FILE_DATA:
nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
break;
@@ -2590,10 +3102,14 @@
goto out;
}
+ buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
while (buf_idx < len) {
- buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
- nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
- addr) + buf_idx;
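+ /* PUT_FILE_BEGIN passes only the file address; no length or
+ * running buffer offset is encoded in the mailbox parameter.
+ */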
+ if (cmd == QED_PUT_FILE_BEGIN)
+ nvm_offset = addr;
+ else
+ nvm_offset = ((buf_size <<
+ DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) +
+ buf_idx;
rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
&resp, ¶m, buf_size,
(u32 *)&p_buf[buf_idx]);
@@ -2618,7 +3134,19 @@
if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000)
usleep_range(1000, 2000);
- buf_idx += buf_size;
+ /* For MBI upgrade, MFW response includes the next buffer offset
+ * to be delivered to MFW.
+ */
+ if (param && cmd == QED_PUT_FILE_DATA) {
+ buf_idx = QED_MFW_GET_FIELD(param,
+ FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
+ buf_size = QED_MFW_GET_FIELD(param,
+ FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
+ } else {
+ buf_idx += buf_size;
+ buf_size = min_t(u32, (len - buf_idx),
+ MCP_DRV_NVM_BUF_LEN);
+ }
}
cdev->mcp_nvm_resp = resp;
@@ -2835,6 +3363,13 @@
return rc;
}
+void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->nvm_info.image_att);
+ p_hwfn->nvm_info.image_att = NULL;
+ p_hwfn->nvm_info.valid = false;
+}
+
int
qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
enum qed_nvm_images image_id,
@@ -2851,6 +3386,9 @@
break;
case QED_NVM_IMAGE_FCOE_CFG:
type = NVM_TYPE_FCOE_CFG;
+ break;
+ case QED_NVM_IMAGE_MDUMP:
+ type = NVM_TYPE_MDUMP;
break;
case QED_NVM_IMAGE_NVM_CFG1:
type = NVM_TYPE_NVM_CFG1;
@@ -2951,8 +3489,11 @@
case QED_ILT:
mfw_res_id = RESOURCE_ILT_E;
break;
- case QED_LL2_QUEUE:
+ case QED_LL2_RAM_QUEUE:
mfw_res_id = RESOURCE_LL2_QUEUE_E;
+ break;
+ case QED_LL2_CTX_QUEUE:
+ mfw_res_id = RESOURCE_LL2_CQS_E;
break;
case QED_RDMA_CNQ_RAM:
case QED_CMDQS_CQS:
@@ -3020,7 +3561,7 @@
switch (p_in_params->cmd) {
case DRV_MSG_SET_RESOURCE_VALUE_MSG:
mfw_resc_info.size = p_in_params->resc_max_val;
- /* Fallthrough */
+ fallthrough;
case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
break;
default:
@@ -3297,7 +3838,7 @@
DP_INFO(p_hwfn,
"Resource unlock request for an already released resource [%d]\n",
p_params->resource);
- /* Fallthrough */
+ fallthrough;
case RESOURCE_OPCODE_RELEASED:
p_params->b_released = true;
break;
@@ -3344,6 +3885,12 @@
}
}
+bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn)
+{
+ return !!(p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
+}
+
int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 mcp_resp;
@@ -3363,8 +3910,264 @@
{
u32 mcp_resp, mcp_param, features;
- features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;
+ features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
+ DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK |
+ DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL;
+
+ if (QED_IS_E5(p_hwfn->cdev))
+ features |=
+ DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL;
return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
features, &mcp_resp, &mcp_param);
}
+
+int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_mb_params mb_params = {0};
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u8 fir_valid, l2_valid;
+ int rc;
+
+ mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The get_engine_config command is unsupported by the MFW\n");
+ return -EOPNOTSUPP;
+ }
+
+ fir_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
+ if (fir_valid)
+ cdev->fir_affin =
+ QED_MFW_GET_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);
+
+ l2_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
+ if (l2_valid)
+ cdev->l2_affin_hint =
+ QED_MFW_GET_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);
+
+ DP_INFO(p_hwfn,
+ "Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
+ fir_valid, cdev->fir_affin, l2_valid, cdev->l2_affin_hint);
+
+ return 0;
+}
+
+int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_mb_params mb_params = {0};
+ struct qed_dev *cdev = p_hwfn->cdev;
+ int rc;
+
+ mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The get_ppfid_bitmap command is unsupported by the MFW\n");
+ return -EOPNOTSUPP;
+ }
+
+ cdev->ppfid_bitmap = QED_MFW_GET_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_PPFID_BITMAP);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "PPFID bitmap 0x%hhx\n",
+ cdev->ppfid_bitmap);
+
+ return 0;
+}
+
+int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
+ u32 *p_len)
+{
+ u32 mb_param = 0, resp, param;
+ int rc;
+
+ QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
+ if (flags & QED_NVM_CFG_OPTION_INIT)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
+ if (flags & QED_NVM_CFG_OPTION_FREE)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
+ if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
+ entity_id);
+ }
+
+ rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_GET_NVM_CFG_OPTION,
+ mb_param, &resp, ¶m, p_len, (u32 *)p_buf);
+
+ return rc;
+}
+
+int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
+ u32 len)
+{
+ u32 mb_param = 0, resp, param;
+
+ QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
+ if (flags & QED_NVM_CFG_OPTION_ALL)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_ALL, 1);
+ if (flags & QED_NVM_CFG_OPTION_INIT)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
+ if (flags & QED_NVM_CFG_OPTION_COMMIT)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT, 1);
+ if (flags & QED_NVM_CFG_OPTION_FREE)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
+ if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
+ entity_id);
+ }
+
+ return qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_SET_NVM_CFG_OPTION,
+ mb_param, &resp, ¶m, len, (u32 *)p_buf);
+}
+
+#define QED_MCP_DBG_DATA_MAX_SIZE MCP_DRV_NVM_BUF_LEN
+#define QED_MCP_DBG_DATA_MAX_HEADER_SIZE sizeof(u32)
+#define QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE \
+ (QED_MCP_DBG_DATA_MAX_SIZE - QED_MCP_DBG_DATA_MAX_HEADER_SIZE)
+
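+/* Send one debug-data chunk (u32 header + payload) to the MFW mailbox */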
+static int
+__qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_buf, u8 size)
+{
+ struct qed_mcp_mb_params mb_params;
+ int rc;
+
+ if (size > QED_MCP_DBG_DATA_MAX_SIZE) {
+ DP_ERR(p_hwfn,
+ "Debug data size is %d while it should not exceed %d\n",
+ size, QED_MCP_DBG_DATA_MAX_SIZE);
+ return -EINVAL;
+ }
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_DEBUG_DATA_SEND;
+ SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE, size);
+ mb_params.p_data_src = p_buf;
+ mb_params.data_src_size = size;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The DEBUG_DATA_SEND command is unsupported by the MFW\n");
+ return -EOPNOTSUPP;
+ } else if (mb_params.mcp_resp == (u32)FW_MSG_CODE_DEBUG_NOT_ENABLED) {
+ DP_INFO(p_hwfn, "The DEBUG_DATA_SEND command is not enabled\n");
+ return -EBUSY;
+ } else if (mb_params.mcp_resp != (u32)FW_MSG_CODE_DEBUG_DATA_SEND_OK) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send debug data to the MFW [resp 0x%08x]\n",
+ mb_params.mcp_resp);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+enum qed_mcp_dbg_data_type {
+ QED_MCP_DBG_DATA_TYPE_RAW,
+};
+
+/* Header format: [31:28] PFID, [27:20] flags, [19:12] type, [11:0] S/N */
+#define QED_MCP_DBG_DATA_HDR_SN_OFFSET 0
+#define QED_MCP_DBG_DATA_HDR_SN_MASK 0x00000fff
+#define QED_MCP_DBG_DATA_HDR_TYPE_OFFSET 12
+#define QED_MCP_DBG_DATA_HDR_TYPE_MASK 0x000ff000
+#define QED_MCP_DBG_DATA_HDR_FLAGS_OFFSET 20
+#define QED_MCP_DBG_DATA_HDR_FLAGS_MASK 0x0ff00000
+#define QED_MCP_DBG_DATA_HDR_PF_OFFSET 28
+#define QED_MCP_DBG_DATA_HDR_PF_MASK 0xf0000000
+
+#define QED_MCP_DBG_DATA_HDR_FLAGS_FIRST 0x1
+#define QED_MCP_DBG_DATA_HDR_FLAGS_LAST 0x2
+
+static int
+qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_mcp_dbg_data_type type, u8 *p_buf, u32 size)
+{
+ u8 raw_data[QED_MCP_DBG_DATA_MAX_SIZE], *p_tmp_buf = p_buf;
+ u32 tmp_size = size, *p_header, *p_payload;
+ u8 flags = 0;
+ u16 seq;
+ int rc;
+
+ p_header = (u32 *)raw_data;
+ p_payload = (u32 *)(raw_data + QED_MCP_DBG_DATA_MAX_HEADER_SIZE);
+
+ seq = (u16)atomic_inc_return(&p_hwfn->mcp_info->dbg_data_seq);
+
+ /* First chunk is marked as 'first' */
+ flags |= QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
+
+ *p_header = 0;
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_SN, seq);
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_TYPE, type);
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_PF, p_hwfn->abs_pf_id);
+
+ while (tmp_size > QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE) {
+ memcpy(p_payload, p_tmp_buf, QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE);
+ rc = __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
+ QED_MCP_DBG_DATA_MAX_SIZE);
+ if (rc)
+ return rc;
+
+ /* Clear the 'first' marking after sending the first chunk */
+ if (p_tmp_buf == p_buf) {
+ flags &= ~QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS,
+ flags);
+ }
+
+ p_tmp_buf += QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
+ tmp_size -= QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
+ }
+
+ /* Last chunk is marked as 'last' */
+ flags |= QED_MCP_DBG_DATA_HDR_FLAGS_LAST;
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
+ memcpy(p_payload, p_tmp_buf, tmp_size);
+
+ /* Casting the left size to u8 is ok since at this point it is <= 32 */
+ return __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
+ (u8)(QED_MCP_DBG_DATA_MAX_HEADER_SIZE +
+ tmp_size));
+}
+
+int
+qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_buf, u32 size)
+{
+ return qed_mcp_send_debug_data(p_hwfn, p_ptt,
+ QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size);
+}
--
Gitblit v1.6.2