forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
kernel/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #include <linux/types.h>
@@ -47,6 +21,8 @@
 #include "qed_mcp.h"
 #include "qed_reg_addr.h"
 #include "qed_sriov.h"
+
+#define GRCBASE_MCP    0xe00000
 
 #define QED_MCP_RESP_ITER_US    10
 
@@ -583,6 +559,8 @@
         if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
             qed_mcp_cmd_set_blocking(p_hwfn, true);
 
+        qed_hw_err_notify(p_hwfn, p_ptt,
+                          QED_HW_ERR_MFW_RESP_FAIL, NULL);
         return -EAGAIN;
     }
 
@@ -1081,6 +1059,27 @@
     return 0;
 }
 
+int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+    u32 resp = 0, param = 0;
+    int rc;
+
+    rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
+                     &param);
+    if (rc) {
+        DP_NOTICE(p_hwfn,
+                  "Failed to send a LOAD_DONE command, rc = %d\n", rc);
+        return rc;
+    }
+
+    /* Check if there is a DID mismatch between nvm-cfg/efuse */
+    if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
+        DP_NOTICE(p_hwfn,
+                  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
+
+    return 0;
+}
+
 int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
     struct qed_mcp_mb_params mb_params;
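For context, qed_mcp_load_done() closes the load handshake that the existing LOAD_REQ path opens: once a PF finishes initializing, the driver notifies the management firmware (MFW), and the response parameter can flag a device-ID mismatch between nvm-cfg and efuse. A minimal sketch of a call site follows; the wrapper function and its place in the load flow are assumptions for illustration, not part of this change.

/* Illustrative sketch only: qed_mcp_load_done() is the function added
 * above; the surrounding wrapper is a hypothetical example.
 */
static int example_complete_pf_load(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt)
{
    /* ...per-PF hardware/firmware init would run here... */

    /* Tell the MFW the driver finished loading; a DID nvm-cfg/efuse
     * mismatch is reported via the response param and logged inside
     * qed_mcp_load_done() itself.
     */
    return qed_mcp_load_done(p_hwfn, p_ptt);
}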
@@ -1097,7 +1096,7 @@
         DP_NOTICE(p_hwfn,
                   "Unknown WoL configuration %02x\n",
                   p_hwfn->cdev->wol_config);
-        /* Fallthrough */
+        fallthrough;
     case QED_OV_WOL_DEFAULT:
         wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
     }
@@ -1258,6 +1257,52 @@
         p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
 }
 
+static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt,
+                                  struct public_func *p_data, int pfid)
+{
+    u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+                                    PUBLIC_FUNC);
+    u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+    u32 func_addr;
+    u32 i, size;
+
+    func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+    memset(p_data, 0, sizeof(*p_data));
+
+    size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
+    for (i = 0; i < size / sizeof(u32); i++)
+        ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
+                                    func_addr + (i << 2));
+    return size;
+}
+
+static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
+                                  struct public_func *p_shmem_info)
+{
+    struct qed_mcp_function_info *p_info;
+
+    p_info = &p_hwfn->mcp_info->func_info;
+
+    p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config,
+                                              FUNC_MF_CFG_MIN_BW);
+    if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
+        DP_INFO(p_hwfn,
+                "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+                p_info->bandwidth_min);
+        p_info->bandwidth_min = 1;
+    }
+
+    p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config,
+                                              FUNC_MF_CFG_MAX_BW);
+    if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
+        DP_INFO(p_hwfn,
+                "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+                p_info->bandwidth_max);
+        p_info->bandwidth_max = 100;
+    }
+}
+
 static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
                                        struct qed_ptt *p_ptt, bool b_reset)
 {
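The two helpers above are moves from later in the file (see the removal hunk further down), with one substantive change: the open-coded mask-and-shift reads of FUNC_MF_CFG_MIN_BW/MAX_BW are replaced by QED_MFW_GET_FIELD. A sketch of that accessor convention follows; it is an assumption about the header-side definitions (which live in the qed headers, not in this diff), shown with an illustrative macro name.

/* Sketch of the field-accessor convention. Each field FOO is assumed
 * to supply FOO_MASK and FOO_SHIFT, so the helper reduces to:
 */
#define EXAMPLE_GET_FIELD(value, field) \
    (((value) & (field ## _MASK)) >> (field ## _SHIFT))

/* QED_MFW_GET_FIELD(p_shmem_info->config, FUNC_MF_CFG_MIN_BW) is then
 * equivalent to the open-coded form this patch deletes below:
 * (config & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT.
 */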
@@ -1285,10 +1330,29 @@
         goto out;
     }
 
-    if (p_hwfn->b_drv_link_init)
-        p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
-    else
+    if (p_hwfn->b_drv_link_init) {
+        /* Link indication with modern MFW arrives as per-PF
+         * indication.
+         */
+        if (p_hwfn->mcp_info->capabilities &
+            FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
+            struct public_func shmem_info;
+
+            qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+                                   MCP_PF_ID(p_hwfn));
+            p_link->link_up = !!(shmem_info.status &
+                                 FUNC_STATUS_VIRTUAL_LINK_UP);
+            qed_read_pf_bandwidth(p_hwfn, &shmem_info);
+            DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                       "Virtual link_up = %d\n", p_link->link_up);
+        } else {
+            p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+            DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                       "Physical link_up = %d\n", p_link->link_up);
+        }
+    } else {
         p_link->link_up = false;
+    }
 
     p_link->full_duplex = true;
     switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
@@ -1312,7 +1376,7 @@
         break;
     case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
         p_link->full_duplex = false;
-        /* Fall-through */
+        fallthrough;
     case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
         p_link->speed = 1000;
         break;
@@ -1393,6 +1457,25 @@
     if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
         qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
 
+    if (p_hwfn->mcp_info->capabilities &
+        FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
+        switch (status & LINK_STATUS_FEC_MODE_MASK) {
+        case LINK_STATUS_FEC_MODE_NONE:
+            p_link->fec_active = QED_FEC_MODE_NONE;
+            break;
+        case LINK_STATUS_FEC_MODE_FIRECODE_CL74:
+            p_link->fec_active = QED_FEC_MODE_FIRECODE;
+            break;
+        case LINK_STATUS_FEC_MODE_RS_CL91:
+            p_link->fec_active = QED_FEC_MODE_RS;
+            break;
+        default:
+            p_link->fec_active = QED_FEC_MODE_AUTO;
+        }
+    } else {
+        p_link->fec_active = QED_FEC_MODE_UNSUPPORTED;
+    }
+
     qed_link_update(p_hwfn, p_ptt);
 out:
     spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
@@ -1403,8 +1486,9 @@
     struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
     struct qed_mcp_mb_params mb_params;
     struct eth_phy_cfg phy_cfg;
+    u32 cmd, fec_bit = 0;
+    u32 val, ext_speed;
     int rc = 0;
-    u32 cmd;
 
     /* Set the shmem configuration according to params */
     memset(&phy_cfg, 0, sizeof(phy_cfg));
@@ -1436,19 +1520,91 @@
                             EEE_TX_TIMER_USEC_MASK;
     }
 
+    if (p_hwfn->mcp_info->capabilities &
+        FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
+        if (params->fec & QED_FEC_MODE_NONE)
+            fec_bit |= FEC_FORCE_MODE_NONE;
+        else if (params->fec & QED_FEC_MODE_FIRECODE)
+            fec_bit |= FEC_FORCE_MODE_FIRECODE;
+        else if (params->fec & QED_FEC_MODE_RS)
+            fec_bit |= FEC_FORCE_MODE_RS;
+        else if (params->fec & QED_FEC_MODE_AUTO)
+            fec_bit |= FEC_FORCE_MODE_AUTO;
+
+        SET_MFW_FIELD(phy_cfg.fec_mode, FEC_FORCE_MODE, fec_bit);
+    }
+
+    if (p_hwfn->mcp_info->capabilities &
+        FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) {
+        ext_speed = 0;
+        if (params->ext_speed.autoneg)
+            ext_speed |= ETH_EXT_SPEED_AN;
+
+        val = params->ext_speed.forced_speed;
+        if (val & QED_EXT_SPEED_1G)
+            ext_speed |= ETH_EXT_SPEED_1G;
+        if (val & QED_EXT_SPEED_10G)
+            ext_speed |= ETH_EXT_SPEED_10G;
+        if (val & QED_EXT_SPEED_20G)
+            ext_speed |= ETH_EXT_SPEED_20G;
+        if (val & QED_EXT_SPEED_25G)
+            ext_speed |= ETH_EXT_SPEED_25G;
+        if (val & QED_EXT_SPEED_40G)
+            ext_speed |= ETH_EXT_SPEED_40G;
+        if (val & QED_EXT_SPEED_50G_R)
+            ext_speed |= ETH_EXT_SPEED_50G_BASE_R;
+        if (val & QED_EXT_SPEED_50G_R2)
+            ext_speed |= ETH_EXT_SPEED_50G_BASE_R2;
+        if (val & QED_EXT_SPEED_100G_R2)
+            ext_speed |= ETH_EXT_SPEED_100G_BASE_R2;
+        if (val & QED_EXT_SPEED_100G_R4)
+            ext_speed |= ETH_EXT_SPEED_100G_BASE_R4;
+        if (val & QED_EXT_SPEED_100G_P4)
+            ext_speed |= ETH_EXT_SPEED_100G_BASE_P4;
+
+        SET_MFW_FIELD(phy_cfg.extended_speed, ETH_EXT_SPEED,
+                      ext_speed);
+
+        ext_speed = 0;
+
+        val = params->ext_speed.advertised_speeds;
+        if (val & QED_EXT_SPEED_MASK_1G)
+            ext_speed |= ETH_EXT_ADV_SPEED_1G;
+        if (val & QED_EXT_SPEED_MASK_10G)
+            ext_speed |= ETH_EXT_ADV_SPEED_10G;
+        if (val & QED_EXT_SPEED_MASK_20G)
+            ext_speed |= ETH_EXT_ADV_SPEED_20G;
+        if (val & QED_EXT_SPEED_MASK_25G)
+            ext_speed |= ETH_EXT_ADV_SPEED_25G;
+        if (val & QED_EXT_SPEED_MASK_40G)
+            ext_speed |= ETH_EXT_ADV_SPEED_40G;
+        if (val & QED_EXT_SPEED_MASK_50G_R)
+            ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R;
+        if (val & QED_EXT_SPEED_MASK_50G_R2)
+            ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R2;
+        if (val & QED_EXT_SPEED_MASK_100G_R2)
+            ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R2;
+        if (val & QED_EXT_SPEED_MASK_100G_R4)
+            ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R4;
+        if (val & QED_EXT_SPEED_MASK_100G_P4)
+            ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_P4;
+
+        phy_cfg.extended_speed |= ext_speed;
+
+        SET_MFW_FIELD(phy_cfg.fec_mode, FEC_EXTENDED_MODE,
+                      params->ext_fec_mode);
+    }
+
     p_hwfn->b_drv_link_init = b_up;
 
     if (b_up) {
         DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
-                   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
-                   phy_cfg.speed,
-                   phy_cfg.pause,
-                   phy_cfg.adv_speed,
-                   phy_cfg.loopback_mode,
-                   phy_cfg.feature_config_flags);
+                   "Configuring Link: Speed 0x%08x, Pause 0x%08x, Adv. Speed 0x%08x, Loopback 0x%08x, FEC 0x%08x, Ext. Speed 0x%08x\n",
+                   phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
+                   phy_cfg.loopback_mode, phy_cfg.fec_mode,
+                   phy_cfg.extended_speed);
     } else {
-        DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
-                   "Resetting link\n");
+        DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link\n");
     }
 
     memset(&mb_params, 0, sizeof(mb_params));
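The new FEC and extended-speed blocks translate driver-side request bits (params->fec and the params->ext_speed fields) into the shmem eth_phy_cfg layout before the mailbox send. A hypothetical caller-side setup, just to show which fields feed this code; the wrapper function itself is illustrative and not part of the driver.

/* Hypothetical example: request forced RS-FEC and a forced 100G-R4
 * extended speed before calling qed_mcp_set_link(). Field and constant
 * names come from the code above; the wrapper is an assumption.
 */
static void example_force_rs_fec_100g(struct qed_hwfn *p_hwfn)
{
    struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;

    params->fec = QED_FEC_MODE_RS;      /* selects FEC_FORCE_MODE_RS */
    params->ext_speed.autoneg = 0;      /* no ETH_EXT_SPEED_AN bit */
    params->ext_speed.forced_speed = QED_EXT_SPEED_100G_R4;
    params->ext_speed.advertised_speeds = QED_EXT_SPEED_MASK_100G_R4;
}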
@@ -1472,6 +1628,60 @@
     qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
 
     return 0;
+}
+
+u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt)
+{
+    u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
+
+    if (IS_VF(p_hwfn->cdev))
+        return -EINVAL;
+
+    path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+                                             PUBLIC_PATH);
+    path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr);
+    path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn));
+
+    proc_kill_cnt = qed_rd(p_hwfn, p_ptt,
+                           path_addr +
+                           offsetof(struct public_path, process_kill)) &
+                    PROCESS_KILL_COUNTER_MASK;
+
+    return proc_kill_cnt;
+}
+
+static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn,
+                                        struct qed_ptt *p_ptt)
+{
+    struct qed_dev *cdev = p_hwfn->cdev;
+    u32 proc_kill_cnt;
+
+    /* Prevent possible attentions/interrupts during the recovery handling
+     * and till its load phase, during which they will be re-enabled.
+     */
+    qed_int_igu_disable_int(p_hwfn, p_ptt);
+
+    DP_NOTICE(p_hwfn, "Received a process kill indication\n");
+
+    /* The following operations should be done once, and thus in CMT mode
+     * are carried out by only the first HW function.
+     */
+    if (p_hwfn != QED_LEADING_HWFN(cdev))
+        return;
+
+    if (cdev->recov_in_prog) {
+        DP_NOTICE(p_hwfn,
+                  "Ignoring the indication since a recovery process is already in progress\n");
+        return;
+    }
+
+    cdev->recov_in_prog = true;
+
+    proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt);
+    DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt);
+
+    qed_schedule_recovery_handler(p_hwfn);
 }
 
 static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
@@ -1513,53 +1723,6 @@
     mb_params.p_data_src = &stats;
     mb_params.data_src_size = sizeof(stats);
     qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
-}
-
-static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
-                                  struct public_func *p_shmem_info)
-{
-    struct qed_mcp_function_info *p_info;
-
-    p_info = &p_hwfn->mcp_info->func_info;
-
-    p_info->bandwidth_min = (p_shmem_info->config &
-                             FUNC_MF_CFG_MIN_BW_MASK) >>
-                            FUNC_MF_CFG_MIN_BW_SHIFT;
-    if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
-        DP_INFO(p_hwfn,
-                "bandwidth minimum out of bounds [%02x]. Set to 1\n",
-                p_info->bandwidth_min);
-        p_info->bandwidth_min = 1;
-    }
-
-    p_info->bandwidth_max = (p_shmem_info->config &
-                             FUNC_MF_CFG_MAX_BW_MASK) >>
-                            FUNC_MF_CFG_MAX_BW_SHIFT;
-    if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
-        DP_INFO(p_hwfn,
-                "bandwidth maximum out of bounds [%02x]. Set to 100\n",
-                p_info->bandwidth_max);
-        p_info->bandwidth_max = 100;
-    }
-}
-
-static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
-                                  struct qed_ptt *p_ptt,
-                                  struct public_func *p_data, int pfid)
-{
-    u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
-                                    PUBLIC_FUNC);
-    u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
-    u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
-    u32 i, size;
-
-    memset(p_data, 0, sizeof(*p_data));
-
-    size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
-    for (i = 0; i < size / sizeof(u32); i++)
-        ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
-                                    func_addr + (i << 2));
-    return size;
 }
 
 static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -1612,12 +1775,133 @@
         qed_sp_pf_update_stag(p_hwfn);
     }
 
-    DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
+    DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
                p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
 
     /* Acknowledge the MFW */
     qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
                 &resp, &param);
+}
+
+static void qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn,
+                                       struct qed_ptt *p_ptt)
+{
+    /* A single notification should be sent to upper driver in CMT mode */
+    if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
+        return;
+
+    qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_FAN_FAIL,
+                      "Fan failure was detected on the network interface card and it's going to be shut down.\n");
+}
+
+struct qed_mdump_cmd_params {
+    u32 cmd;
+    void *p_data_src;
+    u8 data_src_size;
+    void *p_data_dst;
+    u8 data_dst_size;
+    u32 mcp_resp;
+};
+
+static int
+qed_mcp_mdump_cmd(struct qed_hwfn *p_hwfn,
+                  struct qed_ptt *p_ptt,
+                  struct qed_mdump_cmd_params *p_mdump_cmd_params)
+{
+    struct qed_mcp_mb_params mb_params;
+    int rc;
+
+    memset(&mb_params, 0, sizeof(mb_params));
+    mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
+    mb_params.param = p_mdump_cmd_params->cmd;
+    mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
+    mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
+    mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
+    mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
+    rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+    if (rc)
+        return rc;
+
+    p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
+
+    if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
+        DP_INFO(p_hwfn,
+                "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
+                p_mdump_cmd_params->cmd);
+        rc = -EOPNOTSUPP;
+    } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+        DP_INFO(p_hwfn,
+                "The mdump command is not supported by the MFW\n");
+        rc = -EOPNOTSUPP;
+    }
+
+    return rc;
+}
+
+static int qed_mcp_mdump_ack(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+    struct qed_mdump_cmd_params mdump_cmd_params;
+
+    memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
+    mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
+
+    return qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+}
+
+int
+qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
+                         struct qed_ptt *p_ptt,
+                         struct mdump_retain_data_stc *p_mdump_retain)
+{
+    struct qed_mdump_cmd_params mdump_cmd_params;
+    int rc;
+
+    memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
+    mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
+    mdump_cmd_params.p_data_dst = p_mdump_retain;
+    mdump_cmd_params.data_dst_size = sizeof(*p_mdump_retain);
+
+    rc = qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+    if (rc)
+        return rc;
+
+    if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
+        DP_INFO(p_hwfn,
+                "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
+                mdump_cmd_params.mcp_resp);
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+static void qed_mcp_handle_critical_error(struct qed_hwfn *p_hwfn,
+                                          struct qed_ptt *p_ptt)
+{
+    struct mdump_retain_data_stc mdump_retain;
+    int rc;
+
+    /* In CMT mode - no need for more than a single acknowledgment to the
+     * MFW, and no more than a single notification to the upper driver.
+     */
+    if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
+        return;
+
+    rc = qed_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
+    if (rc == 0 && mdump_retain.valid)
+        DP_NOTICE(p_hwfn,
+                  "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
+                  mdump_retain.epoch,
+                  mdump_retain.pf, mdump_retain.status);
+    else
+        DP_NOTICE(p_hwfn,
+                  "The MFW notified that a critical error occurred in the device\n");
+
+    DP_NOTICE(p_hwfn,
+              "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
+    qed_mcp_mdump_ack(p_hwfn, p_ptt);
+
+    qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_HW_ATTN, NULL);
 }
 
 void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -1634,7 +1918,9 @@
     val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
         OEM_CFG_CHANNEL_TYPE_OFFSET;
     if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
-        DP_NOTICE(p_hwfn, "Incorrect UFP Channel type %d\n", val);
+        DP_NOTICE(p_hwfn,
+                  "Incorrect UFP Channel type %d port_id 0x%02x\n",
+                  val, MFW_PORT(p_hwfn));
 
     val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
     if (val == OEM_CFG_SCHED_TYPE_ETS) {
@@ -1643,7 +1929,9 @@
         p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
     } else {
         p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
-        DP_NOTICE(p_hwfn, "Unknown UFP scheduling mode %d\n", val);
+        DP_NOTICE(p_hwfn,
+                  "Unknown UFP scheduling mode %d port_id 0x%02x\n",
+                  val, MFW_PORT(p_hwfn));
     }
 
     qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
@@ -1658,13 +1946,15 @@
         p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
     } else {
         p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
-        DP_NOTICE(p_hwfn, "Unknown Host priority control %d\n", val);
+        DP_NOTICE(p_hwfn,
+                  "Unknown Host priority control %d port_id 0x%02x\n",
+                  val, MFW_PORT(p_hwfn));
     }
 
     DP_NOTICE(p_hwfn,
-              "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
-              p_hwfn->ufp_info.mode,
-              p_hwfn->ufp_info.tc, p_hwfn->ufp_info.pri_type);
+              "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n",
+              p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
+              p_hwfn->ufp_info.pri_type, MFW_PORT(p_hwfn));
 }
 
 static int
@@ -1745,6 +2035,9 @@
         case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
             qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
             break;
+        case MFW_DRV_MSG_ERROR_RECOVERY:
+            qed_mcp_handle_process_kill(p_hwfn, p_ptt);
+            break;
         case MFW_DRV_MSG_GET_LAN_STATS:
         case MFW_DRV_MSG_GET_FCOE_STATS:
         case MFW_DRV_MSG_GET_ISCSI_STATS:
@@ -1756,6 +2049,12 @@
             break;
         case MFW_DRV_MSG_S_TAG_UPDATE:
             qed_mcp_update_stag(p_hwfn, p_ptt);
+            break;
+        case MFW_DRV_MSG_FAILURE_DETECTED:
+            qed_mcp_handle_fan_failure(p_hwfn, p_ptt);
+            break;
+        case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
+            qed_mcp_handle_critical_error(p_hwfn, p_ptt);
             break;
         case MFW_DRV_MSG_GET_TLV_REQ:
             qed_mfw_tlv_req(p_hwfn);
@@ -1863,6 +2162,8 @@
 int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
                            struct qed_ptt *p_ptt, u32 *p_media_type)
 {
+    *p_media_type = MEDIA_UNSPECIFIED;
+
     if (IS_VF(p_hwfn->cdev))
         return -EINVAL;
 
@@ -1880,6 +2181,196 @@
                              p_hwfn->mcp_info->port_addr +
                              offsetof(struct public_port,
                                       media_type));
+
+    return 0;
+}
+
+int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
+                                 struct qed_ptt *p_ptt,
+                                 u32 *p_transceiver_state,
+                                 u32 *p_transceiver_type)
+{
+    u32 transceiver_info;
+
+    *p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
+    *p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;
+
+    if (IS_VF(p_hwfn->cdev))
+        return -EINVAL;
+
+    if (!qed_mcp_is_init(p_hwfn)) {
+        DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
+        return -EBUSY;
+    }
+
+    transceiver_info = qed_rd(p_hwfn, p_ptt,
+                              p_hwfn->mcp_info->port_addr +
+                              offsetof(struct public_port,
+                                       transceiver_data));
+
+    *p_transceiver_state = (transceiver_info &
+                            ETH_TRANSCEIVER_STATE_MASK) >>
+                           ETH_TRANSCEIVER_STATE_OFFSET;
+
+    if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
+        *p_transceiver_type = (transceiver_info &
+                               ETH_TRANSCEIVER_TYPE_MASK) >>
+                              ETH_TRANSCEIVER_TYPE_OFFSET;
+    else
+        *p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;
+
+    return 0;
+}
+static bool qed_is_transceiver_ready(u32 transceiver_state,
+                                     u32 transceiver_type)
+{
+    if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
+        ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
+        (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
+        return true;
+
+    return false;
+}
+
+int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt, u32 *p_speed_mask)
+{
+    u32 transceiver_type, transceiver_state;
+    int ret;
+
+    ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
+                                       &transceiver_type);
+    if (ret)
+        return ret;
+
+    if (qed_is_transceiver_ready(transceiver_state, transceiver_type) ==
+        false)
+        return -EINVAL;
+
+    switch (transceiver_type) {
+    case ETH_TRANSCEIVER_TYPE_1G_LX:
+    case ETH_TRANSCEIVER_TYPE_1G_SX:
+    case ETH_TRANSCEIVER_TYPE_1G_PCC:
+    case ETH_TRANSCEIVER_TYPE_1G_ACC:
+    case ETH_TRANSCEIVER_TYPE_1000BASET:
+        *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+        break;
+    case ETH_TRANSCEIVER_TYPE_10G_SR:
+    case ETH_TRANSCEIVER_TYPE_10G_LR:
+    case ETH_TRANSCEIVER_TYPE_10G_LRM:
+    case ETH_TRANSCEIVER_TYPE_10G_ER:
+    case ETH_TRANSCEIVER_TYPE_10G_PCC:
+    case ETH_TRANSCEIVER_TYPE_10G_ACC:
+    case ETH_TRANSCEIVER_TYPE_4x10G:
+        *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+        break;
+    case ETH_TRANSCEIVER_TYPE_40G_LR4:
+    case ETH_TRANSCEIVER_TYPE_40G_SR4:
+    case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
+    case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
+        *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+                        NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+        break;
+    case ETH_TRANSCEIVER_TYPE_100G_AOC:
+    case ETH_TRANSCEIVER_TYPE_100G_SR4:
+    case ETH_TRANSCEIVER_TYPE_100G_LR4:
+    case ETH_TRANSCEIVER_TYPE_100G_ER4:
+    case ETH_TRANSCEIVER_TYPE_100G_ACC:
+        *p_speed_mask =
+            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+        break;
+    case ETH_TRANSCEIVER_TYPE_25G_SR:
+    case ETH_TRANSCEIVER_TYPE_25G_LR:
+    case ETH_TRANSCEIVER_TYPE_25G_AOC:
+    case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
+    case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
+    case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
+        *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+        break;
+    case ETH_TRANSCEIVER_TYPE_25G_CA_N:
+    case ETH_TRANSCEIVER_TYPE_25G_CA_S:
+    case ETH_TRANSCEIVER_TYPE_25G_CA_L:
+    case ETH_TRANSCEIVER_TYPE_4x25G_CR:
+        *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+                        NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+                        NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+        break;
+    case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
+    case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
+        *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+                        NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+        break;
+    case ETH_TRANSCEIVER_TYPE_40G_CR4:
+    case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
+        *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+                        NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+                        NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+        break;
+    case ETH_TRANSCEIVER_TYPE_100G_CR4:
+    case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
+        *p_speed_mask =
+            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
+            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
+            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+        break;
+    case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
+    case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
+    case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
+        *p_speed_mask =
+            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+        break;
+    case ETH_TRANSCEIVER_TYPE_XLPPI:
+        *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
+        break;
+    case ETH_TRANSCEIVER_TYPE_10G_BASET:
+    case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
+    case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
+        *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+                        NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+        break;
+    default:
+        DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
+                transceiver_type);
+        *p_speed_mask = 0xff;
+        break;
+    }
+
+    return 0;
+}
+
+int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt, u32 *p_board_config)
+{
+    u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
+
+    if (IS_VF(p_hwfn->cdev))
+        return -EINVAL;
+
+    if (!qed_mcp_is_init(p_hwfn)) {
+        DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
+        return -EBUSY;
+    }
+    if (!p_ptt) {
+        *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
+        return -EINVAL;
+    }
+
+    nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+    nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+    port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+                    offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+    *p_board_config = qed_rd(p_hwfn, p_ptt,
+                             port_cfg_addr +
+                             offsetof(struct nvm_cfg1_port,
+                                      board_cfg));
 
     return 0;
 }
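qed_mcp_trans_speed_mask() reduces the module type reported in shmem to the NVM speed-capability bits a caller may advertise. A worked decode of the packed transceiver_data word, using a hypothetical module, is sketched below; the masks and cases are the ones defined in the hunk above.

/* Worked example (hypothetical module): decoding one transceiver_data
 * word the same way qed_mcp_get_transceiver_data() does above.
 *
 *    state = (info & ETH_TRANSCEIVER_STATE_MASK) >>
 *            ETH_TRANSCEIVER_STATE_OFFSET;
 *    type  = (info & ETH_TRANSCEIVER_TYPE_MASK) >>
 *            ETH_TRANSCEIVER_TYPE_OFFSET;
 *
 * A present, non-updating 25G-SR module passes
 * qed_is_transceiver_ready(), lands in the ETH_TRANSCEIVER_TYPE_25G_SR
 * case of qed_mcp_trans_speed_mask(), and reports
 * NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G.
 */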
@@ -1971,7 +2462,7 @@
         break;
     case FUNC_MF_CFG_PROTOCOL_ROCE:
         DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
-        /* Fallthrough */
+        fallthrough;
     default:
         rc = -EINVAL;
     }
@@ -2038,11 +2529,10 @@
     }
 
     DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
-               "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
+               "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %pM wwn port %llx node %llx ovlan %04x wol %02x\n",
                info->pause_on_host, info->protocol,
                info->bandwidth_min, info->bandwidth_max,
-               info->mac[0], info->mac[1], info->mac[2],
-               info->mac[3], info->mac[4], info->mac[5],
+               info->mac,
                info->wwn_port, info->wwn_node,
                info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);
 
@@ -2103,6 +2593,43 @@
     *p_flash_size = flash_size;
 
     return 0;
+}
+
+int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+    struct qed_dev *cdev = p_hwfn->cdev;
+
+    if (cdev->recov_in_prog) {
+        DP_NOTICE(p_hwfn,
+                  "Avoid triggering a recovery since such a process is already in progress\n");
+        return -EAGAIN;
+    }
+
+    DP_NOTICE(p_hwfn, "Triggering a recovery process\n");
+    qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
+
+    return 0;
+}
+
+#define QED_RECOVERY_PROLOG_SLEEP_MS    100
+
+int qed_recovery_prolog(struct qed_dev *cdev)
+{
+    struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+    struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+    int rc;
+
+    /* Allow ongoing PCIe transactions to complete */
+    msleep(QED_RECOVERY_PROLOG_SLEEP_MS);
+
+    /* Clear the PF's internal FID_enable in the PXP */
+    rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
+    if (rc)
+        DP_NOTICE(p_hwfn,
+                  "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
+                  rc);
+
+    return rc;
 }
 
 static int
@@ -2547,24 +3074,6 @@
     return 0;
 }
 
-int qed_mcp_nvm_put_file_begin(struct qed_dev *cdev, u32 addr)
-{
-    struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
-    struct qed_ptt *p_ptt;
-    u32 resp, param;
-    int rc;
-
-    p_ptt = qed_ptt_acquire(p_hwfn);
-    if (!p_ptt)
-        return -EBUSY;
-    rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
-                     &resp, &param);
-    cdev->mcp_nvm_resp = resp;
-    qed_ptt_release(p_hwfn, p_ptt);
-
-    return rc;
-}
-
 int qed_mcp_nvm_write(struct qed_dev *cdev,
                       u32 cmd, u32 addr, u8 *p_buf, u32 len)
 {
@@ -2578,6 +3087,9 @@
         return -EBUSY;
 
     switch (cmd) {
+    case QED_PUT_FILE_BEGIN:
+        nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
+        break;
     case QED_PUT_FILE_DATA:
         nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
         break;
@@ -2590,10 +3102,14 @@
         goto out;
     }
 
+    buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
     while (buf_idx < len) {
-        buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
-        nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
-                      addr) + buf_idx;
+        if (cmd == QED_PUT_FILE_BEGIN)
+            nvm_offset = addr;
+        else
+            nvm_offset = ((buf_size <<
+                           DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) +
+                          buf_idx;
         rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
                                 &resp, &param, buf_size,
                                 (u32 *)&p_buf[buf_idx]);
@@ -2618,7 +3134,19 @@
         if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000)
             usleep_range(1000, 2000);
 
-        buf_idx += buf_size;
+        /* For MBI upgrade, MFW response includes the next buffer offset
+         * to be delivered to MFW.
+         */
+        if (param && cmd == QED_PUT_FILE_DATA) {
+            buf_idx = QED_MFW_GET_FIELD(param,
+                            FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
+            buf_size = QED_MFW_GET_FIELD(param,
+                            FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
+        } else {
+            buf_idx += buf_size;
+            buf_size = min_t(u32, (len - buf_idx),
+                             MCP_DRV_NVM_BUF_LEN);
+        }
     }
 
     cdev->mcp_nvm_resp = resp;
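With QED_PUT_FILE_BEGIN folded into qed_mcp_nvm_write(), a file-style NVM update becomes two calls through the same chunking loop: BEGIN sends only the address (no length bits in nvm_offset), and during PUT_FILE_DATA a nonzero response param lets the MFW reposition the next chunk (the MBI-upgrade case handled above). The sketch below is a hedged illustration only; the real sequencing is driven by image-parsing callers outside this file, and the helper name is made up.

/* Sketch only: the two-phase flow the switch above enables. Whether
 * BEGIN carries payload bytes depends on the flashed image format,
 * which is outside this diff.
 */
static int example_put_file(struct qed_dev *cdev, u32 addr,
                            u8 *image, u32 len)
{
    int rc;

    rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, addr, image, len);
    if (rc)
        return rc;

    return qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, addr, image, len);
}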
@@ -2835,6 +3363,13 @@
     return rc;
 }
 
+void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn)
+{
+    kfree(p_hwfn->nvm_info.image_att);
+    p_hwfn->nvm_info.image_att = NULL;
+    p_hwfn->nvm_info.valid = false;
+}
+
 int
 qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
                           enum qed_nvm_images image_id,
@@ -2851,6 +3386,9 @@
         break;
     case QED_NVM_IMAGE_FCOE_CFG:
         type = NVM_TYPE_FCOE_CFG;
+        break;
+    case QED_NVM_IMAGE_MDUMP:
+        type = NVM_TYPE_MDUMP;
         break;
     case QED_NVM_IMAGE_NVM_CFG1:
         type = NVM_TYPE_NVM_CFG1;
@@ -2951,8 +3489,11 @@
     case QED_ILT:
         mfw_res_id = RESOURCE_ILT_E;
         break;
-    case QED_LL2_QUEUE:
+    case QED_LL2_RAM_QUEUE:
         mfw_res_id = RESOURCE_LL2_QUEUE_E;
+        break;
+    case QED_LL2_CTX_QUEUE:
+        mfw_res_id = RESOURCE_LL2_CQS_E;
         break;
     case QED_RDMA_CNQ_RAM:
     case QED_CMDQS_CQS:
@@ -3020,7 +3561,7 @@
     switch (p_in_params->cmd) {
     case DRV_MSG_SET_RESOURCE_VALUE_MSG:
         mfw_resc_info.size = p_in_params->resc_max_val;
-        /* Fallthrough */
+        fallthrough;
     case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
         break;
     default:
@@ -3297,7 +3838,7 @@
         DP_INFO(p_hwfn,
                 "Resource unlock request for an already released resource [%d]\n",
                 p_params->resource);
-        /* Fallthrough */
+        fallthrough;
     case RESOURCE_OPCODE_RELEASED:
         p_params->b_released = true;
         break;
@@ -3344,6 +3885,12 @@
     }
 }
 
+bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn)
+{
+    return !!(p_hwfn->mcp_info->capabilities &
+              FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
+}
+
 int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
     u32 mcp_resp;
@@ -3363,8 +3910,264 @@
 {
     u32 mcp_resp, mcp_param, features;
 
-    features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;
+    features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
+               DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK |
+               DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL;
+
+    if (QED_IS_E5(p_hwfn->cdev))
+        features |=
+            DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL;
 
     return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
                        features, &mcp_resp, &mcp_param);
 }
+
+int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+    struct qed_mcp_mb_params mb_params = {0};
+    struct qed_dev *cdev = p_hwfn->cdev;
+    u8 fir_valid, l2_valid;
+    int rc;
+
+    mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
+    rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+    if (rc)
+        return rc;
+
+    if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+        DP_INFO(p_hwfn,
+                "The get_engine_config command is unsupported by the MFW\n");
+        return -EOPNOTSUPP;
+    }
+
+    fir_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
+                                  FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
+    if (fir_valid)
+        cdev->fir_affin =
+            QED_MFW_GET_FIELD(mb_params.mcp_param,
+                              FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);
+
+    l2_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
+                                 FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
+    if (l2_valid)
+        cdev->l2_affin_hint =
+            QED_MFW_GET_FIELD(mb_params.mcp_param,
+                              FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);
+
+    DP_INFO(p_hwfn,
+            "Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
+            fir_valid, cdev->fir_affin, l2_valid, cdev->l2_affin_hint);
+
+    return 0;
+}
+
+int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+    struct qed_mcp_mb_params mb_params = {0};
+    struct qed_dev *cdev = p_hwfn->cdev;
+    int rc;
+
+    mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
+    rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+    if (rc)
+        return rc;
+
+    if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+        DP_INFO(p_hwfn,
+                "The get_ppfid_bitmap command is unsupported by the MFW\n");
+        return -EOPNOTSUPP;
+    }
+
+    cdev->ppfid_bitmap = QED_MFW_GET_FIELD(mb_params.mcp_param,
+                                           FW_MB_PARAM_PPFID_BITMAP);
+
+    DP_VERBOSE(p_hwfn, QED_MSG_SP, "PPFID bitmap 0x%hhx\n",
+               cdev->ppfid_bitmap);
+
+    return 0;
+}
+
+int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                        u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
+                        u32 *p_len)
+{
+    u32 mb_param = 0, resp, param;
+    int rc;
+
+    QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
+    if (flags & QED_NVM_CFG_OPTION_INIT)
+        QED_MFW_SET_FIELD(mb_param,
+                          DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
+    if (flags & QED_NVM_CFG_OPTION_FREE)
+        QED_MFW_SET_FIELD(mb_param,
+                          DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
+    if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
+        QED_MFW_SET_FIELD(mb_param,
+                          DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
+        QED_MFW_SET_FIELD(mb_param,
+                          DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
+                          entity_id);
+    }
+
+    rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+                            DRV_MSG_CODE_GET_NVM_CFG_OPTION,
+                            mb_param, &resp, &param, p_len, (u32 *)p_buf);
+
+    return rc;
+}
+
+int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                        u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
+                        u32 len)
+{
+    u32 mb_param = 0, resp, param;
+
+    QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
+    if (flags & QED_NVM_CFG_OPTION_ALL)
+        QED_MFW_SET_FIELD(mb_param,
+                          DRV_MB_PARAM_NVM_CFG_OPTION_ALL, 1);
+    if (flags & QED_NVM_CFG_OPTION_INIT)
+        QED_MFW_SET_FIELD(mb_param,
+                          DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
+    if (flags & QED_NVM_CFG_OPTION_COMMIT)
+        QED_MFW_SET_FIELD(mb_param,
+                          DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT, 1);
+    if (flags & QED_NVM_CFG_OPTION_FREE)
+        QED_MFW_SET_FIELD(mb_param,
+                          DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
+    if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
+        QED_MFW_SET_FIELD(mb_param,
+                          DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
+        QED_MFW_SET_FIELD(mb_param,
+                          DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
+                          entity_id);
+    }
+
+    return qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
+                              DRV_MSG_CODE_SET_NVM_CFG_OPTION,
+                              mb_param, &resp, &param, len, (u32 *)p_buf);
+}
+
+#define QED_MCP_DBG_DATA_MAX_SIZE           MCP_DRV_NVM_BUF_LEN
+#define QED_MCP_DBG_DATA_MAX_HEADER_SIZE    sizeof(u32)
+#define QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE \
+    (QED_MCP_DBG_DATA_MAX_SIZE - QED_MCP_DBG_DATA_MAX_HEADER_SIZE)
+
+static int
+__qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
+                          struct qed_ptt *p_ptt, u8 *p_buf, u8 size)
+{
+    struct qed_mcp_mb_params mb_params;
+    int rc;
+
+    if (size > QED_MCP_DBG_DATA_MAX_SIZE) {
+        DP_ERR(p_hwfn,
+               "Debug data size is %d while it should not exceed %d\n",
+               size, QED_MCP_DBG_DATA_MAX_SIZE);
+        return -EINVAL;
+    }
+
+    memset(&mb_params, 0, sizeof(mb_params));
+    mb_params.cmd = DRV_MSG_CODE_DEBUG_DATA_SEND;
+    SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE, size);
+    mb_params.p_data_src = p_buf;
+    mb_params.data_src_size = size;
+    rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+    if (rc)
+        return rc;
+
+    if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+        DP_INFO(p_hwfn,
+                "The DEBUG_DATA_SEND command is unsupported by the MFW\n");
+        return -EOPNOTSUPP;
+    } else if (mb_params.mcp_resp == (u32)FW_MSG_CODE_DEBUG_NOT_ENABLED) {
+        DP_INFO(p_hwfn, "The DEBUG_DATA_SEND command is not enabled\n");
+        return -EBUSY;
+    } else if (mb_params.mcp_resp != (u32)FW_MSG_CODE_DEBUG_DATA_SEND_OK) {
+        DP_NOTICE(p_hwfn,
+                  "Failed to send debug data to the MFW [resp 0x%08x]\n",
+                  mb_params.mcp_resp);
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+enum qed_mcp_dbg_data_type {
+    QED_MCP_DBG_DATA_TYPE_RAW,
+};
+
+/* Header format: [31:28] PFID, [27:20] flags, [19:12] type, [11:0] S/N */
+#define QED_MCP_DBG_DATA_HDR_SN_OFFSET      0
+#define QED_MCP_DBG_DATA_HDR_SN_MASK        0x00000fff
+#define QED_MCP_DBG_DATA_HDR_TYPE_OFFSET    12
+#define QED_MCP_DBG_DATA_HDR_TYPE_MASK      0x000ff000
+#define QED_MCP_DBG_DATA_HDR_FLAGS_OFFSET   20
+#define QED_MCP_DBG_DATA_HDR_FLAGS_MASK     0x0ff00000
+#define QED_MCP_DBG_DATA_HDR_PF_OFFSET      28
+#define QED_MCP_DBG_DATA_HDR_PF_MASK        0xf0000000
+
+#define QED_MCP_DBG_DATA_HDR_FLAGS_FIRST    0x1
+#define QED_MCP_DBG_DATA_HDR_FLAGS_LAST     0x2
+
+static int
+qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
+                        struct qed_ptt *p_ptt,
+                        enum qed_mcp_dbg_data_type type, u8 *p_buf, u32 size)
+{
+    u8 raw_data[QED_MCP_DBG_DATA_MAX_SIZE], *p_tmp_buf = p_buf;
+    u32 tmp_size = size, *p_header, *p_payload;
+    u8 flags = 0;
+    u16 seq;
+    int rc;
+
+    p_header = (u32 *)raw_data;
+    p_payload = (u32 *)(raw_data + QED_MCP_DBG_DATA_MAX_HEADER_SIZE);
+
+    seq = (u16)atomic_inc_return(&p_hwfn->mcp_info->dbg_data_seq);
+
+    /* First chunk is marked as 'first' */
+    flags |= QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
+
+    *p_header = 0;
+    SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_SN, seq);
+    SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_TYPE, type);
+    SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
+    SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_PF, p_hwfn->abs_pf_id);
+
+    while (tmp_size > QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE) {
+        memcpy(p_payload, p_tmp_buf, QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE);
+        rc = __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
+                                       QED_MCP_DBG_DATA_MAX_SIZE);
+        if (rc)
+            return rc;
+
+        /* Clear the 'first' marking after sending the first chunk */
+        if (p_tmp_buf == p_buf) {
+            flags &= ~QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
+            SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS,
+                          flags);
+        }
+
+        p_tmp_buf += QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
+        tmp_size -= QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
+    }
+
+    /* Last chunk is marked as 'last' */
+    flags |= QED_MCP_DBG_DATA_HDR_FLAGS_LAST;
+    SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
+    memcpy(p_payload, p_tmp_buf, tmp_size);
+
+    /* Casting the left size to u8 is ok since at this point it is <= 32 */
+    return __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
+                                     (u8)(QED_MCP_DBG_DATA_MAX_HEADER_SIZE +
+                                     tmp_size));
+}
+
+int
+qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
+                            struct qed_ptt *p_ptt, u8 *p_buf, u32 size)
+{
+    return qed_mcp_send_debug_data(p_hwfn, p_ptt,
+                                   QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size);
+}
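A worked example of the debug-data header packing, using the masks and offsets defined in the hunk above (the input values are hypothetical):

/* PF 2, type RAW (0), flags FIRST | LAST (0x3), sequence 0x123:
 *
 *    header = (2 << 28) | (0x3 << 20) | (0 << 12) | 0x123
 *           = 0x20300123
 *
 * i.e. [31:28] PFID, [27:20] flags, [19:12] type, [11:0] S/N, matching
 * the QED_MCP_DBG_DATA_HDR_*_OFFSET/_MASK definitions consumed by
 * SET_MFW_FIELD() in qed_mcp_send_debug_data().
 */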