forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
@@ -37,8 +37,6 @@
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/fc/fc_fs.h>
 
-#include <linux/nvme-fc-driver.h>
-
 #include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
@@ -48,7 +46,6 @@
 #include "lpfc.h"
 #include "lpfc_scsi.h"
 #include "lpfc_nvme.h"
-#include "lpfc_nvmet.h"
 #include "lpfc_logmsg.h"
 #include "lpfc_version.h"
 #include "lpfc_compat.h"
@@ -64,15 +61,29 @@
 #define LPFC_MIN_MRQ_POST	512
 #define LPFC_MAX_MRQ_POST	2048
 
-#define LPFC_MAX_NVME_INFO_TMP_LEN	100
-#define LPFC_NVME_INFO_MORE_STR	"\nCould be more info...\n"
-
 /*
  * Write key size should be multiple of 4. If write key is changed
  * make sure that library write key is also changed.
  */
 #define LPFC_REG_WRITE_KEY_SIZE	4
 #define LPFC_REG_WRITE_KEY	"EMLX"
+
+const char *const trunk_errmsg[] = {	/* map errcode */
+    "",	/* There is no such error code at index 0*/
+    "link negotiated speed does not match existing"
+        " trunk - link was \"low\" speed",
+    "link negotiated speed does not match"
+        " existing trunk - link was \"middle\" speed",
+    "link negotiated speed does not match existing"
+        " trunk - link was \"high\" speed",
+    "Attached to non-trunking port - F_Port",
+    "Attached to non-trunking port - N_Port",
+    "FLOGI response timeout",
+    "non-FLOGI frame received",
+    "Invalid FLOGI response",
+    "Trunking initialization protocol",
+    "Trunk peer device mismatch",
+};
 
 /**
  * lpfc_jedec_to_ascii - Hex to ascii convertor according to JEDEC rules
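
The new trunk_errmsg[] table is indexed directly by the firmware-reported fault code in the link-state handler added later in this patch (trunk_errmsg[link.link0.fault]). For reference, a bounds-checked lookup would look like the sketch below; this helper is hypothetical and not part of the patch, which trusts the firmware to keep the fault code within range.

/* Hypothetical helper, not in the patch: defensive lookup into
 * trunk_errmsg[] for a firmware-reported trunk fault code.
 */
static const char *trunk_fault_msg(unsigned int fault)
{
    if (fault >= ARRAY_SIZE(trunk_errmsg))  /* out-of-range guard */
        return "unknown trunk fault";
    return trunk_errmsg[fault];
}
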
@@ -155,7 +166,7 @@
     struct lpfc_nvme_rport *rport;
     struct lpfc_nodelist *ndlp;
     struct nvme_fc_remote_port *nrport;
-    struct lpfc_nvme_ctrl_stat *cstat;
+    struct lpfc_fc4_ctrl_stat *cstat;
     uint64_t data1, data2, data3;
     uint64_t totin, totout, tot;
     char *statep;
@@ -163,7 +174,7 @@
     int len = 0;
     char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
 
-    if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
+    if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
        len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
        return len;
     }
@@ -333,11 +344,10 @@
        goto buffer_done;
 
     scnprintf(tmp, sizeof(tmp),
-             "XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n",
+             "XRI Dist lpfc%d Total %d IO %d ELS %d\n",
              phba->brd_no,
              phba->sli4_hba.max_cfg_param.max_xri,
-             phba->sli4_hba.nvme_xri_max,
-             phba->sli4_hba.scsi_xri_max,
+             phba->sli4_hba.io_xri_max,
              lpfc_sli4_get_els_iocb_cnt(phba));
     if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
        goto buffer_done;
@@ -458,13 +468,13 @@
 
     totin = 0;
     totout = 0;
-    for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
-        cstat = &lport->cstat[i];
-        tot = atomic_read(&cstat->fc4NvmeIoCmpls);
+    for (i = 0; i < phba->cfg_hdw_queue; i++) {
+        cstat = &phba->sli4_hba.hdwq[i].nvme_cstat;
+        tot = cstat->io_cmpls;
        totin += tot;
-        data1 = atomic_read(&cstat->fc4NvmeInputRequests);
-        data2 = atomic_read(&cstat->fc4NvmeOutputRequests);
-        data3 = atomic_read(&cstat->fc4NvmeControlRequests);
+        data1 = cstat->input_requests;
+        data2 = cstat->output_requests;
+        data3 = cstat->control_requests;
        totout += (data1 + data2 + data3);
     }
     scnprintf(tmp, sizeof(tmp),
@@ -512,6 +522,57 @@
            strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1)
            + 1);
     }
+
+    return len;
+}
+
+static ssize_t
+lpfc_scsi_stat_show(struct device *dev, struct device_attribute *attr,
+                   char *buf)
+{
+    struct Scsi_Host *shost = class_to_shost(dev);
+    struct lpfc_vport *vport = shost_priv(shost);
+    struct lpfc_hba *phba = vport->phba;
+    int len;
+    struct lpfc_fc4_ctrl_stat *cstat;
+    u64 data1, data2, data3;
+    u64 tot, totin, totout;
+    int i;
+    char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0};
+
+    if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ||
+        (phba->sli_rev != LPFC_SLI_REV4))
+        return 0;
+
+    scnprintf(buf, PAGE_SIZE, "SCSI HDWQ Statistics\n");
+
+    totin = 0;
+    totout = 0;
+    for (i = 0; i < phba->cfg_hdw_queue; i++) {
+        cstat = &phba->sli4_hba.hdwq[i].scsi_cstat;
+        tot = cstat->io_cmpls;
+        totin += tot;
+        data1 = cstat->input_requests;
+        data2 = cstat->output_requests;
+        data3 = cstat->control_requests;
+        totout += (data1 + data2 + data3);
+
+        scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx "
+                  "IO %016llx ", i, data1, data2, data3);
+        if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+            goto buffer_done;
+
+        scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n",
+                  tot, ((data1 + data2 + data3) - tot));
+        if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+            goto buffer_done;
+    }
+    scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx "
+              "OutIO %016llx\n", totin, totout, totout - totin);
+    strlcat(buf, tmp, PAGE_SIZE);
+
+buffer_done:
+    len = strnlen(buf, PAGE_SIZE);
 
     return len;
 }
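
The new lpfc_scsi_stat_show() follows the same sysfs pattern as lpfc_nvme_info_show(): format each row into a small stack buffer, append it with strlcat(), and stop once the PAGE_SIZE sysfs buffer would overflow. strlcat() returns the total length it *tried* to create, so a result >= PAGE_SIZE means the buffer is full. A minimal standalone sketch of that pattern (hypothetical names, not from the patch):

/* Sketch of the accumulate-and-guard pattern used by the show routines. */
static ssize_t example_show(char *buf)
{
    char tmp[64];
    int i;

    scnprintf(buf, PAGE_SIZE, "Header\n");
    for (i = 0; i < 4; i++) {
        scnprintf(tmp, sizeof(tmp), "row %d\n", i);
        if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
            break;              /* sysfs page full - stop cleanly */
    }
    return strnlen(buf, PAGE_SIZE); /* report what actually fit */
}
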
@@ -777,7 +838,8 @@
     lpfc_vpd_t *vp = &phba->vpd;
 
     lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
-    return scnprintf(buf, PAGE_SIZE, "%s\n", hdw);
+    return scnprintf(buf, PAGE_SIZE, "%s %08x %08x\n", hdw,
+                     vp->rev.smRev, vp->rev.smFwRev);
 }
 
 /**
@@ -892,6 +954,42 @@
            len += scnprintf(buf + len, PAGE_SIZE-len,
                    "   Point-2-Point\n");
        }
+    }
+
+    if ((phba->sli_rev == LPFC_SLI_REV4) &&
+        ((bf_get(lpfc_sli_intf_if_type,
+                 &phba->sli4_hba.sli_intf) ==
+         LPFC_SLI_INTF_IF_TYPE_6))) {
+        struct lpfc_trunk_link link = phba->trunk_link;
+
+        if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
+            len += scnprintf(buf + len, PAGE_SIZE - len,
+                "Trunk port 0: Link %s %s\n",
+                (link.link0.state == LPFC_LINK_UP) ?
+                 "Up" : "Down. ",
+                trunk_errmsg[link.link0.fault]);
+
+        if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
+            len += scnprintf(buf + len, PAGE_SIZE - len,
+                "Trunk port 1: Link %s %s\n",
+                (link.link1.state == LPFC_LINK_UP) ?
+                 "Up" : "Down. ",
+                trunk_errmsg[link.link1.fault]);
+
+        if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
+            len += scnprintf(buf + len, PAGE_SIZE - len,
+                "Trunk port 2: Link %s %s\n",
+                (link.link2.state == LPFC_LINK_UP) ?
+                 "Up" : "Down. ",
+                trunk_errmsg[link.link2.fault]);
+
+        if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
+            len += scnprintf(buf + len, PAGE_SIZE - len,
+                "Trunk port 3: Link %s %s\n",
+                (link.link3.state == LPFC_LINK_UP) ?
+                 "Up" : "Down. ",
+                trunk_errmsg[link.link3.fault]);
+
     }
 
     return len;
@@ -1044,6 +1142,9 @@
     pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
     pmboxq->u.mb.mbxOwner = OWN_HOST;
 
+    if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME))
+        vport->fc_flag &= ~FC_PT2PT_NO_NVME;
+
     mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
 
     if ((mbxstatus == MBX_SUCCESS) &&
@@ -1132,6 +1233,20 @@
 
     psli = &phba->sli;
 
+    /*
+     * If freeing the queues have already started, don't access them.
+     * Otherwise set FREE_WAIT to indicate that queues are being used
+     * to hold the freeing process until we finish.
+     */
+    spin_lock_irq(&phba->hbalock);
+    if (!(psli->sli_flag & LPFC_QUEUE_FREE_INIT)) {
+        psli->sli_flag |= LPFC_QUEUE_FREE_WAIT;
+    } else {
+        spin_unlock_irq(&phba->hbalock);
+        goto skip_wait;
+    }
+    spin_unlock_irq(&phba->hbalock);
+
     /* Wait a little for things to settle down, but not
      * long enough for dev loss timeout to expire.
      */
@@ -1153,6 +1268,11 @@
        }
     }
 out:
+    spin_lock_irq(&phba->hbalock);
+    psli->sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
+    spin_unlock_irq(&phba->hbalock);
+
+skip_wait:
     init_completion(&online_compl);
     rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
     if (rc == 0)
@@ -1164,6 +1284,82 @@
        return -EIO;
 
     return 0;
+}
+
+/**
+ * lpfc_reset_pci_bus - resets PCI bridge controller's secondary bus of an HBA
+ * @phba: lpfc_hba pointer.
+ *
+ * Description:
+ * Issues a PCI secondary bus reset for the phba->pcidev.
+ *
+ * Notes:
+ * First walks the bus_list to ensure only PCI devices with Emulex
+ * vendor id, device ids that support hot reset, only one occurrence
+ * of function 0, and all ports on the bus are in offline mode to ensure the
+ * hot reset only affects one valid HBA.
+ *
+ * Returns:
+ * -ENOTSUPP, cfg_enable_hba_reset must be of value 2
+ * -ENODEV,   NULL ptr to pcidev
+ * -EBADSLT,  detected invalid device
+ * -EBUSY,    port is not in offline state
+ *       0,   successful
+ */
+static int
+lpfc_reset_pci_bus(struct lpfc_hba *phba)
+{
+    struct pci_dev *pdev = phba->pcidev;
+    struct Scsi_Host *shost = NULL;
+    struct lpfc_hba *phba_other = NULL;
+    struct pci_dev *ptr = NULL;
+    int res;
+
+    if (phba->cfg_enable_hba_reset != 2)
+        return -ENOTSUPP;
+
+    if (!pdev) {
+        lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8345 pdev NULL!\n");
+        return -ENODEV;
+    }
+
+    res = lpfc_check_pci_resettable(phba);
+    if (res)
+        return res;
+
+    /* Walk the list of devices on the pci_dev's bus */
+    list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
+        /* Check port is offline */
+        shost = pci_get_drvdata(ptr);
+        if (shost) {
+            phba_other =
+                ((struct lpfc_vport *)shost->hostdata)->phba;
+            if (!(phba_other->pport->fc_flag & FC_OFFLINE_MODE)) {
+                lpfc_printf_log(phba_other, KERN_INFO, LOG_INIT,
+                        "8349 WWPN = 0x%02x%02x%02x%02x"
+                        "%02x%02x%02x%02x is not "
+                        "offline!\n",
+                        phba_other->wwpn[0],
+                        phba_other->wwpn[1],
+                        phba_other->wwpn[2],
+                        phba_other->wwpn[3],
+                        phba_other->wwpn[4],
+                        phba_other->wwpn[5],
+                        phba_other->wwpn[6],
+                        phba_other->wwpn[7]);
+                return -EBUSY;
+            }
+        }
+    }
+
+    /* Issue PCI bus reset */
+    res = pci_reset_bus(pdev);
+    if (res) {
+        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                "8350 PCI reset bus failed: %d\n", res);
+    }
+
+    return res;
 }
 
 /**
@@ -1279,8 +1475,9 @@
     int i;
 
     msleep(100);
-    lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
-               &portstat_reg.word0);
+    if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+                   &portstat_reg.word0))
+        return -EIO;
 
     /* verify if privileged for the request operation */
     if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
@@ -1290,8 +1487,9 @@
     /* wait for the SLI port firmware ready after firmware reset */
     for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
        msleep(10);
-       lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
-                  &portstat_reg.word0);
+       if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+                      &portstat_reg.word0))
+           continue;
        if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
            continue;
        if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
@@ -1341,24 +1539,24 @@
     before_fc_flag = phba->pport->fc_flag;
     sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
 
-    /* Disable SR-IOV virtual functions if enabled */
-    if (phba->cfg_sriov_nr_virtfn) {
-        pci_disable_sriov(pdev);
-        phba->cfg_sriov_nr_virtfn = 0;
+    if (opcode == LPFC_FW_DUMP) {
+        init_completion(&online_compl);
+        phba->fw_dump_cmpl = &online_compl;
+    } else {
+        /* Disable SR-IOV virtual functions if enabled */
+        if (phba->cfg_sriov_nr_virtfn) {
+            pci_disable_sriov(pdev);
+            phba->cfg_sriov_nr_virtfn = 0;
+        }
+
+        status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+
+        if (status != 0)
+            return status;
+
+        /* wait for the device to be quiesced before firmware reset */
+        msleep(100);
     }
-
-    if (opcode == LPFC_FW_DUMP)
-        phba->hba_flag |= HBA_FW_DUMP_OP;
-
-    status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
-
-    if (status != 0) {
-        phba->hba_flag &= ~HBA_FW_DUMP_OP;
-        return status;
-    }
-
-    /* wait for the device to be quiesced before firmware reset */
-    msleep(100);
 
     reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
             LPFC_CTL_PDEV_CTL_OFFSET);
@@ -1388,24 +1586,42 @@
        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                "3153 Fail to perform the requested "
                "access: x%x\n", reg_val);
+       if (phba->fw_dump_cmpl)
+           phba->fw_dump_cmpl = NULL;
        return rc;
     }
 
     /* keep the original port state */
-    if (before_fc_flag & FC_OFFLINE_MODE)
+    if (before_fc_flag & FC_OFFLINE_MODE) {
+        if (phba->fw_dump_cmpl)
+            phba->fw_dump_cmpl = NULL;
        goto out;
+    }
 
-    init_completion(&online_compl);
-    job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
-                                       LPFC_EVT_ONLINE);
-    if (!job_posted)
-        goto out;
+    /* Firmware dump will trigger an HA_ERATT event, and
+     * lpfc_handle_eratt_s4 routine already handles bringing the port back
+     * online.
+     */
+    if (opcode == LPFC_FW_DUMP) {
+        wait_for_completion(phba->fw_dump_cmpl);
+    } else {
+        init_completion(&online_compl);
+        job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
+                                           LPFC_EVT_ONLINE);
+        if (!job_posted)
+            goto out;
 
-    wait_for_completion(&online_compl);
-
+        wait_for_completion(&online_compl);
+    }
 out:
     /* in any case, restore the virtual functions enabled as before */
     if (sriov_nr_virtfn) {
+        /* If fw_dump was performed, first disable to clean up */
+        if (opcode == LPFC_FW_DUMP) {
+            pci_disable_sriov(pdev);
+            phba->cfg_sriov_nr_virtfn = 0;
+        }
+
        sriov_err =
            lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
        if (!sriov_err)
@@ -1439,6 +1655,65 @@
     struct lpfc_hba   *phba = vport->phba;
 
     return scnprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
+}
+
+static int
+lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
+{
+    LPFC_MBOXQ_t *mbox = NULL;
+    unsigned long val = 0;
+    char *pval = NULL;
+    int rc = 0;
+
+    if (!strncmp("enable", buff_out,
+                 strlen("enable"))) {
+        pval = buff_out + strlen("enable") + 1;
+        rc = kstrtoul(pval, 0, &val);
+        if (rc)
+            return rc; /* Invalid number */
+    } else if (!strncmp("disable", buff_out,
+                        strlen("disable"))) {
+        val = 0;
+    } else {
+        return -EINVAL; /* Invalid command */
+    }
+
+    switch (val) {
+    case 0:
+        val = 0x0; /* Disable */
+        break;
+    case 2:
+        val = 0x1; /* Enable two port trunk */
+        break;
+    case 4:
+        val = 0x2; /* Enable four port trunk */
+        break;
+    default:
+        return -EINVAL;
+    }
+
+    lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+            "0070 Set trunk mode with val %ld ", val);
+
+    mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+    if (!mbox)
+        return -ENOMEM;
+
+    lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+             LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE,
+             12, LPFC_SLI4_MBX_EMBED);
+
+    bf_set(lpfc_mbx_set_trunk_mode,
+           &mbox->u.mqe.un.set_trunk_mode,
+           val);
+    rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+    if (rc)
+        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+                "0071 Set trunk mode failed with status: %d",
+                rc);
+    mempool_free(mbox, phba->mbox_mem_pool);
+
+    return 0;
 }
 
 /**
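
lpfc_set_trunking() is reached, per the next hunk, by writing "trunk enable 2", "trunk enable 4", or "trunk disable" to the board_mode attribute; the caller passes buf + sizeof("trunk"), so the NUL counted by sizeof() doubles as the separator skip. A standalone sketch of just the two-token parse (hypothetical helper, not from the patch):

/* Hypothetical sketch of the "enable <n>" / "disable" parse: skip the
 * keyword plus one separator, then let kstrtoul() reject anything that
 * is not a clean number.
 */
static int parse_trunk_cmd(const char *buf, unsigned long *mode)
{
    if (!strncmp(buf, "enable", strlen("enable")))
        return kstrtoul(buf + strlen("enable") + 1, 0, mode);
    if (!strncmp(buf, "disable", strlen("disable"))) {
        *mode = 0;
        return 0;
    }
    return -EINVAL;     /* unknown keyword */
}
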
@@ -1533,6 +1808,11 @@
        status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
     else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
        status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
+    else if (strncmp(buf, "pci_bus_reset", sizeof("pci_bus_reset") - 1)
+             == 0)
+        status = lpfc_reset_pci_bus(phba);
+    else if (strncmp(buf, "trunk", sizeof("trunk") - 1) == 0)
+        status = lpfc_set_trunking(phba, (char *)buf + sizeof("trunk"));
     else
        status = -EINVAL;
 
@@ -1601,7 +1881,7 @@
     pmb = &pmboxq->u.mb;
     pmb->mbxCommand = MBX_READ_CONFIG;
     pmb->mbxOwner = OWN_HOST;
-    pmboxq->context1 = NULL;
+    pmboxq->ctx_buf = NULL;
 
     if (phba->pport->fc_flag & FC_OFFLINE_MODE)
        rc = MBX_NOT_FINISHED;
@@ -1965,66 +2245,6 @@
     spin_unlock_irq(&phba->hbalock);
 
     return strlen(buf);
-}
-
-/**
- * lpfc_fips_level_show - Return the current FIPS level for the HBA
- * @dev: class unused variable.
- * @attr: device attribute, not used.
- * @buf: on return contains the module description text.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_fips_level_show(struct device *dev, struct device_attribute *attr,
-                     char *buf)
-{
-    struct Scsi_Host  *shost = class_to_shost(dev);
-    struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
-    struct lpfc_hba   *phba = vport->phba;
-
-    return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level);
-}
-
-/**
- * lpfc_fips_rev_show - Return the FIPS Spec revision for the HBA
- * @dev: class unused variable.
- * @attr: device attribute, not used.
- * @buf: on return contains the module description text.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr,
-                   char *buf)
-{
-    struct Scsi_Host  *shost = class_to_shost(dev);
-    struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
-    struct lpfc_hba   *phba = vport->phba;
-
-    return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
-}
-
-/**
- * lpfc_dss_show - Return the current state of dss and the configured state
- * @dev: class converted to a Scsi_host structure.
- * @attr: device attribute, not used.
- * @buf: on return contains the formatted text.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_dss_show(struct device *dev, struct device_attribute *attr,
-              char *buf)
-{
-    struct Scsi_Host *shost = class_to_shost(dev);
-    struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
-    struct lpfc_hba   *phba = vport->phba;
-
-    return scnprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
-            (phba->cfg_enable_dss) ? "Enabled" : "Disabled",
-            (phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ?
-                "" : "Not ");
 }
 
 /**
@@ -2408,6 +2628,7 @@
 
 
 static DEVICE_ATTR(nvme_info, 0444, lpfc_nvme_info_show, NULL);
+static DEVICE_ATTR(scsi_stat, 0444, lpfc_scsi_stat_show, NULL);
 static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
 static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
 static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
@@ -2441,9 +2662,6 @@
 static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
 static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
 static DEVICE_ATTR_RO(lpfc_temp_sensor);
-static DEVICE_ATTR_RO(lpfc_fips_level);
-static DEVICE_ATTR_RO(lpfc_fips_rev);
-static DEVICE_ATTR_RO(lpfc_dss);
 static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn);
 static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
 static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
@@ -3271,6 +3489,31 @@
 LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
         LPFC_DELAY_INIT_LINK_INDEFINITELY,
         "Suppress Link Up at initialization");
+
+static ssize_t
+lpfc_pls_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+    struct Scsi_Host *shost = class_to_shost(dev);
+    struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+    return scnprintf(buf, PAGE_SIZE, "%d\n",
+             phba->sli4_hba.pc_sli4_params.pls);
+}
+static DEVICE_ATTR(pls, 0444,
+           lpfc_pls_show, NULL);
+
+static ssize_t
+lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+    struct Scsi_Host *shost = class_to_shost(dev);
+    struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+    return scnprintf(buf, PAGE_SIZE, "%d\n",
+             (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0);
+}
+static DEVICE_ATTR(pt, 0444,
+           lpfc_pt_show, NULL);
+
 /*
 # lpfc_cnt: Number of IOCBs allocated for ELS, CT, and ABTS
 # 1 - (1024)
@@ -3317,9 +3560,6 @@
 
 static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
            lpfc_txcmplq_hw_show, NULL);
-
-LPFC_ATTR_R(iocb_cnt, 2, 1, 5,
-        "Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
 
 /*
 # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
@@ -3421,8 +3661,8 @@
     if (rport)
        remoteport = rport->remoteport;
     spin_unlock(&vport->phba->hbalock);
-    if (remoteport)
-        nvme_fc_set_remoteport_devloss(rport->remoteport,
+    if (rport && remoteport)
+        nvme_fc_set_remoteport_devloss(remoteport,
                        vport->cfg_devloss_tmo);
 #endif
 }
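
The fix above stops the code from re-dereferencing rport->remoteport after the lock is dropped: the pointer is snapshotted while protected and only the snapshot is used afterwards, with both the container and the snapshot NULL-checked. The general shape of the pattern, with hypothetical names (this helper is illustrative, not from the patch):

/* Sketch: snapshot an inner pointer under the lock, then use only the
 * snapshot - never re-read parent->child once the lock is released.
 */
static struct child *snapshot_child(struct parent *p, spinlock_t *lock)
{
    struct child *c = NULL;

    spin_lock(lock);
    if (p)
        c = p->child;       /* read while protected */
    spin_unlock(lock);
    return c;               /* caller must still NULL-check */
}
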
@@ -3558,27 +3798,11 @@
 * lpfc_enable_fc4_type: Defines what FC4 types are supported.
 * Supported Values:  1 - register just FCP
 *                    3 - register both FCP and NVME
- * Supported values are [1,3]. Default value is 1
+ * Supported values are [1,3]. Default value is 3
 */
-LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP,
-        LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
+LPFC_ATTR_R(enable_fc4_type, LPFC_DEF_ENBL_FC4_TYPE,
+        LPFC_ENABLE_FCP, LPFC_MAX_ENBL_FC4_TYPE,
        "Enable FC4 Protocol support - FCP / NVME");
-
-/*
- * lpfc_xri_split: Defines the division of XRI resources between SCSI and NVME
- * This parameter is only used if:
- *     lpfc_enable_fc4_type is 3 - register both FCP and NVME and
- *     port is not configured for NVMET.
- *
- * ELS/CT always get 10% of XRIs, up to a maximum of 250
- * The remaining XRIs get split up based on lpfc_xri_split per port:
- *
- * Supported Values are in percentages
- * the xri_split value is the percentage the SCSI port will get. The remaining
- * percentage will go to NVME.
- */
-LPFC_ATTR_R(xri_split, 50, 10, 90,
-        "Percentage of FCP XRI resources versus NVME");
 
 /*
 # lpfc_log_verbose: Only turn this flag on if you are willing to risk being
@@ -3598,12 +3822,9 @@
 
 /*
 # lun_queue_depth:  This parameter is used to limit the number of outstanding
-# commands per FCP LUN. Value range is [1,512]. Default value is 30.
-# If this parameter value is greater than 1/8th the maximum number of exchanges
-# supported by the HBA port, then the lun queue depth will be reduced to
-# 1/8th the maximum number of exchanges.
+# commands per FCP LUN.
 */
-LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 512,
+LPFC_VPORT_ATTR_R(lun_queue_depth, 64, 1, 512,
          "Max number of FCP commands we can queue to a specific LUN");
 
 /*
@@ -3850,8 +4071,16 @@
            val);
        return -EINVAL;
     }
-    if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
-         phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) &&
+    /*
+     * The 'topology' is not a configurable parameter if :
+     *   - persistent topology enabled
+     *   - G7/G6 with no private loop support
+     */
+
+    if ((phba->hba_flag & HBA_PERSISTENT_TOPO ||
+         (!phba->sli4_hba.pc_sli4_params.pls &&
+         (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
+          phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC))) &&
        val == 4) {
        lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
            "3114 Loop mode not supported\n");
@@ -4505,7 +4734,7 @@
 * Description:
 * If the @buf contains 1 and the device currently has the AER support
 * enabled, then invokes the kernel AER helper routine
- * pci_cleanup_aer_uncorrect_error_status to clean up the uncorrectable
+ * pci_aer_clear_nonfatal_status() to clean up the uncorrectable
 * error status register.
 *
 * Notes:
@@ -4531,7 +4760,7 @@
        return -EINVAL;
 
     if (phba->hba_flag & HBA_AER_ENABLED)
-        rc = pci_cleanup_aer_uncorrect_error_status(phba->pcidev);
+        rc = pci_aer_clear_nonfatal_status(phba->pcidev);
 
     if (rc == 0)
        return strlen(buf);
@@ -4665,7 +4894,7 @@
     struct Scsi_Host *shost = class_to_shost(dev);
     struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
     struct lpfc_hba *phba = vport->phba;
-    int val = 0, rc = -EINVAL;
+    int val = 0, rc;
 
     /* Sanity check on user data */
     if (!isdigit(buf[0]))
@@ -4714,6 +4943,64 @@
           lpfc_request_firmware_upgrade_store);
 
 /**
+ * lpfc_force_rscn_store
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: unused string
+ * @count: unused variable.
+ *
+ * Description:
+ * Force the switch to send a RSCN to all other NPorts in our zone
+ * If we are direct connect pt2pt, build the RSCN command ourself
+ * and send to the other NPort. Not supported for private loop.
+ *
+ * Returns:
+ * 0      - on success
+ * -EIO   - if command is not sent
+ **/
+static ssize_t
+lpfc_force_rscn_store(struct device *dev, struct device_attribute *attr,
+                      const char *buf, size_t count)
+{
+    struct Scsi_Host *shost = class_to_shost(dev);
+    struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+    int i;
+
+    i = lpfc_issue_els_rscn(vport, 0);
+    if (i)
+        return -EIO;
+    return strlen(buf);
+}
+
+/*
+ * lpfc_force_rscn: Force an RSCN to be sent to all remote NPorts
+ * connected to the HBA.
+ *
+ * Value range is any ascii value
+ */
+static int lpfc_force_rscn;
+module_param(lpfc_force_rscn, int, 0644);
+MODULE_PARM_DESC(lpfc_force_rscn,
+         "Force an RSCN to be sent to all remote NPorts");
+lpfc_param_show(force_rscn)
+
+/**
+ * lpfc_force_rscn_init - Force an RSCN to be sent to all remote NPorts
+ * @phba: lpfc_hba pointer.
+ * @val: unused value.
+ *
+ * Returns:
+ * zero if val saved.
+ **/
+static int
+lpfc_force_rscn_init(struct lpfc_hba *phba, int val)
+{
+    return 0;
+}
+static DEVICE_ATTR_RW(lpfc_force_rscn);
+
+/**
  * lpfc_fcp_imax_store
@@ -4737,6 +5024,8 @@
     struct Scsi_Host *shost = class_to_shost(dev);
     struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
     struct lpfc_hba *phba = vport->phba;
+    struct lpfc_eq_intr_info *eqi;
+    uint32_t usdelay;
     int val = 0, i;
 
     /* fcp_imax is only valid for SLI4 */
@@ -4757,12 +5046,27 @@
     if (val && (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX))
        return -EINVAL;
 
-    phba->cfg_fcp_imax = (uint32_t)val;
-    phba->initial_imax = phba->cfg_fcp_imax;
+    phba->cfg_auto_imax = (val) ? 0 : 1;
+    if (phba->cfg_fcp_imax && !val) {
+        queue_delayed_work(phba->wq, &phba->eq_delay_work,
+                           msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
 
-    for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
+        for_each_present_cpu(i) {
+            eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
+            eqi->icnt = 0;
+        }
+    }
+
+    phba->cfg_fcp_imax = (uint32_t)val;
+
+    if (phba->cfg_fcp_imax)
+        usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
+    else
+        usdelay = 0;
+
+    for (i = 0; i < phba->cfg_irq_chann; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
        lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
-                                 val);
+                                 usdelay);
 
     return strlen(buf);
 }
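
The reworked store routine no longer programs the raw fcp_imax value; it converts the requested interrupts-per-second ceiling into a per-EQ coalescing delay in microseconds before calling lpfc_modify_hba_eq_delay(), and a value of zero re-enables auto-tuning via the delayed work. A self-contained sketch of just that conversion, assuming the driver's LPFC_SEC_TO_USEC of one million microseconds per second:

#define LPFC_SEC_TO_USEC 1000000    /* assumed driver value */

/* Convert an interrupt ceiling (events/sec) into the EQ delay in
 * microseconds; 0 means "auto-tune", which maps to no fixed delay.
 * Example: a 20000 ints/sec target yields a 50 usec EQ delay.
 */
static unsigned int imax_to_usdelay(unsigned int val)
{
    return val ? LPFC_SEC_TO_USEC / val : 0;
}
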
@@ -4816,15 +5120,120 @@
 
 static DEVICE_ATTR_RW(lpfc_fcp_imax);
 
+/**
+ * lpfc_cq_max_proc_limit_store
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: string with the cq max processing limit of cqes
+ * @count: unused variable.
+ *
+ * Description:
+ * If val is in a valid range, then set value on each cq
+ *
+ * Returns:
+ * The length of the buf: if successful
+ * -ERANGE: if val is not in the valid range
+ * -EINVAL: if bad value format or intended mode is not supported.
+ **/
+static ssize_t
+lpfc_cq_max_proc_limit_store(struct device *dev, struct device_attribute *attr,
+                             const char *buf, size_t count)
+{
+    struct Scsi_Host *shost = class_to_shost(dev);
+    struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+    struct lpfc_hba *phba = vport->phba;
+    struct lpfc_queue *eq, *cq;
+    unsigned long val;
+    int i;
+
+    /* cq_max_proc_limit is only valid for SLI4 */
+    if (phba->sli_rev != LPFC_SLI_REV4)
+        return -EINVAL;
+
+    /* Sanity check on user data */
+    if (!isdigit(buf[0]))
+        return -EINVAL;
+    if (kstrtoul(buf, 0, &val))
+        return -EINVAL;
+
+    if (val < LPFC_CQ_MIN_PROC_LIMIT || val > LPFC_CQ_MAX_PROC_LIMIT)
+        return -ERANGE;
+
+    phba->cfg_cq_max_proc_limit = (uint32_t)val;
+
+    /* set the values on the cq's */
+    for (i = 0; i < phba->cfg_irq_chann; i++) {
+        /* Get the EQ corresponding to the IRQ vector */
+        eq = phba->sli4_hba.hba_eq_hdl[i].eq;
+        if (!eq)
+            continue;
+
+        list_for_each_entry(cq, &eq->child_list, list)
+            cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
+                                     cq->entry_count);
+    }
+
+    return strlen(buf);
+}
+
 /*
- * lpfc_auto_imax: Controls Auto-interrupt coalescing values support.
- *       0       No auto_imax support
- *       1       auto imax on
- * Auto imax will change the value of fcp_imax on a per EQ basis, using
- * the EQ Delay Multiplier, depending on the activity for that EQ.
- * Value range [0,1]. Default value is 1.
+ * lpfc_cq_max_proc_limit: The maximum number CQE entries processed in an
+ * itteration of CQ processing.
  */
-LPFC_ATTR_RW(auto_imax, 1, 0, 1, "Enable Auto imax");
+static int lpfc_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
+module_param(lpfc_cq_max_proc_limit, int, 0644);
+MODULE_PARM_DESC(lpfc_cq_max_proc_limit,
+         "Set the maximum number CQEs processed in an iteration of "
+         "CQ processing");
+lpfc_param_show(cq_max_proc_limit)
+
+/*
+ * lpfc_cq_poll_threshold: Set the threshold of CQE completions in a
+ * single handler call which should request a polled completion rather
+ * than re-enabling interrupts.
+ */
+LPFC_ATTR_RW(cq_poll_threshold, LPFC_CQ_DEF_THRESHOLD_TO_POLL,
+         LPFC_CQ_MIN_THRESHOLD_TO_POLL,
+         LPFC_CQ_MAX_THRESHOLD_TO_POLL,
+         "CQE Processing Threshold to enable Polling");
+
+/**
+ * lpfc_cq_max_proc_limit_init - Set the initial cq max_proc_limit
+ * @phba: lpfc_hba pointer.
+ * @val: entry limit
+ *
+ * Description:
+ * If val is in a valid range, then initialize the adapter's maximum
+ * value.
+ *
+ * Returns:
+ * Always returns 0 for success, even if value not always set to
+ * requested value. If value out of range or not supported, will fall
+ * back to default.
+ **/
+static int
+lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val)
+{
+    phba->cfg_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
+
+    if (phba->sli_rev != LPFC_SLI_REV4)
+        return 0;
+
+    if (val >= LPFC_CQ_MIN_PROC_LIMIT && val <= LPFC_CQ_MAX_PROC_LIMIT) {
+        phba->cfg_cq_max_proc_limit = val;
+        return 0;
+    }
+
+    lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+            "0371 "LPFC_DRIVER_NAME"_cq_max_proc_limit: "
+            "%d out of range, using default\n",
+            phba->cfg_cq_max_proc_limit);
+
+    return 0;
+}
+
+static DEVICE_ATTR_RW(lpfc_cq_max_proc_limit);
 
 /**
  * lpfc_state_show - Display current driver CPU affinity
@@ -4857,50 +5266,79 @@
     case 1:
        len += scnprintf(buf + len, PAGE_SIZE-len,
                "fcp_cpu_map: HBA centric mapping (%d): "
-                "%d online CPUs\n",
-                phba->cfg_fcp_cpu_map,
-                phba->sli4_hba.num_online_cpu);
-        break;
-    case 2:
-        len += scnprintf(buf + len, PAGE_SIZE-len,
-                "fcp_cpu_map: Driver centric mapping (%d): "
-                "%d online CPUs\n",
-                phba->cfg_fcp_cpu_map,
-                phba->sli4_hba.num_online_cpu);
+                "%d of %d CPUs online from %d possible CPUs\n",
+                phba->cfg_fcp_cpu_map, num_online_cpus(),
+                num_present_cpus(),
+                phba->sli4_hba.num_possible_cpu);
        break;
     }
 
-    while (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_present_cpu) {
+    while (phba->sli4_hba.curr_disp_cpu <
+           phba->sli4_hba.num_possible_cpu) {
        cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
 
-        /* margin should fit in this and the truncated message */
-        if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
-            len += scnprintf(buf + len, PAGE_SIZE-len,
-                    "CPU %02d io_chan %02d "
-                    "physid %d coreid %d\n",
+        if (!cpu_present(phba->sli4_hba.curr_disp_cpu))
+            len += scnprintf(buf + len, PAGE_SIZE - len,
+                    "CPU %02d not present\n",
+                    phba->sli4_hba.curr_disp_cpu);
+        else if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
+            if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
+                len += scnprintf(
+                    buf + len, PAGE_SIZE - len,
+                    "CPU %02d hdwq None "
+                    "physid %d coreid %d ht %d ua %d\n",
                    phba->sli4_hba.curr_disp_cpu,
-                    cpup->channel_id, cpup->phys_id,
-                    cpup->core_id);
-        else
-            len += scnprintf(buf + len, PAGE_SIZE-len,
-                    "CPU %02d io_chan %02d "
-                    "physid %d coreid %d IRQ %d\n",
+                    cpup->phys_id, cpup->core_id,
+                    (cpup->flag & LPFC_CPU_MAP_HYPER),
+                    (cpup->flag & LPFC_CPU_MAP_UNASSIGN));
+            else
+                len += scnprintf(
+                    buf + len, PAGE_SIZE - len,
+                    "CPU %02d EQ None hdwq %04d "
+                    "physid %d coreid %d ht %d ua %d\n",
                    phba->sli4_hba.curr_disp_cpu,
-                    cpup->channel_id, cpup->phys_id,
-                    cpup->core_id, cpup->irq);
+                    cpup->hdwq, cpup->phys_id,
+                    cpup->core_id,
+                    (cpup->flag & LPFC_CPU_MAP_HYPER),
+                    (cpup->flag & LPFC_CPU_MAP_UNASSIGN));
+        } else {
+            if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
+                len += scnprintf(
+                    buf + len, PAGE_SIZE - len,
+                    "CPU %02d hdwq None "
+                    "physid %d coreid %d ht %d ua %d IRQ %d\n",
+                    phba->sli4_hba.curr_disp_cpu,
+                    cpup->phys_id,
+                    cpup->core_id,
+                    (cpup->flag & LPFC_CPU_MAP_HYPER),
+                    (cpup->flag & LPFC_CPU_MAP_UNASSIGN),
+                    lpfc_get_irq(cpup->eq));
+            else
+                len += scnprintf(
+                    buf + len, PAGE_SIZE - len,
+                    "CPU %02d EQ %04d hdwq %04d "
+                    "physid %d coreid %d ht %d ua %d IRQ %d\n",
+                    phba->sli4_hba.curr_disp_cpu,
+                    cpup->eq, cpup->hdwq, cpup->phys_id,
+                    cpup->core_id,
+                    (cpup->flag & LPFC_CPU_MAP_HYPER),
+                    (cpup->flag & LPFC_CPU_MAP_UNASSIGN),
+                    lpfc_get_irq(cpup->eq));
+        }
 
        phba->sli4_hba.curr_disp_cpu++;
 
        /* display max number of CPUs keeping some margin */
        if (phba->sli4_hba.curr_disp_cpu <
-            phba->sli4_hba.num_present_cpu &&
+            phba->sli4_hba.num_possible_cpu &&
            (len >= (PAGE_SIZE - 64))) {
-            len += scnprintf(buf + len, PAGE_SIZE-len, "more...\n");
+            len += scnprintf(buf + len,
+                    PAGE_SIZE - len, "more...\n");
            break;
        }
     }
 
-    if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_present_cpu)
+    if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_possible_cpu)
        phba->sli4_hba.curr_disp_cpu = 0;
 
     return len;
@@ -4920,22 +5358,20 @@
 lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
               const char *buf, size_t count)
 {
-    int status = -EINVAL;
-    return status;
+    return -EINVAL;
 }
 
 /*
 # lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
 # for the HBA.
 #
-# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2).
+# Value range is [0 to 1]. Default value is LPFC_HBA_CPU_MAP (1).
 #	0 - Do not affinitze IRQ vectors
 #	1 - Affintize HBA vectors with respect to each HBA
 #	    (start with CPU0 for each HBA)
-#	2 - Affintize HBA vectors with respect to the entire driver
-#	    (round robin thru all CPUs across all HBAs)
+# This also defines how Hardware Queues are mapped to specific CPUs.
 */
-static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+static int lpfc_fcp_cpu_map = LPFC_HBA_CPU_MAP;
 module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(lpfc_fcp_cpu_map,
         "Defines how to map CPUs to IRQ vectors per HBA");
@@ -4969,7 +5405,7 @@
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "3326 lpfc_fcp_cpu_map: %d out of range, using "
                "default\n", val);
-        phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+        phba->cfg_fcp_cpu_map = LPFC_HBA_CPU_MAP;
 
     return 0;
 }
@@ -5014,15 +5450,12 @@
 * lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions.
 * For the Initiator (I), enabling this parameter means that an NVMET
 * PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be
- * processed by the initiator for subsequent NVME FCP IO. For the target
- * function (T), enabling this parameter qualifies the lpfc_nvmet_fb_size
- * driver parameter as the target function's first burst size returned to the
- * initiator in the target's NVME PRLI response. Parameter supported on physical
- * port only - no NPIV support.
+ * processed by the initiator for subsequent NVME FCP IO.
+ * Currently, this feature is not supported on the NVME target
 * Value range is [0,1]. Default value is 0 (disabled).
 */
 LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1,
-         "Enable First Burst feature on I and T functions.");
+         "Enable First Burst feature for NVME Initiator.");
 
 /*
 # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
@@ -5069,13 +5502,20 @@
 LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
 
 /*
+# lpfc_xri_rebalancing: enable or disable XRI rebalancing feature
+# range is [0,1]. Default value is 1.
+*/
+LPFC_ATTR_R(xri_rebalancing, 1, 0, 1, "Enable/Disable XRI rebalancing");
+
+/*
 * lpfc_io_sched: Determine scheduling algrithmn for issuing FCP cmds
 * range is [0,1]. Default value is 0.
- * For [0], FCP commands are issued to Work Queues ina round robin fashion.
+ * For [0], FCP commands are issued to Work Queues based on upper layer
+ * hardware queue index.
 * For [1], FCP commands are issued to a Work Queue associated with the
 * current CPU.
 *
- * LPFC_FCP_SCHED_ROUND_ROBIN == 0
+ * LPFC_FCP_SCHED_BY_HDWQ == 0
 * LPFC_FCP_SCHED_BY_CPU == 1
 *
 * The driver dynamically sets this to 1 (BY_CPU) if it's able to set up cpu
@@ -5083,11 +5523,23 @@
 * CPU. Otherwise, the default 0 (Round Robin) scheduling of FCP/NVME I/Os
 * through WQs will be used.
 */
-LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_ROUND_ROBIN,
-         LPFC_FCP_SCHED_ROUND_ROBIN,
+LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_BY_CPU,
+         LPFC_FCP_SCHED_BY_HDWQ,
         LPFC_FCP_SCHED_BY_CPU,
         "Determine scheduling algorithm for "
-         "issuing commands [0] - Round Robin, [1] - Current CPU");
+         "issuing commands [0] - Hardware Queue, [1] - Current CPU");
+
+/*
+ * lpfc_ns_query: Determine algrithmn for NameServer queries after RSCN
+ * range is [0,1]. Default value is 0.
+ * For [0], GID_FT is used for NameServer queries after RSCN (default)
+ * For [1], GID_PT is used for NameServer queries after RSCN
+ *
+ */
+LPFC_ATTR_RW(ns_query, LPFC_NS_QUERY_GID_FT,
+         LPFC_NS_QUERY_GID_FT, LPFC_NS_QUERY_GID_PT,
+         "Determine algorithm NameServer queries after RSCN "
+         "[0] - GID_FT, [1] - GID_PT");
 
 /*
 # lpfc_fcp2_no_tgt_reset: Determine bus reset behavior
@@ -5237,49 +5689,223 @@
        "Embed NVME Command in WQE");
 
 /*
- * lpfc_fcp_io_channel: Set the number of FCP IO channels the driver
- * will advertise it supports to the SCSI layer. This also will map to
- * the number of WQs the driver will create.
+ * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues
+ * the driver will advertise it supports to the SCSI layer.
 *
- * 0    = Configure the number of io channels to the number of active CPUs.
- * 1,32 = Manually specify how many io channels to use.
+ * 0     = Set nr_hw_queues by the number of CPUs or HW queues.
+ * 1,256 = Manually specify nr_hw_queue value to be advertised,
 *
- * Value range is [0,32]. Default value is 4.
+ * Value range is [0,256]. Default value is 8.
 */
-LPFC_ATTR_R(fcp_io_channel,
-        LPFC_FCP_IO_CHAN_DEF,
-        LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX,
-        "Set the number of FCP I/O channels");
+LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
+        LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
+        "Set the number of SCSI Queues advertised");
 
 /*
- * lpfc_nvme_io_channel: Set the number of IO hardware queues the driver
- * will advertise it supports to the NVME layer. This also will map to
- * the number of WQs the driver will create.
- *
- * This module parameter is valid when lpfc_enable_fc4_type is set
- * to support NVME.
+ * lpfc_hdw_queue: Set the number of Hardware Queues the driver
+ * will advertise it supports to the NVME and SCSI layers. This also
+ * will map to the number of CQ/WQ pairs the driver will create.
 *
 * The NVME Layer will try to create this many, plus 1 administrative
 * hardware queue. The administrative queue will always map to WQ 0
- * A hardware IO queue maps (qidx) to a specific driver WQ.
+ * A hardware IO queue maps (qidx) to a specific driver CQ/WQ.
 *
- * 0    = Configure the number of io channels to the number of active CPUs.
- * 1,32 = Manually specify how many io channels to use.
+ * 0     = Configure the number of hdw queues to the number of active CPUs.
+ * 1,256 = Manually specify how many hdw queues to use.
 *
- * Value range is [0,32]. Default value is 0.
+ * Value range is [0,256]. Default value is 0.
 */
-LPFC_ATTR_R(nvme_io_channel,
-        LPFC_NVME_IO_CHAN_DEF,
-        LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX,
-        "Set the number of NVME I/O channels");
+LPFC_ATTR_R(hdw_queue,
+        LPFC_HBA_HDWQ_DEF,
+        LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
+        "Set the number of I/O Hardware Queues");
+
+#if IS_ENABLED(CONFIG_X86)
+/**
+ * lpfc_cpumask_irq_mode_init - initalizes cpumask of phba based on
+ *                              irq_chann_mode
+ * @phba: Pointer to HBA context object.
+ **/
+static void
+lpfc_cpumask_irq_mode_init(struct lpfc_hba *phba)
+{
+    unsigned int cpu, first_cpu, numa_node = NUMA_NO_NODE;
+    const struct cpumask *sibling_mask;
+    struct cpumask *aff_mask = &phba->sli4_hba.irq_aff_mask;
+
+    cpumask_clear(aff_mask);
+
+    if (phba->irq_chann_mode == NUMA_MODE) {
+        /* Check if we're a NUMA architecture */
+        numa_node = dev_to_node(&phba->pcidev->dev);
+        if (numa_node == NUMA_NO_NODE) {
+            phba->irq_chann_mode = NORMAL_MODE;
+            return;
+        }
+    }
+
+    for_each_possible_cpu(cpu) {
+        switch (phba->irq_chann_mode) {
+        case NUMA_MODE:
+            if (cpu_to_node(cpu) == numa_node)
+                cpumask_set_cpu(cpu, aff_mask);
+            break;
+        case NHT_MODE:
+            sibling_mask = topology_sibling_cpumask(cpu);
+            first_cpu = cpumask_first(sibling_mask);
+            if (first_cpu < nr_cpu_ids)
+                cpumask_set_cpu(first_cpu, aff_mask);
+            break;
+        default:
+            break;
+        }
+    }
+}
+#endif
+
+static void
+lpfc_assign_default_irq_chann(struct lpfc_hba *phba)
+{
+#if IS_ENABLED(CONFIG_X86)
+    switch (boot_cpu_data.x86_vendor) {
+    case X86_VENDOR_AMD:
+        /* If AMD architecture, then default is NUMA_MODE */
+        phba->irq_chann_mode = NUMA_MODE;
+        break;
+    case X86_VENDOR_INTEL:
+        /* If Intel architecture, then default is no hyperthread mode */
+        phba->irq_chann_mode = NHT_MODE;
+        break;
+    default:
+        phba->irq_chann_mode = NORMAL_MODE;
+        break;
+    }
+    lpfc_cpumask_irq_mode_init(phba);
+#else
+    phba->irq_chann_mode = NORMAL_MODE;
+#endif
+}
+
+/*
+ * lpfc_irq_chann: Set the number of IRQ vectors that are available
+ * for Hardware Queues to utilize.  This also will map to the number
+ * of EQ / MSI-X vectors the driver will create. This should never be
+ * more than the number of Hardware Queues
+ *
+ * 0      = Configure number of IRQ Channels to:
+ *          if AMD architecture, number of CPUs on HBA's NUMA node
+ *          if Intel architecture, number of physical CPUs.
+ *          otherwise, number of active CPUs.
+ * [1,256] = Manually specify how many IRQ Channels to use.
+ *
+ * Value range is [0,256]. Default value is [0].
+ */
+static uint lpfc_irq_chann = LPFC_IRQ_CHANN_DEF;
+module_param(lpfc_irq_chann, uint, 0444);
+MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate");
+
+/* lpfc_irq_chann_init - Set the hba irq_chann initial value
+ * @phba: lpfc_hba pointer.
+ * @val: contains the initial value
+ *
+ * Description:
+ * Validates the initial value is within range and assigns it to the
+ * adapter. If not in range, an error message is posted and the
+ * default value is assigned.
+ *
+ * Returns:
+ * zero if value is in range and is set
+ * -EINVAL if value was out of range
+ **/
+static int
+lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
+{
+    const struct cpumask *aff_mask;
+
+    if (phba->cfg_use_msi != 2) {
+        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                "8532 use_msi = %u ignoring cfg_irq_numa\n",
+                phba->cfg_use_msi);
+        phba->irq_chann_mode = NORMAL_MODE;
+        phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
+        return 0;
+    }
+
+    /* Check if default setting was passed */
+    if (val == LPFC_IRQ_CHANN_DEF)
+        lpfc_assign_default_irq_chann(phba);
+
+    if (phba->irq_chann_mode != NORMAL_MODE) {
+        aff_mask = &phba->sli4_hba.irq_aff_mask;
+
+        if (cpumask_empty(aff_mask)) {
+            lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                    "8533 Could not identify CPUS for "
+                    "mode %d, ignoring\n",
+                    phba->irq_chann_mode);
+            phba->irq_chann_mode = NORMAL_MODE;
+            phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
+        } else {
+            phba->cfg_irq_chann = cpumask_weight(aff_mask);
+
+            /* If no hyperthread mode, then set hdwq count to
+             * aff_mask weight as well
+             */
+            if (phba->irq_chann_mode == NHT_MODE)
+                phba->cfg_hdw_queue = phba->cfg_irq_chann;
+
+            lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                    "8543 lpfc_irq_chann set to %u "
+                    "(mode: %d)\n", phba->cfg_irq_chann,
+                    phba->irq_chann_mode);
+        }
+    } else {
+        if (val > LPFC_IRQ_CHANN_MAX) {
+            lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                    "8545 lpfc_irq_chann attribute cannot "
+                    "be set to %u, allowed range is "
+                    "[%u,%u]\n",
+                    val,
+                    LPFC_IRQ_CHANN_MIN,
+                    LPFC_IRQ_CHANN_MAX);
+            phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
+            return -EINVAL;
+        }
+        phba->cfg_irq_chann = val;
+    }
+
+    return 0;
+}
+
+/**
+ * lpfc_irq_chann_show - Display value of irq_chann
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains a string with the list sizes
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_irq_chann_show(struct device *dev, struct device_attribute *attr,
+            char *buf)
+{
+    struct Scsi_Host *shost = class_to_shost(dev);
+    struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+    struct lpfc_hba *phba = vport->phba;
+
+    return scnprintf(buf, PAGE_SIZE, "%u\n", phba->cfg_irq_chann);
+}
+
+static DEVICE_ATTR_RO(lpfc_irq_chann);
 
 /*
 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
 #       0  = HBA resets disabled
 #       1  = HBA resets enabled (default)
-# Value range is [0,1]. Default value is 1.
+#       2  = HBA reset via PCI bus reset enabled
+# Value range is [0,2]. Default value is 1.
 */
-LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
+LPFC_ATTR_RW(enable_hba_reset, 1, 0, 2, "Enable HBA resets from the driver.");
 
 /*
 # lpfc_enable_hba_heartbeat: Disable HBA heartbeat timer..
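
lpfc_cpumask_irq_mode_init() builds the IRQ affinity mask either from the CPUs on the HBA's NUMA node (AMD default) or, in no-hyperthread mode (Intel default), from the first SMT sibling of each physical core. A standalone sketch of just the NHT selection, using the same kernel helpers in a hypothetical wrapper:

/* Hypothetical standalone version of the NHT-mode selection: keep only
 * the first CPU of every sibling group, i.e. one logical CPU per
 * physical core, so vectors never land on a hyperthread twin.
 */
static void build_nht_mask(struct cpumask *dst)
{
    unsigned int cpu, first;

    cpumask_clear(dst);
    for_each_possible_cpu(cpu) {
        first = cpumask_first(topology_sibling_cpumask(cpu));
        if (first < nr_cpu_ids)
            cpumask_set_cpu(first, dst);
    }
}
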
@@ -5311,16 +5937,6 @@
 # Value range is [0,1]. Default value is 0.
 */
 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
-
-/*
-# lpfc_fcp_look_ahead: Look ahead for completions in FCP start routine
-# 0  = disabled (default)
-# 1  = enabled
-# Value range is [0,1]. Default value is 0.
-#
-# This feature in under investigation and may be supported in the future.
-*/
-unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
 
 /*
 # lpfc_prot_mask: i
@@ -5378,15 +5994,75 @@
 
 /*
 * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
- * This value can be set to values between 64 and 4096. The default value is
- * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
- * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
+ * This value can be set to values between 64 and 4096. The default value
+ * is 64, but may be increased to allow for larger Max I/O sizes. The scsi
+ * and nvme layers will allow I/O sizes up to (MAX_SEG_COUNT * SEG_SIZE).
 * Because of the additional overhead involved in setting up T10-DIF,
 * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
 * and will be limited to 512 if BlockGuard is enabled under SLI3.
 */
-LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MIN_SG_SEG_CNT,
-        LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
+static uint lpfc_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
+module_param(lpfc_sg_seg_cnt, uint, 0444);
+MODULE_PARM_DESC(lpfc_sg_seg_cnt, "Max Scatter Gather Segment Count");
+
+/**
+ * lpfc_sg_seg_cnt_show - Display the scatter/gather list sizes
+ *    configured for the adapter
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains a string with the list sizes
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
+                     char *buf)
+{
+    struct Scsi_Host *shost = class_to_shost(dev);
+    struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+    struct lpfc_hba *phba = vport->phba;
+    int len;
+
+    len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d  total SGEs: %d\n",
+                    phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
+
+    len += scnprintf(buf + len, PAGE_SIZE - len,
+                     "Cfg: %d  SCSI: %d  NVME: %d\n",
+                     phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
+                     phba->cfg_nvme_seg_cnt);
+    return len;
+}
+
+static DEVICE_ATTR_RO(lpfc_sg_seg_cnt);
+
+/**
+ * lpfc_sg_seg_cnt_init - Set the hba sg_seg_cnt initial value
+ * @phba: lpfc_hba pointer.
+ * @val: contains the initial value
+ *
+ * Description:
+ * Validates the initial value is within range and assigns it to the
+ * adapter. If not in range, an error message is posted and the
+ * default value is assigned.
+ *
+ * Returns:
+ * zero if value is in range and is set
+ * -EINVAL if value was out of range
+ **/
+static int
+lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
+{
+    if (val >= LPFC_MIN_SG_SEG_CNT && val <= LPFC_MAX_SG_SEG_CNT) {
+        phba->cfg_sg_seg_cnt = val;
+        return 0;
+    }
+    lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+            "0409 "LPFC_DRIVER_NAME"_sg_seg_cnt attribute cannot "
+            "be set to %d, allowed range is [%d, %d]\n",
+            val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT);
+    phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
+    return -EINVAL;
+}
 
 /*
 * lpfc_enable_mds_diags: Enable MDS Diagnostics
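
lpfc_sg_seg_cnt moves from the LPFC_ATTR_R() macro to a hand-written parameter with its own _init validator, because the show side now prints derived totals (SGL size, per-protocol segment counts) rather than the raw value. The validator itself follows the file's usual clamp-to-default pattern, sketched here with hypothetical names:

/* Generic form of the *_init validators in this file: accept an
 * in-range value, otherwise fall back to the default and report it.
 */
static int validate_or_default(int val, int min, int max, int def,
                               uint32_t *out)
{
    if (val >= min && val <= max) {
        *out = val;
        return 0;
    }
    *out = def;         /* out of range: keep the safe default */
    return -EINVAL;     /* caller logs and continues with *out */
}
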
....@@ -5394,7 +6070,78 @@
53946070 * 1 = MDS Diagnostics enabled
53956071 * Value range is [0,1]. Default value is 0.
53966072 */
5397
-LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
6073
+LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
6074
+
6075
+/*
6076
+ * lpfc_ras_fwlog_buffsize: Firmware logging host buffer size
6077
+ * 0 = Disable firmware logging (default)
6078
+ * [1-4] = Multiple of 1/4th Mb of host memory for FW logging
6079
+ * Value range [0..4]. Default value is 0
6080
+ */
6081
+LPFC_ATTR(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
6082
+lpfc_param_show(ras_fwlog_buffsize);
6083
+
6084
+static ssize_t
6085
+lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val)
6086
+{
6087
+ int ret = 0;
6088
+ enum ras_state state;
6089
+
6090
+ if (!lpfc_rangecheck(val, 0, 4))
6091
+ return -EINVAL;
6092
+
6093
+ if (phba->cfg_ras_fwlog_buffsize == val)
6094
+ return 0;
6095
+
6096
+ if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn))
6097
+ return -EINVAL;
6098
+
6099
+ spin_lock_irq(&phba->hbalock);
6100
+ state = phba->ras_fwlog.state;
6101
+ spin_unlock_irq(&phba->hbalock);
6102
+
6103
+ if (state == REG_INPROGRESS) {
6104
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging "
6105
+ "registration is in progress\n");
6106
+ return -EBUSY;
6107
+ }
6108
+
6109
+	/* To disable logging: stop the logs and free the DMA.
6110
+	 * For a ras_fwlog_buffsize change we still need to free and
6111
+ * reallocate the DMA in lpfc_sli4_ras_fwlog_init.
6112
+ */
6113
+ phba->cfg_ras_fwlog_buffsize = val;
6114
+ if (state == ACTIVE) {
6115
+ lpfc_ras_stop_fwlog(phba);
6116
+ lpfc_sli4_ras_dma_free(phba);
6117
+ }
6118
+
6119
+ lpfc_sli4_ras_init(phba);
6120
+ if (phba->ras_fwlog.ras_enabled)
6121
+ ret = lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6122
+ LPFC_RAS_ENABLE_LOGGING);
6123
+ return ret;
6124
+}
6125
+
6126
+lpfc_param_store(ras_fwlog_buffsize);
6127
+static DEVICE_ATTR_RW(lpfc_ras_fwlog_buffsize);
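
This attribute is assembled by hand (LPFC_ATTR + lpfc_param_show + a
custom set routine + lpfc_param_store) rather than by LPFC_ATTR_RW,
because its store path must stop logging and reallocate the DMA buffers.
From userspace, enabling firmware logging is then a sysfs write on the
RAS-capable function; a hedged sketch (path and value are illustrative):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical path; must be the hostN on the RAS-enabled PCI function. */
#define FWLOG_ATTR "/sys/class/scsi_host/host0/lpfc_ras_fwlog_buffsize"

int main(void)
{
	FILE *f = fopen(FWLOG_ATTR, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* "4" requests 4 * 1/4 MB = 1 MB of host memory for FW logging; per
	 * the set routine above, EBUSY means registration is in progress.
	 */
	if (fputs("4", f) == EOF || fflush(f) == EOF)
		fprintf(stderr, "write failed: %s\n", strerror(errno));
	fclose(f);
	return 0;
}

The same write pattern applies to the lpfc_ras_fwlog_level and
lpfc_ras_fwlog_func attributes defined below.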
6128
+
6129
+/*
6130
+ * lpfc_ras_fwlog_level: Firmware logging verbosity level
6131
+ * Valid only if firmware logging is enabled
6132
+ * 0 (least verbosity) to 4 (most verbosity)
6133
+ * Value range is [0..4]. Default value is 0
6134
+ */
6135
+LPFC_ATTR_RW(ras_fwlog_level, 0, 0, 4, "Firmware Logging Level");
6136
+
6137
+/*
6138
+ * lpfc_ras_fwlog_func: Firmware logging enabled on function number
6139
+ * Default function which has RAS support: 0
6140
+ * Value range is [0..7].
6141
+ * FW logging is a global action and enablement is via a specific
6142
+ * port.
6143
+ */
6144
+LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function");
53986145
53996146 /*
54006147 * lpfc_enable_bbcr: Enable BB Credit Recovery
....@@ -5414,6 +6161,7 @@
54146161
54156162 struct device_attribute *lpfc_hba_attrs[] = {
54166163 &dev_attr_nvme_info,
6164
+ &dev_attr_scsi_stat,
54176165 &dev_attr_bg_info,
54186166 &dev_attr_bg_guard_err,
54196167 &dev_attr_bg_apptag_err,
....@@ -5441,15 +6189,16 @@
54416189 &dev_attr_lpfc_nodev_tmo,
54426190 &dev_attr_lpfc_devloss_tmo,
54436191 &dev_attr_lpfc_enable_fc4_type,
5444
- &dev_attr_lpfc_xri_split,
54456192 &dev_attr_lpfc_fcp_class,
54466193 &dev_attr_lpfc_use_adisc,
54476194 &dev_attr_lpfc_first_burst_size,
54486195 &dev_attr_lpfc_ack0,
6196
+ &dev_attr_lpfc_xri_rebalancing,
54496197 &dev_attr_lpfc_topology,
54506198 &dev_attr_lpfc_scan_down,
54516199 &dev_attr_lpfc_link_speed,
54526200 &dev_attr_lpfc_fcp_io_sched,
6201
+ &dev_attr_lpfc_ns_query,
54536202 &dev_attr_lpfc_fcp2_no_tgt_reset,
54546203 &dev_attr_lpfc_cr_delay,
54556204 &dev_attr_lpfc_cr_count,
....@@ -5478,12 +6227,15 @@
54786227 &dev_attr_lpfc_use_msi,
54796228 &dev_attr_lpfc_nvme_oas,
54806229 &dev_attr_lpfc_nvme_embed_cmd,
5481
- &dev_attr_lpfc_auto_imax,
54826230 &dev_attr_lpfc_fcp_imax,
6231
+ &dev_attr_lpfc_force_rscn,
6232
+ &dev_attr_lpfc_cq_poll_threshold,
6233
+ &dev_attr_lpfc_cq_max_proc_limit,
54836234 &dev_attr_lpfc_fcp_cpu_map,
5484
- &dev_attr_lpfc_fcp_io_channel,
6235
+ &dev_attr_lpfc_fcp_mq_threshold,
6236
+ &dev_attr_lpfc_hdw_queue,
6237
+ &dev_attr_lpfc_irq_chann,
54856238 &dev_attr_lpfc_suppress_rsp,
5486
- &dev_attr_lpfc_nvme_io_channel,
54876239 &dev_attr_lpfc_nvmet_mrq,
54886240 &dev_attr_lpfc_nvmet_mrq_post,
54896241 &dev_attr_lpfc_nvme_enable_fb,
....@@ -5510,17 +6262,18 @@
55106262 &dev_attr_lpfc_sriov_nr_virtfn,
55116263 &dev_attr_lpfc_req_fw_upgrade,
55126264 &dev_attr_lpfc_suppress_link_up,
5513
- &dev_attr_lpfc_iocb_cnt,
55146265 &dev_attr_iocb_hw,
6266
+ &dev_attr_pls,
6267
+ &dev_attr_pt,
55156268 &dev_attr_txq_hw,
55166269 &dev_attr_txcmplq_hw,
5517
- &dev_attr_lpfc_fips_level,
5518
- &dev_attr_lpfc_fips_rev,
5519
- &dev_attr_lpfc_dss,
55206270 &dev_attr_lpfc_sriov_hw_max_virtfn,
55216271 &dev_attr_protocol,
55226272 &dev_attr_lpfc_xlane_supported,
55236273 &dev_attr_lpfc_enable_mds_diags,
6274
+ &dev_attr_lpfc_ras_fwlog_buffsize,
6275
+ &dev_attr_lpfc_ras_fwlog_level,
6276
+ &dev_attr_lpfc_ras_fwlog_func,
55246277 &dev_attr_lpfc_enable_bbcr,
55256278 &dev_attr_lpfc_enable_dpp,
55266279 NULL,
....@@ -5549,8 +6302,6 @@
55496302 &dev_attr_lpfc_max_scsicmpl_time,
55506303 &dev_attr_lpfc_stat_data_ctrl,
55516304 &dev_attr_lpfc_static_vport,
5552
- &dev_attr_lpfc_fips_level,
5553
- &dev_attr_lpfc_fips_rev,
55546305 NULL,
55556306 };
55566307
....@@ -5939,14 +6690,23 @@
59396690 case LPFC_LINK_SPEED_64GHZ:
59406691 fc_host_speed(shost) = FC_PORTSPEED_64GBIT;
59416692 break;
6693
+ case LPFC_LINK_SPEED_128GHZ:
6694
+ fc_host_speed(shost) = FC_PORTSPEED_128GBIT;
6695
+ break;
59426696 default:
59436697 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
59446698 break;
59456699 }
59466700 } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) {
59476701 switch (phba->fc_linkspeed) {
6702
+ case LPFC_ASYNC_LINK_SPEED_1GBPS:
6703
+ fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
6704
+ break;
59486705 case LPFC_ASYNC_LINK_SPEED_10GBPS:
59496706 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
6707
+ break;
6708
+ case LPFC_ASYNC_LINK_SPEED_20GBPS:
6709
+ fc_host_speed(shost) = FC_PORTSPEED_20GBIT;
59506710 break;
59516711 case LPFC_ASYNC_LINK_SPEED_25GBPS:
59526712 fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
....@@ -6038,18 +6798,22 @@
60386798 pmb = &pmboxq->u.mb;
60396799 pmb->mbxCommand = MBX_READ_STATUS;
60406800 pmb->mbxOwner = OWN_HOST;
6041
- pmboxq->context1 = NULL;
6801
+ pmboxq->ctx_buf = NULL;
60426802 pmboxq->vport = vport;
60436803
6044
- if (vport->fc_flag & FC_OFFLINE_MODE)
6804
+ if (vport->fc_flag & FC_OFFLINE_MODE) {
60456805 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6046
- else
6047
- rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6048
-
6049
- if (rc != MBX_SUCCESS) {
6050
- if (rc != MBX_TIMEOUT)
6806
+ if (rc != MBX_SUCCESS) {
60516807 mempool_free(pmboxq, phba->mbox_mem_pool);
6052
- return NULL;
6808
+ return NULL;
6809
+ }
6810
+ } else {
6811
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6812
+ if (rc != MBX_SUCCESS) {
6813
+ if (rc != MBX_TIMEOUT)
6814
+ mempool_free(pmboxq, phba->mbox_mem_pool);
6815
+ return NULL;
6816
+ }
60536817 }
60546818
60556819 memset(hs, 0, sizeof (struct fc_host_statistics));
....@@ -6070,18 +6834,22 @@
60706834 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
60716835 pmb->mbxCommand = MBX_READ_LNK_STAT;
60726836 pmb->mbxOwner = OWN_HOST;
6073
- pmboxq->context1 = NULL;
6837
+ pmboxq->ctx_buf = NULL;
60746838 pmboxq->vport = vport;
60756839
6076
- if (vport->fc_flag & FC_OFFLINE_MODE)
6840
+ if (vport->fc_flag & FC_OFFLINE_MODE) {
60776841 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6078
- else
6079
- rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6080
-
6081
- if (rc != MBX_SUCCESS) {
6082
- if (rc != MBX_TIMEOUT)
6842
+ if (rc != MBX_SUCCESS) {
60836843 mempool_free(pmboxq, phba->mbox_mem_pool);
6084
- return NULL;
6844
+ return NULL;
6845
+ }
6846
+ } else {
6847
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6848
+ if (rc != MBX_SUCCESS) {
6849
+ if (rc != MBX_TIMEOUT)
6850
+ mempool_free(pmboxq, phba->mbox_mem_pool);
6851
+ return NULL;
6852
+ }
60856853 }
60866854
60876855 hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
....@@ -6150,37 +6918,45 @@
61506918 pmb->mbxCommand = MBX_READ_STATUS;
61516919 pmb->mbxOwner = OWN_HOST;
61526920 pmb->un.varWords[0] = 0x1; /* reset request */
6153
- pmboxq->context1 = NULL;
6921
+ pmboxq->ctx_buf = NULL;
61546922 pmboxq->vport = vport;
61556923
61566924 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
6157
- (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
6925
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
61586926 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6159
- else
6160
- rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6161
-
6162
- if (rc != MBX_SUCCESS) {
6163
- if (rc != MBX_TIMEOUT)
6927
+ if (rc != MBX_SUCCESS) {
61646928 mempool_free(pmboxq, phba->mbox_mem_pool);
6165
- return;
6929
+ return;
6930
+ }
6931
+ } else {
6932
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6933
+ if (rc != MBX_SUCCESS) {
6934
+ if (rc != MBX_TIMEOUT)
6935
+ mempool_free(pmboxq, phba->mbox_mem_pool);
6936
+ return;
6937
+ }
61666938 }
61676939
61686940 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
61696941 pmb->mbxCommand = MBX_READ_LNK_STAT;
61706942 pmb->mbxOwner = OWN_HOST;
6171
- pmboxq->context1 = NULL;
6943
+ pmboxq->ctx_buf = NULL;
61726944 pmboxq->vport = vport;
61736945
61746946 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
6175
- (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
6947
+ (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
61766948 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6177
- else
6949
+ if (rc != MBX_SUCCESS) {
6950
+ mempool_free(pmboxq, phba->mbox_mem_pool);
6951
+ return;
6952
+ }
6953
+ } else {
61786954 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6179
-
6180
- if (rc != MBX_SUCCESS) {
6181
- if (rc != MBX_TIMEOUT)
6182
- mempool_free( pmboxq, phba->mbox_mem_pool);
6183
- return;
6955
+ if (rc != MBX_SUCCESS) {
6956
+ if (rc != MBX_TIMEOUT)
6957
+ mempool_free(pmboxq, phba->mbox_mem_pool);
6958
+ return;
6959
+ }
61846960 }
61856961
61866962 lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
....@@ -6290,10 +7066,31 @@
62907066 static void
62917067 lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
62927068 {
7069
+ struct lpfc_rport_data *rdata = rport->dd_data;
7070
+ struct lpfc_nodelist *ndlp = rdata->pnode;
7071
+#if (IS_ENABLED(CONFIG_NVME_FC))
7072
+ struct lpfc_nvme_rport *nrport = NULL;
7073
+#endif
7074
+
62937075 if (timeout)
62947076 rport->dev_loss_tmo = timeout;
62957077 else
62967078 rport->dev_loss_tmo = 1;
7079
+
7080
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
7081
+ dev_info(&rport->dev, "Cannot find remote node to "
7082
+ "set rport dev loss tmo, port_id x%x\n",
7083
+ rport->port_id);
7084
+ return;
7085
+ }
7086
+
7087
+#if (IS_ENABLED(CONFIG_NVME_FC))
7088
+ nrport = lpfc_ndlp_get_nrport(ndlp);
7089
+
7090
+ if (nrport && nrport->remoteport)
7091
+ nvme_fc_set_remoteport_devloss(nrport->remoteport,
7092
+ rport->dev_loss_tmo);
7093
+#endif
62977094 }
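
This callback is normally reached through the FC transport class:
writing a remote port's dev_loss_tmo attribute lands here, and with
CONFIG_NVME_FC the value is now mirrored onto the NVMe remoteport as
well. A userspace sketch (the rport-0:0-1 instance is illustrative; the
name varies per fabric login):

#include <stdio.h>

/* Hypothetical rport instance under the FC transport sysfs tree. */
#define DEV_LOSS_ATTR "/sys/class/fc_remote_ports/rport-0:0-1/dev_loss_tmo"

int main(void)
{
	FILE *f = fopen(DEV_LOSS_ATTR, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* 60 seconds; note the handler above clamps a zero timeout to 1. */
	if (fputs("60", f) == EOF)
		perror("fputs");
	fclose(f);
	return 0;
}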
62987095
62997096 /**
....@@ -6490,13 +7287,41 @@
64907287 };
64917288
64927289 /**
7290
+ * lpfc_get_hba_function_mode - Determine whether the HBA operates in
7291
+ * FCoE mode
7292
+ * @phba: lpfc_hba pointer.
7293
+ **/
7294
+static void
7295
+lpfc_get_hba_function_mode(struct lpfc_hba *phba)
7296
+{
7297
+	/* Set HBA_FCOE_MODE if this is an FCoE-capable adapter */
7298
+ switch (phba->pcidev->device) {
7299
+ case PCI_DEVICE_ID_SKYHAWK:
7300
+ case PCI_DEVICE_ID_SKYHAWK_VF:
7301
+ case PCI_DEVICE_ID_LANCER_FCOE:
7302
+ case PCI_DEVICE_ID_LANCER_FCOE_VF:
7303
+ case PCI_DEVICE_ID_ZEPHYR_DCSP:
7304
+ case PCI_DEVICE_ID_HORNET:
7305
+ case PCI_DEVICE_ID_TIGERSHARK:
7306
+ case PCI_DEVICE_ID_TOMCAT:
7307
+ phba->hba_flag |= HBA_FCOE_MODE;
7308
+ break;
7309
+ default:
7310
+ /* for others, clear the flag */
7311
+ phba->hba_flag &= ~HBA_FCOE_MODE;
7312
+ }
7313
+}
7314
+
7315
+/**
64937316 * lpfc_get_cfgparam - Used during probe_one to init the adapter structure
64947317 * @phba: lpfc_hba pointer.
64957318 **/
64967319 void
64977320 lpfc_get_cfgparam(struct lpfc_hba *phba)
64987321 {
7322
+ lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
64997323 lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
7324
+ lpfc_ns_query_init(phba, lpfc_ns_query);
65007325 lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
65017326 lpfc_cr_delay_init(phba, lpfc_cr_delay);
65027327 lpfc_cr_count_init(phba, lpfc_cr_count);
....@@ -6504,6 +7329,7 @@
65047329 lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
65057330 lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
65067331 lpfc_ack0_init(phba, lpfc_ack0);
7332
+ lpfc_xri_rebalancing_init(phba, lpfc_xri_rebalancing);
65077333 lpfc_topology_init(phba, lpfc_topology);
65087334 lpfc_link_speed_init(phba, lpfc_link_speed);
65097335 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
....@@ -6516,8 +7342,10 @@
65167342 lpfc_use_msi_init(phba, lpfc_use_msi);
65177343 lpfc_nvme_oas_init(phba, lpfc_nvme_oas);
65187344 lpfc_nvme_embed_cmd_init(phba, lpfc_nvme_embed_cmd);
6519
- lpfc_auto_imax_init(phba, lpfc_auto_imax);
65207345 lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
7346
+ lpfc_force_rscn_init(phba, lpfc_force_rscn);
7347
+ lpfc_cq_poll_threshold_init(phba, lpfc_cq_poll_threshold);
7348
+ lpfc_cq_max_proc_limit_init(phba, lpfc_cq_max_proc_limit);
65217349 lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
65227350 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
65237351 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
....@@ -6541,8 +7369,18 @@
65417369 else
65427370 phba->cfg_poll = lpfc_poll;
65437371
6544
- if (phba->cfg_enable_bg)
7372
+ /* Get the function mode */
7373
+ lpfc_get_hba_function_mode(phba);
7374
+
7375
+ /* BlockGuard allowed for FC only. */
7376
+ if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) {
7377
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7378
+ "0581 BlockGuard feature not supported\n");
7379
+ /* If set, clear the BlockGuard support param */
7380
+ phba->cfg_enable_bg = 0;
7381
+ } else if (phba->cfg_enable_bg) {
65457382 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
7383
+ }
65467384
65477385 lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp);
65487386
....@@ -6553,60 +7391,52 @@
65537391 /* Initialize first burst. Target vs Initiator are different. */
65547392 lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
65557393 lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
6556
- lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
6557
- lpfc_nvme_io_channel_init(phba, lpfc_nvme_io_channel);
7394
+ lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
7395
+ lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
7396
+ lpfc_irq_chann_init(phba, lpfc_irq_chann);
65587397 lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
65597398 lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
65607399
65617400 if (phba->sli_rev != LPFC_SLI_REV4) {
65627401 /* NVME only supported on SLI4 */
65637402 phba->nvmet_support = 0;
7403
+ phba->cfg_nvmet_mrq = 0;
65647404 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
65657405 phba->cfg_enable_bbcr = 0;
7406
+ phba->cfg_xri_rebalancing = 0;
65667407 } else {
65677408 /* We MUST have FCP support */
65687409 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
65697410 phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP;
65707411 }
65717412
6572
- if (phba->cfg_auto_imax && !phba->cfg_fcp_imax)
6573
- phba->cfg_auto_imax = 0;
6574
- phba->initial_imax = phba->cfg_fcp_imax;
7413
+ phba->cfg_auto_imax = (phba->cfg_fcp_imax) ? 0 : 1;
65757414
65767415 phba->cfg_enable_pbde = 0;
65777416
65787417 /* A value of 0 means use the number of CPUs found in the system */
6579
- if (phba->cfg_fcp_io_channel == 0)
6580
- phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
6581
- if (phba->cfg_nvme_io_channel == 0)
6582
- phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu;
6583
-
6584
- if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
6585
- phba->cfg_fcp_io_channel = 0;
6586
-
6587
- if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)
6588
- phba->cfg_nvme_io_channel = 0;
6589
-
6590
- if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
6591
- phba->io_channel_irqs = phba->cfg_fcp_io_channel;
6592
- else
6593
- phba->io_channel_irqs = phba->cfg_nvme_io_channel;
7418
+ if (phba->cfg_hdw_queue == 0)
7419
+ phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
7420
+ if (phba->cfg_irq_chann == 0)
7421
+ phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
7422
+ if (phba->cfg_irq_chann > phba->cfg_hdw_queue)
7423
+ phba->cfg_irq_chann = phba->cfg_hdw_queue;
65947424
65957425 phba->cfg_soft_wwnn = 0L;
65967426 phba->cfg_soft_wwpn = 0L;
6597
- lpfc_xri_split_init(phba, lpfc_xri_split);
65987427 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
65997428 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
6600
- lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
66017429 lpfc_aer_support_init(phba, lpfc_aer_support);
66027430 lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
66037431 lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
66047432 lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
6605
- lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
66067433 lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
66077434 lpfc_sli_mode_init(phba, lpfc_sli_mode);
6608
- phba->cfg_enable_dss = 1;
66097435 lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
7436
+ lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize);
7437
+ lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
7438
+ lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);
7439
+
66107440 return;
66117441 }
66127442
....@@ -6618,16 +7448,30 @@
66187448 void
66197449 lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
66207450 {
6621
- if (phba->cfg_nvme_io_channel > phba->sli4_hba.num_present_cpu)
6622
- phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu;
7451
+ int logit = 0;
66237452
6624
- if (phba->cfg_fcp_io_channel > phba->sli4_hba.num_present_cpu)
6625
- phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
7453
+ if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu) {
7454
+ phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
7455
+ logit = 1;
7456
+ }
7457
+ if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu) {
7458
+ phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
7459
+ logit = 1;
7460
+ }
7461
+ if (phba->cfg_irq_chann > phba->cfg_hdw_queue) {
7462
+ phba->cfg_irq_chann = phba->cfg_hdw_queue;
7463
+ logit = 1;
7464
+ }
7465
+ if (logit)
7466
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7467
+ "2006 Reducing Queues - CPU limitation: "
7468
+ "IRQ %d HDWQ %d\n",
7469
+ phba->cfg_irq_chann,
7470
+ phba->cfg_hdw_queue);
66267471
66277472 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
66287473 phba->nvmet_support) {
66297474 phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP;
6630
- phba->cfg_fcp_io_channel = 0;
66317475
66327476 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
66337477 "6013 %s x%x fb_size x%x, fb_max x%x\n",
....@@ -6644,11 +7488,11 @@
66447488 }
66457489
66467490 if (!phba->cfg_nvmet_mrq)
6647
- phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
7491
+ phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
66487492
66497493 /* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
6650
- if (phba->cfg_nvmet_mrq > phba->cfg_nvme_io_channel) {
6651
- phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
7494
+ if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) {
7495
+ phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
66527496 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
66537497 "6018 Adjust lpfc_nvmet_mrq to %d\n",
66547498 phba->cfg_nvmet_mrq);
....@@ -6659,14 +7503,9 @@
66597503 } else {
66607504 /* Not NVME Target mode. Turn off Target parameters. */
66617505 phba->nvmet_support = 0;
6662
- phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
7506
+ phba->cfg_nvmet_mrq = 0;
66637507 phba->cfg_nvmet_fb_size = 0;
66647508 }
6665
-
6666
- if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
6667
- phba->io_channel_irqs = phba->cfg_fcp_io_channel;
6668
- else
6669
- phba->io_channel_irqs = phba->cfg_nvme_io_channel;
66707509 }
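
The ordering of the clamps at the top of lpfc_nvme_mod_param_dep()
matters: cfg_hdw_queue and cfg_irq_chann are each capped to the present
CPU count first, then cfg_irq_chann is capped to the (possibly reduced)
cfg_hdw_queue, and a single "2006" message reports the result. A
standalone sketch of the same rules with hypothetical numbers (not
driver code):

#include <stdio.h>

/* Stand-in for the queue clamping in lpfc_nvme_mod_param_dep() above. */
static void clamp_queues(unsigned int *hdwq, unsigned int *irq,
			 unsigned int ncpu)
{
	int logit = 0;

	if (*hdwq > ncpu) {
		*hdwq = ncpu;
		logit = 1;
	}
	if (*irq > ncpu) {
		*irq = ncpu;
		logit = 1;
	}
	if (*irq > *hdwq) {
		*irq = *hdwq;
		logit = 1;
	}
	if (logit)
		printf("Reducing Queues - CPU limitation: IRQ %u HDWQ %u\n",
		       *irq, *hdwq);
}

int main(void)
{
	unsigned int hdwq = 64, irq = 64, ncpu = 16;	/* hypothetical */

	clamp_queues(&hdwq, &irq, ncpu);	/* both reduced to 16 */
	return 0;
}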
66717510
66727511 /**