2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
  /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for *
  * Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
  * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. *
  * Copyright (C) 2004-2016 Emulex. All rights reserved. *
  * EMULEX and SLI are trademarks of Emulex. *
@@ -37,7 +37,11 @@
 #include <linux/miscdevice.h>
 #include <linux/percpu.h>
 #include <linux/msi.h>
+#include <linux/irq.h>
 #include <linux/bitops.h>
+#include <linux/crash_dump.h>
+#include <linux/cpu.h>
+#include <linux/cpuhotplug.h>

 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -45,8 +49,6 @@
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/fc/fc_fs.h>
-
-#include <linux/nvme-fc-driver.h>

 #include "lpfc_hw4.h"
 #include "lpfc_hw.h"
@@ -57,23 +59,19 @@
 #include "lpfc.h"
 #include "lpfc_scsi.h"
 #include "lpfc_nvme.h"
-#include "lpfc_nvmet.h"
 #include "lpfc_logmsg.h"
 #include "lpfc_crtn.h"
 #include "lpfc_vport.h"
 #include "lpfc_version.h"
 #include "lpfc_ids.h"

-char *_dump_buf_data;
-unsigned long _dump_buf_data_order;
-char *_dump_buf_dif;
-unsigned long _dump_buf_dif_order;
-spinlock_t _dump_buf_lock;
-
+static enum cpuhp_state lpfc_cpuhp_state;
 /* Used when mapping IRQ vectors in a driver centric manner */
-uint16_t *lpfc_used_cpu;
-uint32_t lpfc_present_cpu;
+static uint32_t lpfc_present_cpu;

+static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
+static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
+static void lpfc_cpuhp_add(struct lpfc_hba *phba);
 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
 static int lpfc_post_rcv_buf(struct lpfc_hba *);
 static int lpfc_sli4_queue_verify(struct lpfc_hba *);
@@ -93,6 +91,8 @@
 static void lpfc_sli4_disable_intr(struct lpfc_hba *);
 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
+static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
+static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);

 static struct scsi_transport_template *lpfc_transport_template = NULL;
 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -153,7 +153,7 @@
 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

 if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 "0324 Config Port initialization "
 "error, mbxCmd x%x READ_NVPARM, "
 "mbxStatus x%x\n",
@@ -177,7 +177,7 @@
 lpfc_read_rev(phba, pmb);
 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 "0439 Adapter failed to init, mbxCmd x%x "
 "READ_REV, mbxStatus x%x\n",
 mb->mbxCommand, mb->mbxStatus);
@@ -192,7 +192,7 @@
 */
 if (mb->un.varRdRev.rr == 0) {
 vp->rev.rBit = 0;
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 "0440 Adapter failed to init, READ_REV has "
 "missing revision information.\n");
 mempool_free(pmb, phba->mbox_mem_pool);
@@ -253,13 +253,15 @@
 */
 if (mb->un.varDmp.word_cnt == 0)
 break;
- if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
- mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
+
+ i = mb->un.varDmp.word_cnt * sizeof(uint32_t);
+ if (offset + i > DMP_VPD_SIZE)
+ i = DMP_VPD_SIZE - offset;
 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
- lpfc_vpd_data + offset,
- mb->un.varDmp.word_cnt);
- offset += mb->un.varDmp.word_cnt;
- } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
+ lpfc_vpd_data + offset, i);
+ offset += i;
+ } while (offset < DMP_VPD_SIZE);
+
 lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

 kfree(lpfc_vpd_data);
@@ -442,24 +444,24 @@

 pmb->vport = vport;
 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 "0448 Adapter failed init, mbxCmd x%x "
 "READ_SPARM mbxStatus x%x\n",
 mb->mbxCommand, mb->mbxStatus);
 phba->link_state = LPFC_HBA_ERROR;
- mp = (struct lpfc_dmabuf *) pmb->context1;
+ mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
 mempool_free(pmb, phba->mbox_mem_pool);
 lpfc_mbuf_free(phba, mp->virt, mp->phys);
 kfree(mp);
 return -EIO;
 }

- mp = (struct lpfc_dmabuf *) pmb->context1;
+ mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
 lpfc_mbuf_free(phba, mp->virt, mp->phys);
 kfree(mp);
- pmb->context1 = NULL;
+ pmb->ctx_buf = NULL;
 lpfc_update_vport_wwn(vport);

 /* Update the fc_host data structures with new wwn. */
@@ -496,7 +498,7 @@
 lpfc_read_config(phba, pmb);
 pmb->vport = vport;
 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 "0453 Adapter failed to init, mbxCmd x%x "
 "READ_CONFIG, mbxStatus x%x\n",
 mb->mbxCommand, mb->mbxStatus);
@@ -509,21 +511,12 @@
 lpfc_sli_read_link_ste(phba);

 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
- i = (mb->un.varRdConfig.max_xri + 1);
- if (phba->cfg_hba_queue_depth > i) {
+ if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 "3359 HBA queue depth changed from %d to %d\n",
- phba->cfg_hba_queue_depth, i);
- phba->cfg_hba_queue_depth = i;
- }
-
- /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
- i = (mb->un.varRdConfig.max_xri >> 3);
- if (phba->pport->cfg_lun_queue_depth > i) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "3360 LUN queue depth changed from %d to %d\n",
- phba->pport->cfg_lun_queue_depth, i);
- phba->pport->cfg_lun_queue_depth = i;
+ phba->cfg_hba_queue_depth,
+ mb->un.varRdConfig.max_xri);
+ phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
 }

 phba->lmt = mb->un.varRdConfig.lmt;
@@ -554,7 +547,7 @@
 }
 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 "0352 Config MSI mailbox command "
 "failed, mbxCmd x%x, mbxStatus x%x\n",
 pmb->u.mb.mbxCommand,
@@ -605,17 +598,15 @@
 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

 if (phba->hba_flag & LINK_DISABLED) {
- lpfc_printf_log(phba,
- KERN_ERR, LOG_INIT,
- "2598 Adapter Link is disabled.\n");
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2598 Adapter Link is disabled.\n");
 lpfc_down_link(phba, pmb);
 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
- lpfc_printf_log(phba,
- KERN_ERR, LOG_INIT,
- "2599 Adapter failed to issue DOWN_LINK"
- " mbox command rc 0x%x\n", rc);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2599 Adapter failed to issue DOWN_LINK"
+ " mbox command rc 0x%x\n", rc);

 mempool_free(pmb, phba->mbox_mem_pool);
 return -EIO;
@@ -639,9 +630,7 @@
 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
 "0456 Adapter failed to issue "
 "ASYNCEVT_ENABLE mbox status x%x\n",
 rc);
@@ -661,7 +650,8 @@
 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0435 Adapter failed "
 "to get Option ROM version status x%x\n", rc);
 mempool_free(pmb, phba->mbox_mem_pool);
 }
@@ -739,10 +729,10 @@
 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
 !(phba->lmt & LMT_64Gb))) {
 /* Reset link speed to auto */
- lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1302 Invalid speed for this board:%d "
- "Reset link speed to auto.\n",
- phba->cfg_link_speed);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "1302 Invalid speed for this board:%d "
+ "Reset link speed to auto.\n",
+ phba->cfg_link_speed);
 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
 }
 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
@@ -751,10 +741,10 @@
 lpfc_set_loopback_flag(phba);
 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0498 Adapter failed to init, mbxCmd x%x "
- "INIT_LINK, mbxStatus x%x\n",
- mb->mbxCommand, mb->mbxStatus);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0498 Adapter failed to init, mbxCmd x%x "
+ "INIT_LINK, mbxStatus x%x\n",
+ mb->mbxCommand, mb->mbxStatus);
 if (phba->sli_rev <= LPFC_SLI_REV3) {
 /* Clear all interrupt enable conditions */
 writel(0, phba->HCregaddr);
@@ -800,17 +790,15 @@
 return -ENOMEM;
 }

- lpfc_printf_log(phba,
- KERN_ERR, LOG_INIT,
- "0491 Adapter Link is disabled.\n");
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0491 Adapter Link is disabled.\n");
 lpfc_down_link(phba, pmb);
 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
- lpfc_printf_log(phba,
- KERN_ERR, LOG_INIT,
- "2522 Adapter failed to issue DOWN_LINK"
- " mbox command rc 0x%x\n", rc);
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2522 Adapter failed to issue DOWN_LINK"
+ " mbox command rc 0x%x\n", rc);

 mempool_free(pmb, phba->mbox_mem_pool);
 return -EIO;
@@ -1005,7 +993,6 @@

 /**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
- int i;
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bring
@@ -1037,14 +1024,14 @@
 static int
 lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 {
- struct lpfc_scsi_buf *psb, *psb_next;
- struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
+ struct lpfc_io_buf *psb, *psb_next;
+ struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
+ struct lpfc_sli4_hdw_queue *qp;
 LIST_HEAD(aborts);
 LIST_HEAD(nvme_aborts);
 LIST_HEAD(nvmet_aborts);
- unsigned long iflag = 0;
 struct lpfc_sglq *sglq_entry = NULL;
- int cnt;
+ int cnt, idx;


 lpfc_sli_hbqbuf_free_all(phba);
....@@ -1071,55 +1058,47 @@
10711058
10721059
10731060 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1074
- /* abts_scsi_buf_list_lock required because worker thread uses this
1061
+
1062
+ /* abts_xxxx_buf_list_lock required because worker thread uses this
10751063 * list.
10761064 */
1077
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
1078
- spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
1079
- list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
1065
+ cnt = 0;
1066
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
1067
+ qp = &phba->sli4_hba.hdwq[idx];
1068
+
1069
+ spin_lock(&qp->abts_io_buf_list_lock);
1070
+ list_splice_init(&qp->lpfc_abts_io_buf_list,
10801071 &aborts);
1081
- spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
1082
- }
10831072
1084
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
1085
- spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1086
- list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list,
1087
- &nvme_aborts);
1088
- list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1089
- &nvmet_aborts);
1090
- spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1091
- }
1092
-
1093
- spin_unlock_irq(&phba->hbalock);
1094
-
1095
- list_for_each_entry_safe(psb, psb_next, &aborts, list) {
1096
- psb->pCmd = NULL;
1097
- psb->status = IOSTAT_SUCCESS;
1098
- }
1099
- spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
1100
- list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
1101
- spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
1102
-
1103
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
1104
- cnt = 0;
1105
- list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
1073
+ list_for_each_entry_safe(psb, psb_next, &aborts, list) {
11061074 psb->pCmd = NULL;
11071075 psb->status = IOSTAT_SUCCESS;
11081076 cnt++;
11091077 }
1110
- spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
1111
- phba->put_nvme_bufs += cnt;
1112
- list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
1113
- spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
1078
+ spin_lock(&qp->io_buf_list_put_lock);
1079
+ list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
1080
+ qp->put_io_bufs += qp->abts_scsi_io_bufs;
1081
+ qp->put_io_bufs += qp->abts_nvme_io_bufs;
1082
+ qp->abts_scsi_io_bufs = 0;
1083
+ qp->abts_nvme_io_bufs = 0;
1084
+ spin_unlock(&qp->io_buf_list_put_lock);
1085
+ spin_unlock(&qp->abts_io_buf_list_lock);
1086
+ }
1087
+ spin_unlock_irq(&phba->hbalock);
11141088
1089
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
1090
+ spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1091
+ list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1092
+ &nvmet_aborts);
1093
+ spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
11151094 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
1116
- ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
1095
+ ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
11171096 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
11181097 }
11191098 }
11201099
11211100 lpfc_sli4_free_sp_events(phba);
1122
- return 0;
1101
+ return cnt;
11231102 }
11241103
11251104 /**
....@@ -1141,7 +1120,7 @@
11411120
11421121 /**
11431122 * lpfc_hb_timeout - The HBA-timer timeout handler
1144
- * @ptr: unsigned long holds the pointer to lpfc hba data structure.
1123
+ * @t: timer context used to obtain the pointer to lpfc hba data structure.
11451124 *
11461125 * This is the HBA-timer timeout handler registered to the lpfc driver. When
11471126 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
....@@ -1175,7 +1154,7 @@
11751154
11761155 /**
11771156 * lpfc_rrq_timeout - The RRQ-timer timeout handler
1178
- * @ptr: unsigned long holds the pointer to lpfc hba data structure.
1157
+ * @t: timer context used to obtain the pointer to lpfc hba data structure.
11791158 *
11801159 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
11811160 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
....@@ -1239,6 +1218,168 @@
12391218 return;
12401219 }
12411220
1221
+/*
1222
+ * lpfc_idle_stat_delay_work - idle_stat tracking
1223
+ *
1224
+ * This routine tracks per-cq idle_stat and determines polling decisions.
1225
+ *
1226
+ * Return codes:
1227
+ * None
1228
+ **/
1229
+static void
1230
+lpfc_idle_stat_delay_work(struct work_struct *work)
1231
+{
1232
+ struct lpfc_hba *phba = container_of(to_delayed_work(work),
1233
+ struct lpfc_hba,
1234
+ idle_stat_delay_work);
1235
+ struct lpfc_queue *cq;
1236
+ struct lpfc_sli4_hdw_queue *hdwq;
1237
+ struct lpfc_idle_stat *idle_stat;
1238
+ u32 i, idle_percent;
1239
+ u64 wall, wall_idle, diff_wall, diff_idle, busy_time;
1240
+
1241
+ if (phba->pport->load_flag & FC_UNLOADING)
1242
+ return;
1243
+
1244
+ if (phba->link_state == LPFC_HBA_ERROR ||
1245
+ phba->pport->fc_flag & FC_OFFLINE_MODE)
1246
+ goto requeue;
1247
+
1248
+ for_each_present_cpu(i) {
1249
+ hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
1250
+ cq = hdwq->io_cq;
1251
+
1252
+ /* Skip if we've already handled this cq's primary CPU */
1253
+ if (cq->chann != i)
1254
+ continue;
1255
+
1256
+ idle_stat = &phba->sli4_hba.idle_stat[i];
1257
+
1258
+ /* get_cpu_idle_time returns values as running counters. Thus,
1259
+ * to know the amount for this period, the prior counter values
1260
+ * need to be subtracted from the current counter values.
1261
+ * From there, the idle time stat can be calculated as a
1262
+ * percentage of 100 - the sum of the other consumption times.
1263
+ */
1264
+ wall_idle = get_cpu_idle_time(i, &wall, 1);
1265
+ diff_idle = wall_idle - idle_stat->prev_idle;
1266
+ diff_wall = wall - idle_stat->prev_wall;
1267
+
1268
+ if (diff_wall <= diff_idle)
1269
+ busy_time = 0;
1270
+ else
1271
+ busy_time = diff_wall - diff_idle;
1272
+
1273
+ idle_percent = div64_u64(100 * busy_time, diff_wall);
1274
+ idle_percent = 100 - idle_percent;
1275
+
1276
+ if (idle_percent < 15)
1277
+ cq->poll_mode = LPFC_QUEUE_WORK;
1278
+ else
1279
+ cq->poll_mode = LPFC_IRQ_POLL;
1280
+
1281
+ idle_stat->prev_idle = wall_idle;
1282
+ idle_stat->prev_wall = wall;
1283
+ }
1284
+
1285
+requeue:
1286
+ schedule_delayed_work(&phba->idle_stat_delay_work,
1287
+ msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
1288
+}
1289
+
1290
+static void
1291
+lpfc_hb_eq_delay_work(struct work_struct *work)
1292
+{
1293
+ struct lpfc_hba *phba = container_of(to_delayed_work(work),
1294
+ struct lpfc_hba, eq_delay_work);
1295
+ struct lpfc_eq_intr_info *eqi, *eqi_new;
1296
+ struct lpfc_queue *eq, *eq_next;
1297
+ unsigned char *ena_delay = NULL;
1298
+ uint32_t usdelay;
1299
+ int i;
1300
+
1301
+ if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
1302
+ return;
1303
+
1304
+ if (phba->link_state == LPFC_HBA_ERROR ||
1305
+ phba->pport->fc_flag & FC_OFFLINE_MODE)
1306
+ goto requeue;
1307
+
1308
+ ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
1309
+ GFP_KERNEL);
1310
+ if (!ena_delay)
1311
+ goto requeue;
1312
+
1313
+ for (i = 0; i < phba->cfg_irq_chann; i++) {
1314
+ /* Get the EQ corresponding to the IRQ vector */
1315
+ eq = phba->sli4_hba.hba_eq_hdl[i].eq;
1316
+ if (!eq)
1317
+ continue;
1318
+ if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
1319
+ eq->q_flag &= ~HBA_EQ_DELAY_CHK;
1320
+ ena_delay[eq->last_cpu] = 1;
1321
+ }
1322
+ }
1323
+
1324
+ for_each_present_cpu(i) {
1325
+ eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
1326
+ if (ena_delay[i]) {
1327
+ usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
1328
+ if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
1329
+ usdelay = LPFC_MAX_AUTO_EQ_DELAY;
1330
+ } else {
1331
+ usdelay = 0;
1332
+ }
1333
+
1334
+ eqi->icnt = 0;
1335
+
1336
+ list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
1337
+ if (unlikely(eq->last_cpu != i)) {
1338
+ eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
1339
+ eq->last_cpu);
1340
+ list_move_tail(&eq->cpu_list, &eqi_new->list);
1341
+ continue;
1342
+ }
1343
+ if (usdelay != eq->q_mode)
1344
+ lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
1345
+ usdelay);
1346
+ }
1347
+ }
1348
+
1349
+ kfree(ena_delay);
1350
+
1351
+requeue:
1352
+ queue_delayed_work(phba->wq, &phba->eq_delay_work,
1353
+ msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
1354
+}
1355
+
1356
+/**
1357
+ * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
1358
+ * @phba: pointer to lpfc hba data structure.
1359
+ *
1360
+ * For each heartbeat, this routine does some heuristic methods to adjust
1361
+ * XRI distribution. The goal is to fully utilize free XRIs.
1362
+ **/
1363
+static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
1364
+{
1365
+ u32 i;
1366
+ u32 hwq_count;
1367
+
1368
+ hwq_count = phba->cfg_hdw_queue;
1369
+ for (i = 0; i < hwq_count; i++) {
1370
+ /* Adjust XRIs in private pool */
1371
+ lpfc_adjust_pvt_pool_count(phba, i);
1372
+
1373
+ /* Adjust high watermark */
1374
+ lpfc_adjust_high_watermark(phba, i);
1375
+
1376
+#ifdef LPFC_MXP_STAT
1377
+ /* Snapshot pbl, pvt and busy count */
1378
+ lpfc_snapshot_mxp(phba, i);
1379
+#endif
1380
+ }
1381
+}
1382
+
12421383 /**
12431384 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
12441385 * @phba: pointer to lpfc hba data structure.
....@@ -1264,22 +1405,17 @@
12641405 int retval, i;
12651406 struct lpfc_sli *psli = &phba->sli;
12661407 LIST_HEAD(completions);
1267
- struct lpfc_queue *qp;
1268
- unsigned long time_elapsed;
1269
- uint32_t tick_cqe, max_cqe, val;
1270
- uint64_t tot, data1, data2, data3;
1271
- struct lpfc_nvmet_tgtport *tgtp;
1272
- struct lpfc_register reg_data;
1273
- struct nvme_fc_local_port *localport;
1274
- struct lpfc_nvme_lport *lport;
1275
- struct lpfc_nvme_ctrl_stat *cstat;
1276
- void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;
1408
+
1409
+ if (phba->cfg_xri_rebalancing) {
1410
+ /* Multi-XRI pools handler */
1411
+ lpfc_hb_mxp_handler(phba);
1412
+ }
12771413
12781414 vports = lpfc_create_vport_work_array(phba);
12791415 if (vports != NULL)
12801416 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
12811417 lpfc_rcv_seq_check_edtov(vports[i]);
1282
- lpfc_fdmi_num_disc_check(vports[i]);
1418
+ lpfc_fdmi_change_check(vports[i]);
12831419 }
12841420 lpfc_destroy_vport_work_array(phba, vports);
12851421
....@@ -1288,107 +1424,6 @@
12881424 (phba->pport->fc_flag & FC_OFFLINE_MODE))
12891425 return;
12901426
1291
- if (phba->cfg_auto_imax) {
1292
- if (!phba->last_eqdelay_time) {
1293
- phba->last_eqdelay_time = jiffies;
1294
- goto skip_eqdelay;
1295
- }
1296
- time_elapsed = jiffies - phba->last_eqdelay_time;
1297
- phba->last_eqdelay_time = jiffies;
1298
-
1299
- tot = 0xffff;
1300
- /* Check outstanding IO count */
1301
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
1302
- if (phba->nvmet_support) {
1303
- tgtp = phba->targetport->private;
1304
- /* Calculate outstanding IOs */
1305
- tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
1306
- tot += atomic_read(&tgtp->xmt_fcp_release);
1307
- tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
1308
- } else {
1309
- localport = phba->pport->localport;
1310
- if (!localport || !localport->private)
1311
- goto skip_eqdelay;
1312
- lport = (struct lpfc_nvme_lport *)
1313
- localport->private;
1314
- tot = 0;
1315
- for (i = 0;
1316
- i < phba->cfg_nvme_io_channel; i++) {
1317
- cstat = &lport->cstat[i];
1318
- data1 = atomic_read(
1319
- &cstat->fc4NvmeInputRequests);
1320
- data2 = atomic_read(
1321
- &cstat->fc4NvmeOutputRequests);
1322
- data3 = atomic_read(
1323
- &cstat->fc4NvmeControlRequests);
1324
- tot += (data1 + data2 + data3);
1325
- tot -= atomic_read(
1326
- &cstat->fc4NvmeIoCmpls);
1327
- }
1328
- }
1329
- }
1330
-
1331
- /* Interrupts per sec per EQ */
1332
- val = phba->cfg_fcp_imax / phba->io_channel_irqs;
1333
- tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */
1334
-
1335
- /* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */
1336
- max_cqe = time_elapsed * tick_cqe;
1337
-
1338
- for (i = 0; i < phba->io_channel_irqs; i++) {
1339
- /* Fast-path EQ */
1340
- qp = phba->sli4_hba.hba_eq[i];
1341
- if (!qp)
1342
- continue;
1343
-
1344
- /* Use no EQ delay if we don't have many outstanding
1345
- * IOs, or if we are only processing 1 CQE/ISR or less.
1346
- * Otherwise, assume we can process up to lpfc_fcp_imax
1347
- * interrupts per HBA.
1348
- */
1349
- if (tot < LPFC_NODELAY_MAX_IO ||
1350
- qp->EQ_cqe_cnt <= max_cqe)
1351
- val = 0;
1352
- else
1353
- val = phba->cfg_fcp_imax;
1354
-
1355
- if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
1356
- /* Use EQ Delay Register method */
1357
-
1358
- /* Convert for EQ Delay register */
1359
- if (val) {
1360
- /* First, interrupts per sec per EQ */
1361
- val = phba->cfg_fcp_imax /
1362
- phba->io_channel_irqs;
1363
-
1364
- /* us delay between each interrupt */
1365
- val = LPFC_SEC_TO_USEC / val;
1366
- }
1367
- if (val != qp->q_mode) {
1368
- reg_data.word0 = 0;
1369
- bf_set(lpfc_sliport_eqdelay_id,
1370
- &reg_data, qp->queue_id);
1371
- bf_set(lpfc_sliport_eqdelay_delay,
1372
- &reg_data, val);
1373
- writel(reg_data.word0, eqdreg);
1374
- }
1375
- } else {
1376
- /* Use mbox command method */
1377
- if (val != qp->q_mode)
1378
- lpfc_modify_hba_eq_delay(phba, i,
1379
- 1, val);
1380
- }
1381
-
1382
- /*
1383
- * val is cfg_fcp_imax or 0 for mbox delay or us delay
1384
- * between interrupts for EQDR.
1385
- */
1386
- qp->q_mode = val;
1387
- qp->EQ_cqe_cnt = 0;
1388
- }
1389
- }
1390
-
1391
-skip_eqdelay:
13921427 spin_lock_irq(&phba->pport->work_port_lock);
13931428
13941429 if (time_after(phba->last_completion_time +
....@@ -1537,6 +1572,7 @@
15371572 spin_unlock_irq(&phba->hbalock);
15381573
15391574 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1575
+ lpfc_sli_flush_io_rings(phba);
15401576 lpfc_offline(phba);
15411577 lpfc_hba_down_post(phba);
15421578 lpfc_unblock_mgmt_io(phba);
....@@ -1567,11 +1603,11 @@
15671603 return;
15681604 }
15691605
1570
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1571
- "0479 Deferred Adapter Hardware Error "
1572
- "Data: x%x x%x x%x\n",
1573
- phba->work_hs,
1574
- phba->work_status[0], phba->work_status[1]);
1606
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1607
+ "0479 Deferred Adapter Hardware Error "
1608
+ "Data: x%x x%x x%x\n",
1609
+ phba->work_hs, phba->work_status[0],
1610
+ phba->work_status[1]);
15751611
15761612 spin_lock_irq(&phba->hbalock);
15771613 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
....@@ -1722,7 +1758,7 @@
17221758 temp_event_data.event_code = LPFC_CRIT_TEMP;
17231759 temp_event_data.data = (uint32_t)temperature;
17241760
1725
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1761
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17261762 "0406 Adapter maximum temperature exceeded "
17271763 "(%ld), taking this port offline "
17281764 "Data: x%x x%x x%x\n",
....@@ -1746,7 +1782,7 @@
17461782 * failure is a value other than FFER6. Do not call the offline
17471783 * twice. This is the adapter hardware error path.
17481784 */
1749
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1785
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17501786 "0457 Adapter Hardware Error "
17511787 "Data: x%x x%x x%x\n",
17521788 phba->work_hs,
....@@ -1767,7 +1803,7 @@
17671803 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
17681804 * @phba: pointer to lpfc hba data structure.
17691805 * @mbx_action: flag for mailbox shutdown action.
1770
- *
1806
+ * @en_rn_msg: send reset/port recovery message.
17711807 * This routine is invoked to perform an SLI4 port PCI function reset in
17721808 * response to port status register polling attention. It waits for port
17731809 * status register (ERR, RDY, RN) bits before proceeding with function reset.
....@@ -1794,23 +1830,24 @@
17941830
17951831 /* need reset: attempt for port recovery */
17961832 if (en_rn_msg)
1797
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1833
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17981834 "2887 Reset Needed: Attempting Port "
17991835 "Recovery...\n");
18001836 lpfc_offline_prep(phba, mbx_action);
1837
+ lpfc_sli_flush_io_rings(phba);
18011838 lpfc_offline(phba);
18021839 /* release interrupt for possible resource change */
18031840 lpfc_sli4_disable_intr(phba);
18041841 rc = lpfc_sli_brdrestart(phba);
18051842 if (rc) {
1806
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1843
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18071844 "6309 Failed to restart board\n");
18081845 return rc;
18091846 }
18101847 /* request and enable interrupt */
18111848 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
18121849 if (intr_mode == LPFC_INTR_ERROR) {
1813
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1850
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18141851 "3175 Failed to enable interrupt\n");
18151852 return -EIO;
18161853 }
....@@ -1848,8 +1885,12 @@
18481885 /* If the pci channel is offline, ignore possible errors, since
18491886 * we cannot communicate with the pci card anyway.
18501887 */
1851
- if (pci_channel_offline(phba->pcidev))
1888
+ if (pci_channel_offline(phba->pcidev)) {
1889
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1890
+ "3166 pci channel is offline\n");
1891
+ lpfc_sli4_offline_eratt(phba);
18521892 return;
1893
+ }
18531894
18541895 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
18551896 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
....@@ -1868,7 +1909,7 @@
18681909 lpfc_sli4_offline_eratt(phba);
18691910 return;
18701911 }
1871
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1912
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18721913 "7623 Checking UE recoverable");
18731914
18741915 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
....@@ -1885,7 +1926,7 @@
18851926 msleep(1000);
18861927 }
18871928
1888
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1929
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18891930 "4827 smphr_port_status x%x : Waited %dSec",
18901931 smphr_port_status, i);
18911932
....@@ -1903,17 +1944,17 @@
19031944 LPFC_MBX_NO_WAIT, en_rn_msg);
19041945 if (rc == 0)
19051946 return;
1906
- lpfc_printf_log(phba,
1907
- KERN_ERR, LOG_INIT,
1947
+ lpfc_printf_log(phba, KERN_ERR,
1948
+ LOG_TRACE_EVENT,
19081949 "4215 Failed to recover UE");
19091950 break;
19101951 }
19111952 }
19121953 }
1913
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1954
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19141955 "7624 Firmware not ready: Failing UE recovery,"
19151956 " waited %dSec", i);
1916
- lpfc_sli4_offline_eratt(phba);
1957
+ phba->link_state = LPFC_HBA_ERROR;
19171958 break;
19181959
19191960 case LPFC_SLI_INTF_IF_TYPE_2:
....@@ -1923,18 +1964,19 @@
19231964 &portstat_reg.word0);
19241965 /* consider PCI bus read error as pci_channel_offline */
19251966 if (pci_rd_rc1 == -EIO) {
1926
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1967
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19271968 "3151 PCI bus read access failure: x%x\n",
19281969 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
1970
+ lpfc_sli4_offline_eratt(phba);
19291971 return;
19301972 }
19311973 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
19321974 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
19331975 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
1934
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1935
- "2889 Port Overtemperature event, "
1936
- "taking port offline Data: x%x x%x\n",
1937
- reg_err1, reg_err2);
1976
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1977
+ "2889 Port Overtemperature event, "
1978
+ "taking port offline Data: x%x x%x\n",
1979
+ reg_err1, reg_err2);
19381980
19391981 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
19401982 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
....@@ -1956,17 +1998,17 @@
19561998 }
19571999 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
19582000 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
1959
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2001
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19602002 "3143 Port Down: Firmware Update "
19612003 "Detected\n");
19622004 en_rn_msg = false;
19632005 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
19642006 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
1965
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2007
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19662008 "3144 Port Down: Debug Dump\n");
19672009 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
19682010 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
1969
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2011
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19702012 "3145 Port Down: Provisioning\n");
19712013
19722014 /* If resets are disabled then leave the HBA alone and return */
....@@ -1985,10 +2027,9 @@
19852027 break;
19862028 }
19872029 /* fall through for not able to recover */
1988
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1989
- "3152 Unrecoverable error, bring the port "
1990
- "offline\n");
1991
- lpfc_sli4_offline_eratt(phba);
2030
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2031
+ "3152 Unrecoverable error\n");
2032
+ phba->link_state = LPFC_HBA_ERROR;
19922033 break;
19932034 case LPFC_SLI_INTF_IF_TYPE_1:
19942035 default:
....@@ -2104,8 +2145,8 @@
21042145 lpfc_linkdown(phba);
21052146 phba->link_state = LPFC_HBA_ERROR;
21062147
2107
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
2108
- "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
2148
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2149
+ "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
21092150
21102151 return;
21112152 }
....@@ -2854,13 +2895,14 @@
28542895 */
28552896 while (!list_empty(&vport->fc_nodes)) {
28562897 if (i++ > 3000) {
2857
- lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2898
+ lpfc_printf_vlog(vport, KERN_ERR,
2899
+ LOG_TRACE_EVENT,
28582900 "0233 Nodelist not empty\n");
28592901 list_for_each_entry_safe(ndlp, next_ndlp,
28602902 &vport->fc_nodes, nlp_listp) {
28612903 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2862
- LOG_NODE,
2863
- "0282 did:x%x ndlp:x%p "
2904
+ LOG_TRACE_EVENT,
2905
+ "0282 did:x%x ndlp:x%px "
28642906 "usgmap:x%x refcnt:%d\n",
28652907 ndlp->nlp_DID, (void *)ndlp,
28662908 ndlp->nlp_usg_map,
....@@ -2943,7 +2985,10 @@
29432985 void
29442986 lpfc_stop_hba_timers(struct lpfc_hba *phba)
29452987 {
2946
- lpfc_stop_vport_timers(phba->pport);
2988
+ if (phba->pport)
2989
+ lpfc_stop_vport_timers(phba->pport);
2990
+ cancel_delayed_work_sync(&phba->eq_delay_work);
2991
+ cancel_delayed_work_sync(&phba->idle_stat_delay_work);
29472992 del_timer_sync(&phba->sli.mbox_tmo);
29482993 del_timer_sync(&phba->fabric_block_timer);
29492994 del_timer_sync(&phba->eratt_poll);
....@@ -2960,11 +3005,11 @@
29603005 del_timer_sync(&phba->fcp_poll_timer);
29613006 break;
29623007 case LPFC_PCI_DEV_OC:
2963
- /* Stop any OneConnect device sepcific driver timers */
3008
+ /* Stop any OneConnect device specific driver timers */
29643009 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
29653010 break;
29663011 default:
2967
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3012
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
29683013 "0297 Invalid device group (x%x)\n",
29693014 phba->pci_dev_grp);
29703015 break;
....@@ -2975,6 +3020,7 @@
29753020 /**
29763021 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
29773022 * @phba: pointer to lpfc hba data structure.
3023
+ * @mbx_action: flag for mailbox no wait action.
29783024 *
29793025 * This routine marks a HBA's management interface as blocked. Once the HBA's
29803026 * management interface is marked as blocked, all the user space access to
....@@ -3011,10 +3057,10 @@
30113057 /* Check active mailbox complete status every 2ms */
30123058 msleep(2);
30133059 if (time_after(jiffies, timeout)) {
3014
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3015
- "2813 Mgmt IO is Blocked %x "
3016
- "- mbox cmd %x still active\n",
3017
- phba->sli.sli_flag, actcmd);
3060
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3061
+ "2813 Mgmt IO is Blocked %x "
3062
+ "- mbox cmd %x still active\n",
3063
+ phba->sli.sli_flag, actcmd);
30183064 break;
30193065 }
30203066 }
....@@ -3060,14 +3106,254 @@
30603106 continue;
30613107 }
30623108 ndlp->nlp_rpi = rpi;
3063
- lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
3064
- "0009 rpi:%x DID:%x "
3065
- "flg:%x map:%x %p\n", ndlp->nlp_rpi,
3066
- ndlp->nlp_DID, ndlp->nlp_flag,
3067
- ndlp->nlp_usg_map, ndlp);
3109
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3110
+ LOG_NODE | LOG_DISCOVERY,
3111
+ "0009 Assign RPI x%x to ndlp x%px "
3112
+ "DID:x%06x flg:x%x map:x%x\n",
3113
+ ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3114
+ ndlp->nlp_flag, ndlp->nlp_usg_map);
30683115 }
30693116 }
30703117 lpfc_destroy_vport_work_array(phba, vports);
3118
+}
3119
+
3120
+/**
3121
+ * lpfc_create_expedite_pool - create expedite pool
3122
+ * @phba: pointer to lpfc hba data structure.
3123
+ *
3124
+ * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
3125
+ * to expedite pool. Mark them as expedite.
3126
+ **/
3127
+static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3128
+{
3129
+ struct lpfc_sli4_hdw_queue *qp;
3130
+ struct lpfc_io_buf *lpfc_ncmd;
3131
+ struct lpfc_io_buf *lpfc_ncmd_next;
3132
+ struct lpfc_epd_pool *epd_pool;
3133
+ unsigned long iflag;
3134
+
3135
+ epd_pool = &phba->epd_pool;
3136
+ qp = &phba->sli4_hba.hdwq[0];
3137
+
3138
+ spin_lock_init(&epd_pool->lock);
3139
+ spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3140
+ spin_lock(&epd_pool->lock);
3141
+ INIT_LIST_HEAD(&epd_pool->list);
3142
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3143
+ &qp->lpfc_io_buf_list_put, list) {
3144
+ list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3145
+ lpfc_ncmd->expedite = true;
3146
+ qp->put_io_bufs--;
3147
+ epd_pool->count++;
3148
+ if (epd_pool->count >= XRI_BATCH)
3149
+ break;
3150
+ }
3151
+ spin_unlock(&epd_pool->lock);
3152
+ spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3153
+}
3154
+
3155
+/**
3156
+ * lpfc_destroy_expedite_pool - destroy expedite pool
3157
+ * @phba: pointer to lpfc hba data structure.
3158
+ *
3159
+ * This routine returns XRIs from expedite pool to lpfc_io_buf_list_put
3160
+ * of HWQ 0. Clear the mark.
3161
+ **/
3162
+static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3163
+{
3164
+ struct lpfc_sli4_hdw_queue *qp;
3165
+ struct lpfc_io_buf *lpfc_ncmd;
3166
+ struct lpfc_io_buf *lpfc_ncmd_next;
3167
+ struct lpfc_epd_pool *epd_pool;
3168
+ unsigned long iflag;
3169
+
3170
+ epd_pool = &phba->epd_pool;
3171
+ qp = &phba->sli4_hba.hdwq[0];
3172
+
3173
+ spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3174
+ spin_lock(&epd_pool->lock);
3175
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3176
+ &epd_pool->list, list) {
3177
+ list_move_tail(&lpfc_ncmd->list,
3178
+ &qp->lpfc_io_buf_list_put);
3179
+ lpfc_ncmd->flags = false;
3180
+ qp->put_io_bufs++;
3181
+ epd_pool->count--;
3182
+ }
3183
+ spin_unlock(&epd_pool->lock);
3184
+ spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3185
+}
3186
+
3187
+/**
3188
+ * lpfc_create_multixri_pools - create multi-XRI pools
3189
+ * @phba: pointer to lpfc hba data structure.
3190
+ *
3191
+ * This routine initialize public, private per HWQ. Then, move XRIs from
3192
+ * lpfc_io_buf_list_put to public pool. High and low watermark are also
3193
+ * Initialized.
3194
+ **/
3195
+void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3196
+{
3197
+ u32 i, j;
3198
+ u32 hwq_count;
3199
+ u32 count_per_hwq;
3200
+ struct lpfc_io_buf *lpfc_ncmd;
3201
+ struct lpfc_io_buf *lpfc_ncmd_next;
3202
+ unsigned long iflag;
3203
+ struct lpfc_sli4_hdw_queue *qp;
3204
+ struct lpfc_multixri_pool *multixri_pool;
3205
+ struct lpfc_pbl_pool *pbl_pool;
3206
+ struct lpfc_pvt_pool *pvt_pool;
3207
+
3208
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3209
+ "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3210
+ phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3211
+ phba->sli4_hba.io_xri_cnt);
3212
+
3213
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3214
+ lpfc_create_expedite_pool(phba);
3215
+
3216
+ hwq_count = phba->cfg_hdw_queue;
3217
+ count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3218
+
3219
+ for (i = 0; i < hwq_count; i++) {
3220
+ multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3221
+
3222
+ if (!multixri_pool) {
3223
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3224
+ "1238 Failed to allocate memory for "
3225
+ "multixri_pool\n");
3226
+
3227
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3228
+ lpfc_destroy_expedite_pool(phba);
3229
+
3230
+ j = 0;
3231
+ while (j < i) {
3232
+ qp = &phba->sli4_hba.hdwq[j];
3233
+ kfree(qp->p_multixri_pool);
3234
+ j++;
3235
+ }
3236
+ phba->cfg_xri_rebalancing = 0;
3237
+ return;
3238
+ }
3239
+
3240
+ qp = &phba->sli4_hba.hdwq[i];
3241
+ qp->p_multixri_pool = multixri_pool;
3242
+
3243
+ multixri_pool->xri_limit = count_per_hwq;
3244
+ multixri_pool->rrb_next_hwqid = i;
3245
+
3246
+ /* Deal with public free xri pool */
3247
+ pbl_pool = &multixri_pool->pbl_pool;
3248
+ spin_lock_init(&pbl_pool->lock);
3249
+ spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3250
+ spin_lock(&pbl_pool->lock);
3251
+ INIT_LIST_HEAD(&pbl_pool->list);
3252
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3253
+ &qp->lpfc_io_buf_list_put, list) {
3254
+ list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3255
+ qp->put_io_bufs--;
3256
+ pbl_pool->count++;
3257
+ }
3258
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3259
+ "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3260
+ pbl_pool->count, i);
3261
+ spin_unlock(&pbl_pool->lock);
3262
+ spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3263
+
3264
+ /* Deal with private free xri pool */
3265
+ pvt_pool = &multixri_pool->pvt_pool;
3266
+ pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3267
+ pvt_pool->low_watermark = XRI_BATCH;
3268
+ spin_lock_init(&pvt_pool->lock);
3269
+ spin_lock_irqsave(&pvt_pool->lock, iflag);
3270
+ INIT_LIST_HEAD(&pvt_pool->list);
3271
+ pvt_pool->count = 0;
3272
+ spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3273
+ }
3274
+}
3275
+
3276
+/**
3277
+ * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3278
+ * @phba: pointer to lpfc hba data structure.
3279
+ *
3280
+ * This routine returns XRIs from public/private to lpfc_io_buf_list_put.
3281
+ **/
3282
+static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3283
+{
3284
+ u32 i;
3285
+ u32 hwq_count;
3286
+ struct lpfc_io_buf *lpfc_ncmd;
3287
+ struct lpfc_io_buf *lpfc_ncmd_next;
3288
+ unsigned long iflag;
3289
+ struct lpfc_sli4_hdw_queue *qp;
3290
+ struct lpfc_multixri_pool *multixri_pool;
3291
+ struct lpfc_pbl_pool *pbl_pool;
3292
+ struct lpfc_pvt_pool *pvt_pool;
3293
+
3294
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3295
+ lpfc_destroy_expedite_pool(phba);
3296
+
3297
+ if (!(phba->pport->load_flag & FC_UNLOADING))
3298
+ lpfc_sli_flush_io_rings(phba);
3299
+
3300
+ hwq_count = phba->cfg_hdw_queue;
3301
+
3302
+ for (i = 0; i < hwq_count; i++) {
3303
+ qp = &phba->sli4_hba.hdwq[i];
3304
+ multixri_pool = qp->p_multixri_pool;
3305
+ if (!multixri_pool)
3306
+ continue;
3307
+
3308
+ qp->p_multixri_pool = NULL;
3309
+
3310
+ spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3311
+
3312
+ /* Deal with public free xri pool */
3313
+ pbl_pool = &multixri_pool->pbl_pool;
3314
+ spin_lock(&pbl_pool->lock);
3315
+
3316
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3317
+ "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3318
+ pbl_pool->count, i);
3319
+
3320
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3321
+ &pbl_pool->list, list) {
3322
+ list_move_tail(&lpfc_ncmd->list,
3323
+ &qp->lpfc_io_buf_list_put);
3324
+ qp->put_io_bufs++;
3325
+ pbl_pool->count--;
3326
+ }
3327
+
3328
+ INIT_LIST_HEAD(&pbl_pool->list);
3329
+ pbl_pool->count = 0;
3330
+
3331
+ spin_unlock(&pbl_pool->lock);
3332
+
3333
+ /* Deal with private free xri pool */
3334
+ pvt_pool = &multixri_pool->pvt_pool;
3335
+ spin_lock(&pvt_pool->lock);
3336
+
3337
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3338
+ "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3339
+ pvt_pool->count, i);
3340
+
3341
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3342
+ &pvt_pool->list, list) {
3343
+ list_move_tail(&lpfc_ncmd->list,
3344
+ &qp->lpfc_io_buf_list_put);
3345
+ qp->put_io_bufs++;
3346
+ pvt_pool->count--;
3347
+ }
3348
+
3349
+ INIT_LIST_HEAD(&pvt_pool->list);
3350
+ pvt_pool->count = 0;
3351
+
3352
+ spin_unlock(&pvt_pool->lock);
3353
+ spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3354
+
3355
+ kfree(multixri_pool);
3356
+ }
30713357 }
30723358
30733359 /**
....@@ -3119,7 +3405,7 @@
31193405 !phba->nvmet_support) {
31203406 error = lpfc_nvme_create_localport(phba->pport);
31213407 if (error)
3122
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3408
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
31233409 "6132 NVME restore reg failed "
31243410 "on nvmei error x%x\n", error);
31253411 }
....@@ -3152,6 +3438,11 @@
31523438 }
31533439 lpfc_destroy_vport_work_array(phba, vports);
31543440
3441
+ if (phba->cfg_xri_rebalancing)
3442
+ lpfc_create_multixri_pools(phba);
3443
+
3444
+ lpfc_cpuhp_add(phba);
3445
+
31553446 lpfc_unblock_mgmt_io(phba);
31563447 return 0;
31573448 }
....@@ -3180,6 +3471,7 @@
31803471 /**
31813472 * lpfc_offline_prep - Prepare a HBA to be brought offline
31823473 * @phba: pointer to lpfc hba data structure.
3474
+ * @mbx_action: flag for mailbox shutdown action.
31833475 *
31843476 * This routine is invoked to prepare a HBA to be brought offline. It performs
31853477 * unregistration login to all the nodes on all vports and flushes the mailbox
....@@ -3218,10 +3510,15 @@
32183510 list_for_each_entry_safe(ndlp, next_ndlp,
32193511 &vports[i]->fc_nodes,
32203512 nlp_listp) {
3221
- if (!NLP_CHK_NODE_ACT(ndlp))
3513
+ if ((!NLP_CHK_NODE_ACT(ndlp)) ||
3514
+ ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
3515
+ /* Driver must assume RPI is invalid for
3516
+ * any unused or inactive node.
3517
+ */
3518
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
32223519 continue;
3223
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
3224
- continue;
3520
+ }
3521
+
32253522 if (ndlp->nlp_type & NLP_FABRIC) {
32263523 lpfc_disc_state_machine(vports[i], ndlp,
32273524 NULL, NLP_EVT_DEVICE_RECOVERY);
....@@ -3237,16 +3534,16 @@
32373534 * comes back online.
32383535 */
32393536 if (phba->sli_rev == LPFC_SLI_REV4) {
3240
- lpfc_printf_vlog(ndlp->vport,
3241
- KERN_INFO, LOG_NODE,
3242
- "0011 lpfc_offline: "
3243
- "ndlp:x%p did %x "
3244
- "usgmap:x%x rpi:%x\n",
3245
- ndlp, ndlp->nlp_DID,
3246
- ndlp->nlp_usg_map,
3247
- ndlp->nlp_rpi);
3248
-
3537
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3538
+ LOG_NODE | LOG_DISCOVERY,
3539
+ "0011 Free RPI x%x on "
3540
+ "ndlp:x%px did x%x "
3541
+ "usgmap:x%x\n",
3542
+ ndlp->nlp_rpi, ndlp,
3543
+ ndlp->nlp_DID,
3544
+ ndlp->nlp_usg_map);
32493545 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3546
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
32503547 }
32513548 lpfc_unreg_rpi(vports[i], ndlp);
32523549 }
....@@ -3310,6 +3607,10 @@
33103607 spin_unlock_irq(shost->host_lock);
33113608 }
33123609 lpfc_destroy_vport_work_array(phba, vports);
3610
+ __lpfc_cpuhp_remove(phba);
3611
+
3612
+ if (phba->cfg_xri_rebalancing)
3613
+ lpfc_destroy_multixri_pools(phba);
33133614 }
33143615
33153616 /**
....@@ -3323,7 +3624,7 @@
33233624 static void
33243625 lpfc_scsi_free(struct lpfc_hba *phba)
33253626 {
3326
- struct lpfc_scsi_buf *sb, *sb_next;
3627
+ struct lpfc_io_buf *sb, *sb_next;
33273628
33283629 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
33293630 return;
....@@ -3355,50 +3656,59 @@
33553656 spin_unlock(&phba->scsi_buf_list_get_lock);
33563657 spin_unlock_irq(&phba->hbalock);
33573658 }
3659
+
33583660 /**
3359
- * lpfc_nvme_free - Free all the NVME buffers and IOCBs from driver lists
3661
+ * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
33603662 * @phba: pointer to lpfc hba data structure.
33613663 *
3362
- * This routine is to free all the NVME buffers and IOCBs from the driver
3664
+ * This routine is to free all the IO buffers and IOCBs from the driver
33633665 * list back to kernel. It is called from lpfc_pci_remove_one to free
33643666 * the internal resources before the device is removed from the system.
33653667 **/
3366
-static void
3367
-lpfc_nvme_free(struct lpfc_hba *phba)
3668
+void
3669
+lpfc_io_free(struct lpfc_hba *phba)
33683670 {
3369
- struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
3671
+ struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
3672
+ struct lpfc_sli4_hdw_queue *qp;
3673
+ int idx;
33703674
3371
- if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
3372
- return;
3675
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3676
+ qp = &phba->sli4_hba.hdwq[idx];
3677
+ /* Release all the lpfc_nvme_bufs maintained by this host. */
3678
+ spin_lock(&qp->io_buf_list_put_lock);
3679
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3680
+ &qp->lpfc_io_buf_list_put,
3681
+ list) {
3682
+ list_del(&lpfc_ncmd->list);
3683
+ qp->put_io_bufs--;
3684
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3685
+ lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3686
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
3687
+ lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3688
+ lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3689
+ kfree(lpfc_ncmd);
3690
+ qp->total_io_bufs--;
3691
+ }
3692
+ spin_unlock(&qp->io_buf_list_put_lock);
33733693
3374
- spin_lock_irq(&phba->hbalock);
3375
-
3376
- /* Release all the lpfc_nvme_bufs maintained by this host. */
3377
- spin_lock(&phba->nvme_buf_list_put_lock);
3378
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3379
- &phba->lpfc_nvme_buf_list_put, list) {
3380
- list_del(&lpfc_ncmd->list);
3381
- phba->put_nvme_bufs--;
3382
- dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
3383
- lpfc_ncmd->dma_handle);
3384
- kfree(lpfc_ncmd);
3385
- phba->total_nvme_bufs--;
3694
+ spin_lock(&qp->io_buf_list_get_lock);
3695
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3696
+ &qp->lpfc_io_buf_list_get,
3697
+ list) {
3698
+ list_del(&lpfc_ncmd->list);
3699
+ qp->get_io_bufs--;
3700
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3701
+ lpfc_ncmd->data, lpfc_ncmd->dma_handle);
3702
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
3703
+ lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
3704
+ lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
3705
+ kfree(lpfc_ncmd);
3706
+ qp->total_io_bufs--;
3707
+ }
3708
+ spin_unlock(&qp->io_buf_list_get_lock);
33863709 }
3387
- spin_unlock(&phba->nvme_buf_list_put_lock);
3388
-
3389
- spin_lock(&phba->nvme_buf_list_get_lock);
3390
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3391
- &phba->lpfc_nvme_buf_list_get, list) {
3392
- list_del(&lpfc_ncmd->list);
3393
- phba->get_nvme_bufs--;
3394
- dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
3395
- lpfc_ncmd->dma_handle);
3396
- kfree(lpfc_ncmd);
3397
- phba->total_nvme_bufs--;
3398
- }
3399
- spin_unlock(&phba->nvme_buf_list_get_lock);
3400
- spin_unlock_irq(&phba->hbalock);
34013710 }
3711
+
34023712 /**
34033713 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
34043714 * @phba: pointer to lpfc hba data structure.
....@@ -3436,7 +3746,8 @@
34363746 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
34373747 GFP_KERNEL);
34383748 if (sglq_entry == NULL) {
3439
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3749
+ lpfc_printf_log(phba, KERN_ERR,
3750
+ LOG_TRACE_EVENT,
34403751 "2562 Failure to allocate an "
34413752 "ELS sgl entry:%d\n", i);
34423753 rc = -ENOMEM;
....@@ -3447,7 +3758,8 @@
34473758 &sglq_entry->phys);
34483759 if (sglq_entry->virt == NULL) {
34493760 kfree(sglq_entry);
3450
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3761
+ lpfc_printf_log(phba, KERN_ERR,
3762
+ LOG_TRACE_EVENT,
34513763 "2563 Failure to allocate an "
34523764 "ELS mbuf:%d\n", i);
34533765 rc = -ENOMEM;
....@@ -3502,7 +3814,8 @@
35023814 &phba->sli4_hba.lpfc_els_sgl_list, list) {
35033815 lxri = lpfc_sli4_next_xritag(phba);
35043816 if (lxri == NO_XRI) {
3505
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3817
+ lpfc_printf_log(phba, KERN_ERR,
3818
+ LOG_TRACE_EVENT,
35063819 "2400 Failed to allocate xri for "
35073820 "ELS sgl\n");
35083821 rc = -ENOMEM;
....@@ -3557,7 +3870,8 @@
35573870 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
35583871 GFP_KERNEL);
35593872 if (sglq_entry == NULL) {
3560
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3873
+ lpfc_printf_log(phba, KERN_ERR,
3874
+ LOG_TRACE_EVENT,
35613875 "6303 Failure to allocate an "
35623876 "NVMET sgl entry:%d\n", i);
35633877 rc = -ENOMEM;
....@@ -3568,7 +3882,8 @@
35683882 &sglq_entry->phys);
35693883 if (sglq_entry->virt == NULL) {
35703884 kfree(sglq_entry);
3571
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3885
+ lpfc_printf_log(phba, KERN_ERR,
3886
+ LOG_TRACE_EVENT,
35723887 "6304 Failure to allocate an "
35733888 "NVMET buf:%d\n", i);
35743889 rc = -ENOMEM;
....@@ -3624,7 +3939,8 @@
36243939 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
36253940 lxri = lpfc_sli4_next_xritag(phba);
36263941 if (lxri == NO_XRI) {
3627
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3942
+ lpfc_printf_log(phba, KERN_ERR,
3943
+ LOG_TRACE_EVENT,
36283944 "6307 Failed to allocate xri for "
36293945 "NVMET sgl\n");
36303946 rc = -ENOMEM;
....@@ -3640,8 +3956,102 @@
36403956 return rc;
36413957 }
36423958
3959
+int
3960
+lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
3961
+{
3962
+ LIST_HEAD(blist);
3963
+ struct lpfc_sli4_hdw_queue *qp;
3964
+ struct lpfc_io_buf *lpfc_cmd;
3965
+ struct lpfc_io_buf *iobufp, *prev_iobufp;
3966
+ int idx, cnt, xri, inserted;
3967
+
3968
+ cnt = 0;
3969
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
3970
+ qp = &phba->sli4_hba.hdwq[idx];
3971
+ spin_lock_irq(&qp->io_buf_list_get_lock);
3972
+ spin_lock(&qp->io_buf_list_put_lock);
3973
+
3974
+ /* Take everything off the get and put lists */
3975
+ list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
3976
+ list_splice(&qp->lpfc_io_buf_list_put, &blist);
3977
+ INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
3978
+ INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
3979
+ cnt += qp->get_io_bufs + qp->put_io_bufs;
3980
+ qp->get_io_bufs = 0;
3981
+ qp->put_io_bufs = 0;
3982
+ qp->total_io_bufs = 0;
3983
+ spin_unlock(&qp->io_buf_list_put_lock);
3984
+ spin_unlock_irq(&qp->io_buf_list_get_lock);
3985
+ }
3986
+
3987
+ /*
3988
+ * Take IO buffers off blist and put on cbuf sorted by XRI.
3989
+ * This is because POST_SGL takes a sequential range of XRIs
3990
+ * to post to the firmware.
3991
+ */
3992
+ for (idx = 0; idx < cnt; idx++) {
3993
+ list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
3994
+ if (!lpfc_cmd)
3995
+ return cnt;
3996
+ if (idx == 0) {
3997
+ list_add_tail(&lpfc_cmd->list, cbuf);
3998
+ continue;
3999
+ }
4000
+ xri = lpfc_cmd->cur_iocbq.sli4_xritag;
4001
+ inserted = 0;
4002
+ prev_iobufp = NULL;
4003
+ list_for_each_entry(iobufp, cbuf, list) {
4004
+ if (xri < iobufp->cur_iocbq.sli4_xritag) {
4005
+ if (prev_iobufp)
4006
+ list_add(&lpfc_cmd->list,
4007
+ &prev_iobufp->list);
4008
+ else
4009
+ list_add(&lpfc_cmd->list, cbuf);
4010
+ inserted = 1;
4011
+ break;
4012
+ }
4013
+ prev_iobufp = iobufp;
4014
+ }
4015
+ if (!inserted)
4016
+ list_add_tail(&lpfc_cmd->list, cbuf);
4017
+ }
4018
+ return cnt;
4019
+}
4020
+
4021
+int
4022
+lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4023
+{
4024
+ struct lpfc_sli4_hdw_queue *qp;
4025
+ struct lpfc_io_buf *lpfc_cmd;
4026
+ int idx, cnt;
4027
+
4028
+ qp = phba->sli4_hba.hdwq;
4029
+ cnt = 0;
4030
+ while (!list_empty(cbuf)) {
4031
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4032
+ list_remove_head(cbuf, lpfc_cmd,
4033
+ struct lpfc_io_buf, list);
4034
+ if (!lpfc_cmd)
4035
+ return cnt;
4036
+ cnt++;
4037
+ qp = &phba->sli4_hba.hdwq[idx];
4038
+ lpfc_cmd->hdwq_no = idx;
4039
+ lpfc_cmd->hdwq = qp;
4040
+ lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
4041
+ lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
4042
+ spin_lock(&qp->io_buf_list_put_lock);
4043
+ list_add_tail(&lpfc_cmd->list,
4044
+ &qp->lpfc_io_buf_list_put);
4045
+ qp->put_io_bufs++;
4046
+ qp->total_io_bufs++;
4047
+ spin_unlock(&qp->io_buf_list_put_lock);
4048
+ }
4049
+ }
4050
+ return cnt;
4051
+}
4052
+
36434053 /**
3644
- * lpfc_sli4_scsi_sgl_update - update xri-sgl sizing and mapping
4054
+ * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
36454055 * @phba: pointer to lpfc hba data structure.
36464056 *
36474057 * This routine first calculates the sizes of the current els and allocated
....@@ -3653,92 +4063,190 @@
36534063 * 0 - successful (for now, it always returns 0)
36544064 **/
36554065 int
3656
-lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
4066
+lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
36574067 {
3658
- struct lpfc_scsi_buf *psb, *psb_next;
3659
- uint16_t i, lxri, els_xri_cnt, scsi_xri_cnt;
3660
- LIST_HEAD(scsi_sgl_list);
3661
- int rc;
4068
+ struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4069
+ uint16_t i, lxri, els_xri_cnt;
4070
+ uint16_t io_xri_cnt, io_xri_max;
4071
+ LIST_HEAD(io_sgl_list);
4072
+ int rc, cnt;
36624073
36634074 /*
3664
- * update on pci function's els xri-sgl list
4075
+ * update on pci function's allocated nvme xri-sgl list
36654076 */
4077
+
4078
+ /* maximum number of xris available for nvme buffers */
36664079 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3667
- phba->total_scsi_bufs = 0;
3668
-
3669
- /*
3670
- * update on pci function's allocated scsi xri-sgl list
3671
- */
3672
- /* maximum number of xris available for scsi buffers */
3673
- phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
3674
- els_xri_cnt;
3675
-
3676
- if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3677
- return 0;
3678
-
3679
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3680
- phba->sli4_hba.scsi_xri_max = /* Split them up */
3681
- (phba->sli4_hba.scsi_xri_max *
3682
- phba->cfg_xri_split) / 100;
3683
-
3684
- spin_lock_irq(&phba->scsi_buf_list_get_lock);
3685
- spin_lock(&phba->scsi_buf_list_put_lock);
3686
- list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
3687
- list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
3688
- spin_unlock(&phba->scsi_buf_list_put_lock);
3689
- spin_unlock_irq(&phba->scsi_buf_list_get_lock);
4080
+ io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4081
+ phba->sli4_hba.io_xri_max = io_xri_max;
36904082
36914083 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3692
- "6060 Current allocated SCSI xri-sgl count:%d, "
3693
- "maximum SCSI xri count:%d (split:%d)\n",
3694
- phba->sli4_hba.scsi_xri_cnt,
3695
- phba->sli4_hba.scsi_xri_max, phba->cfg_xri_split);
4084
+ "6074 Current allocated XRI sgl count:%d, "
4085
+ "maximum XRI count:%d\n",
4086
+ phba->sli4_hba.io_xri_cnt,
4087
+ phba->sli4_hba.io_xri_max);
36964088
3697
- if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
3698
- /* max scsi xri shrinked below the allocated scsi buffers */
3699
- scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
3700
- phba->sli4_hba.scsi_xri_max;
3701
- /* release the extra allocated scsi buffers */
3702
- for (i = 0; i < scsi_xri_cnt; i++) {
3703
- list_remove_head(&scsi_sgl_list, psb,
3704
- struct lpfc_scsi_buf, list);
3705
- if (psb) {
4089
+ cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4090
+
4091
+ if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4092
+ /* max nvme xri shrunk below the allocated nvme buffers */
4093
+ io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4094
+ phba->sli4_hba.io_xri_max;
4095
+ /* release the extra allocated nvme buffers */
4096
+ for (i = 0; i < io_xri_cnt; i++) {
4097
+ list_remove_head(&io_sgl_list, lpfc_ncmd,
4098
+ struct lpfc_io_buf, list);
4099
+ if (lpfc_ncmd) {
37064100 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3707
- psb->data, psb->dma_handle);
3708
- kfree(psb);
4101
+ lpfc_ncmd->data,
4102
+ lpfc_ncmd->dma_handle);
4103
+ kfree(lpfc_ncmd);
37094104 }
37104105 }
3711
- spin_lock_irq(&phba->scsi_buf_list_get_lock);
3712
- phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
3713
- spin_unlock_irq(&phba->scsi_buf_list_get_lock);
4106
+ phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
37144107 }
37154108
3716
- /* update xris associated to remaining allocated scsi buffers */
3717
- psb = NULL;
3718
- psb_next = NULL;
3719
- list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
4109
+ /* update xris associated to remaining allocated nvme buffers */
4110
+ lpfc_ncmd = NULL;
4111
+ lpfc_ncmd_next = NULL;
4112
+ phba->sli4_hba.io_xri_cnt = cnt;
4113
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4114
+ &io_sgl_list, list) {
37204115 lxri = lpfc_sli4_next_xritag(phba);
37214116 if (lxri == NO_XRI) {
3722
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3723
- "2560 Failed to allocate xri for "
3724
- "scsi buffer\n");
4117
+ lpfc_printf_log(phba, KERN_ERR,
4118
+ LOG_TRACE_EVENT,
4119
+ "6075 Failed to allocate xri for "
4120
+ "nvme buffer\n");
37254121 rc = -ENOMEM;
37264122 goto out_free_mem;
37274123 }
3728
- psb->cur_iocbq.sli4_lxritag = lxri;
3729
- psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4124
+ lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4125
+ lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
37304126 }
3731
- spin_lock_irq(&phba->scsi_buf_list_get_lock);
3732
- spin_lock(&phba->scsi_buf_list_put_lock);
3733
- list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
3734
- INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
3735
- spin_unlock(&phba->scsi_buf_list_put_lock);
3736
- spin_unlock_irq(&phba->scsi_buf_list_get_lock);
4127
+ cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
37374128 return 0;
37384129
37394130 out_free_mem:
3740
- lpfc_scsi_free(phba);
4131
+ lpfc_io_free(phba);
37414132 return rc;
4133
+}
4134
+
4135
+/**
4136
+ * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4137
+ * @phba: Pointer to lpfc hba data structure.
4138
+ * @num_to_alloc: The requested number of buffers to allocate.
4139
+ *
4140
+ * This routine allocates nvme buffers for a device with SLI-4 interface spec.
4141
+ * The nvme buffer contains all the necessary information needed to initiate
4142
+ * an I/O. After allocating up to @num_to_alloc IO buffers and putting
4143
+ * them on a list, it posts them to the port by using SGL block post.
4144
+ *
4145
+ * Return codes:
4146
+ * int - number of IO buffers that were allocated and posted.
4147
+ * 0 = failure, less than num_to_alloc is a partial failure.
4148
+ **/
4149
+int
4150
+lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4151
+{
4152
+ struct lpfc_io_buf *lpfc_ncmd;
4153
+ struct lpfc_iocbq *pwqeq;
4154
+ uint16_t iotag, lxri = 0;
4155
+ int bcnt, num_posted;
4156
+ LIST_HEAD(prep_nblist);
4157
+ LIST_HEAD(post_nblist);
4158
+ LIST_HEAD(nvme_nblist);
4159
+
4160
+ phba->sli4_hba.io_xri_cnt = 0;
4161
+ for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4162
+ lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4163
+ if (!lpfc_ncmd)
4164
+ break;
4165
+ /*
4166
+ * Get memory from the pci pool to map the virt space to
4167
+ * pci bus space for an I/O. The DMA buffer includes the
4168
+ * number of SGE's necessary to support the sg_tablesize.
4169
+ */
4170
+ lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4171
+ GFP_KERNEL,
4172
+ &lpfc_ncmd->dma_handle);
4173
+ if (!lpfc_ncmd->data) {
4174
+ kfree(lpfc_ncmd);
4175
+ break;
4176
+ }
4177
+
4178
+ if (phba->cfg_xpsgl && !phba->nvmet_support) {
4179
+ INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4180
+ } else {
4181
+ /*
4182
+ * 4K Page alignment is CRITICAL to BlockGuard, double
4183
+ * check to be sure.
4184
+ */
4185
+ if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4186
+ (((unsigned long)(lpfc_ncmd->data) &
4187
+ (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4188
+ lpfc_printf_log(phba, KERN_ERR,
4189
+ LOG_TRACE_EVENT,
4190
+ "3369 Memory alignment err: "
4191
+ "addr=%lx\n",
4192
+ (unsigned long)lpfc_ncmd->data);
4193
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4194
+ lpfc_ncmd->data,
4195
+ lpfc_ncmd->dma_handle);
4196
+ kfree(lpfc_ncmd);
4197
+ break;
4198
+ }
4199
+ }
4200
+
4201
+ INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4202
+
4203
+ lxri = lpfc_sli4_next_xritag(phba);
4204
+ if (lxri == NO_XRI) {
4205
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4206
+ lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4207
+ kfree(lpfc_ncmd);
4208
+ break;
4209
+ }
4210
+ pwqeq = &lpfc_ncmd->cur_iocbq;
4211
+
4212
+ /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
4213
+ iotag = lpfc_sli_next_iotag(phba, pwqeq);
4214
+ if (iotag == 0) {
4215
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4216
+ lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4217
+ kfree(lpfc_ncmd);
4218
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4219
+ "6121 Failed to allocate IOTAG for"
4220
+ " XRI:0x%x\n", lxri);
4221
+ lpfc_sli4_free_xri(phba, lxri);
4222
+ break;
4223
+ }
4224
+ pwqeq->sli4_lxritag = lxri;
4225
+ pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4226
+ pwqeq->context1 = lpfc_ncmd;
4227
+
4228
+ /* Initialize local short-hand pointers. */
4229
+ lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4230
+ lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4231
+ lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
4232
+ spin_lock_init(&lpfc_ncmd->buf_lock);
4233
+
4234
+ /* add the nvme buffer to a post list */
4235
+ list_add_tail(&lpfc_ncmd->list, &post_nblist);
4236
+ phba->sli4_hba.io_xri_cnt++;
4237
+ }
4238
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4239
+ "6114 Allocate %d out of %d requested new NVME "
4240
+ "buffers\n", bcnt, num_to_alloc);
4241
+
4242
+ /* post the list of nvme buffer sgls to port if available */
4243
+ if (!list_empty(&post_nblist))
4244
+ num_posted = lpfc_sli4_post_io_sgl_list(
4245
+ phba, &post_nblist, bcnt);
4246
+ else
4247
+ num_posted = 0;
4248
+
4249
+ return num_posted;
37424250 }
37434251
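In the allocation loop above, the BlockGuard path double-checks that each DMA buffer really is 4K (SLI4_PAGE_SIZE) aligned by masking the low address bits. The same mask test in a self-contained form (the page size constant and helper name below are illustrative stand-ins, not the driver's):

#define _POSIX_C_SOURCE 200112L
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096UL  /* stands in for SLI4_PAGE_SIZE */

/* Return nonzero when addr is not PAGE_SZ aligned. */
static int misaligned(const void *addr)
{
        return ((uintptr_t)addr & (PAGE_SZ - 1)) != 0;
}

int main(void)
{
        void *p = NULL;

        if (posix_memalign(&p, PAGE_SZ, PAGE_SZ))
                return 1;
        printf("aligned buffer -> misaligned=%d\n", misaligned(p));
        printf("buffer + 16    -> misaligned=%d\n", misaligned((char *)p + 16));
        free(p);
        return 0;
}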
37444252 static uint64_t
....@@ -3758,7 +4266,7 @@
37584266 lpfc_read_nv(phba, mboxq);
37594267 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
37604268 if (rc != MBX_SUCCESS) {
3761
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4269
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
37624270 "6019 Mailbox failed , mbxCmd x%x "
37634271 "READ_NV, mbxStatus x%x\n",
37644272 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
....@@ -3774,111 +4282,6 @@
37744282 return be64_to_cpu(wwn);
37754283 else
37764284 return rol64(wwn, 32);
3777
-}
3778
-
3779
-/**
3780
- * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping
3781
- * @phba: pointer to lpfc hba data structure.
3782
- *
3783
- * This routine first calculates the sizes of the current els and allocated
3784
- * scsi sgl lists, and then goes through all sgls to updates the physical
3785
- * XRIs assigned due to port function reset. During port initialization, the
3786
- * current els and allocated scsi sgl lists are 0s.
3787
- *
3788
- * Return codes
3789
- * 0 - successful (for now, it always returns 0)
3790
- **/
3791
-int
3792
-lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
3793
-{
3794
- struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
3795
- uint16_t i, lxri, els_xri_cnt;
3796
- uint16_t nvme_xri_cnt, nvme_xri_max;
3797
- LIST_HEAD(nvme_sgl_list);
3798
- int rc, cnt;
3799
-
3800
- phba->total_nvme_bufs = 0;
3801
- phba->get_nvme_bufs = 0;
3802
- phba->put_nvme_bufs = 0;
3803
-
3804
- if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
3805
- return 0;
3806
- /*
3807
- * update on pci function's allocated nvme xri-sgl list
3808
- */
3809
-
3810
- /* maximum number of xris available for nvme buffers */
3811
- els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
3812
- nvme_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
3813
- phba->sli4_hba.nvme_xri_max = nvme_xri_max;
3814
- phba->sli4_hba.nvme_xri_max -= phba->sli4_hba.scsi_xri_max;
3815
-
3816
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3817
- "6074 Current allocated NVME xri-sgl count:%d, "
3818
- "maximum NVME xri count:%d\n",
3819
- phba->sli4_hba.nvme_xri_cnt,
3820
- phba->sli4_hba.nvme_xri_max);
3821
-
3822
- spin_lock_irq(&phba->nvme_buf_list_get_lock);
3823
- spin_lock(&phba->nvme_buf_list_put_lock);
3824
- list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
3825
- list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
3826
- cnt = phba->get_nvme_bufs + phba->put_nvme_bufs;
3827
- phba->get_nvme_bufs = 0;
3828
- phba->put_nvme_bufs = 0;
3829
- spin_unlock(&phba->nvme_buf_list_put_lock);
3830
- spin_unlock_irq(&phba->nvme_buf_list_get_lock);
3831
-
3832
- if (phba->sli4_hba.nvme_xri_cnt > phba->sli4_hba.nvme_xri_max) {
3833
- /* max nvme xri shrunk below the allocated nvme buffers */
3834
- spin_lock_irq(&phba->nvme_buf_list_get_lock);
3835
- nvme_xri_cnt = phba->sli4_hba.nvme_xri_cnt -
3836
- phba->sli4_hba.nvme_xri_max;
3837
- spin_unlock_irq(&phba->nvme_buf_list_get_lock);
3838
- /* release the extra allocated nvme buffers */
3839
- for (i = 0; i < nvme_xri_cnt; i++) {
3840
- list_remove_head(&nvme_sgl_list, lpfc_ncmd,
3841
- struct lpfc_nvme_buf, list);
3842
- if (lpfc_ncmd) {
3843
- dma_pool_free(phba->lpfc_sg_dma_buf_pool,
3844
- lpfc_ncmd->data,
3845
- lpfc_ncmd->dma_handle);
3846
- kfree(lpfc_ncmd);
3847
- }
3848
- }
3849
- spin_lock_irq(&phba->nvme_buf_list_get_lock);
3850
- phba->sli4_hba.nvme_xri_cnt -= nvme_xri_cnt;
3851
- spin_unlock_irq(&phba->nvme_buf_list_get_lock);
3852
- }
3853
-
3854
- /* update xris associated to remaining allocated nvme buffers */
3855
- lpfc_ncmd = NULL;
3856
- lpfc_ncmd_next = NULL;
3857
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3858
- &nvme_sgl_list, list) {
3859
- lxri = lpfc_sli4_next_xritag(phba);
3860
- if (lxri == NO_XRI) {
3861
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3862
- "6075 Failed to allocate xri for "
3863
- "nvme buffer\n");
3864
- rc = -ENOMEM;
3865
- goto out_free_mem;
3866
- }
3867
- lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
3868
- lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
3869
- }
3870
- spin_lock_irq(&phba->nvme_buf_list_get_lock);
3871
- spin_lock(&phba->nvme_buf_list_put_lock);
3872
- list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
3873
- phba->get_nvme_bufs = cnt;
3874
- INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
3875
- spin_unlock(&phba->nvme_buf_list_put_lock);
3876
- spin_unlock_irq(&phba->nvme_buf_list_get_lock);
3877
- return 0;
3878
-
3879
-out_free_mem:
3880
- lpfc_nvme_free(phba);
3881
- return rc;
38824285 }
38834286
38844287 /**
....@@ -3902,6 +4305,7 @@
39024305 {
39034306 struct lpfc_vport *vport;
39044307 struct Scsi_Host *shost = NULL;
4308
+ struct scsi_host_template *template;
39054309 int error = 0;
39064310 int i;
39074311 uint64_t wwn;
....@@ -3922,7 +4326,8 @@
39224326
39234327 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
39244328 if (wwn == lpfc_no_hba_reset[i]) {
3925
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4329
+ lpfc_printf_log(phba, KERN_ERR,
4330
+ LOG_TRACE_EVENT,
39264331 "6020 Setting use_no_reset port=%llx\n",
39274332 wwn);
39284333 use_no_reset_hba = true;
....@@ -3930,22 +4335,50 @@
39304335 }
39314336 }
39324337
3933
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
3934
- if (dev != &phba->pcidev->dev) {
3935
- shost = scsi_host_alloc(&lpfc_vport_template,
3936
- sizeof(struct lpfc_vport));
4338
+ /* Seed template for SCSI host registration */
4339
+ if (dev == &phba->pcidev->dev) {
4340
+ template = &phba->port_template;
4341
+
4342
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4343
+ /* Seed physical port template */
4344
+ memcpy(template, &lpfc_template, sizeof(*template));
4345
+
4346
+ if (use_no_reset_hba) {
4347
+ /* template is for a no reset SCSI Host */
4348
+ template->max_sectors = 0xffff;
4349
+ template->eh_host_reset_handler = NULL;
4350
+ }
4351
+
4352
+ /* Template for all vports this physical port creates */
4353
+ memcpy(&phba->vport_template, &lpfc_template,
4354
+ sizeof(*template));
4355
+ phba->vport_template.max_sectors = 0xffff;
4356
+ phba->vport_template.shost_attrs = lpfc_vport_attrs;
4357
+ phba->vport_template.eh_bus_reset_handler = NULL;
4358
+ phba->vport_template.eh_host_reset_handler = NULL;
4359
+ phba->vport_template.vendor_id = 0;
4360
+
4361
+ /* Initialize the host templates with the updated values */
4362
+ if (phba->sli_rev == LPFC_SLI_REV4) {
4363
+ template->sg_tablesize = phba->cfg_scsi_seg_cnt;
4364
+ phba->vport_template.sg_tablesize =
4365
+ phba->cfg_scsi_seg_cnt;
4366
+ } else {
4367
+ template->sg_tablesize = phba->cfg_sg_seg_cnt;
4368
+ phba->vport_template.sg_tablesize =
4369
+ phba->cfg_sg_seg_cnt;
4370
+ }
4371
+
39374372 } else {
3938
- if (!use_no_reset_hba)
3939
- shost = scsi_host_alloc(&lpfc_template,
3940
- sizeof(struct lpfc_vport));
3941
- else
3942
- shost = scsi_host_alloc(&lpfc_template_no_hr,
3943
- sizeof(struct lpfc_vport));
4373
+ /* NVMET is for physical port only */
4374
+ memcpy(template, &lpfc_template_nvme,
4375
+ sizeof(*template));
39444376 }
3945
- } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
3946
- shost = scsi_host_alloc(&lpfc_template_nvme,
3947
- sizeof(struct lpfc_vport));
4377
+ } else {
4378
+ template = &phba->vport_template;
39484379 }
4380
+
4381
+ shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
39494382 if (!shost)
39504383 goto out;
39514384
....@@ -3956,17 +4389,35 @@
39564389 vport->fc_rscn_flush = 0;
39574390 lpfc_get_vport_cfgparam(vport);
39584391
4392
+ /* Adjust value in vport */
4393
+ vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4394
+
39594395 shost->unique_id = instance;
39604396 shost->max_id = LPFC_MAX_TARGET;
39614397 shost->max_lun = vport->cfg_max_luns;
39624398 shost->this_id = -1;
39634399 shost->max_cmd_len = 16;
3964
- shost->nr_hw_queues = phba->cfg_fcp_io_channel;
4400
+
39654401 if (phba->sli_rev == LPFC_SLI_REV4) {
4402
+ if (!phba->cfg_fcp_mq_threshold ||
4403
+ phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4404
+ phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4405
+
4406
+ shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4407
+ phba->cfg_fcp_mq_threshold);
4408
+
39664409 shost->dma_boundary =
39674410 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
3968
- shost->sg_tablesize = phba->cfg_sg_seg_cnt;
3969
- }
4411
+
4412
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
4413
+ shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4414
+ else
4415
+ shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
4416
+ } else
4417
+ /* SLI-3 has a limited number of hardware queues (3),
4418
+ * thus there is only one for FCP processing.
4419
+ */
4420
+ shost->nr_hw_queues = 1;
39704421
39714422 /*
39724423 * Set initial can_queue value since 0 is no longer supported and
....@@ -3982,6 +4433,12 @@
39824433 vport->port_type = LPFC_PHYSICAL_PORT;
39834434 }
39844435
4436
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4437
+ "9081 CreatePort TMPLATE type %x TBLsize %d "
4438
+ "SEGcnt %d/%d\n",
4439
+ vport->port_type, shost->sg_tablesize,
4440
+ phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4441
+
39854442 /* Initialize all internally managed lists. */
39864443 INIT_LIST_HEAD(&vport->fc_nodes);
39874444 INIT_LIST_HEAD(&vport->rcv_buffer_list);
....@@ -3993,13 +4450,16 @@
39934450
39944451 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
39954452
4453
+ if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4454
+ lpfc_setup_bg(phba, shost);
4455
+
39964456 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
39974457 if (error)
39984458 goto out_put_shost;
39994459
4000
- spin_lock_irq(&phba->hbalock);
4460
+ spin_lock_irq(&phba->port_list_lock);
40014461 list_add_tail(&vport->listentry, &phba->port_list);
4002
- spin_unlock_irq(&phba->hbalock);
4462
+ spin_unlock_irq(&phba->port_list_lock);
40034463 return vport;
40044464
40054465 out_put_shost:
....@@ -4025,9 +4485,9 @@
40254485 fc_remove_host(shost);
40264486 scsi_remove_host(shost);
40274487
4028
- spin_lock_irq(&phba->hbalock);
4488
+ spin_lock_irq(&phba->port_list_lock);
40294489 list_del_init(&vport->listentry);
4030
- spin_unlock_irq(&phba->hbalock);
4490
+ spin_unlock_irq(&phba->port_list_lock);
40314491
40324492 lpfc_cleanup(vport);
40334493 return;
....@@ -4111,6 +4571,39 @@
41114571 return stat;
41124572 }
41134573
4574
+static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4575
+{
4576
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4577
+ struct lpfc_hba *phba = vport->phba;
4578
+
4579
+ fc_host_supported_speeds(shost) = 0;
4580
+ /*
4581
+ * Avoid reporting supported link speed for FCoE as it can't be
4582
+ * controlled via FCoE.
4583
+ */
4584
+ if (phba->hba_flag & HBA_FCOE_MODE)
4585
+ return;
4586
+
4587
+ if (phba->lmt & LMT_128Gb)
4588
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4589
+ if (phba->lmt & LMT_64Gb)
4590
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4591
+ if (phba->lmt & LMT_32Gb)
4592
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4593
+ if (phba->lmt & LMT_16Gb)
4594
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4595
+ if (phba->lmt & LMT_10Gb)
4596
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4597
+ if (phba->lmt & LMT_8Gb)
4598
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4599
+ if (phba->lmt & LMT_4Gb)
4600
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4601
+ if (phba->lmt & LMT_2Gb)
4602
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4603
+ if (phba->lmt & LMT_1Gb)
4604
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4605
+}
4606
+
41144607 /**
41154608 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
41164609 * @shost: pointer to SCSI host data structure.
....@@ -4138,23 +4631,7 @@
41384631 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
41394632 sizeof fc_host_symbolic_name(shost));
41404633
4141
- fc_host_supported_speeds(shost) = 0;
4142
- if (phba->lmt & LMT_64Gb)
4143
- fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4144
- if (phba->lmt & LMT_32Gb)
4145
- fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4146
- if (phba->lmt & LMT_16Gb)
4147
- fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4148
- if (phba->lmt & LMT_10Gb)
4149
- fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4150
- if (phba->lmt & LMT_8Gb)
4151
- fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4152
- if (phba->lmt & LMT_4Gb)
4153
- fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4154
- if (phba->lmt & LMT_2Gb)
4155
- fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4156
- if (phba->lmt & LMT_1Gb)
4157
- fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4634
+ lpfc_host_supported_speeds_set(shost);
41584635
41594636 fc_host_maxframe_size(shost) =
41604637 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
....@@ -4210,7 +4687,8 @@
42104687 {
42114688 /* Reset some HBA SLI4 setup states */
42124689 lpfc_stop_hba_timers(phba);
4213
- phba->pport->work_port_events = 0;
4690
+ if (phba->pport)
4691
+ phba->pport->work_port_events = 0;
42144692 phba->sli4_hba.intr_enable = 0;
42154693 }
42164694
....@@ -4253,7 +4731,7 @@
42534731
42544732 /**
42554733 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
4256
- * @ptr: Map to lpfc_hba data structure pointer.
4734
+ * @t: Timer context used to obtain the pointer to lpfc hba data structure.
42574735 *
42584736 * This routine is invoked when waiting for FCF table rediscover has been
42594737 * timed out. If new FCF record(s) has (have) been discovered during the
....@@ -4301,7 +4779,7 @@
43014779 case LPFC_ASYNC_LINK_FAULT_LR_LRR:
43024780 break;
43034781 default:
4304
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4782
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
43054783 "0398 Unknown link fault code: x%x\n",
43064784 bf_get(lpfc_acqe_link_fault, acqe_link));
43074785 break;
....@@ -4337,7 +4815,7 @@
43374815 att_type = LPFC_ATT_LINK_UP;
43384816 break;
43394817 default:
4340
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4818
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
43414819 "0399 Invalid link attention type: x%x\n",
43424820 bf_get(lpfc_acqe_link_status, acqe_link));
43434821 att_type = LPFC_ATT_RESERVED;
....@@ -4439,6 +4917,9 @@
44394917 case LPFC_ASYNC_LINK_SPEED_40GBPS:
44404918 port_speed = 40000;
44414919 break;
4920
+ case LPFC_ASYNC_LINK_SPEED_100GBPS:
4921
+ port_speed = 100000;
4922
+ break;
44424923 default:
44434924 port_speed = 0;
44444925 }
....@@ -4471,6 +4952,9 @@
44714952 break;
44724953 case LPFC_FC_LA_SPEED_64G:
44734954 port_speed = 64000;
4955
+ break;
4956
+ case LPFC_FC_LA_SPEED_128G:
4957
+ port_speed = 128000;
44744958 break;
44754959 default:
44764960 port_speed = 0;
....@@ -4506,19 +4990,19 @@
45064990 phba->fcoe_eventtag = acqe_link->event_tag;
45074991 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
45084992 if (!pmb) {
4509
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4993
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
45104994 "0395 The mboxq allocation failed\n");
45114995 return;
45124996 }
45134997 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
45144998 if (!mp) {
4515
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4999
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
45165000 "0396 The lpfc_dmabuf allocation failed\n");
45175001 goto out_free_pmb;
45185002 }
45195003 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
45205004 if (!mp->virt) {
4521
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5005
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
45225006 "0397 The mbuf allocation failed\n");
45235007 goto out_free_dmabuf;
45245008 }
....@@ -4614,6 +5098,135 @@
46145098 }
46155099
46165100 /**
5101
+ * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
5102
+ * topology.
5103
+ * @phba: pointer to lpfc hba data structure.
5104
+ * @speed_code: asynchronous event link speed code.
5105
+ *
5106
+ * This routine parses the given SLI4 async event link speed code into the
5107
+ * value of Read topology link speed.
5108
+ *
5109
+ * Return: link speed in terms of Read topology.
5110
+ **/
5111
+static uint8_t
5112
+lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5113
+{
5114
+ uint8_t port_speed;
5115
+
5116
+ switch (speed_code) {
5117
+ case LPFC_FC_LA_SPEED_1G:
5118
+ port_speed = LPFC_LINK_SPEED_1GHZ;
5119
+ break;
5120
+ case LPFC_FC_LA_SPEED_2G:
5121
+ port_speed = LPFC_LINK_SPEED_2GHZ;
5122
+ break;
5123
+ case LPFC_FC_LA_SPEED_4G:
5124
+ port_speed = LPFC_LINK_SPEED_4GHZ;
5125
+ break;
5126
+ case LPFC_FC_LA_SPEED_8G:
5127
+ port_speed = LPFC_LINK_SPEED_8GHZ;
5128
+ break;
5129
+ case LPFC_FC_LA_SPEED_16G:
5130
+ port_speed = LPFC_LINK_SPEED_16GHZ;
5131
+ break;
5132
+ case LPFC_FC_LA_SPEED_32G:
5133
+ port_speed = LPFC_LINK_SPEED_32GHZ;
5134
+ break;
5135
+ case LPFC_FC_LA_SPEED_64G:
5136
+ port_speed = LPFC_LINK_SPEED_64GHZ;
5137
+ break;
5138
+ case LPFC_FC_LA_SPEED_128G:
5139
+ port_speed = LPFC_LINK_SPEED_128GHZ;
5140
+ break;
5141
+ case LPFC_FC_LA_SPEED_256G:
5142
+ port_speed = LPFC_LINK_SPEED_256GHZ;
5143
+ break;
5144
+ default:
5145
+ port_speed = 0;
5146
+ break;
5147
+ }
5148
+
5149
+ return port_speed;
5150
+}
5151
+
5152
+#define trunk_link_status(__idx)\
5153
+ bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5154
+ ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
5155
+ "Link up" : "Link down") : "NA"
5156
+/* Did port __idx report an error */
5157
+#define trunk_port_fault(__idx)\
5158
+ bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
5159
+ (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
5160
+
5161
+static void
5162
+lpfc_update_trunk_link_status(struct lpfc_hba *phba,
5163
+ struct lpfc_acqe_fc_la *acqe_fc)
5164
+{
5165
+ uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
5166
+ uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
5167
+
5168
+ phba->sli4_hba.link_state.speed =
5169
+ lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
5170
+ bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5171
+
5172
+ phba->sli4_hba.link_state.logical_speed =
5173
+ bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
5174
+ /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
5175
+ phba->fc_linkspeed =
5176
+ lpfc_async_link_speed_to_read_top(
5177
+ phba,
5178
+ bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
5179
+
5180
+ if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
5181
+ phba->trunk_link.link0.state =
5182
+ bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
5183
+ ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5184
+ phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
5185
+ }
5186
+ if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
5187
+ phba->trunk_link.link1.state =
5188
+ bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
5189
+ ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5190
+ phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
5191
+ }
5192
+ if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
5193
+ phba->trunk_link.link2.state =
5194
+ bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
5195
+ ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5196
+ phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
5197
+ }
5198
+ if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
5199
+ phba->trunk_link.link3.state =
5200
+ bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
5201
+ ? LPFC_LINK_UP : LPFC_LINK_DOWN;
5202
+ phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
5203
+ }
5204
+
5205
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5206
+ "2910 Async FC Trunking Event - Speed:%d\n"
5207
+ "\tLogical speed:%d "
5208
+ "port0: %s port1: %s port2: %s port3: %s\n",
5209
+ phba->sli4_hba.link_state.speed,
5210
+ phba->sli4_hba.link_state.logical_speed,
5211
+ trunk_link_status(0), trunk_link_status(1),
5212
+ trunk_link_status(2), trunk_link_status(3));
5213
+
5214
+ if (port_fault)
5215
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5216
+ "3202 trunk error:0x%x (%s) seen on port0:%s "
5217
+ /*
5218
+ * SLI-4: We have only 0xA error codes
5219
+ * defined as of now. print an appropriate
5220
+ * message in case driver needs to be updated.
5221
+ */
5222
+ "port1:%s port2:%s port3:%s\n", err, err > 0xA ?
5223
+ "UNDEFINED. update driver." : trunk_errmsg[err],
5224
+ trunk_port_fault(0), trunk_port_fault(1),
5225
+ trunk_port_fault(2), trunk_port_fault(3));
5226
+}
5227
+
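lpfc_update_trunk_link_status() above treats the trunk linkmask as a per-port bitmap: bit N set in port_fault means port N reported the fault code carried in the event. A small sketch of that per-port bit test (the port count and variable names are illustrative):

#include <stdio.h>

#define TRUNK_PORTS 4

int main(void)
{
        unsigned int port_fault = 0x5;  /* example: ports 0 and 2 faulted */
        unsigned int err = 0x2;         /* example fault code from the event */
        int i;

        for (i = 0; i < TRUNK_PORTS; i++)
                printf("port%d fault: %s (code 0x%x)\n", i,
                       (port_fault & (1u << i)) ? "YES" : "NO",
                       (port_fault & (1u << i)) ? err : 0u);
        return 0;
}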
5228
+
5229
+/**
46175230 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
46185231 * @phba: pointer to lpfc hba data structure.
46195232 * @acqe_fc: pointer to the async fc completion queue entry.
....@@ -4633,11 +5246,18 @@
46335246
46345247 if (bf_get(lpfc_trailer_type, acqe_fc) !=
46355248 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
4636
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5249
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
46375250 "2895 Non FC link Event detected.(%d)\n",
46385251 bf_get(lpfc_trailer_type, acqe_fc));
46395252 return;
46405253 }
5254
+
5255
+ if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
5256
+ LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
5257
+ lpfc_update_trunk_link_status(phba, acqe_fc);
5258
+ return;
5259
+ }
5260
+
46415261 /* Keep the link status for extra SLI4 state machine reference */
46425262 phba->sli4_hba.link_state.speed =
46435263 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
....@@ -4653,8 +5273,14 @@
46535273 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
46545274 phba->sli4_hba.link_state.fault =
46555275 bf_get(lpfc_acqe_link_fault, acqe_fc);
4656
- phba->sli4_hba.link_state.logical_speed =
5276
+
5277
+ if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
5278
+ LPFC_FC_LA_TYPE_LINK_DOWN)
5279
+ phba->sli4_hba.link_state.logical_speed = 0;
5280
+ else if (!phba->sli4_hba.conf_trunk)
5281
+ phba->sli4_hba.link_state.logical_speed =
46575282 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
5283
+
46585284 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
46595285 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
46605286 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
....@@ -4668,19 +5294,19 @@
46685294 phba->sli4_hba.link_state.fault);
46695295 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
46705296 if (!pmb) {
4671
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5297
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
46725298 "2897 The mboxq allocation failed\n");
46735299 return;
46745300 }
46755301 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
46765302 if (!mp) {
4677
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5303
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
46785304 "2898 The lpfc_dmabuf allocation failed\n");
46795305 goto out_free_pmb;
46805306 }
46815307 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
46825308 if (!mp->virt) {
4683
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5309
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
46845310 "2899 The mbuf allocation failed\n");
46855311 goto out_free_dmabuf;
46865312 }
....@@ -4752,7 +5378,7 @@
47525378 /**
47535379 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
47545380 * @phba: pointer to lpfc hba data structure.
4755
- * @acqe_fc: pointer to the async SLI completion queue entry.
5381
+ * @acqe_sli: pointer to the async SLI completion queue entry.
47565382 *
47575383 * This routine is to handle the SLI4 asynchronous SLI events.
47585384 **/
....@@ -4767,14 +5393,16 @@
47675393 struct temp_event temp_event_data;
47685394 struct lpfc_acqe_misconfigured_event *misconfigured;
47695395 struct Scsi_Host *shost;
5396
+ struct lpfc_vport **vports;
5397
+ int rc, i;
47705398
47715399 evt_type = bf_get(lpfc_trailer_type, acqe_sli);
47725400
47735401 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4774
- "2901 Async SLI event - Event Data1:x%08x Event Data2:"
4775
- "x%08x SLI Event Type:%d\n",
5402
+ "2901 Async SLI event - Type:%d, Event Data: x%08x "
5403
+ "x%08x x%08x x%08x\n", evt_type,
47765404 acqe_sli->event_data1, acqe_sli->event_data2,
4777
- evt_type);
5405
+ acqe_sli->reserved, acqe_sli->trailer);
47785406
47795407 port_name = phba->Port[0];
47805408 if (port_name == 0x00)
....@@ -4845,7 +5473,7 @@
48455473 &misconfigured->theEvent);
48465474 break;
48475475 default:
4848
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5476
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
48495477 "3296 "
48505478 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
48515479 "event: Invalid link %d",
....@@ -4892,6 +5520,26 @@
48925520 sprintf(message, "Unknown event status x%02x", status);
48935521 break;
48945522 }
5523
+
5524
+ /* Issue READ_CONFIG mbox command to refresh supported speeds */
5525
+ rc = lpfc_sli4_read_config(phba);
5526
+ if (rc) {
5527
+ phba->lmt = 0;
5528
+ lpfc_printf_log(phba, KERN_ERR,
5529
+ LOG_TRACE_EVENT,
5530
+ "3194 Unable to retrieve supported "
5531
+ "speeds, rc = 0x%x\n", rc);
5532
+ }
5533
+ vports = lpfc_create_vport_work_array(phba);
5534
+ if (vports != NULL) {
5535
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL;
5536
+ i++) {
5537
+ shost = lpfc_shost_from_vport(vports[i]);
5538
+ lpfc_host_supported_speeds_set(shost);
5539
+ }
5540
+ }
5541
+ lpfc_destroy_vport_work_array(phba, vports);
5542
+
48955543 phba->sli4_hba.lnk_info.optic_state = status;
48965544 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
48975545 "3176 Port Name %c %s\n", port_name, message);
....@@ -4902,11 +5550,26 @@
49025550 "Event Data1:x%08x Event Data2: x%08x\n",
49035551 acqe_sli->event_data1, acqe_sli->event_data2);
49045552 break;
5553
+ case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
5554
+ /* Misconfigured WWN. Reports that the SLI Port is configured
5555
+ * to use FA-WWN, but the attached device doesn't support it.
5556
+ * No driver action is required.
5557
+ * Event Data1 - N.A, Event Data2 - N.A
5558
+ */
5559
+ lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
5560
+ "2699 Misconfigured FA-WWN - Attached device does "
5561
+ "not support FA-WWN\n");
5562
+ break;
5563
+ case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
5564
+ /* EEPROM failure. No driver action is required */
5565
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5566
+ "2518 EEPROM failure - "
5567
+ "Event Data1: x%08x Event Data2: x%08x\n",
5568
+ acqe_sli->event_data1, acqe_sli->event_data2);
5569
+ break;
49055570 default:
49065571 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4907
- "3193 Async SLI event - Event Data1:x%08x Event Data2:"
4908
- "x%08x SLI Event Type:%d\n",
4909
- acqe_sli->event_data1, acqe_sli->event_data2,
5572
+ "3193 Unrecognized SLI event, type: 0x%x",
49105573 evt_type);
49115574 break;
49125575 }
....@@ -4971,7 +5634,7 @@
49715634
49725635 /**
49735636 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
4974
- * @vport: pointer to lpfc hba data structure.
5637
+ * @phba: pointer to lpfc hba data structure.
49755638 *
49765639 * This routine is to perform Clear Virtual Link (CVL) on all vports in
49775640 * response to a FCF dead event.
....@@ -4992,7 +5655,7 @@
49925655 /**
49935656 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
49945657 * @phba: pointer to lpfc hba data structure.
4995
- * @acqe_link: pointer to the async fcoe completion queue entry.
5658
+ * @acqe_fip: pointer to the async fcoe completion queue entry.
49965659 *
49975660 * This routine is to handle the SLI4 asynchronous fcoe event.
49985661 **/
....@@ -5015,8 +5678,7 @@
50155678 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
50165679 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
50175680 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
5018
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5019
- LOG_DISCOVERY,
5681
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
50205682 "2546 New FCF event, evt_tag:x%x, "
50215683 "index:x%x\n",
50225684 acqe_fip->event_tag,
....@@ -5069,23 +5731,24 @@
50695731 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
50705732 LPFC_FCOE_FCF_GET_FIRST);
50715733 if (rc)
5072
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5734
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
50735735 "2547 Issue FCF scan read FCF mailbox "
50745736 "command failed (x%x)\n", rc);
50755737 break;
50765738
50775739 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
5078
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5079
- "2548 FCF Table full count 0x%x tag 0x%x\n",
5080
- bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
5081
- acqe_fip->event_tag);
5740
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5741
+ "2548 FCF Table full count 0x%x tag 0x%x\n",
5742
+ bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
5743
+ acqe_fip->event_tag);
50825744 break;
50835745
50845746 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
50855747 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
5086
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5087
- "2549 FCF (x%x) disconnected from network, "
5088
- "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
5748
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5749
+ "2549 FCF (x%x) disconnected from network, "
5750
+ "tag:x%x\n", acqe_fip->index,
5751
+ acqe_fip->event_tag);
50895752 /*
50905753 * If we are in the middle of FCF failover process, clear
50915754 * the corresponding FCF bit in the roundrobin bitmap.
....@@ -5122,7 +5785,7 @@
51225785 rc = lpfc_sli4_redisc_fcf_table(phba);
51235786 if (rc) {
51245787 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5125
- LOG_DISCOVERY,
5788
+ LOG_TRACE_EVENT,
51265789 "2772 Issue FCF rediscover mailbox "
51275790 "command failed, fail through to FCF "
51285791 "dead event\n");
....@@ -5146,7 +5809,8 @@
51465809 break;
51475810 case LPFC_FIP_EVENT_TYPE_CVL:
51485811 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
5149
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
5812
+ lpfc_printf_log(phba, KERN_ERR,
5813
+ LOG_TRACE_EVENT,
51505814 "2718 Clear Virtual Link Received for VPI 0x%x"
51515815 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
51525816
....@@ -5213,7 +5877,7 @@
52135877 rc = lpfc_sli4_redisc_fcf_table(phba);
52145878 if (rc) {
52155879 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
5216
- LOG_DISCOVERY,
5880
+ LOG_TRACE_EVENT,
52175881 "2774 Issue FCF rediscover "
52185882 "mailbox command failed, "
52195883 "through to CVL event\n");
....@@ -5234,9 +5898,9 @@
52345898 }
52355899 break;
52365900 default:
5237
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5238
- "0288 Unknown FCoE event type 0x%x event tag "
5239
- "0x%x\n", event_type, acqe_fip->event_tag);
5901
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5902
+ "0288 Unknown FCoE event type 0x%x event tag "
5903
+ "0x%x\n", event_type, acqe_fip->event_tag);
52405904 break;
52415905 }
52425906 }
....@@ -5244,7 +5908,7 @@
52445908 /**
52455909 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
52465910 * @phba: pointer to lpfc hba data structure.
5247
- * @acqe_link: pointer to the async dcbx completion queue entry.
5911
+ * @acqe_dcbx: pointer to the async dcbx completion queue entry.
52485912 *
52495913 * This routine is to handle the SLI4 asynchronous dcbx event.
52505914 **/
....@@ -5253,7 +5917,7 @@
52535917 struct lpfc_acqe_dcbx *acqe_dcbx)
52545918 {
52555919 phba->fc_eventTag = acqe_dcbx->event_tag;
5256
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5920
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
52575921 "0290 The SLI4 DCBX asynchronous event is not "
52585922 "handled yet\n");
52595923 }
....@@ -5261,7 +5925,7 @@
52615925 /**
52625926 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
52635927 * @phba: pointer to lpfc hba data structure.
5264
- * @acqe_link: pointer to the async grp5 completion queue entry.
5928
+ * @acqe_grp5: pointer to the async grp5 completion queue entry.
52655929 *
52665930 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
52675931 * is an asynchronous notified of a logical link speed change. The Port
....@@ -5294,18 +5958,21 @@
52945958 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
52955959 {
52965960 struct lpfc_cq_event *cq_event;
5961
+ unsigned long iflags;
52975962
52985963 /* First, declare the async event has been handled */
5299
- spin_lock_irq(&phba->hbalock);
5964
+ spin_lock_irqsave(&phba->hbalock, iflags);
53005965 phba->hba_flag &= ~ASYNC_EVENT;
5301
- spin_unlock_irq(&phba->hbalock);
5966
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
5967
+
53025968 /* Now, handle all the async events */
5969
+ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
53035970 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
5304
- /* Get the first event from the head of the event queue */
5305
- spin_lock_irq(&phba->hbalock);
53065971 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
53075972 cq_event, struct lpfc_cq_event, list);
5308
- spin_unlock_irq(&phba->hbalock);
5973
+ spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
5974
+ iflags);
5975
+
53095976 /* Process the asynchronous event */
53105977 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
53115978 case LPFC_TRAILER_CODE_LINK:
....@@ -5330,15 +5997,19 @@
53305997 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
53315998 break;
53325999 default:
5333
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5334
- "1804 Invalid asynchrous event code: "
6000
+ lpfc_printf_log(phba, KERN_ERR,
6001
+ LOG_TRACE_EVENT,
6002
+ "1804 Invalid asynchronous event code: "
53356003 "x%x\n", bf_get(lpfc_trailer_code,
53366004 &cq_event->cqe.mcqe_cmpl));
53376005 break;
53386006 }
6007
+
53396008 /* Free the completion event processed to the free pool */
53406009 lpfc_sli4_cq_event_release(phba, cq_event);
6010
+ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
53416011 }
6012
+ spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
53426013 }
53436014
53446015 /**
....@@ -5366,7 +6037,7 @@
53666037 "2777 Start post-quiescent FCF table scan\n");
53676038 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
53686039 if (rc)
5369
- lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
6040
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
53706041 "2747 Issue FCF scan read FCF mailbox "
53716042 "command failed 0x%x\n", rc);
53726043 }
....@@ -5437,7 +6108,7 @@
54376108 "0480 Enabled MSI-X interrupt mode.\n");
54386109 break;
54396110 default:
5440
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6111
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
54416112 "0482 Illegal interrupt mode.\n");
54426113 break;
54436114 }
....@@ -5485,7 +6156,7 @@
54856156 out_disable_device:
54866157 pci_disable_device(pdev);
54876158 out_error:
5488
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6159
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
54896160 "1401 Failed to enable pci device\n");
54906161 return -ENODEV;
54916162 }
....@@ -5586,7 +6257,7 @@
55866257
55876258 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
55886259 if (nr_vfn > max_nr_vfn) {
5589
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6260
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
55906261 "3057 Requested vfs (%d) greater than "
55916262 "supported vfs (%d)", nr_vfn, max_nr_vfn);
55926263 return -EINVAL;
....@@ -5625,12 +6296,18 @@
56256296 * Driver resources common to all SLI revisions
56266297 */
56276298 atomic_set(&phba->fast_event_count, 0);
6299
+ atomic_set(&phba->dbg_log_idx, 0);
6300
+ atomic_set(&phba->dbg_log_cnt, 0);
6301
+ atomic_set(&phba->dbg_log_dmping, 0);
56286302 spin_lock_init(&phba->hbalock);
56296303
56306304 /* Initialize ndlp management spinlock */
56316305 spin_lock_init(&phba->ndlp_lock);
56326306
6307
+ /* Initialize port_list spinlock */
6308
+ spin_lock_init(&phba->port_list_lock);
56336309 INIT_LIST_HEAD(&phba->port_list);
6310
+
56346311 INIT_LIST_HEAD(&phba->work_list);
56356312 init_waitqueue_head(&phba->wait_4_mlo_m_q);
56366313
....@@ -5645,24 +6322,11 @@
56456322 "NVME" : " "),
56466323 (phba->nvmet_support ? "NVMET" : " "));
56476324
5648
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
5649
- /* Initialize the scsi buffer list used by driver for scsi IO */
5650
- spin_lock_init(&phba->scsi_buf_list_get_lock);
5651
- INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
5652
- spin_lock_init(&phba->scsi_buf_list_put_lock);
5653
- INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
5654
- }
5655
-
5656
- if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
5657
- (phba->nvmet_support == 0)) {
5658
- /* Initialize the NVME buffer list used by driver for NVME IO */
5659
- spin_lock_init(&phba->nvme_buf_list_get_lock);
5660
- INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
5661
- phba->get_nvme_bufs = 0;
5662
- spin_lock_init(&phba->nvme_buf_list_put_lock);
5663
- INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
5664
- phba->put_nvme_bufs = 0;
5665
- }
6325
+ /* Initialize the IO buffer list used by driver for SLI3 SCSI */
6326
+ spin_lock_init(&phba->scsi_buf_list_get_lock);
6327
+ INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
6328
+ spin_lock_init(&phba->scsi_buf_list_put_lock);
6329
+ INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
56666330
56676331 /* Initialize the fabric iocb list */
56686332 INIT_LIST_HEAD(&phba->fabric_iocb_list);
....@@ -5686,6 +6350,11 @@
56866350 /* Heartbeat timer */
56876351 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
56886352
6353
+ INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
6354
+
6355
+ INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
6356
+ lpfc_idle_stat_delay_work);
6357
+
56896358 return 0;
56906359 }
56916360
....@@ -5703,7 +6372,7 @@
57036372 static int
57046373 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
57056374 {
5706
- int rc;
6375
+ int rc, entry_sz;
57076376
57086377 /*
57096378 * Initialize timers used by driver
....@@ -5743,10 +6412,10 @@
57436412 * used to create the sg_dma_buf_pool must be dynamically calculated.
57446413 */
57456414
5746
- /* Initialize the host templates the configured values. */
5747
- lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5748
- lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
5749
- lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
6415
+ if (phba->sli_rev == LPFC_SLI_REV4)
6416
+ entry_sz = sizeof(struct sli4_sge);
6417
+ else
6418
+ entry_sz = sizeof(struct ulp_bde64);
57506419
57516420 /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
57526421 if (phba->cfg_enable_bg) {
....@@ -5761,7 +6430,7 @@
57616430 */
57626431 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
57636432 sizeof(struct fcp_rsp) +
5764
- (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
6433
+ (LPFC_MAX_SG_SEG_CNT * entry_sz);
57656434
57666435 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
57676436 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
....@@ -5776,14 +6445,14 @@
57766445 */
57776446 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
57786447 sizeof(struct fcp_rsp) +
5779
- ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
6448
+ ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
57806449
57816450 /* Total BDEs in BPL for scsi_sg_list */
57826451 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
57836452 }
57846453
57856454 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
5786
- "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
6455
+ "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
57876456 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
57886457 phba->cfg_total_seg_cnt);
57896458
....@@ -5800,6 +6469,24 @@
58006469 /* Allocate device driver memory */
58016470 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
58026471 return -ENOMEM;
6472
+
6473
+ phba->lpfc_sg_dma_buf_pool =
6474
+ dma_pool_create("lpfc_sg_dma_buf_pool",
6475
+ &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
6476
+ BPL_ALIGN_SZ, 0);
6477
+
6478
+ if (!phba->lpfc_sg_dma_buf_pool)
6479
+ goto fail_free_mem;
6480
+
6481
+ phba->lpfc_cmd_rsp_buf_pool =
6482
+ dma_pool_create("lpfc_cmd_rsp_buf_pool",
6483
+ &phba->pcidev->dev,
6484
+ sizeof(struct fcp_cmnd) +
6485
+ sizeof(struct fcp_rsp),
6486
+ BPL_ALIGN_SZ, 0);
6487
+
6488
+ if (!phba->lpfc_cmd_rsp_buf_pool)
6489
+ goto fail_free_dma_buf_pool;
58036490
58046491 /*
58056492 * Enable sr-iov virtual functions if supported and configured
....@@ -5819,6 +6506,13 @@
58196506 }
58206507
58216508 return 0;
6509
+
6510
+fail_free_dma_buf_pool:
6511
+ dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
6512
+ phba->lpfc_sg_dma_buf_pool = NULL;
6513
+fail_free_mem:
6514
+ lpfc_mem_free(phba);
6515
+ return -ENOMEM;
58226516 }
58236517
58246518 /**
....@@ -5855,14 +6549,13 @@
58556549 MAILBOX_t *mb;
58566550 int rc, i, max_buf_size;
58576551 int longs;
5858
- int fof_vectors = 0;
58596552 int extra;
58606553 uint64_t wwn;
58616554 u32 if_type;
58626555 u32 if_fam;
58636556
5864
- phba->sli4_hba.num_online_cpu = num_online_cpus();
58656557 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
6558
+ phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
58666559 phba->sli4_hba.curr_disp_cpu = 0;
58676560
58686561 /* Get all the module params for configuring this host */
....@@ -5877,6 +6570,11 @@
58776570 rc = lpfc_sli4_post_status_check(phba);
58786571 if (rc)
58796572 return -ENODEV;
6573
+
6574
+ /* Allocate all driver workqueues here */
6575
+
6576
+ /* The lpfc_wq workqueue for deferred irq use */
6577
+ phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
58806578
58816579 /*
58826580 * Initialize timers used by driver
....@@ -5912,82 +6610,6 @@
59126610 * The WQ create will allocate the ring.
59136611 */
59146612
5915
- /*
5916
- * 1 for cmd, 1 for rsp, NVME adds an extra one
5917
- * for boundary conditions in its max_sgl_segment template.
5918
- */
5919
- extra = 2;
5920
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
5921
- extra++;
5922
-
5923
- /*
5924
- * It doesn't matter what family our adapter is in, we are
5925
- * limited to 2 Pages, 512 SGEs, for our SGL.
5926
- * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
5927
- */
5928
- max_buf_size = (2 * SLI4_PAGE_SIZE);
5929
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
5930
- phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;
5931
-
5932
- /*
5933
- * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
5934
- * used to create the sg_dma_buf_pool must be calculated.
5935
- */
5936
- if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5937
- /*
5938
- * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
5939
- * the FCP rsp, and a SGE. Sice we have no control
5940
- * over how many protection segments the SCSI Layer
5941
- * will hand us (ie: there could be one for every block
5942
- * in the IO), just allocate enough SGEs to accomidate
5943
- * our max amount and we need to limit lpfc_sg_seg_cnt
5944
- * to minimize the risk of running out.
5945
- */
5946
- phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5947
- sizeof(struct fcp_rsp) + max_buf_size;
5948
-
5949
- /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
5950
- phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
5951
-
5952
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
5953
- phba->cfg_sg_seg_cnt =
5954
- LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
5955
- } else {
5956
- /*
5957
- * The scsi_buf for a regular I/O holds the FCP cmnd,
5958
- * the FCP rsp, a SGE for each, and a SGE for up to
5959
- * cfg_sg_seg_cnt data segments.
5960
- */
5961
- phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
5962
- sizeof(struct fcp_rsp) +
5963
- ((phba->cfg_sg_seg_cnt + extra) *
5964
- sizeof(struct sli4_sge));
5965
-
5966
- /* Total SGEs for scsi_sg_list */
5967
- phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
5968
-
5969
- /*
5970
- * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
5971
- * need to post 1 page for the SGL.
5972
- */
5973
- }
5974
-
5975
- /* Initialize the host templates with the updated values. */
5976
- lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5977
- lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
5978
- lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
5979
-
5980
- if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
5981
- phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
5982
- else
5983
- phba->cfg_sg_dma_buf_size =
5984
- SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
5985
-
5986
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
5987
- "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
5988
- phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
5989
- phba->cfg_total_seg_cnt);
5990
-
59916613 /* Initialize buffer queue management fields */
59926614 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
59936615 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
....@@ -5996,23 +6618,24 @@
59966618 /*
59976619 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
59986620 */
5999
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
6000
- /* Initialize the Abort scsi buffer list used by driver */
6001
- spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
6002
- INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
6003
- }
6621
+ /* Initialize the Abort buffer list used by driver */
6622
+ spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
6623
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
60046624
60056625 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
60066626 /* Initialize the Abort nvme buffer list used by driver */
6007
- spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
6008
- INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
6627
+ spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
60096628 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
60106629 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
6630
+ spin_lock_init(&phba->sli4_hba.t_active_list_lock);
6631
+ INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
60116632 }
60126633
60136634 /* This abort list used by worker thread */
60146635 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
60156636 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
6637
+ spin_lock_init(&phba->sli4_hba.asynce_list_lock);
6638
+ spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
60166639
60176640 /*
60186641 * Initialize driver internal slow-path work queues
....@@ -6024,8 +6647,6 @@
60246647 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
60256648 /* Asynchronous event CQ Event work queue list */
60266649 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
6027
- /* Fast-path XRI aborted CQ Event work queue list */
6028
- INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
60296650 /* Slow-path XRI aborted CQ Event work queue list */
60306651 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
60316652 /* Receive queue CQ Event work queue list */
....@@ -6049,7 +6670,7 @@
60496670 /* Allocate device driver memory */
60506671 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
60516672 if (rc)
6052
- return -ENOMEM;
6673
+ goto out_destroy_workqueue;
60536674
60546675 /* IF Type 2 ports get initialized now. */
60556676 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
....@@ -6103,7 +6724,8 @@
61036724 lpfc_read_nv(phba, mboxq);
61046725 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
61056726 if (rc != MBX_SUCCESS) {
6106
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6727
+ lpfc_printf_log(phba, KERN_ERR,
6728
+ LOG_TRACE_EVENT,
61076729 "6016 Mailbox failed , mbxCmd x%x "
61086730 "READ_NV, mbxStatus x%x\n",
61096731 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
....@@ -6132,15 +6754,26 @@
61326754
61336755 phba->nvmet_support = 1; /* a match */
61346756
6135
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6757
+ lpfc_printf_log(phba, KERN_ERR,
6758
+ LOG_TRACE_EVENT,
61366759 "6017 NVME Target %016llx\n",
61376760 wwn);
61386761 #else
6139
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6762
+ lpfc_printf_log(phba, KERN_ERR,
6763
+ LOG_TRACE_EVENT,
61406764 "6021 Can't enable NVME Target."
61416765 " NVME_TARGET_FC infrastructure"
61426766 " is not in kernel\n");
61436767 #endif
6768
+ /* Not supported for NVMET */
6769
+ phba->cfg_xri_rebalancing = 0;
6770
+ if (phba->irq_chann_mode == NHT_MODE) {
6771
+ phba->cfg_irq_chann =
6772
+ phba->sli4_hba.num_present_cpu;
6773
+ phba->cfg_hdw_queue =
6774
+ phba->sli4_hba.num_present_cpu;
6775
+ phba->irq_chann_mode = NORMAL_MODE;
6776
+ }
61446777 break;
61456778 }
61466779 }
....@@ -6161,9 +6794,9 @@
61616794 &phba->sli4_hba.sli_intf);
61626795 if (phba->sli4_hba.extents_in_use &&
61636796 phba->sli4_hba.rpi_hdrs_in_use) {
6164
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6165
- "2999 Unsupported SLI4 Parameters "
6166
- "Extents and RPI headers enabled.\n");
6797
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6798
+ "2999 Unsupported SLI4 Parameters "
6799
+ "Extents and RPI headers enabled.\n");
61676800 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
61686801 if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
61696802 mempool_free(mboxq, phba->mbox_mem_pool);
....@@ -6179,22 +6812,143 @@
61796812 }
61806813 }
61816814
6815
+ /*
6816
+ * 1 for cmd, 1 for rsp, NVME adds an extra one
6817
+ * for boundary conditions in its max_sgl_segment template.
6818
+ */
6819
+ extra = 2;
6820
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
6821
+ extra++;
6822
+
6823
+ /*
6824
+ * It doesn't matter what family our adapter is in, we are
6825
+ * limited to 2 Pages, 512 SGEs, for our SGL.
6826
+ * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
6827
+ */
6828
+ max_buf_size = (2 * SLI4_PAGE_SIZE);
6829
+
6830
+ /*
6831
+ * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
6832
+ * used to create the sg_dma_buf_pool must be calculated.
6833
+ */
6834
+ if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
6835
+ /* Both cfg_enable_bg and cfg_external_dif code paths */
6836
+
6837
+ /*
6838
+ * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
6839
+			 * the FCP rsp, and a SGE. Since we have no control
6840
+ * over how many protection segments the SCSI Layer
6841
+ * will hand us (ie: there could be one for every block
6842
+			 * in the IO), just allocate enough SGEs to accommodate
6843
+ * our max amount and we need to limit lpfc_sg_seg_cnt
6844
+ * to minimize the risk of running out.
6845
+ */
6846
+ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6847
+ sizeof(struct fcp_rsp) + max_buf_size;
6848
+
6849
+ /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
6850
+ phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
6851
+
6852
+ /*
6853
+ * If supporting DIF, reduce the seg count for scsi to
6854
+ * allow room for the DIF sges.
6855
+ */
6856
+ if (phba->cfg_enable_bg &&
6857
+ phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
6858
+ phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
6859
+ else
6860
+ phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6861
+
6862
+ } else {
6863
+ /*
6864
+ * The scsi_buf for a regular I/O holds the FCP cmnd,
6865
+ * the FCP rsp, a SGE for each, and a SGE for up to
6866
+ * cfg_sg_seg_cnt data segments.
6867
+ */
6868
+ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
6869
+ sizeof(struct fcp_rsp) +
6870
+ ((phba->cfg_sg_seg_cnt + extra) *
6871
+ sizeof(struct sli4_sge));
6872
+
6873
+ /* Total SGEs for scsi_sg_list */
6874
+ phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
6875
+ phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
6876
+
6877
+ /*
6878
+ * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
6879
+ * need to post 1 page for the SGL.
6880
+ */
6881
+ }
6882
+
6883
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
6884
+ phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
6885
+ else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
6886
+ phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
6887
+ else
6888
+ phba->cfg_sg_dma_buf_size =
6889
+ SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
6890
+
6891
+ phba->border_sge_num = phba->cfg_sg_dma_buf_size /
6892
+ sizeof(struct sli4_sge);
6893
+
6894
+ /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
6895
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
6896
+ if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
6897
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
6898
+ "6300 Reducing NVME sg segment "
6899
+ "cnt to %d\n",
6900
+ LPFC_MAX_NVME_SEG_CNT);
6901
+ phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
6902
+ } else
6903
+ phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
6904
+ }
6905
+
6906
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
6907
+ "9087 sg_seg_cnt:%d dmabuf_size:%d "
6908
+ "total:%d scsi:%d nvme:%d\n",
6909
+ phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
6910
+ phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
6911
+ phba->cfg_nvme_seg_cnt);
6912
+
6913
+ if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
6914
+ i = phba->cfg_sg_dma_buf_size;
6915
+ else
6916
+ i = SLI4_PAGE_SIZE;
6917
+
6918
+ phba->lpfc_sg_dma_buf_pool =
6919
+ dma_pool_create("lpfc_sg_dma_buf_pool",
6920
+ &phba->pcidev->dev,
6921
+ phba->cfg_sg_dma_buf_size,
6922
+ i, 0);
6923
+ if (!phba->lpfc_sg_dma_buf_pool)
6924
+ goto out_free_bsmbx;
6925
+
6926
+ phba->lpfc_cmd_rsp_buf_pool =
6927
+ dma_pool_create("lpfc_cmd_rsp_buf_pool",
6928
+ &phba->pcidev->dev,
6929
+ sizeof(struct fcp_cmnd) +
6930
+ sizeof(struct fcp_rsp),
6931
+ i, 0);
6932
+ if (!phba->lpfc_cmd_rsp_buf_pool)
6933
+ goto out_free_sg_dma_buf;
6934
+
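/*
 * Editor's note: a minimal, self-contained sketch of the non-T10-DIF sizing
 * math performed above. The struct sizes used here (32-byte FCP cmnd,
 * 96-byte FCP rsp, 16-byte SLI4 SGE, 4 KB SLI4 page) and the segment count
 * are illustrative assumptions, not values taken from the driver headers.
 */
#include <stdio.h>

int main(void)
{
	unsigned int fcp_cmnd_sz = 32, fcp_rsp_sz = 96, sge_sz = 16;
	unsigned int page_sz = 4096;
	unsigned int sg_seg_cnt = 64;	/* stand-in for the module parameter */
	unsigned int extra = 2 + 1;	/* cmd + rsp, plus one more when NVME is enabled */

	/* FCP cmnd + FCP rsp + one SGE per data segment (plus the extras) */
	unsigned int buf = fcp_cmnd_sz + fcp_rsp_sz + (sg_seg_cnt + extra) * sge_sz;

	/* Round the pool element up to a whole SLI4 page, as SLI4_PAGE_ALIGN does */
	buf = (buf + page_sz - 1) & ~(page_sz - 1);

	printf("sg_dma_buf_size=%u total_seg_cnt=%u\n", buf, sg_seg_cnt + extra);
	return 0;
}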
61826935 mempool_free(mboxq, phba->mbox_mem_pool);
61836936
61846937 /* Verify OAS is supported */
61856938 lpfc_sli4_oas_verify(phba);
6186
- if (phba->cfg_fof)
6187
- fof_vectors = 1;
6939
+
6940
+ /* Verify RAS support on adapter */
6941
+ lpfc_sli4_ras_init(phba);
61886942
61896943 /* Verify all the SLI4 queues */
61906944 rc = lpfc_sli4_queue_verify(phba);
61916945 if (rc)
6192
- goto out_free_bsmbx;
6946
+ goto out_free_cmd_rsp_buf;
61936947
61946948 /* Create driver internal CQE event pool */
61956949 rc = lpfc_sli4_cq_event_pool_create(phba);
61966950 if (rc)
6197
- goto out_free_bsmbx;
6951
+ goto out_free_cmd_rsp_buf;
61986952
61996953 /* Initialize sgl lists per host */
62006954 lpfc_init_sgl_list(phba);
....@@ -6202,13 +6956,13 @@
62026956 /* Allocate and initialize active sgl array */
62036957 rc = lpfc_init_active_sgl_array(phba);
62046958 if (rc) {
6205
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6959
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
62066960 "1430 Failed to initialize sgl list.\n");
62076961 goto out_destroy_cq_event_pool;
62086962 }
62096963 rc = lpfc_sli4_init_rpi_hdrs(phba);
62106964 if (rc) {
6211
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6965
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
62126966 "1432 Failed to initialize rpi headers.\n");
62136967 goto out_free_active_sgl;
62146968 }
....@@ -6218,48 +6972,62 @@
62186972 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
62196973 GFP_KERNEL);
62206974 if (!phba->fcf.fcf_rr_bmask) {
6221
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6975
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
62226976 "2759 Failed allocate memory for FCF round "
62236977 "robin failover bmask\n");
62246978 rc = -ENOMEM;
62256979 goto out_remove_rpi_hdrs;
62266980 }
62276981
6228
- phba->sli4_hba.hba_eq_hdl = kcalloc(fof_vectors + phba->io_channel_irqs,
6229
- sizeof(struct lpfc_hba_eq_hdl),
6230
- GFP_KERNEL);
6982
+ phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
6983
+ sizeof(struct lpfc_hba_eq_hdl),
6984
+ GFP_KERNEL);
62316985 if (!phba->sli4_hba.hba_eq_hdl) {
6232
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6986
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
62336987 "2572 Failed allocate memory for "
62346988 "fast-path per-EQ handle array\n");
62356989 rc = -ENOMEM;
62366990 goto out_free_fcf_rr_bmask;
62376991 }
62386992
6239
- phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu,
6993
+ phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
62406994 sizeof(struct lpfc_vector_map_info),
62416995 GFP_KERNEL);
62426996 if (!phba->sli4_hba.cpu_map) {
6243
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6997
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
62446998 "3327 Failed allocate memory for msi-x "
62456999 "interrupt vector mapping\n");
62467000 rc = -ENOMEM;
62477001 goto out_free_hba_eq_hdl;
62487002 }
6249
- if (lpfc_used_cpu == NULL) {
6250
- lpfc_used_cpu = kcalloc(lpfc_present_cpu, sizeof(uint16_t),
6251
- GFP_KERNEL);
6252
- if (!lpfc_used_cpu) {
6253
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6254
- "3335 Failed allocate memory for msi-x "
6255
- "interrupt vector mapping\n");
6256
- kfree(phba->sli4_hba.cpu_map);
6257
- rc = -ENOMEM;
6258
- goto out_free_hba_eq_hdl;
6259
- }
6260
- for (i = 0; i < lpfc_present_cpu; i++)
6261
- lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
7003
+
7004
+ phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
7005
+ if (!phba->sli4_hba.eq_info) {
7006
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7007
+ "3321 Failed allocation for per_cpu stats\n");
7008
+ rc = -ENOMEM;
7009
+ goto out_free_hba_cpu_map;
62627010 }
7011
+
7012
+ phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
7013
+ sizeof(*phba->sli4_hba.idle_stat),
7014
+ GFP_KERNEL);
7015
+ if (!phba->sli4_hba.idle_stat) {
7016
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7017
+ "3390 Failed allocation for idle_stat\n");
7018
+ rc = -ENOMEM;
7019
+ goto out_free_hba_eq_info;
7020
+ }
7021
+
7022
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
7023
+ phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
7024
+ if (!phba->sli4_hba.c_stat) {
7025
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7026
+ "3332 Failed allocating per cpu hdwq stats\n");
7027
+ rc = -ENOMEM;
7028
+ goto out_free_hba_idle_stat;
7029
+ }
7030
+#endif
62637031
62647032 /*
62657033 * Enable sr-iov virtual functions if supported and configured
....@@ -6280,6 +7048,14 @@
62807048
62817049 return 0;
62827050
7051
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
7052
+out_free_hba_idle_stat:
7053
+ kfree(phba->sli4_hba.idle_stat);
7054
+#endif
7055
+out_free_hba_eq_info:
7056
+ free_percpu(phba->sli4_hba.eq_info);
7057
+out_free_hba_cpu_map:
7058
+ kfree(phba->sli4_hba.cpu_map);
62837059 out_free_hba_eq_hdl:
62847060 kfree(phba->sli4_hba.hba_eq_hdl);
62857061 out_free_fcf_rr_bmask:
....@@ -6290,10 +7066,19 @@
62907066 lpfc_free_active_sgl(phba);
62917067 out_destroy_cq_event_pool:
62927068 lpfc_sli4_cq_event_pool_destroy(phba);
7069
+out_free_cmd_rsp_buf:
7070
+ dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
7071
+ phba->lpfc_cmd_rsp_buf_pool = NULL;
7072
+out_free_sg_dma_buf:
7073
+ dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7074
+ phba->lpfc_sg_dma_buf_pool = NULL;
62937075 out_free_bsmbx:
62947076 lpfc_destroy_bootstrap_mbox(phba);
62957077 out_free_mem:
62967078 lpfc_mem_free(phba);
7079
+out_destroy_workqueue:
7080
+ destroy_workqueue(phba->wq);
7081
+ phba->wq = NULL;
62977082 return rc;
62987083 }
62997084
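/*
 * Editor's note: the out_free_* labels above form the usual kernel-style
 * unwind ladder: resources are released in reverse order of acquisition, and
 * each failure jumps to the label that frees everything allocated so far.
 * A stripped-down sketch with stand-in heap resources:
 */
#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	char *a, *b, *c;

	a = malloc(16);
	if (!a)
		goto out;
	b = malloc(16);
	if (!b)
		goto out_free_a;
	c = malloc(16);
	if (!c)
		goto out_free_b;

	/* Success: in a real driver the caller would own a, b and c. */
	free(c);
	free(b);
	free(a);
	return 0;

out_free_b:
	free(b);
out_free_a:
	free(a);
out:
	return -12;	/* -ENOMEM-style failure */
}

int main(void)
{
	printf("setup() = %d\n", setup());
	return 0;
}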
....@@ -6309,11 +7094,18 @@
63097094 {
63107095 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
63117096
7097
+ free_percpu(phba->sli4_hba.eq_info);
7098
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
7099
+ free_percpu(phba->sli4_hba.c_stat);
7100
+#endif
7101
+ kfree(phba->sli4_hba.idle_stat);
7102
+
63127103 /* Free memory allocated for msi-x interrupt vector to CPU mapping */
63137104 kfree(phba->sli4_hba.cpu_map);
7105
+ phba->sli4_hba.num_possible_cpu = 0;
63147106 phba->sli4_hba.num_present_cpu = 0;
6315
- phba->sli4_hba.num_online_cpu = 0;
63167107 phba->sli4_hba.curr_disp_cpu = 0;
7108
+ cpumask_clear(&phba->sli4_hba.irq_aff_mask);
63177109
63187110 /* Free memory allocated for fast-path work queue handles */
63197111 kfree(phba->sli4_hba.hba_eq_hdl);
....@@ -6381,7 +7173,7 @@
63817173 phba->lpfc_stop_port = lpfc_stop_port_s4;
63827174 break;
63837175 default:
6384
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7176
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
63857177 "1431 Invalid HBA PCI-device group: 0x%x\n",
63867178 dev_grp);
63877179 return -ENODEV;
....@@ -6413,12 +7205,6 @@
64137205 error = PTR_ERR(phba->worker_thread);
64147206 return error;
64157207 }
6416
-
6417
- /* The lpfc_wq workqueue for deferred irq use, is only used for SLI4 */
6418
- if (phba->sli_rev == LPFC_SLI_REV4)
6419
- phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
6420
- else
6421
- phba->wq = NULL;
64227208
64237209 return 0;
64247210 }
....@@ -6471,6 +7257,7 @@
64717257 /**
64727258 * lpfc_init_iocb_list - Allocate and initialize iocb list.
64737259 * @phba: pointer to lpfc hba data structure.
7260
+ * @iocb_count: number of requested iocbs
64747261 *
64757262 * This routine is invoked to allocate and initialize the driver's IOCB
64767263 * list and set up the IOCB tag array accordingly.
....@@ -6493,7 +7280,7 @@
64937280 if (iocbq_entry == NULL) {
64947281 printk(KERN_ERR "%s: only allocated %d iocbs of "
64957282 "expected %d count. Unloading driver.\n",
6496
- __func__, i, LPFC_IOCB_LIST_CNT);
7283
+ __func__, i, iocb_count);
64977284 goto out_free_iocbq;
64987285 }
64997286
....@@ -6650,11 +7437,8 @@
66507437 /* els xri-sgl book keeping */
66517438 phba->sli4_hba.els_xri_cnt = 0;
66527439
6653
- /* scsi xri-buffer book keeping */
6654
- phba->sli4_hba.scsi_xri_cnt = 0;
6655
-
66567440 /* nvme xri-buffer book keeping */
6657
- phba->sli4_hba.nvme_xri_cnt = 0;
7441
+ phba->sli4_hba.io_xri_cnt = 0;
66587442 }
66597443
66607444 /**
....@@ -6685,7 +7469,7 @@
66857469
66867470 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
66877471 if (!rpi_hdr) {
6688
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7472
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
66897473 "0391 Error during rpi post operation\n");
66907474 lpfc_sli4_remove_rpis(phba);
66917475 rc = -ENODEV;
....@@ -6748,9 +7532,9 @@
67487532 if (!dmabuf)
67497533 return NULL;
67507534
6751
- dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
6752
- LPFC_HDR_TEMPLATE_SIZE,
6753
- &dmabuf->phys, GFP_KERNEL);
7535
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
7536
+ LPFC_HDR_TEMPLATE_SIZE,
7537
+ &dmabuf->phys, GFP_KERNEL);
67547538 if (!dmabuf->virt) {
67557539 rpi_hdr = NULL;
67567540 goto err_free_dmabuf;
....@@ -6868,6 +7652,9 @@
68687652 static void
68697653 lpfc_hba_free(struct lpfc_hba *phba)
68707654 {
7655
+ if (phba->sli_rev == LPFC_SLI_REV4)
7656
+ kfree(phba->sli4_hba.hdwq);
7657
+
68717658 /* Release the driver assigned board number */
68727659 idr_remove(&lpfc_hba_index, phba->brd_no);
68737660
....@@ -6903,10 +7690,6 @@
69037690 phba->fc_arbtov = FF_DEF_ARBTOV;
69047691
69057692 atomic_set(&phba->sdev_cnt, 0);
6906
- atomic_set(&phba->fc4ScsiInputRequests, 0);
6907
- atomic_set(&phba->fc4ScsiOutputRequests, 0);
6908
- atomic_set(&phba->fc4ScsiControlRequests, 0);
6909
- atomic_set(&phba->fc4ScsiIoCmpls, 0);
69107693 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
69117694 if (!vport)
69127695 return -ENODEV;
....@@ -6916,18 +7699,10 @@
69167699
69177700 if (phba->nvmet_support) {
69187701 /* Only 1 vport (pport) will support NVME target */
6919
- if (phba->txrdy_payload_pool == NULL) {
6920
- phba->txrdy_payload_pool = dma_pool_create(
6921
- "txrdy_pool", &phba->pcidev->dev,
6922
- TXRDY_PAYLOAD_LEN, 16, 0);
6923
- if (phba->txrdy_payload_pool) {
6924
- phba->targetport = NULL;
6925
- phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
6926
- lpfc_printf_log(phba, KERN_INFO,
6927
- LOG_INIT | LOG_NVME_DISC,
6928
- "6076 NVME Target Found\n");
6929
- }
6930
- }
7702
+ phba->targetport = NULL;
7703
+ phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
7704
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
7705
+ "6076 NVME Target Found\n");
69317706 }
69327707
69337708 lpfc_debugfs_initialize(vport);
....@@ -6984,7 +7759,6 @@
69847759 uint32_t old_mask;
69857760 uint32_t old_guard;
69867761
6987
- int pagecnt = 10;
69887762 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
69897763 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
69907764 "1478 Registering BlockGuard with the "
....@@ -7007,7 +7781,7 @@
70077781 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
70087782 if ((old_mask != phba->cfg_prot_mask) ||
70097783 (old_guard != phba->cfg_prot_guard))
7010
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7784
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
70117785 "1475 Registering BlockGuard with the "
70127786 "SCSI layer: mask %d guard %d\n",
70137787 phba->cfg_prot_mask,
....@@ -7016,61 +7790,11 @@
70167790 scsi_host_set_prot(shost, phba->cfg_prot_mask);
70177791 scsi_host_set_guard(shost, phba->cfg_prot_guard);
70187792 } else
7019
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7793
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
70207794 "1479 Not Registering BlockGuard with the SCSI "
70217795 "layer, Bad protection parameters: %d %d\n",
70227796 old_mask, old_guard);
70237797 }
7024
-
7025
- if (!_dump_buf_data) {
7026
- while (pagecnt) {
7027
- spin_lock_init(&_dump_buf_lock);
7028
- _dump_buf_data =
7029
- (char *) __get_free_pages(GFP_KERNEL, pagecnt);
7030
- if (_dump_buf_data) {
7031
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7032
- "9043 BLKGRD: allocated %d pages for "
7033
- "_dump_buf_data at 0x%p\n",
7034
- (1 << pagecnt), _dump_buf_data);
7035
- _dump_buf_data_order = pagecnt;
7036
- memset(_dump_buf_data, 0,
7037
- ((1 << PAGE_SHIFT) << pagecnt));
7038
- break;
7039
- } else
7040
- --pagecnt;
7041
- }
7042
- if (!_dump_buf_data_order)
7043
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7044
- "9044 BLKGRD: ERROR unable to allocate "
7045
- "memory for hexdump\n");
7046
- } else
7047
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7048
- "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
7049
- "\n", _dump_buf_data);
7050
- if (!_dump_buf_dif) {
7051
- while (pagecnt) {
7052
- _dump_buf_dif =
7053
- (char *) __get_free_pages(GFP_KERNEL, pagecnt);
7054
- if (_dump_buf_dif) {
7055
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7056
- "9046 BLKGRD: allocated %d pages for "
7057
- "_dump_buf_dif at 0x%p\n",
7058
- (1 << pagecnt), _dump_buf_dif);
7059
- _dump_buf_dif_order = pagecnt;
7060
- memset(_dump_buf_dif, 0,
7061
- ((1 << PAGE_SHIFT) << pagecnt));
7062
- break;
7063
- } else
7064
- --pagecnt;
7065
- }
7066
- if (!_dump_buf_dif_order)
7067
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7068
- "9047 BLKGRD: ERROR unable to allocate "
7069
- "memory for hexdump\n");
7070
- } else
7071
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7072
- "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
7073
- _dump_buf_dif);
70747798 }
70757799
70767800 /**
....@@ -7095,8 +7819,6 @@
70957819 */
70967820 shost = pci_get_drvdata(phba->pcidev);
70977821 shost->can_queue = phba->cfg_hba_queue_depth - 10;
7098
- if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
7099
- lpfc_setup_bg(phba, shost);
71007822
71017823 lpfc_host_attrib_init(shost);
71027824
....@@ -7132,26 +7854,22 @@
71327854 static int
71337855 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
71347856 {
7135
- struct pci_dev *pdev;
7857
+ struct pci_dev *pdev = phba->pcidev;
71367858 unsigned long bar0map_len, bar2map_len;
71377859 int i, hbq_count;
71387860 void *ptr;
7139
- int error = -ENODEV;
7861
+ int error;
71407862
7141
- /* Obtain PCI device reference */
7142
- if (!phba->pcidev)
7143
- return error;
7144
- else
7145
- pdev = phba->pcidev;
7863
+ if (!pdev)
7864
+ return -ENODEV;
71467865
71477866 /* Set the device DMA mask size */
7148
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
7149
- || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
7150
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
7151
- || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
7152
- return error;
7153
- }
7154
- }
7867
+ error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7868
+ if (error)
7869
+ error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7870
+ if (error)
7871
+ return error;
7872
+ error = -ENODEV;
71557873
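/*
 * Editor's note: the 64-bit-then-32-bit DMA mask fallback above, sketched as
 * a self-contained user-space program. try_set_mask() is a stand-in for
 * dma_set_mask_and_coherent(); the 32-bit device address limit is an
 * arbitrary assumption used only so the fallback path actually fires.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t device_addr_limit = 0xffffffffULL;	/* pretend the device is 32-bit only */

static int try_set_mask(uint64_t mask)
{
	return (mask <= device_addr_limit) ? 0 : -5;	/* -EIO-style failure */
}

int main(void)
{
	int error;

	error = try_set_mask(~0ULL);			/* DMA_BIT_MASK(64) */
	if (error)
		error = try_set_mask(0xffffffffULL);	/* DMA_BIT_MASK(32) */
	if (error) {
		fprintf(stderr, "no usable DMA mask\n");
		return 1;
	}
	printf("DMA mask accepted\n");
	return 0;
}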
71567874 /* Get the bus address of Bar0 and Bar2 and the number of bytes
71577875 * required by each mapping.
....@@ -7179,8 +7897,8 @@
71797897 }
71807898
71817899 /* Allocate memory for SLI-2 structures */
7182
- phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7183
- &phba->slim2p.phys, GFP_KERNEL);
7900
+ phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7901
+ &phba->slim2p.phys, GFP_KERNEL);
71847902 if (!phba->slim2p.virt)
71857903 goto out_iounmap;
71867904
....@@ -7303,7 +8021,7 @@
73038021 * other register reads as the data may not be valid. Just exit.
73048022 */
73058023 if (port_error) {
7306
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8024
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
73078025 "1408 Port Failed POST - portsmphr=0x%x, "
73088026 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
73098027 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
....@@ -7352,7 +8070,8 @@
73528070 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
73538071 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
73548072 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
7355
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8073
+ lpfc_printf_log(phba, KERN_ERR,
8074
+ LOG_TRACE_EVENT,
73568075 "1422 Unrecoverable Error "
73578076 "Detected during POST "
73588077 "uerr_lo_reg=0x%x, "
....@@ -7379,7 +8098,7 @@
73798098 phba->work_status[1] =
73808099 readl(phba->sli4_hba.u.if_type2.
73818100 ERR2regaddr);
7382
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8101
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
73838102 "2888 Unrecoverable port error "
73848103 "following POST: port status reg "
73858104 "0x%x, port_smphr reg 0x%x, "
....@@ -7492,6 +8211,7 @@
74928211 /**
74938212 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
74948213 * @phba: pointer to lpfc hba data structure.
8214
+ * @if_type: sli if type to operate on.
74958215 *
74968216 * This routine is invoked to set up SLI4 BAR1 register memory map.
74978217 **/
....@@ -7598,8 +8318,8 @@
75988318 * plus an alignment restriction of 16 bytes.
75998319 */
76008320 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
7601
- dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
7602
- &dmabuf->phys, GFP_KERNEL);
8321
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
8322
+ &dmabuf->phys, GFP_KERNEL);
76038323 if (!dmabuf->virt) {
76048324 kfree(dmabuf);
76058325 return -ENOMEM;
....@@ -7663,6 +8383,85 @@
76638383 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
76648384 }
76658385
8386
+static const char * const lpfc_topo_to_str[] = {
8387
+ "Loop then P2P",
8388
+ "Loopback",
8389
+ "P2P Only",
8390
+ "Unsupported",
8391
+ "Loop Only",
8392
+ "Unsupported",
8393
+ "P2P then Loop",
8394
+};
8395
+
8396
+#define LINK_FLAGS_DEF 0x0
8397
+#define LINK_FLAGS_P2P 0x1
8398
+#define LINK_FLAGS_LOOP 0x2
8399
+/**
8400
+ * lpfc_map_topology - Map the topology read from READ_CONFIG
8401
+ * @phba: pointer to lpfc hba data structure.
8402
+ * @rd_config: pointer to read config data
8403
+ *
8404
+ * This routine is invoked to map the topology values as read
8405
+ * from the read config mailbox command. If the persistent
8406
+ * topology feature is supported, the firmware will provide the
8407
+ * saved topology information to be used in INIT_LINK
8408
+ **/
8409
+static void
8410
+lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
8411
+{
8412
+ u8 ptv, tf, pt;
8413
+
8414
+ ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
8415
+ tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
8416
+ pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
8417
+
8418
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8419
+ "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
8420
+ ptv, tf, pt);
8421
+ if (!ptv) {
8422
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8423
+ "2019 FW does not support persistent topology "
8424
+ "Using driver parameter defined value [%s]",
8425
+ lpfc_topo_to_str[phba->cfg_topology]);
8426
+ return;
8427
+ }
8428
+ /* FW supports persistent topology - override module parameter value */
8429
+ phba->hba_flag |= HBA_PERSISTENT_TOPO;
8430
+ switch (phba->pcidev->device) {
8431
+ case PCI_DEVICE_ID_LANCER_G7_FC:
8432
+ case PCI_DEVICE_ID_LANCER_G6_FC:
8433
+ if (!tf) {
8434
+ phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
8435
+ ? FLAGS_TOPOLOGY_MODE_LOOP
8436
+ : FLAGS_TOPOLOGY_MODE_PT_PT);
8437
+ } else {
8438
+ phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
8439
+ }
8440
+ break;
8441
+ default: /* G5 */
8442
+ if (tf) {
8443
+ /* If topology failover set - pt is '0' or '1' */
8444
+ phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
8445
+ FLAGS_TOPOLOGY_MODE_LOOP_PT);
8446
+ } else {
8447
+ phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
8448
+ ? FLAGS_TOPOLOGY_MODE_PT_PT
8449
+ : FLAGS_TOPOLOGY_MODE_LOOP);
8450
+ }
8451
+ break;
8452
+ }
8453
+ if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
8454
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
8455
+ "2020 Using persistent topology value [%s]",
8456
+ lpfc_topo_to_str[phba->cfg_topology]);
8457
+ } else {
8458
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8459
+ "2021 Invalid topology values from FW "
8460
+ "Using driver parameter defined value [%s]",
8461
+ lpfc_topo_to_str[phba->cfg_topology]);
8462
+ }
8463
+}
8464
+
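/*
 * Editor's note: a compact, self-contained restatement of the persistent
 * topology decision above. The enum values mirror the indices of
 * lpfc_topo_to_str[]; the LINK_FLAGS_* constants and the G6/G7-vs-G5 split
 * follow the code above, but the function and parameter names here are
 * stand-ins, not driver symbols.
 */
#include <stdbool.h>
#include <stdio.h>

enum topo { LOOP_THEN_P2P = 0, P2P_ONLY = 2, LOOP_ONLY = 4, P2P_THEN_LOOP = 6 };

static enum topo map_topology(bool ptv, bool tf, unsigned int pt, bool g6_or_g7,
			      enum topo module_param, bool *persistent)
{
	*persistent = false;
	if (!ptv)			/* FW has no persistent topology support */
		return module_param;
	*persistent = true;
	if (g6_or_g7) {			/* Lancer G6/G7: no topology failover */
		if (tf) {
			*persistent = false;	/* invalid combination from FW */
			return module_param;
		}
		return (pt == 2 /* LINK_FLAGS_LOOP */) ? LOOP_ONLY : P2P_ONLY;
	}
	/* G5 and older: tf selects a failover mode, pt picks the first choice */
	if (tf)
		return pt ? P2P_THEN_LOOP : LOOP_THEN_P2P;
	return (pt == 1 /* LINK_FLAGS_P2P */) ? P2P_ONLY : LOOP_ONLY;
}

int main(void)
{
	bool persistent;
	enum topo t = map_topology(true, false, 1, true, LOOP_THEN_P2P, &persistent);

	printf("topology=%d persistent=%d\n", (int)t, (int)persistent);
	return 0;
}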
76668465 /**
76678466 * lpfc_sli4_read_config - Get the config parameters.
76688467 * @phba: pointer to lpfc hba data structure.
....@@ -7688,12 +8487,12 @@
76888487 struct lpfc_rsrc_desc_fcfcoe *desc;
76898488 char *pdesc_0;
76908489 uint16_t forced_link_speed;
7691
- uint32_t if_type;
8490
+ uint32_t if_type, qmin;
76928491 int length, i, rc = 0, rc2;
76938492
76948493 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
76958494 if (!pmb) {
7696
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8495
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
76978496 "2011 Unable to allocate memory for issuing "
76988497 "SLI_CONFIG_SPECIAL mailbox command\n");
76998498 return -ENOMEM;
....@@ -7703,11 +8502,11 @@
77038502
77048503 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
77058504 if (rc != MBX_SUCCESS) {
7706
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7707
- "2012 Mailbox failed , mbxCmd x%x "
7708
- "READ_CONFIG, mbxStatus x%x\n",
7709
- bf_get(lpfc_mqe_command, &pmb->u.mqe),
7710
- bf_get(lpfc_mqe_status, &pmb->u.mqe));
8505
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8506
+ "2012 Mailbox failed , mbxCmd x%x "
8507
+ "READ_CONFIG, mbxStatus x%x\n",
8508
+ bf_get(lpfc_mqe_command, &pmb->u.mqe),
8509
+ bf_get(lpfc_mqe_status, &pmb->u.mqe));
77118510 rc = -EIO;
77128511 } else {
77138512 rd_config = &pmb->u.mqe.un.rd_config;
....@@ -7730,10 +8529,16 @@
77308529 phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
77318530 }
77328531
8532
+ phba->sli4_hba.conf_trunk =
8533
+ bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
77338534 phba->sli4_hba.extents_in_use =
77348535 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
77358536 phba->sli4_hba.max_cfg_param.max_xri =
77368537 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
8538
+ /* Reduce resource usage in kdump environment */
8539
+ if (is_kdump_kernel() &&
8540
+ phba->sli4_hba.max_cfg_param.max_xri > 512)
8541
+ phba->sli4_hba.max_cfg_param.max_xri = 512;
77378542 phba->sli4_hba.max_cfg_param.xri_base =
77388543 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
77398544 phba->sli4_hba.max_cfg_param.max_vpi =
....@@ -7768,13 +8573,14 @@
77688573 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
77698574 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
77708575 phba->max_vports = phba->max_vpi;
8576
+ lpfc_map_topology(phba, rd_config);
77718577 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
77728578 "2003 cfg params Extents? %d "
77738579 "XRI(B:%d M:%d), "
77748580 "VPI(B:%d M:%d) "
77758581 "VFI(B:%d M:%d) "
77768582 "RPI(B:%d M:%d) "
7777
- "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
8583
+ "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
77788584 phba->sli4_hba.extents_in_use,
77798585 phba->sli4_hba.max_cfg_param.xri_base,
77808586 phba->sli4_hba.max_cfg_param.max_xri,
....@@ -7788,41 +8594,44 @@
77888594 phba->sli4_hba.max_cfg_param.max_eq,
77898595 phba->sli4_hba.max_cfg_param.max_cq,
77908596 phba->sli4_hba.max_cfg_param.max_wq,
7791
- phba->sli4_hba.max_cfg_param.max_rq);
8597
+ phba->sli4_hba.max_cfg_param.max_rq,
8598
+ phba->lmt);
77928599
77938600 /*
7794
- * Calculate NVME queue resources based on how
7795
- * many WQ/CQs are available.
8601
+ * Calculate queue resources based on how
8602
+ * many WQ/CQ/EQs are available.
77968603 */
7797
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
7798
- length = phba->sli4_hba.max_cfg_param.max_wq;
7799
- if (phba->sli4_hba.max_cfg_param.max_cq <
7800
- phba->sli4_hba.max_cfg_param.max_wq)
7801
- length = phba->sli4_hba.max_cfg_param.max_cq;
8604
+ qmin = phba->sli4_hba.max_cfg_param.max_wq;
8605
+ if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
8606
+ qmin = phba->sli4_hba.max_cfg_param.max_cq;
8607
+ if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
8608
+ qmin = phba->sli4_hba.max_cfg_param.max_eq;
8609
+ /*
8610
+	 * What's left after this can go toward NVME / FCP.
8611
+ * The minus 4 accounts for ELS, NVME LS, MBOX
8612
+ * plus one extra. When configured for
8613
+ * NVMET, FCP io channel WQs are not created.
8614
+ */
8615
+ qmin -= 4;
78028616
7803
- /*
7804
- * Whats left after this can go toward NVME.
7805
- * The minus 6 accounts for ELS, NVME LS, MBOX
7806
- * fof plus a couple extra. When configured for
7807
- * NVMET, FCP io channel WQs are not created.
7808
- */
7809
- length -= 6;
7810
- if (!phba->nvmet_support)
7811
- length -= phba->cfg_fcp_io_channel;
7812
-
7813
- if (phba->cfg_nvme_io_channel > length) {
7814
- lpfc_printf_log(
7815
- phba, KERN_ERR, LOG_SLI,
7816
- "2005 Reducing NVME IO channel to %d: "
7817
- "WQ %d CQ %d NVMEIO %d FCPIO %d\n",
7818
- length,
8617
+ /* Check to see if there is enough for NVME */
8618
+ if ((phba->cfg_irq_chann > qmin) ||
8619
+ (phba->cfg_hdw_queue > qmin)) {
8620
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8621
+ "2005 Reducing Queues - "
8622
+ "FW resource limitation: "
8623
+ "WQ %d CQ %d EQ %d: min %d: "
8624
+ "IRQ %d HDWQ %d\n",
78198625 phba->sli4_hba.max_cfg_param.max_wq,
78208626 phba->sli4_hba.max_cfg_param.max_cq,
7821
- phba->cfg_nvme_io_channel,
7822
- phba->cfg_fcp_io_channel);
8627
+ phba->sli4_hba.max_cfg_param.max_eq,
8628
+ qmin, phba->cfg_irq_chann,
8629
+ phba->cfg_hdw_queue);
78238630
7824
- phba->cfg_nvme_io_channel = length;
7825
- }
8631
+ if (phba->cfg_irq_chann > qmin)
8632
+ phba->cfg_irq_chann = qmin;
8633
+ if (phba->cfg_hdw_queue > qmin)
8634
+ phba->cfg_hdw_queue = qmin;
78268635 }
78278636 }
78288637
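/*
 * Editor's note: a worked example of the queue clamping above, using made-up
 * READ_CONFIG limits. With max_wq=1024, max_cq=1536 and max_eq=128 the
 * binding resource is the EQ count, so qmin = 128 - 4 = 124, and any
 * cfg_irq_chann / cfg_hdw_queue above that is clamped down to 124.
 */
#include <stdio.h>

static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
{
	unsigned int m = a < b ? a : b;

	return m < c ? m : c;
}

int main(void)
{
	unsigned int max_wq = 1024, max_cq = 1536, max_eq = 128;
	unsigned int irq_chann = 160, hdw_queue = 160;

	/* 4 queues are reserved for ELS, NVME LS, MBOX plus one extra */
	unsigned int qmin = min3u(max_wq, max_cq, max_eq) - 4;

	if (irq_chann > qmin)
		irq_chann = qmin;
	if (hdw_queue > qmin)
		hdw_queue = qmin;
	printf("qmin=%u irq_chann=%u hdw_queue=%u\n", qmin, irq_chann, hdw_queue);
	return 0;
}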
....@@ -7875,7 +8684,8 @@
78758684 LPFC_USER_LINK_SPEED_AUTO;
78768685 break;
78778686 default:
7878
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8687
+ lpfc_printf_log(phba, KERN_ERR,
8688
+ LOG_TRACE_EVENT,
78798689 "0047 Unrecognized link "
78808690 "speed : %d\n",
78818691 forced_link_speed);
....@@ -7912,7 +8722,7 @@
79128722 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
79138723 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
79148724 if (rc2 || shdr_status || shdr_add_status) {
7915
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8725
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
79168726 "3026 Mailbox failed , mbxCmd x%x "
79178727 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
79188728 bf_get(lpfc_mqe_command, &pmb->u.mqe),
....@@ -7949,9 +8759,9 @@
79498759 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
79508760 phba->sli4_hba.iov.vf_number);
79518761 else
7952
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8762
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
79538763 "3028 GET_FUNCTION_CONFIG: failed to find "
7954
- "Resrouce Descriptor:x%x\n",
8764
+ "Resource Descriptor:x%x\n",
79558765 LPFC_RSRC_DESC_TYPE_FCFCOE);
79568766
79578767 read_cfg_out:
....@@ -7986,7 +8796,7 @@
79868796 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
79878797 GFP_KERNEL);
79888798 if (!mboxq) {
7989
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8799
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
79908800 "0492 Unable to allocate memory for "
79918801 "issuing SLI_CONFIG_SPECIAL mailbox "
79928802 "command\n");
....@@ -8001,7 +8811,7 @@
80018811 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
80028812 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
80038813 if (rc != MBX_SUCCESS) {
8004
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8814
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
80058815 "0493 SLI_CONFIG_SPECIAL mailbox "
80068816 "failed with status x%x\n",
80078817 rc);
....@@ -8034,53 +8844,22 @@
80348844 static int
80358845 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
80368846 {
8037
- int io_channel;
8038
- int fof_vectors = phba->cfg_fof ? 1 : 0;
8039
-
80408847 /*
80418848 * Sanity check for configured queue parameters against the run-time
80428849 * device parameters
80438850 */
80448851
8045
- /* Sanity check on HBA EQ parameters */
8046
- io_channel = phba->io_channel_irqs;
8047
-
8048
- if (phba->sli4_hba.num_online_cpu < io_channel) {
8049
- lpfc_printf_log(phba,
8050
- KERN_ERR, LOG_INIT,
8051
- "3188 Reducing IO channels to match number of "
8052
- "online CPUs: from %d to %d\n",
8053
- io_channel, phba->sli4_hba.num_online_cpu);
8054
- io_channel = phba->sli4_hba.num_online_cpu;
8055
- }
8056
-
8057
- if (io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) {
8058
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8059
- "2575 Reducing IO channels to match number of "
8060
- "available EQs: from %d to %d\n",
8061
- io_channel,
8062
- phba->sli4_hba.max_cfg_param.max_eq);
8063
- io_channel = phba->sli4_hba.max_cfg_param.max_eq - fof_vectors;
8064
- }
8065
-
8066
- /* The actual number of FCP / NVME event queues adopted */
8067
- if (io_channel != phba->io_channel_irqs)
8068
- phba->io_channel_irqs = io_channel;
8069
- if (phba->cfg_fcp_io_channel > io_channel)
8070
- phba->cfg_fcp_io_channel = io_channel;
8071
- if (phba->cfg_nvme_io_channel > io_channel)
8072
- phba->cfg_nvme_io_channel = io_channel;
80738852 if (phba->nvmet_support) {
8074
- if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
8075
- phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
8853
+ if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
8854
+ phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
8855
+ if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
8856
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
80768857 }
8077
- if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
8078
- phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
80798858
80808859 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8081
- "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
8082
- phba->io_channel_irqs, phba->cfg_fcp_io_channel,
8083
- phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq);
8860
+ "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
8861
+ phba->cfg_hdw_queue, phba->cfg_irq_chann,
8862
+ phba->cfg_nvmet_mrq);
80848863
80858864 /* Get EQ depth from module parameter, fake the default for now */
80868865 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
....@@ -8093,80 +8872,57 @@
80938872 }
80948873
80958874 static int
8096
-lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
8875
+lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
80978876 {
80988877 struct lpfc_queue *qdesc;
8878
+ u32 wqesize;
8879
+ int cpu;
80998880
8100
- qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8101
- phba->sli4_hba.cq_esize,
8102
- LPFC_CQE_EXP_COUNT);
8103
- if (!qdesc) {
8104
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8105
- "0508 Failed allocate fast-path NVME CQ (%d)\n",
8106
- wqidx);
8107
- return 1;
8108
- }
8109
- qdesc->qe_valid = 1;
8110
- phba->sli4_hba.nvme_cq[wqidx] = qdesc;
8111
-
8112
- qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
8113
- LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT);
8114
- if (!qdesc) {
8115
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8116
- "0509 Failed allocate fast-path NVME WQ (%d)\n",
8117
- wqidx);
8118
- return 1;
8119
- }
8120
- phba->sli4_hba.nvme_wq[wqidx] = qdesc;
8121
- list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
8122
- return 0;
8123
-}
8124
-
8125
-static int
8126
-lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
8127
-{
8128
- struct lpfc_queue *qdesc;
8129
- uint32_t wqesize;
8130
-
8131
- /* Create Fast Path FCP CQs */
8881
+ cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
8882
+ /* Create Fast Path IO CQs */
81328883 if (phba->enab_exp_wqcq_pages)
81338884 /* Increase the CQ size when WQEs contain an embedded cdb */
81348885 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
81358886 phba->sli4_hba.cq_esize,
8136
- LPFC_CQE_EXP_COUNT);
8887
+ LPFC_CQE_EXP_COUNT, cpu);
81378888
81388889 else
81398890 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
81408891 phba->sli4_hba.cq_esize,
8141
- phba->sli4_hba.cq_ecount);
8892
+ phba->sli4_hba.cq_ecount, cpu);
81428893 if (!qdesc) {
8143
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8144
- "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
8894
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8895
+ "0499 Failed allocate fast-path IO CQ (%d)\n",
8896
+ idx);
81458897 return 1;
81468898 }
81478899 qdesc->qe_valid = 1;
8148
- phba->sli4_hba.fcp_cq[wqidx] = qdesc;
8900
+ qdesc->hdwq = idx;
8901
+ qdesc->chann = cpu;
8902
+ phba->sli4_hba.hdwq[idx].io_cq = qdesc;
81498903
8150
- /* Create Fast Path FCP WQs */
8904
+ /* Create Fast Path IO WQs */
81518905 if (phba->enab_exp_wqcq_pages) {
81528906 /* Increase the WQ size when WQEs contain an embedded cdb */
81538907 wqesize = (phba->fcp_embed_io) ?
81548908 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
81558909 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
81568910 wqesize,
8157
- LPFC_WQE_EXP_COUNT);
8911
+ LPFC_WQE_EXP_COUNT, cpu);
81588912 } else
81598913 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
81608914 phba->sli4_hba.wq_esize,
8161
- phba->sli4_hba.wq_ecount);
8915
+ phba->sli4_hba.wq_ecount, cpu);
81628916
81638917 if (!qdesc) {
8164
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8165
- "0503 Failed allocate fast-path FCP WQ (%d)\n",
8166
- wqidx);
8918
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8919
+ "0503 Failed allocate fast-path IO WQ (%d)\n",
8920
+ idx);
81678921 return 1;
81688922 }
8169
- phba->sli4_hba.fcp_wq[wqidx] = qdesc;
8923
+ qdesc->hdwq = idx;
8924
+ qdesc->chann = cpu;
8925
+ phba->sli4_hba.hdwq[idx].io_wq = qdesc;
81708926 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
81718927 return 0;
81728928 }
....@@ -8189,16 +8945,16 @@
81898945 lpfc_sli4_queue_create(struct lpfc_hba *phba)
81908946 {
81918947 struct lpfc_queue *qdesc;
8192
- int idx, io_channel;
8948
+ int idx, cpu, eqcpu;
8949
+ struct lpfc_sli4_hdw_queue *qp;
8950
+ struct lpfc_vector_map_info *cpup;
8951
+ struct lpfc_vector_map_info *eqcpup;
8952
+ struct lpfc_eq_intr_info *eqi;
81938953
81948954 /*
81958955 * Create HBA Record arrays.
81968956 * Both NVME and FCP will share that same vectors / EQs
81978957 */
8198
- io_channel = phba->io_channel_irqs;
8199
- if (!io_channel)
8200
- return -ERANGE;
8201
-
82028958 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
82038959 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
82048960 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
....@@ -8210,94 +8966,44 @@
82108966 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
82118967 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
82128968
8213
- phba->sli4_hba.hba_eq = kcalloc(io_channel,
8214
- sizeof(struct lpfc_queue *),
8215
- GFP_KERNEL);
8216
- if (!phba->sli4_hba.hba_eq) {
8217
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8218
- "2576 Failed allocate memory for "
8219
- "fast-path EQ record array\n");
8220
- goto out_error;
8221
- }
8222
-
8223
- if (phba->cfg_fcp_io_channel) {
8224
- phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel,
8225
- sizeof(struct lpfc_queue *),
8226
- GFP_KERNEL);
8227
- if (!phba->sli4_hba.fcp_cq) {
8228
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8229
- "2577 Failed allocate memory for "
8230
- "fast-path CQ record array\n");
8969
+ if (!phba->sli4_hba.hdwq) {
8970
+ phba->sli4_hba.hdwq = kcalloc(
8971
+ phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
8972
+ GFP_KERNEL);
8973
+ if (!phba->sli4_hba.hdwq) {
8974
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8975
+ "6427 Failed allocate memory for "
8976
+ "fast-path Hardware Queue array\n");
82318977 goto out_error;
82328978 }
8233
- phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel,
8234
- sizeof(struct lpfc_queue *),
8235
- GFP_KERNEL);
8236
- if (!phba->sli4_hba.fcp_wq) {
8237
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8238
- "2578 Failed allocate memory for "
8239
- "fast-path FCP WQ record array\n");
8240
- goto out_error;
8241
- }
8242
- /*
8243
- * Since the first EQ can have multiple CQs associated with it,
8244
- * this array is used to quickly see if we have a FCP fast-path
8245
- * CQ match.
8246
- */
8247
- phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel,
8248
- sizeof(uint16_t),
8249
- GFP_KERNEL);
8250
- if (!phba->sli4_hba.fcp_cq_map) {
8251
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8252
- "2545 Failed allocate memory for "
8253
- "fast-path CQ map\n");
8254
- goto out_error;
8979
+ /* Prepare hardware queues to take IO buffers */
8980
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
8981
+ qp = &phba->sli4_hba.hdwq[idx];
8982
+ spin_lock_init(&qp->io_buf_list_get_lock);
8983
+ spin_lock_init(&qp->io_buf_list_put_lock);
8984
+ INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
8985
+ INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
8986
+ qp->get_io_bufs = 0;
8987
+ qp->put_io_bufs = 0;
8988
+ qp->total_io_bufs = 0;
8989
+ spin_lock_init(&qp->abts_io_buf_list_lock);
8990
+ INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
8991
+ qp->abts_scsi_io_bufs = 0;
8992
+ qp->abts_nvme_io_bufs = 0;
8993
+ INIT_LIST_HEAD(&qp->sgl_list);
8994
+ INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
8995
+ spin_lock_init(&qp->hdwq_lock);
82558996 }
82568997 }
82578998
8258
- if (phba->cfg_nvme_io_channel) {
8259
- phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel,
8260
- sizeof(struct lpfc_queue *),
8261
- GFP_KERNEL);
8262
- if (!phba->sli4_hba.nvme_cq) {
8263
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8264
- "6077 Failed allocate memory for "
8265
- "fast-path CQ record array\n");
8266
- goto out_error;
8267
- }
8268
-
8269
- phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel,
8270
- sizeof(struct lpfc_queue *),
8271
- GFP_KERNEL);
8272
- if (!phba->sli4_hba.nvme_wq) {
8273
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8274
- "2581 Failed allocate memory for "
8275
- "fast-path NVME WQ record array\n");
8276
- goto out_error;
8277
- }
8278
-
8279
- /*
8280
- * Since the first EQ can have multiple CQs associated with it,
8281
- * this array is used to quickly see if we have a NVME fast-path
8282
- * CQ match.
8283
- */
8284
- phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel,
8285
- sizeof(uint16_t),
8286
- GFP_KERNEL);
8287
- if (!phba->sli4_hba.nvme_cq_map) {
8288
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8289
- "6078 Failed allocate memory for "
8290
- "fast-path CQ map\n");
8291
- goto out_error;
8292
- }
8293
-
8999
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
82949000 if (phba->nvmet_support) {
82959001 phba->sli4_hba.nvmet_cqset = kcalloc(
82969002 phba->cfg_nvmet_mrq,
82979003 sizeof(struct lpfc_queue *),
82989004 GFP_KERNEL);
82999005 if (!phba->sli4_hba.nvmet_cqset) {
8300
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9006
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
83019007 "3121 Fail allocate memory for "
83029008 "fast-path CQ set array\n");
83039009 goto out_error;
....@@ -8307,7 +9013,7 @@
83079013 sizeof(struct lpfc_queue *),
83089014 GFP_KERNEL);
83099015 if (!phba->sli4_hba.nvmet_mrq_hdr) {
8310
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9016
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
83119017 "3122 Fail allocate memory for "
83129018 "fast-path RQ set hdr array\n");
83139019 goto out_error;
....@@ -8317,7 +9023,7 @@
83179023 sizeof(struct lpfc_queue *),
83189024 GFP_KERNEL);
83199025 if (!phba->sli4_hba.nvmet_mrq_data) {
8320
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9026
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
83219027 "3124 Fail allocate memory for "
83229028 "fast-path RQ set data array\n");
83239029 goto out_error;
....@@ -8328,43 +9034,85 @@
83289034 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
83299035
83309036 /* Create HBA Event Queues (EQs) */
8331
- for (idx = 0; idx < io_channel; idx++) {
8332
- /* Create EQs */
9037
+ for_each_present_cpu(cpu) {
9038
+ /* We only want to create 1 EQ per vector, even though
9039
+		 * multiple CPUs might be using that vector, so we only
9040
+		 * select the CPUs that are marked LPFC_CPU_FIRST_IRQ.
9041
+ */
9042
+ cpup = &phba->sli4_hba.cpu_map[cpu];
9043
+ if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
9044
+ continue;
9045
+
9046
+ /* Get a ptr to the Hardware Queue associated with this CPU */
9047
+ qp = &phba->sli4_hba.hdwq[cpup->hdwq];
9048
+
9049
+ /* Allocate an EQ */
83339050 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
83349051 phba->sli4_hba.eq_esize,
8335
- phba->sli4_hba.eq_ecount);
9052
+ phba->sli4_hba.eq_ecount, cpu);
83369053 if (!qdesc) {
8337
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8338
- "0497 Failed allocate EQ (%d)\n", idx);
9054
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9055
+ "0497 Failed allocate EQ (%d)\n",
9056
+ cpup->hdwq);
83399057 goto out_error;
83409058 }
83419059 qdesc->qe_valid = 1;
8342
- phba->sli4_hba.hba_eq[idx] = qdesc;
9060
+ qdesc->hdwq = cpup->hdwq;
9061
+ qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
9062
+ qdesc->last_cpu = qdesc->chann;
9063
+
9064
+ /* Save the allocated EQ in the Hardware Queue */
9065
+ qp->hba_eq = qdesc;
9066
+
9067
+ eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
9068
+ list_add(&qdesc->cpu_list, &eqi->list);
83439069 }
83449070
8345
- /* FCP and NVME io channels are not required to be balanced */
9071
+ /* Now we need to populate the other Hardware Queues, that share
9072
+ * an IRQ vector, with the associated EQ ptr.
9073
+ */
9074
+ for_each_present_cpu(cpu) {
9075
+ cpup = &phba->sli4_hba.cpu_map[cpu];
83469076
8347
- for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
8348
- if (lpfc_alloc_fcp_wq_cq(phba, idx))
8349
- goto out_error;
9077
+ /* Check for EQ already allocated in previous loop */
9078
+ if (cpup->flag & LPFC_CPU_FIRST_IRQ)
9079
+ continue;
83509080
8351
- for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
8352
- if (lpfc_alloc_nvme_wq_cq(phba, idx))
9081
+ /* Check for multiple CPUs per hdwq */
9082
+ qp = &phba->sli4_hba.hdwq[cpup->hdwq];
9083
+ if (qp->hba_eq)
9084
+ continue;
9085
+
9086
+ /* We need to share an EQ for this hdwq */
9087
+ eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
9088
+ eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
9089
+ qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
9090
+ }
9091
+
9092
+ /* Allocate IO Path SLI4 CQ/WQs */
9093
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9094
+ if (lpfc_alloc_io_wq_cq(phba, idx))
83539095 goto out_error;
9096
+ }
83549097
83559098 if (phba->nvmet_support) {
83569099 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
9100
+ cpu = lpfc_find_cpu_handle(phba, idx,
9101
+ LPFC_FIND_BY_HDWQ);
83579102 qdesc = lpfc_sli4_queue_alloc(phba,
83589103 LPFC_DEFAULT_PAGE_SIZE,
83599104 phba->sli4_hba.cq_esize,
8360
- phba->sli4_hba.cq_ecount);
9105
+ phba->sli4_hba.cq_ecount,
9106
+ cpu);
83619107 if (!qdesc) {
8362
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8363
- "3142 Failed allocate NVME "
8364
- "CQ Set (%d)\n", idx);
9108
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9109
+ "3142 Failed allocate NVME "
9110
+ "CQ Set (%d)\n", idx);
83659111 goto out_error;
83669112 }
83679113 qdesc->qe_valid = 1;
9114
+ qdesc->hdwq = idx;
9115
+ qdesc->chann = cpu;
83689116 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
83699117 }
83709118 }
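/*
 * Editor's note: a toy model of the EQ sharing scheme above. Each CPU maps to
 * a hardware queue and an IRQ vector; only the first CPU on a vector
 * allocates an EQ, and every other hardware queue on that vector reuses it.
 * The 8-CPU / 4-vector layout and the cpu % NVEC vector assignment are
 * assumptions made purely for illustration.
 */
#include <stdio.h>

#define NCPU	8
#define NVEC	4

struct eq { int id; };

int main(void)
{
	struct eq eqs[NVEC];
	struct eq *hdwq_eq[NCPU] = { 0 };	/* one hdwq per CPU in this toy */
	int cpu, nalloc = 0;

	/* Pass 1: allocate one EQ per vector, on that vector's first CPU */
	for (cpu = 0; cpu < NCPU; cpu++) {
		int vec = cpu % NVEC;
		int first = (cpu < NVEC);	/* stand-in for LPFC_CPU_FIRST_IRQ */

		if (first) {
			eqs[vec].id = nalloc++;
			hdwq_eq[cpu] = &eqs[vec];
		}
	}

	/* Pass 2: hardware queues sharing a vector share that vector's EQ */
	for (cpu = 0; cpu < NCPU; cpu++) {
		int vec = cpu % NVEC;

		if (!hdwq_eq[cpu])
			hdwq_eq[cpu] = &eqs[vec];
		printf("cpu %d -> eq %d\n", cpu, hdwq_eq[cpu]->id);
	}
	printf("allocated %d EQs for %d CPUs\n", nalloc, NCPU);
	return 0;
}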
....@@ -8373,12 +9121,13 @@
83739121 * Create Slow Path Completion Queues (CQs)
83749122 */
83759123
9124
+ cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
83769125 /* Create slow-path Mailbox Command Complete Queue */
83779126 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
83789127 phba->sli4_hba.cq_esize,
8379
- phba->sli4_hba.cq_ecount);
9128
+ phba->sli4_hba.cq_ecount, cpu);
83809129 if (!qdesc) {
8381
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9130
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
83829131 "0500 Failed allocate slow-path mailbox CQ\n");
83839132 goto out_error;
83849133 }
....@@ -8388,13 +9137,14 @@
83889137 /* Create slow-path ELS Complete Queue */
83899138 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
83909139 phba->sli4_hba.cq_esize,
8391
- phba->sli4_hba.cq_ecount);
9140
+ phba->sli4_hba.cq_ecount, cpu);
83929141 if (!qdesc) {
8393
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9142
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
83949143 "0501 Failed allocate slow-path ELS CQ\n");
83959144 goto out_error;
83969145 }
83979146 qdesc->qe_valid = 1;
9147
+ qdesc->chann = cpu;
83989148 phba->sli4_hba.els_cq = qdesc;
83999149
84009150
....@@ -8406,12 +9156,13 @@
84069156
84079157 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
84089158 phba->sli4_hba.mq_esize,
8409
- phba->sli4_hba.mq_ecount);
9159
+ phba->sli4_hba.mq_ecount, cpu);
84109160 if (!qdesc) {
8411
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9161
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
84129162 "0505 Failed allocate slow-path MQ\n");
84139163 goto out_error;
84149164 }
9165
+ qdesc->chann = cpu;
84159166 phba->sli4_hba.mbx_wq = qdesc;
84169167
84179168 /*
....@@ -8421,12 +9172,13 @@
84219172 /* Create slow-path ELS Work Queue */
84229173 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
84239174 phba->sli4_hba.wq_esize,
8424
- phba->sli4_hba.wq_ecount);
9175
+ phba->sli4_hba.wq_ecount, cpu);
84259176 if (!qdesc) {
8426
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9177
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
84279178 "0504 Failed allocate slow-path ELS WQ\n");
84289179 goto out_error;
84299180 }
9181
+ qdesc->chann = cpu;
84309182 phba->sli4_hba.els_wq = qdesc;
84319183 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
84329184
....@@ -8434,24 +9186,26 @@
84349186 /* Create NVME LS Complete Queue */
84359187 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
84369188 phba->sli4_hba.cq_esize,
8437
- phba->sli4_hba.cq_ecount);
9189
+ phba->sli4_hba.cq_ecount, cpu);
84389190 if (!qdesc) {
8439
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9191
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
84409192 "6079 Failed allocate NVME LS CQ\n");
84419193 goto out_error;
84429194 }
9195
+ qdesc->chann = cpu;
84439196 qdesc->qe_valid = 1;
84449197 phba->sli4_hba.nvmels_cq = qdesc;
84459198
84469199 /* Create NVME LS Work Queue */
84479200 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
84489201 phba->sli4_hba.wq_esize,
8449
- phba->sli4_hba.wq_ecount);
9202
+ phba->sli4_hba.wq_ecount, cpu);
84509203 if (!qdesc) {
8451
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9204
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
84529205 "6080 Failed allocate NVME LS WQ\n");
84539206 goto out_error;
84549207 }
9208
+ qdesc->chann = cpu;
84559209 phba->sli4_hba.nvmels_wq = qdesc;
84569210 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
84579211 }
....@@ -8463,9 +9217,9 @@
84639217 /* Create Receive Queue for header */
84649218 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
84659219 phba->sli4_hba.rq_esize,
8466
- phba->sli4_hba.rq_ecount);
9220
+ phba->sli4_hba.rq_ecount, cpu);
84679221 if (!qdesc) {
8468
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9222
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
84699223 "0506 Failed allocate receive HRQ\n");
84709224 goto out_error;
84719225 }
....@@ -8474,34 +9228,40 @@
84749228 /* Create Receive Queue for data */
84759229 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
84769230 phba->sli4_hba.rq_esize,
8477
- phba->sli4_hba.rq_ecount);
9231
+ phba->sli4_hba.rq_ecount, cpu);
84789232 if (!qdesc) {
8479
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9233
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
84809234 "0507 Failed allocate receive DRQ\n");
84819235 goto out_error;
84829236 }
84839237 phba->sli4_hba.dat_rq = qdesc;
84849238
8485
- if (phba->nvmet_support) {
9239
+ if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
9240
+ phba->nvmet_support) {
84869241 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
9242
+ cpu = lpfc_find_cpu_handle(phba, idx,
9243
+ LPFC_FIND_BY_HDWQ);
84879244 /* Create NVMET Receive Queue for header */
84889245 qdesc = lpfc_sli4_queue_alloc(phba,
84899246 LPFC_DEFAULT_PAGE_SIZE,
84909247 phba->sli4_hba.rq_esize,
8491
- LPFC_NVMET_RQE_DEF_COUNT);
9248
+ LPFC_NVMET_RQE_DEF_COUNT,
9249
+ cpu);
84929250 if (!qdesc) {
8493
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9251
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
84949252 "3146 Failed allocate "
84959253 "receive HRQ\n");
84969254 goto out_error;
84979255 }
9256
+ qdesc->hdwq = idx;
84989257 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
84999258
85009259 /* Only needed for header of RQ pair */
8501
- qdesc->rqbp = kzalloc(sizeof(struct lpfc_rqb),
8502
- GFP_KERNEL);
9260
+ qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
9261
+ GFP_KERNEL,
9262
+ cpu_to_node(cpu));
85039263 if (qdesc->rqbp == NULL) {
8504
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9264
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
85059265 "6131 Failed allocate "
85069266 "Header RQBP\n");
85079267 goto out_error;
....@@ -8514,20 +9274,35 @@
85149274 qdesc = lpfc_sli4_queue_alloc(phba,
85159275 LPFC_DEFAULT_PAGE_SIZE,
85169276 phba->sli4_hba.rq_esize,
8517
- LPFC_NVMET_RQE_DEF_COUNT);
9277
+ LPFC_NVMET_RQE_DEF_COUNT,
9278
+ cpu);
85189279 if (!qdesc) {
8519
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9280
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
85209281 "3156 Failed allocate "
85219282 "receive DRQ\n");
85229283 goto out_error;
85239284 }
9285
+ qdesc->hdwq = idx;
85249286 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
85259287 }
85269288 }
85279289
8528
- /* Create the Queues needed for Flash Optimized Fabric operations */
8529
- if (phba->cfg_fof)
8530
- lpfc_fof_queue_create(phba);
9290
+ /* Clear NVME stats */
9291
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
9292
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9293
+ memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
9294
+ sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
9295
+ }
9296
+ }
9297
+
9298
+ /* Clear SCSI stats */
9299
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
9300
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9301
+ memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
9302
+ sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
9303
+ }
9304
+ }
9305
+
85319306 return 0;
85329307
85339308 out_error:
....@@ -8560,11 +9335,32 @@
85609335 }
85619336
85629337 static inline void
8563
-lpfc_sli4_release_queue_map(uint16_t **qmap)
9338
+lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
85649339 {
8565
- if (*qmap != NULL) {
8566
- kfree(*qmap);
8567
- *qmap = NULL;
9340
+ struct lpfc_sli4_hdw_queue *hdwq;
9341
+ struct lpfc_queue *eq;
9342
+ uint32_t idx;
9343
+
9344
+ hdwq = phba->sli4_hba.hdwq;
9345
+
9346
+ /* Loop thru all Hardware Queues */
9347
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
9348
+ /* Free the CQ/WQ corresponding to the Hardware Queue */
9349
+ lpfc_sli4_queue_free(hdwq[idx].io_cq);
9350
+ lpfc_sli4_queue_free(hdwq[idx].io_wq);
9351
+ hdwq[idx].hba_eq = NULL;
9352
+ hdwq[idx].io_cq = NULL;
9353
+ hdwq[idx].io_wq = NULL;
9354
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
9355
+ lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
9356
+ lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
9357
+ }
9358
+ /* Loop thru all IRQ vectors */
9359
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
9360
+ /* Free the EQ corresponding to the IRQ vector */
9361
+ eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
9362
+ lpfc_sli4_queue_free(eq);
9363
+ phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
85689364 }
85699365 }
85709366
....@@ -8583,33 +9379,25 @@
85839379 void
85849380 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
85859381 {
8586
- if (phba->cfg_fof)
8587
- lpfc_fof_queue_destroy(phba);
9382
+ /*
9383
+ * Set FREE_INIT before beginning to free the queues.
9384
+	 * Wait until the users of the queues acknowledge, by clearing
9385
+	 * FREE_WAIT, that the queues can be released.
9386
+ */
9387
+ spin_lock_irq(&phba->hbalock);
9388
+ phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
9389
+ while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
9390
+ spin_unlock_irq(&phba->hbalock);
9391
+ msleep(20);
9392
+ spin_lock_irq(&phba->hbalock);
9393
+ }
9394
+ spin_unlock_irq(&phba->hbalock);
9395
+
9396
+ lpfc_sli4_cleanup_poll_list(phba);
85889397
85899398 /* Release HBA eqs */
8590
- lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs);
8591
-
8592
- /* Release FCP cqs */
8593
- lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq,
8594
- phba->cfg_fcp_io_channel);
8595
-
8596
- /* Release FCP wqs */
8597
- lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq,
8598
- phba->cfg_fcp_io_channel);
8599
-
8600
- /* Release FCP CQ mapping array */
8601
- lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map);
8602
-
8603
- /* Release NVME cqs */
8604
- lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq,
8605
- phba->cfg_nvme_io_channel);
8606
-
8607
- /* Release NVME wqs */
8608
- lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq,
8609
- phba->cfg_nvme_io_channel);
8610
-
8611
- /* Release NVME CQ mapping array */
8612
- lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
9399
+ if (phba->sli4_hba.hdwq)
9400
+ lpfc_sli4_release_hdwq(phba);
86139401
86149402 if (phba->nvmet_support) {
86159403 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
....@@ -8645,6 +9433,11 @@
86459433
86469434 /* Everything on this list has been freed */
86479435 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
9436
+
9437
+ /* Done with freeing the queues */
9438
+ spin_lock_irq(&phba->hbalock);
9439
+ phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
9440
+ spin_unlock_irq(&phba->hbalock);
86489441 }
86499442
86509443 int
....@@ -8675,7 +9468,7 @@
86759468 int rc;
86769469
86779470 if (!eq || !cq || !wq) {
8678
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9471
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
86799472 "6085 Fast-path %s (%d) not allocated\n",
86809473 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
86819474 return -ENOMEM;
....@@ -8685,15 +9478,14 @@
86859478 rc = lpfc_cq_create(phba, cq, eq,
86869479 (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
86879480 if (rc) {
8688
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8689
- "6086 Failed setup of CQ (%d), rc = 0x%x\n",
8690
- qidx, (uint32_t)rc);
9481
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9482
+ "6086 Failed setup of CQ (%d), rc = 0x%x\n",
9483
+ qidx, (uint32_t)rc);
86919484 return rc;
86929485 }
8693
- cq->chann = qidx;
86949486
86959487 if (qtype != LPFC_MBOX) {
8696
- /* Setup nvme_cq_map for fast lookup */
9488
+ /* Setup cq_map for fast lookup */
86979489 if (cq_map)
86989490 *cq_map = cq->queue_id;
86999491
....@@ -8704,13 +9496,12 @@
87049496 /* create the wq */
87059497 rc = lpfc_wq_create(phba, wq, cq, qtype);
87069498 if (rc) {
8707
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8708
- "6123 Fail setup fastpath WQ (%d), rc = 0x%x\n",
9499
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9500
+ "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
87099501 qidx, (uint32_t)rc);
87109502 /* no need to tear down cq - caller will do so */
87119503 return rc;
87129504 }
8713
- wq->chann = qidx;
87149505
87159506 /* Bind this CQ/WQ to the NVME ring */
87169507 pring = wq->pring;
....@@ -8723,9 +9514,9 @@
87239514 } else {
87249515 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
87259516 if (rc) {
8726
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8727
- "0539 Failed setup of slow-path MQ: "
8728
- "rc = 0x%x\n", rc);
9517
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9518
+ "0539 Failed setup of slow-path MQ: "
9519
+ "rc = 0x%x\n", rc);
87299520 /* no need to tear down cq - caller will do so */
87309521 return rc;
87319522 }
....@@ -8737,6 +9528,38 @@
87379528 }
87389529
87399530 return 0;
9531
+}
9532
+
9533
+/**
9534
+ * lpfc_setup_cq_lookup - Setup the CQ lookup table
9535
+ * @phba: pointer to lpfc hba data structure.
9536
+ *
9537
+ * This routine will populate the cq_lookup table by all
9538
+ * available CQ queue_id's.
9539
+ **/
9540
+static void
9541
+lpfc_setup_cq_lookup(struct lpfc_hba *phba)
9542
+{
9543
+ struct lpfc_queue *eq, *childq;
9544
+ int qidx;
9545
+
9546
+ memset(phba->sli4_hba.cq_lookup, 0,
9547
+ (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
9548
+ /* Loop thru all IRQ vectors */
9549
+ for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9550
+ /* Get the EQ corresponding to the IRQ vector */
9551
+ eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
9552
+ if (!eq)
9553
+ continue;
9554
+ /* Loop through all CQs associated with that EQ */
9555
+ list_for_each_entry(childq, &eq->child_list, list) {
9556
+ if (childq->queue_id > phba->sli4_hba.cq_max)
9557
+ continue;
9558
+ if (childq->subtype == LPFC_IO)
9559
+ phba->sli4_hba.cq_lookup[childq->queue_id] =
9560
+ childq;
9561
+ }
9562
+ }
87409563 }
87419564
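A minimal sketch (illustrative only, with an assumed helper name) of how a completion handler could consume the table that lpfc_setup_cq_lookup() fills in: the CQID reported by an event queue entry indexes straight into cq_lookup instead of walking the EQ's child list.

static struct lpfc_queue *
example_cqid_to_cq(struct lpfc_hba *phba, uint16_t cqid)
{
	/* cq_lookup is sized for cq_max + 1 entries; an out-of-range id,
	 * or a table that was never allocated, returns NULL so the caller
	 * can fall back to the slower child-list search.
	 */
	if (!phba->sli4_hba.cq_lookup || cqid > phba->sli4_hba.cq_max)
		return NULL;
	return phba->sli4_hba.cq_lookup[cqid];
}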
87429565 /**
....@@ -8756,15 +9579,17 @@
87569579 {
87579580 uint32_t shdr_status, shdr_add_status;
87589581 union lpfc_sli4_cfg_shdr *shdr;
9582
+ struct lpfc_vector_map_info *cpup;
9583
+ struct lpfc_sli4_hdw_queue *qp;
87599584 LPFC_MBOXQ_t *mboxq;
8760
- int qidx;
8761
- uint32_t length, io_channel;
9585
+ int qidx, cpu;
9586
+ uint32_t length, usdelay;
87629587 int rc = -ENOMEM;
87639588
87649589 /* Check for dual-ULP support */
87659590 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
87669591 if (!mboxq) {
8767
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9592
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
87689593 "3249 Unable to allocate memory for "
87699594 "QUERY_FW_CFG mailbox command\n");
87709595 return -ENOMEM;
....@@ -8782,12 +9607,11 @@
87829607 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
87839608 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
87849609 if (shdr_status || shdr_add_status || rc) {
8785
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9610
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
87869611 "3250 QUERY_FW_CFG mailbox failed with status "
87879612 "x%x add_status x%x, mbx status x%x\n",
87889613 shdr_status, shdr_add_status, rc);
8789
- if (rc != MBX_TIMEOUT)
8790
- mempool_free(mboxq, phba->mbox_mem_pool);
9614
+ mempool_free(mboxq, phba->mbox_mem_pool);
87919615 rc = -ENXIO;
87929616 goto out_error;
87939617 }
....@@ -8803,95 +9627,76 @@
88039627 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
88049628 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
88059629
8806
- if (rc != MBX_TIMEOUT)
8807
- mempool_free(mboxq, phba->mbox_mem_pool);
9630
+ mempool_free(mboxq, phba->mbox_mem_pool);
88089631
88099632 /*
88109633 * Set up HBA Event Queues (EQs)
88119634 */
8812
- io_channel = phba->io_channel_irqs;
9635
+ qp = phba->sli4_hba.hdwq;
88139636
88149637 /* Set up HBA event queue */
8815
- if (io_channel && !phba->sli4_hba.hba_eq) {
8816
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9638
+ if (!qp) {
9639
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
88179640 "3147 Fast-path EQs not allocated\n");
88189641 rc = -ENOMEM;
88199642 goto out_error;
88209643 }
8821
- for (qidx = 0; qidx < io_channel; qidx++) {
8822
- if (!phba->sli4_hba.hba_eq[qidx]) {
8823
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8824
- "0522 Fast-path EQ (%d) not "
8825
- "allocated\n", qidx);
8826
- rc = -ENOMEM;
8827
- goto out_destroy;
9644
+
9645
+ /* Loop thru all IRQ vectors */
9646
+ for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
9647
+ /* Create HBA Event Queues (EQs) in order */
9648
+ for_each_present_cpu(cpu) {
9649
+ cpup = &phba->sli4_hba.cpu_map[cpu];
9650
+
9651
+ /* Look for the CPU that's using that vector with
9652
+ * LPFC_CPU_FIRST_IRQ set.
9653
+ */
9654
+ if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
9655
+ continue;
9656
+ if (qidx != cpup->eq)
9657
+ continue;
9658
+
9659
+ /* Create an EQ for that vector */
9660
+ rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
9661
+ phba->cfg_fcp_imax);
9662
+ if (rc) {
9663
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9664
+ "0523 Failed setup of fast-path"
9665
+ " EQ (%d), rc = 0x%x\n",
9666
+ cpup->eq, (uint32_t)rc);
9667
+ goto out_destroy;
9668
+ }
9669
+
9670
+ /* Save the EQ for that vector in the hba_eq_hdl */
9671
+ phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
9672
+ qp[cpup->hdwq].hba_eq;
9673
+
9674
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9675
+ "2584 HBA EQ setup: queue[%d]-id=%d\n",
9676
+ cpup->eq,
9677
+ qp[cpup->hdwq].hba_eq->queue_id);
88289678 }
8829
- rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx],
8830
- phba->cfg_fcp_imax);
9679
+ }
9680
+
9681
+ /* Loop thru all Hardware Queues */
9682
+ for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
9683
+ cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
9684
+ cpup = &phba->sli4_hba.cpu_map[cpu];
9685
+
9686
+ /* Create the CQ/WQ corresponding to the Hardware Queue */
9687
+ rc = lpfc_create_wq_cq(phba,
9688
+ phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
9689
+ qp[qidx].io_cq,
9690
+ qp[qidx].io_wq,
9691
+ &phba->sli4_hba.hdwq[qidx].io_cq_map,
9692
+ qidx,
9693
+ LPFC_IO);
88319694 if (rc) {
8832
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8833
- "0523 Failed setup of fast-path EQ "
8834
- "(%d), rc = 0x%x\n", qidx,
8835
- (uint32_t)rc);
8836
- goto out_destroy;
8837
- }
8838
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8839
- "2584 HBA EQ setup: queue[%d]-id=%d\n",
8840
- qidx, phba->sli4_hba.hba_eq[qidx]->queue_id);
8841
- }
8842
-
8843
- if (phba->cfg_nvme_io_channel) {
8844
- if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) {
8845
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8846
- "6084 Fast-path NVME %s array not allocated\n",
8847
- (phba->sli4_hba.nvme_cq) ? "CQ" : "WQ");
8848
- rc = -ENOMEM;
8849
- goto out_destroy;
8850
- }
8851
-
8852
- for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
8853
- rc = lpfc_create_wq_cq(phba,
8854
- phba->sli4_hba.hba_eq[
8855
- qidx % io_channel],
8856
- phba->sli4_hba.nvme_cq[qidx],
8857
- phba->sli4_hba.nvme_wq[qidx],
8858
- &phba->sli4_hba.nvme_cq_map[qidx],
8859
- qidx, LPFC_NVME);
8860
- if (rc) {
8861
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8862
- "6123 Failed to setup fastpath "
8863
- "NVME WQ/CQ (%d), rc = 0x%x\n",
8864
- qidx, (uint32_t)rc);
8865
- goto out_destroy;
8866
- }
8867
- }
8868
- }
8869
-
8870
- if (phba->cfg_fcp_io_channel) {
8871
- /* Set up fast-path FCP Response Complete Queue */
8872
- if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) {
8873
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8874
- "3148 Fast-path FCP %s array not allocated\n",
8875
- phba->sli4_hba.fcp_cq ? "WQ" : "CQ");
8876
- rc = -ENOMEM;
8877
- goto out_destroy;
8878
- }
8879
-
8880
- for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
8881
- rc = lpfc_create_wq_cq(phba,
8882
- phba->sli4_hba.hba_eq[
8883
- qidx % io_channel],
8884
- phba->sli4_hba.fcp_cq[qidx],
8885
- phba->sli4_hba.fcp_wq[qidx],
8886
- &phba->sli4_hba.fcp_cq_map[qidx],
8887
- qidx, LPFC_FCP);
8888
- if (rc) {
8889
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9695
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
88909696 "0535 Failed to setup fastpath "
8891
- "FCP WQ/CQ (%d), rc = 0x%x\n",
9697
+ "IO WQ/CQ (%d), rc = 0x%x\n",
88929698 qidx, (uint32_t)rc);
8893
- goto out_destroy;
8894
- }
9699
+ goto out_destroy;
88959700 }
88969701 }
88979702
....@@ -8902,7 +9707,7 @@
89029707 /* Set up slow-path MBOX CQ/MQ */
89039708
89049709 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
8905
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9710
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
89069711 "0528 %s not allocated\n",
89079712 phba->sli4_hba.mbx_cq ?
89089713 "Mailbox WQ" : "Mailbox CQ");
....@@ -8910,19 +9715,19 @@
89109715 goto out_destroy;
89119716 }
89129717
8913
- rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
9718
+ rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
89149719 phba->sli4_hba.mbx_cq,
89159720 phba->sli4_hba.mbx_wq,
89169721 NULL, 0, LPFC_MBOX);
89179722 if (rc) {
8918
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9723
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
89199724 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
89209725 (uint32_t)rc);
89219726 goto out_destroy;
89229727 }
89239728 if (phba->nvmet_support) {
89249729 if (!phba->sli4_hba.nvmet_cqset) {
8925
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9730
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
89269731 "3165 Fast-path NVME CQ Set "
89279732 "array not allocated\n");
89289733 rc = -ENOMEM;
....@@ -8931,10 +9736,10 @@
89319736 if (phba->cfg_nvmet_mrq > 1) {
89329737 rc = lpfc_cq_create_set(phba,
89339738 phba->sli4_hba.nvmet_cqset,
8934
- phba->sli4_hba.hba_eq,
9739
+ qp,
89359740 LPFC_WCQ, LPFC_NVMET);
89369741 if (rc) {
8937
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9742
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
89389743 "3164 Failed setup of NVME CQ "
89399744 "Set, rc = 0x%x\n",
89409745 (uint32_t)rc);
....@@ -8943,10 +9748,10 @@
89439748 } else {
89449749 /* Set up NVMET Receive Complete Queue */
89459750 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
8946
- phba->sli4_hba.hba_eq[0],
9751
+ qp[0].hba_eq,
89479752 LPFC_WCQ, LPFC_NVMET);
89489753 if (rc) {
8949
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9754
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
89509755 "6089 Failed setup NVMET CQ: "
89519756 "rc = 0x%x\n", (uint32_t)rc);
89529757 goto out_destroy;
....@@ -8957,26 +9762,26 @@
89579762 "6090 NVMET CQ setup: cq-id=%d, "
89589763 "parent eq-id=%d\n",
89599764 phba->sli4_hba.nvmet_cqset[0]->queue_id,
8960
- phba->sli4_hba.hba_eq[0]->queue_id);
9765
+ qp[0].hba_eq->queue_id);
89619766 }
89629767 }
89639768
89649769 /* Set up slow-path ELS WQ/CQ */
89659770 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
8966
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9771
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
89679772 "0530 ELS %s not allocated\n",
89689773 phba->sli4_hba.els_cq ? "WQ" : "CQ");
89699774 rc = -ENOMEM;
89709775 goto out_destroy;
89719776 }
8972
- rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
8973
- phba->sli4_hba.els_cq,
8974
- phba->sli4_hba.els_wq,
8975
- NULL, 0, LPFC_ELS);
9777
+ rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9778
+ phba->sli4_hba.els_cq,
9779
+ phba->sli4_hba.els_wq,
9780
+ NULL, 0, LPFC_ELS);
89769781 if (rc) {
8977
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8978
- "0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
8979
- (uint32_t)rc);
9782
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9783
+ "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
9784
+ (uint32_t)rc);
89809785 goto out_destroy;
89819786 }
89829787 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
....@@ -8984,23 +9789,23 @@
89849789 phba->sli4_hba.els_wq->queue_id,
89859790 phba->sli4_hba.els_cq->queue_id);
89869791
8987
- if (phba->cfg_nvme_io_channel) {
9792
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
89889793 /* Set up NVME LS Complete Queue */
89899794 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
8990
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9795
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
89919796 "6091 LS %s not allocated\n",
89929797 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
89939798 rc = -ENOMEM;
89949799 goto out_destroy;
89959800 }
8996
- rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
8997
- phba->sli4_hba.nvmels_cq,
8998
- phba->sli4_hba.nvmels_wq,
8999
- NULL, 0, LPFC_NVME_LS);
9801
+ rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
9802
+ phba->sli4_hba.nvmels_cq,
9803
+ phba->sli4_hba.nvmels_wq,
9804
+ NULL, 0, LPFC_NVME_LS);
90009805 if (rc) {
9001
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9002
- "0529 Failed setup of NVVME LS WQ/CQ: "
9003
- "rc = 0x%x\n", (uint32_t)rc);
9806
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9807
+ "0526 Failed setup of NVVME LS WQ/CQ: "
9808
+ "rc = 0x%x\n", (uint32_t)rc);
90049809 goto out_destroy;
90059810 }
90069811
....@@ -9018,7 +9823,7 @@
90189823 if ((!phba->sli4_hba.nvmet_cqset) ||
90199824 (!phba->sli4_hba.nvmet_mrq_hdr) ||
90209825 (!phba->sli4_hba.nvmet_mrq_data)) {
9021
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9826
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
90229827 "6130 MRQ CQ Queues not "
90239828 "allocated\n");
90249829 rc = -ENOMEM;
....@@ -9031,7 +9836,7 @@
90319836 phba->sli4_hba.nvmet_cqset,
90329837 LPFC_NVMET);
90339838 if (rc) {
9034
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9839
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
90359840 "6098 Failed setup of NVMET "
90369841 "MRQ: rc = 0x%x\n",
90379842 (uint32_t)rc);
....@@ -9045,7 +9850,7 @@
90459850 phba->sli4_hba.nvmet_cqset[0],
90469851 LPFC_NVMET);
90479852 if (rc) {
9048
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9853
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
90499854 "6057 Failed setup of NVMET "
90509855 "Receive Queue: rc = 0x%x\n",
90519856 (uint32_t)rc);
....@@ -9064,7 +9869,7 @@
90649869 }
90659870
90669871 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
9067
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9872
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
90689873 "0540 Receive Queue not allocated\n");
90699874 rc = -ENOMEM;
90709875 goto out_destroy;
....@@ -9073,7 +9878,7 @@
90739878 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
90749879 phba->sli4_hba.els_cq, LPFC_USOL);
90759880 if (rc) {
9076
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9881
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
90779882 "0541 Failed setup of Receive Queue: "
90789883 "rc = 0x%x\n", (uint32_t)rc);
90799884 goto out_destroy;
....@@ -9086,20 +9891,29 @@
90869891 phba->sli4_hba.dat_rq->queue_id,
90879892 phba->sli4_hba.els_cq->queue_id);
90889893
9089
- if (phba->cfg_fof) {
9090
- rc = lpfc_fof_queue_setup(phba);
9091
- if (rc) {
9092
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9093
- "0549 Failed setup of FOF Queues: "
9094
- "rc = 0x%x\n", rc);
9894
+ if (phba->cfg_fcp_imax)
9895
+ usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
9896
+ else
9897
+ usdelay = 0;
9898
+
9899
+ for (qidx = 0; qidx < phba->cfg_irq_chann;
9900
+ qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
9901
+ lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
9902
+ usdelay);
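/*
 * Illustrative aside, not part of the patch: assuming LPFC_SEC_TO_USEC
 * is 1000000, a cfg_fcp_imax of 50000 interrupts per second works out
 * to usdelay = 1000000 / 50000 = 20 microseconds of EQ coalescing
 * delay, while a cfg_fcp_imax of 0 leaves usdelay at 0 (no delay).
 */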
9903
+
9904
+ if (phba->sli4_hba.cq_max) {
9905
+ kfree(phba->sli4_hba.cq_lookup);
9906
+ phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
9907
+ sizeof(struct lpfc_queue *), GFP_KERNEL);
9908
+ if (!phba->sli4_hba.cq_lookup) {
9909
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9910
+ "0549 Failed setup of CQ Lookup table: "
9911
+ "size 0x%x\n", phba->sli4_hba.cq_max);
9912
+ rc = -ENOMEM;
90959913 goto out_destroy;
90969914 }
9915
+ lpfc_setup_cq_lookup(phba);
90979916 }
9098
-
9099
- for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
9100
- lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
9101
- phba->cfg_fcp_imax);
9102
-
91039917 return 0;
91049918
91059919 out_destroy:
....@@ -9123,11 +9937,9 @@
91239937 void
91249938 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
91259939 {
9940
+ struct lpfc_sli4_hdw_queue *qp;
9941
+ struct lpfc_queue *eq;
91269942 int qidx;
9127
-
9128
- /* Unset the queues created for Flash Optimized Fabric operations */
9129
- if (phba->cfg_fof)
9130
- lpfc_fof_queue_destroy(phba);
91319943
91329944 /* Unset mailbox command work queue */
91339945 if (phba->sli4_hba.mbx_wq)
....@@ -9146,17 +9958,6 @@
91469958 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
91479959 phba->sli4_hba.dat_rq);
91489960
9149
- /* Unset FCP work queue */
9150
- if (phba->sli4_hba.fcp_wq)
9151
- for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
9152
- lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]);
9153
-
9154
- /* Unset NVME work queue */
9155
- if (phba->sli4_hba.nvme_wq) {
9156
- for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
9157
- lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]);
9158
- }
9159
-
91609961 /* Unset mailbox command complete queue */
91619962 if (phba->sli4_hba.mbx_cq)
91629963 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
....@@ -9168,11 +9969,6 @@
91689969 /* Unset NVME LS complete queue */
91699970 if (phba->sli4_hba.nvmels_cq)
91709971 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
9171
-
9172
- /* Unset NVME response complete queue */
9173
- if (phba->sli4_hba.nvme_cq)
9174
- for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
9175
- lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
91769972
91779973 if (phba->nvmet_support) {
91789974 /* Unset NVMET MRQ queue */
....@@ -9192,15 +9988,26 @@
91929988 }
91939989 }
91949990
9195
- /* Unset FCP response complete queue */
9196
- if (phba->sli4_hba.fcp_cq)
9197
- for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
9198
- lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]);
9991
+ /* Unset fast-path SLI4 queues */
9992
+ if (phba->sli4_hba.hdwq) {
9993
+ /* Loop thru all Hardware Queues */
9994
+ for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
9995
+ /* Destroy the CQ/WQ corresponding to Hardware Queue */
9996
+ qp = &phba->sli4_hba.hdwq[qidx];
9997
+ lpfc_wq_destroy(phba, qp->io_wq);
9998
+ lpfc_cq_destroy(phba, qp->io_cq);
9999
+ }
10000
+ /* Loop thru all IRQ vectors */
10001
+ for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
10002
+ /* Destroy the EQ corresponding to the IRQ vector */
10003
+ eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
10004
+ lpfc_eq_destroy(phba, eq);
10005
+ }
10006
+ }
919910007
9200
- /* Unset fast-path event queue */
9201
- if (phba->sli4_hba.hba_eq)
9202
- for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
9203
- lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]);
10008
+ kfree(phba->sli4_hba.cq_lookup);
10009
+ phba->sli4_hba.cq_lookup = NULL;
10010
+ phba->sli4_hba.cq_max = 0;
920410011 }
920510012
920610013 /**
....@@ -9346,26 +10153,28 @@
934610153 static void
934710154 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
934810155 {
9349
- LIST_HEAD(cqelist);
9350
- struct lpfc_cq_event *cqe;
10156
+ LIST_HEAD(cq_event_list);
10157
+ struct lpfc_cq_event *cq_event;
935110158 unsigned long iflags;
935210159
935310160 /* Retrieve all the pending WCQEs from pending WCQE lists */
9354
- spin_lock_irqsave(&phba->hbalock, iflags);
9355
- /* Pending FCP XRI abort events */
9356
- list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
9357
- &cqelist);
9358
- /* Pending ELS XRI abort events */
9359
- list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
9360
- &cqelist);
9361
- /* Pending asynnc events */
9362
- list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
9363
- &cqelist);
9364
- spin_unlock_irqrestore(&phba->hbalock, iflags);
936510161
9366
- while (!list_empty(&cqelist)) {
9367
- list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
9368
- lpfc_sli4_cq_event_release(phba, cqe);
10162
+ /* Pending ELS XRI abort events */
10163
+ spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
10164
+ list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
10165
+ &cq_event_list);
10166
+ spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
10167
+
10168
+ /* Pending async events */
10169
+ spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
10170
+ list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
10171
+ &cq_event_list);
10172
+ spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
10173
+
10174
+ while (!list_empty(&cq_event_list)) {
10175
+ list_remove_head(&cq_event_list, cq_event,
10176
+ struct lpfc_cq_event, list);
10177
+ lpfc_sli4_cq_event_release(phba, cq_event);
936910178 }
937010179 }
937110180
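The rewrite above follows the usual splice-and-drain shape: each pending list is moved onto a private list under its own lock, and the entries are then released with no lock held. A generic sketch of that pattern, with a hypothetical function name and purely for illustration:

static void example_drain_events(struct lpfc_hba *phba, spinlock_t *lock,
				 struct list_head *pending)
{
	struct lpfc_cq_event *cq_event, *next;
	unsigned long iflags;
	LIST_HEAD(tmp);

	/* Detach everything that is currently queued... */
	spin_lock_irqsave(lock, iflags);
	list_splice_init(pending, &tmp);
	spin_unlock_irqrestore(lock, iflags);

	/* ...then release it without holding the list lock. */
	list_for_each_entry_safe(cq_event, next, &tmp, list) {
		list_del_init(&cq_event->list);
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}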
....@@ -9399,7 +10208,7 @@
939910208 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
940010209 GFP_KERNEL);
940110210 if (!mboxq) {
9402
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10211
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
940310212 "0494 Unable to allocate memory for "
940410213 "issuing SLI_FUNCTION_RESET mailbox "
940510214 "command\n");
....@@ -9416,10 +10225,9 @@
941610225 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
941710226 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
941810227 &shdr->response);
9419
- if (rc != MBX_TIMEOUT)
9420
- mempool_free(mboxq, phba->mbox_mem_pool);
10228
+ mempool_free(mboxq, phba->mbox_mem_pool);
942110229 if (shdr_status || shdr_add_status || rc) {
9422
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10230
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
942310231 "0495 SLI_FUNCTION_RESET mailbox "
942410232 "failed with status x%x add_status x%x,"
942510233 " mbx status x%x\n",
....@@ -9451,7 +10259,7 @@
945110259 phba->sli4_hba.u.if_type2.ERR1regaddr);
945210260 phba->work_status[1] = readl(
945310261 phba->sli4_hba.u.if_type2.ERR2regaddr);
9454
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10262
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
945510263 "2890 Port not ready, port status reg "
945610264 "0x%x error 1=0x%x, error 2=0x%x\n",
945710265 reg_data.word0,
....@@ -9493,7 +10301,7 @@
949310301 out:
949410302 /* Catch the not-ready port failure after a port reset. */
949510303 if (rc) {
9496
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10304
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
949710305 "3317 HBA not functional: IP Reset Failed "
949810306 "try: echo fw_reset > board_mode\n");
949910307 rc = -ENODEV;
....@@ -9516,25 +10324,20 @@
951610324 static int
951710325 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
951810326 {
9519
- struct pci_dev *pdev;
10327
+ struct pci_dev *pdev = phba->pcidev;
952010328 unsigned long bar0map_len, bar1map_len, bar2map_len;
9521
- int error = -ENODEV;
10329
+ int error;
952210330 uint32_t if_type;
952310331
9524
- /* Obtain PCI device reference */
9525
- if (!phba->pcidev)
9526
- return error;
9527
- else
9528
- pdev = phba->pcidev;
10332
+ if (!pdev)
10333
+ return -ENODEV;
952910334
953010335 /* Set the device DMA mask size */
9531
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
9532
- || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
9533
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
9534
- || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
9535
- return error;
9536
- }
9537
- }
10336
+ error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10337
+ if (error)
10338
+ error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10339
+ if (error)
10340
+ return error;
953810341
953910342 /*
954010343 * The BARs and register set definitions and offset locations are
....@@ -9542,17 +10345,17 @@
954210345 */
954310346 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
954410347 &phba->sli4_hba.sli_intf.word0)) {
9545
- return error;
10348
+ return -ENODEV;
954610349 }
954710350
954810351 /* There is no SLI3 failback for SLI4 devices. */
954910352 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
955010353 LPFC_SLI_INTF_VALID) {
9551
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10354
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
955210355 "2894 SLI_INTF reg contents invalid "
955310356 "sli_intf reg 0x%x\n",
955410357 phba->sli4_hba.sli_intf.word0);
9555
- return error;
10358
+ return -ENODEV;
955610359 }
955710360
955810361 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
....@@ -9576,7 +10379,7 @@
957610379 dev_printk(KERN_ERR, &pdev->dev,
957710380 "ioremap failed for SLI4 PCI config "
957810381 "registers.\n");
9579
- goto out;
10382
+ return -ENODEV;
958010383 }
958110384 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
958210385 /* Set up BAR0 PCI config space register memory map */
....@@ -9587,7 +10390,7 @@
958710390 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
958810391 dev_printk(KERN_ERR, &pdev->dev,
958910392 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
9590
- goto out;
10393
+ return -ENODEV;
959110394 }
959210395 phba->sli4_hba.conf_regs_memmap_p =
959310396 ioremap(phba->pci_bar0_map, bar0map_len);
....@@ -9595,7 +10398,7 @@
959510398 dev_printk(KERN_ERR, &pdev->dev,
959610399 "ioremap failed for SLI4 PCI config "
959710400 "registers.\n");
9598
- goto out;
10401
+ return -ENODEV;
959910402 }
960010403 lpfc_sli4_bar0_register_memmap(phba, if_type);
960110404 }
....@@ -9641,6 +10444,7 @@
964110444 if (!phba->sli4_hba.drbl_regs_memmap_p) {
964210445 dev_err(&pdev->dev,
964310446 "ioremap failed for SLI4 HBA doorbell registers.\n");
10447
+ error = -ENOMEM;
964410448 goto out_iounmap_conf;
964510449 }
964610450 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
....@@ -9673,7 +10477,7 @@
967310477 goto out_iounmap_all;
967410478 } else {
967510479 error = -ENOMEM;
9676
- goto out_iounmap_all;
10480
+ goto out_iounmap_ctrl;
967710481 }
967810482 }
967910483
....@@ -9690,7 +10494,8 @@
969010494 if (!phba->sli4_hba.dpp_regs_memmap_p) {
969110495 dev_err(&pdev->dev,
969210496 "ioremap failed for SLI4 HBA dpp registers.\n");
9693
- goto out_iounmap_ctrl;
10497
+ error = -ENOMEM;
10498
+ goto out_iounmap_all;
969410499 }
969510500 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
969610501 }
....@@ -9700,13 +10505,13 @@
970010505 case LPFC_SLI_INTF_IF_TYPE_0:
970110506 case LPFC_SLI_INTF_IF_TYPE_2:
970210507 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
9703
- phba->sli4_hba.sli4_eq_release = lpfc_sli4_eq_release;
9704
- phba->sli4_hba.sli4_cq_release = lpfc_sli4_cq_release;
10508
+ phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
10509
+ phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
970510510 break;
970610511 case LPFC_SLI_INTF_IF_TYPE_6:
970710512 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
9708
- phba->sli4_hba.sli4_eq_release = lpfc_sli4_if6_eq_release;
9709
- phba->sli4_hba.sli4_cq_release = lpfc_sli4_if6_cq_release;
10513
+ phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
10514
+ phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
971010515 break;
971110516 default:
971210517 break;
....@@ -9715,12 +10520,14 @@
971510520 return 0;
971610521
971710522 out_iounmap_all:
9718
- iounmap(phba->sli4_hba.drbl_regs_memmap_p);
10523
+ if (phba->sli4_hba.drbl_regs_memmap_p)
10524
+ iounmap(phba->sli4_hba.drbl_regs_memmap_p);
971910525 out_iounmap_ctrl:
9720
- iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10526
+ if (phba->sli4_hba.ctrl_regs_memmap_p)
10527
+ iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
972110528 out_iounmap_conf:
972210529 iounmap(phba->sli4_hba.conf_regs_memmap_p);
9723
-out:
10530
+
972410531 return error;
972510532 }
972610533
....@@ -9749,6 +10556,8 @@
974910556 case LPFC_SLI_INTF_IF_TYPE_6:
975010557 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
975110558 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10559
+ if (phba->sli4_hba.dpp_regs_memmap_p)
10560
+ iounmap(phba->sli4_hba.dpp_regs_memmap_p);
975210561 break;
975310562 case LPFC_SLI_INTF_IF_TYPE_1:
975410563 default:
....@@ -9819,7 +10628,7 @@
981910628
982010629 if (!pmb) {
982110630 rc = -ENOMEM;
9822
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10631
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
982310632 "0474 Unable to allocate memory for issuing "
982410633 "MBOX_CONFIG_MSI command\n");
982510634 goto mem_fail_out;
....@@ -9902,6 +10711,7 @@
990210711 /**
990310712 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
990410713 * @phba: pointer to lpfc hba data structure.
10714
+ * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
990510715 *
990610716 * This routine is invoked to enable device interrupt and associate driver's
990710717 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
....@@ -9987,65 +10797,714 @@
998710797 }
998810798
998910799 /**
10800
+ * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
10801
+ * @phba: pointer to lpfc hba data structure.
10802
+ * @id: EQ vector index or Hardware Queue index
10803
+ * @match: LPFC_FIND_BY_EQ = match by EQ
10804
+ * LPFC_FIND_BY_HDWQ = match by Hardware Queue
10805
+ * Return the CPU that matches the selection criteria
10806
+ */
10807
+static uint16_t
10808
+lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
10809
+{
10810
+ struct lpfc_vector_map_info *cpup;
10811
+ int cpu;
10812
+
10813
+ /* Loop through all CPUs */
10814
+ for_each_present_cpu(cpu) {
10815
+ cpup = &phba->sli4_hba.cpu_map[cpu];
10816
+
10817
+ /* If we are matching by EQ, there may be multiple CPUs using
10818
+ * the same vector, so select the one with
10819
+ * LPFC_CPU_FIRST_IRQ set.
10820
+ */
10821
+ if ((match == LPFC_FIND_BY_EQ) &&
10822
+ (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
10823
+ (cpup->eq == id))
10824
+ return cpu;
10825
+
10826
+ /* If matching by HDWQ, select the first CPU that matches */
10827
+ if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
10828
+ return cpu;
10829
+ }
10830
+ return 0;
10831
+}
10832
+
10833
+#ifdef CONFIG_X86
10834
+/**
10835
+ * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
10836
+ * @phba: pointer to lpfc hba data structure.
10837
+ * @cpu: CPU map index
10838
+ * @phys_id: CPU package physical id
10839
+ * @core_id: CPU core id
10840
+ */
10841
+static int
10842
+lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
10843
+ uint16_t phys_id, uint16_t core_id)
10844
+{
10845
+ struct lpfc_vector_map_info *cpup;
10846
+ int idx;
10847
+
10848
+ for_each_present_cpu(idx) {
10849
+ cpup = &phba->sli4_hba.cpu_map[idx];
10850
+ /* Does the cpup match the one we are looking for */
10851
+ if ((cpup->phys_id == phys_id) &&
10852
+ (cpup->core_id == core_id) &&
10853
+ (cpu != idx))
10854
+ return 1;
10855
+ }
10856
+ return 0;
10857
+}
10858
+#endif
10859
+
10860
+/*
10861
+ * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
10862
+ * @phba: pointer to lpfc hba data structure.
10863
+ * @eqidx: index for eq and irq vector
10864
+ * @flag: flags to set for vector_map structure
10865
+ * @cpu: cpu used to index vector_map structure
10866
+ *
10867
+ * The routine assigns eq info into vector_map structure
10868
+ */
10869
+static inline void
10870
+lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
10871
+ unsigned int cpu)
10872
+{
10873
+ struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
10874
+ struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
10875
+
10876
+ cpup->eq = eqidx;
10877
+ cpup->flag |= flag;
10878
+
10879
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10880
+ "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
10881
+ cpu, eqhdl->irq, cpup->eq, cpup->flag);
10882
+}
10883
+
10884
+/**
10885
+ * lpfc_cpu_map_array_init - Initialize cpu_map structure
10886
+ * @phba: pointer to lpfc hba data structure.
10887
+ *
10888
+ * The routine initializes the cpu_map array structure
10889
+ */
10890
+static void
10891
+lpfc_cpu_map_array_init(struct lpfc_hba *phba)
10892
+{
10893
+ struct lpfc_vector_map_info *cpup;
10894
+ struct lpfc_eq_intr_info *eqi;
10895
+ int cpu;
10896
+
10897
+ for_each_possible_cpu(cpu) {
10898
+ cpup = &phba->sli4_hba.cpu_map[cpu];
10899
+ cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
10900
+ cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
10901
+ cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
10902
+ cpup->eq = LPFC_VECTOR_MAP_EMPTY;
10903
+ cpup->flag = 0;
10904
+ eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
10905
+ INIT_LIST_HEAD(&eqi->list);
10906
+ eqi->icnt = 0;
10907
+ }
10908
+}
10909
+
10910
+/**
10911
+ * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
10912
+ * @phba: pointer to lpfc hba data structure.
10913
+ *
10914
+ * The routine initializes the hba_eq_hdl array structure
10915
+ */
10916
+static void
10917
+lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
10918
+{
10919
+ struct lpfc_hba_eq_hdl *eqhdl;
10920
+ int i;
10921
+
10922
+ for (i = 0; i < phba->cfg_irq_chann; i++) {
10923
+ eqhdl = lpfc_get_eq_hdl(i);
10924
+ eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
10925
+ eqhdl->phba = phba;
10926
+ }
10927
+}
10928
+
10929
+/**
999010930 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
999110931 * @phba: pointer to lpfc hba data structure.
999210932 * @vectors: number of msix vectors allocated.
999310933 *
999410934 * The routine will figure out the CPU affinity assignment for every
9995
- * MSI-X vector allocated for the HBA. The hba_eq_hdl will be updated
9996
- * with a pointer to the CPU mask that defines ALL the CPUs this vector
9997
- * can be associated with. If the vector can be unquely associated with
9998
- * a single CPU, that CPU will be recorded in hba_eq_hdl[index].cpu.
10935
+ * MSI-X vector allocated for the HBA.
999910936 * In addition, the CPU to IO channel mapping will be calculated
1000010937 * and the phba->sli4_hba.cpu_map array will reflect this.
1000110938 */
1000210939 static void
1000310940 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
1000410941 {
10942
+ int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
10943
+ int max_phys_id, min_phys_id;
10944
+ int max_core_id, min_core_id;
1000510945 struct lpfc_vector_map_info *cpup;
10006
- int index = 0;
10007
- int vec = 0;
10008
- int cpu;
10946
+ struct lpfc_vector_map_info *new_cpup;
1000910947 #ifdef CONFIG_X86
1001010948 struct cpuinfo_x86 *cpuinfo;
1001110949 #endif
10950
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
10951
+ struct lpfc_hdwq_stat *c_stat;
10952
+#endif
1001210953
10013
- /* Init cpu_map array */
10014
- memset(phba->sli4_hba.cpu_map, 0xff,
10015
- (sizeof(struct lpfc_vector_map_info) *
10016
- phba->sli4_hba.num_present_cpu));
10954
+ max_phys_id = 0;
10955
+ min_phys_id = LPFC_VECTOR_MAP_EMPTY;
10956
+ max_core_id = 0;
10957
+ min_core_id = LPFC_VECTOR_MAP_EMPTY;
1001710958
1001810959 /* Update CPU map with physical id and core id of each CPU */
10019
- cpup = phba->sli4_hba.cpu_map;
10020
- for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
10960
+ for_each_present_cpu(cpu) {
10961
+ cpup = &phba->sli4_hba.cpu_map[cpu];
1002110962 #ifdef CONFIG_X86
1002210963 cpuinfo = &cpu_data(cpu);
1002310964 cpup->phys_id = cpuinfo->phys_proc_id;
1002410965 cpup->core_id = cpuinfo->cpu_core_id;
10966
+ if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
10967
+ cpup->flag |= LPFC_CPU_MAP_HYPER;
1002510968 #else
1002610969 /* No distinction between CPUs for other platforms */
1002710970 cpup->phys_id = 0;
10028
- cpup->core_id = 0;
10971
+ cpup->core_id = cpu;
1002910972 #endif
10030
- cpup->channel_id = index; /* For now round robin */
10031
- cpup->irq = pci_irq_vector(phba->pcidev, vec);
10032
- vec++;
10033
- if (vec >= vectors)
10034
- vec = 0;
10035
- index++;
10036
- if (index >= phba->cfg_fcp_io_channel)
10037
- index = 0;
10038
- cpup++;
10973
+
10974
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10975
+ "3328 CPU %d physid %d coreid %d flag x%x\n",
10976
+ cpu, cpup->phys_id, cpup->core_id, cpup->flag);
10977
+
10978
+ if (cpup->phys_id > max_phys_id)
10979
+ max_phys_id = cpup->phys_id;
10980
+ if (cpup->phys_id < min_phys_id)
10981
+ min_phys_id = cpup->phys_id;
10982
+
10983
+ if (cpup->core_id > max_core_id)
10984
+ max_core_id = cpup->core_id;
10985
+ if (cpup->core_id < min_core_id)
10986
+ min_core_id = cpup->core_id;
10987
+ }
10988
+
10989
+ /* After looking at each irq vector assigned to this pcidev, it's
10990
+ * possible to see that not ALL CPUs have been accounted for.
10991
+ * Next we will set any unassigned (unaffinitized) cpu map
10992
+ * entries to a IRQ on the same phys_id.
10993
+ */
10994
+ first_cpu = cpumask_first(cpu_present_mask);
10995
+ start_cpu = first_cpu;
10996
+
10997
+ for_each_present_cpu(cpu) {
10998
+ cpup = &phba->sli4_hba.cpu_map[cpu];
10999
+
11000
+ /* Is this CPU entry unassigned */
11001
+ if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
11002
+ /* Mark CPU as IRQ not assigned by the kernel */
11003
+ cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
11004
+
11005
+ /* If so, find a new_cpup that's on the SAME
11006
+ * phys_id as cpup. start_cpu will start where we
11007
+ * left off so all unassigned entries don't get assigned
11008
+ * the IRQ of the first entry.
11009
+ */
11010
+ new_cpu = start_cpu;
11011
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11012
+ new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11013
+ if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
11014
+ (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
11015
+ (new_cpup->phys_id == cpup->phys_id))
11016
+ goto found_same;
11017
+ new_cpu = cpumask_next(
11018
+ new_cpu, cpu_present_mask);
11019
+ if (new_cpu == nr_cpumask_bits)
11020
+ new_cpu = first_cpu;
11021
+ }
11022
+ /* At this point, we leave the CPU as unassigned */
11023
+ continue;
11024
+found_same:
11025
+ /* We found a matching phys_id, so copy the IRQ info */
11026
+ cpup->eq = new_cpup->eq;
11027
+
11028
+ /* Bump start_cpu to the next slot to minimize the
11029
+ * chance of having multiple unassigned CPU entries
11030
+ * selecting the same IRQ.
11031
+ */
11032
+ start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11033
+ if (start_cpu == nr_cpumask_bits)
11034
+ start_cpu = first_cpu;
11035
+
11036
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11037
+ "3337 Set Affinity: CPU %d "
11038
+ "eq %d from peer cpu %d same "
11039
+ "phys_id (%d)\n",
11040
+ cpu, cpup->eq, new_cpu,
11041
+ cpup->phys_id);
11042
+ }
11043
+ }
11044
+
11045
+ /* Set any unassigned cpu map entries to a IRQ on any phys_id */
11046
+ start_cpu = first_cpu;
11047
+
11048
+ for_each_present_cpu(cpu) {
11049
+ cpup = &phba->sli4_hba.cpu_map[cpu];
11050
+
11051
+ /* Is this entry unassigned */
11052
+ if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
11053
+ /* Mark it as IRQ not assigned by the kernel */
11054
+ cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
11055
+
11056
+ /* If so, find a new_cpup thats on ANY phys_id
11057
+ * as the cpup. start_cpu will start where we
11058
+ * left off so all unassigned entries don't get
11059
+ * assigned the IRQ of the first entry.
11060
+ */
11061
+ new_cpu = start_cpu;
11062
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11063
+ new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11064
+ if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
11065
+ (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
11066
+ goto found_any;
11067
+ new_cpu = cpumask_next(
11068
+ new_cpu, cpu_present_mask);
11069
+ if (new_cpu == nr_cpumask_bits)
11070
+ new_cpu = first_cpu;
11071
+ }
11072
+ /* We should never leave an entry unassigned */
11073
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11074
+ "3339 Set Affinity: CPU %d "
11075
+ "eq %d UNASSIGNED\n",
11076
+ cpup->hdwq, cpup->eq);
11077
+ continue;
11078
+found_any:
11079
+ /* We found an available entry, copy the IRQ info */
11080
+ cpup->eq = new_cpup->eq;
11081
+
11082
+ /* Bump start_cpu to the next slot to minimize the
11083
+ * chance of having multiple unassigned CPU entries
11084
+ * selecting the same IRQ.
11085
+ */
11086
+ start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11087
+ if (start_cpu == nr_cpumask_bits)
11088
+ start_cpu = first_cpu;
11089
+
11090
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11091
+ "3338 Set Affinity: CPU %d "
11092
+ "eq %d from peer cpu %d (%d/%d)\n",
11093
+ cpu, cpup->eq, new_cpu,
11094
+ new_cpup->phys_id, new_cpup->core_id);
11095
+ }
11096
+ }
11097
+
11098
+ /* Assign hdwq indices that are unique across all cpus in the map
11099
+ * that are also FIRST_CPUs.
11100
+ */
11101
+ idx = 0;
11102
+ for_each_present_cpu(cpu) {
11103
+ cpup = &phba->sli4_hba.cpu_map[cpu];
11104
+
11105
+ /* Only FIRST IRQs get a hdwq index assignment. */
11106
+ if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11107
+ continue;
11108
+
11109
+ /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
11110
+ cpup->hdwq = idx;
11111
+ idx++;
11112
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11113
+ "3333 Set Affinity: CPU %d (phys %d core %d): "
11114
+ "hdwq %d eq %d flg x%x\n",
11115
+ cpu, cpup->phys_id, cpup->core_id,
11116
+ cpup->hdwq, cpup->eq, cpup->flag);
11117
+ }
11118
+ /* Associate a hdwq with each cpu_map entry
11119
+ * This will be 1 to 1 - hdwq to cpu, unless there are fewer
11120
+ * hardware queues than CPUs. For that case we will just round-robin
11121
+ * the available hardware queues as they get assigned to CPUs.
11122
+ * The next_idx is the idx from the FIRST_CPU loop above to account
11123
+ * for irq_chann < hdwq. The idx is used for round-robin assignments
11124
+ * and needs to start at 0.
11125
+ */
11126
+ next_idx = idx;
11127
+ start_cpu = 0;
11128
+ idx = 0;
11129
+ for_each_present_cpu(cpu) {
11130
+ cpup = &phba->sli4_hba.cpu_map[cpu];
11131
+
11132
+ /* FIRST cpus are already mapped. */
11133
+ if (cpup->flag & LPFC_CPU_FIRST_IRQ)
11134
+ continue;
11135
+
11136
+ /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
11137
+ * of the unassigned cpus to the next idx so that all
11138
+ * hdw queues are fully utilized.
11139
+ */
11140
+ if (next_idx < phba->cfg_hdw_queue) {
11141
+ cpup->hdwq = next_idx;
11142
+ next_idx++;
11143
+ continue;
11144
+ }
11145
+
11146
+ /* Not a First CPU and all hdw_queues are used. Reuse a
11147
+ * Hardware Queue for another CPU, so be smart about it
11148
+ * and pick one that has its IRQ/EQ mapped to the same phys_id
11149
+ * (CPU package) and core_id.
11150
+ */
11151
+ new_cpu = start_cpu;
11152
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11153
+ new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11154
+ if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
11155
+ new_cpup->phys_id == cpup->phys_id &&
11156
+ new_cpup->core_id == cpup->core_id) {
11157
+ goto found_hdwq;
11158
+ }
11159
+ new_cpu = cpumask_next(new_cpu, cpu_present_mask);
11160
+ if (new_cpu == nr_cpumask_bits)
11161
+ new_cpu = first_cpu;
11162
+ }
11163
+
11164
+ /* If we can't match both phys_id and core_id,
11165
+ * settle for just a phys_id match.
11166
+ */
11167
+ new_cpu = start_cpu;
11168
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
11169
+ new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
11170
+ if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
11171
+ new_cpup->phys_id == cpup->phys_id)
11172
+ goto found_hdwq;
11173
+
11174
+ new_cpu = cpumask_next(new_cpu, cpu_present_mask);
11175
+ if (new_cpu == nr_cpumask_bits)
11176
+ new_cpu = first_cpu;
11177
+ }
11178
+
11179
+ /* Otherwise just round robin on cfg_hdw_queue */
11180
+ cpup->hdwq = idx % phba->cfg_hdw_queue;
11181
+ idx++;
11182
+ goto logit;
11183
+ found_hdwq:
11184
+ /* We found an available entry, copy the IRQ info */
11185
+ start_cpu = cpumask_next(new_cpu, cpu_present_mask);
11186
+ if (start_cpu == nr_cpumask_bits)
11187
+ start_cpu = first_cpu;
11188
+ cpup->hdwq = new_cpup->hdwq;
11189
+ logit:
11190
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11191
+ "3335 Set Affinity: CPU %d (phys %d core %d): "
11192
+ "hdwq %d eq %d flg x%x\n",
11193
+ cpu, cpup->phys_id, cpup->core_id,
11194
+ cpup->hdwq, cpup->eq, cpup->flag);
11195
+ }
11196
+
11197
+ /*
11198
+ * Initialize the cpu_map slots for not-present cpus in case
11199
+ * a cpu is hot-added. Perform a simple hdwq round robin assignment.
11200
+ */
11201
+ idx = 0;
11202
+ for_each_possible_cpu(cpu) {
11203
+ cpup = &phba->sli4_hba.cpu_map[cpu];
11204
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11205
+ c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
11206
+ c_stat->hdwq_no = cpup->hdwq;
11207
+#endif
11208
+ if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
11209
+ continue;
11210
+
11211
+ cpup->hdwq = idx++ % phba->cfg_hdw_queue;
11212
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
11213
+ c_stat->hdwq_no = cpup->hdwq;
11214
+#endif
11215
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11216
+ "3340 Set Affinity: not present "
11217
+ "CPU %d hdwq %d\n",
11218
+ cpu, cpup->hdwq);
11219
+ }
11220
+
11221
+ /* The cpu_map array will be used later during initialization
11222
+ * when EQ / CQ / WQs are allocated and configured.
11223
+ */
11224
+ return;
11225
+}
11226
+
11227
+/**
11228
+ * lpfc_cpuhp_get_eq
11229
+ *
11230
+ * @phba: pointer to lpfc hba data structure.
11231
+ * @cpu: cpu going offline
11232
+ * @eqlist: eq list to append to
11233
+ */
11234
+static int
11235
+lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
11236
+ struct list_head *eqlist)
11237
+{
11238
+ const struct cpumask *maskp;
11239
+ struct lpfc_queue *eq;
11240
+ struct cpumask *tmp;
11241
+ u16 idx;
11242
+
11243
+ tmp = kzalloc(cpumask_size(), GFP_KERNEL);
11244
+ if (!tmp)
11245
+ return -ENOMEM;
11246
+
11247
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11248
+ maskp = pci_irq_get_affinity(phba->pcidev, idx);
11249
+ if (!maskp)
11250
+ continue;
11251
+ /*
11252
+ * if irq is not affinitized to the cpu going
11253
+ * then we don't need to poll the eq attached
11254
+ * to it.
11255
+ */
11256
+ if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
11257
+ continue;
11258
+ /* get the cpus that are online and are affini-
11259
+ * tized to this irq vector. If the count is
11260
+ * more than 1 then cpuhp is not going to shut-
11261
+ * down this vector. Since this cpu has not
11262
+ * gone offline yet, we need >1.
11263
+ */
11264
+ cpumask_and(tmp, maskp, cpu_online_mask);
11265
+ if (cpumask_weight(tmp) > 1)
11266
+ continue;
11267
+
11268
+ /* Now that we have an irq to shutdown, get the eq
11269
+ * mapped to this irq. Note: multiple hdwq's in
11270
+ * the software can share an eq, but eventually
11271
+ * only eq will be mapped to this vector
11272
+ */
11273
+ eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
11274
+ list_add(&eq->_poll_list, eqlist);
11275
+ }
11276
+ kfree(tmp);
11277
+ return 0;
11278
+}
11279
+
11280
+static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
11281
+{
11282
+ if (phba->sli_rev != LPFC_SLI_REV4)
11283
+ return;
11284
+
11285
+ cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
11286
+ &phba->cpuhp);
11287
+ /*
11288
+ * unregistering the instance doesn't stop the polling
11289
+ * timer. Wait for the poll timer to retire.
11290
+ */
11291
+ synchronize_rcu();
11292
+ del_timer_sync(&phba->cpuhp_poll_timer);
11293
+}
11294
+
11295
+static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
11296
+{
11297
+ if (phba->pport->fc_flag & FC_OFFLINE_MODE)
11298
+ return;
11299
+
11300
+ __lpfc_cpuhp_remove(phba);
11301
+}
11302
+
11303
+static void lpfc_cpuhp_add(struct lpfc_hba *phba)
11304
+{
11305
+ if (phba->sli_rev != LPFC_SLI_REV4)
11306
+ return;
11307
+
11308
+ rcu_read_lock();
11309
+
11310
+ if (!list_empty(&phba->poll_list))
11311
+ mod_timer(&phba->cpuhp_poll_timer,
11312
+ jiffies + msecs_to_jiffies(LPFC_POLL_HB));
11313
+
11314
+ rcu_read_unlock();
11315
+
11316
+ cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
11317
+ &phba->cpuhp);
11318
+}
11319
+
11320
+static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
11321
+{
11322
+ if (phba->pport->load_flag & FC_UNLOADING) {
11323
+ *retval = -EAGAIN;
11324
+ return true;
11325
+ }
11326
+
11327
+ if (phba->sli_rev != LPFC_SLI_REV4) {
11328
+ *retval = 0;
11329
+ return true;
11330
+ }
11331
+
11332
+ /* proceed with the hotplug */
11333
+ return false;
11334
+}
11335
+
11336
+/**
11337
+ * lpfc_irq_set_aff - set IRQ affinity
11338
+ * @eqhdl: EQ handle
11339
+ * @cpu: cpu to set affinity
11340
+ *
11341
+ **/
11342
+static inline void
11343
+lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
11344
+{
11345
+ cpumask_clear(&eqhdl->aff_mask);
11346
+ cpumask_set_cpu(cpu, &eqhdl->aff_mask);
11347
+ irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
11348
+ irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
11349
+}
11350
+
11351
+/**
11352
+ * lpfc_irq_clear_aff - clear IRQ affinity
11353
+ * @eqhdl: EQ handle
11354
+ *
11355
+ **/
11356
+static inline void
11357
+lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
11358
+{
11359
+ cpumask_clear(&eqhdl->aff_mask);
11360
+ irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
11361
+}
11362
+
11363
+/**
11364
+ * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
11365
+ * @phba: pointer to HBA context object.
11366
+ * @cpu: cpu going offline/online
11367
+ * @offline: true, cpu is going offline. false, cpu is coming online.
11368
+ *
11369
+ * If cpu is going offline, we'll try our best effort to find the next
11370
+ * online cpu on the phba's original_mask and migrate all offlining IRQ
11371
+ * affinities.
11372
+ *
11373
+ * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
11374
+ *
11375
+ * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
11376
+ * PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
11377
+ *
11378
+ **/
11379
+static void
11380
+lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
11381
+{
11382
+ struct lpfc_vector_map_info *cpup;
11383
+ struct cpumask *aff_mask;
11384
+ unsigned int cpu_select, cpu_next, idx;
11385
+ const struct cpumask *orig_mask;
11386
+
11387
+ if (phba->irq_chann_mode == NORMAL_MODE)
11388
+ return;
11389
+
11390
+ orig_mask = &phba->sli4_hba.irq_aff_mask;
11391
+
11392
+ if (!cpumask_test_cpu(cpu, orig_mask))
11393
+ return;
11394
+
11395
+ cpup = &phba->sli4_hba.cpu_map[cpu];
11396
+
11397
+ if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11398
+ return;
11399
+
11400
+ if (offline) {
11401
+ /* Find next online CPU on original mask */
11402
+ cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
11403
+ cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
11404
+
11405
+ /* Found a valid CPU */
11406
+ if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
11407
+ /* Go through each eqhdl and ensure offlining
11408
+ * cpu aff_mask is migrated
11409
+ */
11410
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11411
+ aff_mask = lpfc_get_aff_mask(idx);
11412
+
11413
+ /* Migrate affinity */
11414
+ if (cpumask_test_cpu(cpu, aff_mask))
11415
+ lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
11416
+ cpu_select);
11417
+ }
11418
+ } else {
11419
+ /* Rely on irqbalance if no online CPUs left on NUMA */
11420
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++)
11421
+ lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
11422
+ }
11423
+ } else {
11424
+ /* Migrate affinity back to this CPU */
11425
+ lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
1003911426 }
1004011427 }
1004111428
11429
+static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
11430
+{
11431
+ struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
11432
+ struct lpfc_queue *eq, *next;
11433
+ LIST_HEAD(eqlist);
11434
+ int retval;
11435
+
11436
+ if (!phba) {
11437
+ WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
11438
+ return 0;
11439
+ }
11440
+
11441
+ if (__lpfc_cpuhp_checks(phba, &retval))
11442
+ return retval;
11443
+
11444
+ lpfc_irq_rebalance(phba, cpu, true);
11445
+
11446
+ retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
11447
+ if (retval)
11448
+ return retval;
11449
+
11450
+ /* start polling on these eq's */
11451
+ list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
11452
+ list_del_init(&eq->_poll_list);
11453
+ lpfc_sli4_start_polling(eq);
11454
+ }
11455
+
11456
+ return 0;
11457
+}
11458
+
11459
+static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
11460
+{
11461
+ struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
11462
+ struct lpfc_queue *eq, *next;
11463
+ unsigned int n;
11464
+ int retval;
11465
+
11466
+ if (!phba) {
11467
+ WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
11468
+ return 0;
11469
+ }
11470
+
11471
+ if (__lpfc_cpuhp_checks(phba, &retval))
11472
+ return retval;
11473
+
11474
+ lpfc_irq_rebalance(phba, cpu, false);
11475
+
11476
+ list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
11477
+ n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
11478
+ if (n == cpu)
11479
+ lpfc_sli4_stop_polling(eq);
11480
+ }
11481
+
11482
+ return 0;
11483
+}
1004211484
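lpfc_cpu_online() and lpfc_cpu_offline() are shaped as multi-instance CPU-hotplug callbacks (they receive the hlist_node embedded in the lpfc_hba). A minimal sketch of how such a state is typically registered once at module init; the state variable and the callback name string below are assumptions for illustration only:

static enum cpuhp_state example_cpuhp_state;

static int __init example_register_cpuhp(void)
{
	int rc;

	/* Allocate a dynamic AP state: lpfc_cpu_online runs when a CPU
	 * comes up, lpfc_cpu_offline when a CPU is about to go down.
	 */
	rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				     "scsi/lpfc:online",
				     lpfc_cpu_online, lpfc_cpu_offline);
	if (rc < 0)
		return rc;
	example_cpuhp_state = rc;
	return 0;
}

Each HBA is then attached or detached per instance with cpuhp_state_add_instance_nocalls() and cpuhp_state_remove_instance_nocalls(), as lpfc_cpuhp_add() and __lpfc_cpuhp_remove() above already do.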
1004311485 /**
1004411486 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
1004511487 * @phba: pointer to lpfc hba data structure.
1004611488 *
1004711489 * This routine is invoked to enable the MSI-X interrupt vectors to device
10048
- * with SLI-4 interface spec.
11490
+ * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them
11491
+ * to cpus on the system.
11492
+ *
11493
+ * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
11494
+ * the number of cpus on the same numa node as this adapter. The vectors are
11495
+ * allocated without requesting OS affinity mapping. A vector will be
11496
+ * allocated and assigned to each online and offline cpu. If the cpu is
11497
+ * online, then affinity will be set to that cpu. If the cpu is offline, then
11498
+ * affinity will be set to the nearest peer cpu within the numa node that is
11499
+ * online. If there are no online cpus within the numa node, affinity is not
11500
+ * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
11501
+ * is consistent with the way cpu online/offline is handled when cfg_irq_numa is
11502
+ * configured.
11503
+ *
11504
+ * If numa mode is not enabled and there is more than 1 vector allocated, then
11505
+ * the driver relies on the managed irq interface where the OS assigns vector to
11506
+ * cpu affinity. The driver will then use that affinity mapping to setup its
11507
+ * cpu mapping table.
1004911508 *
1005011509 * Return codes
1005111510 * 0 - successful
....@@ -10056,15 +11515,33 @@
1005611515 {
1005711516 int vectors, rc, index;
1005811517 char *name;
11518
+ const struct cpumask *aff_mask = NULL;
11519
+ unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
11520
+ struct lpfc_vector_map_info *cpup;
11521
+ struct lpfc_hba_eq_hdl *eqhdl;
11522
+ const struct cpumask *maskp;
11523
+ unsigned int flags = PCI_IRQ_MSIX;
1005911524
1006011525 /* Set up MSI-X multi-message vectors */
10061
- vectors = phba->io_channel_irqs;
10062
- if (phba->cfg_fof)
10063
- vectors++;
11526
+ vectors = phba->cfg_irq_chann;
1006411527
10065
- rc = pci_alloc_irq_vectors(phba->pcidev,
10066
- (phba->nvmet_support) ? 1 : 2,
10067
- vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
11528
+ if (phba->irq_chann_mode != NORMAL_MODE)
11529
+ aff_mask = &phba->sli4_hba.irq_aff_mask;
11530
+
11531
+ if (aff_mask) {
11532
+ cpu_cnt = cpumask_weight(aff_mask);
11533
+ vectors = min(phba->cfg_irq_chann, cpu_cnt);
11534
+
11535
+ /* cpu: iterates over aff_mask including offline or online
11536
+ * cpu_select: iterates over online aff_mask to set affinity
11537
+ */
11538
+ cpu = cpumask_first(aff_mask);
11539
+ cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
11540
+ } else {
11541
+ flags |= PCI_IRQ_AFFINITY;
11542
+ }
11543
+
11544
+ rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
1006811545 if (rc < 0) {
1006911546 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1007011547 "0484 PCI enable MSI-X failed (%d)\n", rc);
....@@ -10074,58 +11551,92 @@
1007411551
1007511552 /* Assign MSI-X vectors to interrupt handlers */
1007611553 for (index = 0; index < vectors; index++) {
10077
- name = phba->sli4_hba.hba_eq_hdl[index].handler_name;
11554
+ eqhdl = lpfc_get_eq_hdl(index);
11555
+ name = eqhdl->handler_name;
1007811556 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
1007911557 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
1008011558 LPFC_DRIVER_HANDLER_NAME"%d", index);
1008111559
10082
- phba->sli4_hba.hba_eq_hdl[index].idx = index;
10083
- phba->sli4_hba.hba_eq_hdl[index].phba = phba;
10084
- atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1);
10085
- if (phba->cfg_fof && (index == (vectors - 1)))
10086
- rc = request_irq(pci_irq_vector(phba->pcidev, index),
10087
- &lpfc_sli4_fof_intr_handler, 0,
10088
- name,
10089
- &phba->sli4_hba.hba_eq_hdl[index]);
10090
- else
10091
- rc = request_irq(pci_irq_vector(phba->pcidev, index),
10092
- &lpfc_sli4_hba_intr_handler, 0,
10093
- name,
10094
- &phba->sli4_hba.hba_eq_hdl[index]);
11560
+ eqhdl->idx = index;
11561
+ rc = request_irq(pci_irq_vector(phba->pcidev, index),
11562
+ &lpfc_sli4_hba_intr_handler, 0,
11563
+ name, eqhdl);
1009511564 if (rc) {
1009611565 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1009711566 "0486 MSI-X fast-path (%d) "
1009811567 "request_irq failed (%d)\n", index, rc);
1009911568 goto cfg_fail_out;
1010011569 }
11570
+
11571
+ eqhdl->irq = pci_irq_vector(phba->pcidev, index);
11572
+
11573
+ if (aff_mask) {
11574
+ /* If found a neighboring online cpu, set affinity */
11575
+ if (cpu_select < nr_cpu_ids)
11576
+ lpfc_irq_set_aff(eqhdl, cpu_select);
11577
+
11578
+ /* Assign EQ to cpu_map */
11579
+ lpfc_assign_eq_map_info(phba, index,
11580
+ LPFC_CPU_FIRST_IRQ,
11581
+ cpu);
11582
+
11583
+ /* Iterate to next offline or online cpu in aff_mask */
11584
+ cpu = cpumask_next(cpu, aff_mask);
11585
+
11586
+ /* Find next online cpu in aff_mask to set affinity */
11587
+ cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
11588
+ } else if (vectors == 1) {
11589
+ cpu = cpumask_first(cpu_present_mask);
11590
+ lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
11591
+ cpu);
11592
+ } else {
11593
+ maskp = pci_irq_get_affinity(phba->pcidev, index);
11594
+
11595
+ /* Loop through all CPUs associated with vector index */
11596
+ for_each_cpu_and(cpu, maskp, cpu_present_mask) {
11597
+ cpup = &phba->sli4_hba.cpu_map[cpu];
11598
+
11599
+ /* If this is the first CPU that's assigned to
11600
+ * this vector, set LPFC_CPU_FIRST_IRQ.
11601
+ *
11602
+ * On certain platforms it's possible that irq
11603
+ * vectors are affinitized to all the cpus.
11604
+ * This can result in each cpu_map.eq being set
11605
+ * to the last vector, overwriting all the
11606
+ * previous cpu_map.eq entries. Ensure that
11607
+ * each vector receives a place in cpu_map.
11608
+ * Later call to lpfc_cpu_affinity_check will
11609
+ * ensure we are nicely balanced out.
11610
+ */
11611
+ if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
11612
+ continue;
11613
+ lpfc_assign_eq_map_info(phba, index,
11614
+ LPFC_CPU_FIRST_IRQ,
11615
+ cpu);
11616
+ break;
11617
+ }
11618
+ }
1010111619 }
1010211620
10103
- if (phba->cfg_fof)
10104
- vectors--;
10105
-
10106
- if (vectors != phba->io_channel_irqs) {
10107
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11621
+ if (vectors != phba->cfg_irq_chann) {
11622
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1010811623 "3238 Reducing IO channels to match number of "
1010911624 "MSI-X vectors, requested %d got %d\n",
10110
- phba->io_channel_irqs, vectors);
10111
- if (phba->cfg_fcp_io_channel > vectors)
10112
- phba->cfg_fcp_io_channel = vectors;
10113
- if (phba->cfg_nvme_io_channel > vectors)
10114
- phba->cfg_nvme_io_channel = vectors;
10115
- if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
10116
- phba->io_channel_irqs = phba->cfg_fcp_io_channel;
10117
- else
10118
- phba->io_channel_irqs = phba->cfg_nvme_io_channel;
11625
+ phba->cfg_irq_chann, vectors);
11626
+ if (phba->cfg_irq_chann > vectors)
11627
+ phba->cfg_irq_chann = vectors;
1011911628 }
10120
- lpfc_cpu_affinity_check(phba, vectors);
1012111629
1012211630 return rc;
1012311631
1012411632 cfg_fail_out:
1012511633 /* free the irq already requested */
10126
- for (--index; index >= 0; index--)
10127
- free_irq(pci_irq_vector(phba->pcidev, index),
10128
- &phba->sli4_hba.hba_eq_hdl[index]);
11634
+ for (--index; index >= 0; index--) {
11635
+ eqhdl = lpfc_get_eq_hdl(index);
11636
+ lpfc_irq_clear_aff(eqhdl);
11637
+ irq_set_affinity_hint(eqhdl->irq, NULL);
11638
+ free_irq(eqhdl->irq, eqhdl);
11639
+ }
1012911640
1013011641 /* Unconfigure MSI-X capability structure */
1013111642 pci_free_irq_vectors(phba->pcidev);
....@@ -10139,10 +11650,10 @@
1013911650 * @phba: pointer to lpfc hba data structure.
1014011651 *
1014111652 * This routine is invoked to enable the MSI interrupt mode to device with
10142
- * SLI-4 interface spec. The kernel function pci_enable_msi() is called
10143
- * to enable the MSI vector. The device driver is responsible for calling
10144
- * the request_irq() to register MSI vector with a interrupt the handler,
10145
- * which is done in this function.
11653
+ * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
11654
+ * called to enable the MSI vector. The device driver is responsible for
11655
+ * calling request_irq() to register the MSI vector with an interrupt
11656
+ * handler, which is done in this function.
1014611657 *
1014711658 * Return codes
1014811659 * 0 - successful
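
For orientation, the single-vector path sketched below mirrors that description: allocate exactly one MSI vector, then register one handler against it. This is an editor's sketch only; example_isr and the "example-msi" name are placeholders, not driver symbols.

#include <linux/pci.h>
#include <linux/interrupt.h>

static int example_enable_single_msi(struct pci_dev *pdev,
				     irq_handler_t example_isr, void *data)
{
	int rc;

	rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (rc < 0)
		return rc;

	rc = request_irq(pci_irq_vector(pdev, 0), example_isr, 0,
			 "example-msi", data);
	if (rc)
		pci_free_irq_vectors(pdev);	/* undo the allocation on failure */
	return rc;
}
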
....@@ -10152,41 +11663,47 @@
1015211663 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
1015311664 {
1015411665 int rc, index;
11666
+ unsigned int cpu;
11667
+ struct lpfc_hba_eq_hdl *eqhdl;
1015511668
10156
- rc = pci_enable_msi(phba->pcidev);
10157
- if (!rc)
11669
+ rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
11670
+ PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
11671
+ if (rc > 0)
1015811672 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1015911673 "0487 PCI enable MSI mode success.\n");
1016011674 else {
1016111675 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1016211676 "0488 PCI enable MSI mode failed (%d)\n", rc);
10163
- return rc;
11677
+ return rc ? rc : -1;
1016411678 }
1016511679
1016611680 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
1016711681 0, LPFC_DRIVER_NAME, phba);
1016811682 if (rc) {
10169
- pci_disable_msi(phba->pcidev);
11683
+ pci_free_irq_vectors(phba->pcidev);
1017011684 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1017111685 "0490 MSI request_irq failed (%d)\n", rc);
1017211686 return rc;
1017311687 }
1017411688
10175
- for (index = 0; index < phba->io_channel_irqs; index++) {
10176
- phba->sli4_hba.hba_eq_hdl[index].idx = index;
10177
- phba->sli4_hba.hba_eq_hdl[index].phba = phba;
11689
+ eqhdl = lpfc_get_eq_hdl(0);
11690
+ eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
11691
+
11692
+ cpu = cpumask_first(cpu_present_mask);
11693
+ lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
11694
+
11695
+ for (index = 0; index < phba->cfg_irq_chann; index++) {
11696
+ eqhdl = lpfc_get_eq_hdl(index);
11697
+ eqhdl->idx = index;
1017811698 }
1017911699
10180
- if (phba->cfg_fof) {
10181
- phba->sli4_hba.hba_eq_hdl[index].idx = index;
10182
- phba->sli4_hba.hba_eq_hdl[index].phba = phba;
10183
- }
1018411700 return 0;
1018511701 }
1018611702
1018711703 /**
1018811704 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
1018911705 * @phba: pointer to lpfc hba data structure.
11706
+ * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
1019011707 *
1019111708 * This routine is invoked to enable device interrupt and associate driver's
1019211709 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
....@@ -10236,22 +11753,21 @@
1023611753 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
1023711754 if (!retval) {
1023811755 struct lpfc_hba_eq_hdl *eqhdl;
11756
+ unsigned int cpu;
1023911757
1024011758 /* Indicate initialization to INTx mode */
1024111759 phba->intr_type = INTx;
1024211760 intr_mode = 0;
1024311761
10244
- for (idx = 0; idx < phba->io_channel_irqs; idx++) {
10245
- eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
11762
+ eqhdl = lpfc_get_eq_hdl(0);
11763
+ eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
11764
+
11765
+ cpu = cpumask_first(cpu_present_mask);
11766
+ lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
11767
+ cpu);
11768
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
11769
+ eqhdl = lpfc_get_eq_hdl(idx);
1024611770 eqhdl->idx = idx;
10247
- eqhdl->phba = phba;
10248
- atomic_set(&eqhdl->hba_eq_in_use, 1);
10249
- }
10250
- if (phba->cfg_fof) {
10251
- eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
10252
- eqhdl->idx = idx;
10253
- eqhdl->phba = phba;
10254
- atomic_set(&eqhdl->hba_eq_in_use, 1);
1025511771 }
1025611772 }
1025711773 }
....@@ -10273,15 +11789,15 @@
1027311789 /* Disable the currently initialized interrupt mode */
1027411790 if (phba->intr_type == MSIX) {
1027511791 int index;
11792
+ struct lpfc_hba_eq_hdl *eqhdl;
1027611793
1027711794 /* Free up MSI-X multi-message vectors */
10278
- for (index = 0; index < phba->io_channel_irqs; index++)
10279
- free_irq(pci_irq_vector(phba->pcidev, index),
10280
- &phba->sli4_hba.hba_eq_hdl[index]);
10281
-
10282
- if (phba->cfg_fof)
10283
- free_irq(pci_irq_vector(phba->pcidev, index),
10284
- &phba->sli4_hba.hba_eq_hdl[index]);
11795
+ for (index = 0; index < phba->cfg_irq_chann; index++) {
11796
+ eqhdl = lpfc_get_eq_hdl(index);
11797
+ lpfc_irq_clear_aff(eqhdl);
11798
+ irq_set_affinity_hint(eqhdl->irq, NULL);
11799
+ free_irq(eqhdl->irq, eqhdl);
11800
+ }
1028511801 } else {
1028611802 free_irq(phba->pcidev->irq, phba);
1028711803 }
....@@ -10342,10 +11858,11 @@
1034211858 static void
1034311859 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
1034411860 {
11861
+ struct lpfc_sli4_hdw_queue *qp;
11862
+ int idx, ccnt;
1034511863 int wait_time = 0;
10346
- int nvme_xri_cmpl = 1;
11864
+ int io_xri_cmpl = 1;
1034711865 int nvmet_xri_cmpl = 1;
10348
- int fcp_xri_cmpl = 1;
1034911866 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
1035011867
1035111868 /* Driver just aborted IOs during the hba_unset process. Pause
....@@ -10358,36 +11875,35 @@
1035811875 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
1035911876 lpfc_nvme_wait_for_io_drain(phba);
1036011877
10361
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
10362
- fcp_xri_cmpl =
10363
- list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
11878
+ ccnt = 0;
11879
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
11880
+ qp = &phba->sli4_hba.hdwq[idx];
11881
+ io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
11882
+ if (!io_xri_cmpl) /* if list is NOT empty */
11883
+ ccnt++;
11884
+ }
11885
+ if (ccnt)
11886
+ io_xri_cmpl = 0;
11887
+
1036411888 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10365
- nvme_xri_cmpl =
10366
- list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
1036711889 nvmet_xri_cmpl =
1036811890 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
1036911891 }
1037011892
10371
- while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl ||
10372
- !nvmet_xri_cmpl) {
11893
+ while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
1037311894 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
1037411895 if (!nvmet_xri_cmpl)
10375
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11896
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1037611897 "6424 NVMET XRI exchange busy "
1037711898 "wait time: %d seconds.\n",
1037811899 wait_time/1000);
10379
- if (!nvme_xri_cmpl)
10380
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10381
- "6100 NVME XRI exchange busy "
10382
- "wait time: %d seconds.\n",
10383
- wait_time/1000);
10384
- if (!fcp_xri_cmpl)
10385
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10386
- "2877 FCP XRI exchange busy "
11900
+ if (!io_xri_cmpl)
11901
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11902
+ "6100 IO XRI exchange busy "
1038711903 "wait time: %d seconds.\n",
1038811904 wait_time/1000);
1038911905 if (!els_xri_cmpl)
10390
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11906
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1039111907 "2878 ELS XRI exchange busy "
1039211908 "wait time: %d seconds.\n",
1039311909 wait_time/1000);
....@@ -10397,17 +11913,22 @@
1039711913 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
1039811914 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
1039911915 }
11916
+
11917
+ ccnt = 0;
11918
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
11919
+ qp = &phba->sli4_hba.hdwq[idx];
11920
+ io_xri_cmpl = list_empty(
11921
+ &qp->lpfc_abts_io_buf_list);
11922
+ if (!io_xri_cmpl) /* if list is NOT empty */
11923
+ ccnt++;
11924
+ }
11925
+ if (ccnt)
11926
+ io_xri_cmpl = 0;
11927
+
1040011928 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10401
- nvme_xri_cmpl = list_empty(
10402
- &phba->sli4_hba.lpfc_abts_nvme_buf_list);
1040311929 nvmet_xri_cmpl = list_empty(
1040411930 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
1040511931 }
10406
-
10407
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
10408
- fcp_xri_cmpl = list_empty(
10409
- &phba->sli4_hba.lpfc_abts_scsi_buf_list);
10410
-
1041111932 els_xri_cmpl =
1041211933 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
1041311934
....@@ -10432,7 +11953,8 @@
1043211953 struct pci_dev *pdev = phba->pcidev;
1043311954
1043411955 lpfc_stop_hba_timers(phba);
10435
- phba->sli4_hba.intr_enable = 0;
11956
+ if (phba->pport)
11957
+ phba->sli4_hba.intr_enable = 0;
1043611958
1043711959 /*
1043811960 * Gracefully wait out the potential current outstanding asynchronous
....@@ -10466,6 +11988,10 @@
1046611988 /* Wait for completion of device XRI exchange busy */
1046711989 lpfc_sli4_xri_exchange_busy_wait(phba);
1046811990
11991
+ /* per-phba callback de-registration for hotplug event */
11992
+ if (phba->pport)
11993
+ lpfc_cpuhp_remove(phba);
11994
+
1046911995 /* Disable PCI subsystem interrupt */
1047011996 lpfc_sli4_disable_intr(phba);
1047111997
....@@ -10476,6 +12002,9 @@
1047612002 /* Stop kthread signal shall trigger work_done one more time */
1047712003 kthread_stop(phba->worker_thread);
1047812004
12005
+ /* Disable FW logging to host memory */
12006
+ lpfc_ras_stop_fwlog(phba);
12007
+
1047912008 /* Unset the queues shared with the hardware then release all
1048012009 * allocated resources.
1048112010 */
....@@ -10485,8 +12014,13 @@
1048512014 /* Reset SLI4 HBA FCoE function */
1048612015 lpfc_pci_function_reset(phba);
1048712016
12017
+ /* Free RAS DMA memory */
12018
+ if (phba->ras_fwlog.ras_enabled)
12019
+ lpfc_sli4_ras_dma_free(phba);
12020
+
1048812021 /* Stop the SLI4 device port */
10489
- phba->pport->work_port_events = 0;
12022
+ if (phba->pport)
12023
+ phba->pport->work_port_events = 0;
1049012024 }
1049112025
1049212026 /**
....@@ -10558,6 +12092,7 @@
1055812092 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
1055912093 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
1056012094 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
12095
+ sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
1056112096 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
1056212097 mbx_sli4_parameters);
1056312098 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
....@@ -10565,27 +12100,54 @@
1056512100 mbx_sli4_parameters);
1056612101 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
1056712102 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
10568
- phba->nvme_support = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
10569
- bf_get(cfg_xib, mbx_sli4_parameters));
1057012103
10571
- if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) ||
10572
- !phba->nvme_support) {
10573
- phba->nvme_support = 0;
10574
- phba->nvmet_support = 0;
10575
- phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
10576
- phba->cfg_nvme_io_channel = 0;
10577
- phba->io_channel_irqs = phba->cfg_fcp_io_channel;
10578
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
10579
- "6101 Disabling NVME support: "
10580
- "Not supported by firmware: %d %d\n",
10581
- bf_get(cfg_nvme, mbx_sli4_parameters),
10582
- bf_get(cfg_xib, mbx_sli4_parameters));
12104
+ /* Check for Extended Pre-Registered SGL support */
12105
+ phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
1058312106
10584
- /* If firmware doesn't support NVME, just use SCSI support */
10585
- if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
10586
- return -ENODEV;
10587
- phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
12107
+ /* Check for firmware nvme support */
12108
+ rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
12109
+ bf_get(cfg_xib, mbx_sli4_parameters));
12110
+
12111
+ if (rc) {
12112
+ /* Save this to indicate the Firmware supports NVME */
12113
+ sli4_params->nvme = 1;
12114
+
12115
+ /* Firmware NVME support, check driver FC4 NVME support */
12116
+ if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
12117
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
12118
+ "6133 Disabling NVME support: "
12119
+ "FC4 type not supported: x%x\n",
12120
+ phba->cfg_enable_fc4_type);
12121
+ goto fcponly;
12122
+ }
12123
+ } else {
12124
+ /* No firmware NVME support, check driver FC4 NVME support */
12125
+ sli4_params->nvme = 0;
12126
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
12127
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
12128
+ "6101 Disabling NVME support: Not "
12129
+ "supported by firmware (%d %d) x%x\n",
12130
+ bf_get(cfg_nvme, mbx_sli4_parameters),
12131
+ bf_get(cfg_xib, mbx_sli4_parameters),
12132
+ phba->cfg_enable_fc4_type);
12133
+fcponly:
12134
+ phba->nvme_support = 0;
12135
+ phba->nvmet_support = 0;
12136
+ phba->cfg_nvmet_mrq = 0;
12137
+ phba->cfg_nvme_seg_cnt = 0;
12138
+
12139
+ /* If no FC4 type support, move to just SCSI support */
12140
+ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
12141
+ return -ENODEV;
12142
+ phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
12143
+ }
1058812144 }
12145
+
12146
+ /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
12147
+ * accommodate 512K and 1M IOs in a single nvme buf.
12148
+ */
12149
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
12150
+ phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
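
Editor's note on the scaling above: assuming each scatter-gather entry maps one 4 KiB page and LPFC_MAX_NVME_SEG_CNT is 256 (the macro's value is defined elsewhere in the driver and is an assumption here), the scaled count covers 256 * 4096 B = 1,048,576 B = 1 MiB per command, with 128 entries already sufficient for a 512 KiB I/O.
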
1058912151
1059012152 /* Only embed PBDE for if_type 6, PBDE support requires xib be set */
1059112153 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
....@@ -10650,6 +12212,15 @@
1065012212 phba->mds_diags_support = 1;
1065112213 else
1065212214 phba->mds_diags_support = 0;
12215
+
12216
+ /*
12217
+ * Check if the SLI port supports NSLER
12218
+ */
12219
+ if (bf_get(cfg_nsler, mbx_sli4_parameters))
12220
+ phba->nsler = 1;
12221
+ else
12222
+ phba->nsler = 0;
12223
+
1065312224 return 0;
1065412225 }
1065512226
....@@ -10756,14 +12327,14 @@
1075612327 /* Configure and enable interrupt */
1075712328 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
1075812329 if (intr_mode == LPFC_INTR_ERROR) {
10759
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12330
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1076012331 "0431 Failed to enable interrupt.\n");
1076112332 error = -ENODEV;
1076212333 goto out_free_sysfs_attr;
1076312334 }
1076412335 /* SLI-3 HBA setup */
1076512336 if (lpfc_sli_hba_setup(phba)) {
10766
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12337
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1076712338 "1477 Failed to set up hba\n");
1076812339 error = -ENODEV;
1076912340 goto out_remove_device;
....@@ -10878,9 +12449,9 @@
1087812449 kfree(phba->vpi_ids);
1087912450
1088012451 lpfc_stop_hba_timers(phba);
10881
- spin_lock_irq(&phba->hbalock);
12452
+ spin_lock_irq(&phba->port_list_lock);
1088212453 list_del_init(&vport->listentry);
10883
- spin_unlock_irq(&phba->hbalock);
12454
+ spin_unlock_irq(&phba->port_list_lock);
1088412455
1088512456 lpfc_debugfs_terminate(vport);
1088612457
....@@ -10898,6 +12469,8 @@
1089812469 * corresponding pools here.
1089912470 */
1090012471 lpfc_scsi_free(phba);
12472
+ lpfc_free_iocb_list(phba);
12473
+
1090112474 lpfc_mem_free_all(phba);
1090212475
1090312476 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
....@@ -11019,7 +12592,7 @@
1101912592 /* Configure and enable interrupt */
1102012593 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
1102112594 if (intr_mode == LPFC_INTR_ERROR) {
11022
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12595
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1102312596 "0430 PM resume Failed to enable interrupt\n");
1102412597 return -EIO;
1102512598 } else
....@@ -11045,7 +12618,7 @@
1104512618 static void
1104612619 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
1104712620 {
11048
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12621
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1104912622 "2723 PCI channel I/O abort preparing for recovery\n");
1105012623
1105112624 /*
....@@ -11066,7 +12639,7 @@
1106612639 static void
1106712640 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
1106812641 {
11069
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12642
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1107012643 "2710 PCI channel disable preparing for reset\n");
1107112644
1107212645 /* Block any management I/Os to the device */
....@@ -11076,7 +12649,7 @@
1107612649 lpfc_scsi_dev_block(phba);
1107712650
1107812651 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
11079
- lpfc_sli_flush_fcp_rings(phba);
12652
+ lpfc_sli_flush_io_rings(phba);
1108012653
1108112654 /* stop all timers */
1108212655 lpfc_stop_hba_timers(phba);
....@@ -11097,7 +12670,7 @@
1109712670 static void
1109812671 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
1109912672 {
11100
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12673
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1110112674 "2711 PCI channel permanent disable for failure\n");
1110212675 /* Block all SCSI devices' I/Os on the host */
1110312676 lpfc_scsi_dev_block(phba);
....@@ -11106,7 +12679,7 @@
1110612679 lpfc_stop_hba_timers(phba);
1110712680
1110812681 /* Clean up all driver's outstanding SCSI I/Os */
11109
- lpfc_sli_flush_fcp_rings(phba);
12682
+ lpfc_sli_flush_io_rings(phba);
1111012683 }
1111112684
1111212685 /**
....@@ -11148,7 +12721,7 @@
1114812721 return PCI_ERS_RESULT_DISCONNECT;
1114912722 default:
1115012723 /* Unknown state, prepare and request slot reset */
11151
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12724
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1115212725 "0472 Unknown PCI error state: x%x\n", state);
1115312726 lpfc_sli_prep_dev_for_reset(phba);
1115412727 return PCI_ERS_RESULT_NEED_RESET;
....@@ -11206,7 +12779,7 @@
1120612779 /* Configure and enable interrupt */
1120712780 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
1120812781 if (intr_mode == LPFC_INTR_ERROR) {
11209
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12782
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1121012783 "0427 Cannot re-enable interrupt after "
1121112784 "slot reset.\n");
1121212785 return PCI_ERS_RESULT_DISCONNECT;
....@@ -11242,10 +12815,6 @@
1124212815
1124312816 /* Bring device online, it will be no-op for non-fatal error resume */
1124412817 lpfc_online(phba);
11245
-
11246
- /* Clean up Advanced Error Reporting (AER) if needed */
11247
- if (phba->hba_flag & HBA_AER_ENABLED)
11248
- pci_cleanup_aer_uncorrect_error_status(pdev);
1124912818 }
1125012819
1125112820 /**
....@@ -11295,35 +12864,56 @@
1129512864 }
1129612865
1129712866
11298
-static void
12867
+static int
1129912868 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
1130012869 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
1130112870 const struct firmware *fw)
1130212871 {
11303
- if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) ||
11304
- (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
11305
- magic_number != MAGIC_NUMER_G6) ||
11306
- (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
11307
- magic_number != MAGIC_NUMER_G7))
11308
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11309
- "3030 This firmware version is not supported on "
11310
- "this HBA model. Device:%x Magic:%x Type:%x "
11311
- "ID:%x Size %d %zd\n",
11312
- phba->pcidev->device, magic_number, ftype, fid,
11313
- fsize, fw->size);
11314
- else
11315
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11316
- "3022 FW Download failed. Device:%x Magic:%x Type:%x "
11317
- "ID:%x Size %d %zd\n",
11318
- phba->pcidev->device, magic_number, ftype, fid,
11319
- fsize, fw->size);
11320
-}
12872
+ int rc;
1132112873
12874
+ /* Three cases: (1) FW was not supported on the detected adapter.
12875
+ * (2) FW update has been locked out administratively.
12876
+ * (3) Some other error during FW update.
12877
+ * In each case, an unmaskable message is written to the console
12878
+ * for admin diagnosis.
12879
+ */
12880
+ if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
12881
+ (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
12882
+ magic_number != MAGIC_NUMBER_G6) ||
12883
+ (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC &&
12884
+ magic_number != MAGIC_NUMBER_G7)) {
12885
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12886
+ "3030 This firmware version is not supported on"
12887
+ " this HBA model. Device:%x Magic:%x Type:%x "
12888
+ "ID:%x Size %d %zd\n",
12889
+ phba->pcidev->device, magic_number, ftype, fid,
12890
+ fsize, fw->size);
12891
+ rc = -EINVAL;
12892
+ } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
12893
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12894
+ "3021 Firmware downloads have been prohibited "
12895
+ "by a system configuration setting on "
12896
+ "Device:%x Magic:%x Type:%x ID:%x Size %d "
12897
+ "%zd\n",
12898
+ phba->pcidev->device, magic_number, ftype, fid,
12899
+ fsize, fw->size);
12900
+ rc = -EACCES;
12901
+ } else {
12902
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12903
+ "3022 FW Download failed. Add Status x%x "
12904
+ "Device:%x Magic:%x Type:%x ID:%x Size %d "
12905
+ "%zd\n",
12906
+ offset, phba->pcidev->device, magic_number,
12907
+ ftype, fid, fsize, fw->size);
12908
+ rc = -EIO;
12909
+ }
12910
+ return rc;
12911
+}
1132212912
1132312913 /**
1132412914 * lpfc_write_firmware - attempt to write a firmware image to the port
1132512915 * @fw: pointer to firmware image returned from request_firmware.
11326
- * @phba: pointer to lpfc hba data structure.
12916
+ * @context: pointer to lpfc hba data structure.
1132712917 *
1132812918 **/
1132912919 static void
....@@ -11353,7 +12943,7 @@
1135312943 INIT_LIST_HEAD(&dma_buffer_list);
1135412944 lpfc_decode_firmware_rev(phba, fwrev, 1);
1135512945 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
11356
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12946
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1135712947 "3023 Updating Firmware, Current Version:%s "
1135812948 "New Version:%s\n",
1135912949 fwrev, image->revision);
....@@ -11392,14 +12982,18 @@
1139212982 rc = lpfc_wr_object(phba, &dma_buffer_list,
1139312983 (fw->size - offset), &offset);
1139412984 if (rc) {
11395
- lpfc_log_write_firmware_error(phba, offset,
11396
- magic_number, ftype, fid, fsize, fw);
12985
+ rc = lpfc_log_write_firmware_error(phba, offset,
12986
+ magic_number,
12987
+ ftype,
12988
+ fid,
12989
+ fsize,
12990
+ fw);
1139712991 goto release_out;
1139812992 }
1139912993 }
1140012994 rc = offset;
1140112995 } else
11402
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12996
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1140312997 "3029 Skipped Firmware update, Current "
1140412998 "Version:%s New Version:%s\n",
1140512999 fwrev, image->revision);
....@@ -11413,14 +13007,18 @@
1141313007 }
1141413008 release_firmware(fw);
1141513009 out:
11416
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11417
- "3024 Firmware update done: %d.\n", rc);
11418
- return;
13010
+ if (rc < 0)
13011
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13012
+ "3062 Firmware update error, status %d.\n", rc);
13013
+ else
13014
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13015
+ "3024 Firmware update success: size %d.\n", rc);
1141913016 }
1142013017
1142113018 /**
1142213019 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
1142313020 * @phba: pointer to lpfc hba data structure.
13021
+ * @fw_upgrade: which firmware to update.
1142413022 *
1142513023 * This routine is called to perform Linux generic firmware upgrade on device
1142613024 * that supports such feature.
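
For orientation, such an upgrade is normally driven through the kernel's asynchronous firmware loader, with the completion callback doing the actual download. A minimal editor's sketch of that pattern follows; example_fw_done, the "example.grp" file name and the use of the device pointer as callback context are illustrative assumptions, not lpfc code.

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>

static void example_fw_done(const struct firmware *fw, void *context)
{
	struct device *dev = context;

	if (!fw) {
		dev_warn(dev, "firmware image not found\n");
		return;
	}
	/* ... stream fw->data (fw->size bytes) down to the adapter ... */
	release_firmware(fw);
}

static int example_fw_request(struct device *dev)
{
	return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				       "example.grp", dev, GFP_KERNEL,
				       dev, example_fw_done);
}
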
....@@ -11487,6 +13085,8 @@
1148713085 if (!phba)
1148813086 return -ENOMEM;
1148913087
13088
+ INIT_LIST_HEAD(&phba->poll_list);
13089
+
1149013090 /* Perform generic PCI device enabling operation */
1149113091 error = lpfc_enable_pci_dev(phba);
1149213092 if (error)
....@@ -11527,16 +13127,48 @@
1152713127 /* Get the default values for Model Name and Description */
1152813128 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
1152913129
13130
+ /* Now, trying to enable interrupt and bring up the device */
13131
+ cfg_mode = phba->cfg_use_msi;
13132
+
13133
+ /* Put device to a known state before enabling interrupt */
13134
+ phba->pport = NULL;
13135
+ lpfc_stop_port(phba);
13136
+
13137
+ /* Init cpu_map array */
13138
+ lpfc_cpu_map_array_init(phba);
13139
+
13140
+ /* Init hba_eq_hdl array */
13141
+ lpfc_hba_eq_hdl_array_init(phba);
13142
+
13143
+ /* Configure and enable interrupt */
13144
+ intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
13145
+ if (intr_mode == LPFC_INTR_ERROR) {
13146
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13147
+ "0426 Failed to enable interrupt.\n");
13148
+ error = -ENODEV;
13149
+ goto out_unset_driver_resource;
13150
+ }
13151
+ /* Default to single EQ for non-MSI-X */
13152
+ if (phba->intr_type != MSIX) {
13153
+ phba->cfg_irq_chann = 1;
13154
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13155
+ if (phba->nvmet_support)
13156
+ phba->cfg_nvmet_mrq = 1;
13157
+ }
13158
+ }
13159
+ lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
13160
+
1153013161 /* Create SCSI host to the physical port */
1153113162 error = lpfc_create_shost(phba);
1153213163 if (error) {
1153313164 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1153413165 "1415 Failed to create scsi host.\n");
11535
- goto out_unset_driver_resource;
13166
+ goto out_disable_intr;
1153613167 }
13168
+ vport = phba->pport;
13169
+ shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
1153713170
1153813171 /* Configure sysfs attributes */
11539
- vport = phba->pport;
1154013172 error = lpfc_alloc_sysfs_attr(vport);
1154113173 if (error) {
1154213174 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
....@@ -11544,39 +13176,12 @@
1154413176 goto out_destroy_shost;
1154513177 }
1154613178
11547
- shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
11548
- /* Now, trying to enable interrupt and bring up the device */
11549
- cfg_mode = phba->cfg_use_msi;
11550
-
11551
- /* Put device to a known state before enabling interrupt */
11552
- lpfc_stop_port(phba);
11553
-
11554
- /* Configure and enable interrupt */
11555
- intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
11556
- if (intr_mode == LPFC_INTR_ERROR) {
11557
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11558
- "0426 Failed to enable interrupt.\n");
11559
- error = -ENODEV;
11560
- goto out_free_sysfs_attr;
11561
- }
11562
- /* Default to single EQ for non-MSI-X */
11563
- if (phba->intr_type != MSIX) {
11564
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
11565
- phba->cfg_fcp_io_channel = 1;
11566
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11567
- phba->cfg_nvme_io_channel = 1;
11568
- if (phba->nvmet_support)
11569
- phba->cfg_nvmet_mrq = 1;
11570
- }
11571
- phba->io_channel_irqs = 1;
11572
- }
11573
-
1157413179 /* Set up SLI-4 HBA */
1157513180 if (lpfc_sli4_hba_setup(phba)) {
11576
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13181
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1157713182 "1421 Failed to set up hba\n");
1157813183 error = -ENODEV;
11579
- goto out_disable_intr;
13184
+ goto out_free_sysfs_attr;
1158013185 }
1158113186
1158213187 /* Log the current active interrupt mode */
....@@ -11589,19 +13194,20 @@
1158913194 /* NVME support in FW earlier in the driver load corrects the
1159013195 * FC4 type making a check for nvme_support unnecessary.
1159113196 */
11592
- if ((phba->nvmet_support == 0) &&
11593
- (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
11594
- /* Create NVME binding with nvme_fc_transport. This
11595
- * ensures the vport is initialized. If the localport
11596
- * create fails, it should not unload the driver to
11597
- * support field issues.
11598
- */
11599
- error = lpfc_nvme_create_localport(vport);
11600
- if (error) {
11601
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11602
- "6004 NVME registration failed, "
11603
- "error x%x\n",
11604
- error);
13197
+ if (phba->nvmet_support == 0) {
13198
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13199
+ /* Create NVME binding with nvme_fc_transport. This
13200
+ * ensures the vport is initialized. If the localport
13201
+ * create fails, it should not unload the driver to
13202
+ * support field issues.
13203
+ */
13204
+ error = lpfc_nvme_create_localport(vport);
13205
+ if (error) {
13206
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13207
+ "6004 NVME registration "
13208
+ "failed, error x%x\n",
13209
+ error);
13210
+ }
1160513211 }
1160613212 }
1160713213
....@@ -11611,14 +13217,21 @@
1161113217
1161213218 /* Check if there are static vports to be created. */
1161313219 lpfc_create_static_vport(phba);
13220
+
13221
+ /* Enable RAS FW log support */
13222
+ lpfc_sli4_ras_setup(phba);
13223
+
13224
+ timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
13225
+ cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
13226
+
1161413227 return 0;
1161513228
11616
-out_disable_intr:
11617
- lpfc_sli4_disable_intr(phba);
1161813229 out_free_sysfs_attr:
1161913230 lpfc_free_sysfs_attr(vport);
1162013231 out_destroy_shost:
1162113232 lpfc_destroy_shost(phba);
13233
+out_disable_intr:
13234
+ lpfc_sli4_disable_intr(phba);
1162213235 out_unset_driver_resource:
1162313236 lpfc_unset_driver_resource_phase2(phba);
1162413237 out_unset_driver_resource_s4:
....@@ -11681,25 +13294,28 @@
1168113294 lpfc_nvmet_destroy_targetport(phba);
1168213295 lpfc_nvme_destroy_localport(vport);
1168313296
13297
+ /* De-allocate multi-XRI pools */
13298
+ if (phba->cfg_xri_rebalancing)
13299
+ lpfc_destroy_multixri_pools(phba);
13300
+
1168413301 /*
1168513302 * Bring down the SLI Layer. This step disables all interrupts,
1168613303 * clears the rings, discards all mailbox commands, and resets
1168713304 * the HBA FCoE function.
1168813305 */
1168913306 lpfc_debugfs_terminate(vport);
11690
- lpfc_sli4_hba_unset(phba);
1169113307
1169213308 lpfc_stop_hba_timers(phba);
11693
- spin_lock_irq(&phba->hbalock);
13309
+ spin_lock_irq(&phba->port_list_lock);
1169413310 list_del_init(&vport->listentry);
11695
- spin_unlock_irq(&phba->hbalock);
13311
+ spin_unlock_irq(&phba->port_list_lock);
1169613312
1169713313 /* Perform scsi free before driver resource_unset since scsi
1169813314 * buffers are released to their corresponding pools here.
1169913315 */
11700
- lpfc_scsi_free(phba);
11701
- lpfc_nvme_free(phba);
13316
+ lpfc_io_free(phba);
1170213317 lpfc_free_iocb_list(phba);
13318
+ lpfc_sli4_hba_unset(phba);
1170313319
1170413320 lpfc_unset_driver_resource_phase2(phba);
1170513321 lpfc_sli4_driver_resource_unset(phba);
....@@ -11820,7 +13436,7 @@
1182013436 /* Configure and enable interrupt */
1182113437 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1182213438 if (intr_mode == LPFC_INTR_ERROR) {
11823
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13439
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1182413440 "0294 PM resume Failed to enable interrupt\n");
1182513441 return -EIO;
1182613442 } else
....@@ -11846,7 +13462,7 @@
1184613462 static void
1184713463 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
1184813464 {
11849
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13465
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1185013466 "2828 PCI channel I/O abort preparing for recovery\n");
1185113467 /*
1185213468 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
....@@ -11866,7 +13482,7 @@
1186613482 static void
1186713483 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
1186813484 {
11869
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13485
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1187013486 "2826 PCI channel disable preparing for reset\n");
1187113487
1187213488 /* Block any management I/Os to the device */
....@@ -11875,12 +13491,8 @@
1187513491 /* Block all SCSI devices' I/Os on the host */
1187613492 lpfc_scsi_dev_block(phba);
1187713493
11878
- /* Flush all driver's outstanding SCSI I/Os as we are to reset */
11879
- lpfc_sli_flush_fcp_rings(phba);
11880
-
11881
- /* Flush the outstanding NVME IOs if fc4 type enabled. */
11882
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
11883
- lpfc_sli_flush_nvme_rings(phba);
13494
+ /* Flush all driver's outstanding I/Os as we are to reset */
13495
+ lpfc_sli_flush_io_rings(phba);
1188413496
1188513497 /* stop all timers */
1188613498 lpfc_stop_hba_timers(phba);
....@@ -11902,7 +13514,7 @@
1190213514 static void
1190313515 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
1190413516 {
11905
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13517
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1190613518 "2827 PCI channel permanent disable for failure\n");
1190713519
1190813520 /* Block all SCSI devices' I/Os on the host */
....@@ -11911,12 +13523,8 @@
1191113523 /* stop all timers */
1191213524 lpfc_stop_hba_timers(phba);
1191313525
11914
- /* Clean up all driver's outstanding SCSI I/Os */
11915
- lpfc_sli_flush_fcp_rings(phba);
11916
-
11917
- /* Flush the outstanding NVME IOs if fc4 type enabled. */
11918
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
11919
- lpfc_sli_flush_nvme_rings(phba);
13526
+ /* Clean up all driver's outstanding I/Os */
13527
+ lpfc_sli_flush_io_rings(phba);
1192013528 }
1192113529
1192213530 /**
....@@ -11956,7 +13564,7 @@
1195613564 return PCI_ERS_RESULT_DISCONNECT;
1195713565 default:
1195813566 /* Unknown state, prepare and request slot reset */
11959
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13567
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1196013568 "2825 Unknown PCI error state: x%x\n", state);
1196113569 lpfc_sli4_prep_dev_for_reset(phba);
1196213570 return PCI_ERS_RESULT_NEED_RESET;
....@@ -12011,10 +13619,12 @@
1201113619 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1201213620 spin_unlock_irq(&phba->hbalock);
1201313621
13622
+ /* Init cpu_map array */
13623
+ lpfc_cpu_map_array_init(phba);
1201413624 /* Configure and enable interrupt */
1201513625 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1201613626 if (intr_mode == LPFC_INTR_ERROR) {
12017
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13627
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1201813628 "2824 Cannot re-enable interrupt after "
1201913629 "slot reset.\n");
1202013630 return PCI_ERS_RESULT_DISCONNECT;
....@@ -12057,10 +13667,6 @@
1205713667 /* Bring the device back online */
1205813668 lpfc_online(phba);
1205913669 }
12060
-
12061
- /* Clean up Advanced Error Reporting (AER) if needed */
12062
- if (phba->hba_flag & HBA_AER_ENABLED)
12063
- pci_cleanup_aer_uncorrect_error_status(pdev);
1206413670 }
1206513671
1206613672 /**
....@@ -12123,7 +13729,7 @@
1212313729 lpfc_pci_remove_one_s4(pdev);
1212413730 break;
1212513731 default:
12126
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13732
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1212713733 "1424 Invalid PCI device group: 0x%x\n",
1212813734 phba->pci_dev_grp);
1212913735 break;
....@@ -12160,7 +13766,7 @@
1216013766 rc = lpfc_pci_suspend_one_s4(pdev, msg);
1216113767 break;
1216213768 default:
12163
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13769
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1216413770 "1425 Invalid PCI device group: 0x%x\n",
1216513771 phba->pci_dev_grp);
1216613772 break;
....@@ -12196,7 +13802,7 @@
1219613802 rc = lpfc_pci_resume_one_s4(pdev);
1219713803 break;
1219813804 default:
12199
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13805
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1220013806 "1426 Invalid PCI device group: 0x%x\n",
1220113807 phba->pci_dev_grp);
1220213808 break;
....@@ -12234,7 +13840,7 @@
1223413840 rc = lpfc_io_error_detected_s4(pdev, state);
1223513841 break;
1223613842 default:
12237
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13843
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1223813844 "1427 Invalid PCI device group: 0x%x\n",
1223913845 phba->pci_dev_grp);
1224013846 break;
....@@ -12271,7 +13877,7 @@
1227113877 rc = lpfc_io_slot_reset_s4(pdev);
1227213878 break;
1227313879 default:
12274
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13880
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1227513881 "1428 Invalid PCI device group: 0x%x\n",
1227613882 phba->pci_dev_grp);
1227713883 break;
....@@ -12303,7 +13909,7 @@
1230313909 lpfc_io_resume_s4(pdev);
1230413910 break;
1230513911 default:
12306
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13912
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1230713913 "1429 Invalid PCI device group: 0x%x\n",
1230813914 phba->pci_dev_grp);
1230913915 break;
....@@ -12321,7 +13927,7 @@
1232113927 * is destroyed.
1232213928 *
1232313929 **/
12324
-void
13930
+static void
1232513931 lpfc_sli4_oas_verify(struct lpfc_hba *phba)
1232613932 {
1232713933
....@@ -12332,8 +13938,7 @@
1233213938 phba->cfg_fof = 1;
1233313939 } else {
1233413940 phba->cfg_fof = 0;
12335
- if (phba->device_data_mem_pool)
12336
- mempool_destroy(phba->device_data_mem_pool);
13941
+ mempool_destroy(phba->device_data_mem_pool);
1233713942 phba->device_data_mem_pool = NULL;
1233813943 }
1233913944
....@@ -12341,164 +13946,30 @@
1234113946 }
1234213947
1234313948 /**
12344
- * lpfc_fof_queue_setup - Set up all the fof queues
13949
+ * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
1234513950 * @phba: pointer to lpfc hba data structure.
1234613951 *
12347
- * This routine is invoked to set up all the fof queues for the FC HBA
12348
- * operation.
12349
- *
12350
- * Return codes
12351
- * 0 - successful
12352
- * -ENOMEM - No available memory
13952
+ * This routine checks to see if RAS is supported by the adapter. Check the
13953
+ * function through which RAS support enablement is to be done.
1235313954 **/
12354
-int
12355
-lpfc_fof_queue_setup(struct lpfc_hba *phba)
13955
+void
13956
+lpfc_sli4_ras_init(struct lpfc_hba *phba)
1235613957 {
12357
- struct lpfc_sli_ring *pring;
12358
- int rc;
12359
-
12360
- rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
12361
- if (rc)
12362
- return -ENOMEM;
12363
-
12364
- if (phba->cfg_fof) {
12365
-
12366
- rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
12367
- phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
12368
- if (rc)
12369
- goto out_oas_cq;
12370
-
12371
- rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
12372
- phba->sli4_hba.oas_cq, LPFC_FCP);
12373
- if (rc)
12374
- goto out_oas_wq;
12375
-
12376
- /* Bind this CQ/WQ to the NVME ring */
12377
- pring = phba->sli4_hba.oas_wq->pring;
12378
- pring->sli.sli4.wqp =
12379
- (void *)phba->sli4_hba.oas_wq;
12380
- phba->sli4_hba.oas_cq->pring = pring;
12381
- }
12382
-
12383
- return 0;
12384
-
12385
-out_oas_wq:
12386
- lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
12387
-out_oas_cq:
12388
- lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
12389
- return rc;
12390
-
12391
-}
12392
-
12393
-/**
12394
- * lpfc_fof_queue_create - Create all the fof queues
12395
- * @phba: pointer to lpfc hba data structure.
12396
- *
12397
- * This routine is invoked to allocate all the fof queues for the FC HBA
12398
- * operation. For each SLI4 queue type, the parameters such as queue entry
12399
- * count (queue depth) shall be taken from the module parameter. For now,
12400
- * we just use some constant number as place holder.
12401
- *
12402
- * Return codes
12403
- * 0 - successful
12404
- * -ENOMEM - No availble memory
12405
- * -EIO - The mailbox failed to complete successfully.
12406
- **/
12407
-int
12408
-lpfc_fof_queue_create(struct lpfc_hba *phba)
12409
-{
12410
- struct lpfc_queue *qdesc;
12411
- uint32_t wqesize;
12412
-
12413
- /* Create FOF EQ */
12414
- qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
12415
- phba->sli4_hba.eq_esize,
12416
- phba->sli4_hba.eq_ecount);
12417
- if (!qdesc)
12418
- goto out_error;
12419
-
12420
- qdesc->qe_valid = 1;
12421
- phba->sli4_hba.fof_eq = qdesc;
12422
-
12423
- if (phba->cfg_fof) {
12424
-
12425
- /* Create OAS CQ */
12426
- if (phba->enab_exp_wqcq_pages)
12427
- qdesc = lpfc_sli4_queue_alloc(phba,
12428
- LPFC_EXPANDED_PAGE_SIZE,
12429
- phba->sli4_hba.cq_esize,
12430
- LPFC_CQE_EXP_COUNT);
13958
+ switch (phba->pcidev->device) {
13959
+ case PCI_DEVICE_ID_LANCER_G6_FC:
13960
+ case PCI_DEVICE_ID_LANCER_G7_FC:
13961
+ phba->ras_fwlog.ras_hwsupport = true;
13962
+ if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
13963
+ phba->cfg_ras_fwlog_buffsize)
13964
+ phba->ras_fwlog.ras_enabled = true;
1243113965 else
12432
- qdesc = lpfc_sli4_queue_alloc(phba,
12433
- LPFC_DEFAULT_PAGE_SIZE,
12434
- phba->sli4_hba.cq_esize,
12435
- phba->sli4_hba.cq_ecount);
12436
- if (!qdesc)
12437
- goto out_error;
12438
-
12439
- qdesc->qe_valid = 1;
12440
- phba->sli4_hba.oas_cq = qdesc;
12441
-
12442
- /* Create OAS WQ */
12443
- if (phba->enab_exp_wqcq_pages) {
12444
- wqesize = (phba->fcp_embed_io) ?
12445
- LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
12446
- qdesc = lpfc_sli4_queue_alloc(phba,
12447
- LPFC_EXPANDED_PAGE_SIZE,
12448
- wqesize,
12449
- LPFC_WQE_EXP_COUNT);
12450
- } else
12451
- qdesc = lpfc_sli4_queue_alloc(phba,
12452
- LPFC_DEFAULT_PAGE_SIZE,
12453
- phba->sli4_hba.wq_esize,
12454
- phba->sli4_hba.wq_ecount);
12455
-
12456
- if (!qdesc)
12457
- goto out_error;
12458
-
12459
- phba->sli4_hba.oas_wq = qdesc;
12460
- list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
12461
-
13966
+ phba->ras_fwlog.ras_enabled = false;
13967
+ break;
13968
+ default:
13969
+ phba->ras_fwlog.ras_hwsupport = false;
1246213970 }
12463
- return 0;
12464
-
12465
-out_error:
12466
- lpfc_fof_queue_destroy(phba);
12467
- return -ENOMEM;
1246813971 }
1246913972
12470
-/**
12471
- * lpfc_fof_queue_destroy - Destroy all the fof queues
12472
- * @phba: pointer to lpfc hba data structure.
12473
- *
12474
- * This routine is invoked to release all the SLI4 queues with the FC HBA
12475
- * operation.
12476
- *
12477
- * Return codes
12478
- * 0 - successful
12479
- **/
12480
-int
12481
-lpfc_fof_queue_destroy(struct lpfc_hba *phba)
12482
-{
12483
- /* Release FOF Event queue */
12484
- if (phba->sli4_hba.fof_eq != NULL) {
12485
- lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
12486
- phba->sli4_hba.fof_eq = NULL;
12487
- }
12488
-
12489
- /* Release OAS Completion queue */
12490
- if (phba->sli4_hba.oas_cq != NULL) {
12491
- lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
12492
- phba->sli4_hba.oas_cq = NULL;
12493
- }
12494
-
12495
- /* Release OAS Work queue */
12496
- if (phba->sli4_hba.oas_wq != NULL) {
12497
- lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
12498
- phba->sli4_hba.oas_wq = NULL;
12499
- }
12500
- return 0;
12501
-}
1250213973
1250313974 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
1250413975
....@@ -12546,40 +14017,134 @@
1254614017 {
1254714018 int error = 0;
1254814019
12549
- printk(LPFC_MODULE_DESC "\n");
12550
- printk(LPFC_COPYRIGHT "\n");
14020
+ pr_info(LPFC_MODULE_DESC "\n");
14021
+ pr_info(LPFC_COPYRIGHT "\n");
1255114022
1255214023 error = misc_register(&lpfc_mgmt_dev);
1255314024 if (error)
1255414025 printk(KERN_ERR "Could not register lpfcmgmt device, "
1255514026 "misc_register returned with status %d", error);
1255614027
14028
+ error = -ENOMEM;
1255714029 lpfc_transport_functions.vport_create = lpfc_vport_create;
1255814030 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
1255914031 lpfc_transport_template =
1256014032 fc_attach_transport(&lpfc_transport_functions);
1256114033 if (lpfc_transport_template == NULL)
12562
- return -ENOMEM;
14034
+ goto unregister;
1256314035 lpfc_vport_transport_template =
1256414036 fc_attach_transport(&lpfc_vport_transport_functions);
1256514037 if (lpfc_vport_transport_template == NULL) {
1256614038 fc_release_transport(lpfc_transport_template);
12567
- return -ENOMEM;
14039
+ goto unregister;
1256814040 }
1256914041 lpfc_nvme_cmd_template();
1257014042 lpfc_nvmet_cmd_template();
1257114043
1257214044 /* Initialize in case vector mapping is needed */
12573
- lpfc_used_cpu = NULL;
1257414045 lpfc_present_cpu = num_present_cpus();
1257514046
14047
+ error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
14048
+ "lpfc/sli4:online",
14049
+ lpfc_cpu_online, lpfc_cpu_offline);
14050
+ if (error < 0)
14051
+ goto cpuhp_failure;
14052
+ lpfc_cpuhp_state = error;
14053
+
1257614054 error = pci_register_driver(&lpfc_driver);
12577
- if (error) {
12578
- fc_release_transport(lpfc_transport_template);
12579
- fc_release_transport(lpfc_vport_transport_template);
12580
- }
14055
+ if (error)
14056
+ goto unwind;
1258114057
1258214058 return error;
14059
+
14060
+unwind:
14061
+ cpuhp_remove_multi_state(lpfc_cpuhp_state);
14062
+cpuhp_failure:
14063
+ fc_release_transport(lpfc_transport_template);
14064
+ fc_release_transport(lpfc_vport_transport_template);
14065
+unregister:
14066
+ misc_deregister(&lpfc_mgmt_dev);
14067
+
14068
+ return error;
14069
+}
14070
+
14071
+void lpfc_dmp_dbg(struct lpfc_hba *phba)
14072
+{
14073
+ unsigned int start_idx;
14074
+ unsigned int dbg_cnt;
14075
+ unsigned int temp_idx;
14076
+ int i;
14077
+ int j = 0;
14078
+ unsigned long rem_nsec;
14079
+
14080
+ if (phba->cfg_log_verbose)
14081
+ return;
14082
+
14083
+ if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
14084
+ return;
14085
+
14086
+ start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
14087
+ dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
14088
+ temp_idx = start_idx;
14089
+ if (dbg_cnt >= DBG_LOG_SZ) {
14090
+ dbg_cnt = DBG_LOG_SZ;
14091
+ temp_idx -= 1;
14092
+ } else {
14093
+ if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
14094
+ temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
14095
+ } else {
14096
+ if (start_idx < dbg_cnt)
14097
+ start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
14098
+ else
14099
+ start_idx -= dbg_cnt;
14100
+ }
14101
+ }
14102
+ dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
14103
+ start_idx, temp_idx, dbg_cnt);
14104
+
14105
+ for (i = 0; i < dbg_cnt; i++) {
14106
+ if ((start_idx + i) < DBG_LOG_SZ)
14107
+ temp_idx = (start_idx + i) % DBG_LOG_SZ;
14108
+ else
14109
+ temp_idx = j++;
14110
+ rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
14111
+ dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
14112
+ temp_idx,
14113
+ (unsigned long)phba->dbg_log[temp_idx].t_ns,
14114
+ rem_nsec / 1000,
14115
+ phba->dbg_log[temp_idx].log);
14116
+ }
14117
+ atomic_set(&phba->dbg_log_cnt, 0);
14118
+ atomic_set(&phba->dbg_log_dmping, 0);
14119
+}
14120
+
14121
+__printf(2, 3)
14122
+void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
14123
+{
14124
+ unsigned int idx;
14125
+ va_list args;
14126
+ int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
14127
+ struct va_format vaf;
14128
+
14129
+
14130
+ va_start(args, fmt);
14131
+ if (unlikely(dbg_dmping)) {
14132
+ vaf.fmt = fmt;
14133
+ vaf.va = &args;
14134
+ dev_info(&phba->pcidev->dev, "%pV", &vaf);
14135
+ va_end(args);
14136
+ return;
14137
+ }
14138
+ idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
14139
+ DBG_LOG_SZ;
14140
+
14141
+ atomic_inc(&phba->dbg_log_cnt);
14142
+
14143
+ vscnprintf(phba->dbg_log[idx].log,
14144
+ sizeof(phba->dbg_log[idx].log), fmt, args);
14145
+ va_end(args);
14146
+
14147
+ phba->dbg_log[idx].t_ns = local_clock();
1258314148 }
1258414149
1258514150 /**
....@@ -12594,22 +14159,9 @@
1259414159 {
1259514160 misc_deregister(&lpfc_mgmt_dev);
1259614161 pci_unregister_driver(&lpfc_driver);
14162
+ cpuhp_remove_multi_state(lpfc_cpuhp_state);
1259714163 fc_release_transport(lpfc_transport_template);
1259814164 fc_release_transport(lpfc_vport_transport_template);
12599
- if (_dump_buf_data) {
12600
- printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
12601
- "_dump_buf_data at 0x%p\n",
12602
- (1L << _dump_buf_data_order), _dump_buf_data);
12603
- free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
12604
- }
12605
-
12606
- if (_dump_buf_dif) {
12607
- printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
12608
- "_dump_buf_dif at 0x%p\n",
12609
- (1L << _dump_buf_dif_order), _dump_buf_dif);
12610
- free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
12611
- }
12612
- kfree(lpfc_used_cpu);
1261314165 idr_destroy(&lpfc_hba_index);
1261414166 }
1261514167