hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/scsi/qla2xxx/qla_os.c
....@@ -1,8 +1,7 @@
1
+// SPDX-License-Identifier: GPL-2.0-only
12 /*
23 * QLogic Fibre Channel HBA Driver
34 * Copyright (c) 2003-2014 QLogic Corporation
4
- *
5
- * See LICENSE.qla2xxx for copyright and licensing details.
65 */
76 #include "qla_def.h"
87
....@@ -14,6 +13,9 @@
1413 #include <linux/kobject.h>
1514 #include <linux/slab.h>
1615 #include <linux/blk-mq-pci.h>
16
+#include <linux/refcount.h>
17
+#include <linux/crash_dump.h>
18
+
1719 #include <scsi/scsi_tcq.h>
1820 #include <scsi/scsicam.h>
1921 #include <scsi/scsi_transport.h>
....@@ -33,6 +35,16 @@
3335 */
3436 struct kmem_cache *srb_cachep;
3537
38
+int ql2xfulldump_on_mpifail;
39
+module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
40
+MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
41
+ "Set this to take full dump on MPI hang.");
42
+
43
+int ql2xenforce_iocb_limit = 1;
44
+module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
45
+MODULE_PARM_DESC(ql2xenforce_iocb_limit,
46
+ "Enforce IOCB throttling, to avoid FW congestion. (default: 1)");
47
+
3648 /*
3749 * CT6 CTX allocation cache
3850 */
....@@ -40,7 +52,7 @@
4052 /*
4153 * error level for logging
4254 */
43
-int ql_errlev = ql_log_all;
55
+uint ql_errlev = 0x8001;
4456
4557 static int ql2xenableclass2;
4658 module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
....@@ -65,9 +77,9 @@
6577 MODULE_PARM_DESC(ql2xplogiabsentdevice,
6678 "Option to enable PLOGI to devices that are not present after "
6779 "a Fabric scan. This is needed for several broken switches. "
68
- "Default is 0 - no PLOGI. 1 - perfom PLOGI.");
80
+ "Default is 0 - no PLOGI. 1 - perform PLOGI.");
6981
70
-int ql2xloginretrycount = 0;
82
+int ql2xloginretrycount;
7183 module_param(ql2xloginretrycount, int, S_IRUGO);
7284 MODULE_PARM_DESC(ql2xloginretrycount,
7385 "Specify an alternate value for the NVRAM login retry count.");
....@@ -106,12 +118,13 @@
106118 "Set to control shifting of command type processing "
107119 "based on total number of SG elements.");
108120
109
-int ql2xfdmienable=1;
121
+int ql2xfdmienable = 1;
110122 module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
111123 module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
112124 MODULE_PARM_DESC(ql2xfdmienable,
113125 "Enables FDMI registrations. "
114
- "0 - no FDMI. Default is 1 - perform FDMI.");
126
+ "0 - no FDMI registrations. "
127
+ "1 - provide FDMI registrations (default).");
115128
116129 #define MAX_Q_DEPTH 64
117130 static int ql2xmaxqdepth = MAX_Q_DEPTH;
....@@ -120,11 +133,7 @@
120133 "Maximum queue depth to set for each LUN. "
121134 "Default is 64.");
122135
123
-#if (IS_ENABLED(CONFIG_NVME_FC))
124
-int ql2xenabledif;
125
-#else
126136 int ql2xenabledif = 2;
127
-#endif
128137 module_param(ql2xenabledif, int, S_IRUGO);
129138 MODULE_PARM_DESC(ql2xenabledif,
130139 " Enable T10-CRC-DIF:\n"
....@@ -152,7 +161,7 @@
152161 " 1 -- Error isolation enabled only for DIX Type 0\n"
153162 " 2 -- Error isolation enabled for all Types\n");
154163
155
-int ql2xiidmaenable=1;
164
+int ql2xiidmaenable = 1;
156165 module_param(ql2xiidmaenable, int, S_IRUGO);
157166 MODULE_PARM_DESC(ql2xiidmaenable,
158167 "Enables iIDMA settings "
....@@ -198,7 +207,7 @@
198207 module_param(ql2xasynctmfenable, int, S_IRUGO);
199208 MODULE_PARM_DESC(ql2xasynctmfenable,
200209 "Enables issue of TM IOCBs asynchronously via IOCB mechanism"
201
- "Default is 0 - Issue TM IOCBs via mailbox mechanism.");
210
+ "Default is 1 - Issue TM IOCBs via mailbox mechanism.");
202211
203212 int ql2xdontresethba;
204213 module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
....@@ -226,7 +235,7 @@
226235 "0 - MiniDump disabled. "
227236 "1 (Default) - MiniDump enabled.");
228237
229
-int ql2xexlogins = 0;
238
+int ql2xexlogins;
230239 module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR);
231240 MODULE_PARM_DESC(ql2xexlogins,
232241 "Number of extended Logins. "
....@@ -242,7 +251,7 @@
242251 MODULE_PARM_DESC(ql2xiniexchg,
243252 "Number of initiator exchanges.");
244253
245
-int ql2xfwholdabts = 0;
254
+int ql2xfwholdabts;
246255 module_param(ql2xfwholdabts, int, S_IRUGO);
247256 MODULE_PARM_DESC(ql2xfwholdabts,
248257 "Allow FW to hold status IOCB until ABTS rsp received. "
....@@ -277,58 +286,48 @@
277286 "Reserve 1/2 of emergency exchanges for ELS.\n"
278287 " 0 (default): disabled");
279288
280
-/*
281
- * SCSI host template entry points
282
- */
283
-static int qla2xxx_slave_configure(struct scsi_device * device);
284
-static int qla2xxx_slave_alloc(struct scsi_device *);
285
-static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
286
-static void qla2xxx_scan_start(struct Scsi_Host *);
287
-static void qla2xxx_slave_destroy(struct scsi_device *);
288
-static int qla2xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
289
-static int qla2xxx_eh_abort(struct scsi_cmnd *);
290
-static int qla2xxx_eh_device_reset(struct scsi_cmnd *);
291
-static int qla2xxx_eh_target_reset(struct scsi_cmnd *);
292
-static int qla2xxx_eh_bus_reset(struct scsi_cmnd *);
293
-static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
289
+static int ql2xprotmask;
290
+module_param(ql2xprotmask, int, 0644);
291
+MODULE_PARM_DESC(ql2xprotmask,
292
+ "Override DIF/DIX protection capabilities mask\n"
293
+ "Default is 0 which sets protection mask based on "
294
+ "capabilities reported by HBA firmware.\n");
295
+
296
+static int ql2xprotguard;
297
+module_param(ql2xprotguard, int, 0644);
298
+MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n"
299
+ " 0 -- Let HBA firmware decide\n"
300
+ " 1 -- Force T10 CRC\n"
301
+ " 2 -- Force IP checksum\n");
302
+
303
+int ql2xdifbundlinginternalbuffers;
304
+module_param(ql2xdifbundlinginternalbuffers, int, 0644);
305
+MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers,
306
+ "Force using internal buffers for DIF information\n"
307
+ "0 (Default). Based on check.\n"
308
+ "1 Force using internal buffers\n");
309
+
310
+int ql2xsmartsan;
311
+module_param(ql2xsmartsan, int, 0444);
312
+module_param_named(smartsan, ql2xsmartsan, int, 0444);
313
+MODULE_PARM_DESC(ql2xsmartsan,
314
+ "Send SmartSAN Management Attributes for FDMI Registration."
315
+ " Default is 0 - No SmartSAN registration,"
316
+ " 1 - Register SmartSAN Management Attributes.");
317
+
318
+int ql2xrdpenable;
319
+module_param(ql2xrdpenable, int, 0444);
320
+module_param_named(rdpenable, ql2xrdpenable, int, 0444);
321
+MODULE_PARM_DESC(ql2xrdpenable,
322
+ "Enables RDP responses. "
323
+ "0 - no RDP responses (default). "
324
+ "1 - provide RDP responses.");
294325
295326 static void qla2x00_clear_drv_active(struct qla_hw_data *);
296327 static void qla2x00_free_device(scsi_qla_host_t *);
297328 static int qla2xxx_map_queues(struct Scsi_Host *shost);
298329 static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
299330
300
-
301
-struct scsi_host_template qla2xxx_driver_template = {
302
- .module = THIS_MODULE,
303
- .name = QLA2XXX_DRIVER_NAME,
304
- .queuecommand = qla2xxx_queuecommand,
305
-
306
- .eh_timed_out = fc_eh_timed_out,
307
- .eh_abort_handler = qla2xxx_eh_abort,
308
- .eh_device_reset_handler = qla2xxx_eh_device_reset,
309
- .eh_target_reset_handler = qla2xxx_eh_target_reset,
310
- .eh_bus_reset_handler = qla2xxx_eh_bus_reset,
311
- .eh_host_reset_handler = qla2xxx_eh_host_reset,
312
-
313
- .slave_configure = qla2xxx_slave_configure,
314
-
315
- .slave_alloc = qla2xxx_slave_alloc,
316
- .slave_destroy = qla2xxx_slave_destroy,
317
- .scan_finished = qla2xxx_scan_finished,
318
- .scan_start = qla2xxx_scan_start,
319
- .change_queue_depth = scsi_change_queue_depth,
320
- .map_queues = qla2xxx_map_queues,
321
- .this_id = -1,
322
- .cmd_per_lun = 3,
323
- .use_clustering = ENABLE_CLUSTERING,
324
- .sg_tablesize = SG_ALL,
325
-
326
- .max_sectors = 0xFFFF,
327
- .shost_attrs = qla2x00_host_attrs,
328
-
329
- .supported_mode = MODE_INITIATOR,
330
- .track_queue_depth = 1,
331
-};
332331
333332 static struct scsi_transport_template *qla2xxx_transport_template = NULL;
334333 struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
....@@ -383,21 +382,24 @@
383382 struct rsp_que *rsp)
384383 {
385384 struct qla_hw_data *ha = vha->hw;
385
+
386386 rsp->qpair = ha->base_qpair;
387387 rsp->req = req;
388
+ ha->base_qpair->hw = ha;
388389 ha->base_qpair->req = req;
389390 ha->base_qpair->rsp = rsp;
390391 ha->base_qpair->vha = vha;
391392 ha->base_qpair->qp_lock_ptr = &ha->hardware_lock;
392393 ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
393394 ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
395
+ ha->base_qpair->srb_mempool = ha->srb_mempool;
394396 INIT_LIST_HEAD(&ha->base_qpair->hints_list);
395397 ha->base_qpair->enable_class_2 = ql2xenableclass2;
396398 /* init qpair to this cpu. Will adjust at run time. */
397399 qla_cpu_update(rsp->qpair, raw_smp_processor_id());
398400 ha->base_qpair->pdev = ha->pdev;
399401
400
- if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
402
+ if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
401403 ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
402404 }
403405
....@@ -405,6 +407,7 @@
405407 struct rsp_que *rsp)
406408 {
407409 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
410
+
408411 ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *),
409412 GFP_KERNEL);
410413 if (!ha->req_q_map) {
....@@ -550,80 +553,73 @@
550553 }
551554
552555 static char *
553
-qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
556
+qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
554557 {
555558 struct qla_hw_data *ha = vha->hw;
556
- static char *pci_bus_modes[] = {
559
+ static const char *const pci_bus_modes[] = {
557560 "33", "66", "100", "133",
558561 };
559562 uint16_t pci_bus;
560563
561
- strcpy(str, "PCI");
562564 pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
563565 if (pci_bus) {
564
- strcat(str, "-X (");
565
- strcat(str, pci_bus_modes[pci_bus]);
566
+ snprintf(str, str_len, "PCI-X (%s MHz)",
567
+ pci_bus_modes[pci_bus]);
566568 } else {
567569 pci_bus = (ha->pci_attr & BIT_8) >> 8;
568
- strcat(str, " (");
569
- strcat(str, pci_bus_modes[pci_bus]);
570
+ snprintf(str, str_len, "PCI (%s MHz)", pci_bus_modes[pci_bus]);
570571 }
571
- strcat(str, " MHz)");
572572
573
- return (str);
573
+ return str;
574574 }
575575
576576 static char *
577
-qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
577
+qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
578578 {
579
- static char *pci_bus_modes[] = { "33", "66", "100", "133", };
579
+ static const char *const pci_bus_modes[] = {
580
+ "33", "66", "100", "133",
581
+ };
580582 struct qla_hw_data *ha = vha->hw;
581583 uint32_t pci_bus;
582584
583585 if (pci_is_pcie(ha->pdev)) {
584
- char lwstr[6];
585586 uint32_t lstat, lspeed, lwidth;
587
+ const char *speed_str;
586588
587589 pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
588590 lspeed = lstat & PCI_EXP_LNKCAP_SLS;
589591 lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;
590592
591
- strcpy(str, "PCIe (");
592593 switch (lspeed) {
593594 case 1:
594
- strcat(str, "2.5GT/s ");
595
+ speed_str = "2.5GT/s";
595596 break;
596597 case 2:
597
- strcat(str, "5.0GT/s ");
598
+ speed_str = "5.0GT/s";
598599 break;
599600 case 3:
600
- strcat(str, "8.0GT/s ");
601
+ speed_str = "8.0GT/s";
602
+ break;
603
+ case 4:
604
+ speed_str = "16.0GT/s";
601605 break;
602606 default:
603
- strcat(str, "<unknown> ");
607
+ speed_str = "<unknown>";
604608 break;
605609 }
606
- snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
607
- strcat(str, lwstr);
610
+ snprintf(str, str_len, "PCIe (%s x%d)", speed_str, lwidth);
608611
609612 return str;
610613 }
611614
612
- strcpy(str, "PCI");
613615 pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
614
- if (pci_bus == 0 || pci_bus == 8) {
615
- strcat(str, " (");
616
- strcat(str, pci_bus_modes[pci_bus >> 3]);
617
- } else {
618
- strcat(str, "-X ");
619
- if (pci_bus & BIT_2)
620
- strcat(str, "Mode 2");
621
- else
622
- strcat(str, "Mode 1");
623
- strcat(str, " (");
624
- strcat(str, pci_bus_modes[pci_bus & ~BIT_2]);
625
- }
626
- strcat(str, " MHz)");
616
+ if (pci_bus == 0 || pci_bus == 8)
617
+ snprintf(str, str_len, "PCI (%s MHz)",
618
+ pci_bus_modes[pci_bus >> 3]);
619
+ else
620
+ snprintf(str, str_len, "PCI-X Mode %d (%s MHz)",
621
+ pci_bus & 4 ? 2 : 1,
622
+ pci_bus_modes[pci_bus & 3]);
627623
628624 return str;
629625 }
....@@ -676,13 +672,10 @@
676672 return str;
677673 }
678674
679
-void
680
-qla2x00_sp_free_dma(void *ptr)
675
+void qla2x00_sp_free_dma(srb_t *sp)
681676 {
682
- srb_t *sp = ptr;
683677 struct qla_hw_data *ha = sp->vha->hw;
684678 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
685
- void *ctx = GET_CMD_CTX_SP(sp);
686679
687680 if (sp->flags & SRB_DMA_VALID) {
688681 scsi_dma_unmap(cmd);
....@@ -695,24 +688,21 @@
695688 sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
696689 }
697690
698
- if (!ctx)
699
- goto end;
700
-
701691 if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
702692 /* List assured to be having elements */
703
- qla2x00_clean_dsd_pool(ha, ctx);
693
+ qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
704694 sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
705695 }
706696
707697 if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
708
- struct crc_context *ctx0 = ctx;
698
+ struct crc_context *ctx0 = sp->u.scmd.crc_ctx;
709699
710700 dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
711701 sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
712702 }
713703
714704 if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
715
- struct ct6_dsd *ctx1 = ctx;
705
+ struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;
716706
717707 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
718708 ctx1->fcp_cmnd_dma);
....@@ -721,44 +711,25 @@
721711 ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
722712 mempool_free(ctx1, ha->ctx_mempool);
723713 }
724
-
725
-end:
726
- if (sp->type != SRB_NVME_CMD && sp->type != SRB_NVME_LS) {
727
- CMD_SP(cmd) = NULL;
728
- qla2x00_rel_sp(sp);
729
- }
730714 }
731715
732
-void
733
-qla2x00_sp_compl(void *ptr, int res)
716
+void qla2x00_sp_compl(srb_t *sp, int res)
734717 {
735
- srb_t *sp = ptr;
736718 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
737
-
738
- cmd->result = res;
739
-
740
- if (atomic_read(&sp->ref_count) == 0) {
741
- ql_dbg(ql_dbg_io, sp->vha, 0x3015,
742
- "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
743
- sp, GET_CMD_SP(sp));
744
- if (ql2xextended_error_logging & ql_dbg_io)
745
- WARN_ON(atomic_read(&sp->ref_count) == 0);
746
- return;
747
- }
748
- if (!atomic_dec_and_test(&sp->ref_count))
749
- return;
719
+ struct completion *comp = sp->comp;
750720
751721 sp->free(sp);
722
+ cmd->result = res;
723
+ CMD_SP(cmd) = NULL;
752724 cmd->scsi_done(cmd);
725
+ if (comp)
726
+ complete(comp);
753727 }
754728
755
-void
756
-qla2xxx_qpair_sp_free_dma(void *ptr)
729
+void qla2xxx_qpair_sp_free_dma(srb_t *sp)
757730 {
758
- srb_t *sp = (srb_t *)ptr;
759731 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
760732 struct qla_hw_data *ha = sp->fcport->vha->hw;
761
- void *ctx = GET_CMD_CTX_SP(sp);
762733
763734 if (sp->flags & SRB_DMA_VALID) {
764735 scsi_dma_unmap(cmd);
....@@ -771,62 +742,81 @@
771742 sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
772743 }
773744
774
- if (!ctx)
775
- goto end;
776
-
777745 if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
778746 /* List assured to be having elements */
779
- qla2x00_clean_dsd_pool(ha, ctx);
747
+ qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
780748 sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
781749 }
782750
783
- if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
784
- struct crc_context *ctx0 = ctx;
751
+ if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) {
752
+ struct crc_context *difctx = sp->u.scmd.crc_ctx;
753
+ struct dsd_dma *dif_dsd, *nxt_dsd;
785754
786
- dma_pool_free(ha->dl_dma_pool, ctx, ctx0->crc_ctx_dma);
787
- sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
755
+ list_for_each_entry_safe(dif_dsd, nxt_dsd,
756
+ &difctx->ldif_dma_hndl_list, list) {
757
+ list_del(&dif_dsd->list);
758
+ dma_pool_free(ha->dif_bundl_pool, dif_dsd->dsd_addr,
759
+ dif_dsd->dsd_list_dma);
760
+ kfree(dif_dsd);
761
+ difctx->no_dif_bundl--;
762
+ }
763
+
764
+ list_for_each_entry_safe(dif_dsd, nxt_dsd,
765
+ &difctx->ldif_dsd_list, list) {
766
+ list_del(&dif_dsd->list);
767
+ dma_pool_free(ha->dl_dma_pool, dif_dsd->dsd_addr,
768
+ dif_dsd->dsd_list_dma);
769
+ kfree(dif_dsd);
770
+ difctx->no_ldif_dsd--;
771
+ }
772
+
773
+ if (difctx->no_ldif_dsd) {
774
+ ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
775
+ "%s: difctx->no_ldif_dsd=%x\n",
776
+ __func__, difctx->no_ldif_dsd);
777
+ }
778
+
779
+ if (difctx->no_dif_bundl) {
780
+ ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022,
781
+ "%s: difctx->no_dif_bundl=%x\n",
782
+ __func__, difctx->no_dif_bundl);
783
+ }
784
+ sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID;
788785 }
789786
790787 if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
791
- struct ct6_dsd *ctx1 = ctx;
788
+ struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;
789
+
792790 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
793791 ctx1->fcp_cmnd_dma);
794792 list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
795793 ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
796794 ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
797795 mempool_free(ctx1, ha->ctx_mempool);
796
+ sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
798797 }
799
-end:
800
- CMD_SP(cmd) = NULL;
801
- qla2xxx_rel_qpair_sp(sp->qpair, sp);
798
+
799
+ if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
800
+ struct crc_context *ctx0 = sp->u.scmd.crc_ctx;
801
+
802
+ dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
803
+ sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
804
+ }
802805 }
803806
804
-void
805
-qla2xxx_qpair_sp_compl(void *ptr, int res)
807
+void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
806808 {
807
- srb_t *sp = ptr;
808809 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
809
-
810
- cmd->result = res;
811
-
812
- if (atomic_read(&sp->ref_count) == 0) {
813
- ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3079,
814
- "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
815
- sp, GET_CMD_SP(sp));
816
- if (ql2xextended_error_logging & ql_dbg_io)
817
- WARN_ON(atomic_read(&sp->ref_count) == 0);
818
- return;
819
- }
820
- if (!atomic_dec_and_test(&sp->ref_count))
821
- return;
810
+ struct completion *comp = sp->comp;
822811
823812 sp->free(sp);
813
+ cmd->result = res;
814
+ CMD_SP(cmd) = NULL;
824815 cmd->scsi_done(cmd);
816
+ if (comp)
817
+ complete(comp);
825818 }
826819
827
-/* If we are SP1 here, we need to still take and release the host_lock as SP1
828
- * does not have the changes necessary to avoid taking host->host_lock.
829
- */
830820 static int
831821 qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
832822 {
....@@ -837,23 +827,21 @@
837827 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
838828 srb_t *sp;
839829 int rval;
840
- struct qla_qpair *qpair = NULL;
841
- uint32_t tag;
842
- uint16_t hwq;
843830
844
- if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) {
831
+ if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) ||
832
+ WARN_ON_ONCE(!rport)) {
845833 cmd->result = DID_NO_CONNECT << 16;
846834 goto qc24_fail_command;
847835 }
848836
849837 if (ha->mqenable) {
850
- if (shost_use_blk_mq(vha->host)) {
851
- tag = blk_mq_unique_tag(cmd->request);
852
- hwq = blk_mq_unique_tag_to_hwq(tag);
853
- qpair = ha->queue_pair_map[hwq];
854
- } else if (vha->vp_idx && vha->qpair) {
855
- qpair = vha->qpair;
856
- }
838
+ uint32_t tag;
839
+ uint16_t hwq;
840
+ struct qla_qpair *qpair = NULL;
841
+
842
+ tag = blk_mq_unique_tag(cmd->request);
843
+ hwq = blk_mq_unique_tag_to_hwq(tag);
844
+ qpair = ha->queue_pair_map[hwq];
857845
858846 if (qpair)
859847 return qla2xxx_mqueuecommand(host, cmd, qpair);
....@@ -891,12 +879,12 @@
891879 goto qc24_fail_command;
892880 }
893881
894
- if (!fcport) {
895
- cmd->result = DID_NO_CONNECT << 16;
882
+ if (!fcport || fcport->deleted) {
883
+ cmd->result = DID_IMM_RETRY << 16;
896884 goto qc24_fail_command;
897885 }
898886
899
- if (atomic_read(&fcport->state) != FCS_ONLINE) {
887
+ if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
900888 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
901889 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
902890 ql_dbg(ql_dbg_io, vha, 0x3005,
....@@ -920,13 +908,12 @@
920908 else
921909 goto qc24_target_busy;
922910
923
- sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
924
- if (!sp)
925
- goto qc24_host_busy;
911
+ sp = scsi_cmd_priv(cmd);
912
+ qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport);
926913
927914 sp->u.scmd.cmd = cmd;
928915 sp->type = SRB_SCSI_CMD;
929
- atomic_set(&sp->ref_count, 1);
916
+
930917 CMD_SP(cmd) = (void *)sp;
931918 sp->free = qla2x00_sp_free_dma;
932919 sp->done = qla2x00_sp_compl;
....@@ -942,9 +929,6 @@
942929
943930 qc24_host_busy_free_sp:
944931 sp->free(sp);
945
-
946
-qc24_host_busy:
947
- return SCSI_MLQUEUE_HOST_BUSY;
948932
949933 qc24_target_busy:
950934 return SCSI_MLQUEUE_TARGET_BUSY;
....@@ -968,7 +952,7 @@
968952 srb_t *sp;
969953 int rval;
970954
971
- rval = fc_remote_port_chkready(rport);
955
+ rval = rport ? fc_remote_port_chkready(rport) : FC_PORTSTATE_OFFLINE;
972956 if (rval) {
973957 cmd->result = rval;
974958 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
....@@ -977,12 +961,19 @@
977961 goto qc24_fail_command;
978962 }
979963
980
- if (!fcport) {
964
+ if (!qpair->online) {
965
+ ql_dbg(ql_dbg_io, vha, 0x3077,
966
+ "qpair not online. eeh_busy=%d.\n", ha->flags.eeh_busy);
981967 cmd->result = DID_NO_CONNECT << 16;
982968 goto qc24_fail_command;
983969 }
984970
985
- if (atomic_read(&fcport->state) != FCS_ONLINE) {
971
+ if (!fcport || fcport->deleted) {
972
+ cmd->result = DID_IMM_RETRY << 16;
973
+ goto qc24_fail_command;
974
+ }
975
+
976
+ if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
986977 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
987978 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
988979 ql_dbg(ql_dbg_io, vha, 0x3077,
....@@ -1006,17 +997,14 @@
1006997 else
1007998 goto qc24_target_busy;
1008999
1009
- sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
1010
- if (!sp)
1011
- goto qc24_host_busy;
1000
+ sp = scsi_cmd_priv(cmd);
1001
+ qla2xxx_init_sp(sp, vha, qpair, fcport);
10121002
10131003 sp->u.scmd.cmd = cmd;
10141004 sp->type = SRB_SCSI_CMD;
1015
- atomic_set(&sp->ref_count, 1);
10161005 CMD_SP(cmd) = (void *)sp;
10171006 sp->free = qla2xxx_qpair_sp_free_dma;
10181007 sp->done = qla2xxx_qpair_sp_compl;
1019
- sp->qpair = qpair;
10201008
10211009 rval = ha->isp_ops->start_scsi_mq(sp);
10221010 if (rval != QLA_SUCCESS) {
....@@ -1029,9 +1017,6 @@
10291017
10301018 qc24_host_busy_free_sp:
10311019 sp->free(sp);
1032
-
1033
-qc24_host_busy:
1034
- return SCSI_MLQUEUE_HOST_BUSY;
10351020
10361021 qc24_target_busy:
10371022 return SCSI_MLQUEUE_TARGET_BUSY;
....@@ -1051,8 +1036,8 @@
10511036 * cmd = Scsi Command to wait on.
10521037 *
10531038 * Return:
1054
- * Not Found : 0
1055
- * Found : 1
1039
+ * Completed in time : QLA_SUCCESS
1040
+ * Did not complete in time : QLA_FUNCTION_FAILED
10561041 */
10571042 static int
10581043 qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
....@@ -1143,9 +1128,17 @@
11431128 void
11441129 qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
11451130 {
1146
- qla2x00_mark_all_devices_lost(vha, 0);
1131
+ u8 i;
11471132
1148
- wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha), 10*HZ);
1133
+ qla2x00_mark_all_devices_lost(vha);
1134
+
1135
+ for (i = 0; i < 10; i++) {
1136
+ if (wait_event_timeout(vha->fcport_waitQ,
1137
+ test_fcport_count(vha), HZ) > 0)
1138
+ break;
1139
+ }
1140
+
1141
+ flush_workqueue(vha->hw->wq);
11491142 }
11501143
11511144 /*
....@@ -1204,41 +1197,6 @@
12041197 return return_status;
12051198 }
12061199
1207
-static void
1208
-sp_get(struct srb *sp)
1209
-{
1210
- atomic_inc(&sp->ref_count);
1211
-}
1212
-
1213
-#define ISP_REG_DISCONNECT 0xffffffffU
1214
-/**************************************************************************
1215
-* qla2x00_isp_reg_stat
1216
-*
1217
-* Description:
1218
-* Read the host status register of ISP before aborting the command.
1219
-*
1220
-* Input:
1221
-* ha = pointer to host adapter structure.
1222
-*
1223
-*
1224
-* Returns:
1225
-* Either true or false.
1226
-*
1227
-* Note: Return true if there is register disconnect.
1228
-**************************************************************************/
1229
-static inline
1230
-uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
1231
-{
1232
- struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1233
- struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
1234
-
1235
- if (IS_P3P_TYPE(ha))
1236
- return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
1237
- else
1238
- return ((RD_REG_DWORD(&reg->host_status)) ==
1239
- ISP_REG_DISCONNECT);
1240
-}
1241
-
12421200 /**************************************************************************
12431201 * qla2xxx_eh_abort
12441202 *
....@@ -1258,84 +1216,89 @@
12581216 qla2xxx_eh_abort(struct scsi_cmnd *cmd)
12591217 {
12601218 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1219
+ DECLARE_COMPLETION_ONSTACK(comp);
12611220 srb_t *sp;
12621221 int ret;
12631222 unsigned int id;
12641223 uint64_t lun;
1265
- unsigned long flags;
1266
- int rval, wait = 0;
1224
+ int rval;
12671225 struct qla_hw_data *ha = vha->hw;
1226
+ uint32_t ratov_j;
1227
+ struct qla_qpair *qpair;
1228
+ unsigned long flags;
1229
+ int fast_fail_status = SUCCESS;
12681230
12691231 if (qla2x00_isp_reg_stat(ha)) {
12701232 ql_log(ql_log_info, vha, 0x8042,
12711233 "PCI/Register disconnect, exiting.\n");
1234
+ qla_pci_set_eeh_busy(vha);
12721235 return FAILED;
12731236 }
1274
- if (!CMD_SP(cmd))
1275
- return SUCCESS;
12761237
1238
+ /* Save any FAST_IO_FAIL value to return later if abort succeeds */
12771239 ret = fc_block_scsi_eh(cmd);
12781240 if (ret != 0)
1279
- return ret;
1280
- ret = SUCCESS;
1241
+ fast_fail_status = ret;
1242
+
1243
+ sp = scsi_cmd_priv(cmd);
1244
+ qpair = sp->qpair;
1245
+
1246
+ if ((sp->fcport && sp->fcport->deleted) || !qpair)
1247
+ return fast_fail_status != SUCCESS ? fast_fail_status : FAILED;
1248
+
1249
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
1250
+ sp->comp = &comp;
1251
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
1252
+
12811253
12821254 id = cmd->device->id;
12831255 lun = cmd->device->lun;
1284
-
1285
- spin_lock_irqsave(&ha->hardware_lock, flags);
1286
- sp = (srb_t *) CMD_SP(cmd);
1287
- if (!sp) {
1288
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
1289
- return SUCCESS;
1290
- }
12911256
12921257 ql_dbg(ql_dbg_taskm, vha, 0x8002,
12931258 "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
12941259 vha->host_no, id, lun, sp, cmd, sp->handle);
12951260
1296
- /* Get a reference to the sp and drop the lock.*/
1297
- sp_get(sp);
1298
-
1299
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
1261
+ /*
1262
+ * Abort will release the original Command/sp from FW. Let the
1263
+ * original command call scsi_done. In return, he will wakeup
1264
+ * this sleeping thread.
1265
+ */
13001266 rval = ha->isp_ops->abort_command(sp);
1301
- if (rval) {
1302
- if (rval == QLA_FUNCTION_PARAMETER_ERROR)
1303
- ret = SUCCESS;
1304
- else
1267
+
1268
+ ql_dbg(ql_dbg_taskm, vha, 0x8003,
1269
+ "Abort command mbx cmd=%p, rval=%x.\n", cmd, rval);
1270
+
1271
+ /* Wait for the command completion. */
1272
+ ratov_j = ha->r_a_tov/10 * 4 * 1000;
1273
+ ratov_j = msecs_to_jiffies(ratov_j);
1274
+ switch (rval) {
1275
+ case QLA_SUCCESS:
1276
+ if (!wait_for_completion_timeout(&comp, ratov_j)) {
1277
+ ql_dbg(ql_dbg_taskm, vha, 0xffff,
1278
+ "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
1279
+ __func__, ha->r_a_tov/10);
13051280 ret = FAILED;
1306
-
1307
- ql_dbg(ql_dbg_taskm, vha, 0x8003,
1308
- "Abort command mbx failed cmd=%p, rval=%x.\n", cmd, rval);
1309
- } else {
1310
- ql_dbg(ql_dbg_taskm, vha, 0x8004,
1311
- "Abort command mbx success cmd=%p.\n", cmd);
1312
- wait = 1;
1313
- }
1314
-
1315
- spin_lock_irqsave(&ha->hardware_lock, flags);
1316
- sp->done(sp, 0);
1317
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
1318
-
1319
- /* Did the command return during mailbox execution? */
1320
- if (ret == FAILED && !CMD_SP(cmd))
1321
- ret = SUCCESS;
1322
-
1323
- /* Wait for the command to be returned. */
1324
- if (wait) {
1325
- if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
1326
- ql_log(ql_log_warn, vha, 0x8006,
1327
- "Abort handler timed out cmd=%p.\n", cmd);
1328
- ret = FAILED;
1281
+ } else {
1282
+ ret = fast_fail_status;
13291283 }
1284
+ break;
1285
+ default:
1286
+ ret = FAILED;
1287
+ break;
13301288 }
1289
+
1290
+ sp->comp = NULL;
13311291
13321292 ql_log(ql_log_info, vha, 0x801c,
1333
- "Abort command issued nexus=%ld:%d:%llu -- %d %x.\n",
1334
- vha->host_no, id, lun, wait, ret);
1293
+ "Abort command issued nexus=%ld:%d:%llu -- %x.\n",
1294
+ vha->host_no, id, lun, ret);
13351295
13361296 return ret;
13371297 }
13381298
1299
+/*
1300
+ * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
1301
+ */
13391302 int
13401303 qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
13411304 uint64_t l, enum nexus_wait_type type)
....@@ -1409,6 +1372,9 @@
14091372 if (err != 0)
14101373 return err;
14111374
1375
+ if (fcport->deleted)
1376
+ return SUCCESS;
1377
+
14121378 ql_log(ql_log_info, vha, 0x8009,
14131379 "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no,
14141380 cmd->device->id, cmd->device->lun, cmd);
....@@ -1420,7 +1386,7 @@
14201386 goto eh_reset_failed;
14211387 }
14221388 err = 2;
1423
- if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
1389
+ if (do_reset(fcport, cmd->device->lun, 1)
14241390 != QLA_SUCCESS) {
14251391 ql_log(ql_log_warn, vha, 0x800c,
14261392 "do_reset failed for cmd=%p.\n", cmd);
....@@ -1457,6 +1423,7 @@
14571423 if (qla2x00_isp_reg_stat(ha)) {
14581424 ql_log(ql_log_info, vha, 0x803e,
14591425 "PCI/Register disconnect, exiting.\n");
1426
+ qla_pci_set_eeh_busy(vha);
14601427 return FAILED;
14611428 }
14621429
....@@ -1473,6 +1440,7 @@
14731440 if (qla2x00_isp_reg_stat(ha)) {
14741441 ql_log(ql_log_info, vha, 0x803f,
14751442 "PCI/Register disconnect, exiting.\n");
1443
+ qla_pci_set_eeh_busy(vha);
14761444 return FAILED;
14771445 }
14781446
....@@ -1508,6 +1476,7 @@
15081476 if (qla2x00_isp_reg_stat(ha)) {
15091477 ql_log(ql_log_info, vha, 0x8040,
15101478 "PCI/Register disconnect, exiting.\n");
1479
+ qla_pci_set_eeh_busy(vha);
15111480 return FAILED;
15121481 }
15131482
....@@ -1522,6 +1491,9 @@
15221491 if (ret != 0)
15231492 return ret;
15241493 ret = FAILED;
1494
+
1495
+ if (qla2x00_chip_is_down(vha))
1496
+ return ret;
15251497
15261498 ql_log(ql_log_info, vha, 0x8012,
15271499 "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);
....@@ -1582,7 +1554,7 @@
15821554 if (qla2x00_isp_reg_stat(ha)) {
15831555 ql_log(ql_log_info, vha, 0x8041,
15841556 "PCI/Register disconnect, exiting.\n");
1585
- schedule_work(&ha->board_disable);
1557
+ qla_pci_set_eeh_busy(vha);
15861558 return SUCCESS;
15871559 }
15881560
....@@ -1664,7 +1636,7 @@
16641636 if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
16651637 atomic_set(&vha->loop_state, LOOP_DOWN);
16661638 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1667
- qla2x00_mark_all_devices_lost(vha, 0);
1639
+ qla2x00_mark_all_devices_lost(vha);
16681640 ret = qla2x00_full_login_lip(vha);
16691641 if (ret != QLA_SUCCESS) {
16701642 ql_dbg(ql_dbg_taskm, vha, 0x802d,
....@@ -1685,10 +1657,78 @@
16851657 return QLA_SUCCESS;
16861658 }
16871659
1660
+/*
1661
+ * The caller must ensure that no completion interrupts will happen
1662
+ * while this function is in progress.
1663
+ */
1664
+static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
1665
+ unsigned long *flags)
1666
+ __releases(qp->qp_lock_ptr)
1667
+ __acquires(qp->qp_lock_ptr)
1668
+{
1669
+ DECLARE_COMPLETION_ONSTACK(comp);
1670
+ scsi_qla_host_t *vha = qp->vha;
1671
+ struct qla_hw_data *ha = vha->hw;
1672
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1673
+ int rval;
1674
+ bool ret_cmd;
1675
+ uint32_t ratov_j;
1676
+
1677
+ lockdep_assert_held(qp->qp_lock_ptr);
1678
+
1679
+ if (qla2x00_chip_is_down(vha)) {
1680
+ sp->done(sp, res);
1681
+ return;
1682
+ }
1683
+
1684
+ if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS ||
1685
+ (sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy &&
1686
+ !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
1687
+ !qla2x00_isp_reg_stat(ha))) {
1688
+ if (sp->comp) {
1689
+ sp->done(sp, res);
1690
+ return;
1691
+ }
1692
+
1693
+ sp->comp = &comp;
1694
+ spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
1695
+
1696
+ rval = ha->isp_ops->abort_command(sp);
1697
+ /* Wait for command completion. */
1698
+ ret_cmd = false;
1699
+ ratov_j = ha->r_a_tov/10 * 4 * 1000;
1700
+ ratov_j = msecs_to_jiffies(ratov_j);
1701
+ switch (rval) {
1702
+ case QLA_SUCCESS:
1703
+ if (wait_for_completion_timeout(&comp, ratov_j)) {
1704
+ ql_dbg(ql_dbg_taskm, vha, 0xffff,
1705
+ "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
1706
+ __func__, ha->r_a_tov/10);
1707
+ ret_cmd = true;
1708
+ }
1709
+ /* else FW return SP to driver */
1710
+ break;
1711
+ default:
1712
+ ret_cmd = true;
1713
+ break;
1714
+ }
1715
+
1716
+ spin_lock_irqsave(qp->qp_lock_ptr, *flags);
1717
+ if (ret_cmd && blk_mq_request_started(cmd->request))
1718
+ sp->done(sp, res);
1719
+ } else {
1720
+ sp->done(sp, res);
1721
+ }
1722
+}
1723
+
1724
+/*
1725
+ * The caller must ensure that no completion interrupts will happen
1726
+ * while this function is in progress.
1727
+ */
16881728 static void
16891729 __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
16901730 {
1691
- int cnt, status;
1731
+ int cnt;
16921732 unsigned long flags;
16931733 srb_t *sp;
16941734 scsi_qla_host_t *vha = qp->vha;
....@@ -1696,7 +1736,6 @@
16961736 struct req_que *req;
16971737 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
16981738 struct qla_tgt_cmd *cmd;
1699
- uint8_t trace = 0;
17001739
17011740 if (!ha->req_q_map)
17021741 return;
....@@ -1705,80 +1744,61 @@
17051744 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
17061745 sp = req->outstanding_cmds[cnt];
17071746 if (sp) {
1708
- req->outstanding_cmds[cnt] = NULL;
1709
- if (sp->cmd_type == TYPE_SRB) {
1710
- if (sp->type == SRB_NVME_CMD ||
1711
- sp->type == SRB_NVME_LS) {
1712
- sp_get(sp);
1713
- spin_unlock_irqrestore(qp->qp_lock_ptr,
1714
- flags);
1715
- qla_nvme_abort(ha, sp, res);
1716
- spin_lock_irqsave(qp->qp_lock_ptr,
1717
- flags);
1718
- } else if (GET_CMD_SP(sp) &&
1719
- !ha->flags.eeh_busy &&
1720
- (!test_bit(ABORT_ISP_ACTIVE,
1721
- &vha->dpc_flags)) &&
1722
- !qla2x00_isp_reg_stat(ha) &&
1723
- (sp->type == SRB_SCSI_CMD)) {
1724
- /*
1725
- * Don't abort commands in
1726
- * adapter during EEH
1727
- * recovery as it's not
1728
- * accessible/responding.
1729
- *
1730
- * Get a reference to the sp
1731
- * and drop the lock. The
1732
- * reference ensures this
1733
- * sp->done() call and not the
1734
- * call in qla2xxx_eh_abort()
1735
- * ends the SCSI command (with
1736
- * result 'res').
1737
- */
1738
- sp_get(sp);
1739
- spin_unlock_irqrestore(qp->qp_lock_ptr,
1740
- flags);
1741
- status = qla2xxx_eh_abort(
1742
- GET_CMD_SP(sp));
1743
- spin_lock_irqsave(qp->qp_lock_ptr,
1744
- flags);
1745
- /*
1746
- * Get rid of extra reference
1747
- * if immediate exit from
1748
- * ql2xxx_eh_abort
1749
- */
1750
- if (status == FAILED &&
1751
- (qla2x00_isp_reg_stat(ha)))
1752
- atomic_dec(
1753
- &sp->ref_count);
1754
- }
1747
+ /*
1748
+ * perform lockless completion during driver unload
1749
+ */
1750
+ if (qla2x00_chip_is_down(vha)) {
1751
+ req->outstanding_cmds[cnt] = NULL;
1752
+ spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
17551753 sp->done(sp, res);
1756
- } else {
1754
+ spin_lock_irqsave(qp->qp_lock_ptr, flags);
1755
+ continue;
1756
+ }
1757
+
1758
+ switch (sp->cmd_type) {
1759
+ case TYPE_SRB:
1760
+ qla2x00_abort_srb(qp, sp, res, &flags);
1761
+ break;
1762
+ case TYPE_TGT_CMD:
17571763 if (!vha->hw->tgt.tgt_ops || !tgt ||
17581764 qla_ini_mode_enabled(vha)) {
1759
- if (!trace)
1760
- ql_dbg(ql_dbg_tgt_mgt,
1761
- vha, 0xf003,
1762
- "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
1763
- vha->dpc_flags);
1765
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
1766
+ "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
1767
+ vha->dpc_flags);
17641768 continue;
17651769 }
17661770 cmd = (struct qla_tgt_cmd *)sp;
1767
- qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
1771
+ cmd->aborted = 1;
1772
+ break;
1773
+ case TYPE_TGT_TMCMD:
1774
+ /* Skip task management functions. */
1775
+ break;
1776
+ default:
1777
+ break;
17681778 }
1779
+ req->outstanding_cmds[cnt] = NULL;
17691780 }
17701781 }
17711782 spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
17721783 }
17731784
1785
+/*
1786
+ * The caller must ensure that no completion interrupts will happen
1787
+ * while this function is in progress.
1788
+ */
17741789 void
17751790 qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
17761791 {
17771792 int que;
17781793 struct qla_hw_data *ha = vha->hw;
17791794
1795
+ /* Continue only if initialization complete. */
1796
+ if (!ha->base_qpair)
1797
+ return;
17801798 __qla2x00_abort_all_cmds(ha->base_qpair, res);
17811799
1800
+ if (!ha->queue_pair_map)
1801
+ return;
17821802 for (que = 0; que < ha->max_qpairs; que++) {
17831803 if (!ha->queue_pair_map[que])
17841804 continue;
....@@ -1835,7 +1855,7 @@
18351855 if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
18361856 /* Any upper-dword bits set? */
18371857 if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
1838
- !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
1858
+ !dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
18391859 /* Ok, a 64bit DMA mask is applicable. */
18401860 ha->flags.enable_64bit_addressing = 1;
18411861 ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
....@@ -1845,7 +1865,7 @@
18451865 }
18461866
18471867 dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
1848
- pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32));
1868
+ dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
18491869 }
18501870
18511871 static void
....@@ -1857,8 +1877,8 @@
18571877 spin_lock_irqsave(&ha->hardware_lock, flags);
18581878 ha->interrupts_on = 1;
18591879 /* enable risc and host interrupts */
1860
- WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
1861
- RD_REG_WORD(&reg->ictrl);
1880
+ wrt_reg_word(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
1881
+ rd_reg_word(&reg->ictrl);
18621882 spin_unlock_irqrestore(&ha->hardware_lock, flags);
18631883
18641884 }
....@@ -1872,8 +1892,8 @@
18721892 spin_lock_irqsave(&ha->hardware_lock, flags);
18731893 ha->interrupts_on = 0;
18741894 /* disable risc and host interrupts */
1875
- WRT_REG_WORD(&reg->ictrl, 0);
1876
- RD_REG_WORD(&reg->ictrl);
1895
+ wrt_reg_word(&reg->ictrl, 0);
1896
+ rd_reg_word(&reg->ictrl);
18771897 spin_unlock_irqrestore(&ha->hardware_lock, flags);
18781898 }
18791899
....@@ -1885,8 +1905,8 @@
18851905
18861906 spin_lock_irqsave(&ha->hardware_lock, flags);
18871907 ha->interrupts_on = 1;
1888
- WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT);
1889
- RD_REG_DWORD(&reg->ictrl);
1908
+ wrt_reg_dword(&reg->ictrl, ICRX_EN_RISC_INT);
1909
+ rd_reg_dword(&reg->ictrl);
18901910 spin_unlock_irqrestore(&ha->hardware_lock, flags);
18911911 }
18921912
....@@ -1900,8 +1920,8 @@
19001920 return;
19011921 spin_lock_irqsave(&ha->hardware_lock, flags);
19021922 ha->interrupts_on = 0;
1903
- WRT_REG_DWORD(&reg->ictrl, 0);
1904
- RD_REG_DWORD(&reg->ictrl);
1923
+ wrt_reg_dword(&reg->ictrl, 0);
1924
+ rd_reg_dword(&reg->ictrl);
19051925 spin_unlock_irqrestore(&ha->hardware_lock, flags);
19061926 }
19071927
....@@ -2264,7 +2284,7 @@
22642284 .config_rings = qla24xx_config_rings,
22652285 .reset_adapter = qla24xx_reset_adapter,
22662286 .nvram_config = qla81xx_nvram_config,
2267
- .update_fw_options = qla81xx_update_fw_options,
2287
+ .update_fw_options = qla24xx_update_fw_options,
22682288 .load_risc = qla81xx_load_risc,
22692289 .pci_info_str = qla24xx_pci_info_str,
22702290 .fw_version_str = qla24xx_fw_version_str,
....@@ -2381,7 +2401,7 @@
23812401 .config_rings = qla24xx_config_rings,
23822402 .reset_adapter = qla24xx_reset_adapter,
23832403 .nvram_config = qla81xx_nvram_config,
2384
- .update_fw_options = qla81xx_update_fw_options,
2404
+ .update_fw_options = qla24xx_update_fw_options,
23852405 .load_risc = qla81xx_load_risc,
23862406 .pci_info_str = qla24xx_pci_info_str,
23872407 .fw_version_str = qla24xx_fw_version_str,
....@@ -2459,7 +2479,7 @@
24592479 .config_rings = qla24xx_config_rings,
24602480 .reset_adapter = qla24xx_reset_adapter,
24612481 .nvram_config = qla81xx_nvram_config,
2462
- .update_fw_options = qla81xx_update_fw_options,
2482
+ .update_fw_options = qla24xx_update_fw_options,
24632483 .load_risc = qla81xx_load_risc,
24642484 .pci_info_str = qla24xx_pci_info_str,
24652485 .fw_version_str = qla24xx_fw_version_str,
....@@ -2478,6 +2498,7 @@
24782498 .read_nvram = NULL,
24792499 .write_nvram = NULL,
24802500 .fw_dump = qla27xx_fwdump,
2501
+ .mpi_fw_dump = qla27xx_mpi_fwdump,
24812502 .beacon_on = qla24xx_beacon_on,
24822503 .beacon_off = qla24xx_beacon_off,
24832504 .beacon_blink = qla83xx_beacon_blink,
....@@ -2636,6 +2657,24 @@
26362657 ha->device_type |= DT_T10_PI;
26372658 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
26382659 break;
2660
+ case PCI_DEVICE_ID_QLOGIC_ISP2081:
2661
+ case PCI_DEVICE_ID_QLOGIC_ISP2089:
2662
+ ha->isp_type |= DT_ISP2081;
2663
+ ha->device_type |= DT_ZIO_SUPPORTED;
2664
+ ha->device_type |= DT_FWI2;
2665
+ ha->device_type |= DT_IIDMA;
2666
+ ha->device_type |= DT_T10_PI;
2667
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2668
+ break;
2669
+ case PCI_DEVICE_ID_QLOGIC_ISP2281:
2670
+ case PCI_DEVICE_ID_QLOGIC_ISP2289:
2671
+ ha->isp_type |= DT_ISP2281;
2672
+ ha->device_type |= DT_ZIO_SUPPORTED;
2673
+ ha->device_type |= DT_FWI2;
2674
+ ha->device_type |= DT_IIDMA;
2675
+ ha->device_type |= DT_T10_PI;
2676
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2677
+ break;
26392678 }
26402679
26412680 if (IS_QLA82XX(ha))
....@@ -2643,7 +2682,8 @@
26432682 else {
26442683 /* Get adapter physical port no from interrupt pin register. */
26452684 pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
2646
- if (IS_QLA27XX(ha))
2685
+ if (IS_QLA25XX(ha) || IS_QLA2031(ha) ||
2686
+ IS_QLA27XX(ha) || IS_QLA28XX(ha))
26472687 ha->port_no--;
26482688 else
26492689 ha->port_no = !(ha->port_no & 1);
....@@ -2740,7 +2780,11 @@
27402780 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
27412781 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
27422782 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 ||
2743
- pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261) {
2783
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261 ||
2784
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2081 ||
2785
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2281 ||
2786
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2089 ||
2787
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2289) {
27442788 bars = pci_select_bars(pdev, IORESOURCE_MEM);
27452789 mem_only = 1;
27462790 ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
....@@ -2755,6 +2799,11 @@
27552799 } else {
27562800 if (pci_enable_device(pdev))
27572801 return ret;
2802
+ }
2803
+
2804
+ if (is_kdump_kernel()) {
2805
+ ql2xmqsupport = 0;
2806
+ ql2xallocfwdump = 0;
27582807 }
27592808
27602809 /* This may fail but that's ok */
....@@ -2789,7 +2838,7 @@
27892838
27902839 /* Set EEH reset type to fundamental if required by hba */
27912840 if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
2792
- IS_QLA83XX(ha) || IS_QLA27XX(ha))
2841
+ IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
27932842 pdev->needs_freset = 1;
27942843
27952844 ha->prev_topology = 0;
....@@ -2799,7 +2848,8 @@
27992848 ha->max_exchg = FW_MAX_EXCHANGES_CNT;
28002849 atomic_set(&ha->num_pend_mbx_stage1, 0);
28012850 atomic_set(&ha->num_pend_mbx_stage2, 0);
2802
- atomic_set(&ha->num_pend_mbx_stage3, 0);
2851
+ atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD);
2852
+ ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD;
28032853
28042854 /* Assign ISP specific operations. */
28052855 if (IS_QLA2100(ha)) {
....@@ -2966,6 +3016,23 @@
29663016 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
29673017 ha->nvram_conf_off = ~0;
29683018 ha->nvram_data_off = ~0;
3019
+ } else if (IS_QLA28XX(ha)) {
3020
+ ha->portnum = PCI_FUNC(ha->pdev->devfn);
3021
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3022
+ ha->mbx_count = MAILBOX_REGISTER_COUNT;
3023
+ req_length = REQUEST_ENTRY_CNT_24XX;
3024
+ rsp_length = RESPONSE_ENTRY_CNT_2300;
3025
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
3026
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3027
+ ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
3028
+ ha->gid_list_info_size = 8;
3029
+ ha->optrom_size = OPTROM_SIZE_28XX;
3030
+ ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3031
+ ha->isp_ops = &qla27xx_isp_ops;
3032
+ ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_28XX;
3033
+ ha->flash_data_off = FARX_ACCESS_FLASH_DATA_28XX;
3034
+ ha->nvram_conf_off = ~0;
3035
+ ha->nvram_data_off = ~0;
29693036 }
29703037
29713038 ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
....@@ -3055,6 +3122,13 @@
30553122 host->max_id = ha->max_fibre_devices;
30563123 host->cmd_per_lun = 3;
30573124 host->unique_id = host->host_no;
3125
+
3126
+ if (ql2xenabledif && ql2xenabledif != 2) {
3127
+ ql_log(ql_log_warn, base_vha, 0x302d,
3128
+ "Invalid value for ql2xenabledif, resetting it to default (2)\n");
3129
+ ql2xenabledif = 2;
3130
+ }
3131
+
30583132 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
30593133 host->max_cmd_len = 32;
30603134 else
....@@ -3090,10 +3164,11 @@
30903164 ql_log(ql_log_fatal, base_vha, 0x003d,
30913165 "Failed to allocate memory for queue pointers..."
30923166 "aborting.\n");
3167
+ ret = -ENODEV;
30933168 goto probe_failed;
30943169 }
30953170
3096
- if (ha->mqenable && shost_use_blk_mq(host)) {
3171
+ if (ha->mqenable) {
30973172 /* number of hardware queues supported by blk/scsi-mq*/
30983173 host->nr_hw_queues = ha->max_qpairs;
30993174
....@@ -3131,7 +3206,8 @@
31313206 req->req_q_out = &ha->iobase->isp24.req_q_out;
31323207 rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
31333208 rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
3134
- if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
3209
+ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
3210
+ IS_QLA28XX(ha)) {
31353211 req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
31363212 req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
31373213 rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
....@@ -3209,26 +3285,19 @@
32093285 base_vha->mgmt_svr_loop_id, host->sg_tablesize);
32103286
32113287 if (ha->mqenable) {
3212
- bool mq = false;
32133288 bool startit = false;
32143289
3215
- if (QLA_TGT_MODE_ENABLED()) {
3216
- mq = true;
3290
+ if (QLA_TGT_MODE_ENABLED())
32173291 startit = false;
3218
- }
32193292
3220
- if ((ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED) &&
3221
- shost_use_blk_mq(host)) {
3222
- mq = true;
3293
+ if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED)
32233294 startit = true;
3224
- }
32253295
3226
- if (mq) {
3227
- /* Create start of day qpairs for Block MQ */
3228
- for (i = 0; i < ha->max_qpairs; i++)
3229
- qla2xxx_create_qpair(base_vha, 5, 0, startit);
3230
- }
3296
+ /* Create start of day qpairs for Block MQ */
3297
+ for (i = 0; i < ha->max_qpairs; i++)
3298
+ qla2xxx_create_qpair(base_vha, 5, 0, startit);
32313299 }
3300
+ qla_init_iocb_limit(base_vha);
32323301
32333302 if (ha->flags.running_gold_fw)
32343303 goto skip_dpc;
....@@ -3288,18 +3357,20 @@
32883357 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
32893358 if (ha->fw_attributes & BIT_4) {
32903359 int prot = 0, guard;
3360
+
32913361 base_vha->flags.difdix_supported = 1;
32923362 ql_dbg(ql_dbg_init, base_vha, 0x00f1,
32933363 "Registering for DIF/DIX type 1 and 3 protection.\n");
3294
- if (ql2xenabledif == 1)
3295
- prot = SHOST_DIX_TYPE0_PROTECTION;
3296
- scsi_host_set_prot(host,
3297
- prot | SHOST_DIF_TYPE1_PROTECTION
3298
- | SHOST_DIF_TYPE2_PROTECTION
3299
- | SHOST_DIF_TYPE3_PROTECTION
3300
- | SHOST_DIX_TYPE1_PROTECTION
3301
- | SHOST_DIX_TYPE2_PROTECTION
3302
- | SHOST_DIX_TYPE3_PROTECTION);
3364
+ if (ql2xprotmask)
3365
+ scsi_host_set_prot(host, ql2xprotmask);
3366
+ else
3367
+ scsi_host_set_prot(host,
3368
+ prot | SHOST_DIF_TYPE1_PROTECTION
3369
+ | SHOST_DIF_TYPE2_PROTECTION
3370
+ | SHOST_DIF_TYPE3_PROTECTION
3371
+ | SHOST_DIX_TYPE1_PROTECTION
3372
+ | SHOST_DIX_TYPE2_PROTECTION
3373
+ | SHOST_DIX_TYPE3_PROTECTION);
33033374
33043375 guard = SHOST_DIX_GUARD_CRC;
33053376
....@@ -3307,7 +3378,10 @@
33073378 (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
33083379 guard |= SHOST_DIX_GUARD_IP;
33093380
3310
- scsi_host_set_guard(host, guard);
3381
+ if (ql2xprotguard)
3382
+ scsi_host_set_guard(host, ql2xprotguard);
3383
+ else
3384
+ scsi_host_set_guard(host, guard);
33113385 } else
33123386 base_vha->flags.difdix_supported = 0;
33133387 }
....@@ -3358,7 +3432,8 @@
33583432 "QLogic %s - %s.\n", ha->model_number, ha->model_desc);
33593433 ql_log(ql_log_info, base_vha, 0x00fc,
33603434 "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
3361
- pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info),
3435
+ pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info,
3436
+ sizeof(pci_info)),
33623437 pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
33633438 base_vha->host_no,
33643439 ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str)));
....@@ -3369,13 +3444,6 @@
33693444
33703445 if (test_bit(UNLOADING, &base_vha->dpc_flags))
33713446 return -ENODEV;
3372
-
3373
- if (ha->flags.detected_lr_sfp) {
3374
- ql_log(ql_log_info, base_vha, 0xffff,
3375
- "Reset chip to pick up LR SFP setting\n");
3376
- set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
3377
- qla2xxx_wake_dpc(base_vha);
3378
- }
33793447
33803448 return 0;
33813449
....@@ -3433,6 +3501,29 @@
34333501 return ret;
34343502 }
34353503
3504
+static void __qla_set_remove_flag(scsi_qla_host_t *base_vha)
3505
+{
3506
+ scsi_qla_host_t *vp;
3507
+ unsigned long flags;
3508
+ struct qla_hw_data *ha;
3509
+
3510
+ if (!base_vha)
3511
+ return;
3512
+
3513
+ ha = base_vha->hw;
3514
+
3515
+ spin_lock_irqsave(&ha->vport_slock, flags);
3516
+ list_for_each_entry(vp, &ha->vp_list, list)
3517
+ set_bit(PFLG_DRIVER_REMOVING, &vp->pci_flags);
3518
+
3519
+ /*
3520
+ * Indicate device removal to prevent future board_disable
3521
+ * and wait until any pending board_disable has completed.
3522
+ */
3523
+ set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
3524
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
3525
+}
3526
+
34363527 static void
34373528 qla2x00_shutdown(struct pci_dev *pdev)
34383529 {
....@@ -3449,7 +3540,7 @@
34493540 * Prevent future board_disable and wait
34503541 * until any pending board_disable has completed.
34513542 */
3452
- set_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags);
3543
+ __qla_set_remove_flag(vha);
34533544 cancel_work_sync(&ha->board_disable);
34543545
34553546 if (!atomic_read(&pdev->enable_cnt))
....@@ -3469,7 +3560,8 @@
34693560 if (ha->eft)
34703561 qla2x00_disable_eft_trace(vha);
34713562
3472
- if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
3563
+ if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
3564
+ IS_QLA28XX(ha)) {
34733565 if (ha->flags.fw_started)
34743566 qla2x00_abort_isp_cleanup(vha);
34753567 } else {
....@@ -3578,7 +3670,7 @@
35783670 if (ha->mqiobase)
35793671 iounmap(ha->mqiobase);
35803672
3581
- if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && ha->msixbase)
3673
+ if (ha->msixbase)
35823674 iounmap(ha->msixbase);
35833675 }
35843676 }
....@@ -3607,10 +3699,7 @@
36073699 ha = base_vha->hw;
36083700 ql_log(ql_log_info, base_vha, 0xb079,
36093701 "Removing driver\n");
3610
-
3611
- /* Indicate device removal to prevent future board_disable and wait
3612
- * until any pending board_disable has completed. */
3613
- set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
3702
+ __qla_set_remove_flag(base_vha);
36143703 cancel_work_sync(&ha->board_disable);
36153704
36163705 /*
....@@ -3636,7 +3725,8 @@
36363725 if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
36373726 return;
36383727
3639
- if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
3728
+ if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
3729
+ IS_QLA28XX(ha)) {
36403730 if (ha->flags.fw_started)
36413731 qla2x00_abort_isp_cleanup(base_vha);
36423732 } else if (!IS_QLAFX00(ha)) {
....@@ -3666,8 +3756,6 @@
36663756 qlafx00_driver_shutdown(base_vha, 20);
36673757
36683758 qla2x00_delete_all_vps(ha, base_vha);
3669
-
3670
- qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
36713759
36723760 qla2x00_dfs_remove(base_vha);
36733761
....@@ -3714,6 +3802,20 @@
37143802 pci_disable_device(pdev);
37153803 }
37163804
3805
+static inline void
3806
+qla24xx_free_purex_list(struct purex_list *list)
3807
+{
3808
+ struct list_head *item, *next;
3809
+ ulong flags;
3810
+
3811
+ spin_lock_irqsave(&list->lock, flags);
3812
+ list_for_each_safe(item, next, &list->head) {
3813
+ list_del(item);
3814
+ kfree(list_entry(item, struct purex_item, list));
3815
+ }
3816
+ spin_unlock_irqrestore(&list->lock, flags);
3817
+}
3818
+
37173819 static void
37183820 qla2x00_free_device(scsi_qla_host_t *vha)
37193821 {
....@@ -3746,6 +3848,8 @@
37463848 }
37473849
37483850
3851
+ qla24xx_free_purex_list(&vha->purex_list);
3852
+
37493853 qla2x00_mem_free(ha);
37503854
37513855 qla82xx_md_free(vha);
....@@ -3757,44 +3861,26 @@
37573861 {
37583862 fc_port_t *fcport, *tfcport;
37593863
3760
- list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
3761
- list_del(&fcport->list);
3762
- qla2x00_clear_loop_id(fcport);
3763
- kfree(fcport);
3764
- }
3864
+ list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list)
3865
+ qla2x00_free_fcport(fcport);
37653866 }
37663867
37673868 static inline void
3768
-qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
3769
- int defer)
3869
+qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport)
37703870 {
3771
- struct fc_rport *rport;
3772
- scsi_qla_host_t *base_vha;
3773
- unsigned long flags;
3871
+ int now;
37743872
37753873 if (!fcport->rport)
37763874 return;
37773875
3778
- rport = fcport->rport;
3779
- if (defer) {
3780
- base_vha = pci_get_drvdata(vha->hw->pdev);
3781
- spin_lock_irqsave(vha->host->host_lock, flags);
3782
- fcport->drport = rport;
3783
- spin_unlock_irqrestore(vha->host->host_lock, flags);
3784
- qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen);
3785
- set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
3786
- qla2xxx_wake_dpc(base_vha);
3787
- } else {
3788
- int now;
3789
- if (rport) {
3790
- ql_dbg(ql_dbg_disc, fcport->vha, 0x2109,
3791
- "%s %8phN. rport %p roles %x\n",
3792
- __func__, fcport->port_name, rport,
3793
- rport->roles);
3794
- fc_remote_port_delete(rport);
3795
- }
3796
- qlt_do_generation_tick(vha, &now);
3876
+ if (fcport->rport) {
3877
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x2109,
3878
+ "%s %8phN. rport %p roles %x\n",
3879
+ __func__, fcport->port_name, fcport->rport,
3880
+ fcport->rport->roles);
3881
+ fc_remote_port_delete(fcport->rport);
37973882 }
3883
+ qlt_do_generation_tick(vha, &now);
37983884 }
37993885
38003886 /*
....@@ -3807,18 +3893,18 @@
38073893 * Context:
38083894 */
38093895 void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
3810
- int do_login, int defer)
3896
+ int do_login)
38113897 {
38123898 if (IS_QLAFX00(vha->hw)) {
38133899 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
3814
- qla2x00_schedule_rport_del(vha, fcport, defer);
3900
+ qla2x00_schedule_rport_del(vha, fcport);
38153901 return;
38163902 }
38173903
38183904 if (atomic_read(&fcport->state) == FCS_ONLINE &&
38193905 vha->vp_idx == fcport->vha->vp_idx) {
38203906 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
3821
- qla2x00_schedule_rport_del(vha, fcport, defer);
3907
+ qla2x00_schedule_rport_del(vha, fcport);
38223908 }
38233909 /*
38243910 * We may need to retry the login, so don't change the state of the
....@@ -3833,21 +3919,8 @@
38333919 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
38343920 }
38353921
3836
-/*
3837
- * qla2x00_mark_all_devices_lost
3838
- * Updates fcport state when device goes offline.
3839
- *
3840
- * Input:
3841
- * ha = adapter block pointer.
3842
- * fcport = port structure pointer.
3843
- *
3844
- * Return:
3845
- * None.
3846
- *
3847
- * Context:
3848
- */
38493922 void
3850
-qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
3923
+qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
38513924 {
38523925 fc_port_t *fcport;
38533926
....@@ -3855,26 +3928,32 @@
38553928 "Mark all dev lost\n");
38563929
38573930 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3931
+ if (fcport->loop_id != FC_NO_LOOP_ID &&
3932
+ (fcport->flags & FCF_FCP2_DEVICE) &&
3933
+ fcport->port_type == FCT_TARGET &&
3934
+ !qla2x00_reset_active(vha)) {
3935
+ ql_dbg(ql_dbg_disc, vha, 0x211a,
3936
+ "Delaying session delete for FCP2 flags 0x%x port_type = 0x%x port_id=%06x %phC",
3937
+ fcport->flags, fcport->port_type,
3938
+ fcport->d_id.b24, fcport->port_name);
3939
+ continue;
3940
+ }
38583941 fcport->scan_state = 0;
38593942 qlt_schedule_sess_for_deletion(fcport);
3860
-
3861
- if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
3862
- continue;
3863
-
3864
- /*
3865
- * No point in marking the device as lost, if the device is
3866
- * already DEAD.
3867
- */
3868
- if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
3869
- continue;
3870
- if (atomic_read(&fcport->state) == FCS_ONLINE) {
3871
- qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
3872
- if (defer)
3873
- qla2x00_schedule_rport_del(vha, fcport, defer);
3874
- else if (vha->vp_idx == fcport->vha->vp_idx)
3875
- qla2x00_schedule_rport_del(vha, fcport, defer);
3876
- }
38773943 }
3944
+}
3945
+
3946
+static void qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
3947
+{
3948
+ int i;
3949
+
3950
+ if (IS_FWI2_CAPABLE(ha))
3951
+ return;
3952
+
3953
+ for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
3954
+ set_bit(i, ha->loop_id_map);
3955
+ set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
3956
+ set_bit(BROADCAST, ha->loop_id_map);
38783957 }
38793958
38803959 /*
....@@ -3958,9 +4037,86 @@
39584037 "Failed to allocate memory for fcp_cmnd_dma_pool.\n");
39594038 goto fail_dl_dma_pool;
39604039 }
4040
+
4041
+ if (ql2xenabledif) {
4042
+ u64 bufsize = DIF_BUNDLING_DMA_POOL_SIZE;
4043
+ struct dsd_dma *dsd, *nxt;
4044
+ uint i;
4045
+ /* Creata a DMA pool of buffers for DIF bundling */
4046
+ ha->dif_bundl_pool = dma_pool_create(name,
4047
+ &ha->pdev->dev, DIF_BUNDLING_DMA_POOL_SIZE, 8, 0);
4048
+ if (!ha->dif_bundl_pool) {
4049
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024,
4050
+ "%s: failed create dif_bundl_pool\n",
4051
+ __func__);
4052
+ goto fail_dif_bundl_dma_pool;
4053
+ }
4054
+
4055
+ INIT_LIST_HEAD(&ha->pool.good.head);
4056
+ INIT_LIST_HEAD(&ha->pool.unusable.head);
4057
+ ha->pool.good.count = 0;
4058
+ ha->pool.unusable.count = 0;
4059
+ for (i = 0; i < 128; i++) {
4060
+ dsd = kzalloc(sizeof(*dsd), GFP_ATOMIC);
4061
+ if (!dsd) {
4062
+ ql_dbg_pci(ql_dbg_init, ha->pdev,
4063
+ 0xe0ee, "%s: failed alloc dsd\n",
4064
+ __func__);
4065
+ return -ENOMEM;
4066
+ }
4067
+ ha->dif_bundle_kallocs++;
4068
+
4069
+ dsd->dsd_addr = dma_pool_alloc(
4070
+ ha->dif_bundl_pool, GFP_ATOMIC,
4071
+ &dsd->dsd_list_dma);
4072
+ if (!dsd->dsd_addr) {
4073
+ ql_dbg_pci(ql_dbg_init, ha->pdev,
4074
+ 0xe0ee,
4075
+ "%s: failed alloc ->dsd_addr\n",
4076
+ __func__);
4077
+ kfree(dsd);
4078
+ ha->dif_bundle_kallocs--;
4079
+ continue;
4080
+ }
4081
+ ha->dif_bundle_dma_allocs++;
4082
+
4083
+ /*
4084
+ * if DMA buffer crosses 4G boundary,
4085
+ * put it on bad list
4086
+ */
4087
+ if (MSD(dsd->dsd_list_dma) ^
4088
+ MSD(dsd->dsd_list_dma + bufsize)) {
4089
+ list_add_tail(&dsd->list,
4090
+ &ha->pool.unusable.head);
4091
+ ha->pool.unusable.count++;
4092
+ } else {
4093
+ list_add_tail(&dsd->list,
4094
+ &ha->pool.good.head);
4095
+ ha->pool.good.count++;
4096
+ }
4097
+ }
4098
+
4099
+ /* return the good ones back to the pool */
4100
+ list_for_each_entry_safe(dsd, nxt,
4101
+ &ha->pool.good.head, list) {
4102
+ list_del(&dsd->list);
4103
+ dma_pool_free(ha->dif_bundl_pool,
4104
+ dsd->dsd_addr, dsd->dsd_list_dma);
4105
+ ha->dif_bundle_dma_allocs--;
4106
+ kfree(dsd);
4107
+ ha->dif_bundle_kallocs--;
4108
+ }
4109
+
4110
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024,
4111
+ "%s: dif dma pool (good=%u unusable=%u)\n",
4112
+ __func__, ha->pool.good.count,
4113
+ ha->pool.unusable.count);
4114
+ }
4115
+
39614116 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
3962
- "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n",
3963
- ha->dl_dma_pool, ha->fcp_cmnd_dma_pool);
4117
+ "dl_dma_pool=%p fcp_cmnd_dma_pool=%p dif_bundl_pool=%p.\n",
4118
+ ha->dl_dma_pool, ha->fcp_cmnd_dma_pool,
4119
+ ha->dif_bundl_pool);
39644120 }
39654121
39664122 /* Allocate memory for SNS commands */
....@@ -4042,13 +4198,25 @@
40424198 ha->npiv_info = NULL;
40434199
40444200 /* Get consistent memory allocated for EX-INIT-CB. */
4045
- if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
4201
+ if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
4202
+ IS_QLA28XX(ha)) {
40464203 ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
40474204 &ha->ex_init_cb_dma);
40484205 if (!ha->ex_init_cb)
40494206 goto fail_ex_init_cb;
40504207 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e,
40514208 "ex_init_cb=%p.\n", ha->ex_init_cb);
4209
+ }
4210
+
4211
+ /* Get consistent memory allocated for Special Features-CB. */
4212
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4213
+ ha->sf_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
4214
+ &ha->sf_init_cb_dma);
4215
+ if (!ha->sf_init_cb)
4216
+ goto fail_sf_init_cb;
4217
+ memset(ha->sf_init_cb, 0, sizeof(struct init_sf_cb));
4218
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0199,
4219
+ "sf_init_cb=%p.\n", ha->sf_init_cb);
40524220 }
40534221
40544222 INIT_LIST_HEAD(&ha->gbl_dsd_list);
....@@ -4085,13 +4253,27 @@
40854253 goto fail_sfp_data;
40864254 }
40874255
4256
+ ha->flt = dma_alloc_coherent(&ha->pdev->dev,
4257
+ sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, &ha->flt_dma,
4258
+ GFP_KERNEL);
4259
+ if (!ha->flt) {
4260
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
4261
+ "Unable to allocate memory for FLT.\n");
4262
+ goto fail_flt_buffer;
4263
+ }
4264
+
40884265 return 0;
40894266
4267
+fail_flt_buffer:
4268
+ dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
4269
+ ha->sfp_data, ha->sfp_data_dma);
40904270 fail_sfp_data:
40914271 kfree(ha->loop_id_map);
40924272 fail_loop_id_map:
40934273 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
40944274 fail_async_pd:
4275
+ dma_pool_free(ha->s_dma_pool, ha->sf_init_cb, ha->sf_init_cb_dma);
4276
+fail_sf_init_cb:
40954277 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
40964278 fail_ex_init_cb:
40974279 kfree(ha->npiv_info);
....@@ -4125,6 +4307,24 @@
41254307 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
41264308 ha->sns_cmd, ha->sns_cmd_dma);
41274309 fail_dma_pool:
4310
+ if (ql2xenabledif) {
4311
+ struct dsd_dma *dsd, *nxt;
4312
+
4313
+ list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
4314
+ list) {
4315
+ list_del(&dsd->list);
4316
+ dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
4317
+ dsd->dsd_list_dma);
4318
+ ha->dif_bundle_dma_allocs--;
4319
+ kfree(dsd);
4320
+ ha->dif_bundle_kallocs--;
4321
+ ha->pool.unusable.count--;
4322
+ }
4323
+ dma_pool_destroy(ha->dif_bundl_pool);
4324
+ ha->dif_bundl_pool = NULL;
4325
+ }
4326
+
4327
+fail_dif_bundl_dma_pool:
41284328 if (IS_QLA82XX(ha) || ql2xenabledif) {
41294329 dma_pool_destroy(ha->fcp_cmnd_dma_pool);
41304330 ha->fcp_cmnd_dma_pool = NULL;
....@@ -4141,12 +4341,10 @@
41414341 kfree(ha->nvram);
41424342 ha->nvram = NULL;
41434343 fail_free_ctx_mempool:
4144
- if (ha->ctx_mempool)
4145
- mempool_destroy(ha->ctx_mempool);
4344
+ mempool_destroy(ha->ctx_mempool);
41464345 ha->ctx_mempool = NULL;
41474346 fail_free_srb_mempool:
4148
- if (ha->srb_mempool)
4149
- mempool_destroy(ha->srb_mempool);
4347
+ mempool_destroy(ha->srb_mempool);
41504348 ha->srb_mempool = NULL;
41514349 fail_free_gid_list:
41524350 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
....@@ -4171,11 +4369,12 @@
41714369 qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
41724370 {
41734371 int rval;
4174
- uint16_t size, max_cnt, temp;
4372
+ uint16_t size, max_cnt;
4373
+ uint32_t temp;
41754374 struct qla_hw_data *ha = vha->hw;
41764375
41774376 /* Return if we don't need to alloacate any extended logins */
4178
- if (!ql2xexlogins)
4377
+ if (ql2xexlogins <= MAX_FIBRE_DEVICES_2400)
41794378 return QLA_SUCCESS;
41804379
41814380 if (!IS_EXLOGIN_OFFLD_CAPABLE(ha))
....@@ -4246,29 +4445,34 @@
42464445 qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
42474446 {
42484447 u32 temp;
4448
+ struct init_cb_81xx *icb = (struct init_cb_81xx *)&vha->hw->init_cb;
42494449 *ret_cnt = FW_DEF_EXCHANGES_CNT;
42504450
42514451 if (max_cnt > vha->hw->max_exchg)
42524452 max_cnt = vha->hw->max_exchg;
42534453
42544454 if (qla_ini_mode_enabled(vha)) {
4255
- if (ql2xiniexchg > max_cnt)
4256
- ql2xiniexchg = max_cnt;
4455
+ if (vha->ql2xiniexchg > max_cnt)
4456
+ vha->ql2xiniexchg = max_cnt;
42574457
4258
- if (ql2xiniexchg > FW_DEF_EXCHANGES_CNT)
4259
- *ret_cnt = ql2xiniexchg;
4458
+ if (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT)
4459
+ *ret_cnt = vha->ql2xiniexchg;
4460
+
42604461 } else if (qla_tgt_mode_enabled(vha)) {
4261
- if (ql2xexchoffld > max_cnt)
4262
- ql2xexchoffld = max_cnt;
4462
+ if (vha->ql2xexchoffld > max_cnt) {
4463
+ vha->ql2xexchoffld = max_cnt;
4464
+ icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
4465
+ }
42634466
4264
- if (ql2xexchoffld > FW_DEF_EXCHANGES_CNT)
4265
- *ret_cnt = ql2xexchoffld;
4467
+ if (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT)
4468
+ *ret_cnt = vha->ql2xexchoffld;
42664469 } else if (qla_dual_mode_enabled(vha)) {
4267
- temp = ql2xiniexchg + ql2xexchoffld;
4470
+ temp = vha->ql2xiniexchg + vha->ql2xexchoffld;
42684471 if (temp > max_cnt) {
4269
- ql2xiniexchg -= (temp - max_cnt)/2;
4270
- ql2xexchoffld -= (((temp - max_cnt)/2) + 1);
4472
+ vha->ql2xiniexchg -= (temp - max_cnt)/2;
4473
+ vha->ql2xexchoffld -= (((temp - max_cnt)/2) + 1);
42714474 temp = max_cnt;
4475
+ icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
42724476 }
42734477
42744478 if (temp > FW_DEF_EXCHANGES_CNT)
....@@ -4306,6 +4510,12 @@
43064510
43074511 if (totsz != ha->exchoffld_size) {
43084512 qla2x00_free_exchoffld_buffer(ha);
4513
+ if (actual_cnt <= FW_DEF_EXCHANGES_CNT) {
4514
+ ha->exchoffld_size = 0;
4515
+ ha->flags.exchoffld_enabled = 0;
4516
+ return QLA_SUCCESS;
4517
+ }
4518
+
43094519 ha->exchoffld_size = totsz;
43104520
43114521 ql_log(ql_log_info, vha, 0xd016,
....@@ -4338,6 +4548,15 @@
43384548
43394549 return -ENOMEM;
43404550 }
4551
+ } else if (!ha->exchoffld_buf || (actual_cnt <= FW_DEF_EXCHANGES_CNT)) {
4552
+ /* pathological case */
4553
+ qla2x00_free_exchoffld_buffer(ha);
4554
+ ha->exchoffld_size = 0;
4555
+ ha->flags.exchoffld_enabled = 0;
4556
+ ql_log(ql_log_info, vha, 0xd016,
4557
+ "Exchange offload not enable: offld size=%d, actual count=%d entry sz=0x%x, total sz=0x%x.\n",
4558
+ ha->exchoffld_size, actual_cnt, size, totsz);
4559
+ return 0;
43414560 }
43424561
43434562 /* Now configure the dma buffer */
....@@ -4353,7 +4572,7 @@
43534572 if (qla_ini_mode_enabled(vha))
43544573 icb->exchange_count = 0;
43554574 else
4356
- icb->exchange_count = cpu_to_le16(ql2xexchoffld);
4575
+ icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
43574576 }
43584577
43594578 return rval;
....@@ -4386,6 +4605,9 @@
43864605 static void
43874606 qla2x00_free_fw_dump(struct qla_hw_data *ha)
43884607 {
4608
+ struct fwdt *fwdt = ha->fwdt;
4609
+ uint j;
4610
+
43894611 if (ha->fce)
43904612 dma_free_coherent(&ha->pdev->dev,
43914613 FCE_SIZE, ha->fce, ha->fce_dma);
....@@ -4396,20 +4618,24 @@
43964618
43974619 if (ha->fw_dump)
43984620 vfree(ha->fw_dump);
4399
- if (ha->fw_dump_template)
4400
- vfree(ha->fw_dump_template);
44014621
44024622 ha->fce = NULL;
44034623 ha->fce_dma = 0;
4624
+ ha->flags.fce_enabled = 0;
44044625 ha->eft = NULL;
44054626 ha->eft_dma = 0;
4406
- ha->fw_dumped = 0;
4627
+ ha->fw_dumped = false;
44074628 ha->fw_dump_cap_flags = 0;
44084629 ha->fw_dump_reading = 0;
44094630 ha->fw_dump = NULL;
44104631 ha->fw_dump_len = 0;
4411
- ha->fw_dump_template = NULL;
4412
- ha->fw_dump_template_len = 0;
4632
+
4633
+ for (j = 0; j < 2; j++, fwdt++) {
4634
+ if (fwdt->template)
4635
+ vfree(fwdt->template);
4636
+ fwdt->template = NULL;
4637
+ fwdt->length = 0;
4638
+ }
44134639 }
44144640
44154641 /*
....@@ -4427,46 +4653,73 @@
44274653 if (ha->mctp_dump)
44284654 dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump,
44294655 ha->mctp_dump_dma);
4656
+ ha->mctp_dump = NULL;
44304657
4431
- if (ha->srb_mempool)
4432
- mempool_destroy(ha->srb_mempool);
4658
+ mempool_destroy(ha->srb_mempool);
4659
+ ha->srb_mempool = NULL;
44334660
44344661 if (ha->dcbx_tlv)
44354662 dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
44364663 ha->dcbx_tlv, ha->dcbx_tlv_dma);
4664
+ ha->dcbx_tlv = NULL;
44374665
44384666 if (ha->xgmac_data)
44394667 dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
44404668 ha->xgmac_data, ha->xgmac_data_dma);
4669
+ ha->xgmac_data = NULL;
44414670
44424671 if (ha->sns_cmd)
44434672 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
44444673 ha->sns_cmd, ha->sns_cmd_dma);
4674
+ ha->sns_cmd = NULL;
4675
+ ha->sns_cmd_dma = 0;
44454676
44464677 if (ha->ct_sns)
44474678 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
44484679 ha->ct_sns, ha->ct_sns_dma);
4680
+ ha->ct_sns = NULL;
4681
+ ha->ct_sns_dma = 0;
44494682
44504683 if (ha->sfp_data)
44514684 dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data,
44524685 ha->sfp_data_dma);
4686
+ ha->sfp_data = NULL;
4687
+
4688
+ if (ha->flt)
4689
+ dma_free_coherent(&ha->pdev->dev,
4690
+ sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE,
4691
+ ha->flt, ha->flt_dma);
4692
+ ha->flt = NULL;
4693
+ ha->flt_dma = 0;
44534694
44544695 if (ha->ms_iocb)
44554696 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
4697
+ ha->ms_iocb = NULL;
4698
+ ha->ms_iocb_dma = 0;
4699
+
4700
+ if (ha->sf_init_cb)
4701
+ dma_pool_free(ha->s_dma_pool,
4702
+ ha->sf_init_cb, ha->sf_init_cb_dma);
44564703
44574704 if (ha->ex_init_cb)
44584705 dma_pool_free(ha->s_dma_pool,
44594706 ha->ex_init_cb, ha->ex_init_cb_dma);
4707
+ ha->ex_init_cb = NULL;
4708
+ ha->ex_init_cb_dma = 0;
44604709
44614710 if (ha->async_pd)
44624711 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
4712
+ ha->async_pd = NULL;
4713
+ ha->async_pd_dma = 0;
44634714
4464
- if (ha->s_dma_pool)
4465
- dma_pool_destroy(ha->s_dma_pool);
4715
+ dma_pool_destroy(ha->s_dma_pool);
4716
+ ha->s_dma_pool = NULL;
44664717
44674718 if (ha->gid_list)
44684719 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
44694720 ha->gid_list, ha->gid_list_dma);
4721
+ ha->gid_list = NULL;
4722
+ ha->gid_list_dma = 0;
44704723
44714724 if (IS_QLA82XX(ha)) {
44724725 if (!list_empty(&ha->gbl_dsd_list)) {
....@@ -4483,61 +4736,61 @@
44834736 }
44844737 }
44854738
4486
- if (ha->dl_dma_pool)
4487
- dma_pool_destroy(ha->dl_dma_pool);
4739
+ dma_pool_destroy(ha->dl_dma_pool);
4740
+ ha->dl_dma_pool = NULL;
44884741
4489
- if (ha->fcp_cmnd_dma_pool)
4490
- dma_pool_destroy(ha->fcp_cmnd_dma_pool);
4742
+ dma_pool_destroy(ha->fcp_cmnd_dma_pool);
4743
+ ha->fcp_cmnd_dma_pool = NULL;
44914744
4492
- if (ha->ctx_mempool)
4493
- mempool_destroy(ha->ctx_mempool);
4745
+ mempool_destroy(ha->ctx_mempool);
4746
+ ha->ctx_mempool = NULL;
4747
+
4748
+ if (ql2xenabledif && ha->dif_bundl_pool) {
4749
+ struct dsd_dma *dsd, *nxt;
4750
+
4751
+ list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
4752
+ list) {
4753
+ list_del(&dsd->list);
4754
+ dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
4755
+ dsd->dsd_list_dma);
4756
+ ha->dif_bundle_dma_allocs--;
4757
+ kfree(dsd);
4758
+ ha->dif_bundle_kallocs--;
4759
+ ha->pool.unusable.count--;
4760
+ }
4761
+ list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) {
4762
+ list_del(&dsd->list);
4763
+ dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
4764
+ dsd->dsd_list_dma);
4765
+ ha->dif_bundle_dma_allocs--;
4766
+ kfree(dsd);
4767
+ ha->dif_bundle_kallocs--;
4768
+ }
4769
+ }
4770
+
4771
+ dma_pool_destroy(ha->dif_bundl_pool);
4772
+ ha->dif_bundl_pool = NULL;
44944773
44954774 qlt_mem_free(ha);
44964775
44974776 if (ha->init_cb)
44984777 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
44994778 ha->init_cb, ha->init_cb_dma);
4500
-
4501
- vfree(ha->optrom_buffer);
4502
- kfree(ha->nvram);
4503
- kfree(ha->npiv_info);
4504
- kfree(ha->swl);
4505
- kfree(ha->loop_id_map);
4506
-
4507
- ha->srb_mempool = NULL;
4508
- ha->ctx_mempool = NULL;
4509
- ha->sns_cmd = NULL;
4510
- ha->sns_cmd_dma = 0;
4511
- ha->ct_sns = NULL;
4512
- ha->ct_sns_dma = 0;
4513
- ha->ms_iocb = NULL;
4514
- ha->ms_iocb_dma = 0;
45154779 ha->init_cb = NULL;
45164780 ha->init_cb_dma = 0;
4517
- ha->ex_init_cb = NULL;
4518
- ha->ex_init_cb_dma = 0;
4519
- ha->async_pd = NULL;
4520
- ha->async_pd_dma = 0;
4521
- ha->loop_id_map = NULL;
4522
- ha->npiv_info = NULL;
4781
+
4782
+ vfree(ha->optrom_buffer);
45234783 ha->optrom_buffer = NULL;
4524
- ha->swl = NULL;
4784
+ kfree(ha->nvram);
45254785 ha->nvram = NULL;
4526
- ha->mctp_dump = NULL;
4527
- ha->dcbx_tlv = NULL;
4528
- ha->xgmac_data = NULL;
4529
- ha->sfp_data = NULL;
4530
-
4531
- ha->s_dma_pool = NULL;
4532
- ha->dl_dma_pool = NULL;
4533
- ha->fcp_cmnd_dma_pool = NULL;
4534
-
4535
- ha->gid_list = NULL;
4536
- ha->gid_list_dma = 0;
4537
-
4538
- ha->tgt.atio_ring = NULL;
4539
- ha->tgt.atio_dma = 0;
4540
- ha->tgt.tgt_vp_map = NULL;
4786
+ kfree(ha->npiv_info);
4787
+ ha->npiv_info = NULL;
4788
+ kfree(ha->swl);
4789
+ ha->swl = NULL;
4790
+ kfree(ha->loop_id_map);
4791
+ ha->sf_init_cb = NULL;
4792
+ ha->sf_init_cb_dma = 0;
4793
+ ha->loop_id_map = NULL;
45414794 }
45424795
45434796 struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
....@@ -4561,6 +4814,10 @@
45614814 vha->host_no = host->host_no;
45624815 vha->hw = ha;
45634816
4817
+ vha->qlini_mode = ql2x_ini_mode;
4818
+ vha->ql2xexchoffld = ql2xexchoffld;
4819
+ vha->ql2xiniexchg = ql2xiniexchg;
4820
+
45644821 INIT_LIST_HEAD(&vha->vp_fcports);
45654822 INIT_LIST_HEAD(&vha->work_list);
45664823 INIT_LIST_HEAD(&vha->list);
....@@ -4570,9 +4827,11 @@
45704827 INIT_LIST_HEAD(&vha->plogi_ack_list);
45714828 INIT_LIST_HEAD(&vha->qp_list);
45724829 INIT_LIST_HEAD(&vha->gnl.fcports);
4573
- INIT_LIST_HEAD(&vha->nvme_rport_list);
45744830 INIT_LIST_HEAD(&vha->gpnid_list);
45754831 INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn);
4832
+
4833
+ INIT_LIST_HEAD(&vha->purex_list.head);
4834
+ spin_lock_init(&vha->purex_list.lock);
45764835
45774836 spin_lock_init(&vha->work_lock);
45784837 spin_lock_init(&vha->cmd_list_lock);
....@@ -4586,7 +4845,7 @@
45864845 if (!vha->gnl.l) {
45874846 ql_log(ql_log_fatal, vha, 0xd04a,
45884847 "Alloc failed for name list.\n");
4589
- scsi_remove_host(vha->host);
4848
+ scsi_host_put(vha->host);
45904849 return NULL;
45914850 }
45924851
....@@ -4599,12 +4858,13 @@
45994858 dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
46004859 vha->gnl.l, vha->gnl.ldma);
46014860 vha->gnl.l = NULL;
4602
- scsi_remove_host(vha->host);
4861
+ scsi_host_put(vha->host);
46034862 return NULL;
46044863 }
46054864 INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
46064865
4607
- sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
4866
+ snprintf(vha->host_str, sizeof(vha->host_str), "%s_%lu",
4867
+ QLA2XXX_DRIVER_NAME, vha->host_no);
46084868 ql_dbg(ql_dbg_init, vha, 0x0041,
46094869 "Allocated the host=%p hw=%p vha=%p dev_name=%s",
46104870 vha->host, vha->hw, vha,
....@@ -4708,9 +4968,7 @@
47084968
47094969 qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
47104970 qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
4711
-qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
47124971 qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
4713
-qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
47144972 qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
47154973 qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);
47164974
....@@ -4735,7 +4993,7 @@
47354993
47364994 switch (code) {
47374995 case QLA_UEVENT_CODE_FW_DUMP:
4738
- snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
4996
+ snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu",
47394997 vha->host_no);
47404998 break;
47414999 default:
....@@ -4776,7 +5034,7 @@
47765034 fcport->jiffies_at_registration = jiffies;
47775035 fcport->sec_since_registration = 0;
47785036 fcport->next_disc_state = DSC_DELETED;
4779
- fcport->disc_state = DSC_UPD_FCPORT;
5037
+ qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
47805038 spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
47815039
47825040 queue_work(system_unbound_wq, &fcport->reg_work);
....@@ -4817,23 +5075,29 @@
48175075 fcport->d_id = e->u.new_sess.id;
48185076 fcport->flags |= FCF_FABRIC_DEVICE;
48195077 fcport->fw_login_state = DSC_LS_PLOGI_PEND;
4820
- if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP)
4821
- fcport->fc4_type = FC4_TYPE_FCP_SCSI;
4822
-
4823
- if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) {
4824
- fcport->fc4_type = FC4_TYPE_OTHER;
4825
- fcport->fc4f_nvme = FC4_TYPE_NVME;
4826
- }
48275078
48285079 memcpy(fcport->port_name, e->u.new_sess.port_name,
48295080 WWN_SIZE);
5081
+
5082
+ fcport->fc4_type = e->u.new_sess.fc4_type;
5083
+ if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) {
5084
+ fcport->dm_login_expire = jiffies +
5085
+ QLA_N2N_WAIT_TIME * HZ;
5086
+ fcport->fc4_type = FS_FC4TYPE_FCP;
5087
+ fcport->n2n_flag = 1;
5088
+ if (vha->flags.nvme_enabled)
5089
+ fcport->fc4_type |= FS_FC4TYPE_NVME;
5090
+ }
5091
+
48305092 } else {
48315093 ql_dbg(ql_dbg_disc, vha, 0xffff,
48325094 "%s %8phC mem alloc fail.\n",
48335095 __func__, e->u.new_sess.port_name);
48345096
4835
- if (pla)
5097
+ if (pla) {
5098
+ list_del(&pla->list);
48365099 kmem_cache_free(qla_tgt_plogi_cachep, pla);
5100
+ }
48375101 return;
48385102 }
48395103
....@@ -4923,19 +5187,17 @@
49235187 if (dfcp)
49245188 qlt_schedule_sess_for_deletion(tfcp);
49255189
4926
-
4927
- if (N2N_TOPO(vha->hw))
4928
- fcport->flags &= ~FCF_FABRIC_DEVICE;
4929
-
49305190 if (N2N_TOPO(vha->hw)) {
5191
+ fcport->flags &= ~FCF_FABRIC_DEVICE;
5192
+ fcport->keep_nport_handle = 1;
49315193 if (vha->flags.nvme_enabled) {
4932
- fcport->fc4f_nvme = 1;
5194
+ fcport->fc4_type =
5195
+ (FS_FC4TYPE_NVME | FS_FC4TYPE_FCP);
49335196 fcport->n2n_flag = 1;
49345197 }
49355198 fcport->fw_login_state = 0;
4936
- /*
4937
- * wait link init done before sending login
4938
- */
5199
+
5200
+ schedule_delayed_work(&vha->scan.scan_work, 5);
49395201 } else {
49405202 qla24xx_fcport_handle_login(vha, fcport);
49415203 }
....@@ -4944,8 +5206,10 @@
49445206
49455207 if (free_fcport) {
49465208 qla2x00_free_fcport(fcport);
4947
- if (pla)
5209
+ if (pla) {
5210
+ list_del(&pla->list);
49485211 kmem_cache_free(qla_tgt_plogi_cachep, pla);
5212
+ }
49495213 }
49505214 }
49515215
....@@ -4969,14 +5233,14 @@
49695233 struct qla_work_evt *e, *tmp;
49705234 unsigned long flags;
49715235 LIST_HEAD(work);
5236
+ int rc;
49725237
49735238 spin_lock_irqsave(&vha->work_lock, flags);
49745239 list_splice_init(&vha->work_list, &work);
49755240 spin_unlock_irqrestore(&vha->work_lock, flags);
49765241
49775242 list_for_each_entry_safe(e, tmp, &work, list) {
4978
- list_del_init(&e->list);
4979
-
5243
+ rc = QLA_SUCCESS;
49805244 switch (e->type) {
49815245 case QLA_EVT_AEN:
49825246 fc_host_post_event(vha->host, fc_get_event_number(),
....@@ -4990,18 +5254,10 @@
49905254 e->u.logio.data);
49915255 break;
49925256 case QLA_EVT_ASYNC_LOGOUT:
4993
- qla2x00_async_logout(vha, e->u.logio.fcport);
4994
- break;
4995
- case QLA_EVT_ASYNC_LOGOUT_DONE:
4996
- qla2x00_async_logout_done(vha, e->u.logio.fcport,
4997
- e->u.logio.data);
5257
+ rc = qla2x00_async_logout(vha, e->u.logio.fcport);
49985258 break;
49995259 case QLA_EVT_ASYNC_ADISC:
50005260 qla2x00_async_adisc(vha, e->u.logio.fcport,
5001
- e->u.logio.data);
5002
- break;
5003
- case QLA_EVT_ASYNC_ADISC_DONE:
5004
- qla2x00_async_adisc_done(vha, e->u.logio.fcport,
50055261 e->u.logio.data);
50065262 break;
50075263 case QLA_EVT_UEVENT:
....@@ -5009,9 +5265,6 @@
50095265 break;
50105266 case QLA_EVT_AENFX:
50115267 qlafx00_process_aen(vha, e);
5012
- break;
5013
- case QLA_EVT_GIDPN:
5014
- qla24xx_async_gidpn(vha, e->u.fcport.fcport);
50155268 break;
50165269 case QLA_EVT_GPNID:
50175270 qla24xx_async_gpnid(vha, &e->u.gpnid.id);
....@@ -5042,7 +5295,7 @@
50425295 qla24xx_do_nack_work(vha, e);
50435296 break;
50445297 case QLA_EVT_ASYNC_PRLO:
5045
- qla2x00_async_prlo(vha, e->u.logio.fcport);
5298
+ rc = qla2x00_async_prlo(vha, e->u.logio.fcport);
50465299 break;
50475300 case QLA_EVT_ASYNC_PRLO_DONE:
50485301 qla2x00_async_prlo_done(vha, e->u.logio.fcport,
....@@ -5075,6 +5328,15 @@
50755328 e->u.fcport.fcport, false);
50765329 break;
50775330 }
5331
+
5332
+ if (rc == EAGAIN) {
5333
+ /* put 'work' at head of 'vha->work_list' */
5334
+ spin_lock_irqsave(&vha->work_lock, flags);
5335
+ list_splice(&work, &vha->work_list);
5336
+ spin_unlock_irqrestore(&vha->work_lock, flags);
5337
+ break;
5338
+ }
5339
+ list_del_init(&e->list);
50785340 if (e->flags & QLA_EVT_FLAG_FREE)
50795341 kfree(e);
50805342
....@@ -5123,9 +5385,13 @@
51235385 } else {
51245386 if (vha->hw->current_topology != ISP_CFG_NL) {
51255387 memset(&ea, 0, sizeof(ea));
5126
- ea.event = FCME_RELOGIN;
51275388 ea.fcport = fcport;
5128
- qla2x00_fcport_event_handler(vha, &ea);
5389
+ qla24xx_handle_relogin_event(vha, &ea);
5390
+ } else if (vha->hw->current_topology ==
5391
+ ISP_CFG_NL &&
5392
+ IS_QLA2XXX_MIDTYPE(vha->hw)) {
5393
+ (void)qla24xx_fcport_handle_login(vha,
5394
+ fcport);
51295395 } else if (vha->hw->current_topology ==
51305396 ISP_CFG_NL) {
51315397 fcport->login_retry--;
....@@ -5371,6 +5637,7 @@
53715637 uint32_t idc_lck_rcvry_stage_mask = 0x3;
53725638 uint32_t idc_lck_rcvry_owner_mask = 0x3c;
53735639 struct qla_hw_data *ha = base_vha->hw;
5640
+
53745641 ql_dbg(ql_dbg_p3p, base_vha, 0xb086,
53755642 "Trying force recovery of the IDC lock.\n");
53765643
....@@ -5462,7 +5729,6 @@
54625729 void
54635730 qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
54645731 {
5465
- uint16_t options = (requester_id << 15) | BIT_6;
54665732 uint32_t data;
54675733 uint32_t lock_owner;
54685734 struct qla_hw_data *ha = base_vha->hw;
....@@ -5495,22 +5761,492 @@
54955761 }
54965762
54975763 return;
5764
+}
54985765
5499
- /* XXX: IDC-lock implementation using access-control mbx */
5500
-retry_lock2:
5501
- if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
5502
- ql_dbg(ql_dbg_p3p, base_vha, 0xb072,
5503
- "Failed to acquire IDC lock. retrying...\n");
5504
- /* Retry/Perform IDC-Lock recovery */
5505
- if (qla83xx_idc_lock_recovery(base_vha) == QLA_SUCCESS) {
5506
- qla83xx_wait_logic();
5507
- goto retry_lock2;
5508
- } else
5509
- ql_log(ql_log_warn, base_vha, 0xb076,
5510
- "IDC Lock recovery FAILED.\n");
5766
+static bool
5767
+qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha,
5768
+ struct purex_entry_24xx *purex)
5769
+{
5770
+ char fwstr[16];
5771
+ u32 sid = purex->s_id[2] << 16 | purex->s_id[1] << 8 | purex->s_id[0];
5772
+ struct port_database_24xx *pdb;
5773
+
5774
+ /* Domain Controller is always logged-out. */
5775
+ /* if RDP request is not from Domain Controller: */
5776
+ if (sid != 0xfffc01)
5777
+ return false;
5778
+
5779
+ ql_dbg(ql_dbg_init, vha, 0x0181, "%s: s_id=%#x\n", __func__, sid);
5780
+
5781
+ pdb = kzalloc(sizeof(*pdb), GFP_KERNEL);
5782
+ if (!pdb) {
5783
+ ql_dbg(ql_dbg_init, vha, 0x0181,
5784
+ "%s: Failed allocate pdb\n", __func__);
5785
+ } else if (qla24xx_get_port_database(vha,
5786
+ le16_to_cpu(purex->nport_handle), pdb)) {
5787
+ ql_dbg(ql_dbg_init, vha, 0x0181,
5788
+ "%s: Failed get pdb sid=%x\n", __func__, sid);
5789
+ } else if (pdb->current_login_state != PDS_PLOGI_COMPLETE &&
5790
+ pdb->current_login_state != PDS_PRLI_COMPLETE) {
5791
+ ql_dbg(ql_dbg_init, vha, 0x0181,
5792
+ "%s: Port not logged in sid=%#x\n", __func__, sid);
5793
+ } else {
5794
+ /* RDP request is from logged in port */
5795
+ kfree(pdb);
5796
+ return false;
5797
+ }
5798
+ kfree(pdb);
5799
+
5800
+ vha->hw->isp_ops->fw_version_str(vha, fwstr, sizeof(fwstr));
5801
+ fwstr[strcspn(fwstr, " ")] = 0;
5802
+ /* if FW version allows RDP response length upto 2048 bytes: */
5803
+ if (strcmp(fwstr, "8.09.00") > 0 || strcmp(fwstr, "8.05.65") == 0)
5804
+ return false;
5805
+
5806
+ ql_dbg(ql_dbg_init, vha, 0x0181, "%s: fw=%s\n", __func__, fwstr);
5807
+
5808
+ /* RDP response length is to be reduced to maximum 256 bytes */
5809
+ return true;
5810
+}
5811
+
5812
+/*
5813
+ * Function Name: qla24xx_process_purex_iocb
5814
+ *
5815
+ * Description:
5816
+ * Prepare a RDP response and send to Fabric switch
5817
+ *
5818
+ * PARAMETERS:
5819
+ * vha: SCSI qla host
5820
+ * purex: RDP request received by HBA
5821
+ */
5822
+void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
5823
+ struct purex_item *item)
5824
+{
5825
+ struct qla_hw_data *ha = vha->hw;
5826
+ struct purex_entry_24xx *purex =
5827
+ (struct purex_entry_24xx *)&item->iocb;
5828
+ dma_addr_t rsp_els_dma;
5829
+ dma_addr_t rsp_payload_dma;
5830
+ dma_addr_t stat_dma;
5831
+ dma_addr_t sfp_dma;
5832
+ struct els_entry_24xx *rsp_els = NULL;
5833
+ struct rdp_rsp_payload *rsp_payload = NULL;
5834
+ struct link_statistics *stat = NULL;
5835
+ uint8_t *sfp = NULL;
5836
+ uint16_t sfp_flags = 0;
5837
+ uint rsp_payload_length = sizeof(*rsp_payload);
5838
+ int rval;
5839
+
5840
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0180,
5841
+ "%s: Enter\n", __func__);
5842
+
5843
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0181,
5844
+ "-------- ELS REQ -------\n");
5845
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0182,
5846
+ purex, sizeof(*purex));
5847
+
5848
+ if (qla25xx_rdp_rsp_reduce_size(vha, purex)) {
5849
+ rsp_payload_length =
5850
+ offsetof(typeof(*rsp_payload), optical_elmt_desc);
5851
+ ql_dbg(ql_dbg_init, vha, 0x0181,
5852
+ "Reducing RSP payload length to %u bytes...\n",
5853
+ rsp_payload_length);
55115854 }
55125855
5513
- return;
5856
+ rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els),
5857
+ &rsp_els_dma, GFP_KERNEL);
5858
+ if (!rsp_els) {
5859
+ ql_log(ql_log_warn, vha, 0x0183,
5860
+ "Failed allocate dma buffer ELS RSP.\n");
5861
+ goto dealloc;
5862
+ }
5863
+
5864
+ rsp_payload = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_payload),
5865
+ &rsp_payload_dma, GFP_KERNEL);
5866
+ if (!rsp_payload) {
5867
+ ql_log(ql_log_warn, vha, 0x0184,
5868
+ "Failed allocate dma buffer ELS RSP payload.\n");
5869
+ goto dealloc;
5870
+ }
5871
+
5872
+ sfp = dma_alloc_coherent(&ha->pdev->dev, SFP_RTDI_LEN,
5873
+ &sfp_dma, GFP_KERNEL);
5874
+
5875
+ stat = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stat),
5876
+ &stat_dma, GFP_KERNEL);
5877
+
5878
+ /* Prepare Response IOCB */
5879
+ rsp_els->entry_type = ELS_IOCB_TYPE;
5880
+ rsp_els->entry_count = 1;
5881
+ rsp_els->sys_define = 0;
5882
+ rsp_els->entry_status = 0;
5883
+ rsp_els->handle = 0;
5884
+ rsp_els->nport_handle = purex->nport_handle;
5885
+ rsp_els->tx_dsd_count = cpu_to_le16(1);
5886
+ rsp_els->vp_index = purex->vp_idx;
5887
+ rsp_els->sof_type = EST_SOFI3;
5888
+ rsp_els->rx_xchg_address = purex->rx_xchg_addr;
5889
+ rsp_els->rx_dsd_count = 0;
5890
+ rsp_els->opcode = purex->els_frame_payload[0];
5891
+
5892
+ rsp_els->d_id[0] = purex->s_id[0];
5893
+ rsp_els->d_id[1] = purex->s_id[1];
5894
+ rsp_els->d_id[2] = purex->s_id[2];
5895
+
5896
+ rsp_els->control_flags = cpu_to_le16(EPD_ELS_ACC);
5897
+ rsp_els->rx_byte_count = 0;
5898
+ rsp_els->tx_byte_count = cpu_to_le32(rsp_payload_length);
5899
+
5900
+ put_unaligned_le64(rsp_payload_dma, &rsp_els->tx_address);
5901
+ rsp_els->tx_len = rsp_els->tx_byte_count;
5902
+
5903
+ rsp_els->rx_address = 0;
5904
+ rsp_els->rx_len = 0;
5905
+
5906
+ /* Prepare Response Payload */
5907
+ rsp_payload->hdr.cmd = cpu_to_be32(0x2 << 24); /* LS_ACC */
5908
+ rsp_payload->hdr.len = cpu_to_be32(le32_to_cpu(rsp_els->tx_byte_count) -
5909
+ sizeof(rsp_payload->hdr));
5910
+
5911
+ /* Link service Request Info Descriptor */
5912
+ rsp_payload->ls_req_info_desc.desc_tag = cpu_to_be32(0x1);
5913
+ rsp_payload->ls_req_info_desc.desc_len =
5914
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc));
5915
+ rsp_payload->ls_req_info_desc.req_payload_word_0 =
5916
+ cpu_to_be32p((uint32_t *)purex->els_frame_payload);
5917
+
5918
+ /* Link service Request Info Descriptor 2 */
5919
+ rsp_payload->ls_req_info_desc2.desc_tag = cpu_to_be32(0x1);
5920
+ rsp_payload->ls_req_info_desc2.desc_len =
5921
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc2));
5922
+ rsp_payload->ls_req_info_desc2.req_payload_word_0 =
5923
+ cpu_to_be32p((uint32_t *)purex->els_frame_payload);
5924
+
5925
+
5926
+ rsp_payload->sfp_diag_desc.desc_tag = cpu_to_be32(0x10000);
5927
+ rsp_payload->sfp_diag_desc.desc_len =
5928
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->sfp_diag_desc));
5929
+
5930
+ if (sfp) {
5931
+ /* SFP Flags */
5932
+ memset(sfp, 0, SFP_RTDI_LEN);
5933
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x7, 2, 0);
5934
+ if (!rval) {
5935
+ /* SFP Flags bits 3-0: Port Tx Laser Type */
5936
+ if (sfp[0] & BIT_2 || sfp[1] & (BIT_6|BIT_5))
5937
+ sfp_flags |= BIT_0; /* short wave */
5938
+ else if (sfp[0] & BIT_1)
5939
+ sfp_flags |= BIT_1; /* long wave 1310nm */
5940
+ else if (sfp[1] & BIT_4)
5941
+ sfp_flags |= BIT_1|BIT_0; /* long wave 1550nm */
5942
+ }
5943
+
5944
+ /* SFP Type */
5945
+ memset(sfp, 0, SFP_RTDI_LEN);
5946
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x0, 1, 0);
5947
+ if (!rval) {
5948
+ sfp_flags |= BIT_4; /* optical */
5949
+ if (sfp[0] == 0x3)
5950
+ sfp_flags |= BIT_6; /* sfp+ */
5951
+ }
5952
+
5953
+ rsp_payload->sfp_diag_desc.sfp_flags = cpu_to_be16(sfp_flags);
5954
+
5955
+ /* SFP Diagnostics */
5956
+ memset(sfp, 0, SFP_RTDI_LEN);
5957
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0x60, 10, 0);
5958
+ if (!rval) {
5959
+ __be16 *trx = (__force __be16 *)sfp; /* already be16 */
5960
+ rsp_payload->sfp_diag_desc.temperature = trx[0];
5961
+ rsp_payload->sfp_diag_desc.vcc = trx[1];
5962
+ rsp_payload->sfp_diag_desc.tx_bias = trx[2];
5963
+ rsp_payload->sfp_diag_desc.tx_power = trx[3];
5964
+ rsp_payload->sfp_diag_desc.rx_power = trx[4];
5965
+ }
5966
+ }
5967
+
5968
+ /* Port Speed Descriptor */
5969
+ rsp_payload->port_speed_desc.desc_tag = cpu_to_be32(0x10001);
5970
+ rsp_payload->port_speed_desc.desc_len =
5971
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_speed_desc));
5972
+ rsp_payload->port_speed_desc.speed_capab = cpu_to_be16(
5973
+ qla25xx_fdmi_port_speed_capability(ha));
5974
+ rsp_payload->port_speed_desc.operating_speed = cpu_to_be16(
5975
+ qla25xx_fdmi_port_speed_currently(ha));
5976
+
5977
+ /* Link Error Status Descriptor */
5978
+ rsp_payload->ls_err_desc.desc_tag = cpu_to_be32(0x10002);
5979
+ rsp_payload->ls_err_desc.desc_len =
5980
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_err_desc));
5981
+
5982
+ if (stat) {
5983
+ rval = qla24xx_get_isp_stats(vha, stat, stat_dma, 0);
5984
+ if (!rval) {
5985
+ rsp_payload->ls_err_desc.link_fail_cnt =
5986
+ cpu_to_be32(le32_to_cpu(stat->link_fail_cnt));
5987
+ rsp_payload->ls_err_desc.loss_sync_cnt =
5988
+ cpu_to_be32(le32_to_cpu(stat->loss_sync_cnt));
5989
+ rsp_payload->ls_err_desc.loss_sig_cnt =
5990
+ cpu_to_be32(le32_to_cpu(stat->loss_sig_cnt));
5991
+ rsp_payload->ls_err_desc.prim_seq_err_cnt =
5992
+ cpu_to_be32(le32_to_cpu(stat->prim_seq_err_cnt));
5993
+ rsp_payload->ls_err_desc.inval_xmit_word_cnt =
5994
+ cpu_to_be32(le32_to_cpu(stat->inval_xmit_word_cnt));
5995
+ rsp_payload->ls_err_desc.inval_crc_cnt =
5996
+ cpu_to_be32(le32_to_cpu(stat->inval_crc_cnt));
5997
+ rsp_payload->ls_err_desc.pn_port_phy_type |= BIT_6;
5998
+ }
5999
+ }
6000
+
6001
+ /* Portname Descriptor */
6002
+ rsp_payload->port_name_diag_desc.desc_tag = cpu_to_be32(0x10003);
6003
+ rsp_payload->port_name_diag_desc.desc_len =
6004
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_diag_desc));
6005
+ memcpy(rsp_payload->port_name_diag_desc.WWNN,
6006
+ vha->node_name,
6007
+ sizeof(rsp_payload->port_name_diag_desc.WWNN));
6008
+ memcpy(rsp_payload->port_name_diag_desc.WWPN,
6009
+ vha->port_name,
6010
+ sizeof(rsp_payload->port_name_diag_desc.WWPN));
6011
+
6012
+ /* F-Port Portname Descriptor */
6013
+ rsp_payload->port_name_direct_desc.desc_tag = cpu_to_be32(0x10003);
6014
+ rsp_payload->port_name_direct_desc.desc_len =
6015
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_direct_desc));
6016
+ memcpy(rsp_payload->port_name_direct_desc.WWNN,
6017
+ vha->fabric_node_name,
6018
+ sizeof(rsp_payload->port_name_direct_desc.WWNN));
6019
+ memcpy(rsp_payload->port_name_direct_desc.WWPN,
6020
+ vha->fabric_port_name,
6021
+ sizeof(rsp_payload->port_name_direct_desc.WWPN));
6022
+
6023
+ /* Bufer Credit Descriptor */
6024
+ rsp_payload->buffer_credit_desc.desc_tag = cpu_to_be32(0x10006);
6025
+ rsp_payload->buffer_credit_desc.desc_len =
6026
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->buffer_credit_desc));
6027
+ rsp_payload->buffer_credit_desc.fcport_b2b = 0;
6028
+ rsp_payload->buffer_credit_desc.attached_fcport_b2b = cpu_to_be32(0);
6029
+ rsp_payload->buffer_credit_desc.fcport_rtt = cpu_to_be32(0);
6030
+
6031
+ if (ha->flags.plogi_template_valid) {
6032
+ uint32_t tmp =
6033
+ be16_to_cpu(ha->plogi_els_payld.fl_csp.sp_bb_cred);
6034
+ rsp_payload->buffer_credit_desc.fcport_b2b = cpu_to_be32(tmp);
6035
+ }
6036
+
6037
+ if (rsp_payload_length < sizeof(*rsp_payload))
6038
+ goto send;
6039
+
6040
+ /* Optical Element Descriptor, Temperature */
6041
+ rsp_payload->optical_elmt_desc[0].desc_tag = cpu_to_be32(0x10007);
6042
+ rsp_payload->optical_elmt_desc[0].desc_len =
6043
+ cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
6044
+ /* Optical Element Descriptor, Voltage */
6045
+ rsp_payload->optical_elmt_desc[1].desc_tag = cpu_to_be32(0x10007);
6046
+ rsp_payload->optical_elmt_desc[1].desc_len =
6047
+ cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
6048
+ /* Optical Element Descriptor, Tx Bias Current */
6049
+ rsp_payload->optical_elmt_desc[2].desc_tag = cpu_to_be32(0x10007);
6050
+ rsp_payload->optical_elmt_desc[2].desc_len =
6051
+ cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
6052
+ /* Optical Element Descriptor, Tx Power */
6053
+ rsp_payload->optical_elmt_desc[3].desc_tag = cpu_to_be32(0x10007);
6054
+ rsp_payload->optical_elmt_desc[3].desc_len =
6055
+ cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
6056
+ /* Optical Element Descriptor, Rx Power */
6057
+ rsp_payload->optical_elmt_desc[4].desc_tag = cpu_to_be32(0x10007);
6058
+ rsp_payload->optical_elmt_desc[4].desc_len =
6059
+ cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
6060
+
6061
+ if (sfp) {
6062
+ memset(sfp, 0, SFP_RTDI_LEN);
6063
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0, 64, 0);
6064
+ if (!rval) {
6065
+ __be16 *trx = (__force __be16 *)sfp; /* already be16 */
6066
+
6067
+ /* Optical Element Descriptor, Temperature */
6068
+ rsp_payload->optical_elmt_desc[0].high_alarm = trx[0];
6069
+ rsp_payload->optical_elmt_desc[0].low_alarm = trx[1];
6070
+ rsp_payload->optical_elmt_desc[0].high_warn = trx[2];
6071
+ rsp_payload->optical_elmt_desc[0].low_warn = trx[3];
6072
+ rsp_payload->optical_elmt_desc[0].element_flags =
6073
+ cpu_to_be32(1 << 28);
6074
+
6075
+ /* Optical Element Descriptor, Voltage */
6076
+ rsp_payload->optical_elmt_desc[1].high_alarm = trx[4];
6077
+ rsp_payload->optical_elmt_desc[1].low_alarm = trx[5];
6078
+ rsp_payload->optical_elmt_desc[1].high_warn = trx[6];
6079
+ rsp_payload->optical_elmt_desc[1].low_warn = trx[7];
6080
+ rsp_payload->optical_elmt_desc[1].element_flags =
6081
+ cpu_to_be32(2 << 28);
6082
+
6083
+ /* Optical Element Descriptor, Tx Bias Current */
6084
+ rsp_payload->optical_elmt_desc[2].high_alarm = trx[8];
6085
+ rsp_payload->optical_elmt_desc[2].low_alarm = trx[9];
6086
+ rsp_payload->optical_elmt_desc[2].high_warn = trx[10];
6087
+ rsp_payload->optical_elmt_desc[2].low_warn = trx[11];
6088
+ rsp_payload->optical_elmt_desc[2].element_flags =
6089
+ cpu_to_be32(3 << 28);
6090
+
6091
+ /* Optical Element Descriptor, Tx Power */
6092
+ rsp_payload->optical_elmt_desc[3].high_alarm = trx[12];
6093
+ rsp_payload->optical_elmt_desc[3].low_alarm = trx[13];
6094
+ rsp_payload->optical_elmt_desc[3].high_warn = trx[14];
6095
+ rsp_payload->optical_elmt_desc[3].low_warn = trx[15];
6096
+ rsp_payload->optical_elmt_desc[3].element_flags =
6097
+ cpu_to_be32(4 << 28);
6098
+
6099
+ /* Optical Element Descriptor, Rx Power */
6100
+ rsp_payload->optical_elmt_desc[4].high_alarm = trx[16];
6101
+ rsp_payload->optical_elmt_desc[4].low_alarm = trx[17];
6102
+ rsp_payload->optical_elmt_desc[4].high_warn = trx[18];
6103
+ rsp_payload->optical_elmt_desc[4].low_warn = trx[19];
6104
+ rsp_payload->optical_elmt_desc[4].element_flags =
6105
+ cpu_to_be32(5 << 28);
6106
+ }
6107
+
6108
+ memset(sfp, 0, SFP_RTDI_LEN);
6109
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 112, 64, 0);
6110
+ if (!rval) {
6111
+ /* Temperature high/low alarm/warning */
6112
+ rsp_payload->optical_elmt_desc[0].element_flags |=
6113
+ cpu_to_be32(
6114
+ (sfp[0] >> 7 & 1) << 3 |
6115
+ (sfp[0] >> 6 & 1) << 2 |
6116
+ (sfp[4] >> 7 & 1) << 1 |
6117
+ (sfp[4] >> 6 & 1) << 0);
6118
+
6119
+ /* Voltage high/low alarm/warning */
6120
+ rsp_payload->optical_elmt_desc[1].element_flags |=
6121
+ cpu_to_be32(
6122
+ (sfp[0] >> 5 & 1) << 3 |
6123
+ (sfp[0] >> 4 & 1) << 2 |
6124
+ (sfp[4] >> 5 & 1) << 1 |
6125
+ (sfp[4] >> 4 & 1) << 0);
6126
+
6127
+ /* Tx Bias Current high/low alarm/warning */
6128
+ rsp_payload->optical_elmt_desc[2].element_flags |=
6129
+ cpu_to_be32(
6130
+ (sfp[0] >> 3 & 1) << 3 |
6131
+ (sfp[0] >> 2 & 1) << 2 |
6132
+ (sfp[4] >> 3 & 1) << 1 |
6133
+ (sfp[4] >> 2 & 1) << 0);
6134
+
6135
+ /* Tx Power high/low alarm/warning */
6136
+ rsp_payload->optical_elmt_desc[3].element_flags |=
6137
+ cpu_to_be32(
6138
+ (sfp[0] >> 1 & 1) << 3 |
6139
+ (sfp[0] >> 0 & 1) << 2 |
6140
+ (sfp[4] >> 1 & 1) << 1 |
6141
+ (sfp[4] >> 0 & 1) << 0);
6142
+
6143
+ /* Rx Power high/low alarm/warning */
6144
+ rsp_payload->optical_elmt_desc[4].element_flags |=
6145
+ cpu_to_be32(
6146
+ (sfp[1] >> 7 & 1) << 3 |
6147
+ (sfp[1] >> 6 & 1) << 2 |
6148
+ (sfp[5] >> 7 & 1) << 1 |
6149
+ (sfp[5] >> 6 & 1) << 0);
6150
+ }
6151
+ }
6152
+
6153
+ /* Optical Product Data Descriptor */
6154
+ rsp_payload->optical_prod_desc.desc_tag = cpu_to_be32(0x10008);
6155
+ rsp_payload->optical_prod_desc.desc_len =
6156
+ cpu_to_be32(RDP_DESC_LEN(rsp_payload->optical_prod_desc));
6157
+
6158
+ if (sfp) {
6159
+ memset(sfp, 0, SFP_RTDI_LEN);
6160
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 20, 64, 0);
6161
+ if (!rval) {
6162
+ memcpy(rsp_payload->optical_prod_desc.vendor_name,
6163
+ sfp + 0,
6164
+ sizeof(rsp_payload->optical_prod_desc.vendor_name));
6165
+ memcpy(rsp_payload->optical_prod_desc.part_number,
6166
+ sfp + 20,
6167
+ sizeof(rsp_payload->optical_prod_desc.part_number));
6168
+ memcpy(rsp_payload->optical_prod_desc.revision,
6169
+ sfp + 36,
6170
+ sizeof(rsp_payload->optical_prod_desc.revision));
6171
+ memcpy(rsp_payload->optical_prod_desc.serial_number,
6172
+ sfp + 48,
6173
+ sizeof(rsp_payload->optical_prod_desc.serial_number));
6174
+ }
6175
+
6176
+ memset(sfp, 0, SFP_RTDI_LEN);
6177
+ rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 84, 8, 0);
6178
+ if (!rval) {
6179
+ memcpy(rsp_payload->optical_prod_desc.date,
6180
+ sfp + 0,
6181
+ sizeof(rsp_payload->optical_prod_desc.date));
6182
+ }
6183
+ }
6184
+
6185
+send:
6186
+ ql_dbg(ql_dbg_init, vha, 0x0183,
6187
+ "Sending ELS Response to RDP Request...\n");
6188
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0184,
6189
+ "-------- ELS RSP -------\n");
6190
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0185,
6191
+ rsp_els, sizeof(*rsp_els));
6192
+ ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0186,
6193
+ "-------- ELS RSP PAYLOAD -------\n");
6194
+ ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0187,
6195
+ rsp_payload, rsp_payload_length);
6196
+
6197
+ rval = qla2x00_issue_iocb(vha, rsp_els, rsp_els_dma, 0);
6198
+
6199
+ if (rval) {
6200
+ ql_log(ql_log_warn, vha, 0x0188,
6201
+ "%s: iocb failed to execute -> %x\n", __func__, rval);
6202
+ } else if (rsp_els->comp_status) {
6203
+ ql_log(ql_log_warn, vha, 0x0189,
6204
+ "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
6205
+ __func__, rsp_els->comp_status,
6206
+ rsp_els->error_subcode_1, rsp_els->error_subcode_2);
6207
+ } else {
6208
+ ql_dbg(ql_dbg_init, vha, 0x018a, "%s: done.\n", __func__);
6209
+ }
6210
+
6211
+dealloc:
6212
+ if (stat)
6213
+ dma_free_coherent(&ha->pdev->dev, sizeof(*stat),
6214
+ stat, stat_dma);
6215
+ if (sfp)
6216
+ dma_free_coherent(&ha->pdev->dev, SFP_RTDI_LEN,
6217
+ sfp, sfp_dma);
6218
+ if (rsp_payload)
6219
+ dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_payload),
6220
+ rsp_payload, rsp_payload_dma);
6221
+ if (rsp_els)
6222
+ dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els),
6223
+ rsp_els, rsp_els_dma);
6224
+}
6225
+
6226
+void
6227
+qla24xx_free_purex_item(struct purex_item *item)
6228
+{
6229
+ if (item == &item->vha->default_item)
6230
+ memset(&item->vha->default_item, 0, sizeof(struct purex_item));
6231
+ else
6232
+ kfree(item);
6233
+}
6234
+
6235
+void qla24xx_process_purex_list(struct purex_list *list)
6236
+{
6237
+ struct list_head head = LIST_HEAD_INIT(head);
6238
+ struct purex_item *item, *next;
6239
+ ulong flags;
6240
+
6241
+ spin_lock_irqsave(&list->lock, flags);
6242
+ list_splice_init(&list->head, &head);
6243
+ spin_unlock_irqrestore(&list->lock, flags);
6244
+
6245
+ list_for_each_entry_safe(item, next, &head, list) {
6246
+ list_del(&item->list);
6247
+ item->process_item(item->vha, item);
6248
+ qla24xx_free_purex_item(item);
6249
+ }
55146250 }
55156251
55166252 void
....@@ -5922,6 +6658,9 @@
59226658
59236659 schedule();
59246660
6661
+ if (test_and_clear_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags))
6662
+ qla_pci_set_eeh_busy(base_vha);
6663
+
59256664 if (!base_vha->flags.init_done || ha->flags.mbox_busy)
59266665 goto end_loop;
59276666
....@@ -6034,13 +6773,14 @@
60346773 }
60356774
60366775 if (test_and_clear_bit(DETECT_SFP_CHANGE,
6037
- &base_vha->dpc_flags) &&
6038
- !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) {
6039
- qla24xx_detect_sfp(base_vha);
6040
-
6041
- if (ha->flags.detected_lr_sfp !=
6042
- ha->flags.using_lr_setting)
6043
- set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
6776
+ &base_vha->dpc_flags)) {
6777
+ /* Semantic:
6778
+ * - NO-OP -- await next ISP-ABORT. Preferred method
6779
+ * to minimize disruptions that will occur
6780
+ * when a forced chip-reset occurs.
6781
+ * - Force -- ISP-ABORT scheduled.
6782
+ */
6783
+ /* set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); */
60446784 }
60456785
60466786 if (test_and_clear_bit
....@@ -6048,15 +6788,17 @@
60486788 !test_bit(UNLOADING, &base_vha->dpc_flags)) {
60496789 bool do_reset = true;
60506790
6051
- switch (ql2x_ini_mode) {
6791
+ switch (base_vha->qlini_mode) {
60526792 case QLA2XXX_INI_MODE_ENABLED:
60536793 break;
60546794 case QLA2XXX_INI_MODE_DISABLED:
6055
- if (!qla_tgt_mode_enabled(base_vha))
6795
+ if (!qla_tgt_mode_enabled(base_vha) &&
6796
+ !ha->flags.fw_started)
60566797 do_reset = false;
60576798 break;
60586799 case QLA2XXX_INI_MODE_DUAL:
6059
- if (!qla_dual_mode_enabled(base_vha))
6800
+ if (!qla_dual_mode_enabled(base_vha) &&
6801
+ !ha->flags.fw_started)
60606802 do_reset = false;
60616803 break;
60626804 default:
....@@ -6077,6 +6819,15 @@
60776819 &base_vha->dpc_flags);
60786820 ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
60796821 "ISP abort end.\n");
6822
+ }
6823
+ }
6824
+
6825
+ if (test_bit(PROCESS_PUREX_IOCB, &base_vha->dpc_flags)) {
6826
+ if (atomic_read(&base_vha->loop_state) == LOOP_READY) {
6827
+ qla24xx_process_purex_list
6828
+ (&base_vha->purex_list);
6829
+ clear_bit(PROCESS_PUREX_IOCB,
6830
+ &base_vha->dpc_flags);
60806831 }
60816832 }
60826833
....@@ -6149,9 +6900,12 @@
61496900 }
61506901 }
61516902 loop_resync_check:
6152
- if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
6903
+ if (!qla2x00_reset_active(base_vha) &&
6904
+ test_and_clear_bit(LOOP_RESYNC_NEEDED,
61536905 &base_vha->dpc_flags)) {
6154
-
6906
+ /*
6907
+ * Allow abort_isp to complete before moving on to scanning.
6908
+ */
61556909 ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
61566910 "Loop resync scheduled.\n");
61576911
....@@ -6203,14 +6957,20 @@
62036957 mutex_unlock(&ha->mq_lock);
62046958 }
62056959
6206
- if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED, &base_vha->dpc_flags)) {
6960
+ if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED,
6961
+ &base_vha->dpc_flags)) {
6962
+ u16 threshold = ha->nvme_last_rptd_aen + ha->last_zio_threshold;
6963
+
6964
+ if (threshold > ha->orig_fw_xcb_count)
6965
+ threshold = ha->orig_fw_xcb_count;
6966
+
62076967 ql_log(ql_log_info, base_vha, 0xffffff,
6208
- "nvme: SET ZIO Activity exchange threshold to %d.\n",
6209
- ha->nvme_last_rptd_aen);
6210
- if (qla27xx_set_zio_threshold(base_vha, ha->nvme_last_rptd_aen)) {
6968
+ "SET ZIO Activity exchange threshold to %d.\n",
6969
+ threshold);
6970
+ if (qla27xx_set_zio_threshold(base_vha, threshold)) {
62116971 ql_log(ql_log_info, base_vha, 0xffffff,
6212
- "nvme: Unable to SET ZIO Activity exchange threshold to %d.\n",
6213
- ha->nvme_last_rptd_aen);
6972
+ "Unable to SET ZIO Activity exchange threshold to %d.\n",
6973
+ threshold);
62146974 }
62156975 }
62166976
....@@ -6384,7 +7144,7 @@
63847144
63857145 /* if the loop has been down for 4 minutes, reinit adapter */
63867146 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
6387
- if (!(vha->device_flags & DFLG_NO_CABLE)) {
7147
+ if (!(vha->device_flags & DFLG_NO_CABLE) && !vha->vp_idx) {
63887148 ql_log(ql_log_warn, vha, 0x6009,
63897149 "Loop down - aborting ISP.\n");
63907150
....@@ -6426,13 +7186,26 @@
64267186 * FC-NVME
64277187 * see if the active AEN count has changed from what was last reported.
64287188 */
7189
+ index = atomic_read(&ha->nvme_active_aen_cnt);
64297190 if (!vha->vp_idx &&
6430
- atomic_read(&ha->nvme_active_aen_cnt) != ha->nvme_last_rptd_aen &&
6431
- ha->zio_mode == QLA_ZIO_MODE_6) {
6432
- ql_log(ql_log_info, vha, 0x3002,
6433
- "nvme: Sched: Set ZIO exchange threshold to %d.\n",
6434
- ha->nvme_last_rptd_aen);
7191
+ (index != ha->nvme_last_rptd_aen) &&
7192
+ ha->zio_mode == QLA_ZIO_MODE_6 &&
7193
+ !ha->flags.host_shutting_down) {
64357194 ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
7195
+ ql_log(ql_log_info, vha, 0x3002,
7196
+ "nvme: Sched: Set ZIO exchange threshold to %d.\n",
7197
+ ha->nvme_last_rptd_aen);
7198
+ set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
7199
+ start_dpc++;
7200
+ }
7201
+
7202
+ if (!vha->vp_idx &&
7203
+ atomic_read(&ha->zio_threshold) != ha->last_zio_threshold &&
7204
+ IS_ZIO_THRESHOLD_CAPABLE(ha)) {
7205
+ ql_log(ql_log_info, vha, 0x3002,
7206
+ "Sched: Set ZIO exchange threshold to %d.\n",
7207
+ ha->last_zio_threshold);
7208
+ ha->last_zio_threshold = atomic_read(&ha->zio_threshold);
64367209 set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
64377210 start_dpc++;
64387211 }
....@@ -6447,7 +7220,8 @@
64477220 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
64487221 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
64497222 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
6450
- test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) {
7223
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags) ||
7224
+ test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags))) {
64517225 ql_dbg(ql_dbg_timer, vha, 0x600b,
64527226 "isp_abort_needed=%d loop_resync_needed=%d "
64537227 "fcport_update_needed=%d start_dpc=%d "
....@@ -6460,12 +7234,13 @@
64607234 ql_dbg(ql_dbg_timer, vha, 0x600c,
64617235 "beacon_blink_needed=%d isp_unrecoverable=%d "
64627236 "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
6463
- "relogin_needed=%d.\n",
7237
+ "relogin_needed=%d, Process_purex_iocb=%d.\n",
64647238 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
64657239 test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
64667240 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
64677241 test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
6468
- test_bit(RELOGIN_NEEDED, &vha->dpc_flags));
7242
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags),
7243
+ test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags));
64697244 qla2xxx_wake_dpc(vha);
64707245 }
64717246
....@@ -6474,7 +7249,6 @@
64747249
64757250 /* Firmware interface routines. */
64767251
6477
-#define FW_BLOBS 11
64787252 #define FW_ISP21XX 0
64797253 #define FW_ISP22XX 1
64807254 #define FW_ISP2300 2
....@@ -6486,6 +7260,7 @@
64867260 #define FW_ISP2031 8
64877261 #define FW_ISP8031 9
64887262 #define FW_ISP27XX 10
7263
+#define FW_ISP28XX 11
64897264
64907265 #define FW_FILE_ISP21XX "ql2100_fw.bin"
64917266 #define FW_FILE_ISP22XX "ql2200_fw.bin"
....@@ -6498,11 +7273,12 @@
64987273 #define FW_FILE_ISP2031 "ql2600_fw.bin"
64997274 #define FW_FILE_ISP8031 "ql8300_fw.bin"
65007275 #define FW_FILE_ISP27XX "ql2700_fw.bin"
7276
+#define FW_FILE_ISP28XX "ql2800_fw.bin"
65017277
65027278
65037279 static DEFINE_MUTEX(qla_fw_lock);
65047280
6505
-static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
7281
+static struct fw_blob qla_fw_blobs[] = {
65067282 { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
65077283 { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
65087284 { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
....@@ -6514,6 +7290,8 @@
65147290 { .name = FW_FILE_ISP2031, },
65157291 { .name = FW_FILE_ISP8031, },
65167292 { .name = FW_FILE_ISP27XX, },
7293
+ { .name = FW_FILE_ISP28XX, },
7294
+ { .name = NULL, },
65177295 };
65187296
65197297 struct fw_blob *
....@@ -6544,9 +7322,14 @@
65447322 blob = &qla_fw_blobs[FW_ISP8031];
65457323 } else if (IS_QLA27XX(ha)) {
65467324 blob = &qla_fw_blobs[FW_ISP27XX];
7325
+ } else if (IS_QLA28XX(ha)) {
7326
+ blob = &qla_fw_blobs[FW_ISP28XX];
65477327 } else {
65487328 return NULL;
65497329 }
7330
+
7331
+ if (!blob->name)
7332
+ return NULL;
65507333
65517334 mutex_lock(&qla_fw_lock);
65527335 if (blob->fw)
....@@ -6557,7 +7340,6 @@
65577340 "Failed to load firmware image (%s).\n", blob->name);
65587341 blob->fw = NULL;
65597342 blob = NULL;
6560
- goto out;
65617343 }
65627344
65637345 out:
....@@ -6568,27 +7350,92 @@
65687350 static void
65697351 qla2x00_release_firmware(void)
65707352 {
6571
- int idx;
7353
+ struct fw_blob *blob;
65727354
65737355 mutex_lock(&qla_fw_lock);
6574
- for (idx = 0; idx < FW_BLOBS; idx++)
6575
- release_firmware(qla_fw_blobs[idx].fw);
7356
+ for (blob = qla_fw_blobs; blob->name; blob++)
7357
+ release_firmware(blob->fw);
65767358 mutex_unlock(&qla_fw_lock);
65777359 }
7360
+
7361
+static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
7362
+{
7363
+ struct qla_hw_data *ha = vha->hw;
7364
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
7365
+ struct qla_qpair *qpair = NULL;
7366
+ struct scsi_qla_host *vp;
7367
+ fc_port_t *fcport;
7368
+ int i;
7369
+ unsigned long flags;
7370
+
7371
+ ql_dbg(ql_dbg_aer, vha, 0x9000,
7372
+ "%s\n", __func__);
7373
+ ha->chip_reset++;
7374
+
7375
+ ha->base_qpair->chip_reset = ha->chip_reset;
7376
+ for (i = 0; i < ha->max_qpairs; i++) {
7377
+ if (ha->queue_pair_map[i])
7378
+ ha->queue_pair_map[i]->chip_reset =
7379
+ ha->base_qpair->chip_reset;
7380
+ }
7381
+
7382
+ /*
7383
+ * purge mailbox might take a while. Slot Reset/chip reset
7384
+ * will take care of the purge
7385
+ */
7386
+
7387
+ mutex_lock(&ha->mq_lock);
7388
+ ha->base_qpair->online = 0;
7389
+ list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
7390
+ qpair->online = 0;
7391
+ wmb();
7392
+ mutex_unlock(&ha->mq_lock);
7393
+
7394
+ qla2x00_mark_all_devices_lost(vha);
7395
+
7396
+ spin_lock_irqsave(&ha->vport_slock, flags);
7397
+ list_for_each_entry(vp, &ha->vp_list, list) {
7398
+ atomic_inc(&vp->vref_count);
7399
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
7400
+ qla2x00_mark_all_devices_lost(vp);
7401
+ spin_lock_irqsave(&ha->vport_slock, flags);
7402
+ atomic_dec(&vp->vref_count);
7403
+ }
7404
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
7405
+
7406
+ /* Clear all async request states across all VPs. */
7407
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
7408
+ fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
7409
+
7410
+ spin_lock_irqsave(&ha->vport_slock, flags);
7411
+ list_for_each_entry(vp, &ha->vp_list, list) {
7412
+ atomic_inc(&vp->vref_count);
7413
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
7414
+ list_for_each_entry(fcport, &vp->vp_fcports, list)
7415
+ fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
7416
+ spin_lock_irqsave(&ha->vport_slock, flags);
7417
+ atomic_dec(&vp->vref_count);
7418
+ }
7419
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
7420
+}
7421
+
65787422
65797423 static pci_ers_result_t
65807424 qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
65817425 {
65827426 scsi_qla_host_t *vha = pci_get_drvdata(pdev);
65837427 struct qla_hw_data *ha = vha->hw;
7428
+ pci_ers_result_t ret = PCI_ERS_RESULT_NEED_RESET;
65847429
6585
- ql_dbg(ql_dbg_aer, vha, 0x9000,
6586
- "PCI error detected, state %x.\n", state);
7430
+ ql_log(ql_log_warn, vha, 0x9000,
7431
+ "PCI error detected, state %x.\n", state);
7432
+ ha->pci_error_state = QLA_PCI_ERR_DETECTED;
65877433
65887434 if (!atomic_read(&pdev->enable_cnt)) {
65897435 ql_log(ql_log_info, vha, 0xffff,
65907436 "PCI device is disabled,state %x\n", state);
6591
- return PCI_ERS_RESULT_NEED_RESET;
7437
+ ret = PCI_ERS_RESULT_NEED_RESET;
7438
+ goto out;
65927439 }
65937440
65947441 switch (state) {
....@@ -6598,24 +7445,12 @@
65987445 set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
65997446 qla2xxx_wake_dpc(vha);
66007447 }
6601
- return PCI_ERS_RESULT_CAN_RECOVER;
7448
+ ret = PCI_ERS_RESULT_CAN_RECOVER;
7449
+ break;
66027450 case pci_channel_io_frozen:
6603
- ha->flags.eeh_busy = 1;
6604
- /* For ISP82XX complete any pending mailbox cmd */
6605
- if (IS_QLA82XX(ha)) {
6606
- ha->flags.isp82xx_fw_hung = 1;
6607
- ql_dbg(ql_dbg_aer, vha, 0x9001, "Pci channel io frozen\n");
6608
- qla82xx_clear_pending_mbx(vha);
6609
- }
6610
- qla2x00_free_irqs(vha);
6611
- pci_disable_device(pdev);
6612
- /* Return back all IOs */
6613
- qla2x00_abort_all_cmds(vha, DID_RESET << 16);
6614
- if (ql2xmqsupport || ql2xnvmeenable) {
6615
- set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
6616
- qla2xxx_wake_dpc(vha);
6617
- }
6618
- return PCI_ERS_RESULT_NEED_RESET;
7451
+ qla_pci_set_eeh_busy(vha);
7452
+ ret = PCI_ERS_RESULT_NEED_RESET;
7453
+ break;
66197454 case pci_channel_io_perm_failure:
66207455 ha->flags.pci_channel_io_perm_failure = 1;
66217456 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
....@@ -6623,9 +7458,12 @@
66237458 set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
66247459 qla2xxx_wake_dpc(vha);
66257460 }
6626
- return PCI_ERS_RESULT_DISCONNECT;
7461
+ ret = PCI_ERS_RESULT_DISCONNECT;
66277462 }
6628
- return PCI_ERS_RESULT_NEED_RESET;
7463
+out:
7464
+ ql_dbg(ql_dbg_aer, vha, 0x600d,
7465
+ "PCI error detected returning [%x].\n", ret);
7466
+ return ret;
66297467 }
66307468
66317469 static pci_ers_result_t
....@@ -6639,20 +7477,24 @@
66397477 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
66407478 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
66417479
7480
+ ql_log(ql_log_warn, base_vha, 0x9000,
7481
+ "mmio enabled\n");
7482
+
7483
+ ha->pci_error_state = QLA_PCI_MMIO_ENABLED;
66427484 if (IS_QLA82XX(ha))
66437485 return PCI_ERS_RESULT_RECOVERED;
66447486
66457487 spin_lock_irqsave(&ha->hardware_lock, flags);
66467488 if (IS_QLA2100(ha) || IS_QLA2200(ha)){
6647
- stat = RD_REG_DWORD(&reg->hccr);
7489
+ stat = rd_reg_word(&reg->hccr);
66487490 if (stat & HCCR_RISC_PAUSE)
66497491 risc_paused = 1;
66507492 } else if (IS_QLA23XX(ha)) {
6651
- stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
7493
+ stat = rd_reg_dword(&reg->u.isp2300.host_status);
66527494 if (stat & HSR_RISC_PAUSED)
66537495 risc_paused = 1;
66547496 } else if (IS_FWI2_CAPABLE(ha)) {
6655
- stat = RD_REG_DWORD(&reg24->host_status);
7497
+ stat = rd_reg_dword(&reg24->host_status);
66567498 if (stat & HSRX_RISC_PAUSED)
66577499 risc_paused = 1;
66587500 }
....@@ -6661,119 +7503,12 @@
66617503 if (risc_paused) {
66627504 ql_log(ql_log_info, base_vha, 0x9003,
66637505 "RISC paused -- mmio_enabled, Dumping firmware.\n");
6664
- ha->isp_ops->fw_dump(base_vha, 0);
6665
-
6666
- return PCI_ERS_RESULT_NEED_RESET;
6667
- } else
6668
- return PCI_ERS_RESULT_RECOVERED;
6669
-}
6670
-
6671
-static uint32_t
6672
-qla82xx_error_recovery(scsi_qla_host_t *base_vha)
6673
-{
6674
- uint32_t rval = QLA_FUNCTION_FAILED;
6675
- uint32_t drv_active = 0;
6676
- struct qla_hw_data *ha = base_vha->hw;
6677
- int fn;
6678
- struct pci_dev *other_pdev = NULL;
6679
-
6680
- ql_dbg(ql_dbg_aer, base_vha, 0x9006,
6681
- "Entered %s.\n", __func__);
6682
-
6683
- set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
6684
-
6685
- if (base_vha->flags.online) {
6686
- /* Abort all outstanding commands,
6687
- * so as to be requeued later */
6688
- qla2x00_abort_isp_cleanup(base_vha);
7506
+ qla2xxx_dump_fw(base_vha);
66897507 }
6690
-
6691
-
6692
- fn = PCI_FUNC(ha->pdev->devfn);
6693
- while (fn > 0) {
6694
- fn--;
6695
- ql_dbg(ql_dbg_aer, base_vha, 0x9007,
6696
- "Finding pci device at function = 0x%x.\n", fn);
6697
- other_pdev =
6698
- pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
6699
- ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
6700
- fn));
6701
-
6702
- if (!other_pdev)
6703
- continue;
6704
- if (atomic_read(&other_pdev->enable_cnt)) {
6705
- ql_dbg(ql_dbg_aer, base_vha, 0x9008,
6706
- "Found PCI func available and enable at 0x%x.\n",
6707
- fn);
6708
- pci_dev_put(other_pdev);
6709
- break;
6710
- }
6711
- pci_dev_put(other_pdev);
6712
- }
6713
-
6714
- if (!fn) {
6715
- /* Reset owner */
6716
- ql_dbg(ql_dbg_aer, base_vha, 0x9009,
6717
- "This devfn is reset owner = 0x%x.\n",
6718
- ha->pdev->devfn);
6719
- qla82xx_idc_lock(ha);
6720
-
6721
- qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
6722
- QLA8XXX_DEV_INITIALIZING);
6723
-
6724
- qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
6725
- QLA82XX_IDC_VERSION);
6726
-
6727
- drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
6728
- ql_dbg(ql_dbg_aer, base_vha, 0x900a,
6729
- "drv_active = 0x%x.\n", drv_active);
6730
-
6731
- qla82xx_idc_unlock(ha);
6732
- /* Reset if device is not already reset
6733
- * drv_active would be 0 if a reset has already been done
6734
- */
6735
- if (drv_active)
6736
- rval = qla82xx_start_firmware(base_vha);
6737
- else
6738
- rval = QLA_SUCCESS;
6739
- qla82xx_idc_lock(ha);
6740
-
6741
- if (rval != QLA_SUCCESS) {
6742
- ql_log(ql_log_info, base_vha, 0x900b,
6743
- "HW State: FAILED.\n");
6744
- qla82xx_clear_drv_active(ha);
6745
- qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
6746
- QLA8XXX_DEV_FAILED);
6747
- } else {
6748
- ql_log(ql_log_info, base_vha, 0x900c,
6749
- "HW State: READY.\n");
6750
- qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
6751
- QLA8XXX_DEV_READY);
6752
- qla82xx_idc_unlock(ha);
6753
- ha->flags.isp82xx_fw_hung = 0;
6754
- rval = qla82xx_restart_isp(base_vha);
6755
- qla82xx_idc_lock(ha);
6756
- /* Clear driver state register */
6757
- qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
6758
- qla82xx_set_drv_active(base_vha);
6759
- }
6760
- qla82xx_idc_unlock(ha);
6761
- } else {
6762
- ql_dbg(ql_dbg_aer, base_vha, 0x900d,
6763
- "This devfn is not reset owner = 0x%x.\n",
6764
- ha->pdev->devfn);
6765
- if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
6766
- QLA8XXX_DEV_READY)) {
6767
- ha->flags.isp82xx_fw_hung = 0;
6768
- rval = qla82xx_restart_isp(base_vha);
6769
- qla82xx_idc_lock(ha);
6770
- qla82xx_set_drv_active(base_vha);
6771
- qla82xx_idc_unlock(ha);
6772
- }
6773
- }
6774
- clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
6775
-
6776
- return rval;
7508
+ /* set PCI_ERS_RESULT_NEED_RESET to trigger call to qla2xxx_pci_slot_reset */
7509
+ ql_dbg(ql_dbg_aer, base_vha, 0x600d,
7510
+ "mmio enabled returning.\n");
7511
+ return PCI_ERS_RESULT_NEED_RESET;
67777512 }
67787513
67797514 static pci_ers_result_t
....@@ -6782,12 +7517,13 @@
67827517 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
67837518 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
67847519 struct qla_hw_data *ha = base_vha->hw;
6785
- struct rsp_que *rsp;
6786
- int rc, retries = 10;
7520
+ int rc;
7521
+ struct qla_qpair *qpair = NULL;
67877522
6788
- ql_dbg(ql_dbg_aer, base_vha, 0x9004,
6789
- "Slot Reset.\n");
7523
+ ql_log(ql_log_warn, base_vha, 0x9004,
7524
+ "Slot Reset.\n");
67907525
7526
+ ha->pci_error_state = QLA_PCI_SLOT_RESET;
67917527 /* Workaround: qla2xxx driver which access hardware earlier
67927528 * needs error state to be pci_channel_io_online.
67937529 * Otherwise mailbox command timesout.
....@@ -6812,33 +7548,33 @@
68127548 goto exit_slot_reset;
68137549 }
68147550
6815
- rsp = ha->rsp_q_map[0];
6816
- if (qla2x00_request_irqs(ha, rsp))
6817
- goto exit_slot_reset;
68187551
68197552 if (ha->isp_ops->pci_config(base_vha))
68207553 goto exit_slot_reset;
68217554
6822
- if (IS_QLA82XX(ha)) {
6823
- if (qla82xx_error_recovery(base_vha) == QLA_SUCCESS) {
6824
- ret = PCI_ERS_RESULT_RECOVERED;
6825
- goto exit_slot_reset;
6826
- } else
6827
- goto exit_slot_reset;
6828
- }
7555
+ mutex_lock(&ha->mq_lock);
7556
+ list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
7557
+ qpair->online = 1;
7558
+ mutex_unlock(&ha->mq_lock);
68297559
6830
- while (ha->flags.mbox_busy && retries--)
6831
- msleep(1000);
6832
-
7560
+ ha->flags.eeh_busy = 0;
7561
+ base_vha->flags.online = 1;
68337562 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
6834
- if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS)
6835
- ret = PCI_ERS_RESULT_RECOVERED;
7563
+ ha->isp_ops->abort_isp(base_vha);
68367564 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
68377565
7566
+ if (qla2x00_isp_reg_stat(ha)) {
7567
+ ha->flags.eeh_busy = 1;
7568
+ qla_pci_error_cleanup(base_vha);
7569
+ ql_log(ql_log_warn, base_vha, 0x9005,
7570
+ "Device unable to recover from PCI error.\n");
7571
+ } else {
7572
+ ret = PCI_ERS_RESULT_RECOVERED;
7573
+ }
68387574
68397575 exit_slot_reset:
68407576 ql_dbg(ql_dbg_aer, base_vha, 0x900e,
6841
- "slot_reset return %x.\n", ret);
7577
+ "Slot Reset returning %x.\n", ret);
68427578
68437579 return ret;
68447580 }
....@@ -6850,37 +7586,160 @@
68507586 struct qla_hw_data *ha = base_vha->hw;
68517587 int ret;
68527588
6853
- ql_dbg(ql_dbg_aer, base_vha, 0x900f,
6854
- "pci_resume.\n");
7589
+ ql_log(ql_log_warn, base_vha, 0x900f,
7590
+ "Pci Resume.\n");
7591
+
68557592
68567593 ret = qla2x00_wait_for_hba_online(base_vha);
68577594 if (ret != QLA_SUCCESS) {
68587595 ql_log(ql_log_fatal, base_vha, 0x9002,
68597596 "The device failed to resume I/O from slot/link_reset.\n");
68607597 }
7598
+ ha->pci_error_state = QLA_PCI_RESUME;
7599
+ ql_dbg(ql_dbg_aer, base_vha, 0x600d,
7600
+ "Pci Resume returning.\n");
7601
+}
68617602
6862
- pci_cleanup_aer_uncorrect_error_status(pdev);
7603
+void qla_pci_set_eeh_busy(struct scsi_qla_host *vha)
7604
+{
7605
+ struct qla_hw_data *ha = vha->hw;
7606
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
7607
+ bool do_cleanup = false;
7608
+ unsigned long flags;
68637609
7610
+ if (ha->flags.eeh_busy)
7611
+ return;
7612
+
7613
+ spin_lock_irqsave(&base_vha->work_lock, flags);
7614
+ if (!ha->flags.eeh_busy) {
7615
+ ha->flags.eeh_busy = 1;
7616
+ do_cleanup = true;
7617
+ }
7618
+ spin_unlock_irqrestore(&base_vha->work_lock, flags);
7619
+
7620
+ if (do_cleanup)
7621
+ qla_pci_error_cleanup(base_vha);
7622
+}
7623
+
7624
+/*
7625
+ * this routine will schedule a task to pause IO from interrupt context
7626
+ * if caller sees a PCIE error event (register read = 0xf's)
7627
+ */
7628
+void qla_schedule_eeh_work(struct scsi_qla_host *vha)
7629
+{
7630
+ struct qla_hw_data *ha = vha->hw;
7631
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
7632
+
7633
+ if (ha->flags.eeh_busy)
7634
+ return;
7635
+
7636
+ set_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags);
7637
+ qla2xxx_wake_dpc(base_vha);
7638
+}
7639
+
7640
+static void
7641
+qla_pci_reset_prepare(struct pci_dev *pdev)
7642
+{
7643
+ scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
7644
+ struct qla_hw_data *ha = base_vha->hw;
7645
+ struct qla_qpair *qpair;
7646
+
7647
+ ql_log(ql_log_warn, base_vha, 0xffff,
7648
+ "%s.\n", __func__);
7649
+
7650
+ /*
7651
+ * PCI FLR/function reset is about to reset the
7652
+ * slot. Stop the chip to stop all DMA access.
7653
+ * It is assumed that pci_reset_done will be called
7654
+ * after FLR to resume Chip operation.
7655
+ */
7656
+ ha->flags.eeh_busy = 1;
7657
+ mutex_lock(&ha->mq_lock);
7658
+ list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
7659
+ qpair->online = 0;
7660
+ mutex_unlock(&ha->mq_lock);
7661
+
7662
+ set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
7663
+ qla2x00_abort_isp_cleanup(base_vha);
7664
+ qla2x00_abort_all_cmds(base_vha, DID_RESET << 16);
7665
+}
7666
+
7667
+static void
7668
+qla_pci_reset_done(struct pci_dev *pdev)
7669
+{
7670
+ scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
7671
+ struct qla_hw_data *ha = base_vha->hw;
7672
+ struct qla_qpair *qpair;
7673
+
7674
+ ql_log(ql_log_warn, base_vha, 0xffff,
7675
+ "%s.\n", __func__);
7676
+
7677
+ /*
7678
+ * FLR just completed by PCI layer. Resume adapter
7679
+ */
68647680 ha->flags.eeh_busy = 0;
7681
+ mutex_lock(&ha->mq_lock);
7682
+ list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
7683
+ qpair->online = 1;
7684
+ mutex_unlock(&ha->mq_lock);
7685
+
7686
+ base_vha->flags.online = 1;
7687
+ ha->isp_ops->abort_isp(base_vha);
7688
+ clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
68657689 }
68667690
68677691 static int qla2xxx_map_queues(struct Scsi_Host *shost)
68687692 {
68697693 int rc;
68707694 scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
7695
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
68717696
6872
- if (USER_CTRL_IRQ(vha->hw))
6873
- rc = blk_mq_map_queues(&shost->tag_set);
7697
+ if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
7698
+ rc = blk_mq_map_queues(qmap);
68747699 else
6875
- rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev, 0);
7700
+ rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
68767701 return rc;
68777702 }
7703
+
7704
+struct scsi_host_template qla2xxx_driver_template = {
7705
+ .module = THIS_MODULE,
7706
+ .name = QLA2XXX_DRIVER_NAME,
7707
+ .queuecommand = qla2xxx_queuecommand,
7708
+
7709
+ .eh_timed_out = fc_eh_timed_out,
7710
+ .eh_abort_handler = qla2xxx_eh_abort,
7711
+ .eh_device_reset_handler = qla2xxx_eh_device_reset,
7712
+ .eh_target_reset_handler = qla2xxx_eh_target_reset,
7713
+ .eh_bus_reset_handler = qla2xxx_eh_bus_reset,
7714
+ .eh_host_reset_handler = qla2xxx_eh_host_reset,
7715
+
7716
+ .slave_configure = qla2xxx_slave_configure,
7717
+
7718
+ .slave_alloc = qla2xxx_slave_alloc,
7719
+ .slave_destroy = qla2xxx_slave_destroy,
7720
+ .scan_finished = qla2xxx_scan_finished,
7721
+ .scan_start = qla2xxx_scan_start,
7722
+ .change_queue_depth = scsi_change_queue_depth,
7723
+ .map_queues = qla2xxx_map_queues,
7724
+ .this_id = -1,
7725
+ .cmd_per_lun = 3,
7726
+ .sg_tablesize = SG_ALL,
7727
+
7728
+ .max_sectors = 0xFFFF,
7729
+ .shost_attrs = qla2x00_host_attrs,
7730
+
7731
+ .supported_mode = MODE_INITIATOR,
7732
+ .track_queue_depth = 1,
7733
+ .cmd_size = sizeof(srb_t),
7734
+};
68787735
/*
 * PCI error-recovery (AER) callbacks, plus the reset_prepare/reset_done
 * hooks invoked by the PCI core around a function-level reset (FLR).
 */
static const struct pci_error_handlers qla2xxx_err_handler = {
	.error_detected = qla2xxx_pci_error_detected,
	.mmio_enabled = qla2xxx_pci_mmio_enabled,
	.slot_reset = qla2xxx_pci_slot_reset,
	.resume = qla2xxx_pci_resume,
	.reset_prepare = qla_pci_reset_prepare,
	.reset_done = qla_pci_reset_done,
};
68857744
68867745 static struct pci_device_id qla2xxx_pci_tbl[] = {
....@@ -6906,6 +7765,11 @@
69067765 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
69077766 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
69087767 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) },
7768
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2061) },
7769
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2081) },
7770
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2281) },
7771
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2089) },
7772
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2289) },
69097773 { 0 },
69107774 };
69117775 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
....@@ -6934,6 +7798,91 @@
69347798 qla2x00_module_init(void)
69357799 {
69367800 int ret = 0;
7801
+
7802
+ BUILD_BUG_ON(sizeof(cmd_a64_entry_t) != 64);
7803
+ BUILD_BUG_ON(sizeof(cmd_entry_t) != 64);
7804
+ BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64);
7805
+ BUILD_BUG_ON(sizeof(cont_entry_t) != 64);
7806
+ BUILD_BUG_ON(sizeof(init_cb_t) != 96);
7807
+ BUILD_BUG_ON(sizeof(mrk_entry_t) != 64);
7808
+ BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64);
7809
+ BUILD_BUG_ON(sizeof(request_t) != 64);
7810
+ BUILD_BUG_ON(sizeof(struct abort_entry_24xx) != 64);
7811
+ BUILD_BUG_ON(sizeof(struct abort_iocb_entry_fx00) != 64);
7812
+ BUILD_BUG_ON(sizeof(struct abts_entry_24xx) != 64);
7813
+ BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64);
7814
+ BUILD_BUG_ON(sizeof(struct access_chip_rsp_84xx) != 64);
7815
+ BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64);
7816
+ BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64);
7817
+ BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64);
7818
+ BUILD_BUG_ON(sizeof(struct cmd_type_7) != 64);
7819
+ BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64);
7820
+ BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64);
7821
+ BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64);
7822
+ BUILD_BUG_ON(sizeof(struct ct_fdmi1_hba_attributes) != 2344);
7823
+ BUILD_BUG_ON(sizeof(struct ct_fdmi2_hba_attributes) != 4424);
7824
+ BUILD_BUG_ON(sizeof(struct ct_fdmi2_port_attributes) != 4164);
7825
+ BUILD_BUG_ON(sizeof(struct ct_fdmi_hba_attr) != 260);
7826
+ BUILD_BUG_ON(sizeof(struct ct_fdmi_port_attr) != 260);
7827
+ BUILD_BUG_ON(sizeof(struct ct_rsp_hdr) != 16);
7828
+ BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64);
7829
+ BUILD_BUG_ON(sizeof(struct device_reg_24xx) != 256);
7830
+ BUILD_BUG_ON(sizeof(struct device_reg_25xxmq) != 24);
7831
+ BUILD_BUG_ON(sizeof(struct device_reg_2xxx) != 256);
7832
+ BUILD_BUG_ON(sizeof(struct device_reg_82xx) != 1288);
7833
+ BUILD_BUG_ON(sizeof(struct device_reg_fx00) != 216);
7834
+ BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64);
7835
+ BUILD_BUG_ON(sizeof(struct els_sts_entry_24xx) != 64);
7836
+ BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64);
7837
+ BUILD_BUG_ON(sizeof(struct imm_ntfy_from_isp) != 64);
7838
+ BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128);
7839
+ BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128);
7840
+ BUILD_BUG_ON(sizeof(struct logio_entry_24xx) != 64);
7841
+ BUILD_BUG_ON(sizeof(struct mbx_entry) != 64);
7842
+ BUILD_BUG_ON(sizeof(struct mid_init_cb_24xx) != 5252);
7843
+ BUILD_BUG_ON(sizeof(struct mrk_entry_24xx) != 64);
7844
+ BUILD_BUG_ON(sizeof(struct nvram_24xx) != 512);
7845
+ BUILD_BUG_ON(sizeof(struct nvram_81xx) != 512);
7846
+ BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64);
7847
+ BUILD_BUG_ON(sizeof(struct pt_ls4_rx_unsol) != 64);
7848
+ BUILD_BUG_ON(sizeof(struct purex_entry_24xx) != 64);
7849
+ BUILD_BUG_ON(sizeof(struct qla2100_fw_dump) != 123634);
7850
+ BUILD_BUG_ON(sizeof(struct qla2300_fw_dump) != 136100);
7851
+ BUILD_BUG_ON(sizeof(struct qla24xx_fw_dump) != 37976);
7852
+ BUILD_BUG_ON(sizeof(struct qla25xx_fw_dump) != 39228);
7853
+ BUILD_BUG_ON(sizeof(struct qla2xxx_fce_chain) != 52);
7854
+ BUILD_BUG_ON(sizeof(struct qla2xxx_fw_dump) != 136172);
7855
+ BUILD_BUG_ON(sizeof(struct qla2xxx_mq_chain) != 524);
7856
+ BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_chain) != 8);
7857
+ BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_header) != 12);
7858
+ BUILD_BUG_ON(sizeof(struct qla2xxx_offld_chain) != 24);
7859
+ BUILD_BUG_ON(sizeof(struct qla81xx_fw_dump) != 39420);
7860
+ BUILD_BUG_ON(sizeof(struct qla82xx_uri_data_desc) != 28);
7861
+ BUILD_BUG_ON(sizeof(struct qla82xx_uri_table_desc) != 32);
7862
+ BUILD_BUG_ON(sizeof(struct qla83xx_fw_dump) != 51196);
7863
+ BUILD_BUG_ON(sizeof(struct qla_fcp_prio_cfg) != FCP_PRIO_CFG_SIZE);
7864
+ BUILD_BUG_ON(sizeof(struct qla_fdt_layout) != 128);
7865
+ BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8);
7866
+ BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16);
7867
+ BUILD_BUG_ON(sizeof(struct qla_npiv_entry) != 24);
7868
+ BUILD_BUG_ON(sizeof(struct qla_npiv_header) != 16);
7869
+ BUILD_BUG_ON(sizeof(struct rdp_rsp_payload) != 336);
7870
+ BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064);
7871
+ BUILD_BUG_ON(sizeof(struct sts_entry_24xx) != 64);
7872
+ BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry) != 64);
7873
+ BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry_fx00) != 64);
7874
+ BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64);
7875
+ BUILD_BUG_ON(sizeof(struct verify_chip_rsp_84xx) != 52);
7876
+ BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56);
7877
+ BUILD_BUG_ON(sizeof(struct vp_config_entry_24xx) != 64);
7878
+ BUILD_BUG_ON(sizeof(struct vp_ctrl_entry_24xx) != 64);
7879
+ BUILD_BUG_ON(sizeof(struct vp_rpt_id_entry_24xx) != 64);
7880
+ BUILD_BUG_ON(sizeof(sts21_entry_t) != 64);
7881
+ BUILD_BUG_ON(sizeof(sts22_entry_t) != 64);
7882
+ BUILD_BUG_ON(sizeof(sts_cont_entry_t) != 64);
7883
+ BUILD_BUG_ON(sizeof(sts_entry_t) != 64);
7884
+ BUILD_BUG_ON(sizeof(sw_info_t) != 32);
7885
+ BUILD_BUG_ON(sizeof(target_id_t) != 2);
69377886
69387887 /* Allocate cache for SRBs. */
69397888 srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
....@@ -6964,6 +7913,9 @@
69647913 strcat(qla2x00_version_str, "-debug");
69657914 if (ql2xextended_error_logging == 1)
69667915 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
7916
+
7917
+ if (ql2x_ini_mode == QLA2XXX_INI_MODE_DUAL)
7918
+ qla_insert_tgt_attrs();
69677919
69687920 qla2xxx_transport_template =
69697921 fc_attach_transport(&qla2xxx_transport_functions);
....@@ -7022,15 +7974,15 @@
/*
 * Module unload: release everything acquired at module load.  The PCI
 * driver is unregistered first so no adapter can bind (and hence no new
 * users of the caches or transport templates can appear) while the rest
 * is torn down.  Statement order below is deliberate - do not reorder.
 */
static void __exit
qla2x00_module_exit(void)
{
	pci_unregister_driver(&qla2xxx_pci_driver);
	qla2x00_release_firmware();
	/* kmem_cache_destroy() is a no-op on NULL, so no guard is needed
	 * for the optional CT6 CTX cache. */
	kmem_cache_destroy(ctx_cachep);
	fc_release_transport(qla2xxx_transport_vport_template);
	/* apidev_major < 0 means the api char device never registered */
	if (apidev_major >= 0)
		unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
	fc_release_transport(qla2xxx_transport_template);
	qlt_exit();
	kmem_cache_destroy(srb_cachep);
}
70357987
70367988 module_init(qla2x00_module_init);
....@@ -7039,7 +7991,6 @@
70397991 MODULE_AUTHOR("QLogic Corporation");
70407992 MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
70417993 MODULE_LICENSE("GPL");
7042
-MODULE_VERSION(QLA2XXX_VERSION);
70437994 MODULE_FIRMWARE(FW_FILE_ISP21XX);
70447995 MODULE_FIRMWARE(FW_FILE_ISP22XX);
70457996 MODULE_FIRMWARE(FW_FILE_ISP2300);