2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/scsi/qedf/qedf_main.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * QLogic FCoE Offload Driver
  * Copyright (c) 2016-2018 Cavium Inc.
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -16,6 +13,7 @@
 #include <linux/interrupt.h>
 #include <linux/list.h>
 #include <linux/kthread.h>
+#include <linux/phylink.h>
 #include <scsi/libfc.h>
 #include <scsi/scsi_host.h>
 #include <scsi/fc_frame.h>
@@ -30,6 +28,10 @@
 
 static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id);
 static void qedf_remove(struct pci_dev *pdev);
+static void qedf_shutdown(struct pci_dev *pdev);
+static void qedf_schedule_recovery_handler(void *dev);
+static void qedf_recovery_handler(struct work_struct *work);
+static int qedf_suspend(struct pci_dev *pdev, pm_message_t state);
 
 /*
  * Driver module parameters.
@@ -40,7 +42,7 @@
 	"remote ports (default 60)");
 
 uint qedf_debug = QEDF_LOG_INFO;
-module_param_named(debug, qedf_debug, uint, S_IRUGO);
+module_param_named(debug, qedf_debug, uint, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging"
 	" mask");
 
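Editor's note: adding S_IWUSR makes the debug mask writable by root at runtime through sysfs instead of requiring a module reload. A minimal sketch of the same pattern for an out-of-tree module follows; the module and symbol names (demo_*) are hypothetical, not part of qedf:

```c
// Minimal sketch of a runtime-writable module parameter, assuming a
// hypothetical module named "demo". After insmod, root can write:
//   echo 0xff > /sys/module/demo/parameters/debug
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static uint demo_debug;	/* readable by all via sysfs, writable by root */
module_param_named(debug, demo_debug, uint, 0644);	/* == S_IRUGO|S_IWUSR */
MODULE_PARM_DESC(debug, "Debug mask (hypothetical example)");

static int __init demo_init(void)
{
	pr_info("demo: debug mask 0x%x\n", demo_debug);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```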
@@ -104,6 +106,12 @@
 MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module "
 	"during probe (0-3: 0 more verbose).");
 
+static bool qedf_enable_recovery = true;
+module_param_named(enable_recovery, qedf_enable_recovery,
+		bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(enable_recovery, "Enable/disable recovery on driver/firmware "
+		"interface level errors 0 = Disabled, 1 = Enabled (Default: 1).");
+
 struct workqueue_struct *qedf_io_wq;
 
 static struct fcoe_percpu_s qedf_global;
@@ -113,35 +121,45 @@
 
 void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id)
 {
-	qedf->vlan_id = vlan_id;
-	qedf->vlan_id |= qedf->prio << VLAN_PRIO_SHIFT;
-	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Setting vlan_id=%04x "
-	    "prio=%d.\n", vlan_id, qedf->prio);
+	int vlan_id_tmp = 0;
+
+	vlan_id_tmp = vlan_id | (qedf->prio << VLAN_PRIO_SHIFT);
+	qedf->vlan_id = vlan_id_tmp;
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+		  "Setting vlan_id=0x%04x prio=%d.\n",
+		  vlan_id_tmp, qedf->prio);
 }
 
 /* Returns true if we have a valid vlan, false otherwise */
 static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
 {
-	int rc;
-
-	if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
-		QEDF_ERR(&(qedf->dbg_ctx), "Link not up.\n");
-		return false;
-	}
 
 	while (qedf->fipvlan_retries--) {
-		if (qedf->vlan_id > 0)
+		/* This is to catch if link goes down during fipvlan retries */
+		if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
+			QEDF_ERR(&qedf->dbg_ctx, "Link not up.\n");
+			return false;
+		}
+
+		if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+			QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n");
+			return false;
+		}
+
+		if (qedf->vlan_id > 0) {
+			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+				  "vlan = 0x%x already set, calling ctlr_link_up.\n",
+				  qedf->vlan_id);
+			if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
+				fcoe_ctlr_link_up(&qedf->ctlr);
 			return true;
+		}
+
 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
 		    "Retry %d.\n", qedf->fipvlan_retries);
 		init_completion(&qedf->fipvlan_compl);
 		qedf_fcoe_send_vlan_req(qedf);
-		rc = wait_for_completion_timeout(&qedf->fipvlan_compl,
-		    1 * HZ);
-		if (rc > 0) {
-			fcoe_ctlr_link_up(&qedf->ctlr);
-			return true;
-		}
+		wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ);
 	}
 
 	return false;
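Editor's note: qedf_set_vlan_id() above composes the 802.1Q tag by ORing the FCoE priority into the PCP bits. A hedged sketch of that bit layout, using only the standard definitions from <linux/if_vlan.h> (VLAN_PRIO_SHIFT is 13); the helper name is illustrative:

```c
// Sketch of 802.1Q tag composition as used by qedf_set_vlan_id(),
// assuming <linux/if_vlan.h>. demo_build_vlan_tci is a hypothetical helper.
#include <linux/if_vlan.h>

static u16 demo_build_vlan_tci(u16 vlan_id, u8 prio)
{
	/* Bits 11:0 carry the VLAN ID, bits 15:13 the PCP (priority). */
	return (vlan_id & VLAN_VID_MASK) | ((u16)prio << VLAN_PRIO_SHIFT);
}
/* e.g. demo_build_vlan_tci(1002, 3) == 0x63EA: VLAN 1002 at priority 3. */
```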
@@ -153,12 +171,21 @@
 	    container_of(work, struct qedf_ctx, link_update.work);
 	int rc;
 
-	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Entered.\n");
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Entered. link_state=%d.\n",
+		  atomic_read(&qedf->link_state));
 
 	if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
 		rc = qedf_initiate_fipvlan_req(qedf);
 		if (rc)
 			return;
+
+		if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
+			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+				  "Link is down, resetting vlan_id.\n");
+			qedf->vlan_id = 0;
+			return;
+		}
+
 		/*
 		 * If we get here then we never received a response to our
 		 * fip vlan request so set the vlan_id to the default and
@@ -185,7 +212,9 @@
 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
 		    "Calling fcoe_ctlr_link_down().\n");
 		fcoe_ctlr_link_down(&qedf->ctlr);
-		qedf_wait_for_upload(qedf);
+		if (qedf_wait_for_upload(qedf) == false)
+			QEDF_ERR(&qedf->dbg_ctx,
+				 "Could not upload all sessions.\n");
 		/* Reset the number of FIP VLAN retries */
 		qedf->fipvlan_retries = qedf_fipvlan_retries;
 	}
@@ -263,6 +292,7 @@
 	else if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
 		/* Set the source MAC we will use for FCoE traffic */
 		qedf_set_data_src_addr(qedf, fp);
+		qedf->flogi_pending = 0;
 	}
 
 	/* Complete flogi_compl so we can proceed to sending ADISCs */
@@ -288,6 +318,11 @@
 	 */
 	if (resp == fc_lport_flogi_resp) {
 		qedf->flogi_cnt++;
+		if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
+			schedule_delayed_work(&qedf->stag_work, 2);
+			return NULL;
+		}
+		qedf->flogi_pending++;
 		return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
 		    arg, timeout);
 	}
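Editor's note: the hunk above caps FLOGI retries at QEDF_FLOGI_RETRY_CNT and then escalates to the deferred stag_work (a soft context reset) rather than retrying forever. A generic, hedged sketch of that bounded-retry-then-escalate pattern; all demo_* names are hypothetical, while struct delayed_work and schedule_delayed_work() are the real kernel APIs:

```c
// Sketch of the bounded-retry-then-escalate pattern used above.
#include <linux/workqueue.h>

#define DEMO_RETRY_CNT 3

struct demo_ctx {
	unsigned int pending;		/* unanswered attempts so far */
	struct delayed_work reset_work;	/* runs the heavyweight recovery */
};

static bool demo_try_send(struct demo_ctx *ctx)
{
	if (ctx->pending >= DEMO_RETRY_CNT) {
		/* Too many unanswered attempts: hand off to deferred reset. */
		schedule_delayed_work(&ctx->reset_work, 2 /* jiffies */);
		return false;
	}
	ctx->pending++;	/* reset to 0 on a successful response */
	return true;	/* caller performs the actual send */
}
```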
@@ -302,8 +337,10 @@
 
 	lport = qedf->lport;
 
-	if (!lport->tt.elsct_send)
+	if (!lport->tt.elsct_send) {
+		QEDF_ERR(&qedf->dbg_ctx, "tt.elsct_send not set.\n");
 		return -EINVAL;
+	}
 
 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
 	if (!fp) {
@@ -321,11 +358,6 @@
 	return 0;
 }
 
-struct qedf_tmp_rdata_item {
-	struct fc_rport_priv *rdata;
-	struct list_head list;
-};
-
 /*
  * This function is called if link_down_tmo is in use. If we get a link up and
  * link_down_tmo has not expired then use just FLOGI/ADISC to recover our
@@ -335,9 +367,8 @@
 {
 	struct qedf_ctx *qedf =
 	    container_of(work, struct qedf_ctx, link_recovery.work);
-	struct qedf_rport *fcport;
+	struct fc_lport *lport = qedf->lport;
 	struct fc_rport_priv *rdata;
-	struct qedf_tmp_rdata_item *rdata_item, *tmp_rdata_item;
 	bool rc;
 	int retries = 30;
 	int rval, i;
@@ -404,38 +435,20 @@
 	 * Call lport->tt.rport_login which will cause libfc to send an
 	 * ADISC since the rport is in state ready.
 	 */
-	rcu_read_lock();
-	list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
-		rdata = fcport->rdata;
-		if (rdata == NULL)
-			continue;
-		rdata_item = kzalloc(sizeof(struct qedf_tmp_rdata_item),
-		    GFP_ATOMIC);
-		if (!rdata_item)
-			continue;
+	mutex_lock(&lport->disc.disc_mutex);
+	list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
 		if (kref_get_unless_zero(&rdata->kref)) {
-			rdata_item->rdata = rdata;
-			list_add(&rdata_item->list, &rdata_login_list);
-		} else
-			kfree(rdata_item);
+			fc_rport_login(rdata);
+			kref_put(&rdata->kref, fc_rport_destroy);
+		}
 	}
-	rcu_read_unlock();
-	/*
-	 * Do the fc_rport_login outside of the rcu lock so we don't take a
-	 * mutex in an atomic context.
-	 */
-	list_for_each_entry_safe(rdata_item, tmp_rdata_item, &rdata_login_list,
-	    list) {
-		list_del(&rdata_item->list);
-		fc_rport_login(rdata_item->rdata);
-		kref_put(&rdata_item->rdata->kref, fc_rport_destroy);
-		kfree(rdata_item);
-	}
+	mutex_unlock(&lport->disc.disc_mutex);
 }
 
 static void qedf_update_link_speed(struct qedf_ctx *qedf,
 	struct qed_link_output *link)
 {
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps);
 	struct fc_lport *lport = qedf->lport;
 
 	lport->link_speed = FC_PORTSPEED_UNKNOWN;
@@ -458,6 +471,9 @@
 	case 100000:
 		lport->link_speed = FC_PORTSPEED_100GBIT;
 		break;
+	case 20000:
+		lport->link_speed = FC_PORTSPEED_20GBIT;
+		break;
 	default:
 		lport->link_speed = FC_PORTSPEED_UNKNOWN;
 		break;
@@ -467,22 +483,104 @@
 	 * Set supported link speed by querying the supported
 	 * capabilities of the link.
 	 */
-	if (link->supported_caps & SUPPORTED_10000baseKR_Full)
+
+	phylink_zero(sup_caps);
+	phylink_set(sup_caps, 10000baseT_Full);
+	phylink_set(sup_caps, 10000baseKX4_Full);
+	phylink_set(sup_caps, 10000baseR_FEC);
+	phylink_set(sup_caps, 10000baseCR_Full);
+	phylink_set(sup_caps, 10000baseSR_Full);
+	phylink_set(sup_caps, 10000baseLR_Full);
+	phylink_set(sup_caps, 10000baseLRM_Full);
+	phylink_set(sup_caps, 10000baseKR_Full);
+
+	if (linkmode_intersects(link->supported_caps, sup_caps))
 		lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
-	if (link->supported_caps & SUPPORTED_25000baseKR_Full)
+
+	phylink_zero(sup_caps);
+	phylink_set(sup_caps, 25000baseKR_Full);
+	phylink_set(sup_caps, 25000baseCR_Full);
+	phylink_set(sup_caps, 25000baseSR_Full);
+
+	if (linkmode_intersects(link->supported_caps, sup_caps))
 		lport->link_supported_speeds |= FC_PORTSPEED_25GBIT;
-	if (link->supported_caps & SUPPORTED_40000baseLR4_Full)
+
+	phylink_zero(sup_caps);
+	phylink_set(sup_caps, 40000baseLR4_Full);
+	phylink_set(sup_caps, 40000baseKR4_Full);
+	phylink_set(sup_caps, 40000baseCR4_Full);
+	phylink_set(sup_caps, 40000baseSR4_Full);
+
+	if (linkmode_intersects(link->supported_caps, sup_caps))
 		lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
-	if (link->supported_caps & SUPPORTED_50000baseKR2_Full)
+
+	phylink_zero(sup_caps);
+	phylink_set(sup_caps, 50000baseKR2_Full);
+	phylink_set(sup_caps, 50000baseCR2_Full);
+	phylink_set(sup_caps, 50000baseSR2_Full);
+
+	if (linkmode_intersects(link->supported_caps, sup_caps))
 		lport->link_supported_speeds |= FC_PORTSPEED_50GBIT;
-	if (link->supported_caps & SUPPORTED_100000baseKR4_Full)
+
+	phylink_zero(sup_caps);
+	phylink_set(sup_caps, 100000baseKR4_Full);
+	phylink_set(sup_caps, 100000baseSR4_Full);
+	phylink_set(sup_caps, 100000baseCR4_Full);
+	phylink_set(sup_caps, 100000baseLR4_ER4_Full);
+
+	if (linkmode_intersects(link->supported_caps, sup_caps))
 		lport->link_supported_speeds |= FC_PORTSPEED_100GBIT;
-	fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
+
+	phylink_zero(sup_caps);
+	phylink_set(sup_caps, 20000baseKR2_Full);
+
+	if (linkmode_intersects(link->supported_caps, sup_caps))
+		lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
+
+	if (lport->host && lport->host->shost_data)
+		fc_host_supported_speeds(lport->host) =
+			lport->link_supported_speeds;
+}
+
+static void qedf_bw_update(void *dev)
+{
+	struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
+	struct qed_link_output link;
+
+	/* Get the latest status of the link */
+	qed_ops->common->get_link(qedf->cdev, &link);
+
+	if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Ignore link update, driver getting unload.\n");
+		return;
+	}
+
+	if (link.link_up) {
+		if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
+			qedf_update_link_speed(qedf, &link);
+		else
+			QEDF_ERR(&qedf->dbg_ctx,
+				 "Ignore bw update, link is down.\n");
+
+	} else {
+		QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n");
+	}
 }
 
 static void qedf_link_update(void *dev, struct qed_link_output *link)
 {
 	struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
+
+	/*
+	 * Prevent race where we're removing the module and we get link update
+	 * for qed.
+	 */
+	if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+		QEDF_ERR(&qedf->dbg_ctx,
			 "Ignore link update, driver getting unload.\n");
+		return;
+	}
 
 	if (link->link_up) {
 		if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
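Editor's note: the legacy SUPPORTED_* capability flags were bits in a u32 that ran out of room, so ethtool link modes are now an arbitrarily sized bitmap; the hunk above therefore collects each speed family into a mask and tests for overlap. A hedged sketch of that bitmap test, using the in-kernel helpers from <linux/phylink.h> and <linux/linkmode.h>; the function and variable names are illustrative, not part of qedf:

```c
// Sketch of the linkmode-bitmap test used above.
#include <linux/linkmode.h>
#include <linux/phylink.h>

static bool demo_supports_25g(const unsigned long *supported_caps)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask);	/* bitmap sized for all modes */

	phylink_zero(mask);
	phylink_set(mask, 25000baseKR_Full);	/* expands to ETHTOOL_LINK_MODE_..._BIT */
	phylink_set(mask, 25000baseCR_Full);
	phylink_set(mask, 25000baseSR_Full);

	/* True if the device advertises any 25G variant. */
	return linkmode_intersects(supported_caps, mask);
}
```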
@@ -563,7 +661,7 @@
 	tmp_prio = get->operational.app_prio.fcoe;
 	if (qedf_default_prio > -1)
 		qedf->prio = qedf_default_prio;
-	else if (tmp_prio < 0 || tmp_prio > 7) {
+	else if (tmp_prio > 7) {
 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
 		    "FIP/FCoE prio %d out of range, setting to %d.\n",
 		    tmp_prio, QEDF_DEFAULT_PRIO);
@@ -596,9 +694,12 @@
 static struct qed_fcoe_cb_ops qedf_cb_ops = {
 	{
 		.link_update = qedf_link_update,
+		.bw_update = qedf_bw_update,
+		.schedule_recovery_handler = qedf_schedule_recovery_handler,
 		.dcbx_aen = qedf_dcbx_handler,
 		.get_generic_tlv_data = qedf_get_generic_tlv_data,
 		.get_protocol_tlv_data = qedf_get_protocol_tlv_data,
+		.schedule_hw_err_handler = qedf_schedule_hw_err_handler,
 	}
 };
 
604705
....@@ -615,50 +716,113 @@
615716 static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
616717 {
617718 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
618
- struct fc_rport_libfc_priv *rp = rport->dd_data;
619
- struct qedf_rport *fcport;
620719 struct fc_lport *lport;
621720 struct qedf_ctx *qedf;
622721 struct qedf_ioreq *io_req;
722
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
723
+ struct fc_rport_priv *rdata;
724
+ struct qedf_rport *fcport = NULL;
623725 int rc = FAILED;
726
+ int wait_count = 100;
727
+ int refcount = 0;
624728 int rval;
625
-
626
- if (fc_remote_port_chkready(rport)) {
627
- QEDF_ERR(NULL, "rport not ready\n");
628
- goto out;
629
- }
729
+ int got_ref = 0;
630730
631731 lport = shost_priv(sc_cmd->device->host);
632732 qedf = (struct qedf_ctx *)lport_priv(lport);
633733
634
- if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
635
- QEDF_ERR(&(qedf->dbg_ctx), "link not ready.\n");
636
- goto out;
637
- }
638
-
734
+ /* rport and tgt are allocated together, so tgt should be non-NULL */
639735 fcport = (struct qedf_rport *)&rp[1];
640
-
641
- io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
642
- if (!io_req) {
643
- QEDF_ERR(&(qedf->dbg_ctx), "io_req is NULL.\n");
736
+ rdata = fcport->rdata;
737
+ if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
738
+ QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
644739 rc = SUCCESS;
645740 goto out;
646741 }
647742
648
- QEDF_ERR(&(qedf->dbg_ctx), "Aborting io_req sc_cmd=%p xid=0x%x "
649
- "fp_idx=%d.\n", sc_cmd, io_req->xid, io_req->fp_idx);
743
+
744
+ io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
745
+ if (!io_req) {
746
+ QEDF_ERR(&qedf->dbg_ctx,
747
+ "sc_cmd not queued with lld, sc_cmd=%p op=0x%02x, port_id=%06x\n",
748
+ sc_cmd, sc_cmd->cmnd[0],
749
+ rdata->ids.port_id);
750
+ rc = SUCCESS;
751
+ goto drop_rdata_kref;
752
+ }
753
+
754
+ rval = kref_get_unless_zero(&io_req->refcount); /* ID: 005 */
755
+ if (rval)
756
+ got_ref = 1;
757
+
758
+ /* If we got a valid io_req, confirm it belongs to this sc_cmd. */
759
+ if (!rval || io_req->sc_cmd != sc_cmd) {
760
+ QEDF_ERR(&qedf->dbg_ctx,
761
+ "Freed/Incorrect io_req, io_req->sc_cmd=%p, sc_cmd=%p, port_id=%06x, bailing out.\n",
762
+ io_req->sc_cmd, sc_cmd, rdata->ids.port_id);
763
+
764
+ goto drop_rdata_kref;
765
+ }
766
+
767
+ if (fc_remote_port_chkready(rport)) {
768
+ refcount = kref_read(&io_req->refcount);
769
+ QEDF_ERR(&qedf->dbg_ctx,
770
+ "rport not ready, io_req=%p, xid=0x%x sc_cmd=%p op=0x%02x, refcount=%d, port_id=%06x\n",
771
+ io_req, io_req->xid, sc_cmd, sc_cmd->cmnd[0],
772
+ refcount, rdata->ids.port_id);
773
+
774
+ goto drop_rdata_kref;
775
+ }
776
+
777
+ rc = fc_block_scsi_eh(sc_cmd);
778
+ if (rc)
779
+ goto drop_rdata_kref;
780
+
781
+ if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
782
+ QEDF_ERR(&qedf->dbg_ctx,
783
+ "Connection uploading, xid=0x%x., port_id=%06x\n",
784
+ io_req->xid, rdata->ids.port_id);
785
+ while (io_req->sc_cmd && (wait_count != 0)) {
786
+ msleep(100);
787
+ wait_count--;
788
+ }
789
+ if (wait_count) {
790
+ QEDF_ERR(&qedf->dbg_ctx, "ABTS succeeded\n");
791
+ rc = SUCCESS;
792
+ } else {
793
+ QEDF_ERR(&qedf->dbg_ctx, "ABTS failed\n");
794
+ rc = FAILED;
795
+ }
796
+ goto drop_rdata_kref;
797
+ }
798
+
799
+ if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
800
+ QEDF_ERR(&qedf->dbg_ctx, "link not ready.\n");
801
+ goto drop_rdata_kref;
802
+ }
803
+
804
+ QEDF_ERR(&qedf->dbg_ctx,
805
+ "Aborting io_req=%p sc_cmd=%p xid=0x%x fp_idx=%d, port_id=%06x.\n",
806
+ io_req, sc_cmd, io_req->xid, io_req->fp_idx,
807
+ rdata->ids.port_id);
650808
651809 if (qedf->stop_io_on_error) {
652810 qedf_stop_all_io(qedf);
653811 rc = SUCCESS;
654
- goto out;
812
+ goto drop_rdata_kref;
655813 }
656814
657815 init_completion(&io_req->abts_done);
658816 rval = qedf_initiate_abts(io_req, true);
659817 if (rval) {
660818 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
661
- goto out;
819
+ /*
820
+ * If we fail to queue the ABTS then return this command to
821
+ * the SCSI layer as it will own and free the xid
822
+ */
823
+ rc = SUCCESS;
824
+ qedf_scsi_done(qedf, io_req, DID_ERROR);
825
+ goto drop_rdata_kref;
662826 }
663827
664828 wait_for_completion(&io_req->abts_done);
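Editor's note: the reworked abort handler pins both the rdata and the io_req with kref_get_unless_zero() before touching them, so an object whose refcount already hit zero is treated as stale instead of being resurrected. A hedged, self-contained sketch of that pattern using only <linux/kref.h>; demo_obj and demo_release are illustrative names:

```c
// Sketch of the "pin or treat as stale" refcount pattern used above.
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
	struct kref kref;
};

static void demo_release(struct kref *kref)
{
	kfree(container_of(kref, struct demo_obj, kref));
}

static int demo_use(struct demo_obj *obj)
{
	/*
	 * Fails (returns 0) if the refcount already reached zero: the
	 * object is being torn down and must not be resurrected.
	 */
	if (!kref_get_unless_zero(&obj->kref))
		return -ENODEV;

	/* ... safely dereference obj here ... */

	kref_put(&obj->kref, demo_release);	/* drop the pin we took */
	return 0;
}
```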
@@ -684,38 +848,68 @@
 		QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n",
 		    io_req->xid);
 
+drop_rdata_kref:
+	kref_put(&rdata->kref, fc_rport_destroy);
 out:
+	if (got_ref)
+		kref_put(&io_req->refcount, qedf_release_cmd);
 	return rc;
 }
 
 static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd)
 {
-	QEDF_ERR(NULL, "TARGET RESET Issued...");
+	QEDF_ERR(NULL, "%d:0:%d:%lld: TARGET RESET Issued...",
+		 sc_cmd->device->host->host_no, sc_cmd->device->id,
+		 sc_cmd->device->lun);
 	return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
 }
 
 static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
 {
-	QEDF_ERR(NULL, "LUN RESET Issued...\n");
+	QEDF_ERR(NULL, "%d:0:%d:%lld: LUN RESET Issued... ",
		 sc_cmd->device->host->host_no, sc_cmd->device->id,
		 sc_cmd->device->lun);
 	return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
 }
 
-void qedf_wait_for_upload(struct qedf_ctx *qedf)
+bool qedf_wait_for_upload(struct qedf_ctx *qedf)
 {
-	while (1) {
+	struct qedf_rport *fcport = NULL;
+	int wait_cnt = 120;
+
+	while (wait_cnt--) {
 		if (atomic_read(&qedf->num_offloads))
-			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
-			    "Waiting for all uploads to complete.\n");
+			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+				  "Waiting for all uploads to complete num_offloads = 0x%x.\n",
+				  atomic_read(&qedf->num_offloads));
 		else
-			break;
+			return true;
 		msleep(500);
 	}
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
+		if (fcport && test_bit(QEDF_RPORT_SESSION_READY,
+				       &fcport->flags)) {
+			if (fcport->rdata)
+				QEDF_ERR(&qedf->dbg_ctx,
+					 "Waiting for fcport %p portid=%06x.\n",
+					 fcport, fcport->rdata->ids.port_id);
+		} else {
+			QEDF_ERR(&qedf->dbg_ctx,
+				 "Waiting for fcport %p.\n", fcport);
+		}
+	}
+	rcu_read_unlock();
+	return false;
+
 }
 
 /* Performs soft reset of qedf_ctx by simulating a link down/up */
-static void qedf_ctx_soft_reset(struct fc_lport *lport)
+void qedf_ctx_soft_reset(struct fc_lport *lport)
 {
 	struct qedf_ctx *qedf;
+	struct qed_link_output if_link;
 
 	if (lport->vport) {
 		QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n");
@@ -724,13 +918,35 @@
 
 	qedf = lport_priv(lport);
 
+	qedf->flogi_pending = 0;
 	/* For host reset, essentially do a soft link up/down */
 	atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+		  "Queuing link down work.\n");
 	queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
 	    0);
-	qedf_wait_for_upload(qedf);
+
+	if (qedf_wait_for_upload(qedf) == false) {
+		QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
+		WARN_ON(atomic_read(&qedf->num_offloads));
+	}
+
+	/* Before setting link up query physical link state */
+	qed_ops->common->get_link(qedf->cdev, &if_link);
+	/* Bail if the physical link is not up */
+	if (!if_link.link_up) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+			  "Physical link is not up.\n");
+		return;
+	}
+	/* Flush and wait to make sure link down is processed */
+	flush_delayed_work(&qedf->link_update);
+	msleep(500);
+
 	atomic_set(&qedf->link_state, QEDF_LINK_UP);
 	qedf->vlan_id = 0;
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+		  "Queue link up work.\n");
 	queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
 	    0);
 }
@@ -740,22 +956,6 @@
 {
 	struct fc_lport *lport;
 	struct qedf_ctx *qedf;
-	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
-	struct fc_rport_libfc_priv *rp = rport->dd_data;
-	struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
-	int rval;
-
-	rval = fc_remote_port_chkready(rport);
-
-	if (rval) {
-		QEDF_ERR(NULL, "device_reset rport not ready\n");
-		return FAILED;
-	}
-
-	if (fcport == NULL) {
-		QEDF_ERR(NULL, "device_reset: rport is NULL\n");
-		return FAILED;
-	}
 
 	lport = shost_priv(sc_cmd->device->host);
 	qedf = lport_priv(lport);
@@ -785,7 +985,6 @@
 	.name = QEDF_MODULE_NAME,
 	.this_id = -1,
 	.cmd_per_lun = 32,
-	.use_clustering = ENABLE_CLUSTERING,
 	.max_sectors = 0xffff,
 	.queuecommand = qedf_queuecommand,
 	.shost_attrs = qedf_host_attrs,
@@ -852,9 +1051,8 @@
 	return rc;
 }
 
-/**
+/*
  * qedf_xmit - qedf FCoE frame transmit function
- *
 */
static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
{
@@ -908,8 +1106,10 @@
 		    "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id));
 		kfree_skb(skb);
 		rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id));
-		if (rdata)
+		if (rdata) {
 			rdata->retries = lport->max_rport_retry_count;
+			kref_put(&rdata->kref, fc_rport_destroy);
+		}
 		return -EINVAL;
 	}
 	/* End NPIV filtering */
@@ -969,7 +1169,7 @@
 			return -ENOMEM;
 		}
 		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
-		cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
+		cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
 	} else {
 		cp = skb_put(skb, tlen);
 	}
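Editor's note: skb_frag_t no longer exposes page_offset directly; the skb_frag_off() accessor hides the field layout, which is what the one-line change above adopts. A hedged sketch of walking fragments with the accessor API, assuming only <linux/skbuff.h>; the helper name is illustrative:

```c
// Sketch of fragment iteration with the accessor API used above.
#include <linux/skbuff.h>

static unsigned int demo_sum_frag_extents(struct sk_buff *skb)
{
	unsigned int total = 0;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		/*
		 * skb_frag_off()/skb_frag_size() replace direct field
		 * access such as frag->page_offset.
		 */
		total += skb_frag_off(frag) + skb_frag_size(frag);
	}
	return total;
}
```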
@@ -1032,7 +1232,12 @@
 	if (qedf_dump_frames)
 		print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
 		    1, skb->data, skb->len, false);
-	qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
+	rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
+	if (rc) {
+		QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc);
+		kfree_skb(skb);
+		return rc;
+	}
 
 	return 0;
 }
@@ -1051,16 +1256,17 @@
 	    sizeof(void *);
 	fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE;
 
-	fcport->sq = dma_zalloc_coherent(&qedf->pdev->dev,
-	    fcport->sq_mem_size, &fcport->sq_dma, GFP_KERNEL);
+	fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
+					&fcport->sq_dma, GFP_KERNEL);
 	if (!fcport->sq) {
 		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n");
 		rval = 1;
 		goto out;
 	}
 
-	fcport->sq_pbl = dma_zalloc_coherent(&qedf->pdev->dev,
-	    fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL);
+	fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
+					    fcport->sq_pbl_size,
+					    &fcport->sq_pbl_dma, GFP_KERNEL);
 	if (!fcport->sq_pbl) {
 		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n");
 		rval = 1;
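Editor's note: the dma_zalloc_coherent() wrapper was removed once dma_alloc_coherent() started returning zeroed memory unconditionally (Linux 5.0), so conversions like the one above are mechanical. A hedged sketch of a coherent DMA allocation, assuming <linux/dma-mapping.h>; the helper name is illustrative:

```c
// Sketch of a coherent DMA allocation; no explicit memset is needed
// because dma_alloc_coherent() returns zeroed memory since Linux 5.0.
#include <linux/dma-mapping.h>

static void *demo_alloc_ring(struct device *dev, size_t size,
			     dma_addr_t *dma_handle)
{
	void *ring;

	ring = dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
	if (!ring)
		return NULL;

	/* Free later with dma_free_coherent(dev, size, ring, *dma_handle). */
	return ring;
}
```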
@@ -1137,7 +1343,7 @@
 	ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr);
 
 	conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size;
-	conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov / 20;
+	conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov;
 	conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */
 	conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size;
 
@@ -1224,6 +1430,8 @@
 static void qedf_cleanup_fcport(struct qedf_ctx *qedf,
 	struct qedf_rport *fcport)
 {
+	struct fc_rport_priv *rdata = fcport->rdata;
+
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n",
 	    fcport->rdata->ids.port_id);
 
@@ -1235,9 +1443,10 @@
 	qedf_free_sq(qedf, fcport);
 	fcport->rdata = NULL;
 	fcport->qedf = NULL;
+	kref_put(&rdata->kref, fc_rport_destroy);
 }
 
-/**
+/*
 * This event_callback is called after successful completion of libfc
 * initiated target login. qedf can proceed with initiating the session
 * establishment.
@@ -1310,6 +1519,8 @@
 			break;
 		}
 
+		/* Initial reference held on entry, so this can't fail */
+		kref_get(&rdata->kref);
 		fcport->rdata = rdata;
 		fcport->rport = rport;
 
@@ -1357,6 +1568,17 @@
 		if (port_id == FC_FID_DIR_SERV)
 			break;
 
+		if (rdata->spp_type != FC_TYPE_FCP) {
+			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+			    "No action since spp type isn't FCP\n");
+			break;
+		}
+		if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
+			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+			    "Not FCP target so no action\n");
+			break;
+		}
+
 		if (!rport) {
 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
 			    "port_id=%x - rport notcreated Yet!!\n", port_id);
@@ -1369,11 +1591,15 @@
 		 */
 		fcport = (struct qedf_rport *)&rp[1];
 
+		spin_lock_irqsave(&fcport->rport_lock, flags);
 		/* Only free this fcport if it is offloaded already */
-		if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
-			set_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags);
+		if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) &&
+		    !test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
+			      &fcport->flags)) {
+			set_bit(QEDF_RPORT_UPLOADING_CONNECTION,
+				&fcport->flags);
+			spin_unlock_irqrestore(&fcport->rport_lock, flags);
 			qedf_cleanup_fcport(qedf, fcport);
-
 			/*
 			 * Remove fcport to list of qedf_ctx list of offloaded
 			 * ports
@@ -1385,8 +1611,9 @@
 			clear_bit(QEDF_RPORT_UPLOADING_CONNECTION,
 			    &fcport->flags);
 			atomic_dec(&qedf->num_offloads);
+		} else {
+			spin_unlock_irqrestore(&fcport->rport_lock, flags);
 		}
-
 		break;
 
 	case RPORT_EV_NONE:
@@ -1428,12 +1655,13 @@
 static void qedf_setup_fdmi(struct qedf_ctx *qedf)
 {
 	struct fc_lport *lport = qedf->lport;
-	struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
 	u8 buf[8];
-	int i, pos;
+	int pos;
+	uint32_t i;
 
 	/*
-	 * fdmi_enabled needs to be set for libfc to execute FDMI registration.
+	 * fdmi_enabled needs to be set for libfc
+	 * to execute FDMI registration
 	 */
 	lport->fdmi_enabled = 1;
 
@@ -1449,33 +1677,47 @@
 	for (i = 0; i < 8; i++)
 		pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);
 
-		snprintf(fc_host->serial_number,
-		    sizeof(fc_host->serial_number),
+		snprintf(fc_host_serial_number(lport->host),
+		    FC_SERIAL_NUMBER_SIZE,
 		    "%02X%02X%02X%02X%02X%02X%02X%02X",
 		    buf[7], buf[6], buf[5], buf[4],
 		    buf[3], buf[2], buf[1], buf[0]);
 	} else
-		snprintf(fc_host->serial_number,
-		    sizeof(fc_host->serial_number), "Unknown");
+		snprintf(fc_host_serial_number(lport->host),
+		    FC_SERIAL_NUMBER_SIZE, "Unknown");
 
-	snprintf(fc_host->manufacturer,
-	    sizeof(fc_host->manufacturer), "%s", "Cavium Inc.");
+	snprintf(fc_host_manufacturer(lport->host),
+	    FC_SERIAL_NUMBER_SIZE, "%s", "Marvell Semiconductor Inc.");
 
-	snprintf(fc_host->model, sizeof(fc_host->model), "%s", "QL41000");
+	if (qedf->pdev->device == QL45xxx) {
+		snprintf(fc_host_model(lport->host),
+		    FC_SYMBOLIC_NAME_SIZE, "%s", "QL45xxx");
 
-	snprintf(fc_host->model_description, sizeof(fc_host->model_description),
-	    "%s", "QLogic FastLinQ QL41000 Series 10/25/40/50GGbE Controller"
-	    "(FCoE)");
+		snprintf(fc_host_model_description(lport->host),
+		    FC_SYMBOLIC_NAME_SIZE, "%s",
+		    "Marvell FastLinQ QL45xxx FCoE Adapter");
+	}
 
-	snprintf(fc_host->hardware_version, sizeof(fc_host->hardware_version),
-	    "Rev %d", qedf->pdev->revision);
+	if (qedf->pdev->device == QL41xxx) {
+		snprintf(fc_host_model(lport->host),
+		    FC_SYMBOLIC_NAME_SIZE, "%s", "QL41xxx");
 
-	snprintf(fc_host->driver_version, sizeof(fc_host->driver_version),
-	    "%s", QEDF_VERSION);
+		snprintf(fc_host_model_description(lport->host),
+		    FC_SYMBOLIC_NAME_SIZE, "%s",
+		    "Marvell FastLinQ QL41xxx FCoE Adapter");
+	}
 
-	snprintf(fc_host->firmware_version, sizeof(fc_host->firmware_version),
-	    "%d.%d.%d.%d", FW_MAJOR_VERSION, FW_MINOR_VERSION,
-	    FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
+	snprintf(fc_host_hardware_version(lport->host),
+	    FC_VERSION_STRING_SIZE, "Rev %d", qedf->pdev->revision);
+
+	snprintf(fc_host_driver_version(lport->host),
+	    FC_VERSION_STRING_SIZE, "%s", QEDF_VERSION);
+
+	snprintf(fc_host_firmware_version(lport->host),
+	    FC_VERSION_STRING_SIZE, "%d.%d.%d.%d",
+	    FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
+	    FW_ENGINEERING_VERSION);
+
 }
 
 static int qedf_lport_setup(struct qedf_ctx *qedf)
@@ -1498,11 +1740,15 @@
 	fc_set_wwnn(lport, qedf->wwnn);
 	fc_set_wwpn(lport, qedf->wwpn);
 
-	fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0);
+	if (fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0)) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "fcoe_libfc_config failed.\n");
+		return -ENOMEM;
+	}
 
 	/* Allocate the exchange manager */
-	fc_exch_mgr_alloc(lport, FC_CLASS_3, qedf->max_scsi_xid + 1,
-	    qedf->max_els_xid, NULL);
+	fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_PARAMS_NUM_TASKS,
+			  0xfffe, NULL);
 
 	if (fc_lport_init_stats(lport))
 		return -ENOMEM;
@@ -1518,8 +1764,13 @@
 	fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo;
 
 	/* Set symbolic node name */
-	snprintf(fc_host_symbolic_name(lport->host), 256,
-	    "QLogic %s v%s", QEDF_MODULE_NAME, QEDF_VERSION);
+	if (qedf->pdev->device == QL45xxx)
+		snprintf(fc_host_symbolic_name(lport->host), 256,
+			 "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);
+
+	if (qedf->pdev->device == QL41xxx)
+		snprintf(fc_host_symbolic_name(lport->host), 256,
+			 "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);
 
 	qedf_setup_fdmi(qedf);
 
@@ -1577,22 +1828,20 @@
 		fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
 		QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, "
 		    "WWPN (0x%s) already exists.\n", buf);
-		goto err1;
+		return rc;
 	}
 
 	if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) {
 		QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport "
 		    "because link is not up.\n");
-		rc = -EIO;
-		goto err1;
+		return -EIO;
 	}
 
 	vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx));
 	if (!vn_port) {
 		QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport "
 		    "for vport.\n");
-		rc = -ENOMEM;
-		goto err1;
+		return -ENOMEM;
 	}
 
 	fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
@@ -1611,12 +1860,13 @@
 	vport_qedf->cmd_mgr = base_qedf->cmd_mgr;
 	init_completion(&vport_qedf->flogi_compl);
 	INIT_LIST_HEAD(&vport_qedf->fcports);
+	INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work);
 
 	rc = qedf_vport_libfc_config(vport, vn_port);
 	if (rc) {
 		QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory "
 		    "for lport stats.\n");
-		goto err2;
+		goto err;
 	}
 
 	fc_set_wwnn(vn_port, vport->node_name);
@@ -1625,15 +1875,16 @@
 	vport_qedf->wwpn = vn_port->wwpn;
 
 	vn_port->host->transportt = qedf_fc_vport_transport_template;
-	vn_port->host->can_queue = QEDF_MAX_ELS_XID;
+	vn_port->host->can_queue = FCOE_PARAMS_NUM_TASKS;
 	vn_port->host->max_lun = qedf_max_lun;
 	vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD;
 	vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN;
 
 	rc = scsi_add_host(vn_port->host, &vport->dev);
 	if (rc) {
-		QEDF_WARN(&(base_qedf->dbg_ctx), "Error adding Scsi_Host.\n");
-		goto err2;
+		QEDF_WARN(&base_qedf->dbg_ctx,
+			  "Error adding Scsi_Host rc=0x%x.\n", rc);
+		goto err;
 	}
 
 	/* Set default dev_loss_tmo based on module parameter */
@@ -1667,6 +1918,27 @@
 		fc_vport_setlink(vn_port);
 	}
 
+	/* Set symbolic node name */
+	if (base_qedf->pdev->device == QL45xxx)
+		snprintf(fc_host_symbolic_name(vn_port->host), 256,
+			 "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);
+
+	if (base_qedf->pdev->device == QL41xxx)
+		snprintf(fc_host_symbolic_name(vn_port->host), 256,
+			 "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);
+
+	/* Set supported speed */
+	fc_host_supported_speeds(vn_port->host) = n_port->link_supported_speeds;
+
+	/* Set speed */
+	vn_port->link_speed = n_port->link_speed;
+
+	/* Set port type */
+	fc_host_port_type(vn_port->host) = FC_PORTTYPE_NPIV;
+
+	/* Set maxframe size */
+	fc_host_maxframe_size(vn_port->host) = n_port->mfs;
+
 	QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n",
 	    vn_port);
 
@@ -1674,9 +1946,10 @@
 	vport_qedf->dbg_ctx.host_no = vn_port->host->host_no;
 	vport_qedf->dbg_ctx.pdev = base_qedf->pdev;
 
-err2:
+	return 0;
+
+err:
 	scsi_host_put(vn_port->host);
-err1:
 	return rc;
 }
 
@@ -1717,8 +1990,7 @@
 	fc_lport_free_stats(vn_port);
 
 	/* Release Scsi_Host */
-	if (vn_port->host)
-		scsi_host_put(vn_port->host);
+	scsi_host_put(vn_port->host);
 
 out:
 	return 0;
@@ -1771,6 +2043,13 @@
 
 	qedf_ctx_soft_reset(lport);
 	return 0;
+}
+
+static void qedf_get_host_port_id(struct Scsi_Host *shost)
+{
+	struct fc_lport *lport = shost_priv(shost);
+
+	fc_host_port_id(shost) = lport->port_id;
 }
 
 static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
@@ -1843,6 +2122,7 @@
 	.show_host_active_fc4s = 1,
 	.show_host_maxframe_size = 1,
 
+	.get_host_port_id = qedf_get_host_port_id,
 	.show_host_port_id = 1,
 	.show_host_supported_speeds = 1,
 	.get_host_speed = fc_get_host_speed,
@@ -2086,16 +2366,21 @@
 static void qedf_sync_free_irqs(struct qedf_ctx *qedf)
 {
 	int i;
+	u16 vector_idx = 0;
+	u32 vector;
 
 	if (qedf->int_info.msix_cnt) {
 		for (i = 0; i < qedf->int_info.used_cnt; i++) {
-			synchronize_irq(qedf->int_info.msix[i].vector);
-			irq_set_affinity_hint(qedf->int_info.msix[i].vector,
-			    NULL);
-			irq_set_affinity_notifier(qedf->int_info.msix[i].vector,
-			    NULL);
-			free_irq(qedf->int_info.msix[i].vector,
-			    &qedf->fp_array[i]);
+			vector_idx = i * qedf->dev_info.common.num_hwfns +
+				qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
+			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+				  "Freeing IRQ #%d vector_idx=%d.\n",
+				  i, vector_idx);
+			vector = qedf->int_info.msix[vector_idx].vector;
+			synchronize_irq(vector);
+			irq_set_affinity_hint(vector, NULL);
+			irq_set_affinity_notifier(vector, NULL);
+			free_irq(vector, &qedf->fp_array[i]);
 		}
 	} else
 		qed_ops->common->simd_handler_clean(qedf->cdev,
@@ -2108,11 +2393,19 @@
 static int qedf_request_msix_irq(struct qedf_ctx *qedf)
 {
 	int i, rc, cpu;
+	u16 vector_idx = 0;
+	u32 vector;
 
 	cpu = cpumask_first(cpu_online_mask);
 	for (i = 0; i < qedf->num_queues; i++) {
-		rc = request_irq(qedf->int_info.msix[i].vector,
-		    qedf_msix_handler, 0, "qedf", &qedf->fp_array[i]);
+		vector_idx = i * qedf->dev_info.common.num_hwfns +
+			qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+			  "Requesting IRQ #%d vector_idx=%d.\n",
+			  i, vector_idx);
+		vector = qedf->int_info.msix[vector_idx].vector;
+		rc = request_irq(vector, qedf_msix_handler, 0, "qedf",
+				 &qedf->fp_array[i]);
 
 		if (rc) {
 			QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n");
@@ -2121,8 +2414,7 @@
 		}
 
 		qedf->int_info.used_cnt++;
-		rc = irq_set_affinity_hint(qedf->int_info.msix[i].vector,
-		    get_cpu_mask(cpu));
+		rc = irq_set_affinity_hint(vector, get_cpu_mask(cpu));
 		cpu = cpumask_next(cpu, cpu_online_mask);
 	}
 
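Editor's note: on multi-engine adapters the MSI-X table interleaves vectors per hardware function, so queue i of the affinitized engine lives at slot i * num_hwfns + affin_hwfn_idx — the arithmetic both hunks above now share. A hedged, standalone sketch of that index mapping (plain C, illustrative names only):

```c
/* Sketch of the interleaved MSI-X vector indexing assumed above. */
#include <stdio.h>

static unsigned int demo_vector_idx(unsigned int queue,
				    unsigned int num_hwfns,
				    unsigned int affin_hwfn_idx)
{
	return queue * num_hwfns + affin_hwfn_idx;
}

int main(void)
{
	/* Two engines, affinitized to engine 1: queues map to slots 1, 3, 5. */
	for (unsigned int q = 0; q < 3; q++)
		printf("queue %u -> vector slot %u\n",
		       q, demo_vector_idx(q, 2, 1));
	return 0;
}
```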
@@ -2155,7 +2447,8 @@
 	    QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler);
 	qedf->int_info.used_cnt = 1;
 
-	QEDF_ERR(&qedf->dbg_ctx, "Only MSI-X supported. Failing probe.\n");
+	QEDF_ERR(&qedf->dbg_ctx,
+		 "Cannot load driver due to a lack of MSI-X vectors.\n");
 	return -EINVAL;
 }
 
@@ -2198,12 +2491,14 @@
 	fr_dev(fp) = lport;
 	fr_sof(fp) = hp->fcoe_sof;
 	if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
+		QEDF_INFO(NULL, QEDF_LOG_LL2, "skb_copy_bits failed.\n");
 		kfree_skb(skb);
 		return;
 	}
 	fr_eof(fp) = crc_eof.fcoe_eof;
 	fr_crc(fp) = crc_eof.fcoe_crc32;
 	if (pskb_trim(skb, fr_len)) {
+		QEDF_INFO(NULL, QEDF_LOG_LL2, "pskb_trim failed.\n");
 		kfree_skb(skb);
 		return;
 	}
@@ -2264,9 +2559,9 @@
 	 * empty then this is not addressed to our port so simply drop it.
 	 */
 	if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) {
-		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
-		    "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n",
-		    lport->port_id, ntoh24(fh->fh_d_id));
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+			  "Dropping frame due to destination mismatch: lport->port_id=0x%x fh->d_id=0x%x.\n",
+			  lport->port_id, ntoh24(fh->fh_d_id));
 		kfree_skb(skb);
 		return;
 	}
@@ -2275,6 +2570,8 @@
 	if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) &&
 	    (f_ctl & FC_FC_EX_CTX)) {
 		/* Drop incoming ABTS response that has both SEQ/EX CTX set */
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+			  "Dropping ABTS response as both SEQ/EX CTX set.\n");
 		kfree_skb(skb);
 		return;
 	}
@@ -2356,6 +2653,13 @@
 	struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
 	struct qedf_skb_work *skb_work;
 
+	if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+			  "Dropping frame as link state is down.\n");
+		kfree_skb(skb);
+		return 0;
+	}
+
 	skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC);
 	if (!skb_work) {
 		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so "
@@ -2411,8 +2715,9 @@
 	    sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL);
 
 	if (!sb_virt) {
-		QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed "
-		    "for id = %d.\n", sb_id);
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Status block allocation failed for id = %d.\n",
+			 sb_id);
 		return -ENOMEM;
 	}
 
@@ -2420,8 +2725,9 @@
 	    sb_id, QED_SB_TYPE_STORAGE);
 
 	if (ret) {
-		QEDF_ERR(&(qedf->dbg_ctx), "Status block initialization "
-		    "failed for id = %d.\n", sb_id);
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Status block initialization failed (0x%x) for id = %d.\n",
+			 ret, sb_id);
 		return ret;
 	}
 
@@ -2497,6 +2803,8 @@
 	struct qedf_ioreq *io_req;
 	struct qedf_rport *fcport;
 	u32 comp_type;
+	u8 io_comp_type;
+	unsigned long flags;
 
 	comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
 	    FCOE_CQE_CQE_TYPE_MASK;
@@ -2505,13 +2813,18 @@
 	io_req = &qedf->cmd_mgr->cmds[xid];
 
 	/* Completion not for a valid I/O anymore so just return */
-	if (!io_req)
+	if (!io_req) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "io_req is NULL for xid=0x%x.\n", xid);
 		return;
+	}
 
 	fcport = io_req->fcport;
 
 	if (fcport == NULL) {
-		QEDF_ERR(&(qedf->dbg_ctx), "fcport is NULL.\n");
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "fcport is NULL for xid=0x%x io_req=%p.\n",
+			 xid, io_req);
 		return;
 	}
 
@@ -2520,15 +2833,19 @@
 	 * isn't valid and shouldn't be taken. We should just return.
 	 */
 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
-		QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Session not offloaded yet, fcport = %p.\n", fcport);
 		return;
 	}
 
+	spin_lock_irqsave(&fcport->rport_lock, flags);
+	io_comp_type = io_req->cmd_type;
+	spin_unlock_irqrestore(&fcport->rport_lock, flags);
 
 	switch (comp_type) {
 	case FCOE_GOOD_COMPLETION_CQE_TYPE:
 		atomic_inc(&fcport->free_sqes);
-		switch (io_req->cmd_type) {
+		switch (io_comp_type) {
 		case QEDF_SCSI_CMD:
 			qedf_scsi_completion(qedf, cqe, io_req);
 			break;
@@ -2681,8 +2998,10 @@
 	}
 
 	/* Allocate list of PBL pages */
-	qedf->bdq_pbl_list = dma_zalloc_coherent(&qedf->pdev->dev,
-	    QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL);
+	qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev,
+						QEDF_PAGE_SIZE,
+						&qedf->bdq_pbl_list_dma,
+						GFP_KERNEL);
 	if (!qedf->bdq_pbl_list) {
 		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n");
 		return -ENOMEM;
@@ -2709,7 +3028,7 @@
 {
 	u32 *list;
 	int i;
-	int status = 0, rc;
+	int status;
 	u32 *pbl;
 	dma_addr_t page;
 	int num_pages;
@@ -2721,7 +3040,7 @@
 	 */
 	if (!qedf->num_queues) {
 		QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
-		return 1;
+		return -ENOMEM;
 	}
 
 	/*
@@ -2729,8 +3048,8 @@
 	 * addresses of our queues
 	 */
 	if (!qedf->p_cpuq) {
-		status = 1;
-		goto mem_alloc_failure;
+		QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
+		return -EINVAL;
 	}
 
 	qedf->global_queues = kzalloc((sizeof(struct global_queue *)
@@ -2744,9 +3063,11 @@
 	    "qedf->global_queues=%p.\n", qedf->global_queues);
 
 	/* Allocate DMA coherent buffers for BDQ */
-	rc = qedf_alloc_bdq(qedf);
-	if (rc)
+	status = qedf_alloc_bdq(qedf);
+	if (status) {
+		QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n");
 		goto mem_alloc_failure;
+	}
 
 	/* Allocate a CQ and an associated PBL for each MSI-X vector */
 	for (i = 0; i < qedf->num_queues; i++) {
@@ -2771,9 +3092,10 @@
 		    ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);
 
 		qedf->global_queues[i]->cq =
-		    dma_zalloc_coherent(&qedf->pdev->dev,
-			qedf->global_queues[i]->cq_mem_size,
-			&qedf->global_queues[i]->cq_dma, GFP_KERNEL);
+		    dma_alloc_coherent(&qedf->pdev->dev,
+				       qedf->global_queues[i]->cq_mem_size,
+				       &qedf->global_queues[i]->cq_dma,
+				       GFP_KERNEL);
 
 		if (!qedf->global_queues[i]->cq) {
 			QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n");
@@ -2782,9 +3104,10 @@
 		}
 
 		qedf->global_queues[i]->cq_pbl =
-		    dma_zalloc_coherent(&qedf->pdev->dev,
-			qedf->global_queues[i]->cq_pbl_size,
-			&qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL);
+		    dma_alloc_coherent(&qedf->pdev->dev,
+				       qedf->global_queues[i]->cq_pbl_size,
+				       &qedf->global_queues[i]->cq_pbl_dma,
+				       GFP_KERNEL);
 
 		if (!qedf->global_queues[i]->cq_pbl) {
 			QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
@@ -2855,12 +3178,12 @@
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
 	    qedf->num_queues);
 
-	qedf->p_cpuq = pci_alloc_consistent(qedf->pdev,
+	qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev,
 	    qedf->num_queues * sizeof(struct qedf_glbl_q_params),
-	    &qedf->hw_p_cpuq);
+	    &qedf->hw_p_cpuq, GFP_KERNEL);
 
 	if (!qedf->p_cpuq) {
-		QEDF_ERR(&(qedf->dbg_ctx), "pci_alloc_consistent failed.\n");
+		QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n");
 		return 1;
 	}
 
@@ -2929,14 +3252,13 @@
 
 	if (qedf->p_cpuq) {
 		size = qedf->num_queues * sizeof(struct qedf_glbl_q_params);
-		pci_free_consistent(qedf->pdev, size, qedf->p_cpuq,
+		dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq,
 		    qedf->hw_p_cpuq);
 	}
 
 	qedf_free_global_queues(qedf);
 
-	if (qedf->global_queues)
-		kfree(qedf->global_queues);
+	kfree(qedf->global_queues);
 }
 
 /*
@@ -2955,6 +3277,8 @@
 	.id_table = qedf_pci_tbl,
 	.probe = qedf_probe,
 	.remove = qedf_remove,
+	.shutdown = qedf_shutdown,
+	.suspend = qedf_suspend,
 };
 
 static int __qedf_probe(struct pci_dev *pdev, int mode)
@@ -2971,12 +3295,16 @@
 	void *task_start, *task_end;
 	struct qed_slowpath_params slowpath_params;
 	struct qed_probe_params qed_params;
-	u16 tmp;
+	u16 retry_cnt = 10;
 
 	/*
 	 * When doing error recovery we didn't reap the lport so don't try
 	 * to reallocate it.
 	 */
+retry_probe:
+	if (mode == QEDF_MODE_RECOVERY)
+		msleep(2000);
+
 	if (mode != QEDF_MODE_RECOVERY) {
 		lport = libfc_host_alloc(&qedf_host_template,
 		    sizeof(struct qedf_ctx));
@@ -2986,6 +3314,8 @@
 			rc = -ENOMEM;
 			goto err0;
 		}
+
+		fc_disc_init(lport);
 
 		/* Initialize qedf_ctx */
 		qedf = lport_priv(lport);
@@ -3003,6 +3333,8 @@
 		pci_set_drvdata(pdev, qedf);
 		init_completion(&qedf->fipvlan_compl);
 		mutex_init(&qedf->stats_mutex);
+		mutex_init(&qedf->flush_mutex);
+		qedf->flogi_pending = 0;
 
 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
 		   "QLogic FastLinQ FCoE Module qedf %s, "
@@ -3036,6 +3368,7 @@
 	INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
 	INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
 	INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
+	INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work);
 	qedf->fipvlan_retries = qedf_fipvlan_retries;
 	/* Set a default prio in case DCBX doesn't converge */
 	if (qedf_default_prio > -1) {
@@ -3058,6 +3391,13 @@
 	qed_params.is_vf = is_vf;
 	qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
 	if (!qedf->cdev) {
+		if ((mode == QEDF_MODE_RECOVERY) && retry_cnt) {
+			QEDF_ERR(&qedf->dbg_ctx,
+				 "Retry %d initialize hardware\n", retry_cnt);
+			retry_cnt--;
+			goto retry_probe;
+		}
+		QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n");
 		rc = -ENODEV;
 		goto err1;
 	}
@@ -3068,6 +3408,11 @@
 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n");
 		goto err1;
 	}
+
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+		  "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n",
+		  qedf->dev_info.common.num_hwfns,
+		  qed_ops->common->get_affin_hwfn_idx(qedf->cdev));
 
 	/* queue allocation code should come here
 	 * order should be
@@ -3083,6 +3428,13 @@
 		goto err2;
 	}
 	qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
+
+	/* Learn information crucial for qedf to progress */
+	rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
+	if (rc) {
+		QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n");
+		goto err2;
+	}
 
 	/* Record BDQ producer doorbell addresses */
 	qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
@@ -3121,8 +3473,10 @@
 
 	/* Setup interrupts */
 	rc = qedf_setup_int(qedf);
-	if (rc)
+	if (rc) {
+		QEDF_ERR(&qedf->dbg_ctx, "Setup interrupts failed.\n");
 		goto err3;
+	}
 
 	rc = qed_ops->start(qedf->cdev, &qedf->tasks);
 	if (rc) {
@@ -3145,9 +3499,9 @@
 	    "Writing %d to primary and secondary BDQ doorbell registers.\n",
 	    qedf->bdq_prod_idx);
 	writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
-	tmp = readw(qedf->bdq_primary_prod);
+	readw(qedf->bdq_primary_prod);
 	writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
-	tmp = readw(qedf->bdq_secondary_prod);
+	readw(qedf->bdq_secondary_prod);
 
 	qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
 
@@ -3182,11 +3536,6 @@
 	sprintf(host_buf, "host_%d", host->host_no);
 	qed_ops->common->set_name(qedf->cdev, host_buf);
 
-
-	/* Set xid max values */
-	qedf->max_scsi_xid = QEDF_MAX_SCSI_XID;
-	qedf->max_els_xid = QEDF_MAX_ELS_XID;
-
 	/* Allocate cmd mgr */
 	qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
 	if (!qedf->cmd_mgr) {
@@ -3197,16 +3546,19 @@
 
 	if (mode != QEDF_MODE_RECOVERY) {
 		host->transportt = qedf_fc_transport_template;
-		host->can_queue = QEDF_MAX_ELS_XID;
 		host->max_lun = qedf_max_lun;
 		host->max_cmd_len = QEDF_MAX_CDB_LEN;
+		host->can_queue = FCOE_PARAMS_NUM_TASKS;
 		rc = scsi_add_host(host, &pdev->dev);
-		if (rc)
+		if (rc) {
+			QEDF_WARN(&qedf->dbg_ctx,
+				  "Error adding Scsi_Host rc=0x%x.\n", rc);
 			goto err6;
+		}
 	}
 
 	memset(&params, 0, sizeof(params));
-	params.mtu = 9000;
+	params.mtu = QEDF_LL2_BUF_SIZE;
 	ether_addr_copy(params.ll2_mac_address, qedf->mac);
 
 	/* Start LL2 processing thread */
@@ -3269,6 +3621,7 @@
 	    qedf->lport->host->host_no);
 		qedf->dpc_wq = create_workqueue(host_buf);
 	}
+	INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
 
 	/*
	 * GRC dump and sysfs parameters are not reaped during the recovery
@@ -3345,11 +3698,6 @@
 err1:
 	scsi_host_put(lport->host);
 err0:
-	if (qedf) {
-		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
-
-		clear_bit(QEDF_PROBING, &qedf->flags);
-	}
 	return rc;
 }
 
@@ -3387,7 +3735,9 @@
 		fcoe_ctlr_link_down(&qedf->ctlr);
 	else
 		fc_fabric_logoff(qedf->lport);
-	qedf_wait_for_upload(qedf);
+
+	if (qedf_wait_for_upload(qedf) == false)
+		QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
 
 #ifdef CONFIG_DEBUG_FS
 	qedf_dbg_host_exit(&(qedf->dbg_ctx));
@@ -3490,6 +3840,44 @@
 	qedf_capture_grc_dump(qedf);
 }
 
+void qedf_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type)
+{
+	struct qedf_ctx *qedf = dev;
+
+	QEDF_ERR(&(qedf->dbg_ctx),
+		 "Hardware error handler scheduled, event=%d.\n",
+		 err_type);
+
+	if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
+		QEDF_ERR(&(qedf->dbg_ctx),
+			 "Already in recovery, not scheduling board disable work.\n");
+		return;
+	}
+
+	switch (err_type) {
+	case QED_HW_ERR_FAN_FAIL:
+		schedule_delayed_work(&qedf->board_disable_work, 0);
+		break;
+	case QED_HW_ERR_MFW_RESP_FAIL:
+	case QED_HW_ERR_HW_ATTN:
+	case QED_HW_ERR_DMAE_FAIL:
+	case QED_HW_ERR_FW_ASSERT:
+		/* Prevent HW attentions from being reasserted */
+		qed_ops->common->attn_clr_enable(qedf->cdev, true);
+		break;
+	case QED_HW_ERR_RAMROD_FAIL:
+		/* Prevent HW attentions from being reasserted */
+		qed_ops->common->attn_clr_enable(qedf->cdev, true);
+
+		if (qedf_enable_recovery)
+			qed_ops->common->recovery_process(qedf->cdev);
+
+		break;
+	default:
+		break;
+	}
+}
+
 /*
  * Protocol TLV handler
  */
@@ -3584,6 +3972,80 @@
 
 	fcoe->scsi_tsk_full_set = true;
 	fcoe->scsi_tsk_full = qedf->task_set_fulls;
+}
+
+/* Deferred work function to perform soft context reset on STAG change */
+void qedf_stag_change_work(struct work_struct *work)
+{
+	struct qedf_ctx *qedf =
+	    container_of(work, struct qedf_ctx, stag_work.work);
+
+	if (!qedf) {
+		QEDF_ERR(NULL, "qedf is NULL");
+		return;
+	}
+	QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
+	qedf_ctx_soft_reset(qedf->lport);
+}
+
+static void qedf_shutdown(struct pci_dev *pdev)
+{
+	__qedf_remove(pdev, QEDF_MODE_NORMAL);
+}
+
+static int qedf_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct qedf_ctx *qedf;
+
+	if (!pdev) {
+		QEDF_ERR(NULL, "pdev is NULL.\n");
+		return -ENODEV;
+	}
+
+	qedf = pci_get_drvdata(pdev);
+
+	QEDF_ERR(&qedf->dbg_ctx, "%s: Device does not support suspend operation\n", __func__);
+
+	return -EPERM;
+}
+
+/*
+ * Recovery handler code
+ */
+static void qedf_schedule_recovery_handler(void *dev)
+{
+	struct qedf_ctx *qedf = dev;
+
+	QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n");
+	schedule_delayed_work(&qedf->recovery_work, 0);
+}
+
+static void qedf_recovery_handler(struct work_struct *work)
+{
+	struct qedf_ctx *qedf =
+	    container_of(work, struct qedf_ctx, recovery_work.work);
+
+	if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags))
+		return;
+
+	/*
+	 * Call common_ops->recovery_prolog to allow the MFW to quiesce
+	 * any PCI transactions.
+	 */
+	qed_ops->common->recovery_prolog(qedf->cdev);
+
+	QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n");
+	__qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY);
+	/*
+	 * Reset link and dcbx to down state since we will not get a link down
+	 * event from the MFW but calling __qedf_remove will essentially be a
+	 * link down event.
+	 */
+	atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
+	atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
+	__qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY);
+	clear_bit(QEDF_IN_RECOVERY, &qedf->flags);
+	QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n");
+}
 
 /* Generic TLV data callback */
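Editor's note: qedf_recovery_handler() above serializes itself with test_and_set_bit() on QEDF_IN_RECOVERY, so concurrent fault triggers collapse into a single remove/probe cycle. A hedged sketch of that single-flight guard, assuming only <linux/bitops.h>; the demo_* names are illustrative:

```c
// Sketch of the single-flight recovery guard used above.
#include <linux/bitops.h>

#define DEMO_IN_RECOVERY 0

struct demo_dev {
	unsigned long flags;
};

static void demo_recover(struct demo_dev *dev)
{
	/* Atomically claim the recovery slot; later callers bail out. */
	if (test_and_set_bit(DEMO_IN_RECOVERY, &dev->flags))
		return;

	/* ... tear down and re-probe the device here ... */

	clear_bit(DEMO_IN_RECOVERY, &dev->flags);	/* reopen for next fault */
}
```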
@@ -3712,7 +4174,7 @@
 }
 
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("QLogic QEDF 25/40/50/100Gb FCoE Driver");
+MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx FCoE Module");
 MODULE_AUTHOR("QLogic Corporation");
 MODULE_VERSION(QEDF_VERSION);
 module_init(qedf_init);
 module_exit(qedf_exit);