From ee930fffee469d076998274a2ca55e13dc1efb67 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 08:50:54 +0000
Subject: [PATCH] scsi: qedf: backport upstream qedf_main.c fixes

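Backport a batch of upstream qedf fixes into the vendor tree. The
qedf_main.c changes:

- Replace the GPL boilerplate with an SPDX license identifier.
- Handle link-down and driver-unload races during FIP VLAN discovery
  and reset vlan_id when the link drops.
- Count pending FLOGIs and schedule a soft context reset (STAG change
  work) once QEDF_FLOGI_RETRY_CNT is exceeded.
- Walk lport->disc.rports under disc_mutex during link recovery instead
  of building a temporary rdata list under the RCU lock.
- Report supported link speeds via phylink link-mode masks and add
  20GBIT support.
- Wire up the bw_update, schedule_recovery_handler and
  schedule_hw_err_handler callbacks, plus the recovery worker that
  re-probes the function in QEDF_MODE_RECOVERY.
- Take rdata/io_req references in qedf_eh_abort() and complete the
  command back to the SCSI layer if the ABTS cannot be queued.
- Bound qedf_wait_for_upload() and log the sessions that failed to
  upload.
- Convert dma_zalloc_coherent()/pci_alloc_consistent() callers to
  dma_alloc_coherent(), use skb_frag_off(), and index MSI-X vectors
  through the affinity hwfn.
- Add shutdown/suspend PCI hooks (suspend is rejected with -EPERM),
  update FDMI/model strings for QL41xxx/QL45xxx adapters, and add the
  enable_recovery module parameter.

Both the debug mask and the new recovery knob are writable at runtime
(S_IRUGO | S_IWUSR). A quick sanity check, assuming the module is
loaded under its default name so the parameters appear in the standard
module sysfs location:

    # disable automatic recovery on ramrod failure
    echo 0 > /sys/module/qedf/parameters/enable_recovery
    cat /sys/module/qedf/parameters/enable_recovery

    # raise the debug mask without reloading the module
    echo 1 > /sys/module/qedf/parameters/debug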
---
 kernel/drivers/scsi/qedf/qedf_main.c |  912 ++++++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 687 insertions(+), 225 deletions(-)

diff --git a/kernel/drivers/scsi/qedf/qedf_main.c b/kernel/drivers/scsi/qedf/qedf_main.c
index b253523..6923862 100644
--- a/kernel/drivers/scsi/qedf/qedf_main.c
+++ b/kernel/drivers/scsi/qedf/qedf_main.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *  QLogic FCoE Offload Driver
  *  Copyright (c) 2016-2018 Cavium Inc.
- *
- *  This software is available under the terms of the GNU General Public License
- *  (GPL) Version 2, available from the file COPYING in the main directory of
- *  this source tree.
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -16,6 +13,7 @@
 #include <linux/interrupt.h>
 #include <linux/list.h>
 #include <linux/kthread.h>
+#include <linux/phylink.h>
 #include <scsi/libfc.h>
 #include <scsi/scsi_host.h>
 #include <scsi/fc_frame.h>
@@ -30,6 +28,10 @@
 
 static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id);
 static void qedf_remove(struct pci_dev *pdev);
+static void qedf_shutdown(struct pci_dev *pdev);
+static void qedf_schedule_recovery_handler(void *dev);
+static void qedf_recovery_handler(struct work_struct *work);
+static int qedf_suspend(struct pci_dev *pdev, pm_message_t state);
 
 /*
  * Driver module parameters.
@@ -40,7 +42,7 @@
 	"remote ports (default 60)");
 
 uint qedf_debug = QEDF_LOG_INFO;
-module_param_named(debug, qedf_debug, uint, S_IRUGO);
+module_param_named(debug, qedf_debug, uint, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging"
 	" mask");
 
@@ -104,6 +106,12 @@
 MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module  "
 	"during probe (0-3: 0 more verbose).");
 
+static bool qedf_enable_recovery = true;
+module_param_named(enable_recovery, qedf_enable_recovery,
+		bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(enable_recovery, "Enable/disable recovery on driver/firmware "
+		"interface level errors 0 = Disabled, 1 = Enabled (Default: 1).");
+
 struct workqueue_struct *qedf_io_wq;
 
 static struct fcoe_percpu_s qedf_global;
@@ -113,35 +121,45 @@
 
 void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id)
 {
-	qedf->vlan_id = vlan_id;
-	qedf->vlan_id |= qedf->prio << VLAN_PRIO_SHIFT;
-	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Setting vlan_id=%04x "
-		   "prio=%d.\n", vlan_id, qedf->prio);
+	int vlan_id_tmp = 0;
+
+	vlan_id_tmp = vlan_id | (qedf->prio << VLAN_PRIO_SHIFT);
+	qedf->vlan_id = vlan_id_tmp;
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+		  "Setting vlan_id=0x%04x prio=%d.\n",
+		  vlan_id_tmp, qedf->prio);
 }
 
 /* Returns true if we have a valid vlan, false otherwise */
 static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
 {
-	int rc;
-
-	if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
-		QEDF_ERR(&(qedf->dbg_ctx), "Link not up.\n");
-		return  false;
-	}
 
 	while (qedf->fipvlan_retries--) {
-		if (qedf->vlan_id > 0)
+		/* This is to catch if link goes down during fipvlan retries */
+		if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
+			QEDF_ERR(&qedf->dbg_ctx, "Link not up.\n");
+			return false;
+		}
+
+		if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+			QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n");
+			return false;
+		}
+
+		if (qedf->vlan_id > 0) {
+			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+				  "vlan = 0x%x already set, calling ctlr_link_up.\n",
+				  qedf->vlan_id);
+			if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
+				fcoe_ctlr_link_up(&qedf->ctlr);
 			return true;
+		}
+
 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
 			   "Retry %d.\n", qedf->fipvlan_retries);
 		init_completion(&qedf->fipvlan_compl);
 		qedf_fcoe_send_vlan_req(qedf);
-		rc = wait_for_completion_timeout(&qedf->fipvlan_compl,
-		    1 * HZ);
-		if (rc > 0) {
-			fcoe_ctlr_link_up(&qedf->ctlr);
-			return true;
-		}
+		wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ);
 	}
 
 	return false;
@@ -153,12 +171,21 @@
 	    container_of(work, struct qedf_ctx, link_update.work);
 	int rc;
 
-	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Entered.\n");
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Entered. link_state=%d.\n",
+		  atomic_read(&qedf->link_state));
 
 	if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
 		rc = qedf_initiate_fipvlan_req(qedf);
 		if (rc)
 			return;
+
+		if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
+			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+				  "Link is down, resetting vlan_id.\n");
+			qedf->vlan_id = 0;
+			return;
+		}
+
 		/*
 		 * If we get here then we never received a repsonse to our
 		 * fip vlan request so set the vlan_id to the default and
@@ -185,7 +212,9 @@
 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
 		    "Calling fcoe_ctlr_link_down().\n");
 		fcoe_ctlr_link_down(&qedf->ctlr);
-		qedf_wait_for_upload(qedf);
+		if (qedf_wait_for_upload(qedf) == false)
+			QEDF_ERR(&qedf->dbg_ctx,
+				 "Could not upload all sessions.\n");
 		/* Reset the number of FIP VLAN retries */
 		qedf->fipvlan_retries = qedf_fipvlan_retries;
 	}
@@ -263,6 +292,7 @@
 	else if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
 		/* Set the source MAC we will use for FCoE traffic */
 		qedf_set_data_src_addr(qedf, fp);
+		qedf->flogi_pending = 0;
 	}
 
 	/* Complete flogi_compl so we can proceed to sending ADISCs */
@@ -288,6 +318,11 @@
 	 */
 	if (resp == fc_lport_flogi_resp) {
 		qedf->flogi_cnt++;
+		if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
+			schedule_delayed_work(&qedf->stag_work, 2);
+			return NULL;
+		}
+		qedf->flogi_pending++;
 		return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
 		    arg, timeout);
 	}
@@ -302,8 +337,10 @@
 
 	lport = qedf->lport;
 
-	if (!lport->tt.elsct_send)
+	if (!lport->tt.elsct_send) {
+		QEDF_ERR(&qedf->dbg_ctx, "tt.elsct_send not set.\n");
 		return -EINVAL;
+	}
 
 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
 	if (!fp) {
@@ -321,11 +358,6 @@
 	return 0;
 }
 
-struct qedf_tmp_rdata_item {
-	struct fc_rport_priv *rdata;
-	struct list_head list;
-};
-
 /*
  * This function is called if link_down_tmo is in use.  If we get a link up and
  * link_down_tmo has not expired then use just FLOGI/ADISC to recover our
@@ -335,9 +367,8 @@
 {
 	struct qedf_ctx *qedf =
 	    container_of(work, struct qedf_ctx, link_recovery.work);
-	struct qedf_rport *fcport;
+	struct fc_lport *lport = qedf->lport;
 	struct fc_rport_priv *rdata;
-	struct qedf_tmp_rdata_item *rdata_item, *tmp_rdata_item;
 	bool rc;
 	int retries = 30;
 	int rval, i;
@@ -404,38 +435,20 @@
 	 * Call lport->tt.rport_login which will cause libfc to send an
 	 * ADISC since the rport is in state ready.
 	 */
-	rcu_read_lock();
-	list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
-		rdata = fcport->rdata;
-		if (rdata == NULL)
-			continue;
-		rdata_item = kzalloc(sizeof(struct qedf_tmp_rdata_item),
-		    GFP_ATOMIC);
-		if (!rdata_item)
-			continue;
+	mutex_lock(&lport->disc.disc_mutex);
+	list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
 		if (kref_get_unless_zero(&rdata->kref)) {
-			rdata_item->rdata = rdata;
-			list_add(&rdata_item->list, &rdata_login_list);
-		} else
-			kfree(rdata_item);
+			fc_rport_login(rdata);
+			kref_put(&rdata->kref, fc_rport_destroy);
+		}
 	}
-	rcu_read_unlock();
-	/*
-	 * Do the fc_rport_login outside of the rcu lock so we don't take a
-	 * mutex in an atomic context.
-	 */
-	list_for_each_entry_safe(rdata_item, tmp_rdata_item, &rdata_login_list,
-	    list) {
-		list_del(&rdata_item->list);
-		fc_rport_login(rdata_item->rdata);
-		kref_put(&rdata_item->rdata->kref, fc_rport_destroy);
-		kfree(rdata_item);
-	}
+	mutex_unlock(&lport->disc.disc_mutex);
 }
 
 static void qedf_update_link_speed(struct qedf_ctx *qedf,
 	struct qed_link_output *link)
 {
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps);
 	struct fc_lport *lport = qedf->lport;
 
 	lport->link_speed = FC_PORTSPEED_UNKNOWN;
@@ -458,6 +471,9 @@
 	case 100000:
 		lport->link_speed = FC_PORTSPEED_100GBIT;
 		break;
+	case 20000:
+		lport->link_speed = FC_PORTSPEED_20GBIT;
+		break;
 	default:
 		lport->link_speed = FC_PORTSPEED_UNKNOWN;
 		break;
@@ -467,22 +483,104 @@
 	 * Set supported link speed by querying the supported
 	 * capabilities of the link.
 	 */
-	if (link->supported_caps & SUPPORTED_10000baseKR_Full)
+
+	phylink_zero(sup_caps);
+	phylink_set(sup_caps, 10000baseT_Full);
+	phylink_set(sup_caps, 10000baseKX4_Full);
+	phylink_set(sup_caps, 10000baseR_FEC);
+	phylink_set(sup_caps, 10000baseCR_Full);
+	phylink_set(sup_caps, 10000baseSR_Full);
+	phylink_set(sup_caps, 10000baseLR_Full);
+	phylink_set(sup_caps, 10000baseLRM_Full);
+	phylink_set(sup_caps, 10000baseKR_Full);
+
+	if (linkmode_intersects(link->supported_caps, sup_caps))
 		lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
-	if (link->supported_caps & SUPPORTED_25000baseKR_Full)
+
+	phylink_zero(sup_caps);
+	phylink_set(sup_caps, 25000baseKR_Full);
+	phylink_set(sup_caps, 25000baseCR_Full);
+	phylink_set(sup_caps, 25000baseSR_Full);
+
+	if (linkmode_intersects(link->supported_caps, sup_caps))
 		lport->link_supported_speeds |= FC_PORTSPEED_25GBIT;
-	if (link->supported_caps & SUPPORTED_40000baseLR4_Full)
+
+	phylink_zero(sup_caps);
+	phylink_set(sup_caps, 40000baseLR4_Full);
+	phylink_set(sup_caps, 40000baseKR4_Full);
+	phylink_set(sup_caps, 40000baseCR4_Full);
+	phylink_set(sup_caps, 40000baseSR4_Full);
+
+	if (linkmode_intersects(link->supported_caps, sup_caps))
 		lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
-	if (link->supported_caps & SUPPORTED_50000baseKR2_Full)
+
+	phylink_zero(sup_caps);
+	phylink_set(sup_caps, 50000baseKR2_Full);
+	phylink_set(sup_caps, 50000baseCR2_Full);
+	phylink_set(sup_caps, 50000baseSR2_Full);
+
+	if (linkmode_intersects(link->supported_caps, sup_caps))
 		lport->link_supported_speeds |= FC_PORTSPEED_50GBIT;
-	if (link->supported_caps & SUPPORTED_100000baseKR4_Full)
+
+	phylink_zero(sup_caps);
+	phylink_set(sup_caps, 100000baseKR4_Full);
+	phylink_set(sup_caps, 100000baseSR4_Full);
+	phylink_set(sup_caps, 100000baseCR4_Full);
+	phylink_set(sup_caps, 100000baseLR4_ER4_Full);
+
+	if (linkmode_intersects(link->supported_caps, sup_caps))
 		lport->link_supported_speeds |= FC_PORTSPEED_100GBIT;
-	fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
+
+	phylink_zero(sup_caps);
+	phylink_set(sup_caps, 20000baseKR2_Full);
+
+	if (linkmode_intersects(link->supported_caps, sup_caps))
+		lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
+
+	if (lport->host && lport->host->shost_data)
+		fc_host_supported_speeds(lport->host) =
+			lport->link_supported_speeds;
+}
+
+static void qedf_bw_update(void *dev)
+{
+	struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
+	struct qed_link_output link;
+
+	/* Get the latest status of the link */
+	qed_ops->common->get_link(qedf->cdev, &link);
+
+	if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Ignore link update, driver getting unload.\n");
+		return;
+	}
+
+	if (link.link_up) {
+		if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
+			qedf_update_link_speed(qedf, &link);
+		else
+			QEDF_ERR(&qedf->dbg_ctx,
+				 "Ignore bw update, link is down.\n");
+
+	} else {
+		QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n");
+	}
 }
 
 static void qedf_link_update(void *dev, struct qed_link_output *link)
 {
 	struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
+
+	/*
+	 * Prevent race where we're removing the module and we get link update
+	 * for qed.
+	 */
+	if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Ignore link update, driver getting unload.\n");
+		return;
+	}
 
 	if (link->link_up) {
 		if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
@@ -563,7 +661,7 @@
 		tmp_prio = get->operational.app_prio.fcoe;
 		if (qedf_default_prio > -1)
 			qedf->prio = qedf_default_prio;
-		else if (tmp_prio < 0 || tmp_prio > 7) {
+		else if (tmp_prio > 7) {
 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
 			    "FIP/FCoE prio %d out of range, setting to %d.\n",
 			    tmp_prio, QEDF_DEFAULT_PRIO);
@@ -596,9 +694,12 @@
 static struct qed_fcoe_cb_ops qedf_cb_ops = {
 	{
 		.link_update = qedf_link_update,
+		.bw_update = qedf_bw_update,
+		.schedule_recovery_handler = qedf_schedule_recovery_handler,
 		.dcbx_aen = qedf_dcbx_handler,
 		.get_generic_tlv_data = qedf_get_generic_tlv_data,
 		.get_protocol_tlv_data = qedf_get_protocol_tlv_data,
+		.schedule_hw_err_handler = qedf_schedule_hw_err_handler,
 	}
 };
 
@@ -615,50 +716,113 @@
 static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
 {
 	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
-	struct fc_rport_libfc_priv *rp = rport->dd_data;
-	struct qedf_rport *fcport;
 	struct fc_lport *lport;
 	struct qedf_ctx *qedf;
 	struct qedf_ioreq *io_req;
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct fc_rport_priv *rdata;
+	struct qedf_rport *fcport = NULL;
 	int rc = FAILED;
+	int wait_count = 100;
+	int refcount = 0;
 	int rval;
-
-	if (fc_remote_port_chkready(rport)) {
-		QEDF_ERR(NULL, "rport not ready\n");
-		goto out;
-	}
+	int got_ref = 0;
 
 	lport = shost_priv(sc_cmd->device->host);
 	qedf = (struct qedf_ctx *)lport_priv(lport);
 
-	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
-		QEDF_ERR(&(qedf->dbg_ctx), "link not ready.\n");
-		goto out;
-	}
-
+	/* rport and tgt are allocated together, so tgt should be non-NULL */
 	fcport = (struct qedf_rport *)&rp[1];
-
-	io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
-	if (!io_req) {
-		QEDF_ERR(&(qedf->dbg_ctx), "io_req is NULL.\n");
+	rdata = fcport->rdata;
+	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
+		QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
 		rc = SUCCESS;
 		goto out;
 	}
 
-	QEDF_ERR(&(qedf->dbg_ctx), "Aborting io_req sc_cmd=%p xid=0x%x "
-		  "fp_idx=%d.\n", sc_cmd, io_req->xid, io_req->fp_idx);
+
+	io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
+	if (!io_req) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "sc_cmd not queued with lld, sc_cmd=%p op=0x%02x, port_id=%06x\n",
+			 sc_cmd, sc_cmd->cmnd[0],
+			 rdata->ids.port_id);
+		rc = SUCCESS;
+		goto drop_rdata_kref;
+	}
+
+	rval = kref_get_unless_zero(&io_req->refcount);	/* ID: 005 */
+	if (rval)
+		got_ref = 1;
+
+	/* If we got a valid io_req, confirm it belongs to this sc_cmd. */
+	if (!rval || io_req->sc_cmd != sc_cmd) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Freed/Incorrect io_req, io_req->sc_cmd=%p, sc_cmd=%p, port_id=%06x, bailing out.\n",
+			 io_req->sc_cmd, sc_cmd, rdata->ids.port_id);
+
+		goto drop_rdata_kref;
+	}
+
+	if (fc_remote_port_chkready(rport)) {
+		refcount = kref_read(&io_req->refcount);
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "rport not ready, io_req=%p, xid=0x%x sc_cmd=%p op=0x%02x, refcount=%d, port_id=%06x\n",
+			 io_req, io_req->xid, sc_cmd, sc_cmd->cmnd[0],
+			 refcount, rdata->ids.port_id);
+
+		goto drop_rdata_kref;
+	}
+
+	rc = fc_block_scsi_eh(sc_cmd);
+	if (rc)
+		goto drop_rdata_kref;
+
+	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Connection uploading, xid=0x%x., port_id=%06x\n",
+			 io_req->xid, rdata->ids.port_id);
+		while (io_req->sc_cmd && (wait_count != 0)) {
+			msleep(100);
+			wait_count--;
+		}
+		if (wait_count) {
+			QEDF_ERR(&qedf->dbg_ctx, "ABTS succeeded\n");
+			rc = SUCCESS;
+		} else {
+			QEDF_ERR(&qedf->dbg_ctx, "ABTS failed\n");
+			rc = FAILED;
+		}
+		goto drop_rdata_kref;
+	}
+
+	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+		QEDF_ERR(&qedf->dbg_ctx, "link not ready.\n");
+		goto drop_rdata_kref;
+	}
+
+	QEDF_ERR(&qedf->dbg_ctx,
+		 "Aborting io_req=%p sc_cmd=%p xid=0x%x fp_idx=%d, port_id=%06x.\n",
+		 io_req, sc_cmd, io_req->xid, io_req->fp_idx,
+		 rdata->ids.port_id);
 
 	if (qedf->stop_io_on_error) {
 		qedf_stop_all_io(qedf);
 		rc = SUCCESS;
-		goto out;
+		goto drop_rdata_kref;
 	}
 
 	init_completion(&io_req->abts_done);
 	rval = qedf_initiate_abts(io_req, true);
 	if (rval) {
 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
-		goto out;
+		/*
+		 * If we fail to queue the ABTS then return this command to
+		 * the SCSI layer as it will own and free the xid
+		 */
+		rc = SUCCESS;
+		qedf_scsi_done(qedf, io_req, DID_ERROR);
+		goto drop_rdata_kref;
 	}
 
 	wait_for_completion(&io_req->abts_done);
@@ -684,38 +848,68 @@
 		QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n",
 			  io_req->xid);
 
+drop_rdata_kref:
+	kref_put(&rdata->kref, fc_rport_destroy);
 out:
+	if (got_ref)
+		kref_put(&io_req->refcount, qedf_release_cmd);
 	return rc;
 }
 
 static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd)
 {
-	QEDF_ERR(NULL, "TARGET RESET Issued...");
+	QEDF_ERR(NULL, "%d:0:%d:%lld: TARGET RESET Issued...",
+		 sc_cmd->device->host->host_no, sc_cmd->device->id,
+		 sc_cmd->device->lun);
 	return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
 }
 
 static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
 {
-	QEDF_ERR(NULL, "LUN RESET Issued...\n");
+	QEDF_ERR(NULL, "%d:0:%d:%lld: LUN RESET Issued... ",
+		 sc_cmd->device->host->host_no, sc_cmd->device->id,
+		 sc_cmd->device->lun);
 	return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
 }
 
-void qedf_wait_for_upload(struct qedf_ctx *qedf)
+bool qedf_wait_for_upload(struct qedf_ctx *qedf)
 {
-	while (1) {
+	struct qedf_rport *fcport = NULL;
+	int wait_cnt = 120;
+
+	while (wait_cnt--) {
 		if (atomic_read(&qedf->num_offloads))
-			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
-			    "Waiting for all uploads to complete.\n");
+			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+				  "Waiting for all uploads to complete num_offloads = 0x%x.\n",
+				  atomic_read(&qedf->num_offloads));
 		else
-			break;
+			return true;
 		msleep(500);
 	}
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
+		if (fcport && test_bit(QEDF_RPORT_SESSION_READY,
+				       &fcport->flags)) {
+			if (fcport->rdata)
+				QEDF_ERR(&qedf->dbg_ctx,
+					 "Waiting for fcport %p portid=%06x.\n",
+					 fcport, fcport->rdata->ids.port_id);
+			} else {
+				QEDF_ERR(&qedf->dbg_ctx,
+					 "Waiting for fcport %p.\n", fcport);
+			}
+	}
+	rcu_read_unlock();
+	return false;
+
 }
 
 /* Performs soft reset of qedf_ctx by simulating a link down/up */
-static void qedf_ctx_soft_reset(struct fc_lport *lport)
+void qedf_ctx_soft_reset(struct fc_lport *lport)
 {
 	struct qedf_ctx *qedf;
+	struct qed_link_output if_link;
 
 	if (lport->vport) {
 		QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n");
@@ -724,13 +918,35 @@
 
 	qedf = lport_priv(lport);
 
+	qedf->flogi_pending = 0;
 	/* For host reset, essentially do a soft link up/down */
 	atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+		  "Queuing link down work.\n");
 	queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
 	    0);
-	qedf_wait_for_upload(qedf);
+
+	if (qedf_wait_for_upload(qedf) == false) {
+		QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
+		WARN_ON(atomic_read(&qedf->num_offloads));
+	}
+
+	/* Before setting link up query physical link state */
+	qed_ops->common->get_link(qedf->cdev, &if_link);
+	/* Bail if the physical link is not up */
+	if (!if_link.link_up) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+			  "Physical link is not up.\n");
+		return;
+	}
+	/* Flush and wait to make sure link down is processed */
+	flush_delayed_work(&qedf->link_update);
+	msleep(500);
+
 	atomic_set(&qedf->link_state, QEDF_LINK_UP);
 	qedf->vlan_id  = 0;
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+		  "Queue link up work.\n");
 	queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
 	    0);
 }
@@ -740,22 +956,6 @@
 {
 	struct fc_lport *lport;
 	struct qedf_ctx *qedf;
-	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
-	struct fc_rport_libfc_priv *rp = rport->dd_data;
-	struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
-	int rval;
-
-	rval = fc_remote_port_chkready(rport);
-
-	if (rval) {
-		QEDF_ERR(NULL, "device_reset rport not ready\n");
-		return FAILED;
-	}
-
-	if (fcport == NULL) {
-		QEDF_ERR(NULL, "device_reset: rport is NULL\n");
-		return FAILED;
-	}
 
 	lport = shost_priv(sc_cmd->device->host);
 	qedf = lport_priv(lport);
@@ -785,7 +985,6 @@
 	.name 		= QEDF_MODULE_NAME,
 	.this_id 	= -1,
 	.cmd_per_lun	= 32,
-	.use_clustering = ENABLE_CLUSTERING,
 	.max_sectors 	= 0xffff,
 	.queuecommand 	= qedf_queuecommand,
 	.shost_attrs	= qedf_host_attrs,
@@ -852,9 +1051,8 @@
 	return rc;
 }
 
-/**
+/*
  * qedf_xmit - qedf FCoE frame transmit function
- *
  */
 static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
 {
@@ -908,8 +1106,10 @@
 		    "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id));
 		kfree_skb(skb);
 		rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id));
-		if (rdata)
+		if (rdata) {
 			rdata->retries = lport->max_rport_retry_count;
+			kref_put(&rdata->kref, fc_rport_destroy);
+		}
 		return -EINVAL;
 	}
 	/* End NPIV filtering */
@@ -969,7 +1169,7 @@
 			return -ENOMEM;
 		}
 		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
-		cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
+		cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
 	} else {
 		cp = skb_put(skb, tlen);
 	}
@@ -1032,7 +1232,12 @@
 	if (qedf_dump_frames)
 		print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
 		    1, skb->data, skb->len, false);
-	qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
+	rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
+	if (rc) {
+		QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc);
+		kfree_skb(skb);
+		return rc;
+	}
 
 	return 0;
 }
@@ -1051,16 +1256,17 @@
 	    sizeof(void *);
 	fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE;
 
-	fcport->sq = dma_zalloc_coherent(&qedf->pdev->dev,
-	    fcport->sq_mem_size, &fcport->sq_dma, GFP_KERNEL);
+	fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
+					&fcport->sq_dma, GFP_KERNEL);
 	if (!fcport->sq) {
 		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n");
 		rval = 1;
 		goto out;
 	}
 
-	fcport->sq_pbl = dma_zalloc_coherent(&qedf->pdev->dev,
-	    fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL);
+	fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
+					    fcport->sq_pbl_size,
+					    &fcport->sq_pbl_dma, GFP_KERNEL);
 	if (!fcport->sq_pbl) {
 		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n");
 		rval = 1;
@@ -1137,7 +1343,7 @@
 	ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr);
 
 	conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size;
-	conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov / 20;
+	conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov;
 	conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */
 	conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size;
 
@@ -1224,6 +1430,8 @@
 static void qedf_cleanup_fcport(struct qedf_ctx *qedf,
 	struct qedf_rport *fcport)
 {
+	struct fc_rport_priv *rdata = fcport->rdata;
+
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n",
 	    fcport->rdata->ids.port_id);
 
@@ -1235,9 +1443,10 @@
 	qedf_free_sq(qedf, fcport);
 	fcport->rdata = NULL;
 	fcport->qedf = NULL;
+	kref_put(&rdata->kref, fc_rport_destroy);
 }
 
-/**
+/*
  * This event_callback is called after successful completion of libfc
  * initiated target login. qedf can proceed with initiating the session
  * establishment.
@@ -1310,6 +1519,8 @@
 			break;
 		}
 
+		/* Initial reference held on entry, so this can't fail */
+		kref_get(&rdata->kref);
 		fcport->rdata = rdata;
 		fcport->rport = rport;
 
@@ -1357,6 +1568,17 @@
 		if (port_id == FC_FID_DIR_SERV)
 			break;
 
+		if (rdata->spp_type != FC_TYPE_FCP) {
+			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+			    "No action since spp type isn't FCP\n");
+			break;
+		}
+		if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
+			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+			    "Not FCP target so no action\n");
+			break;
+		}
+
 		if (!rport) {
 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
 			    "port_id=%x - rport notcreated Yet!!\n", port_id);
@@ -1369,11 +1591,15 @@
 		 */
 		fcport = (struct qedf_rport *)&rp[1];
 
+		spin_lock_irqsave(&fcport->rport_lock, flags);
 		/* Only free this fcport if it is offloaded already */
-		if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
-			set_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags);
+		if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) &&
+		    !test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
+		    &fcport->flags)) {
+			set_bit(QEDF_RPORT_UPLOADING_CONNECTION,
+				&fcport->flags);
+			spin_unlock_irqrestore(&fcport->rport_lock, flags);
 			qedf_cleanup_fcport(qedf, fcport);
-
 			/*
 			 * Remove fcport to list of qedf_ctx list of offloaded
 			 * ports
@@ -1385,8 +1611,9 @@
 			clear_bit(QEDF_RPORT_UPLOADING_CONNECTION,
 			    &fcport->flags);
 			atomic_dec(&qedf->num_offloads);
+		} else {
+			spin_unlock_irqrestore(&fcport->rport_lock, flags);
 		}
-
 		break;
 
 	case RPORT_EV_NONE:
@@ -1428,12 +1655,13 @@
 static void qedf_setup_fdmi(struct qedf_ctx *qedf)
 {
 	struct fc_lport *lport = qedf->lport;
-	struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
 	u8 buf[8];
-	int i, pos;
+	int pos;
+	uint32_t i;
 
 	/*
-	 * fdmi_enabled needs to be set for libfc to execute FDMI registration.
+	 * fdmi_enabled needs to be set for libfc
+	 * to execute FDMI registration
 	 */
 	lport->fdmi_enabled = 1;
 
@@ -1449,33 +1677,47 @@
 		for (i = 0; i < 8; i++)
 			pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);
 
-		snprintf(fc_host->serial_number,
-		    sizeof(fc_host->serial_number),
+		snprintf(fc_host_serial_number(lport->host),
+		    FC_SERIAL_NUMBER_SIZE,
 		    "%02X%02X%02X%02X%02X%02X%02X%02X",
 		    buf[7], buf[6], buf[5], buf[4],
 		    buf[3], buf[2], buf[1], buf[0]);
 	} else
-		snprintf(fc_host->serial_number,
-		    sizeof(fc_host->serial_number), "Unknown");
+		snprintf(fc_host_serial_number(lport->host),
+		    FC_SERIAL_NUMBER_SIZE, "Unknown");
 
-	snprintf(fc_host->manufacturer,
-	    sizeof(fc_host->manufacturer), "%s", "Cavium Inc.");
+	snprintf(fc_host_manufacturer(lport->host),
+	    FC_SERIAL_NUMBER_SIZE, "%s", "Marvell Semiconductor Inc.");
 
-	snprintf(fc_host->model, sizeof(fc_host->model), "%s", "QL41000");
+	if (qedf->pdev->device == QL45xxx) {
+		snprintf(fc_host_model(lport->host),
+			FC_SYMBOLIC_NAME_SIZE, "%s", "QL45xxx");
 
-	snprintf(fc_host->model_description, sizeof(fc_host->model_description),
-	    "%s", "QLogic FastLinQ QL41000 Series 10/25/40/50GGbE Controller"
-	    "(FCoE)");
+		snprintf(fc_host_model_description(lport->host),
+			FC_SYMBOLIC_NAME_SIZE, "%s",
+			"Marvell FastLinQ QL45xxx FCoE Adapter");
+	}
 
-	snprintf(fc_host->hardware_version, sizeof(fc_host->hardware_version),
-	    "Rev %d", qedf->pdev->revision);
+	if (qedf->pdev->device == QL41xxx) {
+		snprintf(fc_host_model(lport->host),
+			FC_SYMBOLIC_NAME_SIZE, "%s", "QL41xxx");
 
-	snprintf(fc_host->driver_version, sizeof(fc_host->driver_version),
-	    "%s", QEDF_VERSION);
+		snprintf(fc_host_model_description(lport->host),
+			FC_SYMBOLIC_NAME_SIZE, "%s",
+			"Marvell FastLinQ QL41xxx FCoE Adapter");
+	}
 
-	snprintf(fc_host->firmware_version, sizeof(fc_host->firmware_version),
-	    "%d.%d.%d.%d", FW_MAJOR_VERSION, FW_MINOR_VERSION,
-	    FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
+	snprintf(fc_host_hardware_version(lport->host),
+	    FC_VERSION_STRING_SIZE, "Rev %d", qedf->pdev->revision);
+
+	snprintf(fc_host_driver_version(lport->host),
+	    FC_VERSION_STRING_SIZE, "%s", QEDF_VERSION);
+
+	snprintf(fc_host_firmware_version(lport->host),
+	    FC_VERSION_STRING_SIZE, "%d.%d.%d.%d",
+	    FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
+	    FW_ENGINEERING_VERSION);
+
 }
 
 static int qedf_lport_setup(struct qedf_ctx *qedf)
@@ -1498,11 +1740,15 @@
 	fc_set_wwnn(lport, qedf->wwnn);
 	fc_set_wwpn(lport, qedf->wwpn);
 
-	fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0);
+	if (fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0)) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "fcoe_libfc_config failed.\n");
+		return -ENOMEM;
+	}
 
 	/* Allocate the exchange manager */
-	fc_exch_mgr_alloc(lport, FC_CLASS_3, qedf->max_scsi_xid + 1,
-	    qedf->max_els_xid, NULL);
+	fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_PARAMS_NUM_TASKS,
+			  0xfffe, NULL);
 
 	if (fc_lport_init_stats(lport))
 		return -ENOMEM;
@@ -1518,8 +1764,13 @@
 	fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo;
 
 	/* Set symbolic node name */
-	snprintf(fc_host_symbolic_name(lport->host), 256,
-	    "QLogic %s v%s", QEDF_MODULE_NAME, QEDF_VERSION);
+	if (qedf->pdev->device == QL45xxx)
+		snprintf(fc_host_symbolic_name(lport->host), 256,
+			"Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);
+
+	if (qedf->pdev->device == QL41xxx)
+		snprintf(fc_host_symbolic_name(lport->host), 256,
+			"Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);
 
 	qedf_setup_fdmi(qedf);
 
@@ -1577,22 +1828,20 @@
 		fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
 		QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, "
 			   "WWPN (0x%s) already exists.\n", buf);
-		goto err1;
+		return rc;
 	}
 
 	if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) {
 		QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport "
 			   "because link is not up.\n");
-		rc = -EIO;
-		goto err1;
+		return -EIO;
 	}
 
 	vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx));
 	if (!vn_port) {
 		QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport "
 			   "for vport.\n");
-		rc = -ENOMEM;
-		goto err1;
+		return -ENOMEM;
 	}
 
 	fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
@@ -1611,12 +1860,13 @@
 	vport_qedf->cmd_mgr = base_qedf->cmd_mgr;
 	init_completion(&vport_qedf->flogi_compl);
 	INIT_LIST_HEAD(&vport_qedf->fcports);
+	INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work);
 
 	rc = qedf_vport_libfc_config(vport, vn_port);
 	if (rc) {
 		QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory "
 		    "for lport stats.\n");
-		goto err2;
+		goto err;
 	}
 
 	fc_set_wwnn(vn_port, vport->node_name);
@@ -1625,15 +1875,16 @@
 	vport_qedf->wwpn = vn_port->wwpn;
 
 	vn_port->host->transportt = qedf_fc_vport_transport_template;
-	vn_port->host->can_queue = QEDF_MAX_ELS_XID;
+	vn_port->host->can_queue = FCOE_PARAMS_NUM_TASKS;
 	vn_port->host->max_lun = qedf_max_lun;
 	vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD;
 	vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN;
 
 	rc = scsi_add_host(vn_port->host, &vport->dev);
 	if (rc) {
-		QEDF_WARN(&(base_qedf->dbg_ctx), "Error adding Scsi_Host.\n");
-		goto err2;
+		QEDF_WARN(&base_qedf->dbg_ctx,
+			  "Error adding Scsi_Host rc=0x%x.\n", rc);
+		goto err;
 	}
 
 	/* Set default dev_loss_tmo based on module parameter */
@@ -1667,6 +1918,27 @@
 		fc_vport_setlink(vn_port);
 	}
 
+	/* Set symbolic node name */
+	if (base_qedf->pdev->device == QL45xxx)
+		snprintf(fc_host_symbolic_name(vn_port->host), 256,
+			 "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);
+
+	if (base_qedf->pdev->device == QL41xxx)
+		snprintf(fc_host_symbolic_name(vn_port->host), 256,
+			 "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);
+
+	/* Set supported speed */
+	fc_host_supported_speeds(vn_port->host) = n_port->link_supported_speeds;
+
+	/* Set speed */
+	vn_port->link_speed = n_port->link_speed;
+
+	/* Set port type */
+	fc_host_port_type(vn_port->host) = FC_PORTTYPE_NPIV;
+
+	/* Set maxframe size */
+	fc_host_maxframe_size(vn_port->host) = n_port->mfs;
+
 	QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n",
 		   vn_port);
 
@@ -1674,9 +1946,10 @@
 	vport_qedf->dbg_ctx.host_no = vn_port->host->host_no;
 	vport_qedf->dbg_ctx.pdev = base_qedf->pdev;
 
-err2:
+	return 0;
+
+err:
 	scsi_host_put(vn_port->host);
-err1:
 	return rc;
 }
 
@@ -1717,8 +1990,7 @@
 	fc_lport_free_stats(vn_port);
 
 	/* Release Scsi_Host */
-	if (vn_port->host)
-		scsi_host_put(vn_port->host);
+	scsi_host_put(vn_port->host);
 
 out:
 	return 0;
@@ -1771,6 +2043,13 @@
 
 	qedf_ctx_soft_reset(lport);
 	return 0;
+}
+
+static void qedf_get_host_port_id(struct Scsi_Host *shost)
+{
+	struct fc_lport *lport = shost_priv(shost);
+
+	fc_host_port_id(shost) = lport->port_id;
 }
 
 static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
@@ -1843,6 +2122,7 @@
 	.show_host_active_fc4s = 1,
 	.show_host_maxframe_size = 1,
 
+	.get_host_port_id = qedf_get_host_port_id,
 	.show_host_port_id = 1,
 	.show_host_supported_speeds = 1,
 	.get_host_speed = fc_get_host_speed,
@@ -2086,16 +2366,21 @@
 static void qedf_sync_free_irqs(struct qedf_ctx *qedf)
 {
 	int i;
+	u16 vector_idx = 0;
+	u32 vector;
 
 	if (qedf->int_info.msix_cnt) {
 		for (i = 0; i < qedf->int_info.used_cnt; i++) {
-			synchronize_irq(qedf->int_info.msix[i].vector);
-			irq_set_affinity_hint(qedf->int_info.msix[i].vector,
-			    NULL);
-			irq_set_affinity_notifier(qedf->int_info.msix[i].vector,
-			    NULL);
-			free_irq(qedf->int_info.msix[i].vector,
-			    &qedf->fp_array[i]);
+			vector_idx = i * qedf->dev_info.common.num_hwfns +
+				qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
+			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+				  "Freeing IRQ #%d vector_idx=%d.\n",
+				  i, vector_idx);
+			vector = qedf->int_info.msix[vector_idx].vector;
+			synchronize_irq(vector);
+			irq_set_affinity_hint(vector, NULL);
+			irq_set_affinity_notifier(vector, NULL);
+			free_irq(vector, &qedf->fp_array[i]);
 		}
 	} else
 		qed_ops->common->simd_handler_clean(qedf->cdev,
@@ -2108,11 +2393,19 @@
 static int qedf_request_msix_irq(struct qedf_ctx *qedf)
 {
 	int i, rc, cpu;
+	u16 vector_idx = 0;
+	u32 vector;
 
 	cpu = cpumask_first(cpu_online_mask);
 	for (i = 0; i < qedf->num_queues; i++) {
-		rc = request_irq(qedf->int_info.msix[i].vector,
-		    qedf_msix_handler, 0, "qedf", &qedf->fp_array[i]);
+		vector_idx = i * qedf->dev_info.common.num_hwfns +
+			qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+			  "Requesting IRQ #%d vector_idx=%d.\n",
+			  i, vector_idx);
+		vector = qedf->int_info.msix[vector_idx].vector;
+		rc = request_irq(vector, qedf_msix_handler, 0, "qedf",
+				 &qedf->fp_array[i]);
 
 		if (rc) {
 			QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n");
@@ -2121,8 +2414,7 @@
 		}
 
 		qedf->int_info.used_cnt++;
-		rc = irq_set_affinity_hint(qedf->int_info.msix[i].vector,
-		    get_cpu_mask(cpu));
+		rc = irq_set_affinity_hint(vector, get_cpu_mask(cpu));
 		cpu = cpumask_next(cpu, cpu_online_mask);
 	}
 
@@ -2155,7 +2447,8 @@
 	    QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler);
 	qedf->int_info.used_cnt = 1;
 
-	QEDF_ERR(&qedf->dbg_ctx, "Only MSI-X supported. Failing probe.\n");
+	QEDF_ERR(&qedf->dbg_ctx,
+		 "Cannot load driver due to a lack of MSI-X vectors.\n");
 	return -EINVAL;
 }
 
@@ -2198,12 +2491,14 @@
 	fr_dev(fp) = lport;
 	fr_sof(fp) = hp->fcoe_sof;
 	if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
+		QEDF_INFO(NULL, QEDF_LOG_LL2, "skb_copy_bits failed.\n");
 		kfree_skb(skb);
 		return;
 	}
 	fr_eof(fp) = crc_eof.fcoe_eof;
 	fr_crc(fp) = crc_eof.fcoe_crc32;
 	if (pskb_trim(skb, fr_len)) {
+		QEDF_INFO(NULL, QEDF_LOG_LL2, "pskb_trim failed.\n");
 		kfree_skb(skb);
 		return;
 	}
@@ -2264,9 +2559,9 @@
 	 * empty then this is not addressed to our port so simply drop it.
 	 */
 	if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) {
-		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
-		    "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n",
-		    lport->port_id, ntoh24(fh->fh_d_id));
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+			  "Dropping frame due to destination mismatch: lport->port_id=0x%x fh->d_id=0x%x.\n",
+			  lport->port_id, ntoh24(fh->fh_d_id));
 		kfree_skb(skb);
 		return;
 	}
@@ -2275,6 +2570,8 @@
 	if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) &&
 	    (f_ctl & FC_FC_EX_CTX)) {
 		/* Drop incoming ABTS response that has both SEQ/EX CTX set */
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+			  "Dropping ABTS response as both SEQ/EX CTX set.\n");
 		kfree_skb(skb);
 		return;
 	}
@@ -2356,6 +2653,13 @@
 	struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
 	struct qedf_skb_work *skb_work;
 
+	if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+			  "Dropping frame as link state is down.\n");
+		kfree_skb(skb);
+		return 0;
+	}
+
 	skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC);
 	if (!skb_work) {
 		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so "
@@ -2411,8 +2715,9 @@
 	    sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL);
 
 	if (!sb_virt) {
-		QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed "
-			  "for id = %d.\n", sb_id);
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Status block allocation failed for id = %d.\n",
+			 sb_id);
 		return -ENOMEM;
 	}
 
@@ -2420,8 +2725,9 @@
 	    sb_id, QED_SB_TYPE_STORAGE);
 
 	if (ret) {
-		QEDF_ERR(&(qedf->dbg_ctx), "Status block initialization "
-			  "failed for id = %d.\n", sb_id);
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Status block initialization failed (0x%x) for id = %d.\n",
+			 ret, sb_id);
 		return ret;
 	}
 
@@ -2497,6 +2803,8 @@
 	struct qedf_ioreq *io_req;
 	struct qedf_rport *fcport;
 	u32 comp_type;
+	u8 io_comp_type;
+	unsigned long flags;
 
 	comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
 	    FCOE_CQE_CQE_TYPE_MASK;
@@ -2505,13 +2813,18 @@
 	io_req = &qedf->cmd_mgr->cmds[xid];
 
 	/* Completion not for a valid I/O anymore so just return */
-	if (!io_req)
+	if (!io_req) {
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "io_req is NULL for xid=0x%x.\n", xid);
 		return;
+	}
 
 	fcport = io_req->fcport;
 
 	if (fcport == NULL) {
-		QEDF_ERR(&(qedf->dbg_ctx), "fcport is NULL.\n");
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "fcport is NULL for xid=0x%x io_req=%p.\n",
+			 xid, io_req);
 		return;
 	}
 
@@ -2520,15 +2833,19 @@
 	 * isn't valid and shouldn't be taken. We should just return.
 	 */
 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
-		QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
+		QEDF_ERR(&qedf->dbg_ctx,
+			 "Session not offloaded yet, fcport = %p.\n", fcport);
 		return;
 	}
 
+	spin_lock_irqsave(&fcport->rport_lock, flags);
+	io_comp_type = io_req->cmd_type;
+	spin_unlock_irqrestore(&fcport->rport_lock, flags);
 
 	switch (comp_type) {
 	case FCOE_GOOD_COMPLETION_CQE_TYPE:
 		atomic_inc(&fcport->free_sqes);
-		switch (io_req->cmd_type) {
+		switch (io_comp_type) {
 		case QEDF_SCSI_CMD:
 			qedf_scsi_completion(qedf, cqe, io_req);
 			break;
@@ -2681,8 +2998,10 @@
 	}
 
 	/* Allocate list of PBL pages */
-	qedf->bdq_pbl_list = dma_zalloc_coherent(&qedf->pdev->dev,
-	    QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL);
+	qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev,
+						QEDF_PAGE_SIZE,
+						&qedf->bdq_pbl_list_dma,
+						GFP_KERNEL);
 	if (!qedf->bdq_pbl_list) {
 		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n");
 		return -ENOMEM;
@@ -2709,7 +3028,7 @@
 {
 	u32 *list;
 	int i;
-	int status = 0, rc;
+	int status;
 	u32 *pbl;
 	dma_addr_t page;
 	int num_pages;
@@ -2721,7 +3040,7 @@
 	 */
 	if (!qedf->num_queues) {
 		QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
-		return 1;
+		return -ENOMEM;
 	}
 
 	/*
@@ -2729,8 +3048,8 @@
 	 * addresses of our queues
 	 */
 	if (!qedf->p_cpuq) {
-		status = 1;
-		goto mem_alloc_failure;
+		QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
+		return -EINVAL;
 	}
 
 	qedf->global_queues = kzalloc((sizeof(struct global_queue *)
@@ -2744,9 +3063,11 @@
 		   "qedf->global_queues=%p.\n", qedf->global_queues);
 
 	/* Allocate DMA coherent buffers for BDQ */
-	rc = qedf_alloc_bdq(qedf);
-	if (rc)
+	status = qedf_alloc_bdq(qedf);
+	if (status) {
+		QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n");
 		goto mem_alloc_failure;
+	}
 
 	/* Allocate a CQ and an associated PBL for each MSI-X vector */
 	for (i = 0; i < qedf->num_queues; i++) {
@@ -2771,9 +3092,10 @@
 		    ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);
 
 		qedf->global_queues[i]->cq =
-		    dma_zalloc_coherent(&qedf->pdev->dev,
-			qedf->global_queues[i]->cq_mem_size,
-			&qedf->global_queues[i]->cq_dma, GFP_KERNEL);
+		    dma_alloc_coherent(&qedf->pdev->dev,
+				       qedf->global_queues[i]->cq_mem_size,
+				       &qedf->global_queues[i]->cq_dma,
+				       GFP_KERNEL);
 
 		if (!qedf->global_queues[i]->cq) {
 			QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n");
@@ -2782,9 +3104,10 @@
 		}
 
 		qedf->global_queues[i]->cq_pbl =
-		    dma_zalloc_coherent(&qedf->pdev->dev,
-			qedf->global_queues[i]->cq_pbl_size,
-			&qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL);
+		    dma_alloc_coherent(&qedf->pdev->dev,
+				       qedf->global_queues[i]->cq_pbl_size,
+				       &qedf->global_queues[i]->cq_pbl_dma,
+				       GFP_KERNEL);
 
 		if (!qedf->global_queues[i]->cq_pbl) {
 			QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
@@ -2855,12 +3178,12 @@
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
 		   qedf->num_queues);
 
-	qedf->p_cpuq = pci_alloc_consistent(qedf->pdev,
+	qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev,
 	    qedf->num_queues * sizeof(struct qedf_glbl_q_params),
-	    &qedf->hw_p_cpuq);
+	    &qedf->hw_p_cpuq, GFP_KERNEL);
 
 	if (!qedf->p_cpuq) {
-		QEDF_ERR(&(qedf->dbg_ctx), "pci_alloc_consistent failed.\n");
+		QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n");
 		return 1;
 	}
 
@@ -2929,14 +3252,13 @@
 
 	if (qedf->p_cpuq) {
 		size = qedf->num_queues * sizeof(struct qedf_glbl_q_params);
-		pci_free_consistent(qedf->pdev, size, qedf->p_cpuq,
+		dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq,
 		    qedf->hw_p_cpuq);
 	}
 
 	qedf_free_global_queues(qedf);
 
-	if (qedf->global_queues)
-		kfree(qedf->global_queues);
+	kfree(qedf->global_queues);
 }
 
 /*
@@ -2955,6 +3277,8 @@
 	.id_table = qedf_pci_tbl,
 	.probe = qedf_probe,
 	.remove = qedf_remove,
+	.shutdown = qedf_shutdown,
+	.suspend = qedf_suspend,
 };
 
 static int __qedf_probe(struct pci_dev *pdev, int mode)
@@ -2971,12 +3295,16 @@
 	void *task_start, *task_end;
 	struct qed_slowpath_params slowpath_params;
 	struct qed_probe_params qed_params;
-	u16 tmp;
+	u16 retry_cnt = 10;
 
 	/*
 	 * When doing error recovery we didn't reap the lport so don't try
 	 * to reallocate it.
 	 */
+retry_probe:
+	if (mode == QEDF_MODE_RECOVERY)
+		msleep(2000);
+
 	if (mode != QEDF_MODE_RECOVERY) {
 		lport = libfc_host_alloc(&qedf_host_template,
 		    sizeof(struct qedf_ctx));
@@ -2986,6 +3314,8 @@
 			rc = -ENOMEM;
 			goto err0;
 		}
+
+		fc_disc_init(lport);
 
 		/* Initialize qedf_ctx */
 		qedf = lport_priv(lport);
@@ -3003,6 +3333,8 @@
 		pci_set_drvdata(pdev, qedf);
 		init_completion(&qedf->fipvlan_compl);
 		mutex_init(&qedf->stats_mutex);
+		mutex_init(&qedf->flush_mutex);
+		qedf->flogi_pending = 0;
 
 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
 		   "QLogic FastLinQ FCoE Module qedf %s, "
@@ -3036,6 +3368,7 @@
 	INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
 	INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
 	INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
+	INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work);
 	qedf->fipvlan_retries = qedf_fipvlan_retries;
 	/* Set a default prio in case DCBX doesn't converge */
 	if (qedf_default_prio > -1) {
@@ -3058,6 +3391,13 @@
 	qed_params.is_vf = is_vf;
 	qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
 	if (!qedf->cdev) {
+		if ((mode == QEDF_MODE_RECOVERY) && retry_cnt) {
+			QEDF_ERR(&qedf->dbg_ctx,
+				"Retry %d initialize hardware\n", retry_cnt);
+			retry_cnt--;
+			goto retry_probe;
+		}
+		QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n");
 		rc = -ENODEV;
 		goto err1;
 	}
@@ -3068,6 +3408,11 @@
 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n");
 		goto err1;
 	}
+
+	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+		  "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n",
+		  qedf->dev_info.common.num_hwfns,
+		  qed_ops->common->get_affin_hwfn_idx(qedf->cdev));
 
 	/* queue allocation code should come here
 	 * order should be
@@ -3083,6 +3428,13 @@
 		goto err2;
 	}
 	qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
+
+	/* Learn information crucial for qedf to progress */
+	rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
+	if (rc) {
+		QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n");
+		goto err2;
+	}
 
 	/* Record BDQ producer doorbell addresses */
 	qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
@@ -3121,8 +3473,10 @@
 
 	/* Setup interrupts */
 	rc = qedf_setup_int(qedf);
-	if (rc)
+	if (rc) {
+		QEDF_ERR(&qedf->dbg_ctx, "Setup interrupts failed.\n");
 		goto err3;
+	}
 
 	rc = qed_ops->start(qedf->cdev, &qedf->tasks);
 	if (rc) {
@@ -3145,9 +3499,9 @@
 	    "Writing %d to primary and secondary BDQ doorbell registers.\n",
 	    qedf->bdq_prod_idx);
 	writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
-	tmp = readw(qedf->bdq_primary_prod);
+	readw(qedf->bdq_primary_prod);
 	writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
-	tmp = readw(qedf->bdq_secondary_prod);
+	readw(qedf->bdq_secondary_prod);
 
 	qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
 
@@ -3182,11 +3536,6 @@
 	sprintf(host_buf, "host_%d", host->host_no);
 	qed_ops->common->set_name(qedf->cdev, host_buf);
 
-
-	/* Set xid max values */
-	qedf->max_scsi_xid = QEDF_MAX_SCSI_XID;
-	qedf->max_els_xid = QEDF_MAX_ELS_XID;
-
 	/* Allocate cmd mgr */
 	qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
 	if (!qedf->cmd_mgr) {
@@ -3197,16 +3546,19 @@
 
 	if (mode != QEDF_MODE_RECOVERY) {
 		host->transportt = qedf_fc_transport_template;
-		host->can_queue = QEDF_MAX_ELS_XID;
 		host->max_lun = qedf_max_lun;
 		host->max_cmd_len = QEDF_MAX_CDB_LEN;
+		host->can_queue = FCOE_PARAMS_NUM_TASKS;
 		rc = scsi_add_host(host, &pdev->dev);
-		if (rc)
+		if (rc) {
+			QEDF_WARN(&qedf->dbg_ctx,
+				  "Error adding Scsi_Host rc=0x%x.\n", rc);
 			goto err6;
+		}
 	}
 
 	memset(&params, 0, sizeof(params));
-	params.mtu = 9000;
+	params.mtu = QEDF_LL2_BUF_SIZE;
 	ether_addr_copy(params.ll2_mac_address, qedf->mac);
 
 	/* Start LL2 processing thread */
@@ -3269,6 +3621,7 @@
 		    qedf->lport->host->host_no);
 		qedf->dpc_wq = create_workqueue(host_buf);
 	}
+	INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
 
 	/*
 	 * GRC dump and sysfs parameters are not reaped during the recovery
@@ -3345,11 +3698,6 @@
 err1:
 	scsi_host_put(lport->host);
 err0:
-	if (qedf) {
-		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
-
-		clear_bit(QEDF_PROBING, &qedf->flags);
-	}
 	return rc;
 }
 
@@ -3387,7 +3735,9 @@
 		fcoe_ctlr_link_down(&qedf->ctlr);
 	else
 		fc_fabric_logoff(qedf->lport);
-	qedf_wait_for_upload(qedf);
+
+	if (qedf_wait_for_upload(qedf) == false)
+		QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
 
 #ifdef CONFIG_DEBUG_FS
 	qedf_dbg_host_exit(&(qedf->dbg_ctx));
@@ -3490,6 +3840,44 @@
 	qedf_capture_grc_dump(qedf);
 }
 
+void qedf_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type)
+{
+	struct qedf_ctx *qedf = dev;
+
+	QEDF_ERR(&(qedf->dbg_ctx),
+			"Hardware error handler scheduled, event=%d.\n",
+			err_type);
+
+	if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
+		QEDF_ERR(&(qedf->dbg_ctx),
+				"Already in recovery, not scheduling board disable work.\n");
+		return;
+	}
+
+	switch (err_type) {
+	case QED_HW_ERR_FAN_FAIL:
+		schedule_delayed_work(&qedf->board_disable_work, 0);
+		break;
+	case QED_HW_ERR_MFW_RESP_FAIL:
+	case QED_HW_ERR_HW_ATTN:
+	case QED_HW_ERR_DMAE_FAIL:
+	case QED_HW_ERR_FW_ASSERT:
+		/* Prevent HW attentions from being reasserted */
+		qed_ops->common->attn_clr_enable(qedf->cdev, true);
+		break;
+	case QED_HW_ERR_RAMROD_FAIL:
+		/* Prevent HW attentions from being reasserted */
+		qed_ops->common->attn_clr_enable(qedf->cdev, true);
+
+		if (qedf_enable_recovery)
+			qed_ops->common->recovery_process(qedf->cdev);
+
+		break;
+	default:
+		break;
+	}
+}
+
 /*
  * Protocol TLV handler
  */
@@ -3584,6 +3972,80 @@
 
 	fcoe->scsi_tsk_full_set = true;
 	fcoe->scsi_tsk_full = qedf->task_set_fulls;
+}
+
+/* Deferred work function to perform soft context reset on STAG change */
+void qedf_stag_change_work(struct work_struct *work)
+{
+	struct qedf_ctx *qedf =
+	    container_of(work, struct qedf_ctx, stag_work.work);
+
+	if (!qedf) {
+		QEDF_ERR(NULL, "qedf is NULL");
+		return;
+	}
+	QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
+	qedf_ctx_soft_reset(qedf->lport);
+}
+
+static void qedf_shutdown(struct pci_dev *pdev)
+{
+	__qedf_remove(pdev, QEDF_MODE_NORMAL);
+}
+
+static int qedf_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct qedf_ctx *qedf;
+
+	if (!pdev) {
+		QEDF_ERR(NULL, "pdev is NULL.\n");
+		return -ENODEV;
+	}
+
+	qedf = pci_get_drvdata(pdev);
+
+	QEDF_ERR(&qedf->dbg_ctx, "%s: Device does not support suspend operation\n", __func__);
+
+	return -EPERM;
+}
+
+/*
+ * Recovery handler code
+ */
+static void qedf_schedule_recovery_handler(void *dev)
+{
+	struct qedf_ctx *qedf = dev;
+
+	QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n");
+	schedule_delayed_work(&qedf->recovery_work, 0);
+}
+
+static void qedf_recovery_handler(struct work_struct *work)
+{
+	struct qedf_ctx *qedf =
+	    container_of(work, struct qedf_ctx, recovery_work.work);
+
+	if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags))
+		return;
+
+	/*
+	 * Call common_ops->recovery_prolog to allow the MFW to quiesce
+	 * any PCI transactions.
+	 */
+	qed_ops->common->recovery_prolog(qedf->cdev);
+
+	QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n");
+	__qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY);
+	/*
+	 * Reset link and dcbx to down state since we will not get a link down
+	 * event from the MFW but calling __qedf_remove will essentially be a
+	 * link down event.
+	 */
+	atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
+	atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
+	__qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY);
+	clear_bit(QEDF_IN_RECOVERY, &qedf->flags);
+	QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n");
 }
 
 /* Generic TLV data callback */
@@ -3712,7 +4174,7 @@
 }
 
 MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("QLogic QEDF 25/40/50/100Gb FCoE Driver");
+MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx FCoE Module");
 MODULE_AUTHOR("QLogic Corporation");
 MODULE_VERSION(QEDF_VERSION);
 module_init(qedf_init);

--
Gitblit v1.6.2