From 50a212ec906f7524620675f0c57357691c26c81f Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 16 Oct 2024 01:20:19 +0000
Subject: [PATCH] scsi: qla2xxx: modernize qla_iocb.c (DSD helpers, qpair markers, register accessor rename)
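
Update the IOCB handling in qla_iocb.c:

- Replace open-coded 32/64-bit data segment descriptor writes with
  struct dsd32/struct dsd64 and the append_dsd32()/append_dsd64()
  helpers, and use put_unaligned_le32()/put_unaligned_le64() for
  entry-type and address fields.
- Factor the outstanding-command handle search into
  qla2xxx_get_next_handle() and use it at every call site.
- Pass a struct qla_qpair to qla2x00_marker()/__qla2x00_marker()
  instead of separate request/response queues, locking via
  qpair->qp_lock_ptr.
- Rename register accessors (RD_REG_*/WRT_REG_* -> rd_reg_*/wrt_reg_*,
  MAKE_HANDLE -> make_handle) and add IS_QLA28XX() where ring indices
  are written.
- Account IOCB resources per queue pair with qla_get_iocbs()/
  qla_put_iocbs(), and check for PCI disconnect via
  qla2x00_check_reg16_for_disconnect() when reading the out pointer.
- Track I/O statistics in qpair->counters rather than vha->qla_stats.
- Extend login/logout IOCBs: NVMe PRLI service parameters (first
  burst, SLER, PI control), explicit vs. implicit LOGO control flags,
  and richer ELS PLOGI completion handling (conflict N_Port ID/loop ID
  resolution, exchange starvation).
- Add local DMA bundling for T10 DIF protection data
  (ldif_dsd_list/ldif_dma_hndl_list) when a protection SGE crosses a
  32-bit boundary or ql2xdifbundlinginternalbuffers is set.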

---
 kernel/drivers/scsi/qla2xxx/qla_iocb.c | 1391 +++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 840 insertions(+), 551 deletions(-)
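
Reviewer note (git am ignores text between the diffstat and the first
"diff --git" line): the struct dsd32/dsd64 types and the
append_dsd32()/append_dsd64() helpers used throughout this diff live in
qla_dsd.h; a minimal sketch of their semantics, assuming the upstream
definitions:

	#include <linux/scatterlist.h>
	#include <asm/unaligned.h>

	struct dsd32 {
		__le32 address;
		__le32 length;
	};

	static inline void append_dsd32(struct dsd32 **dsd,
					struct scatterlist *sg)
	{
		/* Pack one scatter/gather entry, then advance the cursor. */
		put_unaligned_le32(sg_dma_address(sg), &(*dsd)->address);
		put_unaligned_le32(sg_dma_len(sg), &(*dsd)->length);
		(*dsd)++;
	}

	struct dsd64 {
		__le64 address;
		__le32 length;
	} __packed;

	static inline void append_dsd64(struct dsd64 **dsd,
					struct scatterlist *sg)
	{
		/* 64-bit variant: 8-byte DMA address plus 4-byte length. */
		put_unaligned_le64(sg_dma_address(sg), &(*dsd)->address);
		put_unaligned_le32(sg_dma_len(sg), &(*dsd)->length);
		(*dsd)++;
	}

This is why each "*cur_dsd++ = cpu_to_le32(...)" triple in the old code
collapses to a single append_dsd64(&cur_dsd, sg) call below.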

diff --git a/kernel/drivers/scsi/qla2xxx/qla_iocb.c b/kernel/drivers/scsi/qla2xxx/qla_iocb.c
index 7e47321..1752a62 100644
--- a/kernel/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/kernel/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * QLogic Fibre Channel HBA Driver
  * Copyright (c)  2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_def.h"
 #include "qla_target.h"
@@ -44,7 +43,7 @@
  * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
  * Continuation Type 0 IOCBs to allocate.
  *
- * @dsds: number of data segment decriptors needed
+ * @dsds: number of data segment descriptors needed
  *
  * Returns the number of IOCB entries needed to store @dsds.
  */
@@ -66,7 +65,7 @@
  * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
  * Continuation Type 1 IOCBs to allocate.
  *
- * @dsds: number of data segment decriptors needed
+ * @dsds: number of data segment descriptors needed
  *
  * Returns the number of IOCB entries needed to store @dsds.
  */
@@ -107,7 +106,7 @@
 	cont_pkt = (cont_entry_t *)req->ring_ptr;
 
 	/* Load packet defaults. */
-	*((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);
+	put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);
 
 	return (cont_pkt);
 }
@@ -136,9 +135,8 @@
 	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
 
 	/* Load packet defaults. */
-	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
-	    cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
-	    cpu_to_le32(CONTINUE_A64_TYPE);
+	put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
+			   CONTINUE_A64_TYPE, &cont_pkt->entry_type);
 
 	return (cont_pkt);
 }
@@ -193,7 +191,7 @@
     uint16_t tot_dsds)
 {
 	uint16_t	avail_dsds;
-	uint32_t	*cur_dsd;
+	struct dsd32	*cur_dsd;
 	scsi_qla_host_t	*vha;
 	struct scsi_cmnd *cmd;
 	struct scatterlist *sg;
@@ -202,8 +200,7 @@
 	cmd = GET_CMD_SP(sp);
 
 	/* Update entry type to indicate Command Type 2 IOCB */
-	*((uint32_t *)(&cmd_pkt->entry_type)) =
-	    cpu_to_le32(COMMAND_TYPE);
+	put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);
 
 	/* No data transfer */
 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
@@ -215,8 +212,8 @@
 	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
 
 	/* Three DSDs are available in the Command Type 2 IOCB */
-	avail_dsds = 3;
-	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
+	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
+	cur_dsd = cmd_pkt->dsd32;
 
 	/* Load data segments */
 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
@@ -229,12 +226,11 @@
 			 * Type 0 IOCB.
 			 */
 			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
-			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
-			avail_dsds = 7;
+			cur_dsd = cont_pkt->dsd;
+			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
 		}
 
-		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
-		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		append_dsd32(&cur_dsd, sg);
 		avail_dsds--;
 	}
 }
@@ -251,7 +247,7 @@
     uint16_t tot_dsds)
 {
 	uint16_t	avail_dsds;
-	uint32_t	*cur_dsd;
+	struct dsd64	*cur_dsd;
 	scsi_qla_host_t	*vha;
 	struct scsi_cmnd *cmd;
 	struct scatterlist *sg;
@@ -260,7 +256,7 @@
 	cmd = GET_CMD_SP(sp);
 
 	/* Update entry type to indicate Command Type 3 IOCB */
-	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);
+	put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);
 
 	/* No data transfer */
 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
@@ -272,12 +268,11 @@
 	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
 
 	/* Two DSDs are available in the Command Type 3 IOCB */
-	avail_dsds = 2;
-	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
+	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
+	cur_dsd = cmd_pkt->dsd64;
 
 	/* Load data segments */
 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
-		dma_addr_t	sle_dma;
 		cont_a64_entry_t *cont_pkt;
 
 		/* Allocate additional continuation packets? */
@@ -287,16 +282,33 @@
 			 * Type 1 IOCB.
 			 */
 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
-			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
-			avail_dsds = 5;
+			cur_dsd = cont_pkt->dsd;
+			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
 		}
 
-		sle_dma = sg_dma_address(sg);
-		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		append_dsd64(&cur_dsd, sg);
 		avail_dsds--;
 	}
+}
+
+/*
+ * Find the first handle that is not in use, starting from
+ * req->current_outstanding_cmd + 1. The caller must hold the lock that is
+ * associated with @req.
+ */
+uint32_t qla2xxx_get_next_handle(struct req_que *req)
+{
+	uint32_t index, handle = req->current_outstanding_cmd;
+
+	for (index = 1; index < req->num_outstanding_cmds; index++) {
+		handle++;
+		if (handle == req->num_outstanding_cmds)
+			handle = 1;
+		if (!req->outstanding_cmds[handle])
+			return handle;
+	}
+
+	return 0;
 }
 
 /**
@@ -313,7 +325,6 @@
 	scsi_qla_host_t	*vha;
 	struct scsi_cmnd *cmd;
 	uint32_t	*clr_ptr;
-	uint32_t        index;
 	uint32_t	handle;
 	cmd_entry_t	*cmd_pkt;
 	uint16_t	cnt;
@@ -336,7 +347,7 @@
 
 	/* Send marker if required */
 	if (vha->marker_needed != 0) {
-		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
 		    QLA_SUCCESS) {
 			return (QLA_FUNCTION_FAILED);
 		}
@@ -346,16 +357,8 @@
 	/* Acquire ring specific lock */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-	if (index == req->num_outstanding_cmds)
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0)
 		goto queuing_error;
 
 	/* Map the sg table so we have an accurate count of sg entries needed */
@@ -372,7 +375,7 @@
 	/* Calculate the number of request entries needed. */
 	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
 	if (req->cnt < (req_cnt + 2)) {
-		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
+		cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
 		if (req->ring_index < cnt)
 			req->cnt = cnt - req->ring_index;
 		else
@@ -424,8 +427,8 @@
 	sp->flags |= SRB_DMA_VALID;
 
 	/* Set chip new ring index. */
-	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
-	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */
+	wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
+	rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */
 
 	/* Manage unprocessed RIO/ZIO commands in response queue. */
 	if (vha->flags.process_response_queue &&
@@ -467,22 +470,22 @@
 			req->ring_ptr++;
 
 		/* Set chip new ring index. */
-		if (ha->mqenable || IS_QLA27XX(ha)) {
-			WRT_REG_DWORD(req->req_q_in, req->ring_index);
+		if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+			wrt_reg_dword(req->req_q_in, req->ring_index);
 		} else if (IS_QLA83XX(ha)) {
-			WRT_REG_DWORD(req->req_q_in, req->ring_index);
-			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
+			wrt_reg_dword(req->req_q_in, req->ring_index);
+			rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
 		} else if (IS_QLAFX00(ha)) {
-			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
-			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
+			wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
+			rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
 			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
 		} else if (IS_FWI2_CAPABLE(ha)) {
-			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
-			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+			wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
+			rd_reg_dword_relaxed(&reg->isp24.req_q_in);
 		} else {
-			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
+			wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
 				req->ring_index);
-			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+			rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
 		}
 	}
 }
@@ -490,8 +493,7 @@
 /**
  * qla2x00_marker() - Send a marker IOCB to the firmware.
  * @vha: HA context
- * @req: request queue
- * @rsp: response queue
+ * @qpair: queue pair pointer
  * @loop_id: loop ID
  * @lun: LUN
  * @type: marker modifier
@@ -501,18 +503,16 @@
  * Returns non-zero if a failure occurred, else zero.
  */
 static int
-__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
-			struct rsp_que *rsp, uint16_t loop_id,
-			uint64_t lun, uint8_t type)
+__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
+    uint16_t loop_id, uint64_t lun, uint8_t type)
 {
 	mrk_entry_t *mrk;
 	struct mrk_entry_24xx *mrk24 = NULL;
-
+	struct req_que *req = qpair->req;
 	struct qla_hw_data *ha = vha->hw;
 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
-	req = ha->req_q_map[0];
-	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
+	mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
 	if (mrk == NULL) {
 		ql_log(ql_log_warn, base_vha, 0x3026,
 		    "Failed to allocate Marker IOCB.\n");
@@ -529,7 +529,7 @@
 			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
 			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
 			mrk24->vp_index = vha->vp_idx;
-			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
+			mrk24->handle = make_handle(req->id, mrk24->handle);
 		} else {
 			SET_TARGET_ID(ha, mrk->target, loop_id);
 			mrk->lun = cpu_to_le16((uint16_t)lun);
@@ -543,16 +543,15 @@
 }
 
 int
-qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
-		struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
-		uint8_t type)
+qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
+    uint16_t loop_id, uint64_t lun, uint8_t type)
 {
 	int ret;
 	unsigned long flags = 0;
 
-	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
-	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
-	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+	ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
+	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
 
 	return (ret);
 }
@@ -567,11 +566,11 @@
 int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
 {
 	if (ha_locked) {
-		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
+		if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
 					MK_SYNC_ALL) != QLA_SUCCESS)
 			return QLA_FUNCTION_FAILED;
 	} else {
-		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
+		if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
 					MK_SYNC_ALL) != QLA_SUCCESS)
 			return QLA_FUNCTION_FAILED;
 	}
@@ -584,26 +583,26 @@
 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
 	uint16_t tot_dsds)
 {
-	uint32_t *cur_dsd = NULL;
+	struct dsd64 *cur_dsd = NULL, *next_dsd;
 	scsi_qla_host_t	*vha;
 	struct qla_hw_data *ha;
 	struct scsi_cmnd *cmd;
 	struct	scatterlist *cur_seg;
-	uint32_t *dsd_seg;
-	void *next_dsd;
 	uint8_t avail_dsds;
 	uint8_t first_iocb = 1;
 	uint32_t dsd_list_len;
 	struct dsd_dma *dsd_ptr;
 	struct ct6_dsd *ctx;
+	struct qla_qpair *qpair = sp->qpair;
 
 	cmd = GET_CMD_SP(sp);
 
 	/* Update entry type to indicate Command Type 3 IOCB */
-	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);
+	put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);
 
 	/* No data transfer */
-	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE ||
+	    tot_dsds == 0) {
 		cmd_pkt->byte_count = cpu_to_le32(0);
 		return 0;
 	}
@@ -614,16 +613,16 @@
 	/* Set transfer direction */
 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
 		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
-		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
-		vha->qla_stats.output_requests++;
+		qpair->counters.output_bytes += scsi_bufflen(cmd);
+		qpair->counters.output_requests++;
 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
-		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
-		vha->qla_stats.input_requests++;
+		qpair->counters.input_bytes += scsi_bufflen(cmd);
+		qpair->counters.input_requests++;
 	}
 
 	cur_seg = scsi_sglist(cmd);
-	ctx = GET_CMD_CTX_SP(sp);
+	ctx = sp->u.scmd.ct6_ctx;
 
 	while (tot_dsds) {
 		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
@@ -642,33 +641,28 @@
 
 		if (first_iocb) {
 			first_iocb = 0;
-			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
-			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
-			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
-			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
+			put_unaligned_le64(dsd_ptr->dsd_list_dma,
+					   &cmd_pkt->fcp_dsd.address);
+			cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
 		} else {
-			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
-			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
-			*cur_dsd++ = cpu_to_le32(dsd_list_len);
+			put_unaligned_le64(dsd_ptr->dsd_list_dma,
+					   &cur_dsd->address);
+			cur_dsd->length = cpu_to_le32(dsd_list_len);
+			cur_dsd++;
 		}
-		cur_dsd = (uint32_t *)next_dsd;
+		cur_dsd = next_dsd;
 		while (avail_dsds) {
-			dma_addr_t	sle_dma;
-
-			sle_dma = sg_dma_address(cur_seg);
-			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
+			append_dsd64(&cur_dsd, cur_seg);
 			cur_seg = sg_next(cur_seg);
 			avail_dsds--;
 		}
 	}
 
 	/* Null termination */
-	*cur_dsd++ =  0;
-	*cur_dsd++ = 0;
-	*cur_dsd++ = 0;
-	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
+	cur_dsd->address = 0;
+	cur_dsd->length = 0;
+	cur_dsd++;
+	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
 	return 0;
 }
 
@@ -676,7 +670,7 @@
  * qla24xx_calc_dsd_lists() - Determine number of DSD list required
  * for Command Type 6.
  *
- * @dsds: number of data segment decriptors needed
+ * @dsds: number of data segment descriptors needed
  *
  * Returns the number of dsd list needed to store @dsds.
  */
@@ -706,16 +700,17 @@
 	uint16_t tot_dsds, struct req_que *req)
 {
 	uint16_t	avail_dsds;
-	uint32_t	*cur_dsd;
+	struct dsd64	*cur_dsd;
 	scsi_qla_host_t	*vha;
 	struct scsi_cmnd *cmd;
 	struct scatterlist *sg;
 	int i;
+	struct qla_qpair *qpair = sp->qpair;
 
 	cmd = GET_CMD_SP(sp);
 
 	/* Update entry type to indicate Command Type 3 IOCB */
-	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);
+	put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);
 
 	/* No data transfer */
 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
@@ -728,22 +723,21 @@
 	/* Set transfer direction */
 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
 		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
-		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
-		vha->qla_stats.output_requests++;
+		qpair->counters.output_bytes += scsi_bufflen(cmd);
+		qpair->counters.output_requests++;
 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
-		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
-		vha->qla_stats.input_requests++;
+		qpair->counters.input_bytes += scsi_bufflen(cmd);
+		qpair->counters.input_requests++;
 	}
 
 	/* One DSD is available in the Command Type 3 IOCB */
 	avail_dsds = 1;
-	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
+	cur_dsd = &cmd_pkt->dsd;
 
 	/* Load data segments */
 
 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
-		dma_addr_t	sle_dma;
 		cont_a64_entry_t *cont_pkt;
 
 		/* Allocate additional continuation packets? */
@@ -753,21 +747,18 @@
 			 * Type 1 IOCB.
 			 */
 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
-			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
-			avail_dsds = 5;
+			cur_dsd = cont_pkt->dsd;
+			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
 		}
 
-		sle_dma = sg_dma_address(sg);
-		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		append_dsd64(&cur_dsd, sg);
 		avail_dsds--;
 	}
 }
 
 struct fw_dif_context {
-	uint32_t ref_tag;
-	uint16_t app_tag;
+	__le32	ref_tag;
+	__le16	app_tag;
 	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
 	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
 };
@@ -896,14 +887,14 @@
 
 int
 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
-	uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
+	struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
 {
 	void *next_dsd;
 	uint8_t avail_dsds = 0;
 	uint32_t dsd_list_len;
 	struct dsd_dma *dsd_ptr;
 	struct scatterlist *sg_prot;
-	uint32_t *cur_dsd = dsd;
+	struct dsd64 *cur_dsd = dsd;
 	uint16_t	used_dsds = tot_dsds;
 	uint32_t	prot_int; /* protection interval */
 	uint32_t	partial;
@@ -965,8 +956,7 @@
 
 			if (sp) {
 				list_add_tail(&dsd_ptr->list,
-				    &((struct crc_context *)
-					    sp->u.scmd.ctx)->dsd_list);
+					      &sp->u.scmd.crc_ctx->dsd_list);
 
 				sp->flags |= SRB_CRC_CTX_DSD_VALID;
 			} else {
@@ -977,14 +967,14 @@
 
 
 			/* add new list to cmd iocb or last list */
-			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
-			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
-			*cur_dsd++ = dsd_list_len;
-			cur_dsd = (uint32_t *)next_dsd;
+			put_unaligned_le64(dsd_ptr->dsd_list_dma,
+					   &cur_dsd->address);
+			cur_dsd->length = cpu_to_le32(dsd_list_len);
+			cur_dsd = next_dsd;
 		}
-		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(sle_dma_len);
+		put_unaligned_le64(sle_dma, &cur_dsd->address);
+		cur_dsd->length = cpu_to_le32(sle_dma_len);
+		cur_dsd++;
 		avail_dsds--;
 
 		if (partial == 0) {
@@ -1003,22 +993,22 @@
 		}
 	}
 	/* Null termination */
-	*cur_dsd++ = 0;
-	*cur_dsd++ = 0;
-	*cur_dsd++ = 0;
+	cur_dsd->address = 0;
+	cur_dsd->length = 0;
+	cur_dsd++;
 	return 0;
 }
 
 int
-qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
-	uint16_t tot_dsds, struct qla_tc_param *tc)
+qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
+	struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
 {
 	void *next_dsd;
 	uint8_t avail_dsds = 0;
 	uint32_t dsd_list_len;
 	struct dsd_dma *dsd_ptr;
 	struct scatterlist *sg, *sgl;
-	uint32_t *cur_dsd = dsd;
+	struct dsd64 *cur_dsd = dsd;
 	int	i;
 	uint16_t	used_dsds = tot_dsds;
 	struct scsi_cmnd *cmd;
@@ -1035,8 +1025,6 @@
 
 
 	for_each_sg(sgl, sg, tot_dsds, i) {
-		dma_addr_t	sle_dma;
-
 		/* Allocate additional continuation packets? */
 		if (avail_dsds == 0) {
 			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
@@ -1065,8 +1053,7 @@
 
 			if (sp) {
 				list_add_tail(&dsd_ptr->list,
-				    &((struct crc_context *)
-					    sp->u.scmd.ctx)->dsd_list);
+					      &sp->u.scmd.crc_ctx->dsd_list);
 
 				sp->flags |= SRB_CRC_CTX_DSD_VALID;
 			} else {
@@ -1076,115 +1063,316 @@
 			}
 
 			/* add new list to cmd iocb or last list */
-			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
-			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
-			*cur_dsd++ = dsd_list_len;
-			cur_dsd = (uint32_t *)next_dsd;
+			put_unaligned_le64(dsd_ptr->dsd_list_dma,
+					   &cur_dsd->address);
+			cur_dsd->length = cpu_to_le32(dsd_list_len);
+			cur_dsd = next_dsd;
 		}
-		sle_dma = sg_dma_address(sg);
-
-		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		append_dsd64(&cur_dsd, sg);
 		avail_dsds--;
 
 	}
 	/* Null termination */
-	*cur_dsd++ = 0;
-	*cur_dsd++ = 0;
-	*cur_dsd++ = 0;
+	cur_dsd->address = 0;
+	cur_dsd->length = 0;
+	cur_dsd++;
 	return 0;
 }
 
 int
 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
-	uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
+	struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
 {
-	void *next_dsd;
-	uint8_t avail_dsds = 0;
-	uint32_t dsd_list_len;
-	struct dsd_dma *dsd_ptr;
+	struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
 	struct scatterlist *sg, *sgl;
-	int	i;
-	struct scsi_cmnd *cmd;
-	uint32_t *cur_dsd = dsd;
-	uint16_t used_dsds = tot_dsds;
+	struct crc_context *difctx = NULL;
 	struct scsi_qla_host *vha;
+	uint dsd_list_len;
+	uint avail_dsds = 0;
+	uint used_dsds = tot_dsds;
+	bool dif_local_dma_alloc = false;
+	bool direction_to_device = false;
+	int i;
 
 	if (sp) {
-		cmd = GET_CMD_SP(sp);
+		struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+
 		sgl = scsi_prot_sglist(cmd);
 		vha = sp->vha;
+		difctx = sp->u.scmd.crc_ctx;
+		direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
+		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
+		  "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
+			__func__, cmd, difctx, sp);
 	} else if (tc) {
 		vha = tc->vha;
 		sgl = tc->prot_sg;
+		difctx = tc->ctx;
+		direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
 	} else {
 		BUG();
 		return 1;
 	}
 
-	ql_dbg(ql_dbg_tgt, vha, 0xe021,
-		"%s: enter\n", __func__);
+	ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
+	    "%s: enter (write=%u)\n", __func__, direction_to_device);
 
-	for_each_sg(sgl, sg, tot_dsds, i) {
-		dma_addr_t	sle_dma;
+	/* if initiator doing write or target doing read */
+	if (direction_to_device) {
+		for_each_sg(sgl, sg, tot_dsds, i) {
+			u64 sle_phys = sg_phys(sg);
 
-		/* Allocate additional continuation packets? */
-		if (avail_dsds == 0) {
-			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
-						QLA_DSDS_PER_IOCB : used_dsds;
-			dsd_list_len = (avail_dsds + 1) * 12;
-			used_dsds -= avail_dsds;
+			/* If SGE addr + len flips bits in upper 32-bits */
+			if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
+				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
+				    "%s: page boundary crossing (phys=%llx len=%x)\n",
+				    __func__, sle_phys, sg->length);
 
-			/* allocate tracking DS */
-			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
-			if (!dsd_ptr)
-				return 1;
-
-			/* allocate new list */
-			dsd_ptr->dsd_addr = next_dsd =
-			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
-				&dsd_ptr->dsd_list_dma);
-
-			if (!next_dsd) {
-				/*
-				 * Need to cleanup only this dsd_ptr, rest
-				 * will be done by sp_free_dma()
-				 */
-				kfree(dsd_ptr);
-				return 1;
+				if (difctx) {
+					ha->dif_bundle_crossed_pages++;
+					dif_local_dma_alloc = true;
+				} else {
+					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
+					    vha, 0xe022,
+					    "%s: difctx pointer is NULL\n",
+					    __func__);
+				}
+				break;
 			}
-
-			if (sp) {
-				list_add_tail(&dsd_ptr->list,
-				    &((struct crc_context *)
-					    sp->u.scmd.ctx)->dsd_list);
-
-				sp->flags |= SRB_CRC_CTX_DSD_VALID;
-			} else {
-				list_add_tail(&dsd_ptr->list,
-				    &(tc->ctx->dsd_list));
-				*tc->ctx_dsd_alloced = 1;
-			}
-
-			/* add new list to cmd iocb or last list */
-			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
-			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
-			*cur_dsd++ = dsd_list_len;
-			cur_dsd = (uint32_t *)next_dsd;
 		}
-		sle_dma = sg_dma_address(sg);
+		ha->dif_bundle_writes++;
+	} else {
+		ha->dif_bundle_reads++;
+	}
 
-		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+	if (ql2xdifbundlinginternalbuffers)
+		dif_local_dma_alloc = direction_to_device;
 
-		avail_dsds--;
+	if (dif_local_dma_alloc) {
+		u32 track_difbundl_buf = 0;
+		u32 ldma_sg_len = 0;
+		u8 ldma_needed = 1;
+
+		difctx->no_dif_bundl = 0;
+		difctx->dif_bundl_len = 0;
+
+		/* Track DSD buffers */
+		INIT_LIST_HEAD(&difctx->ldif_dsd_list);
+		/* Track local DMA buffers */
+		INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);
+
+		for_each_sg(sgl, sg, tot_dsds, i) {
+			u32 sglen = sg_dma_len(sg);
+
+			ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
+			    "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
+			    __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
+			    difctx->dif_bundl_len, ldma_needed);
+
+			while (sglen) {
+				u32 xfrlen = 0;
+
+				if (ldma_needed) {
+					/*
+					 * Allocate list item to store
+					 * the DMA buffers
+					 */
+					dsd_ptr = kzalloc(sizeof(*dsd_ptr),
+					    GFP_ATOMIC);
+					if (!dsd_ptr) {
+						ql_dbg(ql_dbg_tgt, vha, 0xe024,
+						    "%s: failed alloc dsd_ptr\n",
+						    __func__);
+						return 1;
+					}
+					ha->dif_bundle_kallocs++;
+
+					/* allocate dma buffer */
+					dsd_ptr->dsd_addr = dma_pool_alloc
+						(ha->dif_bundl_pool, GFP_ATOMIC,
+						 &dsd_ptr->dsd_list_dma);
+					if (!dsd_ptr->dsd_addr) {
+						ql_dbg(ql_dbg_tgt, vha, 0xe024,
+						    "%s: failed alloc ->dsd_addr\n",
+						    __func__);
+						/*
+						 * need to cleanup only this
+						 * dsd_ptr rest will be done
+						 * by sp_free_dma()
+						 */
+						kfree(dsd_ptr);
+						ha->dif_bundle_kallocs--;
+						return 1;
+					}
+					ha->dif_bundle_dma_allocs++;
+					ldma_needed = 0;
+					difctx->no_dif_bundl++;
+					list_add_tail(&dsd_ptr->list,
+					    &difctx->ldif_dma_hndl_list);
+				}
+
+				/* xfrlen is min of dma pool size and sglen */
+				xfrlen = (sglen >
+				   (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
+				    DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
+				    sglen;
+
+				/* replace with local allocated dma buffer */
+				sg_pcopy_to_buffer(sgl, sg_nents(sgl),
+				    dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
+				    difctx->dif_bundl_len);
+				difctx->dif_bundl_len += xfrlen;
+				sglen -= xfrlen;
+				ldma_sg_len += xfrlen;
+				if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
+				    sg_is_last(sg)) {
+					ldma_needed = 1;
+					ldma_sg_len = 0;
+				}
+			}
+		}
+
+		track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
+		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
+		    "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
+		    difctx->dif_bundl_len, difctx->no_dif_bundl,
+		    track_difbundl_buf);
+
+		if (sp)
+			sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
+		else
+			tc->prot_flags = DIF_BUNDL_DMA_VALID;
+
+		list_for_each_entry_safe(dif_dsd, nxt_dsd,
+		    &difctx->ldif_dma_hndl_list, list) {
+			u32 sglen = (difctx->dif_bundl_len >
+			    DIF_BUNDLING_DMA_POOL_SIZE) ?
+			    DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;
+
+			BUG_ON(track_difbundl_buf == 0);
+
+			/* Allocate additional continuation packets? */
+			if (avail_dsds == 0) {
+				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
+				    0xe024,
+				    "%s: adding continuation iocb's\n",
+				    __func__);
+				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+				    QLA_DSDS_PER_IOCB : used_dsds;
+				dsd_list_len = (avail_dsds + 1) * 12;
+				used_dsds -= avail_dsds;
+
+				/* allocate tracking DS */
+				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
+				if (!dsd_ptr) {
+					ql_dbg(ql_dbg_tgt, vha, 0xe026,
+					    "%s: failed alloc dsd_ptr\n",
+					    __func__);
+					return 1;
+				}
+				ha->dif_bundle_kallocs++;
+
+				difctx->no_ldif_dsd++;
+				/* allocate new list */
+				dsd_ptr->dsd_addr =
+				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+					&dsd_ptr->dsd_list_dma);
+				if (!dsd_ptr->dsd_addr) {
+					ql_dbg(ql_dbg_tgt, vha, 0xe026,
+					    "%s: failed alloc ->dsd_addr\n",
+					    __func__);
+					/*
+					 * need to cleanup only this dsd_ptr
+					 *  rest will be done by sp_free_dma()
+					 */
+					kfree(dsd_ptr);
+					ha->dif_bundle_kallocs--;
+					return 1;
+				}
+				ha->dif_bundle_dma_allocs++;
+
+				if (sp) {
+					list_add_tail(&dsd_ptr->list,
+					    &difctx->ldif_dsd_list);
+					sp->flags |= SRB_CRC_CTX_DSD_VALID;
+				} else {
+					list_add_tail(&dsd_ptr->list,
+					    &difctx->ldif_dsd_list);
+					tc->ctx_dsd_alloced = 1;
+				}
+
+				/* add new list to cmd iocb or last list */
+				put_unaligned_le64(dsd_ptr->dsd_list_dma,
+						   &cur_dsd->address);
+				cur_dsd->length = cpu_to_le32(dsd_list_len);
+				cur_dsd = dsd_ptr->dsd_addr;
+			}
+			put_unaligned_le64(dif_dsd->dsd_list_dma,
+					   &cur_dsd->address);
+			cur_dsd->length = cpu_to_le32(sglen);
+			cur_dsd++;
+			avail_dsds--;
+			difctx->dif_bundl_len -= sglen;
+			track_difbundl_buf--;
+		}
+
+		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
+		    "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
+			difctx->no_ldif_dsd, difctx->no_dif_bundl);
+	} else {
+		for_each_sg(sgl, sg, tot_dsds, i) {
+			/* Allocate additional continuation packets? */
+			if (avail_dsds == 0) {
+				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
+				    QLA_DSDS_PER_IOCB : used_dsds;
+				dsd_list_len = (avail_dsds + 1) * 12;
+				used_dsds -= avail_dsds;
+
+				/* allocate tracking DS */
+				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
+				if (!dsd_ptr) {
+					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
+					    vha, 0xe027,
+					    "%s: failed alloc dsd_dma...\n",
+					    __func__);
+					return 1;
+				}
+
+				/* allocate new list */
+				dsd_ptr->dsd_addr =
+				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
+					&dsd_ptr->dsd_list_dma);
+				if (!dsd_ptr->dsd_addr) {
+					/* need to cleanup only this dsd_ptr */
+					/* rest will be done by sp_free_dma() */
+					kfree(dsd_ptr);
+					return 1;
+				}
+
+				if (sp) {
+					list_add_tail(&dsd_ptr->list,
+					    &difctx->dsd_list);
+					sp->flags |= SRB_CRC_CTX_DSD_VALID;
+				} else {
+					list_add_tail(&dsd_ptr->list,
+					    &difctx->dsd_list);
+					tc->ctx_dsd_alloced = 1;
+				}
+
+				/* add new list to cmd iocb or last list */
+				put_unaligned_le64(dsd_ptr->dsd_list_dma,
+						   &cur_dsd->address);
+				cur_dsd->length = cpu_to_le32(dsd_list_len);
+				cur_dsd = dsd_ptr->dsd_addr;
+			}
+			append_dsd64(&cur_dsd, sg);
+			avail_dsds--;
+		}
 	}
 	/* Null termination */
-	*cur_dsd++ = 0;
-	*cur_dsd++ = 0;
-	*cur_dsd++ = 0;
+	cur_dsd->address = 0;
+	cur_dsd->length = 0;
+	cur_dsd++;
 	return 0;
 }
 
@@ -1195,14 +1383,15 @@
  * @sp: SRB command to process
  * @cmd_pkt: Command type 3 IOCB
  * @tot_dsds: Total number of segments to transfer
- * @tot_prot_dsds:
- * @fw_prot_opts:
+ * @tot_prot_dsds: Total number of segments with protection information
+ * @fw_prot_opts: Protection options to be passed to firmware
  */
-inline int
+static inline int
 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
     uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
 {
-	uint32_t		*cur_dsd, *fcp_dl;
+	struct dsd64		*cur_dsd;
+	__be32			*fcp_dl;
 	scsi_qla_host_t		*vha;
 	struct scsi_cmnd	*cmd;
 	uint32_t		total_bytes = 0;
@@ -1220,7 +1409,7 @@
 	cmd = GET_CMD_SP(sp);
 
 	/* Update entry type to indicate Command Type CRC_2 IOCB */
-	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);
+	put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);
 
 	vha = sp->vha;
 	ha = vha->hw;
@@ -1250,7 +1439,7 @@
 		bundling = 0;
 
 	/* Allocate CRC context from global pool */
-	crc_ctx_pkt = sp->u.scmd.ctx =
+	crc_ctx_pkt = sp->u.scmd.crc_ctx =
 	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
 
 	if (!crc_ctx_pkt)
@@ -1268,9 +1457,8 @@
 	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
 	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);
 
-	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
-	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
-	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
+	put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
+	cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
 
 	/* Determine SCSI command length -- align to 4 byte boundary */
 	if (cmd->cmd_len > 16) {
@@ -1296,10 +1484,8 @@
 	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
 	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
 	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
-	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
-	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
-	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
-	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
+	put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
+			   &cmd_pkt->fcp_cmnd_dseg_address);
 	fcp_cmnd->task_management = 0;
 	fcp_cmnd->task_attribute = TSK_SIMPLE;
 
@@ -1313,18 +1499,18 @@
 	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
 	case SCSI_PROT_READ_INSERT:
 	case SCSI_PROT_WRITE_STRIP:
-	    total_bytes = data_bytes;
-	    data_bytes += dif_bytes;
-	    break;
+		total_bytes = data_bytes;
+		data_bytes += dif_bytes;
+		break;
 
 	case SCSI_PROT_READ_STRIP:
 	case SCSI_PROT_WRITE_INSERT:
 	case SCSI_PROT_READ_PASS:
 	case SCSI_PROT_WRITE_PASS:
-	    total_bytes = data_bytes + dif_bytes;
-	    break;
+		total_bytes = data_bytes + dif_bytes;
+		break;
 	default:
-	    BUG();
+		BUG();
 	}
 
 	if (!qla2x00_hba_err_chk_enabled(sp))
@@ -1341,7 +1527,7 @@
 	}
 
 	if (!bundling) {
-		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
+		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
 	} else {
 		/*
 		 * Configure Bundling if we need to fetch interlaving
@@ -1351,7 +1537,7 @@
 		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
 		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
 							tot_prot_dsds);
-		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
+		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
 	}
 
 	/* Finish the common fields of CRC pkt */
@@ -1361,7 +1547,7 @@
 	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
 	/* Fibre channel byte count */
 	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
-	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
+	fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
 	    additional_fcpcdb_len);
 	*fcp_dl = htonl(total_bytes);
 
@@ -1384,7 +1570,7 @@
 	if (bundling && tot_prot_dsds) {
 		/* Walks dif segments */
 		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
-		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
+		cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
 		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
 				tot_prot_dsds, NULL))
 			goto crc_queuing_error;
@@ -1409,28 +1595,25 @@
 	int		nseg;
 	unsigned long   flags;
 	uint32_t	*clr_ptr;
-	uint32_t        index;
 	uint32_t	handle;
 	struct cmd_type_7 *cmd_pkt;
 	uint16_t	cnt;
 	uint16_t	req_cnt;
 	uint16_t	tot_dsds;
 	struct req_que *req = NULL;
-	struct rsp_que *rsp = NULL;
 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 	struct scsi_qla_host *vha = sp->vha;
 	struct qla_hw_data *ha = vha->hw;
 
 	/* Setup device pointers. */
 	req = vha->req;
-	rsp = req->rsp;
 
 	/* So we know we haven't pci_map'ed anything yet */
 	tot_dsds = 0;
 
 	/* Send marker if required */
 	if (vha->marker_needed != 0) {
-		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
 		    QLA_SUCCESS)
 			return QLA_FUNCTION_FAILED;
 		vha->marker_needed = 0;
@@ -1439,16 +1622,8 @@
 	/* Acquire ring specific lock */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-	if (index == req->num_outstanding_cmds)
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0)
 		goto queuing_error;
 
 	/* Map the sg table so we have an accurate count of sg entries needed */
@@ -1462,9 +1637,21 @@
 
 	tot_dsds = nseg;
 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+	sp->iores.res_type = RESOURCE_INI;
+	sp->iores.iocb_cnt = req_cnt;
+	if (qla_get_iocbs(sp->qpair, &sp->iores))
+		goto queuing_error;
+
 	if (req->cnt < (req_cnt + 2)) {
-		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
-		    RD_REG_DWORD_RELAXED(req->req_q_out);
+		if (IS_SHADOW_REG_CAPABLE(ha)) {
+			cnt = *req->out_ptr;
+		} else {
+			cnt = rd_reg_dword_relaxed(req->req_q_out);
+			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+				goto queuing_error;
+		}
+
 		if (req->ring_index < cnt)
 			req->cnt = cnt - req->ring_index;
 		else
@@ -1482,7 +1669,7 @@
 	req->cnt -= req_cnt;
 
 	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
-	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+	cmd_pkt->handle = make_handle(req->id, handle);
 
 	/* Zero out remaining portion of packet. */
 	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -1525,7 +1712,7 @@
 	sp->flags |= SRB_DMA_VALID;
 
 	/* Set chip new ring index. */
-	WRT_REG_DWORD(req->req_q_in, req->ring_index);
+	wrt_reg_dword(req->req_q_in, req->ring_index);
 
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	return QLA_SUCCESS;
@@ -1534,6 +1721,7 @@
 	if (tot_dsds)
 		scsi_dma_unmap(cmd);
 
+	qla_put_iocbs(sp->qpair, &sp->iores);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	return QLA_FUNCTION_FAILED;
@@ -1551,7 +1739,6 @@
 	int			nseg;
 	unsigned long		flags;
 	uint32_t		*clr_ptr;
-	uint32_t		index;
 	uint32_t		handle;
 	uint16_t		cnt;
 	uint16_t		req_cnt = 0;
@@ -1583,7 +1770,7 @@
 
 	/* Send marker if required */
 	if (vha->marker_needed != 0) {
-		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
 		    QLA_SUCCESS)
 			return QLA_FUNCTION_FAILED;
 		vha->marker_needed = 0;
@@ -1592,17 +1779,8 @@
 	/* Acquire ring specific lock */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-
-	if (index == req->num_outstanding_cmds)
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0)
 		goto queuing_error;
 
 	/* Compute number of required data segments */
@@ -1657,9 +1835,20 @@
 	/* Total Data and protection sg segment(s) */
 	tot_prot_dsds = nseg;
 	tot_dsds += nseg;
+
+	sp->iores.res_type = RESOURCE_INI;
+	sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+	if (qla_get_iocbs(sp->qpair, &sp->iores))
+		goto queuing_error;
+
 	if (req->cnt < (req_cnt + 2)) {
-		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
-		    RD_REG_DWORD_RELAXED(req->req_q_out);
+		if (IS_SHADOW_REG_CAPABLE(ha)) {
+			cnt = *req->out_ptr;
+		} else {
+			cnt = rd_reg_dword_relaxed(req->req_q_out);
+			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+				goto queuing_error;
+		}
 		if (req->ring_index < cnt)
 			req->cnt = cnt - req->ring_index;
 		else
@@ -1680,7 +1869,7 @@
 
 	/* Fill-in common area */
 	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
-	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+	cmd_pkt->handle = make_handle(req->id, handle);
 
 	clr_ptr = (uint32_t *)cmd_pkt + 2;
 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
@@ -1718,7 +1907,7 @@
 		req->ring_ptr++;
 
 	/* Set chip new ring index. */
-	WRT_REG_DWORD(req->req_q_in, req->ring_index);
+	wrt_reg_dword(req->req_q_in, req->ring_index);
 
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
@@ -1731,7 +1920,9 @@
 	}
 	/* Cleanup will be performed by the caller (queuecommand) */
 
+	qla_put_iocbs(sp->qpair, &sp->iores);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
 	return QLA_FUNCTION_FAILED;
 }
 
@@ -1747,14 +1938,12 @@
 	int		nseg;
 	unsigned long   flags;
 	uint32_t	*clr_ptr;
-	uint32_t        index;
 	uint32_t	handle;
 	struct cmd_type_7 *cmd_pkt;
 	uint16_t	cnt;
 	uint16_t	req_cnt;
 	uint16_t	tot_dsds;
 	struct req_que *req = NULL;
-	struct rsp_que *rsp = NULL;
 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 	struct scsi_qla_host *vha = sp->fcport->vha;
 	struct qla_hw_data *ha = vha->hw;
@@ -1764,7 +1953,6 @@
 	spin_lock_irqsave(&qpair->qp_lock, flags);
 
 	/* Setup qpair pointers */
-	rsp = qpair->rsp;
 	req = qpair->req;
 
 	/* So we know we haven't pci_map'ed anything yet */
@@ -1772,7 +1960,7 @@
 
 	/* Send marker if required */
 	if (vha->marker_needed != 0) {
-		if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+		if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
 		    QLA_SUCCESS) {
 			spin_unlock_irqrestore(&qpair->qp_lock, flags);
 			return QLA_FUNCTION_FAILED;
@@ -1780,16 +1968,8 @@
 		vha->marker_needed = 0;
 	}
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-	if (index == req->num_outstanding_cmds)
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0)
 		goto queuing_error;
 
 	/* Map the sg table so we have an accurate count of sg entries needed */
@@ -1803,9 +1983,21 @@
 
 	tot_dsds = nseg;
 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+	sp->iores.res_type = RESOURCE_INI;
+	sp->iores.iocb_cnt = req_cnt;
+	if (qla_get_iocbs(sp->qpair, &sp->iores))
+		goto queuing_error;
+
 	if (req->cnt < (req_cnt + 2)) {
-		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
-		    RD_REG_DWORD_RELAXED(req->req_q_out);
+		if (IS_SHADOW_REG_CAPABLE(ha)) {
+			cnt = *req->out_ptr;
+		} else {
+			cnt = rd_reg_dword_relaxed(req->req_q_out);
+			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+				goto queuing_error;
+		}
+
 		if (req->ring_index < cnt)
 			req->cnt = cnt - req->ring_index;
 		else
@@ -1823,7 +2015,7 @@
 	req->cnt -= req_cnt;
 
 	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
-	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+	cmd_pkt->handle = make_handle(req->id, handle);
 
 	/* Zero out remaining portion of packet. */
 	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -1866,7 +2058,7 @@
 	sp->flags |= SRB_DMA_VALID;
 
 	/* Set chip new ring index. */
-	WRT_REG_DWORD(req->req_q_in, req->ring_index);
+	wrt_reg_dword(req->req_q_in, req->ring_index);
 
 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
 	return QLA_SUCCESS;
@@ -1875,6 +2067,7 @@
 	if (tot_dsds)
 		scsi_dma_unmap(cmd);
 
+	qla_put_iocbs(sp->qpair, &sp->iores);
 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
 
 	return QLA_FUNCTION_FAILED;
@@ -1893,7 +2086,6 @@
 	int			nseg;
 	unsigned long		flags;
 	uint32_t		*clr_ptr;
-	uint32_t		index;
 	uint32_t		handle;
 	uint16_t		cnt;
 	uint16_t		req_cnt = 0;
@@ -1940,7 +2132,7 @@
 
 	/* Send marker if required */
 	if (vha->marker_needed != 0) {
-		if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+		if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
 		    QLA_SUCCESS) {
 			spin_unlock_irqrestore(&qpair->qp_lock, flags);
 			return QLA_FUNCTION_FAILED;
@@ -1948,17 +2140,8 @@
 		vha->marker_needed = 0;
 	}
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-
-	if (index == req->num_outstanding_cmds)
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0)
 		goto queuing_error;
 
 	/* Compute number of required data segments */
@@ -2013,9 +2196,21 @@
 	/* Total Data and protection sg segment(s) */
 	tot_prot_dsds = nseg;
 	tot_dsds += nseg;
+
+	sp->iores.res_type = RESOURCE_INI;
+	sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+	if (qla_get_iocbs(sp->qpair, &sp->iores))
+		goto queuing_error;
+
 	if (req->cnt < (req_cnt + 2)) {
-		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
-		    RD_REG_DWORD_RELAXED(req->req_q_out);
+		if (IS_SHADOW_REG_CAPABLE(ha)) {
+			cnt = *req->out_ptr;
+		} else {
+			cnt = rd_reg_dword_relaxed(req->req_q_out);
+			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+				goto queuing_error;
+		}
+
 		if (req->ring_index < cnt)
 			req->cnt = cnt - req->ring_index;
 		else
@@ -2036,7 +2231,7 @@
 
 	/* Fill-in common area */
 	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
-	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+	cmd_pkt->handle = make_handle(req->id, handle);
 
 	clr_ptr = (uint32_t *)cmd_pkt + 2;
 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
@@ -2072,7 +2267,7 @@
 		req->ring_ptr++;
 
 	/* Set chip new ring index. */
-	WRT_REG_DWORD(req->req_q_in, req->ring_index);
+	wrt_reg_dword(req->req_q_in, req->ring_index);
 
 	/* Manage unprocessed RIO/ZIO commands in response queue. */
 	if (vha->flags.process_response_queue &&
@@ -2090,7 +2285,9 @@
 	}
 	/* Cleanup will be performed by the caller (queuecommand) */
 
+	qla_put_iocbs(sp->qpair, &sp->iores);
 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
 	return QLA_FUNCTION_FAILED;
 }
 
@@ -2105,7 +2302,7 @@
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = qpair->req;
 	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
-	uint32_t index, handle;
+	uint32_t handle;
 	request_t *pkt;
 	uint16_t cnt, req_cnt;
 
@@ -2122,17 +2319,23 @@
 	if (req->cnt < req_cnt + 2) {
 		if (qpair->use_shadow_reg)
 			cnt = *req->out_ptr;
-		else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
-			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
+		else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
+		    IS_QLA28XX(ha))
+			cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
 		else if (IS_P3P_TYPE(ha))
-			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
+			cnt = rd_reg_dword(reg->isp82.req_q_out);
 		else if (IS_FWI2_CAPABLE(ha))
-			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
+			cnt = rd_reg_dword(&reg->isp24.req_q_out);
 		else if (IS_QLAFX00(ha))
-			cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
+			cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
 		else
 			cnt = qla2x00_debounce_register(
 			    ISP_REQ_Q_OUT(ha, &reg->isp));
+
+		if (!qpair->use_shadow_reg && cnt == ISP_REG16_DISCONNECT) {
+			qla_schedule_eeh_work(vha);
+			return NULL;
+		}
 
 		if  (req->ring_index < cnt)
 			req->cnt = cnt - req->ring_index;
@@ -2144,16 +2347,8 @@
 		goto queuing_error;
 
 	if (sp) {
-		/* Check for room in outstanding command list. */
-		handle = req->current_outstanding_cmd;
-		for (index = 1; index < req->num_outstanding_cmds; index++) {
-			handle++;
-			if (handle == req->num_outstanding_cmds)
-				handle = 1;
-			if (!req->outstanding_cmds[handle])
-				break;
-		}
-		if (index == req->num_outstanding_cmds) {
+		handle = qla2xxx_get_next_handle(req);
+		if (handle == 0) {
 			ql_log(ql_log_warn, vha, 0x700b,
 			    "No room on outstanding cmd array.\n");
 			goto queuing_error;
@@ -2170,8 +2365,8 @@
 	pkt = req->ring_ptr;
 	memset(pkt, 0, REQUEST_ENTRY_SIZE);
 	if (IS_QLAFX00(ha)) {
-		WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
-		WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
+		wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt);
+		wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle);
 	} else {
 		pkt->entry_count = req_cnt;
 		pkt->handle = handle;
@@ -2208,8 +2403,20 @@
 
 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
 	logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
-	if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
-		logio->control_flags |= LCF_NVME_PRLI;
+	if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
+		logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
+		if (sp->vha->flags.nvme_first_burst)
+			logio->io_parameter[0] =
+				cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
+		if (sp->vha->flags.nvme2_enabled) {
+			/* Set service parameter BIT_8 for SLER support */
+			logio->io_parameter[0] |=
+				cpu_to_le32(NVME_PRLI_SP_SLER);
+			/* Set service parameter BIT_9 for PI control support */
+			logio->io_parameter[0] |=
+				cpu_to_le32(NVME_PRLI_SP_PI_CTRL);
+		}
+	}
 
 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
 	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -2224,6 +2431,8 @@
 	struct srb_iocb *lio = &sp->u.iocb_cmd;
 
 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
+
 	if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
 		logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
 	} else {
@@ -2267,12 +2476,19 @@
 static void
 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
 {
+	u16 control_flags = LCF_COMMAND_LOGO;
 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
-	logio->control_flags =
-	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
-	if (!sp->fcport->se_sess ||
-	    !sp->fcport->keep_nport_handle)
-		logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
+
+	if (sp->fcport->explicit_logout) {
+		control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT;
+	} else {
+		control_flags |= LCF_IMPL_LOGO;
+
+		if (!sp->fcport->keep_nport_handle)
+			control_flags |= LCF_FREE_NPORT;
+	}
+
+	logio->control_flags = cpu_to_le16(control_flags);
 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
 	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
 	logio->port_id[1] = sp->fcport->d_id.b.area;
@@ -2289,7 +2505,7 @@
 	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
 	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
 	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
-	    cpu_to_le16(sp->fcport->loop_id):
+	    cpu_to_le16(sp->fcport->loop_id) :
 	    cpu_to_le16(sp->fcport->loop_id << 8);
 	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
 	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
@@ -2344,7 +2560,7 @@
 
 	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
 	tsk->entry_count = 1;
-	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
+	tsk->handle = make_handle(req->id, tsk->handle);
 	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
 	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
 	tsk->control_flags = cpu_to_le32(flags);
@@ -2360,10 +2576,18 @@
 	}
 }
 
-static void
-qla2x00_els_dcmd_sp_free(void *data)
+void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
 {
-	srb_t *sp = data;
+	timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
+	sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
+	sp->free = qla2x00_sp_free;
+	if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
+		init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
+	sp->start_timer = 1;
+}
+
+static void qla2x00_els_dcmd_sp_free(srb_t *sp)
+{
 	struct srb_iocb *elsio = &sp->u.iocb_cmd;
 
 	kfree(sp->fcport);
@@ -2384,19 +2608,36 @@
 	fc_port_t *fcport = sp->fcport;
 	struct scsi_qla_host *vha = sp->vha;
 	struct srb_iocb *lio = &sp->u.iocb_cmd;
+	unsigned long flags = 0;
+	int res, h;
 
 	ql_dbg(ql_dbg_io, vha, 0x3069,
 	    "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
 	    sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
 	    fcport->d_id.b.al_pa);
 
-	complete(&lio->u.els_logo.comp);
+	/* Abort the exchange */
+	res = qla24xx_async_abort_cmd(sp, false);
+	if (res) {
+		ql_dbg(ql_dbg_io, vha, 0x3070,
+		    "mbx abort_command failed.\n");
+		spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+		for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
+			if (sp->qpair->req->outstanding_cmds[h] == sp) {
+				sp->qpair->req->outstanding_cmds[h] = NULL;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+		complete(&lio->u.els_logo.comp);
+	} else {
+		ql_dbg(ql_dbg_io, vha, 0x3071,
+		    "mbx abort_command success.\n");
+	}
 }
 
-static void
-qla2x00_els_dcmd_sp_done(void *ptr, int res)
+static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
 {
-	srb_t *sp = ptr;
 	fc_port_t *fcport = sp->fcport;
 	struct srb_iocb *lio = &sp->u.iocb_cmd;
 	struct scsi_qla_host *vha = sp->vha;
@@ -2474,6 +2715,10 @@
 
 	memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
 	    sizeof(struct els_logo_payload));
+	ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:");
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a,
+		       elsio->u.els_logo.els_logo_pyld,
+		       sizeof(*elsio->u.els_logo.els_logo_pyld));
 
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS) {
@@ -2504,35 +2749,31 @@
 	els_iocb->entry_status = 0;
 	els_iocb->handle = sp->handle;
 	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
-	els_iocb->tx_dsd_count = 1;
+	els_iocb->tx_dsd_count = cpu_to_le16(1);
 	els_iocb->vp_index = vha->vp_idx;
 	els_iocb->sof_type = EST_SOFI3;
 	els_iocb->rx_dsd_count = 0;
 	els_iocb->opcode = elsio->u.els_logo.els_cmd;
 
-	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
-	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
-	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
-	els_iocb->s_id[0] = vha->d_id.b.al_pa;
-	els_iocb->s_id[1] = vha->d_id.b.area;
-	els_iocb->s_id[2] = vha->d_id.b.domain;
-	els_iocb->control_flags = 0;
+	els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
+	els_iocb->d_id[1] = sp->fcport->d_id.b.area;
+	els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
+	/* For SID the byte order is different than DID */
+	els_iocb->s_id[1] = vha->d_id.b.al_pa;
+	els_iocb->s_id[2] = vha->d_id.b.area;
+	els_iocb->s_id[0] = vha->d_id.b.domain;
 
 	if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
+		els_iocb->control_flags = 0;
 		els_iocb->tx_byte_count = els_iocb->tx_len =
-			sizeof(struct els_plogi_payload);
-		els_iocb->tx_address[0] =
-			cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
-		els_iocb->tx_address[1] =
-			cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
-
-		els_iocb->rx_dsd_count = 1;
+			cpu_to_le32(sizeof(struct els_plogi_payload));
+		put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
+				   &els_iocb->tx_address);
+		els_iocb->rx_dsd_count = cpu_to_le16(1);
 		els_iocb->rx_byte_count = els_iocb->rx_len =
-			sizeof(struct els_plogi_payload);
-		els_iocb->rx_address[0] =
-			cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
-		els_iocb->rx_address[1] =
-			cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
+			cpu_to_le32(sizeof(struct els_plogi_payload));
+		put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
+				   &els_iocb->rx_address);
 
 		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
 		    "PLOGI ELS IOCB:\n");
@@ -2540,17 +2781,21 @@
 		    (uint8_t *)els_iocb,
 		    sizeof(*els_iocb));
 	} else {
-		els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
-		els_iocb->tx_address[0] =
-		    cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
-		els_iocb->tx_address[1] =
-		    cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
+		els_iocb->control_flags = cpu_to_le16(1 << 13);
+		els_iocb->tx_byte_count =
+			cpu_to_le32(sizeof(struct els_logo_payload));
+		put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
+				   &els_iocb->tx_address);
 		els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
 
 		els_iocb->rx_byte_count = 0;
-		els_iocb->rx_address[0] = 0;
-		els_iocb->rx_address[1] = 0;
+		els_iocb->rx_address = 0;
 		els_iocb->rx_len = 0;
+		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076,
+		       "LOGO ELS IOCB:");
+		ql_dump_buffer(ql_log_info, vha, 0x010b,
+			       els_iocb,
+			       sizeof(*els_iocb));
 	}
 
 	sp->vha->qla_stats.control_requests++;
@@ -2562,34 +2807,57 @@
 	srb_t *sp = data;
 	fc_port_t *fcport = sp->fcport;
 	struct scsi_qla_host *vha = sp->vha;
-	struct qla_hw_data *ha = vha->hw;
 	unsigned long flags = 0;
-	int res;
+	int res, h;
 
 	ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
 	    "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
 	    sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
 
 	/* Abort the exchange */
-	spin_lock_irqsave(&ha->hardware_lock, flags);
-	res = ha->isp_ops->abort_command(sp);
+	res = qla24xx_async_abort_cmd(sp, false);
 	ql_dbg(ql_dbg_io, vha, 0x3070,
 	    "mbx abort_command %s\n",
 	    (res == QLA_SUCCESS) ? "successful" : "failed");
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
-	sp->done(sp, QLA_FUNCTION_TIMEOUT);
+	if (res) {
+		spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+		for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
+			if (sp->qpair->req->outstanding_cmds[h] == sp) {
+				sp->qpair->req->outstanding_cmds[h] = NULL;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+		sp->done(sp, QLA_FUNCTION_TIMEOUT);
+	}
 }
 
-static void
-qla2x00_els_dcmd2_sp_done(void *ptr, int res)
+void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
 {
-	srb_t *sp = ptr;
+	if (els_plogi->els_plogi_pyld)
+		dma_free_coherent(&vha->hw->pdev->dev,
+				  els_plogi->tx_size,
+				  els_plogi->els_plogi_pyld,
+				  els_plogi->els_plogi_pyld_dma);
+
+	if (els_plogi->els_resp_pyld)
+		dma_free_coherent(&vha->hw->pdev->dev,
+				  els_plogi->rx_size,
+				  els_plogi->els_resp_pyld,
+				  els_plogi->els_resp_pyld_dma);
+}
+
+static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
+{
 	fc_port_t *fcport = sp->fcport;
 	struct srb_iocb *lio = &sp->u.iocb_cmd;
 	struct scsi_qla_host *vha = sp->vha;
 	struct event_arg ea;
 	struct qla_work_evt *e;
+	struct fc_port *conflict_fcport;
+	port_id_t cid;	/* conflict Nport id */
+	const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
+	u16 lid;
 
 	ql_dbg(ql_dbg_disc, vha, 0x3072,
 	    "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
@@ -2601,31 +2869,109 @@
 	if (sp->flags & SRB_WAKEUP_ON_COMP)
 		complete(&lio->u.els_plogi.comp);
 	else {
-		if (res) {
-			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
-		} else {
+		switch (le32_to_cpu(fw_status[0])) {
+		case CS_DATA_UNDERRUN:
+		case CS_COMPLETE:
 			memset(&ea, 0, sizeof(ea));
 			ea.fcport = fcport;
 			ea.rc = res;
-			ea.event = FCME_ELS_PLOGI_DONE;
-			qla2x00_fcport_event_handler(vha, &ea);
+			qla_handle_els_plogi_done(vha, &ea);
+			break;
+
+		case CS_IOCB_ERROR:
+			switch (le32_to_cpu(fw_status[1])) {
+			case LSC_SCODE_PORTID_USED:
+				lid = le32_to_cpu(fw_status[2]) & 0xffff;
+				qlt_find_sess_invalidate_other(vha,
+				    wwn_to_u64(fcport->port_name),
+				    fcport->d_id, lid, &conflict_fcport);
+				if (conflict_fcport) {
+					/*
+					 * Another fcport shares the same
+					 * loop_id & nport id; conflict
+					 * fcport needs to finish cleanup
+					 * before this fcport can proceed
+					 * to login.
+					 */
+					conflict_fcport->conflict = fcport;
+					fcport->login_pause = 1;
+					ql_dbg(ql_dbg_disc, vha, 0x20ed,
+					    "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n",
+					    __func__, __LINE__,
+					    fcport->port_name,
+					    fcport->d_id.b24, lid);
+				} else {
+					ql_dbg(ql_dbg_disc, vha, 0x20ed,
+					    "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
+					    __func__, __LINE__,
+					    fcport->port_name,
+					    fcport->d_id.b24, lid);
+					qla2x00_clear_loop_id(fcport);
+					set_bit(lid, vha->hw->loop_id_map);
+					fcport->loop_id = lid;
+					fcport->keep_nport_handle = 0;
+					qlt_schedule_sess_for_deletion(fcport);
+				}
+				break;
+
+			case LSC_SCODE_NPORT_USED:
+				cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16)
+					& 0xff;
+				cid.b.area   = (le32_to_cpu(fw_status[2]) >>  8)
+					& 0xff;
+				cid.b.al_pa  = le32_to_cpu(fw_status[2]) & 0xff;
+				cid.b.rsvd_1 = 0;
+
+				ql_dbg(ql_dbg_disc, vha, 0x20ec,
+				    "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
+				    __func__, __LINE__, fcport->port_name,
+				    fcport->loop_id, cid.b24);
+				set_bit(fcport->loop_id,
+				    vha->hw->loop_id_map);
+				fcport->loop_id = FC_NO_LOOP_ID;
+				qla24xx_post_gnl_work(vha, fcport);
+				break;
+
+			case LSC_SCODE_NOXCB:
+				vha->hw->exch_starvation++;
+				if (vha->hw->exch_starvation > 5) {
+					ql_log(ql_log_warn, vha, 0xd046,
+					    "Exchange starvation. Resetting RISC\n");
+					vha->hw->exch_starvation = 0;
+					set_bit(ISP_ABORT_NEEDED,
+					    &vha->dpc_flags);
+					qla2xxx_wake_dpc(vha);
+					break;
+				}
+				fallthrough;
+			default:
+				ql_dbg(ql_dbg_disc, vha, 0x20eb,
+				    "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
+				    __func__, sp->fcport->port_name,
+				    fw_status[0], fw_status[1], fw_status[2]);
+
+				fcport->flags &= ~FCF_ASYNC_SENT;
+				qlt_schedule_sess_for_deletion(fcport);
+				break;
+			}
+			break;
+
+		default:
+			ql_dbg(ql_dbg_disc, vha, 0x20eb,
+			    "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
+			    __func__, sp->fcport->port_name,
+			    fw_status[0], fw_status[1], fw_status[2]);
+
+			sp->fcport->flags &= ~FCF_ASYNC_SENT;
+			qlt_schedule_sess_for_deletion(fcport);
+			break;
 		}
 
 		e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
 		if (!e) {
 			struct srb_iocb *elsio = &sp->u.iocb_cmd;
 
-			if (elsio->u.els_plogi.els_plogi_pyld)
-				dma_free_coherent(&sp->vha->hw->pdev->dev,
-				    elsio->u.els_plogi.tx_size,
-				    elsio->u.els_plogi.els_plogi_pyld,
-				    elsio->u.els_plogi.els_plogi_pyld_dma);
-
-			if (elsio->u.els_plogi.els_resp_pyld)
-				dma_free_coherent(&sp->vha->hw->pdev->dev,
-				    elsio->u.els_plogi.rx_size,
-				    elsio->u.els_plogi.els_resp_pyld,
-				    elsio->u.els_plogi.els_resp_pyld_dma);
+			qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
 			sp->free(sp);
 			return;
 		}
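
/*
 * A standalone sketch of the LSC_SCODE_NPORT_USED decode in the hunk
 * above: the conflicting 24-bit N_Port ID arrives packed in the third
 * firmware status word as domain:area:al_pa, high byte to low byte.
 * Illustrative types only; the driver uses port_id_t and le32_to_cpu().
 */
#include <assert.h>
#include <stdint.h>

struct sketch_port_id { uint8_t domain, area, al_pa; };

static struct sketch_port_id unpack_pid(uint32_t fw_status2)
{
	struct sketch_port_id id;

	id.domain = (fw_status2 >> 16) & 0xff;
	id.area   = (fw_status2 >>  8) & 0xff;
	id.al_pa  =  fw_status2        & 0xff;
	return id;
}

int main(void)
{
	struct sketch_port_id id = unpack_pid(0x00010203);

	assert(id.domain == 0x01 && id.area == 0x02 && id.al_pa == 0x03);
	return 0;
}
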
@@ -2643,27 +2989,27 @@
 	struct qla_hw_data *ha = vha->hw;
 	int rval = QLA_SUCCESS;
 	void	*ptr, *resp_ptr;
-	dma_addr_t ptr_dma;
 
 	/* Alloc SRB structure */
 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 	if (!sp) {
 		ql_log(ql_log_info, vha, 0x70e6,
 		 "SRB allocation failed\n");
+		fcport->flags &= ~FCF_ASYNC_ACTIVE;
 		return -ENOMEM;
 	}
 
+	fcport->flags |= FCF_ASYNC_SENT;
+	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
 	elsio = &sp->u.iocb_cmd;
 	ql_dbg(ql_dbg_io, vha, 0x3073,
 	    "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
 
-	fcport->flags |= FCF_ASYNC_SENT;
 	sp->type = SRB_ELS_DCMD;
 	sp->name = "ELS_DCMD";
 	sp->fcport = fcport;
 
 	elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
-	init_completion(&elsio->u.els_plogi.comp);
 	if (wait)
 		sp->flags = SRB_WAKEUP_ON_COMP;
 
@@ -2673,9 +3019,8 @@
 	elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
 
 	ptr = elsio->u.els_plogi.els_plogi_pyld =
-	    dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
+	    dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size,
 		&elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
-	ptr_dma = elsio->u.els_plogi.els_plogi_pyld_dma;
 
 	if (!elsio->u.els_plogi.els_plogi_pyld) {
 		rval = QLA_FUNCTION_FAILED;
@@ -2683,7 +3028,7 @@
 	}
 
 	resp_ptr = elsio->u.els_plogi.els_resp_pyld =
-	    dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
+	    dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size,
 		&elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
 
 	if (!elsio->u.els_plogi.els_resp_pyld) {
@@ -2696,8 +3041,7 @@
 	memset(ptr, 0, sizeof(struct els_plogi_payload));
 	memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
 	memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
-	    &ha->plogi_els_payld.data,
-	    sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
+	    &ha->plogi_els_payld.fl_csp, LOGIN_TEMPLATE_SIZE);
 
 	elsio->u.els_plogi.els_cmd = els_opcode;
 	elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
@@ -2707,6 +3051,7 @@
 	    (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
 	    sizeof(*elsio->u.els_plogi.els_plogi_pyld));
 
+	init_completion(&elsio->u.els_plogi.comp);
 	rval = qla2x00_start_sp(sp);
 	if (rval != QLA_SUCCESS) {
 		rval = QLA_FUNCTION_FAILED;
@@ -2727,19 +3072,8 @@
 	}
 
 out:
-	fcport->flags &= ~(FCF_ASYNC_SENT);
-	if (elsio->u.els_plogi.els_plogi_pyld)
-		dma_free_coherent(&sp->vha->hw->pdev->dev,
-		    elsio->u.els_plogi.tx_size,
-		    elsio->u.els_plogi.els_plogi_pyld,
-		    elsio->u.els_plogi.els_plogi_pyld_dma);
-
-	if (elsio->u.els_plogi.els_resp_pyld)
-		dma_free_coherent(&sp->vha->hw->pdev->dev,
-		    elsio->u.els_plogi.rx_size,
-		    elsio->u.els_plogi.els_resp_pyld,
-		    elsio->u.els_plogi.els_resp_pyld_dma);
-
+	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+	qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
 	sp->free(sp);
 done:
 	return rval;
@@ -2756,7 +3090,7 @@
         els_iocb->sys_define = 0;
         els_iocb->entry_status = 0;
         els_iocb->handle = sp->handle;
-        els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
 	els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
 	els_iocb->vp_index = sp->vha->vp_idx;
         els_iocb->sof_type = EST_SOFI3;
@@ -2766,26 +3100,22 @@
 	    sp->type == SRB_ELS_CMD_RPT ?
 	    bsg_request->rqst_data.r_els.els_code :
 	    bsg_request->rqst_data.h_els.command_code;
-        els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
-        els_iocb->port_id[1] = sp->fcport->d_id.b.area;
-        els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+	els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
+	els_iocb->d_id[1] = sp->fcport->d_id.b.area;
+	els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
         els_iocb->control_flags = 0;
         els_iocb->rx_byte_count =
             cpu_to_le32(bsg_job->reply_payload.payload_len);
         els_iocb->tx_byte_count =
             cpu_to_le32(bsg_job->request_payload.payload_len);
 
-        els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
-            (bsg_job->request_payload.sg_list)));
-        els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
-            (bsg_job->request_payload.sg_list)));
+	put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
+			   &els_iocb->tx_address);
         els_iocb->tx_len = cpu_to_le32(sg_dma_len
             (bsg_job->request_payload.sg_list));
 
-        els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
-            (bsg_job->reply_payload.sg_list)));
-        els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
-            (bsg_job->reply_payload.sg_list)));
+	put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
+			   &els_iocb->rx_address);
         els_iocb->rx_len = cpu_to_le32(sg_dma_len
             (bsg_job->reply_payload.sg_list));
 
@@ -2796,14 +3126,13 @@
 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
 {
 	uint16_t        avail_dsds;
-	uint32_t        *cur_dsd;
+	struct dsd64	*cur_dsd;
 	struct scatterlist *sg;
 	int index;
 	uint16_t tot_dsds;
 	scsi_qla_host_t *vha = sp->vha;
 	struct qla_hw_data *ha = vha->hw;
 	struct bsg_job *bsg_job = sp->u.bsg_job;
-	int loop_iterartion = 0;
 	int entry_count = 1;
 
 	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
@@ -2823,25 +3152,20 @@
 	ct_iocb->rsp_bytecount =
 	    cpu_to_le32(bsg_job->reply_payload.payload_len);
 
-	ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
-	    (bsg_job->request_payload.sg_list)));
-	ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
-	    (bsg_job->request_payload.sg_list)));
-	ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
+	put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
+			   &ct_iocb->req_dsd.address);
+	ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
 
-	ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
-	    (bsg_job->reply_payload.sg_list)));
-	ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
-	    (bsg_job->reply_payload.sg_list)));
-	ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
+	put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
+			   &ct_iocb->rsp_dsd.address);
+	ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
 
 	avail_dsds = 1;
-	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
+	cur_dsd = &ct_iocb->rsp_dsd;
 	index = 0;
 	tot_dsds = bsg_job->reply_payload.sg_cnt;
 
 	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
-		dma_addr_t       sle_dma;
 		cont_a64_entry_t *cont_pkt;
 
 		/* Allocate additional continuation packets? */
@@ -2852,16 +3176,12 @@
 			       */
 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
 			    vha->hw->req_q_map[0]);
-			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+			cur_dsd = cont_pkt->dsd;
 			avail_dsds = 5;
 			entry_count++;
 		}
 
-		sle_dma = sg_dma_address(sg);
-		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
-		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
-		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
-		loop_iterartion++;
+		append_dsd64(&cur_dsd, sg);
 		avail_dsds--;
 	}
 	ct_iocb->entry_count = entry_count;
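
/*
 * A standalone sketch of the append_dsd64() pattern used above,
 * assuming the dsd64 layout this series introduces (a le64 address
 * followed by a le32 length). Plain stores match the wire format
 * only on a little-endian host; the kernel helper uses
 * put_unaligned_le64() and cpu_to_le32(). sg_addr/sg_len stand in
 * for sg_dma_address()/sg_dma_len().
 */
#include <assert.h>
#include <stdint.h>

struct sketch_dsd64 {
	uint64_t address;		/* le64 on the wire */
	uint32_t length;		/* le32 on the wire */
} __attribute__((packed));

static void append_dsd64_sketch(struct sketch_dsd64 **cur,
				uint64_t sg_addr, uint32_t sg_len)
{
	(*cur)->address = sg_addr;
	(*cur)->length = sg_len;
	(*cur)++;			/* advance to the next descriptor */
}

int main(void)
{
	struct sketch_dsd64 dsds[2], *cur = dsds;

	append_dsd64_sketch(&cur, 0x1000, 512);
	append_dsd64_sketch(&cur, 0x2000, 1024);
	assert(cur == &dsds[2] && dsds[1].length == 1024);
	return 0;
}
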
@@ -2873,7 +3193,7 @@
 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
 {
 	uint16_t        avail_dsds;
-	uint32_t        *cur_dsd;
+	struct dsd64	*cur_dsd;
 	struct scatterlist *sg;
 	int index;
 	uint16_t cmd_dsds, rsp_dsds;
@@ -2902,12 +3222,10 @@
             cpu_to_le32(bsg_job->request_payload.payload_len);
 
 	avail_dsds = 2;
-	cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
+	cur_dsd = ct_iocb->dsd;
 	index = 0;
 
 	for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
-		dma_addr_t       sle_dma;
-
 		/* Allocate additional continuation packets? */
 		if (avail_dsds == 0) {
 			/*
@@ -2916,23 +3234,18 @@
 			 */
 			cont_pkt = qla2x00_prep_cont_type1_iocb(
 			    vha, ha->req_q_map[0]);
-			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+			cur_dsd = cont_pkt->dsd;
 			avail_dsds = 5;
 			entry_count++;
 		}
 
-		sle_dma = sg_dma_address(sg);
-		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
-		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
-		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+		append_dsd64(&cur_dsd, sg);
 		avail_dsds--;
 	}
 
 	index = 0;
 
 	for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
-		dma_addr_t       sle_dma;
-
 		/* Allocate additional continuation packets? */
 		if (avail_dsds == 0) {
 			/*
@@ -2941,15 +3254,12 @@
 			       */
 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
 			    ha->req_q_map[0]);
-			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+			cur_dsd = cont_pkt->dsd;
 			avail_dsds = 5;
 			entry_count++;
 		}
 
-		sle_dma = sg_dma_address(sg);
-		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
-		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
-		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+		append_dsd64(&cur_dsd, sg);
 		avail_dsds--;
 	}
         ct_iocb->entry_count = entry_count;
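
/*
 * A standalone sketch of the entry-count arithmetic implied above:
 * the CT command IOCB itself carries two descriptors and each
 * continuation type-1 IOCB adds five more, so entry_count grows by
 * one whenever avail_dsds is exhausted. The constants mirror the
 * visible code, not a firmware specification.
 */
#include <assert.h>
#include <stdint.h>

static uint16_t ct_entry_count_sketch(uint16_t tot_dsds)
{
	uint16_t entries = 1;		/* the command IOCB */
	uint16_t avail = 2;		/* DSDs in the command IOCB */

	while (tot_dsds > avail) {
		tot_dsds -= avail;
		avail = 5;		/* DSDs per continuation IOCB */
		entries++;
	}
	return entries;
}

int main(void)
{
	assert(ct_entry_count_sketch(2) == 1);
	assert(ct_entry_count_sketch(3) == 2);	/* one continuation packet */
	assert(ct_entry_count_sketch(8) == 3);	/* 2 + 5 fit in two IOCBs */
	return 0;
}
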
@@ -2968,14 +3278,13 @@
 	unsigned long   flags;
 	struct scsi_cmnd *cmd;
 	uint32_t	*clr_ptr;
-	uint32_t        index;
 	uint32_t	handle;
 	uint16_t	cnt;
 	uint16_t	req_cnt;
 	uint16_t	tot_dsds;
 	struct device_reg_82xx __iomem *reg;
 	uint32_t dbval;
-	uint32_t *fcp_dl;
+	__be32 *fcp_dl;
 	uint8_t additional_cdb_len;
 	struct ct6_dsd *ctx;
 	struct scsi_qla_host *vha = sp->vha;
@@ -2996,8 +3305,8 @@
 
 	/* Send marker if required */
 	if (vha->marker_needed != 0) {
-		if (qla2x00_marker(vha, req,
-			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+		if (qla2x00_marker(vha, ha->base_qpair,
+			0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
 			ql_log(ql_log_warn, vha, 0x300c,
 			    "qla2x00_marker failed for cmd=%p.\n", cmd);
 			return QLA_FUNCTION_FAILED;
@@ -3008,16 +3317,8 @@
 	/* Acquire ring specific lock */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-	if (index == req->num_outstanding_cmds)
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0)
 		goto queuing_error;
 
 	/* Map the sg table so we have an accurate count of sg entries needed */
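
/*
 * A standalone sketch of the circular search that the new
 * qla2xxx_get_next_handle() helper factors out of the removed
 * open-coded loop above: start just past the last handle issued,
 * wrap at the end of the table, skip reserved slot 0, and return 0
 * only when every slot is busy. Illustrative types only.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct sketch_req {
	uint32_t current_outstanding_cmd;
	uint32_t num_outstanding_cmds;
	void **outstanding_cmds;
};

static uint32_t get_next_handle_sketch(struct sketch_req *req)
{
	uint32_t index, handle = req->current_outstanding_cmd;

	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;		/* handle 0 is reserved */
		if (!req->outstanding_cmds[handle])
			return handle;
	}
	return 0;				/* table full */
}

int main(void)
{
	static int busy;
	void *cmds[4] = { NULL, &busy, NULL, &busy };
	struct sketch_req req = { 3, 4, cmds };

	assert(get_next_handle_sketch(&req) == 2);	/* wrapped past slot 0 */
	return 0;
}
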
@@ -3077,7 +3378,7 @@
 		req_cnt = 1;
 
 		if (req->cnt < (req_cnt + 2)) {
-			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+			cnt = (uint16_t)rd_reg_dword_relaxed(
 				&reg->req_q_out[0]);
 			if (req->ring_index < cnt)
 				req->cnt = cnt - req->ring_index;
@@ -3088,7 +3389,7 @@
 				goto queuing_error;
 		}
 
-		ctx = sp->u.scmd.ctx =
+		ctx = sp->u.scmd.ct6_ctx =
 		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
 		if (!ctx) {
 			ql_log(ql_log_fatal, vha, 0x3010,
@@ -3127,7 +3428,7 @@
 		}
 
 		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
-		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+		cmd_pkt->handle = make_handle(req->id, handle);
 
 		/* Zero out remaining portion of packet. */
 		/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -3165,15 +3466,13 @@
 
 		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
 
-		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
+		fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
 		    additional_cdb_len);
 		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
 
 		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
-		cmd_pkt->fcp_cmnd_dseg_address[0] =
-		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
-		cmd_pkt->fcp_cmnd_dseg_address[1] =
-		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
+		put_unaligned_le64(ctx->fcp_cmnd_dma,
+				   &cmd_pkt->fcp_cmnd_dseg_address);
 
 		sp->flags |= SRB_FCP_CMND_DMA_VALID;
 		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
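
/*
 * A standalone sketch of the fcp_dl store above: the FCP_CMND data
 * length sits immediately after the 16 fixed CDB bytes plus any
 * additional CDB bytes, and is big-endian on the wire, hence the
 * __be32 retyping. be32_store_sketch() stands in for the htonl()
 * assignment in the driver.
 */
#include <assert.h>
#include <stdint.h>

static void be32_store_sketch(uint8_t *p, uint32_t v)
{
	p[0] = (uint8_t)(v >> 24);	/* MSB first */
	p[1] = (uint8_t)(v >> 16);
	p[2] = (uint8_t)(v >> 8);
	p[3] = (uint8_t)v;
}

int main(void)
{
	uint8_t fcp_cdb[32] = { 0 };	/* CDB area of an FCP_CMND */
	uint8_t additional_cdb_len = 4;	/* hypothetical 20-byte CDB */
	uint8_t *fcp_dl = fcp_cdb + 16 + additional_cdb_len;

	be32_store_sketch(fcp_dl, 4096);	/* scsi_bufflen(cmd) */
	assert(fcp_dl[2] == 0x10 && fcp_dl[3] == 0x00);
	return 0;
}
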
@@ -3185,9 +3484,10 @@
 		cmd_pkt->entry_status = (uint8_t) rsp->id;
 	} else {
 		struct cmd_type_7 *cmd_pkt;
+
 		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
 		if (req->cnt < (req_cnt + 2)) {
-			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+			cnt = (uint16_t)rd_reg_dword_relaxed(
 			    &reg->req_q_out[0]);
 			if (req->ring_index < cnt)
 				req->cnt = cnt - req->ring_index;
@@ -3199,7 +3499,7 @@
 			goto queuing_error;
 
 		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
-		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+		cmd_pkt->handle = make_handle(req->id, handle);
 
 		/* Zero out remaining portion of packet. */
 		/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
@@ -3263,10 +3563,10 @@
 	if (ql2xdbwr)
 		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
 	else {
-		WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
+		wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
 		wmb();
-		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
-			WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
+		while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
+			wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
 			wmb();
 		}
 	}
@@ -3285,9 +3585,9 @@
 	if (tot_dsds)
 		scsi_dma_unmap(cmd);
 
-	if (sp->u.scmd.ctx) {
-		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
-		sp->u.scmd.ctx = NULL;
+	if (sp->u.scmd.crc_ctx) {
+		mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
+		sp->u.scmd.crc_ctx = NULL;
 	}
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
@@ -3304,7 +3604,7 @@
 	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
 	abt_iocb->entry_type = ABORT_IOCB_TYPE;
 	abt_iocb->entry_count = 1;
-	abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+	abt_iocb->handle = make_handle(req->id, sp->handle);
 	if (sp->fcport) {
 		abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
 		abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -3312,10 +3612,10 @@
 		abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
 	}
 	abt_iocb->handle_to_abort =
-	    cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
-				    aio->u.abt.cmd_hndl));
+		make_handle(le16_to_cpu(aio->u.abt.req_que_no),
+			    aio->u.abt.cmd_hndl);
 	abt_iocb->vp_index = vha->vp_idx;
-	abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
+	abt_iocb->req_que_no = aio->u.abt.req_que_no;
 	/* Send the command to the firmware */
 	wmb();
 }
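
/*
 * A standalone sketch of the make_handle() packing visible above,
 * assuming the same layout as the old MAKE_HANDLE() macro: the
 * request-queue id in the upper 16 bits of the handle, the
 * per-queue command handle in the lower 16.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t make_handle_sketch(uint16_t que_id, uint16_t handle)
{
	return ((uint32_t)que_id << 16) | handle;
}

int main(void)
{
	uint32_t h = make_handle_sketch(2, 0x0042);

	assert(h == 0x00020042);
	assert((h >> 16) == 2 && (h & 0xffff) == 0x42);
	return 0;
}
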
@@ -3330,7 +3630,7 @@
 	sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
 
 	for (i = 0; i < sz; i++)
-		mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
+		mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i];
 }
 
 static void
@@ -3354,7 +3654,7 @@
 	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
 	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
 		nack->u.isp24.flags = ntfy->u.isp24.flags &
-			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+			cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
 	}
 	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
 	nack->u.isp24.status = ntfy->u.isp24.status;
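
/*
 * A standalone sketch of why the mask above moves from cpu_to_le32()
 * to cpu_to_le16(): a le16 field must be masked with a constant
 * converted at the same 16-bit width. sw16()/sw32() emulate
 * cpu_to_le16()/cpu_to_le32() on a big-endian host; at 32 bits the
 * swapped bit lands outside the 16-bit field and the mask selects
 * nothing. The flag value is hypothetical.
 */
#include <assert.h>
#include <stdint.h>

static uint16_t sw16(uint16_t v) { return (uint16_t)((v << 8) | (v >> 8)); }

static uint32_t sw32(uint32_t v)
{
	return (v << 24) | ((v & 0xff00) << 8) |
	       ((v >> 8) & 0xff00) | (v >> 24);
}

int main(void)
{
	uint16_t flag = 0x0008;		/* hypothetical flag, cpu order */
	uint16_t wire = sw16(0x000c);	/* le16 field as a BE host sees it */

	assert((wire & sw16(flag)) == sw16(flag));	/* 16-bit: bit kept */
	assert((wire & (uint16_t)sw32(flag)) == 0);	/* 32-bit: bit lost */
	return 0;
}
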
@@ -3372,34 +3672,29 @@
 /*
  * Build NVME LS request
  */
-static int
+static void
 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
 {
 	struct srb_iocb *nvme;
-	int     rval = QLA_SUCCESS;
 
 	nvme = &sp->u.iocb_cmd;
 	cmd_pkt->entry_type = PT_LS4_REQUEST;
 	cmd_pkt->entry_count = 1;
-	cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
+	cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
 
 	cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
 	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
 	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
 
-	cmd_pkt->tx_dseg_count = 1;
-	cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
-	cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
-	cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
-	cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
+	cmd_pkt->tx_dseg_count = cpu_to_le16(1);
+	cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
+	cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len);
+	put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
 
-	cmd_pkt->rx_dseg_count = 1;
-	cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
-	cmd_pkt->dseg1_len  = nvme->u.nvme.rsp_len;
-	cmd_pkt->dseg1_address[0] =  cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
-	cmd_pkt->dseg1_address[1] =  cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
-
-	return rval;
+	cmd_pkt->rx_dseg_count = cpu_to_le16(1);
+	cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
+	cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len);
+	put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
 }
 
 static void
@@ -3439,22 +3734,25 @@
 int
 qla2x00_start_sp(srb_t *sp)
 {
-	int rval;
+	int rval = QLA_SUCCESS;
 	scsi_qla_host_t *vha = sp->vha;
 	struct qla_hw_data *ha = vha->hw;
+	struct qla_qpair *qp = sp->qpair;
 	void *pkt;
 	unsigned long flags;
 
-	rval = QLA_FUNCTION_FAILED;
-	spin_lock_irqsave(&ha->hardware_lock, flags);
-	pkt = qla2x00_alloc_iocbs(vha, sp);
+	if (vha->hw->flags.eeh_busy)
+		return -EIO;
+
+	spin_lock_irqsave(qp->qp_lock_ptr, flags);
+	pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
 	if (!pkt) {
+		rval = -EAGAIN;
 		ql_log(ql_log_warn, vha, 0x700c,
 		    "qla2x00_alloc_iocbs failed.\n");
 		goto done;
 	}
 
-	rval = QLA_SUCCESS;
 	switch (sp->type) {
 	case SRB_LOGIN_CMD:
 		IS_FWI2_CAPABLE(ha) ?
@@ -3524,10 +3822,13 @@
 		break;
 	}
 
+	if (sp->start_timer)
+		add_timer(&sp->u.iocb_cmd.timer);
+
 	wmb();
-	qla2x00_start_iocbs(vha, ha->req_q_map[0]);
+	qla2x00_start_iocbs(vha, qp->req);
 done:
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
 	return rval;
 }
 
@@ -3536,7 +3837,7 @@
 				struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
 {
 	uint16_t avail_dsds;
-	uint32_t *cur_dsd;
+	struct dsd64 *cur_dsd;
 	uint32_t req_data_len = 0;
 	uint32_t rsp_data_len = 0;
 	struct scatterlist *sg;
@@ -3545,8 +3846,7 @@
 	struct bsg_job *bsg_job = sp->u.bsg_job;
 
 	/*Update entry type to indicate bidir command */
-	*((uint32_t *)(&cmd_pkt->entry_type)) =
-		cpu_to_le32(COMMAND_BIDIRECTIONAL);
+	put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
 
 	/* Set the transfer direction, in this set both flags
 	 * Also set the BD_WRAP_BACK flag, firmware will take care
@@ -3572,13 +3872,12 @@
 	 * are bundled in continuation iocb
 	 */
 	avail_dsds = 1;
-	cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
+	cur_dsd = &cmd_pkt->fcp_dsd;
 
 	index = 0;
 
 	for_each_sg(bsg_job->request_payload.sg_list, sg,
 				bsg_job->request_payload.sg_cnt, index) {
-		dma_addr_t sle_dma;
 		cont_a64_entry_t *cont_pkt;
 
 		/* Allocate additional continuation packets */
@@ -3587,14 +3886,11 @@
 			 * 5 DSDS
 			 */
 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
-			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+			cur_dsd = cont_pkt->dsd;
 			avail_dsds = 5;
 			entry_count++;
 		}
-		sle_dma = sg_dma_address(sg);
-		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
-		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
-		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+		append_dsd64(&cur_dsd, sg);
 		avail_dsds--;
 	}
 	/* For read request DSD will always goes to continuation IOCB
@@ -3604,7 +3900,6 @@
 	 */
 	for_each_sg(bsg_job->reply_payload.sg_list, sg,
 				bsg_job->reply_payload.sg_cnt, index) {
-		dma_addr_t sle_dma;
 		cont_a64_entry_t *cont_pkt;
 
 		/* Allocate additional continuation packets */
@@ -3613,14 +3908,11 @@
 			 * 5 DSDS
 			 */
 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
-			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
+			cur_dsd = cont_pkt->dsd;
 			avail_dsds = 5;
 			entry_count++;
 		}
-		sle_dma = sg_dma_address(sg);
-		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
-		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
-		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+		append_dsd64(&cur_dsd, sg);
 		avail_dsds--;
 	}
 	/* This value should be same as number of IOCB required for this cmd */
@@ -3634,7 +3926,6 @@
 	struct qla_hw_data *ha = vha->hw;
 	unsigned long flags;
 	uint32_t handle;
-	uint32_t index;
 	uint16_t req_cnt;
 	uint16_t cnt;
 	uint32_t *clr_ptr;
@@ -3650,8 +3941,8 @@
 
 	/* Send marker if required */
 	if (vha->marker_needed != 0) {
-		if (qla2x00_marker(vha, req,
-			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
+		if (qla2x00_marker(vha, ha->base_qpair,
+			0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
 			return EXT_STATUS_MAILBOX;
 		vha->marker_needed = 0;
 	}
@@ -3659,17 +3950,8 @@
 	/* Acquire ring specific lock */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-
-	if (index == req->num_outstanding_cmds) {
+	handle = qla2xxx_get_next_handle(req);
+	if (handle == 0) {
 		rval = EXT_STATUS_BUSY;
 		goto queuing_error;
 	}
@@ -3679,8 +3961,14 @@
 
 	/* Check for room on request queue. */
 	if (req->cnt < req_cnt + 2) {
-		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
-		    RD_REG_DWORD_RELAXED(req->req_q_out);
+		if (IS_SHADOW_REG_CAPABLE(ha)) {
+			cnt = *req->out_ptr;
+		} else {
+			cnt = rd_reg_dword_relaxed(req->req_q_out);
+			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+				goto queuing_error;
+		}
+
 		if  (req->ring_index < cnt)
 			req->cnt = cnt - req->ring_index;
 		else
@@ -3693,7 +3981,7 @@
 	}
 
 	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
-	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+	cmd_pkt->handle = make_handle(req->id, handle);
 
 	/* Zero out remaining portion of packet. */
 	/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
@@ -3719,5 +4007,6 @@
 	qla2x00_start_iocbs(vha, req);
 queuing_error:
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
 	return rval;
 }
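
/*
 * A standalone sketch of the request-ring accounting seen in the
 * req->cnt updates above: the count of free entries is derived from
 * the driver's producer index (ring_index) and the consumer index
 * read back from the chip or its shadow area. The wrap branch
 * mirrors the usual qla2xxx formula; the ring length is illustrative.
 */
#include <assert.h>
#include <stdint.h>

static uint16_t ring_space_sketch(uint16_t length, uint16_t ring_index,
				  uint16_t out)
{
	if (ring_index < out)
		return out - ring_index;	/* free span between indices */
	return length - (ring_index - out);	/* free span wraps the end */
}

int main(void)
{
	assert(ring_space_sketch(128, 10, 50) == 40);
	assert(ring_space_sketch(128, 50, 10) == 88);
	return 0;
}
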
