From 95099d4622f8cb224d94e314c7a8e0df60b13f87 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 09 Dec 2023 08:38:01 +0000
Subject: [PATCH] scsi: smartpqi: update driver to version 1.2.16-010

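Update the smartpqi driver from version 1.1.4-130 to 1.2.16-010.

Summary of the changes in this diff:

- replace the GPL license boilerplate with an SPDX license identifier
  and update the copyright notices
- convert the deprecated pci_map_single()/PCI_DMA_* DMA API to
  dma_map_single() and enum dma_data_direction
- add Online Firmware Activation (OFA) support: controller
  quiesce/unquiesce, host buffer management, soft reset status
  polling, and OFA event handling
- add the expose_ld_first and hide_vsep module parameters
- factor the synchronous RAID request paths into
  pqi_send_scsi_raid_request() and its helpers
- validate the response producer index and request ID instead of
  using WARN_ON(), taking the controller offline on an invalid
  response
- re-read RAID maps larger than sizeof(struct raid_map) using the
  structure size advertised by the firmware
- use PQI_HZ for timeout calculations and scnprintf() in place of
  snprintf() for device info formatting
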
---
 kernel/drivers/scsi/smartpqi/smartpqi_init.c | 2638 ++++++++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 2143 insertions(+), 495 deletions(-)

diff --git a/kernel/drivers/scsi/smartpqi/smartpqi_init.c b/kernel/drivers/scsi/smartpqi/smartpqi_init.c
index 98f2d07..fcff35e 100644
--- a/kernel/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/kernel/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1,18 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *    driver for Microsemi PQI-based storage controllers
- *    Copyright (c) 2016-2017 Microsemi Corporation
+ *    Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
+ *    Copyright (c) 2016-2018 Microsemi Corporation
  *    Copyright (c) 2016 PMC-Sierra, Inc.
  *
- *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; version 2 of the License.
- *
- *    This program is distributed in the hope that it will be useful,
- *    but WITHOUT ANY WARRANTY; without even the implied warranty of
- *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- *    NON INFRINGEMENT.  See the GNU General Public License for more details.
- *
- *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *    Questions/Comments/Bugfixes to storagedev@microchip.com
  *
  */
 
@@ -40,11 +33,11 @@
 #define BUILD_TIMESTAMP
 #endif
 
-#define DRIVER_VERSION		"1.1.4-130"
+#define DRIVER_VERSION		"1.2.16-010"
 #define DRIVER_MAJOR		1
-#define DRIVER_MINOR		1
-#define DRIVER_RELEASE		4
-#define DRIVER_REVISION		130
+#define DRIVER_MINOR		2
+#define DRIVER_RELEASE		16
+#define DRIVER_REVISION		10
 
 #define DRIVER_NAME		"Microsemi PQI Driver (v" \
 				DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -74,6 +67,15 @@
 	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
 	unsigned int cdb_length, struct pqi_queue_group *queue_group,
 	struct pqi_encryption_info *encryption_info, bool raid_bypass);
+static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
+static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
+static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
+static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
+	u32 bytes_requested);
+static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
+static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
+static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device, unsigned long timeout_secs);
 
 /* for flags argument to pqi_submit_raid_request_synchronous() */
 #define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1
@@ -113,6 +115,7 @@
 	PQI_EVENT_TYPE_HARDWARE,
 	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
 	PQI_EVENT_TYPE_LOGICAL_DEVICE,
+	PQI_EVENT_TYPE_OFA,
 	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
 	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
 };
@@ -141,6 +144,18 @@
 MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
 	"\t\tSupported: none, reboot, panic\n"
 	"\t\tDefault: none");
+
+static int pqi_expose_ld_first;
+module_param_named(expose_ld_first,
+	pqi_expose_ld_first, int, 0644);
+MODULE_PARM_DESC(expose_ld_first,
+	"Expose logical drives before physical drives.");
+
+static int pqi_hide_vsep;
+module_param_named(hide_vsep,
+	pqi_hide_vsep, int, 0644);
+MODULE_PARM_DESC(hide_vsep,
+	"Hide the virtual SEP for direct attached drives.");
 
 static char *raid_levels[] = {
 	"RAID-0",
@@ -176,16 +191,14 @@
 	scmd->scsi_done(scmd);
 }
 
+static inline void pqi_disable_write_same(struct scsi_device *sdev)
+{
+	sdev->no_write_same = 1;
+}
+
 static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
 {
 	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
-}
-
-static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
-{
-	void *hostdata = shost_priv(shost);
-
-	return *((struct pqi_ctrl_info **)hostdata);
 }
 
 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
@@ -227,6 +240,21 @@
 	sis_write_driver_scratch(ctrl_info, mode);
 }
 
+static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
+{
+	ctrl_info->block_device_reset = true;
+}
+
+static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+	return ctrl_info->block_device_reset;
+}
+
+static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+	return ctrl_info->block_requests;
+}
+
 static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
 {
 	ctrl_info->block_requests = true;
@@ -239,11 +267,6 @@
 	wake_up_all(&ctrl_info->block_requests_wait);
 	pqi_retry_raid_bypass_requests(ctrl_info);
 	scsi_unblock_requests(ctrl_info->scsi_host);
-}
-
-static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
-{
-	return ctrl_info->block_requests;
 }
 
 static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
@@ -275,16 +298,6 @@
 	return remaining_msecs;
 }
 
-static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
-{
-	atomic_inc(&ctrl_info->num_busy_threads);
-}
-
-static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
-{
-	atomic_dec(&ctrl_info->num_busy_threads);
-}
-
 static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
 {
 	while (atomic_read(&ctrl_info->num_busy_threads) >
@@ -312,10 +325,48 @@
 	return device->in_reset;
 }
 
+static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
+{
+	ctrl_info->in_ofa = true;
+}
+
+static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
+{
+	ctrl_info->in_ofa = false;
+}
+
+static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
+{
+	return ctrl_info->in_ofa;
+}
+
+static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
+{
+	device->in_remove = true;
+}
+
+static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
+					struct pqi_scsi_dev *device)
+{
+	return device->in_remove && !ctrl_info->in_shutdown;
+}
+
+static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info)
+{
+	ctrl_info->in_shutdown = true;
+}
+
+static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info)
+{
+	return ctrl_info->in_shutdown;
+}
+
 static inline void pqi_schedule_rescan_worker_with_delay(
 	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
 {
 	if (pqi_ctrl_offline(ctrl_info))
+		return;
+	if (pqi_ctrl_in_ofa(ctrl_info))
 		return;
 
 	schedule_delayed_work(&ctrl_info->rescan_work, delay);
@@ -326,7 +377,7 @@
 	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
 }
 
-#define PQI_RESCAN_WORK_DELAY  (10 * HZ)
+#define PQI_RESCAN_WORK_DELAY	(10 * PQI_HZ)
 
 static inline void pqi_schedule_rescan_worker_delayed(
 	struct pqi_ctrl_info *ctrl_info)
@@ -339,6 +390,11 @@
 	cancel_delayed_work_sync(&ctrl_info->rescan_work);
 }
 
+static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info)
+{
+	cancel_work_sync(&ctrl_info->event_work);
+}
+
 static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
 {
 	if (!ctrl_info->heartbeat_counter)
@@ -347,18 +403,39 @@
 	return readl(ctrl_info->heartbeat_counter);
 }
 
+static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
+{
+	if (!ctrl_info->soft_reset_status)
+		return 0;
+
+	return readb(ctrl_info->soft_reset_status);
+}
+
+static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
+	u8 clear)
+{
+	u8 status;
+
+	if (!ctrl_info->soft_reset_status)
+		return;
+
+	status = pqi_read_soft_reset_status(ctrl_info);
+	status &= ~clear;
+	writeb(status, ctrl_info->soft_reset_status);
+}
+
 static int pqi_map_single(struct pci_dev *pci_dev,
 	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
-	size_t buffer_length, int data_direction)
+	size_t buffer_length, enum dma_data_direction data_direction)
 {
 	dma_addr_t bus_address;
 
-	if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
+	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
 		return 0;
 
-	bus_address = pci_map_single(pci_dev, buffer, buffer_length,
+	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
 		data_direction);
-	if (pci_dma_mapping_error(pci_dev, bus_address))
+	if (dma_mapping_error(&pci_dev->dev, bus_address))
 		return -ENOMEM;
 
 	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
@@ -370,15 +447,15 @@
 
 static void pqi_pci_unmap(struct pci_dev *pci_dev,
 	struct pqi_sg_descriptor *descriptors, int num_descriptors,
-	int data_direction)
+	enum dma_data_direction data_direction)
 {
 	int i;
 
-	if (data_direction == PCI_DMA_NONE)
+	if (data_direction == DMA_NONE)
 		return;
 
 	for (i = 0; i < num_descriptors; i++)
-		pci_unmap_single(pci_dev,
+		dma_unmap_single(&pci_dev->dev,
 			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
 			get_unaligned_le32(&descriptors[i].length),
 			data_direction);
@@ -387,10 +464,10 @@
 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_raid_path_request *request, u8 cmd,
 	u8 *scsi3addr, void *buffer, size_t buffer_length,
-	u16 vpd_page, int *pci_direction)
+	u16 vpd_page, enum dma_data_direction *dir)
 {
 	u8 *cdb;
-	int pci_dir;
+	size_t cdb_length = buffer_length;
 
 	memset(request, 0, sizeof(*request));
 
@@ -413,68 +490,79 @@
 			cdb[1] = 0x1;
 			cdb[2] = (u8)vpd_page;
 		}
-		cdb[4] = (u8)buffer_length;
+		cdb[4] = (u8)cdb_length;
 		break;
 	case CISS_REPORT_LOG:
 	case CISS_REPORT_PHYS:
 		request->data_direction = SOP_READ_FLAG;
 		cdb[0] = cmd;
 		if (cmd == CISS_REPORT_PHYS)
-			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
+			cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER;
 		else
-			cdb[1] = CISS_REPORT_LOG_EXTENDED;
-		put_unaligned_be32(buffer_length, &cdb[6]);
+			cdb[1] = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
+		put_unaligned_be32(cdb_length, &cdb[6]);
 		break;
 	case CISS_GET_RAID_MAP:
 		request->data_direction = SOP_READ_FLAG;
 		cdb[0] = CISS_READ;
 		cdb[1] = CISS_GET_RAID_MAP;
-		put_unaligned_be32(buffer_length, &cdb[6]);
+		put_unaligned_be32(cdb_length, &cdb[6]);
 		break;
 	case SA_FLUSH_CACHE:
 		request->data_direction = SOP_WRITE_FLAG;
 		cdb[0] = BMIC_WRITE;
 		cdb[6] = BMIC_FLUSH_CACHE;
-		put_unaligned_be16(buffer_length, &cdb[7]);
+		put_unaligned_be16(cdb_length, &cdb[7]);
 		break;
+	case BMIC_SENSE_DIAG_OPTIONS:
+		cdb_length = 0;
+		fallthrough;
 	case BMIC_IDENTIFY_CONTROLLER:
 	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
+	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
 		request->data_direction = SOP_READ_FLAG;
 		cdb[0] = BMIC_READ;
 		cdb[6] = cmd;
-		put_unaligned_be16(buffer_length, &cdb[7]);
+		put_unaligned_be16(cdb_length, &cdb[7]);
 		break;
+	case BMIC_SET_DIAG_OPTIONS:
+		cdb_length = 0;
+		fallthrough;
 	case BMIC_WRITE_HOST_WELLNESS:
 		request->data_direction = SOP_WRITE_FLAG;
 		cdb[0] = BMIC_WRITE;
 		cdb[6] = cmd;
-		put_unaligned_be16(buffer_length, &cdb[7]);
+		put_unaligned_be16(cdb_length, &cdb[7]);
+		break;
+	case BMIC_CSMI_PASSTHRU:
+		request->data_direction = SOP_BIDIRECTIONAL;
+		cdb[0] = BMIC_WRITE;
+		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
+		cdb[6] = cmd;
+		put_unaligned_be16(cdb_length, &cdb[7]);
 		break;
 	default:
-		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
-			cmd);
+		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd);
 		break;
 	}
 
 	switch (request->data_direction) {
 	case SOP_READ_FLAG:
-		pci_dir = PCI_DMA_FROMDEVICE;
+		*dir = DMA_FROM_DEVICE;
 		break;
 	case SOP_WRITE_FLAG:
-		pci_dir = PCI_DMA_TODEVICE;
+		*dir = DMA_TO_DEVICE;
 		break;
 	case SOP_NO_DIRECTION_FLAG:
-		pci_dir = PCI_DMA_NONE;
+		*dir = DMA_NONE;
 		break;
 	default:
-		pci_dir = PCI_DMA_BIDIRECTIONAL;
+		*dir = DMA_BIDIRECTIONAL;
 		break;
 	}
 
-	*pci_direction = pci_dir;
-
 	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
-		buffer, buffer_length, pci_dir);
+		buffer, buffer_length, *dir);
 }
 
 static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
@@ -512,63 +600,79 @@
 	atomic_dec(&io_request->refcount);
 }
 
-static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
-	struct bmic_identify_controller *buffer)
+static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
+	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
+	struct pqi_raid_error_info *error_info,	unsigned long timeout_msecs)
 {
 	int rc;
-	int pci_direction;
 	struct pqi_raid_path_request request;
+	enum dma_data_direction dir;
 
 	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
-		sizeof(*buffer), 0, &pci_direction);
+		cmd, scsi3addr, buffer,
+		buffer_length, vpd_page, &dir);
 	if (rc)
 		return rc;
 
 	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
-		NULL, NO_TIMEOUT);
+		error_info, timeout_msecs);
 
-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-		pci_direction);
+	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
 
 	return rc;
 }
 
-static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
+/* helper functions for pqi_send_scsi_raid_request */
+
+static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
+	u8 cmd, void *buffer, size_t buffer_length)
+{
+	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
+		buffer, buffer_length, 0, NULL, NO_TIMEOUT);
+}
+
+static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
+	u8 cmd, void *buffer, size_t buffer_length,
+	struct pqi_raid_error_info *error_info)
+{
+	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
+		buffer, buffer_length, 0, error_info, NO_TIMEOUT);
+}
+
+static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
+	struct bmic_identify_controller *buffer)
+{
+	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
+		buffer, sizeof(*buffer));
+}
+
+static inline int pqi_sense_subsystem_info(struct  pqi_ctrl_info *ctrl_info,
+	struct bmic_sense_subsystem_info *sense_info)
+{
+	return pqi_send_ctrl_raid_request(ctrl_info,
+		BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
+		sizeof(*sense_info));
+}
+
+static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
 	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
 {
-	int rc;
-	int pci_direction;
-	struct pqi_raid_path_request request;
-
-	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
-		&pci_direction);
-	if (rc)
-		return rc;
-
-	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
-		NULL, NO_TIMEOUT);
-
-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-		pci_direction);
-
-	return rc;
+	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
+		buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
 }
 
 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *device,
-	struct bmic_identify_physical_device *buffer,
-	size_t buffer_length)
+	struct bmic_identify_physical_device *buffer, size_t buffer_length)
 {
 	int rc;
-	int pci_direction;
+	enum dma_data_direction dir;
 	u16 bmic_device_index;
 	struct pqi_raid_path_request request;
 
 	rc = pqi_build_raid_path_request(ctrl_info, &request,
 		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
-		buffer_length, 0, &pci_direction);
+		buffer_length, 0, &dir);
 	if (rc)
 		return rc;
 
@@ -579,8 +683,7 @@
 	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
 		0, NULL, NO_TIMEOUT);
 
-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-		pci_direction);
+	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
 
 	return rc;
 }
@@ -589,8 +692,6 @@
 	enum bmic_flush_cache_shutdown_event shutdown_event)
 {
 	int rc;
-	struct pqi_raid_path_request request;
-	int pci_direction;
 	struct bmic_flush_cache *flush_cache;
 
 	/*
@@ -606,44 +707,54 @@
 
 	flush_cache->shutdown_event = shutdown_event;
 
-	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache,
-		sizeof(*flush_cache), 0, &pci_direction);
-	if (rc)
-		goto out;
+	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
+		sizeof(*flush_cache));
 
-	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
-		0, NULL, NO_TIMEOUT);
-
-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-		pci_direction);
-
-out:
 	kfree(flush_cache);
 
 	return rc;
 }
 
-static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
-	void *buffer, size_t buffer_length)
+int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
+	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
+	struct pqi_raid_error_info *error_info)
+{
+	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
+		buffer, buffer_length, error_info);
+}
+
+#define PQI_FETCH_PTRAID_DATA		(1 << 31)
+
+static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
 {
 	int rc;
-	struct pqi_raid_path_request request;
-	int pci_direction;
+	struct bmic_diag_options *diag;
 
-	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
-		buffer_length, 0, &pci_direction);
+	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
+	if (!diag)
+		return -ENOMEM;
+
+	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
+		diag, sizeof(*diag));
 	if (rc)
-		return rc;
+		goto out;
 
-	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
-		0, NULL, NO_TIMEOUT);
+	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
 
-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-		pci_direction);
+	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
+		sizeof(*diag));
+
+out:
+	kfree(diag);
 
 	return rc;
+}
+
+static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
+	void *buffer, size_t buffer_length)
+{
+	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
+		buffer, buffer_length);
 }
 
 #pragma pack(1)
@@ -758,7 +869,7 @@
 	return rc;
 }
 
-#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)
+#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * PQI_HZ)
 
 static void pqi_update_time_worker(struct work_struct *work)
 {
@@ -792,25 +903,11 @@
 	cancel_delayed_work_sync(&ctrl_info->update_time_work);
 }
 
-static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
+static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
 	void *buffer, size_t buffer_length)
 {
-	int rc;
-	int pci_direction;
-	struct pqi_raid_path_request request;
-
-	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
-	if (rc)
-		return rc;
-
-	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
-		NULL, NO_TIMEOUT);
-
-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-		pci_direction);
-
-	return rc;
+	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
+		buffer_length);
 }
 
 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
@@ -1028,29 +1125,11 @@
 	char *err_msg;
 	u32 raid_map_size;
 	u32 r5or6_blocks_per_row;
-	unsigned int num_phys_disks;
-	unsigned int num_raid_map_entries;
 
 	raid_map_size = get_unaligned_le32(&raid_map->structure_size);
 
 	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
 		err_msg = "RAID map too small";
-		goto bad_raid_map;
-	}
-
-	if (raid_map_size > sizeof(*raid_map)) {
-		err_msg = "RAID map too large";
-		goto bad_raid_map;
-	}
-
-	num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
-		(get_unaligned_le16(&raid_map->data_disks_per_row) +
-		get_unaligned_le16(&raid_map->metadata_disks_per_row));
-	num_raid_map_entries = num_phys_disks *
-		get_unaligned_le16(&raid_map->row_cnt);
-
-	if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
-		err_msg = "invalid number of map entries in RAID map";
 		goto bad_raid_map;
 	}
 
@@ -1092,28 +1171,46 @@
 	struct pqi_scsi_dev *device)
 {
 	int rc;
-	int pci_direction;
-	struct pqi_raid_path_request request;
+	u32 raid_map_size;
 	struct raid_map *raid_map;
 
 	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
 	if (!raid_map)
 		return -ENOMEM;
 
-	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
-		sizeof(*raid_map), 0, &pci_direction);
-	if (rc)
-		goto error;
-
-	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
-		NULL, NO_TIMEOUT);
-
-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-		pci_direction);
+	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
+		device->scsi3addr, raid_map, sizeof(*raid_map),
+		0, NULL, NO_TIMEOUT);
 
 	if (rc)
 		goto error;
+
+	raid_map_size = get_unaligned_le32(&raid_map->structure_size);
+
+	if (raid_map_size > sizeof(*raid_map)) {
+
+		kfree(raid_map);
+
+		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
+		if (!raid_map)
+			return -ENOMEM;
+
+		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
+			device->scsi3addr, raid_map, raid_map_size,
+			0, NULL, NO_TIMEOUT);
+		if (rc)
+			goto error;
+
+		if (get_unaligned_le32(&raid_map->structure_size)
+			!= raid_map_size) {
+			dev_warn(&ctrl_info->pci_dev->dev,
+				"Requested %d bytes, received %d bytes",
+				raid_map_size,
+				get_unaligned_le32(&raid_map->structure_size));
+			rc = -EINVAL;
+			goto error;
+		}
+	}
 
 	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
 	if (rc)
@@ -1145,9 +1242,9 @@
 	if (rc)
 		goto out;
 
-#define RAID_BYPASS_STATUS	4
-#define RAID_BYPASS_CONFIGURED	0x1
-#define RAID_BYPASS_ENABLED	0x2
+#define RAID_BYPASS_STATUS		4
+#define RAID_BYPASS_CONFIGURED		0x1
+#define RAID_BYPASS_ENABLED		0x2
 
 	bypass_status = buffer[RAID_BYPASS_STATUS];
 	device->raid_bypass_configured =
@@ -1203,30 +1300,59 @@
 	device->volume_offline = volume_offline;
 }
 
-#define PQI_INQUIRY_PAGE0_RETRIES	3
+static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device,
+	struct bmic_identify_physical_device *id_phys)
+{
+	int rc;
 
-static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
+	memset(id_phys, 0, sizeof(*id_phys));
+
+	rc = pqi_identify_physical_device(ctrl_info, device,
+		id_phys, sizeof(*id_phys));
+	if (rc) {
+		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
+		return rc;
+	}
+
+	scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
+	scsi_sanitize_inquiry_string(&id_phys->model[8], 16);
+
+	memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
+	memcpy(device->model, &id_phys->model[8], sizeof(device->model));
+
+	device->box_index = id_phys->box_index;
+	device->phys_box_on_bus = id_phys->phys_box_on_bus;
+	device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
+	device->queue_depth =
+		get_unaligned_le16(&id_phys->current_queue_depth_limit);
+	device->active_path_index = id_phys->active_path_number;
+	device->path_map = id_phys->redundant_path_present_map;
+	memcpy(&device->box,
+		&id_phys->alternate_paths_phys_box_on_port,
+		sizeof(device->box));
+	memcpy(&device->phys_connector,
+		&id_phys->alternate_paths_phys_connector,
+		sizeof(device->phys_connector));
+	device->bay = id_phys->phys_bay_in_box;
+
+	return 0;
+}
+
+static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *device)
 {
 	int rc;
 	u8 *buffer;
-	unsigned int retries;
 
 	buffer = kmalloc(64, GFP_KERNEL);
 	if (!buffer)
 		return -ENOMEM;
 
 	/* Send an inquiry to the device to see what it is. */
-	for (retries = 0;;) {
-		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0,
-			buffer, 64);
-		if (rc == 0)
-			break;
-		if (pqi_is_logical_device(device) ||
-			rc != PQI_CMD_STATUS_ABORTED ||
-			++retries > PQI_INQUIRY_PAGE0_RETRIES)
-			goto out;
-	}
+	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
+	if (rc)
+		goto out;
 
 	scsi_sanitize_inquiry_string(&buffer[8], 8);
 	scsi_sanitize_inquiry_string(&buffer[16], 16);
@@ -1235,7 +1361,7 @@
 	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
 	memcpy(device->model, &buffer[16], sizeof(device->model));
 
-	if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
+	if (device->devtype == TYPE_DISK) {
 		if (device->is_external_raid_device) {
 			device->raid_level = SA_RAID_UNKNOWN;
 			device->volume_status = CISS_LV_OK;
@@ -1253,33 +1379,21 @@
 	return rc;
 }
 
-static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
+static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *device,
 	struct bmic_identify_physical_device *id_phys)
 {
 	int rc;
 
-	memset(id_phys, 0, sizeof(*id_phys));
+	if (device->is_expander_smp_device)
+		return 0;
 
-	rc = pqi_identify_physical_device(ctrl_info, device,
-		id_phys, sizeof(*id_phys));
-	if (rc) {
-		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
-		return;
-	}
+	if (pqi_is_logical_device(device))
+		rc = pqi_get_logical_device_info(ctrl_info, device);
+	else
+		rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);
 
-	device->queue_depth =
-		get_unaligned_le16(&id_phys->current_queue_depth_limit);
-	device->device_type = id_phys->device_type;
-	device->active_path_index = id_phys->active_path_number;
-	device->path_map = id_phys->redundant_path_present_map;
-	memcpy(&device->box,
-		&id_phys->alternate_paths_phys_box_on_port,
-		sizeof(device->box));
-	memcpy(&device->phys_connector,
-		&id_phys->alternate_paths_phys_connector,
-		sizeof(device->phys_connector));
-	device->bay = id_phys->phys_bay_in_box;
+	return rc;
 }
 
 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
@@ -1409,9 +1523,23 @@
 	return rc;
 }
 
+#define PQI_PENDING_IO_TIMEOUT_SECS	20
+
 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *device)
 {
+	int rc;
+
+	pqi_device_remove_start(device);
+
+	rc = pqi_device_wait_for_pending_io(ctrl_info, device, PQI_PENDING_IO_TIMEOUT_SECS);
+	if (rc)
+		dev_err(&ctrl_info->pci_dev->dev,
+			"scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
+			ctrl_info->scsi_host->host_no, device->bus,
+			device->target, device->lun,
+			atomic_read(&device->scsi_cmds_outstanding));
+
 	if (pqi_is_logical_device(device))
 		scsi_remove_device(device->sdev);
 	else
@@ -1425,10 +1553,8 @@
 {
 	struct pqi_scsi_dev *device;
 
-	list_for_each_entry(device, &ctrl_info->scsi_device_list,
-		scsi_device_list_entry)
-		if (device->bus == bus && device->target == target &&
-			device->lun == lun)
+	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
+		if (device->bus == bus && device->target == target && device->lun == lun)
 			return device;
 
 	return NULL;
@@ -1454,15 +1580,12 @@
 };
 
 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
-	struct pqi_scsi_dev *device_to_find,
-	struct pqi_scsi_dev **matching_device)
+	struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
 {
 	struct pqi_scsi_dev *device;
 
-	list_for_each_entry(device, &ctrl_info->scsi_device_list,
-		scsi_device_list_entry) {
-		if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
-			device->scsi3addr)) {
+	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+		if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
 			*matching_device = device;
 			if (pqi_device_equal(device_to_find, device)) {
 				if (device_to_find->volume_offline)
@@ -1474,6 +1597,14 @@
 	}
 
 	return DEVICE_NOT_FOUND;
+}
+
+static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
+{
+	if (device->is_expander_smp_device)
+		return "Enclosure SMP    ";
+
+	return scsi_device_type(device->devtype);
 }
 
 #define PQI_DEV_INFO_BUFFER_LENGTH	128
@@ -1488,48 +1619,48 @@
 		"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
 
 	if (device->target_lun_valid)
-		count += snprintf(buffer + count,
+		count += scnprintf(buffer + count,
 			PQI_DEV_INFO_BUFFER_LENGTH - count,
 			"%d:%d",
 			device->target,
 			device->lun);
 	else
-		count += snprintf(buffer + count,
+		count += scnprintf(buffer + count,
 			PQI_DEV_INFO_BUFFER_LENGTH - count,
 			"-:-");
 
 	if (pqi_is_logical_device(device))
-		count += snprintf(buffer + count,
+		count += scnprintf(buffer + count,
 			PQI_DEV_INFO_BUFFER_LENGTH - count,
 			" %08x%08x",
 			*((u32 *)&device->scsi3addr),
 			*((u32 *)&device->scsi3addr[4]));
 	else
-		count += snprintf(buffer + count,
+		count += scnprintf(buffer + count,
 			PQI_DEV_INFO_BUFFER_LENGTH - count,
 			" %016llx", device->sas_address);
 
-	count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
+	count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
 		" %s %.8s %.16s ",
-		scsi_device_type(device->devtype),
+		pqi_device_type(device),
 		device->vendor,
 		device->model);
 
 	if (pqi_is_logical_device(device)) {
 		if (device->devtype == TYPE_DISK)
-			count += snprintf(buffer + count,
+			count += scnprintf(buffer + count,
 				PQI_DEV_INFO_BUFFER_LENGTH - count,
 				"SSDSmartPathCap%c En%c %-12s",
 				device->raid_bypass_configured ? '+' : '-',
 				device->raid_bypass_enabled ? '+' : '-',
 				pqi_raid_level_to_string(device->raid_level));
 	} else {
-		count += snprintf(buffer + count,
+		count += scnprintf(buffer + count,
 			PQI_DEV_INFO_BUFFER_LENGTH - count,
 			"AIO%c", device->aio_enabled ? '+' : '-');
 		if (device->devtype == TYPE_DISK ||
 			device->devtype == TYPE_ZBC)
-			count += snprintf(buffer + count,
+			count += scnprintf(buffer + count,
 				PQI_DEV_INFO_BUFFER_LENGTH - count,
 				" qd=%-6d", device->queue_depth);
 	}
@@ -1551,11 +1682,18 @@
 		existing_device->target_lun_valid = true;
 	}
 
+	if ((existing_device->volume_status == CISS_LV_QUEUED_FOR_EXPANSION ||
+		existing_device->volume_status == CISS_LV_UNDERGOING_EXPANSION) &&
+		new_device->volume_status == CISS_LV_OK)
+		existing_device->rescan = true;
+
 	/* By definition, the scsi3addr and wwid fields are already the same. */
 
 	existing_device->is_physical_device = new_device->is_physical_device;
 	existing_device->is_external_raid_device =
 		new_device->is_external_raid_device;
+	existing_device->is_expander_smp_device =
+		new_device->is_expander_smp_device;
 	existing_device->aio_enabled = new_device->aio_enabled;
 	memcpy(existing_device->vendor, new_device->vendor,
 		sizeof(existing_device->vendor));
@@ -1569,6 +1707,10 @@
 	existing_device->active_path_index = new_device->active_path_index;
 	existing_device->path_map = new_device->path_map;
 	existing_device->bay = new_device->bay;
+	existing_device->box_index = new_device->box_index;
+	existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
+	existing_device->phy_connected_dev_type =
+		new_device->phy_connected_dev_type;
 	memcpy(existing_device->box, new_device->box,
 		sizeof(existing_device->box));
 	memcpy(existing_device->phys_connector, new_device->phys_connector,
@@ -1580,6 +1722,7 @@
 		new_device->raid_bypass_configured;
 	existing_device->raid_bypass_enabled =
 		new_device->raid_bypass_enabled;
+	existing_device->device_offline = false;
 
 	/* To prevent this from being freed later. */
 	new_device->raid_map = NULL;
@@ -1611,6 +1754,14 @@
 	device->keep_device = false;
 }
 
+static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
+{
+	if (device->is_expander_smp_device)
+		return device->sas_port != NULL;
+
+	return device->sdev != NULL;
+}
+
 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
 {
@@ -1634,15 +1785,14 @@
 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
 
 	/* Assume that all devices in the existing list have gone away. */
-	list_for_each_entry(device, &ctrl_info->scsi_device_list,
-		scsi_device_list_entry)
+	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
 		device->device_gone = true;
 
 	for (i = 0; i < num_new_devices; i++) {
 		device = new_device_list[i];
 
 		find_result = pqi_scsi_find_entry(ctrl_info, device,
-						&matching_device);
+			&matching_device);
 
 		switch (find_result) {
 		case DEVICE_SAME:
@@ -1675,7 +1825,7 @@
 	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
 		scsi_device_list_entry) {
 		if (device->device_gone) {
-			list_del(&device->scsi_device_list_entry);
+			list_del_init(&device->scsi_device_list_entry);
 			list_add_tail(&device->delete_list_entry, &delete_list);
 		}
 	}
@@ -1696,19 +1846,23 @@
 
 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 
+	if (pqi_ctrl_in_ofa(ctrl_info))
+		pqi_ctrl_ofa_done(ctrl_info);
+
 	/* Remove all devices that have gone away. */
-	list_for_each_entry_safe(device, next, &delete_list,
-		delete_list_entry) {
+	list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
 		if (device->volume_offline) {
 			pqi_dev_info(ctrl_info, "offline", device);
 			pqi_show_volume_status(ctrl_info, device);
-		} else {
-			pqi_dev_info(ctrl_info, "removed", device);
 		}
-		if (device->sdev)
-			pqi_remove_device(ctrl_info, device);
 		list_del(&device->delete_list_entry);
-		pqi_free_device(device);
+		if (pqi_is_device_added(device)) {
+			pqi_remove_device(ctrl_info, device);
+		} else {
+			if (!device->volume_offline)
+				pqi_dev_info(ctrl_info, "removed", device);
+			pqi_free_device(device);
+		}
 	}
 
 	/*
@@ -1717,20 +1871,27 @@
 	 */
 	list_for_each_entry(device, &ctrl_info->scsi_device_list,
 		scsi_device_list_entry) {
-		if (device->sdev && device->queue_depth !=
-			device->advertised_queue_depth) {
-			device->advertised_queue_depth = device->queue_depth;
-			scsi_change_queue_depth(device->sdev,
-				device->advertised_queue_depth);
+		if (device->sdev) {
+			if (device->queue_depth !=
+				device->advertised_queue_depth) {
+				device->advertised_queue_depth = device->queue_depth;
+				scsi_change_queue_depth(device->sdev,
+					device->advertised_queue_depth);
+			}
+			if (device->rescan) {
+				scsi_rescan_device(&device->sdev->sdev_gendev);
+				device->rescan = false;
+			}
 		}
 	}
 
 	/* Expose any new devices. */
 	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
-		if (!device->sdev) {
-			pqi_dev_info(ctrl_info, "added", device);
+		if (!pqi_is_device_added(device)) {
 			rc = pqi_add_device(ctrl_info, device);
-			if (rc) {
+			if (rc == 0) {
+				pqi_dev_info(ctrl_info, "added", device);
+			} else {
 				dev_warn(&ctrl_info->pci_dev->dev,
 					"scsi %d:%d:%d:%d addition failed, device not added\n",
 					ctrl_info->scsi_host->host_no,
@@ -1742,31 +1903,19 @@
 	}
 }
 
-static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
+static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
 {
-	bool is_supported = false;
+	/*
+	 * Only support the HBA controller itself as a RAID
+	 * controller.  If it's a RAID controller other than
+	 * the HBA itself (an external RAID controller, for
+	 * example), we don't support it.
+	 */
+	if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
+		!pqi_is_hba_lunid(device->scsi3addr))
+		return false;
 
-	switch (device->devtype) {
-	case TYPE_DISK:
-	case TYPE_ZBC:
-	case TYPE_TAPE:
-	case TYPE_MEDIUM_CHANGER:
-	case TYPE_ENCLOSURE:
-		is_supported = true;
-		break;
-	case TYPE_RAID:
-		/*
-		 * Only support the HBA controller itself as a RAID
-		 * controller.  If it's a RAID controller other than
-		 * the HBA itself (an external RAID controller, for
-		 * example), we don't support it.
-		 */
-		if (pqi_is_hba_lunid(device->scsi3addr))
-			is_supported = true;
-		break;
-	}
-
-	return is_supported;
+	return true;
 }
 
 static inline bool pqi_skip_device(u8 *scsi3addr)
@@ -1776,6 +1925,29 @@
 		return true;
 
 	return false;
+}
+
+static inline void pqi_mask_device(u8 *scsi3addr)
+{
+	scsi3addr[3] |= 0xc0;
+}
+
+static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
+{
+	switch (device->device_type) {
+	case SA_DEVICE_TYPE_SAS:
+	case SA_DEVICE_TYPE_EXPANDER_SMP:
+	case SA_DEVICE_TYPE_SES:
+		return true;
+	}
+
+	return false;
+}
+
+static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
+{
+	return !device->is_physical_device ||
+		!pqi_skip_device(device->scsi3addr);
 }
 
 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
@@ -1797,6 +1969,8 @@
 	unsigned int num_valid_devices;
 	bool is_physical_device;
 	u8 *scsi3addr;
+	unsigned int physical_index;
+	unsigned int logical_index;
 	static char *out_of_memory_msg =
 		"failed to allocate memory, device discovery stopped";
 
@@ -1832,6 +2006,20 @@
 			rc = -ENOMEM;
 			goto out;
 		}
+
+		if (pqi_hide_vsep) {
+			for (i = num_physicals - 1; i >= 0; i--) {
+				phys_lun_ext_entry =
+						&physdev_list->lun_entries[i];
+				if (CISS_GET_DRIVE_NUMBER(
+					phys_lun_ext_entry->lunid) ==
+						PQI_VSEP_CISS_BTL) {
+					pqi_mask_device(
+						phys_lun_ext_entry->lunid);
+					break;
+				}
+			}
+		}
 	}
 
 	num_new_devices = num_physicals + num_logicals;
@@ -1859,19 +2047,23 @@
 
 	device = NULL;
 	num_valid_devices = 0;
+	physical_index = 0;
+	logical_index = 0;
 
 	for (i = 0; i < num_new_devices; i++) {
 
-		if (i < num_physicals) {
+		if ((!pqi_expose_ld_first && i < num_physicals) ||
+			(pqi_expose_ld_first && i >= num_logicals)) {
 			is_physical_device = true;
-			phys_lun_ext_entry = &physdev_list->lun_entries[i];
+			phys_lun_ext_entry =
+				&physdev_list->lun_entries[physical_index++];
 			log_lun_ext_entry = NULL;
 			scsi3addr = phys_lun_ext_entry->lunid;
 		} else {
 			is_physical_device = false;
 			phys_lun_ext_entry = NULL;
 			log_lun_ext_entry =
-				&logdev_list->lun_entries[i - num_physicals];
+				&logdev_list->lun_entries[logical_index++];
 			scsi3addr = log_lun_ext_entry->lunid;
 		}
 
@@ -1886,12 +2078,20 @@
 
 		memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
 		device->is_physical_device = is_physical_device;
-		if (!is_physical_device)
+		if (is_physical_device) {
+			device->device_type = phys_lun_ext_entry->device_type;
+			if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
+				device->is_expander_smp_device = true;
+		} else {
 			device->is_external_raid_device =
 				pqi_is_external_raid_addr(scsi3addr);
+		}
+
+		if (!pqi_is_supported_device(device))
+			continue;
 
 		/* Gather information about the device. */
-		rc = pqi_get_device_info(ctrl_info, device);
+		rc = pqi_get_device_info(ctrl_info, device, id_phys);
 		if (rc == -ENOMEM) {
 			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
 				out_of_memory_msg);
@@ -1912,39 +2112,24 @@
 			continue;
 		}
 
-		if (!pqi_is_supported_device(device))
-			continue;
-
 		pqi_assign_bus_target_lun(device);
 
 		if (device->is_physical_device) {
 			device->wwid = phys_lun_ext_entry->wwid;
 			if ((phys_lun_ext_entry->device_flags &
-				REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
-				phys_lun_ext_entry->aio_handle)
+				CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
+				phys_lun_ext_entry->aio_handle) {
 				device->aio_enabled = true;
+				device->aio_handle =
+					phys_lun_ext_entry->aio_handle;
+			}
 		} else {
 			memcpy(device->volume_id, log_lun_ext_entry->volume_id,
 				sizeof(device->volume_id));
 		}
 
-		switch (device->devtype) {
-		case TYPE_DISK:
-		case TYPE_ZBC:
-		case TYPE_ENCLOSURE:
-			if (device->is_physical_device) {
-				device->sas_address =
-					get_unaligned_be64(&device->wwid);
-				if (device->devtype == TYPE_DISK ||
-					device->devtype == TYPE_ZBC) {
-					device->aio_handle =
-						phys_lun_ext_entry->aio_handle;
-					pqi_get_physical_disk_info(ctrl_info,
-						device, id_phys);
-				}
-			}
-			break;
-		}
+		if (pqi_is_device_with_sas_address(device))
+			device->sas_address = get_unaligned_be64(&device->wwid);
 
 		new_device_list[num_valid_devices++] = device;
 	}
@@ -1968,52 +2153,35 @@
 	return rc;
 }
 
-static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
-{
-	unsigned long flags;
-	struct pqi_scsi_dev *device;
-
-	while (1) {
-		spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
-
-		device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
-			struct pqi_scsi_dev, scsi_device_list_entry);
-		if (device)
-			list_del(&device->scsi_device_list_entry);
-
-		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
-			flags);
-
-		if (!device)
-			break;
-
-		if (device->sdev)
-			pqi_remove_device(ctrl_info, device);
-		pqi_free_device(device);
-	}
-}
-
 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
 {
-	int rc;
+	int rc = 0;
 
 	if (pqi_ctrl_offline(ctrl_info))
 		return -ENXIO;
 
-	mutex_lock(&ctrl_info->scan_mutex);
-
-	rc = pqi_update_scsi_devices(ctrl_info);
-	if (rc)
+	if (!mutex_trylock(&ctrl_info->scan_mutex)) {
 		pqi_schedule_rescan_worker_delayed(ctrl_info);
-
-	mutex_unlock(&ctrl_info->scan_mutex);
+		rc = -EINPROGRESS;
+	} else {
+		rc = pqi_update_scsi_devices(ctrl_info);
+		if (rc)
+			pqi_schedule_rescan_worker_delayed(ctrl_info);
+		mutex_unlock(&ctrl_info->scan_mutex);
+	}
 
 	return rc;
 }
 
 static void pqi_scan_start(struct Scsi_Host *shost)
 {
-	pqi_scan_scsi_devices(shost_to_hba(shost));
+	struct pqi_ctrl_info *ctrl_info;
+
+	ctrl_info = shost_to_hba(shost);
+	if (pqi_ctrl_in_ofa(ctrl_info))
+		return;
+
+	pqi_scan_scsi_devices(ctrl_info);
 }
 
 /* Returns TRUE if scan is finished. */
@@ -2038,6 +2206,12 @@
 {
 	mutex_lock(&ctrl_info->lun_reset_mutex);
 	mutex_unlock(&ctrl_info->lun_reset_mutex);
+}
+
+static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
+{
+	mutex_lock(&ctrl_info->ofa_mutex);
+	mutex_unlock(&ctrl_info->ofa_mutex);
 }
 
 static inline void pqi_set_encryption_info(
@@ -2118,7 +2292,7 @@
 	switch (scmd->cmnd[0]) {
 	case WRITE_6:
 		is_write = true;
-		/* fall through */
+		fallthrough;
 	case READ_6:
 		first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
 			(scmd->cmnd[2] << 8) | scmd->cmnd[3]);
@@ -2128,21 +2302,21 @@
 		break;
 	case WRITE_10:
 		is_write = true;
-		/* fall through */
+		fallthrough;
 	case READ_10:
 		first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
 		block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
 		break;
 	case WRITE_12:
 		is_write = true;
-		/* fall through */
+		fallthrough;
 	case READ_12:
 		first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
 		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
 		break;
 	case WRITE_16:
 		is_write = true;
-		/* fall through */
+		fallthrough;
 	case READ_16:
 		first_block = get_unaligned_be64(&scmd->cmnd[2]);
 		block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
@@ -2256,7 +2430,6 @@
 		offload_to_mirror =
 			(offload_to_mirror >= layout_map_count - 1) ?
 				0 : offload_to_mirror + 1;
-		WARN_ON(offload_to_mirror >= layout_map_count);
 		device->offload_to_mirror = offload_to_mirror;
 		/*
 		 * Avoid direct use of device->offload_to_mirror within this
@@ -2347,9 +2520,6 @@
 			(map_row * total_disks_per_row) + first_column;
 	}
 
-	if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
-		return PQI_RAID_BYPASS_INELIGIBLE;
-
 	aio_handle = raid_map->disk_data[map_index].aio_handle;
 	disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
 		first_row * strip_size +
@@ -2419,7 +2589,7 @@
 	u8 status;
 
 	pqi_registers = ctrl_info->pqi_registers;
-	timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
+	timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;
 
 	while (1) {
 		signature = readq(&pqi_registers->signature);
@@ -2480,10 +2650,9 @@
 		return;
 
 	device->device_offline = true;
-	scsi_device_set_state(sdev, SDEV_OFFLINE);
 	ctrl_info = shost_to_hba(sdev->host);
 	pqi_schedule_rescan_worker(ctrl_info);
-	dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n",
+	dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
 		path, ctrl_info->scsi_host->host_no, device->bus,
 		device->target, device->lun);
 }
@@ -2558,10 +2727,25 @@
 			scsi_normalize_sense(error_info->data,
 				sense_data_length, &sshdr) &&
 				sshdr.sense_key == HARDWARE_ERROR &&
-				sshdr.asc == 0x3e &&
-				sshdr.ascq == 0x1) {
-			pqi_take_device_offline(scmd->device, "RAID");
-			host_byte = DID_NO_CONNECT;
+				sshdr.asc == 0x3e) {
+			struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
+			struct pqi_scsi_dev *device = scmd->device->hostdata;
+
+			switch (sshdr.ascq) {
+			case 0x1: /* LOGICAL UNIT FAILURE */
+				if (printk_ratelimit())
+					scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
+						ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
+				pqi_take_device_offline(scmd->device, "RAID");
+				host_byte = DID_NO_CONNECT;
+				break;
+
+			default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
+				if (printk_ratelimit())
+					scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
+						sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
+				break;
+			}
 		}
 
 		if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
@@ -2687,6 +2871,9 @@
 	case SOP_TMF_FUNCTION_SUCCEEDED:
 		rc = 0;
 		break;
+	case SOP_TMF_REJECTED:
+		rc = -EAGAIN;
+		break;
 	default:
 		rc = -EIO;
 		break;
@@ -2695,10 +2882,14 @@
 	return rc;
 }
 
-static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
-	struct pqi_queue_group *queue_group)
+static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info)
 {
-	unsigned int num_responses;
+	pqi_take_ctrl_offline(ctrl_info);
+}
+
+static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
+{
+	int num_responses;
 	pqi_index_t oq_pi;
 	pqi_index_t oq_ci;
 	struct pqi_io_request *io_request;
@@ -2710,6 +2901,13 @@
 
 	while (1) {
 		oq_pi = readl(queue_group->oq_pi);
+		if (oq_pi >= ctrl_info->num_elements_per_oq) {
+			pqi_invalid_response(ctrl_info);
+			dev_err(&ctrl_info->pci_dev->dev,
+				"I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
+				oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
+			return -1;
+		}
 		if (oq_pi == oq_ci)
 			break;
 
@@ -2718,18 +2916,36 @@
 			(oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
 
 		request_id = get_unaligned_le16(&response->request_id);
-		WARN_ON(request_id >= ctrl_info->max_io_slots);
+		if (request_id >= ctrl_info->max_io_slots) {
+			pqi_invalid_response(ctrl_info);
+			dev_err(&ctrl_info->pci_dev->dev,
+				"request ID in response (%u) out of range (0-%u): producer index: %u  consumer index: %u\n",
+				request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
+			return -1;
+		}
 
 		io_request = &ctrl_info->io_request_pool[request_id];
-		WARN_ON(atomic_read(&io_request->refcount) == 0);
+		if (atomic_read(&io_request->refcount) == 0) {
+			pqi_invalid_response(ctrl_info);
+			dev_err(&ctrl_info->pci_dev->dev,
+				"request ID in response (%u) does not match an outstanding I/O request: producer index: %u  consumer index: %u\n",
+				request_id, oq_pi, oq_ci);
+			return -1;
+		}
 
 		switch (response->header.iu_type) {
 		case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
 		case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
 			if (io_request->scmd)
 				io_request->scmd->result = 0;
-			/* fall through */
+			fallthrough;
 		case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
+			break;
+		case PQI_RESPONSE_IU_VENDOR_GENERAL:
+			io_request->status =
+				get_unaligned_le16(
+				&((struct pqi_vendor_general_response *)
+					response)->status);
 			break;
 		case PQI_RESPONSE_IU_TASK_MANAGEMENT:
 			io_request->status =
@@ -2745,24 +2961,22 @@
 			io_request->error_info = ctrl_info->error_buffer +
 				(get_unaligned_le16(&response->error_index) *
 				PQI_ERROR_BUFFER_ELEMENT_LENGTH);
-			pqi_process_io_error(response->header.iu_type,
-				io_request);
+			pqi_process_io_error(response->header.iu_type, io_request);
 			break;
 		default:
+			pqi_invalid_response(ctrl_info);
 			dev_err(&ctrl_info->pci_dev->dev,
-				"unexpected IU type: 0x%x\n",
-				response->header.iu_type);
-			break;
+				"unexpected IU type: 0x%x: producer index: %u  consumer index: %u\n",
+				response->header.iu_type, oq_pi, oq_ci);
+			return -1;
 		}
 
-		io_request->io_complete_callback(io_request,
-			io_request->context);
+		io_request->io_complete_callback(io_request, io_request->context);
 
 		/*
 		 * Note that the I/O request structure CANNOT BE TOUCHED after
 		 * returning from the I/O completion callback!
 		 */
-
 		oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
 	}
 
@@ -2850,6 +3064,110 @@
 	pqi_send_event_ack(ctrl_info, &request, sizeof(request));
 }
 
+#define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS		30
+#define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS	1
+
+static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
+	struct pqi_ctrl_info *ctrl_info)
+{
+	unsigned long timeout;
+	u8 status;
+
+	timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
+
+	while (1) {
+		status = pqi_read_soft_reset_status(ctrl_info);
+		if (status & PQI_SOFT_RESET_INITIATE)
+			return RESET_INITIATE_DRIVER;
+
+		if (status & PQI_SOFT_RESET_ABORT)
+			return RESET_ABORT;
+
+		if (time_after(jiffies, timeout)) {
+			dev_err(&ctrl_info->pci_dev->dev,
+				"timed out waiting for soft reset status\n");
+			return RESET_TIMEDOUT;
+		}
+
+		if (!sis_is_firmware_running(ctrl_info))
+			return RESET_NORESPONSE;
+
+		ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
+	}
+}
+
+static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
+	enum pqi_soft_reset_status reset_status)
+{
+	int rc;
+
+	switch (reset_status) {
+	case RESET_INITIATE_DRIVER:
+	case RESET_TIMEDOUT:
+		dev_info(&ctrl_info->pci_dev->dev,
+			"resetting controller %u\n", ctrl_info->ctrl_id);
+		sis_soft_reset(ctrl_info);
+		fallthrough;
+	case RESET_INITIATE_FIRMWARE:
+		rc = pqi_ofa_ctrl_restart(ctrl_info);
+		pqi_ofa_free_host_buffer(ctrl_info);
+		dev_info(&ctrl_info->pci_dev->dev,
+			"Online Firmware Activation for controller %u: %s\n",
+			ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED");
+		break;
+	case RESET_ABORT:
+		pqi_ofa_ctrl_unquiesce(ctrl_info);
+		dev_info(&ctrl_info->pci_dev->dev,
+			"Online Firmware Activation for controller %u: %s\n",
+			ctrl_info->ctrl_id, "ABORTED");
+		break;
+	case RESET_NORESPONSE:
+		pqi_ofa_free_host_buffer(ctrl_info);
+		pqi_take_ctrl_offline(ctrl_info);
+		break;
+	}
+}
+
+static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_event *event)
+{
+	u16 event_id;
+	enum pqi_soft_reset_status status;
+
+	event_id = get_unaligned_le16(&event->event_id);
+
+	mutex_lock(&ctrl_info->ofa_mutex);
+
+	if (event_id == PQI_EVENT_OFA_QUIESCE) {
+		dev_info(&ctrl_info->pci_dev->dev,
+			"Received Online Firmware Activation quiesce event for controller %u\n",
+			ctrl_info->ctrl_id);
+		pqi_ofa_ctrl_quiesce(ctrl_info);
+		pqi_acknowledge_event(ctrl_info, event);
+		if (ctrl_info->soft_reset_handshake_supported) {
+			status = pqi_poll_for_soft_reset_status(ctrl_info);
+			pqi_process_soft_reset(ctrl_info, status);
+		} else {
+			pqi_process_soft_reset(ctrl_info,
+					RESET_INITIATE_FIRMWARE);
+		}
+
+	} else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
+		pqi_acknowledge_event(ctrl_info, event);
+		pqi_ofa_setup_host_buffer(ctrl_info,
+			le32_to_cpu(event->ofa_bytes_requested));
+		pqi_ofa_host_memory_update(ctrl_info);
+	} else if (event_id == PQI_EVENT_OFA_CANCELLED) {
+		pqi_ofa_free_host_buffer(ctrl_info);
+		pqi_acknowledge_event(ctrl_info, event);
+		dev_info(&ctrl_info->pci_dev->dev,
+			"Online Firmware Activation(%u) cancel reason : %u\n",
+			ctrl_info->ctrl_id, event->ofa_cancel_reason);
+	}
+
+	mutex_unlock(&ctrl_info->ofa_mutex);
+}
+
 static void pqi_event_worker(struct work_struct *work)
 {
 	unsigned int i;
@@ -2869,6 +3187,11 @@
 	for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
 		if (event->pending) {
 			event->pending = false;
+			if (event->event_type == PQI_EVENT_TYPE_OFA) {
+				pqi_ctrl_unbusy(ctrl_info);
+				pqi_ofa_process_event(ctrl_info, event);
+				return;
+			}
 			pqi_acknowledge_event(ctrl_info, event);
 		}
 		event++;
@@ -2878,7 +3201,7 @@
 	pqi_ctrl_unbusy(ctrl_info);
 }
 
-#define PQI_HEARTBEAT_TIMER_INTERVAL	(10 * HZ)
+#define PQI_HEARTBEAT_TIMER_INTERVAL	(10 * PQI_HZ)
 
 static void pqi_heartbeat_timer_handler(struct timer_list *t)
 {
@@ -2947,9 +3270,27 @@
 	return pqi_event_type_to_event_index(event_type) != -1;
 }
 
-static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
+static void pqi_ofa_capture_event_payload(struct pqi_event *event,
+	struct pqi_event_response *response)
 {
-	unsigned int num_events;
+	u16 event_id;
+
+	event_id = get_unaligned_le16(&event->event_id);
+
+	if (event->event_type == PQI_EVENT_TYPE_OFA) {
+		if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
+			event->ofa_bytes_requested =
+			response->data.ofa_memory_allocation.bytes_requested;
+		} else if (event_id == PQI_EVENT_OFA_CANCELLED) {
+			event->ofa_cancel_reason =
+			response->data.ofa_cancelled.reason;
+		}
+	}
+}
+
+static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
+{
+	int num_events;
 	pqi_index_t oq_pi;
 	pqi_index_t oq_ci;
 	struct pqi_event_queue *event_queue;
@@ -2963,25 +3304,31 @@
 
 	while (1) {
 		oq_pi = readl(event_queue->oq_pi);
+		if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
+			pqi_invalid_response(ctrl_info);
+			dev_err(&ctrl_info->pci_dev->dev,
+				"event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
+				oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
+			return -1;
+		}
+
 		if (oq_pi == oq_ci)
 			break;
 
 		num_events++;
-		response = event_queue->oq_element_array +
-			(oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
+		response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
 
 		event_index =
 			pqi_event_type_to_event_index(response->event_type);
 
-		if (event_index >= 0) {
-			if (response->request_acknowlege) {
-				event = &ctrl_info->events[event_index];
-				event->pending = true;
-				event->event_type = response->event_type;
-				event->event_id = response->event_id;
-				event->additional_event_id =
-					response->additional_event_id;
-			}
+		if (event_index >= 0 && response->request_acknowledge) {
+			event = &ctrl_info->events[event_index];
+			event->pending = true;
+			event->event_type = response->event_type;
+			event->event_id = response->event_id;
+			event->additional_event_id = response->additional_event_id;
+			if (event->event_type == PQI_EVENT_TYPE_OFA)
+				pqi_ofa_capture_event_payload(event, response);
 		}
 
 		oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
@@ -2999,7 +3346,7 @@
 #define PQI_LEGACY_INTX_MASK	0x1
 
 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
-						bool enable_intx)
+	bool enable_intx)
 {
 	u32 intx_mask;
 	struct pqi_device_registers __iomem *pqi_registers;
@@ -3096,7 +3443,8 @@
 {
 	struct pqi_ctrl_info *ctrl_info;
 	struct pqi_queue_group *queue_group;
-	unsigned int num_responses_handled;
+	int num_io_responses_handled;
+	int num_events_handled;
 
 	queue_group = data;
 	ctrl_info = queue_group->ctrl_info;
@@ -3104,17 +3452,25 @@
 	if (!pqi_is_valid_irq(ctrl_info))
 		return IRQ_NONE;
 
-	num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
+	num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
+	if (num_io_responses_handled < 0)
+		goto out;
 
-	if (irq == ctrl_info->event_irq)
-		num_responses_handled += pqi_process_event_intr(ctrl_info);
+	if (irq == ctrl_info->event_irq) {
+		num_events_handled = pqi_process_event_intr(ctrl_info);
+		if (num_events_handled < 0)
+			goto out;
+	} else {
+		num_events_handled = 0;
+	}
 
-	if (num_responses_handled)
+	if (num_io_responses_handled + num_events_handled > 0)
 		atomic_inc(&ctrl_info->num_interrupts);
 
 	pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
 	pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
 
+out:
 	return IRQ_HANDLED;
 }
 
@@ -3234,9 +3590,9 @@
 	alloc_length += PQI_EXTRA_SGL_MEMORY;
 
 	ctrl_info->queue_memory_base =
-		dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
-			alloc_length,
-			&ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
+		dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
+				   &ctrl_info->queue_memory_base_dma_handle,
+				   GFP_KERNEL);
 
 	if (!ctrl_info->queue_memory_base)
 		return -ENOMEM;
@@ -3373,10 +3729,9 @@
 		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
 
 	ctrl_info->admin_queue_memory_base =
-		dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
-			alloc_length,
-			&ctrl_info->admin_queue_memory_base_dma_handle,
-			GFP_KERNEL);
+		dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
+				   &ctrl_info->admin_queue_memory_base_dma_handle,
+				   GFP_KERNEL);
 
 	if (!ctrl_info->admin_queue_memory_base)
 		return -ENOMEM;
@@ -3414,7 +3769,7 @@
 	return 0;
 }
 
-#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES		HZ
+#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES		PQI_HZ
 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS	1
 
 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
@@ -3438,7 +3793,7 @@
 		&pqi_registers->admin_oq_pi_addr);
 
 	reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
-		(PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
+		(PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
 		(admin_queues->int_msg_num << 16);
 	writel(reg, &pqi_registers->admin_iq_num_elements);
 	writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
@@ -3507,7 +3862,7 @@
 	admin_queues = &ctrl_info->admin_queues;
 	oq_ci = admin_queues->oq_ci_copy;
 
-	timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
+	timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;
 
 	while (1) {
 		oq_pi = readl(admin_queues->oq_pi);
@@ -3622,7 +3977,7 @@
 
 	while (1) {
 		if (wait_for_completion_io_timeout(wait,
-			PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
+			PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
 			rc = 0;
 			break;
 		}
@@ -3645,8 +4000,8 @@
 	complete(waiting);
 }
 
-static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info
-						*error_info)
+static int pqi_process_raid_io_error_synchronous(
+	struct pqi_raid_error_info *error_info)
 {
 	int rc = -EIO;
 
@@ -3719,6 +4074,8 @@
 		goto out;
 	}
 
+	atomic_inc(&ctrl_info->sync_cmds_outstanding);
+
 	io_request = pqi_alloc_io_request(ctrl_info);
 
 	put_unaligned_le16(io_request->index,
@@ -3765,6 +4122,7 @@
 
 	pqi_free_io_request(io_request);
 
+	atomic_dec(&ctrl_info->sync_cmds_outstanding);
 out:
 	up(&ctrl_info->sync_request_sem);
 
@@ -3833,7 +4191,7 @@
 	rc = pqi_map_single(ctrl_info->pci_dev,
 		&request.data.report_device_capability.sg_descriptor,
 		capability, sizeof(*capability),
-		PCI_DMA_FROMDEVICE);
+		DMA_FROM_DEVICE);
 	if (rc)
 		goto out;
 
@@ -3842,7 +4200,7 @@
 
 	pqi_pci_unmap(ctrl_info->pci_dev,
 		&request.data.report_device_capability.sg_descriptor, 1,
-		PCI_DMA_FROMDEVICE);
+		DMA_FROM_DEVICE);
 
 	if (rc)
 		goto out;
@@ -4169,7 +4527,7 @@
 	rc = pqi_map_single(ctrl_info->pci_dev,
 		request.data.report_event_configuration.sg_descriptors,
 		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
-		PCI_DMA_FROMDEVICE);
+		DMA_FROM_DEVICE);
 	if (rc)
 		goto out;
 
@@ -4178,7 +4536,7 @@
 
 	pqi_pci_unmap(ctrl_info->pci_dev,
 		request.data.report_event_configuration.sg_descriptors, 1,
-		PCI_DMA_FROMDEVICE);
+		DMA_FROM_DEVICE);
 
 	if (rc)
 		goto out;
@@ -4205,7 +4563,7 @@
 	rc = pqi_map_single(ctrl_info->pci_dev,
 		request.data.report_event_configuration.sg_descriptors,
 		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
-		PCI_DMA_TODEVICE);
+		DMA_TO_DEVICE);
 	if (rc)
 		goto out;
 
@@ -4214,7 +4572,7 @@
 
 	pqi_pci_unmap(ctrl_info->pci_dev,
 		request.data.report_event_configuration.sg_descriptors, 1,
-		PCI_DMA_TODEVICE);
+		DMA_TO_DEVICE);
 
 out:
 	kfree(event_config);
@@ -4262,10 +4620,11 @@
 
 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
 {
-	ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
-		ctrl_info->error_buffer_length,
-		&ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
 
+	ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
+				     ctrl_info->error_buffer_length,
+				     &ctrl_info->error_buffer_dma_handle,
+				     GFP_KERNEL);
 	if (!ctrl_info->error_buffer)
 		return -ENOMEM;
 
@@ -4638,10 +4997,10 @@
 	}
 
 	switch (scmd->sc_data_direction) {
-	case DMA_TO_DEVICE:
+	case DMA_FROM_DEVICE:
 		request->data_direction = SOP_READ_FLAG;
 		break;
-	case DMA_FROM_DEVICE:
+	case DMA_TO_DEVICE:
 		request->data_direction = SOP_WRITE_FLAG;
 		break;
 	case DMA_NONE:
@@ -4954,7 +5313,17 @@
 {
 	struct pqi_scsi_dev *device;
 
+	if (!scmd->device) {
+		set_host_byte(scmd, DID_NO_CONNECT);
+		return;
+	}
+
 	device = scmd->device->hostdata;
+	if (!device) {
+		set_host_byte(scmd, DID_NO_CONNECT);
+		return;
+	}
+
 	atomic_dec(&device->scsi_cmds_outstanding);
 }
 
@@ -4971,16 +5340,24 @@
 	device = scmd->device->hostdata;
 	ctrl_info = shost_to_hba(shost);
 
+	if (!device) {
+		set_host_byte(scmd, DID_NO_CONNECT);
+		pqi_scsi_done(scmd);
+		return 0;
+	}
+
 	atomic_inc(&device->scsi_cmds_outstanding);
 
-	if (pqi_ctrl_offline(ctrl_info)) {
+	if (pqi_ctrl_offline(ctrl_info) ||
+	    pqi_device_in_remove(ctrl_info, device)) {
 		set_host_byte(scmd, DID_NO_CONNECT);
 		pqi_scsi_done(scmd);
 		return 0;
 	}
 
 	pqi_ctrl_busy(ctrl_info);
-	if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) {
+	if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
+	    pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) {
 		rc = SCSI_MLQUEUE_HOST_BUSY;
 		goto out;
 	}
@@ -4997,22 +5374,21 @@
 	if (pqi_is_logical_device(device)) {
 		raid_bypassed = false;
 		if (device->raid_bypass_enabled &&
-				!blk_rq_is_passthrough(scmd->request)) {
+			!blk_rq_is_passthrough(scmd->request)) {
 			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
 				scmd, queue_group);
-			if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
+			if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
 				raid_bypassed = true;
+				atomic_inc(&device->raid_bypass_cnt);
+			}
 		}
 		if (!raid_bypassed)
-			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
-				queue_group);
+			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
 	} else {
 		if (device->aio_enabled)
-			rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
-				queue_group);
+			rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
 		else
-			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
-				queue_group);
+			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
 	}
 
 out:
@@ -5116,6 +5492,46 @@
 
 				list_del(&io_request->request_list_entry);
 				set_host_byte(scmd, DID_RESET);
+				pqi_free_io_request(io_request);
+				scsi_dma_unmap(scmd);
+				pqi_scsi_done(scmd);
+			}
+
+			spin_unlock_irqrestore(
+				&queue_group->submit_lock[path], flags);
+		}
+	}
+}
+
+static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
+{
+	unsigned int i;
+	unsigned int path;
+	struct pqi_queue_group *queue_group;
+	unsigned long flags;
+	struct pqi_io_request *io_request;
+	struct pqi_io_request *next;
+	struct scsi_cmnd *scmd;
+
+	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+		queue_group = &ctrl_info->queue_groups[i];
+
+		for (path = 0; path < 2; path++) {
+			spin_lock_irqsave(&queue_group->submit_lock[path],
+						flags);
+
+			list_for_each_entry_safe(io_request, next,
+				&queue_group->request_list[path],
+				request_list_entry) {
+
+				scmd = io_request->scmd;
+				if (!scmd)
+					continue;
+
+				list_del(&io_request->request_list_entry);
+				set_host_byte(scmd, DID_RESET);
+				pqi_free_io_request(io_request);
+				scsi_dma_unmap(scmd);
 				pqi_scsi_done(scmd);
 			}
 
@@ -5126,24 +5542,38 @@
 }
 
 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
-	struct pqi_scsi_dev *device)
+	struct pqi_scsi_dev *device, unsigned long timeout_secs)
 {
+	unsigned long timeout;
+
+	timeout = (timeout_secs * PQI_HZ) + jiffies;
+
 	while (atomic_read(&device->scsi_cmds_outstanding)) {
 		pqi_check_ctrl_health(ctrl_info);
 		if (pqi_ctrl_offline(ctrl_info))
 			return -ENXIO;
+		if (timeout_secs != NO_TIMEOUT) {
+			if (time_after(jiffies, timeout)) {
+				dev_err(&ctrl_info->pci_dev->dev,
+					"timed out waiting for pending IO\n");
+				return -ETIMEDOUT;
+			}
+		}
 		usleep_range(1000, 2000);
 	}
 
 	return 0;
 }
 
-static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
+static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
+	unsigned long timeout_secs)
 {
 	bool io_pending;
 	unsigned long flags;
+	unsigned long timeout;
 	struct pqi_scsi_dev *device;
 
+	timeout = (timeout_secs * PQI_HZ) + jiffies;
 	while (1) {
 		io_pending = false;
 
@@ -5165,6 +5595,25 @@
 		if (pqi_ctrl_offline(ctrl_info))
 			return -ENXIO;
 
+		if (timeout_secs != NO_TIMEOUT) {
+			if (time_after(jiffies, timeout)) {
+				dev_err(&ctrl_info->pci_dev->dev,
+					"timed out waiting for pending IO\n");
+				return -ETIMEDOUT;
+			}
+		}
+		usleep_range(1000, 2000);
+	}
+
+	return 0;
+}
+
+static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info)
+{
+	while (atomic_read(&ctrl_info->sync_cmds_outstanding)) {
+		pqi_check_ctrl_health(ctrl_info);
+		if (pqi_ctrl_offline(ctrl_info))
+			return -ENXIO;
 		usleep_range(1000, 2000);
 	}
 
@@ -5179,7 +5628,8 @@
 	complete(waiting);
 }
 
-#define PQI_LUN_RESET_TIMEOUT_SECS	10
+#define PQI_LUN_RESET_TIMEOUT_SECS		30
+#define PQI_LUN_RESET_POLL_COMPLETION_SECS	10
 
 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *device, struct completion *wait)
@@ -5188,7 +5638,7 @@
 
 	while (1) {
 		if (wait_for_completion_io_timeout(wait,
-			PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
+			PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) {
 			rc = 0;
 			break;
 		}
@@ -5225,6 +5675,9 @@
 	memcpy(request->lun_number, device->scsi3addr,
 		sizeof(request->lun_number));
 	request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
+	if (ctrl_info->tmf_iu_timeout_supported)
+		put_unaligned_le16(PQI_LUN_RESET_TIMEOUT_SECS,
+					&request->timeout);
 
 	pqi_start_io(ctrl_info,
 		&ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
@@ -5241,16 +5694,55 @@
 
 /* Performs a reset at the LUN level. */
 
+#define PQI_LUN_RESET_RETRIES			3
+#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS	10000
+#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS	120
+
+static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device)
+{
+	int rc;
+	unsigned int retries;
+	unsigned long timeout_secs;
+
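+	/* Retry the LUN reset a few times before declaring it failed. */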
+	for (retries = 0;;) {
+		rc = pqi_lun_reset(ctrl_info, device);
+		if (rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
+			break;
+		msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
+	}
+
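+	/*
+	 * If the reset failed, bound the wait for outstanding commands;
+	 * otherwise wait for the queue to drain with no timeout.
+	 */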
+	timeout_secs = rc ? PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS : NO_TIMEOUT;
+
+	rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);
+
+	return rc == 0 ? SUCCESS : FAILED;
+}
+
 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *device)
 {
 	int rc;
 
-	rc = pqi_lun_reset(ctrl_info, device);
-	if (rc == 0)
-		rc = pqi_device_wait_for_pending_io(ctrl_info, device);
+	mutex_lock(&ctrl_info->lun_reset_mutex);
 
-	return rc == 0 ? SUCCESS : FAILED;
+	pqi_ctrl_block_requests(ctrl_info);
+	pqi_ctrl_wait_until_quiesced(ctrl_info);
+	pqi_fail_io_queued_for_device(ctrl_info, device);
+	rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
+	pqi_device_reset_start(device);
+	pqi_ctrl_unblock_requests(ctrl_info);
+
+	if (rc)
+		rc = FAILED;
+	else
+		rc = _pqi_device_reset(ctrl_info, device);
+
+	pqi_device_reset_done(device);
+
+	mutex_unlock(&ctrl_info->lun_reset_mutex);
+
+	return rc;
 }
 
 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
@@ -5269,28 +5761,17 @@
 		shost->host_no, device->bus, device->target, device->lun);
 
 	pqi_check_ctrl_health(ctrl_info);
-	if (pqi_ctrl_offline(ctrl_info)) {
+	if (pqi_ctrl_offline(ctrl_info) ||
+		pqi_device_reset_blocked(ctrl_info)) {
 		rc = FAILED;
 		goto out;
 	}
 
-	mutex_lock(&ctrl_info->lun_reset_mutex);
+	pqi_wait_until_ofa_finished(ctrl_info);
 
-	pqi_ctrl_block_requests(ctrl_info);
-	pqi_ctrl_wait_until_quiesced(ctrl_info);
-	pqi_fail_io_queued_for_device(ctrl_info, device);
-	rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
-	pqi_device_reset_start(device);
-	pqi_ctrl_unblock_requests(ctrl_info);
-
-	if (rc)
-		rc = FAILED;
-	else
-		rc = pqi_device_reset(ctrl_info, device);
-
-	pqi_device_reset_done(device);
-
-	mutex_unlock(&ctrl_info->lun_reset_mutex);
+	atomic_inc(&ctrl_info->sync_cmds_outstanding);
+	rc = pqi_device_reset(ctrl_info, device);
+	atomic_dec(&ctrl_info->sync_cmds_outstanding);
 
 out:
 	dev_err(&ctrl_info->pci_dev->dev,
@@ -5335,6 +5816,10 @@
 			scsi_change_queue_depth(sdev,
 				device->advertised_queue_depth);
 		}
+		if (pqi_is_logical_device(device))
+			pqi_disable_write_same(sdev);
+		else
+			sdev->allow_restart = 1;
 	}
 
 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -5346,11 +5831,46 @@
 {
 	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
 
-	return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev, 0);
+	return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+					ctrl_info->pci_dev, 0);
 }
 
-static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
-	void __user *arg)
+static int pqi_slave_configure(struct scsi_device *sdev)
+{
+	struct pqi_scsi_dev *device;
+
+	device = sdev->hostdata;
+	device->devtype = sdev->type;
+
+	return 0;
+}
+
+static void pqi_slave_destroy(struct scsi_device *sdev)
+{
+	unsigned long flags;
+	struct pqi_scsi_dev *device;
+	struct pqi_ctrl_info *ctrl_info;
+
+	ctrl_info = shost_to_hba(sdev->host);
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+	device = sdev->hostdata;
+	if (device) {
+		sdev->hostdata = NULL;
+		if (!list_empty(&device->scsi_device_list_entry))
+			list_del(&device->scsi_device_list_entry);
+	}
+
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+	if (device) {
+		pqi_dev_info(ctrl_info, "removed", device);
+		pqi_free_device(device);
+	}
+}
+
+static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
 {
 	struct pci_dev *pci_dev;
 	u32 subsystem_vendor;
@@ -5367,8 +5887,7 @@
 	pciinfo.dev_fn = pci_dev->devfn;
 	subsystem_vendor = pci_dev->subsystem_vendor;
 	subsystem_device = pci_dev->subsystem_device;
-	pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
-		subsystem_vendor;
+	pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
 
 	if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
 		return -EFAULT;
@@ -5545,7 +6064,7 @@
 
 		rc = pqi_map_single(ctrl_info->pci_dev,
 			&request.sg_descriptors[0], kernel_buffer,
-			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+			iocommand.buf_size, DMA_BIDIRECTIONAL);
 		if (rc)
 			goto out;
 
@@ -5554,12 +6073,15 @@
 
 	put_unaligned_le16(iu_length, &request.header.iu_length);
 
+	if (ctrl_info->raid_iu_timeout_supported)
+		put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
+
 	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
 		PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
 
 	if (iocommand.buf_size > 0)
 		pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-			PCI_DMA_BIDIRECTIONAL);
+			DMA_BIDIRECTIONAL);
 
 	memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
 
@@ -5599,12 +6121,16 @@
 	return rc;
 }
 
-static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
+		     void __user *arg)
 {
 	int rc;
 	struct pqi_ctrl_info *ctrl_info;
 
 	ctrl_info = shost_to_hba(sdev->host);
+
+	if (pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info))
+		return -EBUSY;
 
 	switch (cmd) {
 	case CCISS_DEREGDISK:
@@ -5629,23 +6155,59 @@
 	return rc;
 }
 
-static ssize_t pqi_version_show(struct device *dev,
+static ssize_t pqi_firmware_version_show(struct device *dev,
 	struct device_attribute *attr, char *buffer)
 {
-	ssize_t count = 0;
 	struct Scsi_Host *shost;
 	struct pqi_ctrl_info *ctrl_info;
 
 	shost = class_to_shost(dev);
 	ctrl_info = shost_to_hba(shost);
 
-	count += snprintf(buffer + count, PAGE_SIZE - count,
-		"  driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
+	return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
+}
 
-	count += snprintf(buffer + count, PAGE_SIZE - count,
-		"firmware: %s\n", ctrl_info->firmware_version);
+static ssize_t pqi_driver_version_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	return snprintf(buffer, PAGE_SIZE, "%s\n",
+			DRIVER_VERSION BUILD_TIMESTAMP);
+}
 
-	return count;
+static ssize_t pqi_serial_number_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	struct Scsi_Host *shost;
+	struct pqi_ctrl_info *ctrl_info;
+
+	shost = class_to_shost(dev);
+	ctrl_info = shost_to_hba(shost);
+
+	return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
+}
+
+static ssize_t pqi_model_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	struct Scsi_Host *shost;
+	struct pqi_ctrl_info *ctrl_info;
+
+	shost = class_to_shost(dev);
+	ctrl_info = shost_to_hba(shost);
+
+	return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
+}
+
+static ssize_t pqi_vendor_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	struct Scsi_Host *shost;
+	struct pqi_ctrl_info *ctrl_info;
+
+	shost = class_to_shost(dev);
+	ctrl_info = shost_to_hba(shost);
+
+	return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
 }
 
 static ssize_t pqi_host_rescan_store(struct device *dev,
@@ -5666,14 +6228,14 @@
 
 	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
 		if (pqi_lockup_actions[i].action == pqi_lockup_action)
-			count += snprintf(buffer + count, PAGE_SIZE - count,
+			count += scnprintf(buffer + count, PAGE_SIZE - count,
 				"[%s] ", pqi_lockup_actions[i].name);
 		else
-			count += snprintf(buffer + count, PAGE_SIZE - count,
+			count += scnprintf(buffer + count, PAGE_SIZE - count,
 				"%s ", pqi_lockup_actions[i].name);
 	}
 
-	count += snprintf(buffer + count, PAGE_SIZE - count, "\n");
+	count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
 
 	return count;
 }
@@ -5698,17 +6260,174 @@
 	return -EINVAL;
 }
 
-static DEVICE_ATTR(version, 0444, pqi_version_show, NULL);
+static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
+static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
+static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
+static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
+static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
 static DEVICE_ATTR(lockup_action, 0644,
 	pqi_lockup_action_show, pqi_lockup_action_store);
 
 static struct device_attribute *pqi_shost_attrs[] = {
-	&dev_attr_version,
+	&dev_attr_driver_version,
+	&dev_attr_firmware_version,
+	&dev_attr_model,
+	&dev_attr_serial_number,
+	&dev_attr_vendor,
 	&dev_attr_rescan,
 	&dev_attr_lockup_action,
 	NULL
 };
+
+static ssize_t pqi_unique_id_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	struct pqi_ctrl_info *ctrl_info;
+	struct scsi_device *sdev;
+	struct pqi_scsi_dev *device;
+	unsigned long flags;
+	u8 unique_id[16];
+
+	sdev = to_scsi_device(dev);
+	ctrl_info = shost_to_hba(sdev->host);
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+	device = sdev->hostdata;
+	if (!device) {
+		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+		return -ENODEV;
+	}
+
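+	/*
+	 * Physical devices report an 8-byte WWID padded with leading zeros
+	 * to 16 bytes; logical volumes have a full 16-byte volume ID.
+	 */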
+	if (device->is_physical_device) {
+		memset(unique_id, 0, 8);
+		memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid));
+	} else {
+		memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
+	}
+
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+	return snprintf(buffer, PAGE_SIZE,
+		"%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
+		unique_id[0], unique_id[1], unique_id[2], unique_id[3],
+		unique_id[4], unique_id[5], unique_id[6], unique_id[7],
+		unique_id[8], unique_id[9], unique_id[10], unique_id[11],
+		unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
+}
+
+static ssize_t pqi_lunid_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	struct pqi_ctrl_info *ctrl_info;
+	struct scsi_device *sdev;
+	struct pqi_scsi_dev *device;
+	unsigned long flags;
+	u8 lunid[8];
+
+	sdev = to_scsi_device(dev);
+	ctrl_info = shost_to_hba(sdev->host);
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+	device = sdev->hostdata;
+	if (!device) {
+		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+		return -ENODEV;
+	}
+
+	memcpy(lunid, device->scsi3addr, sizeof(lunid));
+
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+	return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
+}
+
+#define MAX_PATHS	8
+
+static ssize_t pqi_path_info_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct pqi_ctrl_info *ctrl_info;
+	struct scsi_device *sdev;
+	struct pqi_scsi_dev *device;
+	unsigned long flags;
+	int i;
+	int output_len = 0;
+	u8 box;
+	u8 bay;
+	u8 path_map_index;
+	char *active;
+	u8 phys_connector[2];
+
+	sdev = to_scsi_device(dev);
+	ctrl_info = shost_to_hba(sdev->host);
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+	device = sdev->hostdata;
+	if (!device) {
+		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+		return -ENODEV;
+	}
+
+	bay = device->bay;
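+	/* Emit one line per path: the active path plus any inactive ones. */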
+	for (i = 0; i < MAX_PATHS; i++) {
+		path_map_index = 1 << i;
+		if (i == device->active_path_index)
+			active = "Active";
+		else if (device->path_map & path_map_index)
+			active = "Inactive";
+		else
+			continue;
+
+		output_len += scnprintf(buf + output_len,
+					PAGE_SIZE - output_len,
+					"[%d:%d:%d:%d] %20.20s ",
+					ctrl_info->scsi_host->host_no,
+					device->bus, device->target,
+					device->lun,
+					scsi_device_type(device->devtype));
+
+		if (device->devtype == TYPE_RAID ||
+			pqi_is_logical_device(device))
+			goto end_buffer;
+
+		memcpy(&phys_connector, &device->phys_connector[i],
+			sizeof(phys_connector));
+		if (phys_connector[0] < '0')
+			phys_connector[0] = '0';
+		if (phys_connector[1] < '0')
+			phys_connector[1] = '0';
+
+		output_len += scnprintf(buf + output_len,
+					PAGE_SIZE - output_len,
+					"PORT: %.2s ", phys_connector);
+
+		box = device->box[i];
+		if (box != 0 && box != 0xFF)
+			output_len += scnprintf(buf + output_len,
+						PAGE_SIZE - output_len,
+						"BOX: %hhu ", box);
+
+		if ((device->devtype == TYPE_DISK ||
+			device->devtype == TYPE_ZBC) &&
+			pqi_expose_device(device))
+			output_len += scnprintf(buf + output_len,
+						PAGE_SIZE - output_len,
+						"BAY: %hhu ", bay);
+
+end_buffer:
+		output_len += scnprintf(buf + output_len,
+					PAGE_SIZE - output_len,
+					"%s\n", active);
+	}
+
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+	return output_len;
+}
 
 static ssize_t pqi_sas_address_show(struct device *dev,
 	struct device_attribute *attr, char *buffer)
@@ -5725,11 +6444,11 @@
 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
 
 	device = sdev->hostdata;
-	if (pqi_is_logical_device(device)) {
-		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
-			flags);
+	if (!device || !pqi_is_device_with_sas_address(device)) {
+		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 		return -ENODEV;
 	}
+
 	sas_address = device->sas_address;
 
 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -5751,6 +6470,11 @@
 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
 
 	device = sdev->hostdata;
+	if (!device) {
+		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+		return -ENODEV;
+	}
+
 	buffer[0] = device->raid_bypass_enabled ? '1' : '0';
 	buffer[1] = '\n';
 	buffer[2] = '\0';
@@ -5775,6 +6499,10 @@
 	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
 
 	device = sdev->hostdata;
+	if (!device) {
+		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+		return -ENODEV;
+	}
 
 	if (pqi_is_logical_device(device))
 		raid_level = pqi_raid_level_to_string(device->raid_level);
@@ -5786,15 +6514,49 @@
 	return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
 }
 
+static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	struct pqi_ctrl_info *ctrl_info;
+	struct scsi_device *sdev;
+	struct pqi_scsi_dev *device;
+	unsigned long flags;
+	int raid_bypass_cnt;
+
+	sdev = to_scsi_device(dev);
+	ctrl_info = shost_to_hba(sdev->host);
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+	device = sdev->hostdata;
+	if (!device) {
+		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+		return -ENODEV;
+	}
+
+	raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);
+
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+	return snprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
+}
+
+static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
+static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
+static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
-static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
-	pqi_ssd_smart_path_enabled_show, NULL);
+static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
+static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
 
 static struct device_attribute *pqi_sdev_attrs[] = {
+	&dev_attr_lunid,
+	&dev_attr_unique_id,
+	&dev_attr_path_info,
 	&dev_attr_sas_address,
 	&dev_attr_ssd_smart_path_enabled,
 	&dev_attr_raid_level,
+	&dev_attr_raid_bypass_cnt,
 	NULL
 };
 
@@ -5806,10 +6568,11 @@
 	.scan_start = pqi_scan_start,
 	.scan_finished = pqi_scan_finished,
 	.this_id = -1,
-	.use_clustering = ENABLE_CLUSTERING,
 	.eh_device_reset_handler = pqi_eh_device_reset_handler,
 	.ioctl = pqi_ioctl,
 	.slave_alloc = pqi_slave_alloc,
+	.slave_configure = pqi_slave_configure,
+	.slave_destroy = pqi_slave_destroy,
 	.map_queues = pqi_map_queues,
 	.sdev_attrs = pqi_sdev_attrs,
 	.shost_attrs = pqi_shost_attrs,
@@ -5843,6 +6606,7 @@
 	shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
 	shost->unique_id = shost->irq;
 	shost->nr_hw_queues = ctrl_info->num_queue_groups;
+	shost->host_tagset = 1;
 	shost->hostdata[0] = (unsigned long)ctrl_info;
 
 	rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
@@ -5947,7 +6711,30 @@
 	return rc;
 }
 
-static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
+static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
+{
+	int rc;
+	struct bmic_sense_subsystem_info *sense_info;
+
+	sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
+	if (!sense_info)
+		return -ENOMEM;
+
+	rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
+	if (rc)
+		goto out;
+
+	memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
+		sizeof(sense_info->ctrl_serial_number));
+	ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
+
+out:
+	kfree(sense_info);
+
+	return rc;
+}
+
+static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
 {
 	int rc;
 	struct bmic_identify_controller *identify;
@@ -5968,10 +6755,282 @@
 		sizeof(ctrl_info->firmware_version),
 		"-%u", get_unaligned_le16(&identify->firmware_build_number));
 
+	memcpy(ctrl_info->model, identify->product_id,
+		sizeof(identify->product_id));
+	ctrl_info->model[sizeof(identify->product_id)] = '\0';
+
+	memcpy(ctrl_info->vendor, identify->vendor_id,
+		sizeof(identify->vendor_id));
+	ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
+
 out:
 	kfree(identify);
 
 	return rc;
+}
+
+struct pqi_config_table_section_info {
+	struct pqi_ctrl_info *ctrl_info;
+	void		*section;
+	u32		section_offset;
+	void __iomem	*section_iomem_addr;
+};
+
+static inline bool pqi_is_firmware_feature_supported(
+	struct pqi_config_table_firmware_features *firmware_features,
+	unsigned int bit_position)
+{
+	unsigned int byte_index;
+
+	byte_index = bit_position / BITS_PER_BYTE;
+
+	if (byte_index >= le16_to_cpu(firmware_features->num_elements))
+		return false;
+
+	return firmware_features->features_supported[byte_index] &
+		(1 << (bit_position % BITS_PER_BYTE)) ? true : false;
+}
+
+static inline bool pqi_is_firmware_feature_enabled(
+	struct pqi_config_table_firmware_features *firmware_features,
+	void __iomem *firmware_features_iomem_addr,
+	unsigned int bit_position)
+{
+	unsigned int byte_index;
+	u8 __iomem *features_enabled_iomem_addr;
+
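+	/*
+	 * The firmware features section holds three parallel byte arrays of
+	 * num_elements bytes each: supported, requested, and enabled.  Skip
+	 * the first two arrays to index into the enabled bytes.
+	 */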
+	byte_index = (bit_position / BITS_PER_BYTE) +
+		(le16_to_cpu(firmware_features->num_elements) * 2);
+
+	features_enabled_iomem_addr = firmware_features_iomem_addr +
+		offsetof(struct pqi_config_table_firmware_features,
+			features_supported) + byte_index;
+
+	return *((__force u8 *)features_enabled_iomem_addr) &
+		(1 << (bit_position % BITS_PER_BYTE)) ? true : false;
+}
+
+static inline void pqi_request_firmware_feature(
+	struct pqi_config_table_firmware_features *firmware_features,
+	unsigned int bit_position)
+{
+	unsigned int byte_index;
+
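+	/* The requested array immediately follows the supported array. */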
+	byte_index = (bit_position / BITS_PER_BYTE) +
+		le16_to_cpu(firmware_features->num_elements);
+
+	firmware_features->features_supported[byte_index] |=
+		(1 << (bit_position % BITS_PER_BYTE));
+}
+
+static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
+	u16 first_section, u16 last_section)
+{
+	struct pqi_vendor_general_request request;
+
+	memset(&request, 0, sizeof(request));
+
+	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
+	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
+		&request.header.iu_length);
+	put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
+		&request.function_code);
+	put_unaligned_le16(first_section,
+		&request.data.config_table_update.first_section);
+	put_unaligned_le16(last_section,
+		&request.data.config_table_update.last_section);
+
+	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+		0, NULL, NO_TIMEOUT);
+}
+
+static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_config_table_firmware_features *firmware_features,
+	void __iomem *firmware_features_iomem_addr)
+{
+	void *features_requested;
+	void __iomem *features_requested_iomem_addr;
+
+	features_requested = firmware_features->features_supported +
+		le16_to_cpu(firmware_features->num_elements);
+
+	features_requested_iomem_addr = firmware_features_iomem_addr +
+		(features_requested - (void *)firmware_features);
+
+	memcpy_toio(features_requested_iomem_addr, features_requested,
+		le16_to_cpu(firmware_features->num_elements));
+
+	return pqi_config_table_update(ctrl_info,
+		PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
+		PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
+}
+
+struct pqi_firmware_feature {
+	char		*feature_name;
+	unsigned int	feature_bit;
+	bool		supported;
+	bool		enabled;
+	void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
+		struct pqi_firmware_feature *firmware_feature);
+};
+
+static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_firmware_feature *firmware_feature)
+{
+	if (!firmware_feature->supported) {
+		dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
+			firmware_feature->feature_name);
+		return;
+	}
+
+	if (firmware_feature->enabled) {
+		dev_info(&ctrl_info->pci_dev->dev,
+			"%s enabled\n", firmware_feature->feature_name);
+		return;
+	}
+
+	dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
+		firmware_feature->feature_name);
+}
+
+static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_firmware_feature *firmware_feature)
+{
+	switch (firmware_feature->feature_bit) {
+	case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
+		ctrl_info->soft_reset_handshake_supported =
+			firmware_feature->enabled;
+		break;
+	case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
+		ctrl_info->raid_iu_timeout_supported =
+			firmware_feature->enabled;
+		break;
+	case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
+		ctrl_info->tmf_iu_timeout_supported =
+			firmware_feature->enabled;
+		break;
+	}
+
+	pqi_firmware_feature_status(ctrl_info, firmware_feature);
+}
+
+static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_firmware_feature *firmware_feature)
+{
+	if (firmware_feature->feature_status)
+		firmware_feature->feature_status(ctrl_info, firmware_feature);
+}
+
+static DEFINE_MUTEX(pqi_firmware_features_mutex);
+
+static struct pqi_firmware_feature pqi_firmware_features[] = {
+	{
+		.feature_name = "Online Firmware Activation",
+		.feature_bit = PQI_FIRMWARE_FEATURE_OFA,
+		.feature_status = pqi_firmware_feature_status,
+	},
+	{
+		.feature_name = "Serial Management Protocol",
+		.feature_bit = PQI_FIRMWARE_FEATURE_SMP,
+		.feature_status = pqi_firmware_feature_status,
+	},
+	{
+		.feature_name = "New Soft Reset Handshake",
+		.feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
+		.feature_status = pqi_ctrl_update_feature_flags,
+	},
+	{
+		.feature_name = "RAID IU Timeout",
+		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
+		.feature_status = pqi_ctrl_update_feature_flags,
+	},
+	{
+		.feature_name = "TMF IU Timeout",
+		.feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
+		.feature_status = pqi_ctrl_update_feature_flags,
+	},
+};
+
+static void pqi_process_firmware_features(
+	struct pqi_config_table_section_info *section_info)
+{
+	int rc;
+	struct pqi_ctrl_info *ctrl_info;
+	struct pqi_config_table_firmware_features *firmware_features;
+	void __iomem *firmware_features_iomem_addr;
+	unsigned int i;
+	unsigned int num_features_supported;
+
+	ctrl_info = section_info->ctrl_info;
+	firmware_features = section_info->section;
+	firmware_features_iomem_addr = section_info->section_iomem_addr;
+
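+	/*
+	 * Three passes over the feature table: record which of the driver's
+	 * known features the firmware supports, request all of those, then
+	 * read back which ones the firmware actually enabled and report the
+	 * status of each.
+	 */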
+	for (i = 0, num_features_supported = 0;
+		i < ARRAY_SIZE(pqi_firmware_features); i++) {
+		if (pqi_is_firmware_feature_supported(firmware_features,
+			pqi_firmware_features[i].feature_bit)) {
+			pqi_firmware_features[i].supported = true;
+			num_features_supported++;
+		} else {
+			pqi_firmware_feature_update(ctrl_info,
+				&pqi_firmware_features[i]);
+		}
+	}
+
+	if (num_features_supported == 0)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
+		if (!pqi_firmware_features[i].supported)
+			continue;
+		pqi_request_firmware_feature(firmware_features,
+			pqi_firmware_features[i].feature_bit);
+	}
+
+	rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
+		firmware_features_iomem_addr);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"failed to enable firmware features in PQI configuration table\n");
+		for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
+			if (!pqi_firmware_features[i].supported)
+				continue;
+			pqi_firmware_feature_update(ctrl_info,
+				&pqi_firmware_features[i]);
+		}
+		return;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
+		if (!pqi_firmware_features[i].supported)
+			continue;
+		if (pqi_is_firmware_feature_enabled(firmware_features,
+			firmware_features_iomem_addr,
+			pqi_firmware_features[i].feature_bit)) {
+			pqi_firmware_features[i].enabled = true;
+		}
+		pqi_firmware_feature_update(ctrl_info,
+			&pqi_firmware_features[i]);
+	}
+}
+
+static void pqi_init_firmware_features(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
+		pqi_firmware_features[i].supported = false;
+		pqi_firmware_features[i].enabled = false;
+	}
+}
+
+static void pqi_process_firmware_features_section(
+	struct pqi_config_table_section_info *section_info)
+{
+	mutex_lock(&pqi_firmware_features_mutex);
+	pqi_init_firmware_features();
+	pqi_process_firmware_features(section_info);
+	mutex_unlock(&pqi_firmware_features_mutex);
 }
 
 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
@@ -5981,8 +7040,11 @@
 	void __iomem *table_iomem_addr;
 	struct pqi_config_table *config_table;
 	struct pqi_config_table_section_header *section;
+	struct pqi_config_table_section_info section_info;
 
 	table_length = ctrl_info->config_table_length;
+	if (table_length == 0)
+		return 0;
 
 	config_table = kmalloc(table_length, GFP_KERNEL);
 	if (!config_table) {
@@ -5999,13 +7061,22 @@
 		ctrl_info->config_table_offset;
 	memcpy_fromio(config_table, table_iomem_addr, table_length);
 
+	section_info.ctrl_info = ctrl_info;
 	section_offset =
 		get_unaligned_le32(&config_table->first_section_offset);
 
 	while (section_offset) {
 		section = (void *)config_table + section_offset;
 
+		section_info.section = section;
+		section_info.section_offset = section_offset;
+		section_info.section_iomem_addr =
+			table_iomem_addr + section_offset;
+
 		switch (get_unaligned_le16(&section->section_id)) {
+		case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
+			pqi_process_firmware_features_section(&section_info);
+			break;
 		case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
 			if (pqi_disable_heartbeat)
 				dev_warn(&ctrl_info->pci_dev->dev,
@@ -6017,6 +7088,13 @@
 					offsetof(
 					struct pqi_config_table_heartbeat,
 						heartbeat_counter);
+			break;
+		case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
+			ctrl_info->soft_reset_status =
+				table_iomem_addr +
+				section_offset +
+				offsetof(struct pqi_config_table_soft_reset,
+						soft_reset_status);
 			break;
 		}
 
@@ -6071,13 +7149,20 @@
 	return pqi_revert_to_sis_mode(ctrl_info);
 }
 
+#define PQI_POST_RESET_DELAY_B4_MSGU_READY	5000
+
 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
 {
 	int rc;
 
-	rc = pqi_force_sis_mode(ctrl_info);
-	if (rc)
-		return rc;
+	if (reset_devices) {
+		sis_soft_reset(ctrl_info);
+		msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
+	} else {
+		rc = pqi_force_sis_mode(ctrl_info);
+		if (rc)
+			return rc;
+	}
 
 	/*
 	 * Wait until the controller is ready to start accepting SIS
@@ -6150,10 +7235,6 @@
 	ctrl_info->pqi_mode_enabled = true;
 	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
 
-	rc = pqi_process_config_table(ctrl_info);
-	if (rc)
-		return rc;
-
 	rc = pqi_alloc_admin_queues(ctrl_info);
 	if (rc) {
 		dev_err(&ctrl_info->pci_dev->dev,
@@ -6215,6 +7296,11 @@
 	pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
 
 	ctrl_info->controller_online = true;
+
+	rc = pqi_process_config_table(ctrl_info);
+	if (rc)
+		return rc;
+
 	pqi_start_heartbeat_timer(ctrl_info);
 
 	rc = pqi_enable_events(ctrl_info);
@@ -6229,10 +7315,24 @@
 	if (rc)
 		return rc;
 
-	rc = pqi_get_ctrl_firmware_version(ctrl_info);
+	rc = pqi_get_ctrl_product_details(ctrl_info);
 	if (rc) {
 		dev_err(&ctrl_info->pci_dev->dev,
-			"error obtaining firmware version\n");
+			"error obtaining product details\n");
+		return rc;
+	}
+
+	rc = pqi_get_ctrl_serial_number(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error obtaining ctrl serial number\n");
+		return rc;
+	}
+
+	rc = pqi_set_diag_rescan(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error enabling multi-lun rescan\n");
 		return rc;
 	}
 
@@ -6293,6 +7393,24 @@
 		return rc;
 
 	/*
+	 * Get the controller properties.  This allows us to determine
+	 * whether or not it supports PQI mode.
+	 */
+	rc = sis_get_ctrl_properties(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error obtaining controller properties\n");
+		return rc;
+	}
+
+	rc = sis_get_pqi_capabilities(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error obtaining controller capabilities\n");
+		return rc;
+	}
+
+	/*
 	 * If the function we are about to call succeeds, the
 	 * controller will transition from legacy SIS mode
 	 * into PQI mode.
@@ -6332,13 +7450,32 @@
 	pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
 
 	ctrl_info->controller_online = true;
-	pqi_start_heartbeat_timer(ctrl_info);
 	pqi_ctrl_unblock_requests(ctrl_info);
+
+	rc = pqi_process_config_table(ctrl_info);
+	if (rc)
+		return rc;
+
+	pqi_start_heartbeat_timer(ctrl_info);
 
 	rc = pqi_enable_events(ctrl_info);
 	if (rc) {
 		dev_err(&ctrl_info->pci_dev->dev,
 			"error enabling events\n");
+		return rc;
+	}
+
+	rc = pqi_get_ctrl_product_details(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error obtaining product details\n");
+		return rc;
+	}
+
+	rc = pqi_set_diag_rescan(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error enabling multi-lun rescan\n");
 		return rc;
 	}
 
@@ -6359,8 +7496,12 @@
 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
 	u16 timeout)
 {
-	return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
+	int rc;
+
+	rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
 		PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
+
+	return pcibios_err_to_errno(rc);
 }
 
 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
@@ -6393,7 +7534,7 @@
 		goto disable_device;
 	}
 
-	ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
+	ctrl_info->iomem_base = ioremap(pci_resource_start(
 		ctrl_info->pci_dev, 0),
 		sizeof(struct pqi_ctrl_registers));
 	if (!ctrl_info->iomem_base) {
@@ -6452,12 +7593,14 @@
 
 	mutex_init(&ctrl_info->scan_mutex);
 	mutex_init(&ctrl_info->lun_reset_mutex);
+	mutex_init(&ctrl_info->ofa_mutex);
 
 	INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
 	spin_lock_init(&ctrl_info->scsi_device_list_lock);
 
 	INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
 	atomic_set(&ctrl_info->num_interrupts, 0);
+	atomic_set(&ctrl_info->sync_cmds_outstanding, 0);
 
 	INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
 	INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
@@ -6521,11 +7664,221 @@
 {
 	pqi_cancel_rescan_worker(ctrl_info);
 	pqi_cancel_update_time_worker(ctrl_info);
-	pqi_remove_all_scsi_devices(ctrl_info);
 	pqi_unregister_scsi(ctrl_info);
 	if (ctrl_info->pqi_mode_enabled)
 		pqi_revert_to_sis_mode(ctrl_info);
 	pqi_free_ctrl_resources(ctrl_info);
+}
+
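+/*
+ * Quiesce the controller for Online Firmware Activation: stop background
+ * workers, block and drain outstanding I/O, fail anything still queued,
+ * and drop back to SIS mode so the new firmware can be activated
+ * underneath the driver.
+ */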
+static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
+{
+	pqi_cancel_update_time_worker(ctrl_info);
+	pqi_cancel_rescan_worker(ctrl_info);
+	pqi_wait_until_lun_reset_finished(ctrl_info);
+	pqi_wait_until_scan_finished(ctrl_info);
+	pqi_ctrl_ofa_start(ctrl_info);
+	pqi_ctrl_block_requests(ctrl_info);
+	pqi_ctrl_wait_until_quiesced(ctrl_info);
+	pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS);
+	pqi_fail_io_queued_for_all_devices(ctrl_info);
+	pqi_wait_until_inbound_queues_empty(ctrl_info);
+	pqi_stop_heartbeat_timer(ctrl_info);
+	ctrl_info->pqi_mode_enabled = false;
+	pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
+}
+
+static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
+{
+	pqi_ofa_free_host_buffer(ctrl_info);
+	ctrl_info->pqi_mode_enabled = true;
+	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
+	ctrl_info->controller_online = true;
+	pqi_ctrl_unblock_requests(ctrl_info);
+	pqi_start_heartbeat_timer(ctrl_info);
+	pqi_schedule_update_time_worker(ctrl_info);
+	pqi_clear_soft_reset_status(ctrl_info,
+		PQI_SOFT_RESET_ABORT);
+	pqi_scan_scsi_devices(ctrl_info);
+}
+
+static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
+	u32 total_size, u32 chunk_size)
+{
+	u32 sg_count;
+	u32 size;
+	int i;
+	struct pqi_sg_descriptor *mem_descriptor = NULL;
+	struct device *dev;
+	struct pqi_ofa_memory *ofap;
+
+	dev = &ctrl_info->pci_dev->dev;
+
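+	/* Round up to the number of chunks needed to cover total_size. */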
+	sg_count = (total_size + chunk_size - 1);
+	sg_count /= chunk_size;
+
+	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
+
+	if (sg_count * chunk_size < total_size)
+		goto out;
+
+	ctrl_info->pqi_ofa_chunk_virt_addr =
+				kcalloc(sg_count, sizeof(void *), GFP_KERNEL);
+	if (!ctrl_info->pqi_ofa_chunk_virt_addr)
+		goto out;
+
+	for (size = 0, i = 0; size < total_size; size += chunk_size, i++) {
+		dma_addr_t dma_handle;
+
+		ctrl_info->pqi_ofa_chunk_virt_addr[i] =
+			dma_alloc_coherent(dev, chunk_size, &dma_handle,
+					   GFP_KERNEL);
+
+		if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
+			break;
+
+		mem_descriptor = &ofap->sg_descriptor[i];
+		put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
+		put_unaligned_le32(chunk_size, &mem_descriptor->length);
+	}
+
+	if (!size || size < total_size)
+		goto out_free_chunks;
+
+	put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
+	put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
+	put_unaligned_le32(size, &ofap->bytes_allocated);
+
+	return 0;
+
+out_free_chunks:
+	while (--i >= 0) {
+		mem_descriptor = &ofap->sg_descriptor[i];
+		dma_free_coherent(dev, chunk_size,
+				ctrl_info->pqi_ofa_chunk_virt_addr[i],
+				get_unaligned_le64(&mem_descriptor->address));
+	}
+	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
+
+out:
+	put_unaligned_le32(0, &ofap->bytes_allocated);
+	return -ENOMEM;
+}
+
+static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
+{
+	u32 total_size;
+	u32 min_chunk_size;
+	u32 chunk_sz;
+
+	total_size = le32_to_cpu(
+			ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
+	min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS;
+
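+	/*
+	 * Try a single large allocation first, then keep halving the chunk
+	 * size until an allocation succeeds or the chunk count would exceed
+	 * the available SG descriptors.
+	 */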
+	for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2)
+		if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
+			return 0;
+
+	return -ENOMEM;
+}
+
+static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
+	u32 bytes_requested)
+{
+	struct pqi_ofa_memory *pqi_ofa_memory;
+	struct device *dev;
+
+	dev = &ctrl_info->pci_dev->dev;
+	pqi_ofa_memory = dma_alloc_coherent(dev,
+					    PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
+					    &ctrl_info->pqi_ofa_mem_dma_handle,
+					    GFP_KERNEL);
+
+	if (!pqi_ofa_memory)
+		return;
+
+	put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version);
+	memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE,
+					sizeof(pqi_ofa_memory->signature));
+	pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested);
+
+	ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;
+
+	if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0)
+		dev_err(dev, "failed to allocate host buffer of size = %u\n",
+			bytes_requested);
+}
+
+static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
+{
+	int i;
+	struct pqi_sg_descriptor *mem_descriptor;
+	struct pqi_ofa_memory *ofap;
+
+	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
+
+	if (!ofap)
+		return;
+
+	if (!ofap->bytes_allocated)
+		goto out;
+
+	mem_descriptor = ofap->sg_descriptor;
+
+	for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors);
+		i++) {
+		dma_free_coherent(&ctrl_info->pci_dev->dev,
+			get_unaligned_le32(&mem_descriptor[i].length),
+			ctrl_info->pqi_ofa_chunk_virt_addr[i],
+			get_unaligned_le64(&mem_descriptor[i].address));
+	}
+	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
+
+out:
+	dma_free_coherent(&ctrl_info->pci_dev->dev,
+			PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap,
+			ctrl_info->pqi_ofa_mem_dma_handle);
+	ctrl_info->pqi_ofa_mem_virt_addr = NULL;
+}
+
+static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
+{
+	struct pqi_vendor_general_request request;
+	size_t size;
+	struct pqi_ofa_memory *ofap;
+
+	memset(&request, 0, sizeof(request));
+
+	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
+
+	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
+	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
+		&request.header.iu_length);
+	put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
+		&request.function_code);
+
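+	/*
+	 * If no host buffer was allocated, leave the descriptor zeroed so
+	 * the firmware sees a zero-length buffer.
+	 */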
+	if (ofap) {
+		size = offsetof(struct pqi_ofa_memory, sg_descriptor) +
+			get_unaligned_le16(&ofap->num_memory_descriptors) *
+			sizeof(struct pqi_sg_descriptor);
+
+		put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
+			&request.data.ofa_memory_allocation.buffer_address);
+		put_unaligned_le32(size,
+			&request.data.ofa_memory_allocation.buffer_length);
+	}
+
+	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+		0, NULL, NO_TIMEOUT);
+}
+
+static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
+{
+	msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
+	return pqi_ctrl_init_resume(ctrl_info);
 }
 
 static void pqi_perform_lockup_action(void)
@@ -6626,7 +7979,7 @@
 	const struct pci_device_id *id)
 {
 	int rc;
-	int node;
+	int node, cp_node;
 	struct pqi_ctrl_info *ctrl_info;
 
 	pqi_print_ctrl_info(pci_dev, id);
@@ -6644,8 +7997,12 @@
 			"controller device ID matched using wildcards\n");
 
 	node = dev_to_node(&pci_dev->dev);
-	if (node == NUMA_NO_NODE)
-		set_dev_node(&pci_dev->dev, 0);
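+	/* No NUMA node assigned: use CPU 0's node, falling back to node 0. */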
+	if (node == NUMA_NO_NODE) {
+		cp_node = cpu_to_node(0);
+		if (cp_node == NUMA_NO_NODE)
+			cp_node = 0;
+		set_dev_node(&pci_dev->dev, cp_node);
+	}
 
 	ctrl_info = pqi_alloc_ctrl_info(node);
 	if (!ctrl_info) {
@@ -6680,7 +8037,25 @@
 	if (!ctrl_info)
 		return;
 
+	ctrl_info->in_shutdown = true;
+
 	pqi_remove_ctrl(ctrl_info);
+}
+
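+/*
+ * Complain about any I/O slot still in use this late in shutdown; with
+ * panic_on_warn set, the WARN_ON()s below crash the system, as the
+ * function name promises.
+ */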
+static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
+{
+	unsigned int i;
+	struct pqi_io_request *io_request;
+	struct scsi_cmnd *scmd;
+
+	for (i = 0; i < ctrl_info->max_io_slots; i++) {
+		io_request = &ctrl_info->io_request_pool[i];
+		if (atomic_read(&io_request->refcount) == 0)
+			continue;
+		scmd = io_request->scmd;
+		WARN_ON(scmd != NULL); /* I/O command from the SCSI midlayer */
+		WARN_ON(scmd == NULL); /* non-I/O or driver-initiated command */
+	}
 }
 
 static void pqi_shutdown(struct pci_dev *pci_dev)
@@ -6689,22 +8064,51 @@
 	struct pqi_ctrl_info *ctrl_info;
 
 	ctrl_info = pci_get_drvdata(pci_dev);
-	if (!ctrl_info)
-		goto error;
+	if (!ctrl_info) {
+		dev_err(&pci_dev->dev,
+			"cache could not be flushed\n");
+		return;
+	}
+
+	pqi_disable_events(ctrl_info);
+	pqi_wait_until_ofa_finished(ctrl_info);
+	pqi_cancel_update_time_worker(ctrl_info);
+	pqi_cancel_rescan_worker(ctrl_info);
+	pqi_cancel_event_worker(ctrl_info);
+
+	pqi_ctrl_shutdown_start(ctrl_info);
+	pqi_ctrl_wait_until_quiesced(ctrl_info);
+
+	rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
+	if (rc) {
+		dev_err(&pci_dev->dev,
+			"wait for pending I/O failed\n");
+		return;
+	}
+
+	pqi_ctrl_block_device_reset(ctrl_info);
+	pqi_wait_until_lun_reset_finished(ctrl_info);
 
 	/*
 	 * Write all data in the controller's battery-backed cache to
 	 * storage.
 	 */
 	rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
-	pqi_free_interrupts(ctrl_info);
-	pqi_reset(ctrl_info);
-	if (rc == 0)
-		return;
+	if (rc)
+		dev_err(&pci_dev->dev,
+			"unable to flush controller cache\n");
 
-error:
-	dev_warn(&pci_dev->dev,
-		"unable to flush controller cache\n");
+	pqi_ctrl_block_requests(ctrl_info);
+
+	rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);
+	if (rc) {
+		dev_err(&pci_dev->dev,
+			"wait for pending sync cmds failed\n");
+		return;
+	}
+
+	pqi_crash_if_pending_command(ctrl_info);
+	pqi_reset(ctrl_info);
 }
 
 static void pqi_process_lockup_action_param(void)
@@ -6742,11 +8146,12 @@
 	pqi_cancel_rescan_worker(ctrl_info);
 	pqi_wait_until_scan_finished(ctrl_info);
 	pqi_wait_until_lun_reset_finished(ctrl_info);
+	pqi_wait_until_ofa_finished(ctrl_info);
 	pqi_flush_cache(ctrl_info, SUSPEND);
 	pqi_ctrl_block_requests(ctrl_info);
 	pqi_ctrl_wait_until_quiesced(ctrl_info);
 	pqi_wait_until_inbound_queues_empty(ctrl_info);
-	pqi_ctrl_wait_for_pending_io(ctrl_info);
+	pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
 	pqi_stop_heartbeat_timer(ctrl_info);
 
 	if (state.event == PM_EVENT_FREEZE)
@@ -6828,7 +8233,35 @@
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x193d, 0x1104)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x193d, 0x1105)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x193d, 0x1106)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x193d, 0x1107)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x193d, 0x8460)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       0x193d, 0x8461)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x193d, 0xc460)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x193d, 0xc461)
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
@@ -6865,6 +8298,50 @@
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       0x1bd4, 0x004c)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1bd4, 0x004f)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1bd4, 0x0051)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1bd4, 0x0052)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1bd4, 0x0053)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1bd4, 0x0054)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x19e5, 0xd227)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x19e5, 0xd228)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x19e5, 0xd229)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x19e5, 0xd22a)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x19e5, 0xd22b)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x19e5, 0xd22c)
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
@@ -6905,6 +8382,18 @@
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_ADAPTEC2, 0x0807)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x0808)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x0809)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x080a)
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
@@ -6992,6 +8481,122 @@
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x1400)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x1402)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x1410)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x1411)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x1412)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x1420)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x1430)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x1440)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x1441)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x1450)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x1452)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x1460)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x1461)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x1462)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x1470)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x1471)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x1472)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x1480)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x1490)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x1491)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_ADVANTECH, 0x8312)
 	},
 	{
@@ -7056,11 +8661,51 @@
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_HP, 0x1002)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_HP, 0x1100)
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_HP, 0x1101)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1590, 0x0294)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1590, 0x02db)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1590, 0x02dc)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1590, 0x032e)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1d8d, 0x0800)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1d8d, 0x0908)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1d8d, 0x0806)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1d8d, 0x0916)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_GIGABYTE, 0x1000)
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
@@ -7089,8 +8734,7 @@
 
 	pr_info(DRIVER_NAME "\n");
 
-	pqi_sas_transport_template =
-		sas_attach_transport(&pqi_sas_transport_functions);
+	pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
 	if (!pqi_sas_transport_template)
 		return -ENODEV;
 
@@ -7280,11 +8924,11 @@
 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
 		data.delete_operational_queue.queue_id) != 12);
 	BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
-	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
+	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
 		data.create_operational_iq) != 64 - 11);
-	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
+	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
 		data.create_operational_oq) != 64 - 11);
-	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
+	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
 		data.delete_operational_queue) != 64 - 11);
 
 	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
@@ -7331,6 +8975,8 @@
 		error_index) != 27);
 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
 		cdb) != 32);
+	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+		timeout) != 60);
 	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
 		sg_descriptors) != 64);
 	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
@@ -7486,6 +9132,8 @@
 	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
 		nexus_id) != 10);
 	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+		timeout) != 14);
+	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
 		lun_number) != 16);
 	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
 		protocol_specific) != 24);

--
Gitblit v1.6.2