From 50a212ec906f7524620675f0c57357691c26c81f Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 16 Oct 2024 01:20:19 +0000
Subject: [PATCH] Modify GPIO export default initial value
---
kernel/drivers/scsi/qla2xxx/qla_mr.c | 349 ++++++++++++++++++++++++++--------------------------------
1 file changed, 156 insertions(+), 193 deletions(-)
diff --git a/kernel/drivers/scsi/qla2xxx/qla_mr.c b/kernel/drivers/scsi/qla2xxx/qla_mr.c
index 0c00aae..7178646 100644
--- a/kernel/drivers/scsi/qla2xxx/qla_mr.c
+++ b/kernel/drivers/scsi/qla2xxx/qla_mr.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* QLogic Fibre Channel HBA Driver
* Copyright (c) 2003-2014 QLogic Corporation
- *
- * See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
#include <linux/delay.h>
@@ -10,7 +9,6 @@
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
-#include <linux/bsg-lib.h>
#include <scsi/scsi_tcq.h>
#include <linux/utsname.h>
@@ -47,17 +45,16 @@
uint8_t io_lock_on;
uint16_t command = 0;
uint32_t *iptr;
- uint32_t __iomem *optr;
+ __le32 __iomem *optr;
uint32_t cnt;
uint32_t mboxes;
unsigned long wait_time;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- if (ha->pdev->error_state > pci_channel_io_frozen) {
+ if (ha->pdev->error_state == pci_channel_io_perm_failure) {
ql_log(ql_log_warn, vha, 0x115c,
- "error_state is greater than pci_channel_io_frozen, "
- "exiting.\n");
+ "PCI channel failed permanently, exiting.\n");
return QLA_FUNCTION_TIMEOUT;
}
@@ -111,7 +108,7 @@
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Load mailbox registers. */
- optr = (uint32_t __iomem *)&reg->ispfx00.mailbox0;
+ optr = &reg->ispfx00.mailbox0;
iptr = mcp->mb;
command = mcp->mb[0];
@@ -119,7 +116,7 @@
for (cnt = 0; cnt < ha->mbx_count; cnt++) {
if (mboxes & BIT_0)
- WRT_REG_DWORD(optr, *iptr);
+ wrt_reg_dword(optr, *iptr);
mboxes >>= 1;
optr++;
@@ -149,7 +146,8 @@
QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
+ WARN_ON_ONCE(wait_for_completion_timeout(&ha->mbx_intr_comp,
+ mcp->tov * HZ) != 0);
} else {
ql_dbg(ql_dbg_mbx, vha, 0x112c,
"Cmd=%x Polling Mode.\n", command);
@@ -273,9 +271,9 @@
if (rval) {
ql_log(ql_log_warn, base_vha, 0x1163,
- "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, "
- "mb[3]=%x, cmd=%x ****.\n",
- mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
+ "**** Failed=%x mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
+ command);
} else {
ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__);
}
@@ -629,17 +627,20 @@
*
* Returns 0 on success.
*/
-void
+int
qlafx00_soft_reset(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
+ int rval = QLA_FUNCTION_FAILED;
if (unlikely(pci_channel_offline(ha->pdev) &&
ha->flags.pci_channel_io_perm_failure))
- return;
+ return rval;
ha->isp_ops->disable_intrs(ha);
qlafx00_soc_cpu_reset(vha);
+
+ return QLA_SUCCESS;
}
/**
@@ -674,25 +675,23 @@
struct qla_hw_data *ha = vha->hw;
struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
- WRT_REG_DWORD(&reg->req_q_in, 0);
- WRT_REG_DWORD(&reg->req_q_out, 0);
+ wrt_reg_dword(&reg->req_q_in, 0);
+ wrt_reg_dword(&reg->req_q_out, 0);
- WRT_REG_DWORD(&reg->rsp_q_in, 0);
- WRT_REG_DWORD(&reg->rsp_q_out, 0);
+ wrt_reg_dword(&reg->rsp_q_in, 0);
+ wrt_reg_dword(&reg->rsp_q_out, 0);
/* PCI posting */
- RD_REG_DWORD(&reg->rsp_q_out);
+ rd_reg_dword(&reg->rsp_q_out);
}
char *
-qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
+qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
{
struct qla_hw_data *ha = vha->hw;
- if (pci_is_pcie(ha->pdev)) {
- strcpy(str, "PCIe iSA");
- return str;
- }
+ if (pci_is_pcie(ha->pdev))
+ strlcpy(str, "PCIe iSA", str_len);
return str;
}
@@ -765,7 +764,7 @@
}
ha->cregbase =
- ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
+ ioremap(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
if (!ha->cregbase) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
"cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
@@ -786,7 +785,7 @@
}
ha->iobase =
- ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
+ ioremap(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
if (!ha->iobase) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
"cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
@@ -889,9 +888,9 @@
/* 30 seconds wait - Adjust if required */
wait_time = 30;
- pseudo_aen = RD_REG_DWORD(&reg->pseudoaen);
+ pseudo_aen = rd_reg_dword(&reg->pseudoaen);
if (pseudo_aen == 1) {
- aenmbx7 = RD_REG_DWORD(&reg->initval7);
+ aenmbx7 = rd_reg_dword(&reg->initval7);
ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7);
rval = qlafx00_driver_shutdown(vha, 10);
@@ -902,7 +901,7 @@
/* wait time before firmware ready */
wtime = jiffies + (wait_time * HZ);
do {
- aenmbx = RD_REG_DWORD(&reg->aenmailbox0);
+ aenmbx = rd_reg_dword(&reg->aenmailbox0);
barrier();
ql_dbg(ql_dbg_mbx, vha, 0x0133,
"aenmbx: 0x%x\n", aenmbx);
@@ -921,15 +920,15 @@
case MBA_FW_RESTART_CMPLT:
/* Set the mbx and rqstq intr code */
- aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
+ aenmbx7 = rd_reg_dword(&reg->aenmailbox7);
ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7);
- ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
- ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
- ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
- ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
- WRT_REG_DWORD(&reg->aenmailbox0, 0);
- RD_REG_DWORD_RELAXED(&reg->aenmailbox0);
+ ha->req_que_off = rd_reg_dword(&reg->aenmailbox1);
+ ha->rsp_que_off = rd_reg_dword(&reg->aenmailbox3);
+ ha->req_que_len = rd_reg_dword(&reg->aenmailbox5);
+ ha->rsp_que_len = rd_reg_dword(&reg->aenmailbox6);
+ wrt_reg_dword(&reg->aenmailbox0, 0);
+ rd_reg_dword_relaxed(&reg->aenmailbox0);
ql_dbg(ql_dbg_init, vha, 0x0134,
"f/w returned mbx_intr_code: 0x%x, "
"rqstq_intr_code: 0x%x\n",
@@ -959,13 +958,13 @@
* 3. issue Get FW State Mbox cmd to determine fw state
* Set the mbx and rqstq intr code from Shadow Regs
*/
- aenmbx7 = RD_REG_DWORD(&reg->initval7);
+ aenmbx7 = rd_reg_dword(&reg->initval7);
ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7);
- ha->req_que_off = RD_REG_DWORD(&reg->initval1);
- ha->rsp_que_off = RD_REG_DWORD(&reg->initval3);
- ha->req_que_len = RD_REG_DWORD(&reg->initval5);
- ha->rsp_que_len = RD_REG_DWORD(&reg->initval6);
+ ha->req_que_off = rd_reg_dword(&reg->initval1);
+ ha->rsp_que_off = rd_reg_dword(&reg->initval3);
+ ha->req_que_len = rd_reg_dword(&reg->initval5);
+ ha->rsp_que_len = rd_reg_dword(&reg->initval6);
ql_dbg(ql_dbg_init, vha, 0x0135,
"f/w returned mbx_intr_code: 0x%x, "
"rqstq_intr_code: 0x%x\n",
@@ -1011,7 +1010,7 @@
if (time_after_eq(jiffies, wtime)) {
ql_dbg(ql_dbg_init, vha, 0x0137,
"Init f/w failed: aen[7]: 0x%x\n",
- RD_REG_DWORD(&reg->aenmailbox7));
+ rd_reg_dword(&reg->aenmailbox7));
rval = QLA_FUNCTION_FAILED;
done = true;
break;
@@ -1115,8 +1114,8 @@
ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088,
"Listing Target bit map...\n");
- ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha,
- 0x2089, (uint8_t *)ha->gid_list, 32);
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha, 0x2089,
+ ha->gid_list, 32);
/* Allocate temporary rmtport for any new rmtports discovered. */
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
@@ -1186,9 +1185,9 @@
" Existing TGT-ID %x did not get "
" offline event from firmware.\n",
fcport->old_tgt_id);
- qla2x00_mark_device_lost(vha, fcport, 0, 0);
+ qla2x00_mark_device_lost(vha, fcport, 0);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- kfree(new_fcport);
+ qla2x00_free_fcport(new_fcport);
return rval;
}
break;
@@ -1206,7 +1205,7 @@
return QLA_MEMORY_ALLOC_FAILED;
}
- kfree(new_fcport);
+ qla2x00_free_fcport(new_fcport);
return rval;
}
@@ -1250,7 +1249,7 @@
if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
if (fcport->port_type != FCT_INITIATOR)
- qla2x00_mark_device_lost(vha, fcport, 0, 0);
+ qla2x00_mark_device_lost(vha, fcport, 0);
}
}
@@ -1274,7 +1273,7 @@
/* Free all new device structures not processed. */
list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
list_del(&fcport->list);
- kfree(fcport);
+ qla2x00_free_fcport(fcport);
}
return rval;
@@ -1297,6 +1296,7 @@
{
int rval;
unsigned long flags;
+
rval = QLA_SUCCESS;
flags = vha->dpc_flags;
@@ -1404,7 +1404,7 @@
pkt = rsp->ring_ptr;
for (cnt = 0; cnt < rsp->length; cnt++) {
pkt->signature = RESPONSE_PROCESSED;
- WRT_REG_DWORD((void __force __iomem *)&pkt->signature,
+ wrt_reg_dword((void __force __iomem *)&pkt->signature,
RESPONSE_PROCESSED);
pkt++;
}
@@ -1420,13 +1420,13 @@
qla2x00_request_irqs(ha, ha->rsp_q_map[0]);
- aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
+ aenmbx7 = rd_reg_dword(&reg->aenmailbox7);
ha->mbx_intr_code = MSW(aenmbx7);
ha->rqstq_intr_code = LSW(aenmbx7);
- ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
- ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
- ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
- ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
+ ha->req_que_off = rd_reg_dword(&reg->aenmailbox1);
+ ha->rsp_que_off = rd_reg_dword(&reg->aenmailbox3);
+ ha->req_que_len = rd_reg_dword(&reg->aenmailbox5);
+ ha->rsp_que_len = rd_reg_dword(&reg->aenmailbox6);
ql_dbg(ql_dbg_disc, vha, 0x2094,
"fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
@@ -1471,7 +1471,7 @@
(!test_bit(UNLOADING, &vha->dpc_flags)) &&
(!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
(ha->mr.fw_hbt_en)) {
- fw_heart_beat = RD_REG_DWORD(&reg->fwheartbeat);
+ fw_heart_beat = rd_reg_dword(&reg->fwheartbeat);
if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
ha->mr.old_fw_hbt_cnt = fw_heart_beat;
ha->mr.fw_hbt_miss_cnt = 0;
@@ -1491,7 +1491,7 @@
if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
/* Reset recovery to be performed in timer routine */
- aenmbx0 = RD_REG_DWORD(&reg->aenmailbox0);
+ aenmbx0 = rd_reg_dword(&reg->aenmailbox0);
if (ha->mr.fw_reset_timer_exp) {
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
@@ -1681,15 +1681,14 @@
if (!fcport)
return;
- qla2x00_mark_device_lost(vha, fcport, 0, 0);
+ qla2x00_mark_device_lost(vha, fcport, 0);
return;
}
-int
+void
qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
{
- int rval = 0;
uint32_t aen_code, aen_data;
aen_code = FCH_EVT_VENDOR_UNIQUE;
@@ -1715,7 +1714,7 @@
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
} else if (evt->u.aenfx.mbx[2] == 2) {
vha->device_flags |= DFLG_NO_CABLE;
- qla2x00_mark_all_devices_lost(vha, 1);
+ qla2x00_mark_all_devices_lost(vha);
}
}
break;
@@ -1740,8 +1739,6 @@
fc_host_post_event(vha->host, fc_get_event_number(),
aen_code, aen_data);
-
- return rval;
}
static void
@@ -1772,10 +1769,8 @@
complete(&lio->u.fxiocb.fxiocb_comp);
}
-static void
-qla2x00_fxdisc_sp_done(void *ptr, int res)
+static void qla2x00_fxdisc_sp_done(srb_t *sp, int res)
{
- srb_t *sp = ptr;
struct srb_iocb *lio = &sp->u.iocb_cmd;
complete(&lio->u.fxiocb.fxiocb_comp);
@@ -1854,22 +1849,22 @@
phost_info = &preg_hsi->hsi;
memset(preg_hsi, 0, sizeof(struct register_host_info));
phost_info->os_type = OS_TYPE_LINUX;
- strncpy(phost_info->sysname,
- p_sysid->sysname, SYSNAME_LENGTH);
- strncpy(phost_info->nodename,
- p_sysid->nodename, NODENAME_LENGTH);
+ strlcpy(phost_info->sysname, p_sysid->sysname,
+ sizeof(phost_info->sysname));
+ strlcpy(phost_info->nodename, p_sysid->nodename,
+ sizeof(phost_info->nodename));
if (!strcmp(phost_info->nodename, "(none)"))
ha->mr.host_info_resend = true;
- strncpy(phost_info->release,
- p_sysid->release, RELEASE_LENGTH);
- strncpy(phost_info->version,
- p_sysid->version, VERSION_LENGTH);
- strncpy(phost_info->machine,
- p_sysid->machine, MACHINE_LENGTH);
- strncpy(phost_info->domainname,
- p_sysid->domainname, DOMNAME_LENGTH);
- strncpy(phost_info->hostdriver,
- QLA2XXX_VERSION, VERSION_LENGTH);
+ strlcpy(phost_info->release, p_sysid->release,
+ sizeof(phost_info->release));
+ strlcpy(phost_info->version, p_sysid->version,
+ sizeof(phost_info->version));
+ strlcpy(phost_info->machine, p_sysid->machine,
+ sizeof(phost_info->machine));
+ strlcpy(phost_info->domainname, p_sysid->domainname,
+ sizeof(phost_info->domainname));
+ strlcpy(phost_info->hostdriver, QLA2XXX_VERSION,
+ sizeof(phost_info->hostdriver));
preg_hsi->utc = (uint64_t)ktime_get_real_seconds();
ql_dbg(ql_dbg_init, vha, 0x0149,
"ISP%04X: Host registration with firmware\n",
@@ -1890,8 +1885,7 @@
phost_info->domainname,
phost_info->hostdriver);
ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d,
- (uint8_t *)phost_info,
- sizeof(struct host_system_info));
+ phost_info, sizeof(*phost_info));
}
}
@@ -1915,8 +1909,10 @@
if (fx_type == FXDISC_GET_CONFIG_INFO) {
struct config_info_data *pinfo =
(struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
- strcpy(vha->hw->model_number, pinfo->model_num);
- strcpy(vha->hw->model_desc, pinfo->model_description);
+ strlcpy(vha->hw->model_number, pinfo->model_num,
+ ARRAY_SIZE(vha->hw->model_number));
+ strlcpy(vha->hw->model_desc, pinfo->model_description,
+ ARRAY_SIZE(vha->hw->model_desc));
memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
sizeof(vha->hw->mr.symbolic_name));
memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
@@ -1945,7 +1941,7 @@
vha->d_id.b.al_pa = pinfo->port_id[2];
qlafx00_update_host_attr(vha, pinfo);
ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141,
- (uint8_t *)pinfo, 16);
+ pinfo, 16);
} else if (fx_type == FXDISC_GET_TGT_NODE_INFO) {
struct qlafx00_tgt_node_info *pinfo =
(struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
@@ -1953,12 +1949,12 @@
memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE);
fcport->port_type = FCT_TARGET;
ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144,
- (uint8_t *)pinfo, 16);
+ pinfo, 16);
} else if (fx_type == FXDISC_GET_TGT_NODE_LIST) {
struct qlafx00_tgt_node_info *pinfo =
(struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146,
- (uint8_t *)pinfo, 16);
+ pinfo, 16);
memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
} else if (fx_type == FXDISC_ABORT_IOCTL)
fdisc->u.fxiocb.result =
@@ -2189,7 +2185,7 @@
struct bsg_job *bsg_job;
struct fc_bsg_reply *bsg_reply;
struct srb_iocb *iocb_job;
- int res;
+ int res = 0;
struct qla_mt_iocb_rsp_fx00 fstatus;
uint8_t *fw_sts_ptr;
@@ -2225,18 +2221,16 @@
fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
- memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
- sizeof(struct qla_mt_iocb_rsp_fx00));
+ memcpy(fw_sts_ptr, &fstatus, sizeof(fstatus));
bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t);
ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
- sp->fcport->vha, 0x5080,
- (uint8_t *)pkt, sizeof(struct ioctl_iocb_entry_fx00));
+ sp->vha, 0x5080, pkt, sizeof(*pkt));
ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
- sp->fcport->vha, 0x5074,
- (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));
+ sp->vha, 0x5074,
+ fw_sts_ptr, sizeof(fstatus));
res = bsg_reply->result = DID_OK << 16;
bsg_reply->reply_payload_rcv_len =
@@ -2491,7 +2485,7 @@
atomic_read(&fcport->state));
if (atomic_read(&fcport->state) == FCS_ONLINE)
- qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+ qla2x00_mark_device_lost(fcport->vha, fcport, 1);
break;
case CS_ABORTED:
@@ -2517,6 +2511,8 @@
if (rsp->status_srb == NULL)
sp->done(sp, res);
+ else
+ WARN_ON_ONCE(true);
}
/**
@@ -2574,7 +2570,7 @@
/* Move sense data. */
ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e,
- (uint8_t *)pkt, sizeof(sts_cont_entry_t));
+ pkt, sizeof(*pkt));
memcpy(sense_ptr, pkt->data, sense_sz);
ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a,
sense_ptr, sense_sz);
@@ -2594,6 +2590,8 @@
if (sense_len == 0) {
rsp->status_srb = NULL;
sp->done(sp, cp->result);
+ } else {
+ WARN_ON_ONCE(true);
}
}
@@ -2601,7 +2599,7 @@
* qlafx00_multistatus_entry() - Process Multi response queue entries.
* @vha: SCSI driver HA context
* @rsp: response queue
- * @pkt:
+ * @pkt: received packet
*/
static void
qlafx00_multistatus_entry(struct scsi_qla_host *vha,
@@ -2658,12 +2656,10 @@
* @vha: SCSI driver HA context
* @rsp: response queue
* @pkt: Entry pointer
- * @estatus:
- * @etype:
*/
static void
qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
- struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype)
+ struct sts_entry_fx00 *pkt)
{
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
@@ -2671,9 +2667,6 @@
uint16_t que = 0;
struct req_que *req = NULL;
int res = DID_ERROR << 16;
-
- ql_dbg(ql_dbg_async, vha, 0x507f,
- "type of error status in response: 0x%x\n", estatus);
req = ha->req_q_map[que];
@@ -2701,7 +2694,7 @@
uint16_t lreq_q_in = 0;
uint16_t lreq_q_out = 0;
- lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in);
+ lreq_q_in = rd_reg_dword(rsp->rsp_q_in);
lreq_q_out = rsp->ring_index;
while (lreq_q_in != lreq_q_out) {
@@ -2722,9 +2715,11 @@
if (pkt->entry_status != 0 &&
pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
+ ql_dbg(ql_dbg_async, vha, 0x507f,
+ "type of error status in response: 0x%x\n",
+ pkt->entry_status);
qlafx00_error_entry(vha, rsp,
- (struct sts_entry_fx00 *)pkt, pkt->entry_status,
- pkt->entry_type);
+ (struct sts_entry_fx00 *)pkt);
continue;
}
@@ -2761,7 +2756,7 @@
}
/* Adjust ring index */
- WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
+ wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
}
/**
@@ -2792,9 +2787,9 @@
break;
case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
- ha->aenmb[1] = RD_REG_DWORD(&reg->aenmailbox1);
- ha->aenmb[2] = RD_REG_DWORD(&reg->aenmailbox2);
- ha->aenmb[3] = RD_REG_DWORD(&reg->aenmailbox3);
+ ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1);
+ ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2);
+ ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3);
ql_dbg(ql_dbg_async, vha, 0x5077,
"Asynchronous port Update received "
"aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
@@ -2824,13 +2819,13 @@
break;
default:
- ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
- ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
- ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
- ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4);
- ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5);
- ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6);
- ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7);
+ ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1);
+ ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2);
+ ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3);
+ ha->aenmb[4] = rd_reg_dword(&reg->aenmailbox4);
+ ha->aenmb[5] = rd_reg_dword(&reg->aenmailbox5);
+ ha->aenmb[6] = rd_reg_dword(&reg->aenmailbox6);
+ ha->aenmb[7] = rd_reg_dword(&reg->aenmailbox7);
ql_dbg(ql_dbg_async, vha, 0x5078,
"AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
@@ -2844,13 +2839,13 @@
/**
* qlafx00x_mbx_completion() - Process mailbox command completions.
* @vha: SCSI driver HA context
- * @mb0:
+ * @mb0: value to be written into mailbox register 0
*/
static void
qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
{
uint16_t cnt;
- uint32_t __iomem *wptr;
+ __le32 __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
@@ -2860,17 +2855,17 @@
/* Load return mailbox registers. */
ha->flags.mbox_int = 1;
ha->mailbox_out32[0] = mb0;
- wptr = (uint32_t __iomem *)&reg->mailbox17;
+ wptr = &reg->mailbox17;
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
- ha->mailbox_out32[cnt] = RD_REG_DWORD(wptr);
+ ha->mailbox_out32[cnt] = rd_reg_dword(wptr);
wptr++;
}
}
/**
* qlafx00_intr_handler() - Process interrupts for the ISPFX00.
- * @irq:
+ * @irq: interrupt number
* @dev_id: SCSI driver HA context
*
* Called by system whenever the host adapter generates an interrupt.
@@ -2917,13 +2912,13 @@
break;
if (stat & QLAFX00_INTR_MB_CMPLT) {
- mb[0] = RD_REG_WORD(&reg->mailbox16);
+ mb[0] = rd_reg_dword(&reg->mailbox16);
qlafx00_mbx_completion(vha, mb[0]);
status |= MBX_INTERRUPT;
clr_intr |= QLAFX00_INTR_MB_CMPLT;
}
if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) {
- ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
+ ha->aenmb[0] = rd_reg_dword(&reg->aenmailbox0);
qlafx00_async_event(vha);
clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
}
@@ -2972,7 +2967,7 @@
uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt)
{
uint16_t avail_dsds;
- __le32 *cur_dsd;
+ struct dsd64 *cur_dsd;
scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
struct scatterlist *sg;
@@ -3008,12 +3003,10 @@
/* One DSD is available in the Command Type 3 IOCB */
avail_dsds = 1;
- cur_dsd = (__le32 *)&lcmd_pkt->dseg_0_address;
+ cur_dsd = &lcmd_pkt->dsd;
/* Load data segments */
scsi_for_each_sg(cmd, sg, tot_dsds, i) {
- dma_addr_t sle_dma;
-
/* Allocate additional continuation packets? */
if (avail_dsds == 0) {
/*
@@ -3023,26 +3016,23 @@
memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE);
cont_pkt =
qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
- cur_dsd = (__le32 *)lcont_pkt.dseg_0_address;
+ cur_dsd = lcont_pkt.dsd;
avail_dsds = 5;
cont = 1;
}
- sle_dma = sg_dma_address(sg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ append_dsd64(&cur_dsd, sg);
avail_dsds--;
if (avail_dsds == 0 && cont == 1) {
cont = 0;
memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
- REQUEST_ENTRY_SIZE);
+ sizeof(lcont_pkt));
}
}
if (avail_dsds != 0 && cont == 1) {
memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
- REQUEST_ENTRY_SIZE);
+ sizeof(lcont_pkt));
}
}
@@ -3057,7 +3047,6 @@
{
int nseg;
unsigned long flags;
- uint32_t index;
uint32_t handle;
uint16_t cnt;
uint16_t req_cnt;
@@ -3081,16 +3070,8 @@
/* Acquire ring specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < req->num_outstanding_cmds; index++) {
- handle++;
- if (handle == req->num_outstanding_cmds)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
- if (index == req->num_outstanding_cmds)
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
@@ -3105,7 +3086,7 @@
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
- cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
+ cnt = rd_reg_dword_relaxed(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
@@ -3127,7 +3108,7 @@
memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);
- lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
+ lcmd_pkt.handle = make_handle(req->id, sp->handle);
lcmd_pkt.reserved_0 = 0;
lcmd_pkt.port_path_ctrl = 0;
lcmd_pkt.reserved_1 = 0;
@@ -3152,9 +3133,9 @@
lcmd_pkt.entry_status = (uint8_t) rsp->id;
ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
- (uint8_t *)cmd->cmnd, cmd->cmd_len);
+ cmd->cmnd, cmd->cmd_len);
ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032,
- (uint8_t *)&lcmd_pkt, REQUEST_ENTRY_SIZE);
+ &lcmd_pkt, sizeof(lcmd_pkt));
memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE);
wmb();
@@ -3170,7 +3151,7 @@
sp->flags |= SRB_DMA_VALID;
/* Set chip new ring index. */
- WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ wrt_reg_dword(req->req_q_in, req->ring_index);
QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3197,7 +3178,7 @@
memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
tm_iocb.entry_count = 1;
- tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+ tm_iocb.handle = make_handle(req->id, sp->handle);
tm_iocb.reserved_0 = 0;
tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
@@ -3207,7 +3188,7 @@
sizeof(struct scsi_lun));
}
- memcpy((void *)ptm_iocb, &tm_iocb,
+ memcpy(ptm_iocb, &tm_iocb,
sizeof(struct tsk_mgmt_entry_fx00));
wmb();
}
@@ -3223,13 +3204,12 @@
memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
abt_iocb.entry_count = 1;
- abt_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
- abt_iocb.abort_handle =
- cpu_to_le32(MAKE_HANDLE(req->id, fxio->u.abt.cmd_hndl));
+ abt_iocb.handle = make_handle(req->id, sp->handle);
+ abt_iocb.abort_handle = make_handle(req->id, fxio->u.abt.cmd_hndl);
abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
abt_iocb.req_que_no = cpu_to_le16(req->id);
- memcpy((void *)pabt_iocb, &abt_iocb,
+ memcpy(pabt_iocb, &abt_iocb,
sizeof(struct abort_iocb_entry_fx00));
wmb();
}
@@ -3246,7 +3226,7 @@
memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00));
fx_iocb.entry_type = FX00_IOCB_TYPE;
- fx_iocb.handle = cpu_to_le32(sp->handle);
+ fx_iocb.handle = sp->handle;
fx_iocb.entry_count = entry_cnt;
if (sp->type == SRB_FXIOCB_DCMD) {
@@ -3262,11 +3242,9 @@
fx_iocb.req_dsdcnt = cpu_to_le16(1);
fx_iocb.req_xfrcnt =
cpu_to_le16(fxio->u.fxiocb.req_len);
- fx_iocb.dseg_rq_address[0] =
- cpu_to_le32(LSD(fxio->u.fxiocb.req_dma_handle));
- fx_iocb.dseg_rq_address[1] =
- cpu_to_le32(MSD(fxio->u.fxiocb.req_dma_handle));
- fx_iocb.dseg_rq_len =
+ put_unaligned_le64(fxio->u.fxiocb.req_dma_handle,
+ &fx_iocb.dseg_rq.address);
+ fx_iocb.dseg_rq.length =
cpu_to_le32(fxio->u.fxiocb.req_len);
}
@@ -3274,11 +3252,9 @@
fx_iocb.rsp_dsdcnt = cpu_to_le16(1);
fx_iocb.rsp_xfrcnt =
cpu_to_le16(fxio->u.fxiocb.rsp_len);
- fx_iocb.dseg_rsp_address[0] =
- cpu_to_le32(LSD(fxio->u.fxiocb.rsp_dma_handle));
- fx_iocb.dseg_rsp_address[1] =
- cpu_to_le32(MSD(fxio->u.fxiocb.rsp_dma_handle));
- fx_iocb.dseg_rsp_len =
+ put_unaligned_le64(fxio->u.fxiocb.rsp_dma_handle,
+ &fx_iocb.dseg_rsp.address);
+ fx_iocb.dseg_rsp.length =
cpu_to_le32(fxio->u.fxiocb.rsp_len);
}
@@ -3288,6 +3264,7 @@
fx_iocb.flags = fxio->u.fxiocb.flags;
} else {
struct scatterlist *sg;
+
bsg_job = sp->u.bsg_job;
bsg_request = bsg_job->request;
piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
@@ -3307,19 +3284,17 @@
int avail_dsds, tot_dsds;
cont_a64_entry_t lcont_pkt;
cont_a64_entry_t *cont_pkt = NULL;
- __le32 *cur_dsd;
+ struct dsd64 *cur_dsd;
int index = 0, cont = 0;
fx_iocb.req_dsdcnt =
cpu_to_le16(bsg_job->request_payload.sg_cnt);
tot_dsds =
bsg_job->request_payload.sg_cnt;
- cur_dsd = (__le32 *)&fx_iocb.dseg_rq_address[0];
+ cur_dsd = &fx_iocb.dseg_rq;
avail_dsds = 1;
for_each_sg(bsg_job->request_payload.sg_list, sg,
tot_dsds, index) {
- dma_addr_t sle_dma;
-
/* Allocate additional continuation packets? */
if (avail_dsds == 0) {
/*
@@ -3331,17 +3306,13 @@
cont_pkt =
qlafx00_prep_cont_type1_iocb(
sp->vha->req, &lcont_pkt);
- cur_dsd = (__le32 *)
- lcont_pkt.dseg_0_address;
+ cur_dsd = lcont_pkt.dsd;
avail_dsds = 5;
cont = 1;
entry_cnt++;
}
- sle_dma = sg_dma_address(sg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ append_dsd64(&cur_dsd, sg);
avail_dsds--;
if (avail_dsds == 0 && cont == 1) {
@@ -3369,19 +3340,17 @@
int avail_dsds, tot_dsds;
cont_a64_entry_t lcont_pkt;
cont_a64_entry_t *cont_pkt = NULL;
- __le32 *cur_dsd;
+ struct dsd64 *cur_dsd;
int index = 0, cont = 0;
fx_iocb.rsp_dsdcnt =
cpu_to_le16(bsg_job->reply_payload.sg_cnt);
tot_dsds = bsg_job->reply_payload.sg_cnt;
- cur_dsd = (__le32 *)&fx_iocb.dseg_rsp_address[0];
+ cur_dsd = &fx_iocb.dseg_rsp;
avail_dsds = 1;
for_each_sg(bsg_job->reply_payload.sg_list, sg,
tot_dsds, index) {
- dma_addr_t sle_dma;
-
/* Allocate additional continuation packets? */
if (avail_dsds == 0) {
/*
@@ -3393,17 +3362,13 @@
cont_pkt =
qlafx00_prep_cont_type1_iocb(
sp->vha->req, &lcont_pkt);
- cur_dsd = (__le32 *)
- lcont_pkt.dseg_0_address;
+ cur_dsd = lcont_pkt.dsd;
avail_dsds = 5;
cont = 1;
entry_cnt++;
}
- sle_dma = sg_dma_address(sg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ append_dsd64(&cur_dsd, sg);
avail_dsds--;
if (avail_dsds == 0 && cont == 1) {
@@ -3434,10 +3399,8 @@
}
ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
- sp->vha, 0x3047,
- (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));
+ sp->vha, 0x3047, &fx_iocb, sizeof(fx_iocb));
- memcpy_toio((void __iomem *)pfxiocb, &fx_iocb,
- sizeof(struct fxdisc_entry_fx00));
+ memcpy_toio((void __iomem *)pfxiocb, &fx_iocb, sizeof(fx_iocb));
wmb();
}
--
Gitblit v1.6.2