From 2f7c68cb55ecb7331f2381deb497c27155f32faf Mon Sep 17 00:00:00 2001 From: hc <hc@nodka.com> Date: Wed, 03 Jan 2024 09:43:39 +0000 Subject: [PATCH] update kernel to 5.10.198 --- kernel/drivers/scsi/qla2xxx/qla_nvme.c | 449 ++++++++++++++++++++++++++++++------------------------- 1 files changed, 242 insertions(+), 207 deletions(-) diff --git a/kernel/drivers/scsi/qla2xxx/qla_nvme.c b/kernel/drivers/scsi/qla2xxx/qla_nvme.c index dcd0f05..6dad778 100644 --- a/kernel/drivers/scsi/qla2xxx/qla_nvme.c +++ b/kernel/drivers/scsi/qla2xxx/qla_nvme.c @@ -1,8 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2017 QLogic Corporation - * - * See LICENSE.qla2xxx for copyright and licensing details. */ #include "qla_nvme.h" #include <linux/scatterlist.h> @@ -11,8 +10,6 @@ #include <linux/nvme-fc.h> static struct nvme_fc_port_template qla_nvme_fc_transport; - -static void qla_nvme_unregister_remote_port(struct work_struct *); int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport) { @@ -38,14 +35,13 @@ (fcport->nvme_flag & NVME_FLAG_REGISTERED)) return 0; - INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port); fcport->nvme_flag &= ~NVME_FLAG_RESETTING; memset(&req, 0, sizeof(struct nvme_fc_port_info)); req.port_name = wwn_to_u64(fcport->port_name); req.node_name = wwn_to_u64(fcport->node_name); req.port_role = 0; - req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO; + req.dev_loss_tmo = 0; if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR) req.port_role = FC_PORT_ROLE_NVME_INITIATOR; @@ -72,9 +68,16 @@ return ret; } + if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER) + ql_log(ql_log_info, vha, 0x212a, + "PortID:%06x Supports SLER\n", req.port_id); + + if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL) + ql_log(ql_log_info, vha, 0x212b, + "PortID:%06x Supports PI control\n", req.port_id); + rport = fcport->nvme_remote_port->private; rport->fcport = fcport; - list_add_tail(&rport->list, &vha->nvme_rport_list); fcport->nvme_flag |= NVME_FLAG_REGISTERED; return 0; @@ -106,73 +109,114 @@ return -EINVAL; } - if (ha->queue_pair_map[qidx]) { - *handle = ha->queue_pair_map[qidx]; - ql_log(ql_log_info, vha, 0x2121, - "Returning existing qpair of %p for idx=%x\n", - *handle, qidx); - return 0; - } + /* Use base qpair if max_qpairs is 0 */ + if (!ha->max_qpairs) { + qpair = ha->base_qpair; + } else { + if (ha->queue_pair_map[qidx]) { + *handle = ha->queue_pair_map[qidx]; + ql_log(ql_log_info, vha, 0x2121, + "Returning existing qpair of %p for idx=%x\n", + *handle, qidx); + return 0; + } - qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true); - if (qpair == NULL) { - ql_log(ql_log_warn, vha, 0x2122, - "Failed to allocate qpair\n"); - return -EINVAL; + qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true); + if (!qpair) { + ql_log(ql_log_warn, vha, 0x2122, + "Failed to allocate qpair\n"); + return -EINVAL; + } } *handle = qpair; return 0; } -static void qla_nvme_sp_ls_done(void *ptr, int res) +static void qla_nvme_release_fcp_cmd_kref(struct kref *kref) { - srb_t *sp = ptr; + struct srb *sp = container_of(kref, struct srb, cmd_kref); + struct nvme_private *priv = (struct nvme_private *)sp->priv; + struct nvmefc_fcp_req *fd; struct srb_iocb *nvme; - struct nvmefc_ls_req *fd; - struct nvme_private *priv; + unsigned long flags; - if (atomic_read(&sp->ref_count) == 0) { - ql_log(ql_log_warn, sp->fcport->vha, 0x2123, - "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp); - return; + if (!priv) + goto 
out; + + nvme = &sp->u.iocb_cmd; + fd = nvme->u.nvme.desc; + + spin_lock_irqsave(&priv->cmd_lock, flags); + priv->sp = NULL; + sp->priv = NULL; + if (priv->comp_status == QLA_SUCCESS) { + fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len); + fd->status = NVME_SC_SUCCESS; + } else { + fd->rcv_rsplen = 0; + fd->transferred_length = 0; + fd->status = NVME_SC_INTERNAL; } + spin_unlock_irqrestore(&priv->cmd_lock, flags); - if (!atomic_dec_and_test(&sp->ref_count)) + fd->done(fd); +out: + qla2xxx_rel_qpair_sp(sp->qpair, sp); +} + +static void qla_nvme_release_ls_cmd_kref(struct kref *kref) +{ + struct srb *sp = container_of(kref, struct srb, cmd_kref); + struct nvme_private *priv = (struct nvme_private *)sp->priv; + struct nvmefc_ls_req *fd; + unsigned long flags; + + if (!priv) + goto out; + + spin_lock_irqsave(&priv->cmd_lock, flags); + priv->sp = NULL; + sp->priv = NULL; + spin_unlock_irqrestore(&priv->cmd_lock, flags); + + fd = priv->fd; + + fd->done(fd, priv->comp_status); +out: + qla2x00_rel_sp(sp); +} + +static void qla_nvme_ls_complete(struct work_struct *work) +{ + struct nvme_private *priv = + container_of(work, struct nvme_private, ls_work); + + kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref); +} + +static void qla_nvme_sp_ls_done(srb_t *sp, int res) +{ + struct nvme_private *priv = sp->priv; + + if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0)) return; if (res) res = -EINVAL; - nvme = &sp->u.iocb_cmd; - fd = nvme->u.nvme.desc; - priv = fd->private; priv->comp_status = res; + INIT_WORK(&priv->ls_work, qla_nvme_ls_complete); schedule_work(&priv->ls_work); - /* work schedule doesn't need the sp */ - qla2x00_rel_sp(sp); } -static void qla_nvme_sp_done(void *ptr, int res) +/* it assumed that QPair lock is held. */ +static void qla_nvme_sp_done(srb_t *sp, int res) { - srb_t *sp = ptr; - struct srb_iocb *nvme; - struct nvmefc_fcp_req *fd; + struct nvme_private *priv = sp->priv; - nvme = &sp->u.iocb_cmd; - fd = nvme->u.nvme.desc; - - if (!atomic_dec_and_test(&sp->ref_count)) - return; - - if (res == QLA_SUCCESS) - fd->status = 0; - else - fd->status = NVME_SC_INTERNAL; - - fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len; - fd->done(fd); - qla2xxx_rel_qpair_sp(sp->qpair, sp); + priv->comp_status = res; + kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref); return; } @@ -186,30 +230,53 @@ struct qla_hw_data *ha = fcport->vha->hw; int rval; + ql_dbg(ql_dbg_io, fcport->vha, 0xffff, + "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n", + __func__, sp, sp->handle, fcport, fcport->deleted); + + if (!ha->flags.fw_started && fcport->deleted) + goto out; + + if (ha->flags.host_shutting_down) { + ql_log(ql_log_info, sp->fcport->vha, 0xffff, + "%s Calling done on sp: %p, type: 0x%x\n", + __func__, sp, sp->type); + sp->done(sp, 0); + goto out; + } + rval = ha->isp_ops->abort_command(sp); ql_dbg(ql_dbg_io, fcport->vha, 0x212b, "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n", __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted", sp, sp->handle, fcport, rval); + +out: + /* kref_get was done before work was schedule. 
*/ + kref_put(&sp->cmd_kref, sp->put_fn); } static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport, struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd) { struct nvme_private *priv = fd->private; + unsigned long flags; + + spin_lock_irqsave(&priv->cmd_lock, flags); + if (!priv->sp) { + spin_unlock_irqrestore(&priv->cmd_lock, flags); + return; + } + + if (!kref_get_unless_zero(&priv->sp->cmd_kref)) { + spin_unlock_irqrestore(&priv->cmd_lock, flags); + return; + } + spin_unlock_irqrestore(&priv->cmd_lock, flags); INIT_WORK(&priv->abort_work, qla_nvme_abort_work); schedule_work(&priv->abort_work); -} - -static void qla_nvme_ls_complete(struct work_struct *work) -{ - struct nvme_private *priv = - container_of(work, struct nvme_private, ls_work); - struct nvmefc_ls_req *fd = priv->fd; - - fd->done(fd, priv->comp_status); } static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, @@ -224,8 +291,16 @@ struct qla_hw_data *ha; srb_t *sp; + + if (!fcport || (fcport && fcport->deleted)) + return rval; + vha = fcport->vha; ha = vha->hw; + + if (!ha->flags.fw_started) + return rval; + /* Alloc SRB structure */ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); if (!sp) @@ -234,11 +309,13 @@ sp->type = SRB_NVME_LS; sp->name = "nvme_ls"; sp->done = qla_nvme_sp_ls_done; - atomic_set(&sp->ref_count, 1); - nvme = &sp->u.iocb_cmd; + sp->put_fn = qla_nvme_release_ls_cmd_kref; + sp->priv = priv; priv->sp = sp; + kref_init(&sp->cmd_kref); + spin_lock_init(&priv->cmd_lock); + nvme = &sp->u.iocb_cmd; priv->fd = fd; - INIT_WORK(&priv->ls_work, qla_nvme_ls_complete); nvme->u.nvme.desc = fd; nvme->u.nvme.dir = 0; nvme->u.nvme.dl = 0; @@ -246,8 +323,7 @@ nvme->u.nvme.rsp_len = fd->rsplen; nvme->u.nvme.rsp_dma = fd->rspdma; nvme->u.nvme.timeout_sec = fd->timeout; - nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr, - fd->rqstlen, DMA_TO_DEVICE); + nvme->u.nvme.cmd_dma = fd->rqstdma; dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma, fd->rqstlen, DMA_TO_DEVICE); @@ -255,8 +331,9 @@ if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x700e, "qla2x00_start_sp failed = %d\n", rval); - atomic_dec(&sp->ref_count); - wake_up(&sp->nvme_ls_waitq); + sp->priv = NULL; + priv->sp = NULL; + qla2x00_rel_sp(sp); return rval; } @@ -268,34 +345,34 @@ struct nvmefc_fcp_req *fd) { struct nvme_private *priv = fd->private; + unsigned long flags; + + spin_lock_irqsave(&priv->cmd_lock, flags); + if (!priv->sp) { + spin_unlock_irqrestore(&priv->cmd_lock, flags); + return; + } + if (!kref_get_unless_zero(&priv->sp->cmd_kref)) { + spin_unlock_irqrestore(&priv->cmd_lock, flags); + return; + } + spin_unlock_irqrestore(&priv->cmd_lock, flags); INIT_WORK(&priv->abort_work, qla_nvme_abort_work); schedule_work(&priv->abort_work); -} - -static void qla_nvme_poll(struct nvme_fc_local_port *lport, void *hw_queue_handle) -{ - struct qla_qpair *qpair = hw_queue_handle; - unsigned long flags; - struct scsi_qla_host *vha = lport->private; - - spin_lock_irqsave(&qpair->qp_lock, flags); - qla24xx_process_response_queue(vha, qpair->rsp); - spin_unlock_irqrestore(&qpair->qp_lock, flags); } static inline int qla2x00_start_nvme_mq(srb_t *sp) { unsigned long flags; uint32_t *clr_ptr; - uint32_t index; uint32_t handle; struct cmd_nvme *cmd_pkt; uint16_t cnt, i; uint16_t req_cnt; uint16_t tot_dsds; uint16_t avail_dsds; - uint32_t *cur_dsd; + struct dsd64 *cur_dsd; struct req_que *req = NULL; struct scsi_qla_host *vha = sp->fcport->vha; struct qla_hw_data *ha = vha->hw; @@ -303,6 +380,7 @@ struct srb_iocb *nvme = 
&sp->u.iocb_cmd; struct scatterlist *sgl, *sg; struct nvmefc_fcp_req *fd = nvme->u.nvme.desc; + struct nvme_fc_cmd_iu *cmd = fd->cmdaddr; uint32_t rval = QLA_SUCCESS; /* Setup qpair pointers */ @@ -312,24 +390,20 @@ /* Acquire qpair specific lock */ spin_lock_irqsave(&qpair->qp_lock, flags); - /* Check for room in outstanding command list. */ - handle = req->current_outstanding_cmd; - for (index = 1; index < req->num_outstanding_cmds; index++) { - handle++; - if (handle == req->num_outstanding_cmds) - handle = 1; - if (!req->outstanding_cmds[handle]) - break; - } - - if (index == req->num_outstanding_cmds) { + handle = qla2xxx_get_next_handle(req); + if (handle == 0) { rval = -EBUSY; goto queuing_error; } req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); if (req->cnt < (req_cnt + 2)) { - cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : - RD_REG_DWORD_RELAXED(req->req_q_out); + if (IS_SHADOW_REG_CAPABLE(ha)) { + cnt = *req->out_ptr; + } else { + cnt = rd_reg_dword_relaxed(req->req_q_out); + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) + goto queuing_error; + } if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; @@ -343,7 +417,6 @@ } if (unlikely(!fd->sqid)) { - struct nvme_fc_cmd_iu *cmd = fd->cmdaddr; if (cmd->sqe.common.opcode == nvme_admin_async_event) { nvme->u.nvme.aen_op = 1; atomic_inc(&ha->nvme_active_aen_cnt); @@ -357,7 +430,7 @@ req->cnt -= req_cnt; cmd_pkt = (struct cmd_nvme *)req->ring_ptr; - cmd_pkt->handle = MAKE_HANDLE(req->id, handle); + cmd_pkt->handle = make_handle(req->id, handle); /* Zero out remaining portion of packet. */ clr_ptr = (uint32_t *)cmd_pkt + 2; @@ -370,17 +443,29 @@ /* No data transfer how do we check buffer len == 0?? */ if (fd->io_dir == NVMEFC_FCP_READ) { - cmd_pkt->control_flags = - cpu_to_le16(CF_READ_DATA | CF_NVME_ENABLE); - vha->qla_stats.input_bytes += fd->payload_length; - vha->qla_stats.input_requests++; + cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA); + qpair->counters.input_bytes += fd->payload_length; + qpair->counters.input_requests++; } else if (fd->io_dir == NVMEFC_FCP_WRITE) { - cmd_pkt->control_flags = - cpu_to_le16(CF_WRITE_DATA | CF_NVME_ENABLE); - vha->qla_stats.output_bytes += fd->payload_length; - vha->qla_stats.output_requests++; + cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA); + if ((vha->flags.nvme_first_burst) && + (sp->fcport->nvme_prli_service_param & + NVME_PRLI_SP_FIRST_BURST)) { + if ((fd->payload_length <= + sp->fcport->nvme_first_burst_size) || + (sp->fcport->nvme_first_burst_size == 0)) + cmd_pkt->control_flags |= + cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE); + } + qpair->counters.output_bytes += fd->payload_length; + qpair->counters.output_requests++; } else if (fd->io_dir == 0) { - cmd_pkt->control_flags = cpu_to_le16(CF_NVME_ENABLE); + cmd_pkt->control_flags = 0; + } + /* Set BIT_13 of control flags for Async event */ + if (vha->flags.nvme2_enabled && + cmd->sqe.common.opcode == nvme_admin_async_event) { + cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT); } /* Set NPORT-ID */ @@ -392,25 +477,22 @@ /* NVME RSP IU */ cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen); - cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma)); - cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma)); + put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address); /* NVME CNMD IU */ cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen); - cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma)); - cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma)); + 
cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma); cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); cmd_pkt->byte_count = cpu_to_le32(fd->payload_length); /* One DSD is available in the Command Type NVME IOCB */ avail_dsds = 1; - cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0]; + cur_dsd = &cmd_pkt->nvme_dsd; sgl = fd->first_sgl; /* Load data segments */ for_each_sg(sgl, sg, tot_dsds, i) { - dma_addr_t sle_dma; cont_a64_entry_t *cont_pkt; /* Allocate additional continuation packets? */ @@ -429,17 +511,14 @@ req->ring_ptr++; } cont_pkt = (cont_a64_entry_t *)req->ring_ptr; - *((uint32_t *)(&cont_pkt->entry_type)) = - cpu_to_le32(CONTINUE_A64_TYPE); + put_unaligned_le32(CONTINUE_A64_TYPE, + &cont_pkt->entry_type); - cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; - avail_dsds = 5; + cur_dsd = cont_pkt->dsd; + avail_dsds = ARRAY_SIZE(cont_pkt->dsd); } - sle_dma = sg_dma_address(sg); - *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); - *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); - *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); + append_dsd64(&cur_dsd, sg); avail_dsds--; } @@ -457,10 +536,11 @@ } /* Set chip new ring index. */ - WRT_REG_DWORD(req->req_q_in, req->ring_index); + wrt_reg_dword(req->req_q_in, req->ring_index); queuing_error: spin_unlock_irqrestore(&qpair->qp_lock, flags); + return rval; } @@ -472,7 +552,7 @@ fc_port_t *fcport; struct srb_iocb *nvme; struct scsi_qla_host *vha; - int rval = -ENODEV; + int rval; srb_t *sp; struct qla_qpair *qpair = hw_queue_handle; struct nvme_private *priv = fd->private; @@ -480,15 +560,25 @@ if (!priv) { /* nvme association has been torn down */ - return rval; + return -ENODEV; } fcport = qla_rport->fcport; + if (!qpair || !fcport) + return -ENODEV; + + if (!qpair->fw_started || fcport->deleted) + return -EBUSY; + vha = fcport->vha; - if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) - return rval; + if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED)) + return -ENODEV; + + if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || + (qpair && !qpair->fw_started) || fcport->deleted) + return -EBUSY; /* * If we know the dev is going away while the transport is still sending @@ -501,16 +591,18 @@ return -EBUSY; /* Alloc SRB structure */ - sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC); + sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC); if (!sp) return -EBUSY; - atomic_set(&sp->ref_count, 1); - init_waitqueue_head(&sp->nvme_ls_waitq); + kref_init(&sp->cmd_kref); + spin_lock_init(&priv->cmd_lock); + sp->priv = priv; priv->sp = sp; sp->type = SRB_NVME_CMD; sp->name = "nvme_cmd"; sp->done = qla_nvme_sp_done; + sp->put_fn = qla_nvme_release_fcp_cmd_kref; sp->qpair = qpair; sp->vha = vha; nvme = &sp->u.iocb_cmd; @@ -518,10 +610,11 @@ rval = qla2x00_start_nvme_mq(sp); if (rval != QLA_SUCCESS) { - ql_log(ql_log_warn, vha, 0x212d, + ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x212d, "qla2x00_start_nvme_mq failed = %d\n", rval); - atomic_dec(&sp->ref_count); - wake_up(&sp->nvme_ls_waitq); + sp->priv = NULL; + priv->sp = NULL; + qla2xxx_rel_qpair_sp(sp->qpair, sp); } return rval; @@ -540,29 +633,16 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport) { fc_port_t *fcport; - struct qla_nvme_rport *qla_rport = rport->private, *trport; + struct qla_nvme_rport *qla_rport = rport->private; fcport = qla_rport->fcport; fcport->nvme_remote_port = NULL; fcport->nvme_flag &= ~NVME_FLAG_REGISTERED; - - list_for_each_entry_safe(qla_rport, trport, - &fcport->vha->nvme_rport_list, list) { - if (qla_rport->fcport == fcport) { - list_del(&qla_rport->list); - 
break; - } - } - complete(&fcport->nvme_del_done); - - if (!test_bit(UNLOADING, &fcport->vha->dpc_flags)) { - INIT_WORK(&fcport->free_work, qlt_free_session_done); - schedule_work(&fcport->free_work); - } - fcport->nvme_flag &= ~NVME_FLAG_DELETING; ql_log(ql_log_info, fcport->vha, 0x2110, - "remoteport_delete of %p completed.\n", fcport); + "remoteport_delete of %p %8phN completed.\n", + fcport, fcport->port_name); + complete(&fcport->nvme_del_done); } static struct nvme_fc_port_template qla_nvme_fc_transport = { @@ -574,9 +654,8 @@ .ls_abort = qla_nvme_ls_abort, .fcp_io = qla_nvme_post_cmd, .fcp_abort = qla_nvme_fcp_abort, - .poll_queue = qla_nvme_poll, .max_hw_queues = 8, - .max_sgl_segments = 128, + .max_sgl_segments = 1024, .max_dif_sgl_segments = 64, .dma_boundary = 0xFFFFFFFF, .local_priv_sz = 8, @@ -585,78 +664,35 @@ .fcprqst_priv_sz = sizeof(struct nvme_private), }; -#define NVME_ABORT_POLLING_PERIOD 2 -static int qla_nvme_wait_on_command(srb_t *sp) +void qla_nvme_unregister_remote_port(struct fc_port *fcport) { - int ret = QLA_SUCCESS; - - wait_event_timeout(sp->nvme_ls_waitq, (atomic_read(&sp->ref_count) > 1), - NVME_ABORT_POLLING_PERIOD*HZ); - - if (atomic_read(&sp->ref_count) > 1) - ret = QLA_FUNCTION_FAILED; - - return ret; -} - -void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp, int res) -{ - int rval; - - if (ha->flags.fw_started) { - rval = ha->isp_ops->abort_command(sp); - if (!rval && !qla_nvme_wait_on_command(sp)) - ql_log(ql_log_warn, NULL, 0x2112, - "timed out waiting on sp=%p\n", sp); - } else { - sp->done(sp, res); - } -} - -static void qla_nvme_unregister_remote_port(struct work_struct *work) -{ - struct fc_port *fcport = container_of(work, struct fc_port, - nvme_del_work); - struct qla_nvme_rport *qla_rport, *trport; + int ret; if (!IS_ENABLED(CONFIG_NVME_FC)) return; ql_log(ql_log_warn, NULL, 0x2112, - "%s: unregister remoteport on %p\n",__func__, fcport); + "%s: unregister remoteport on %p %8phN\n", + __func__, fcport, fcport->port_name); - list_for_each_entry_safe(qla_rport, trport, - &fcport->vha->nvme_rport_list, list) { - if (qla_rport->fcport == fcport) { - ql_log(ql_log_info, fcport->vha, 0x2113, - "%s: fcport=%p\n", __func__, fcport); - init_completion(&fcport->nvme_del_done); - nvme_fc_unregister_remoteport( - fcport->nvme_remote_port); - wait_for_completion(&fcport->nvme_del_done); - break; - } - } + if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags)) + nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0); + + init_completion(&fcport->nvme_del_done); + ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port); + if (ret) + ql_log(ql_log_info, fcport->vha, 0x2114, + "%s: Failed to unregister nvme_remote_port (%d)\n", + __func__, ret); + wait_for_completion(&fcport->nvme_del_done); } void qla_nvme_delete(struct scsi_qla_host *vha) { - struct qla_nvme_rport *qla_rport, *trport; - fc_port_t *fcport; int nv_ret; if (!IS_ENABLED(CONFIG_NVME_FC)) return; - - list_for_each_entry_safe(qla_rport, trport, - &vha->nvme_rport_list, list) { - fcport = qla_rport->fcport; - - ql_log(ql_log_info, fcport->vha, 0x2114, "%s: fcport=%p\n", - __func__, fcport); - - nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0); - } if (vha->nvme_local_port) { init_completion(&vha->nvme_del_done); @@ -686,11 +722,10 @@ tmpl = &qla_nvme_fc_transport; WARN_ON(vha->nvme_local_port); - WARN_ON(ha->max_req_queues < 3); qla_nvme_fc_transport.max_hw_queues = min((uint8_t)(qla_nvme_fc_transport.max_hw_queues), - (uint8_t)(ha->max_req_queues - 2)); + 
	    (uint8_t)(ha->max_qpairs ? ha->max_qpairs : 1));
 
 	pinfo.node_name = wwn_to_u64(vha->node_name);
 	pinfo.port_name = wwn_to_u64(vha->port_name);
--
Gitblit v1.6.2