From f70575805708cabdedea7498aaa3f710fde4d920 Mon Sep 17 00:00:00 2001 From: hc <hc@nodka.com> Date: Wed, 31 Jan 2024 03:29:01 +0000 Subject: [PATCH] add lvds1024*800 --- kernel/drivers/nvme/target/fc.c | 1072 +++++++++++++++++++++++++++++++++++++++------------------- 1 files changed, 716 insertions(+), 356 deletions(-) diff --git a/kernel/drivers/nvme/target/fc.c b/kernel/drivers/nvme/target/fc.c index 77e4d18..46fc44c 100644 --- a/kernel/drivers/nvme/target/fc.c +++ b/kernel/drivers/nvme/target/fc.c @@ -1,18 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2016 Avago Technologies. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful. - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, - * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A - * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO - * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID. - * See the GNU General Public License for more details, a copy of which - * can be found in the file COPYING included with this package - * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> @@ -26,6 +14,7 @@ #include "nvmet.h" #include <linux/nvme-fc-driver.h> #include <linux/nvme-fc.h> +#include "../host/fc.h" /* *************************** Data Structures/Defines ****************** */ @@ -33,23 +22,21 @@ #define NVMET_LS_CTX_COUNT 256 -/* for this implementation, assume small single frame rqst/rsp */ -#define NVME_FC_MAX_LS_BUFFER_SIZE 2048 - struct nvmet_fc_tgtport; struct nvmet_fc_tgt_assoc; -struct nvmet_fc_ls_iod { - struct nvmefc_tgt_ls_req *lsreq; +struct nvmet_fc_ls_iod { /* for an LS RQST RCV */ + struct nvmefc_ls_rsp *lsrsp; struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */ - struct list_head ls_list; /* tgtport->ls_list */ + struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */ struct nvmet_fc_tgtport *tgtport; struct nvmet_fc_tgt_assoc *assoc; + void *hosthandle; - u8 *rqstbuf; - u8 *rspbuf; + union nvmefc_ls_requests *rqstbuf; + union nvmefc_ls_responses *rspbuf; u16 rqstdatalen; dma_addr_t rspdma; @@ -57,6 +44,18 @@ struct work_struct work; } __aligned(sizeof(unsigned long long)); + +struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */ + struct nvmefc_ls_req ls_req; + + struct nvmet_fc_tgtport *tgtport; + void *hosthandle; + + int ls_error; + struct list_head lsreq_list; /* tgtport->ls_req_list */ + bool req_queued; +}; + /* desired maximum for a single sequence - if sg list allows it */ #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024) @@ -86,8 +85,6 @@ spinlock_t flock; struct nvmet_req req; - struct work_struct work; - struct work_struct done_work; struct work_struct defer_work; struct nvmet_fc_tgtport *tgtport; @@ -97,7 +94,6 @@ }; struct nvmet_fc_tgtport { - struct nvmet_fc_target_port fc_target_port; struct list_head tgt_list; /* nvmet_fc_target_list */ @@ -106,13 +102,23 @@ struct nvmet_fc_ls_iod *iod; spinlock_t lock; - struct list_head ls_list; + struct list_head ls_rcv_list; + struct list_head ls_req_list; struct list_head ls_busylist; struct list_head assoc_list; + struct list_head host_list; struct ida assoc_cnt; - struct nvmet_port *port; + struct nvmet_fc_port_entry *pe; struct kref ref; u32 max_sg_cnt; +}; + +struct nvmet_fc_port_entry { + struct nvmet_fc_tgtport *tgtport; + struct 
nvmet_port *port; + u64 node_name; + u64 port_name; + struct list_head pe_list; }; struct nvmet_fc_defer_fcp_req { @@ -126,28 +132,37 @@ u16 sqsize; u16 ersp_ratio; __le16 sqhd; - int cpu; atomic_t connected; atomic_t sqtail; atomic_t zrspcnt; atomic_t rsn; spinlock_t qlock; - struct nvmet_port *port; struct nvmet_cq nvme_cq; struct nvmet_sq nvme_sq; struct nvmet_fc_tgt_assoc *assoc; - struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */ struct list_head fod_list; struct list_head pending_cmd_list; struct list_head avail_defer_list; struct workqueue_struct *work_q; struct kref ref; + struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */ } __aligned(sizeof(unsigned long long)); + +struct nvmet_fc_hostport { + struct nvmet_fc_tgtport *tgtport; + void *hosthandle; + struct list_head host_list; + struct kref ref; + u8 invalid; +}; struct nvmet_fc_tgt_assoc { u64 association_id; u32 a_id; + atomic_t terminating; struct nvmet_fc_tgtport *tgtport; + struct nvmet_fc_hostport *hostport; + struct nvmet_fc_ls_iod *rcv_disconn; struct list_head a_list; struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1]; struct kref ref; @@ -221,11 +236,10 @@ static LIST_HEAD(nvmet_fc_target_list); static DEFINE_IDA(nvmet_fc_tgtport_cnt); +static LIST_HEAD(nvmet_fc_portentry_list); static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work); -static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work); -static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work); static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work); static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc); static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc); @@ -236,6 +250,8 @@ static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_fcp_iod *fod); static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc); +static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, + struct nvmet_fc_ls_iod *iod); /* *********************** FC-NVME DMA Handling **************************** */ @@ -327,6 +343,188 @@ } +/* ********************** FC-NVME LS XMT Handling ************************* */ + + +static void +__nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop) +{ + struct nvmet_fc_tgtport *tgtport = lsop->tgtport; + struct nvmefc_ls_req *lsreq = &lsop->ls_req; + unsigned long flags; + + spin_lock_irqsave(&tgtport->lock, flags); + + if (!lsop->req_queued) { + spin_unlock_irqrestore(&tgtport->lock, flags); + return; + } + + list_del(&lsop->lsreq_list); + + lsop->req_queued = false; + + spin_unlock_irqrestore(&tgtport->lock, flags); + + fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, + (lsreq->rqstlen + lsreq->rsplen), + DMA_BIDIRECTIONAL); + + nvmet_fc_tgtport_put(tgtport); +} + +static int +__nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport, + struct nvmet_fc_ls_req_op *lsop, + void (*done)(struct nvmefc_ls_req *req, int status)) +{ + struct nvmefc_ls_req *lsreq = &lsop->ls_req; + unsigned long flags; + int ret = 0; + + if (!tgtport->ops->ls_req) + return -EOPNOTSUPP; + + if (!nvmet_fc_tgtport_get(tgtport)) + return -ESHUTDOWN; + + lsreq->done = done; + lsop->req_queued = false; + INIT_LIST_HEAD(&lsop->lsreq_list); + + lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr, + lsreq->rqstlen + lsreq->rsplen, + DMA_BIDIRECTIONAL); + if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) { + ret = -EFAULT; + goto out_puttgtport; + } + lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; + + spin_lock_irqsave(&tgtport->lock, flags); 
+ + list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list); + + lsop->req_queued = true; + + spin_unlock_irqrestore(&tgtport->lock, flags); + + ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle, + lsreq); + if (ret) + goto out_unlink; + + return 0; + +out_unlink: + lsop->ls_error = ret; + spin_lock_irqsave(&tgtport->lock, flags); + lsop->req_queued = false; + list_del(&lsop->lsreq_list); + spin_unlock_irqrestore(&tgtport->lock, flags); + fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, + (lsreq->rqstlen + lsreq->rsplen), + DMA_BIDIRECTIONAL); +out_puttgtport: + nvmet_fc_tgtport_put(tgtport); + + return ret; +} + +static int +nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport, + struct nvmet_fc_ls_req_op *lsop, + void (*done)(struct nvmefc_ls_req *req, int status)) +{ + /* don't wait for completion */ + + return __nvmet_fc_send_ls_req(tgtport, lsop, done); +} + +static void +nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) +{ + struct nvmet_fc_ls_req_op *lsop = + container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req); + + __nvmet_fc_finish_ls_req(lsop); + + /* fc-nvme target doesn't care about success or failure of cmd */ + + kfree(lsop); +} + +/* + * This routine sends a FC-NVME LS to disconnect (aka terminate) + * the FC-NVME Association. Terminating the association also + * terminates the FC-NVME connections (per queue, both admin and io + * queues) that are part of the association. E.g. things are torn + * down, and the related FC-NVME Association ID and Connection IDs + * become invalid. + * + * The behavior of the fc-nvme target is such that it's + * understanding of the association and connections will implicitly + * be torn down. The action is implicit as it may be due to a loss of + * connectivity with the fc-nvme host, so the target may never get a + * response even if it tried. As such, the action of this routine + * is to asynchronously send the LS, ignore any results of the LS, and + * continue on with terminating the association. If the fc-nvme host + * is present and receives the LS, it too can tear down. + */ +static void +nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc) +{ + struct nvmet_fc_tgtport *tgtport = assoc->tgtport; + struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst; + struct fcnvme_ls_disconnect_assoc_acc *discon_acc; + struct nvmet_fc_ls_req_op *lsop; + struct nvmefc_ls_req *lsreq; + int ret; + + /* + * If ls_req is NULL or no hosthandle, it's an older lldd and no + * message is normal. Otherwise, send unless the hostport has + * already been invalidated by the lldd. 
+ */ + if (!tgtport->ops->ls_req || !assoc->hostport || + assoc->hostport->invalid) + return; + + lsop = kzalloc((sizeof(*lsop) + + sizeof(*discon_rqst) + sizeof(*discon_acc) + + tgtport->ops->lsrqst_priv_sz), GFP_KERNEL); + if (!lsop) { + dev_info(tgtport->dev, + "{%d:%d} send Disconnect Association failed: ENOMEM\n", + tgtport->fc_target_port.port_num, assoc->a_id); + return; + } + + discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1]; + discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1]; + lsreq = &lsop->ls_req; + if (tgtport->ops->lsrqst_priv_sz) + lsreq->private = (void *)&discon_acc[1]; + else + lsreq->private = NULL; + + lsop->tgtport = tgtport; + lsop->hosthandle = assoc->hostport->hosthandle; + + nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc, + assoc->association_id); + + ret = nvmet_fc_send_ls_req_async(tgtport, lsop, + nvmet_fc_disconnect_assoc_done); + if (ret) { + dev_info(tgtport->dev, + "{%d:%d} XMT Disconnect Association failed: %d\n", + tgtport->fc_target_port.port_num, assoc->a_id, ret); + kfree(lsop); + } +} + + /* *********************** FC-NVME Port Management ************************ */ @@ -346,17 +544,18 @@ for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work); iod->tgtport = tgtport; - list_add_tail(&iod->ls_list, &tgtport->ls_list); + list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list); - iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE, - GFP_KERNEL); + iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) + + sizeof(union nvmefc_ls_responses), + GFP_KERNEL); if (!iod->rqstbuf) goto out_fail; - iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE; + iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1]; iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf, - NVME_FC_MAX_LS_BUFFER_SIZE, + sizeof(*iod->rspbuf), DMA_TO_DEVICE); if (fc_dma_mapping_error(tgtport->dev, iod->rspdma)) goto out_fail; @@ -366,12 +565,12 @@ out_fail: kfree(iod->rqstbuf); - list_del(&iod->ls_list); + list_del(&iod->ls_rcv_list); for (iod--, i--; i >= 0; iod--, i--) { fc_dma_unmap_single(tgtport->dev, iod->rspdma, - NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE); + sizeof(*iod->rspbuf), DMA_TO_DEVICE); kfree(iod->rqstbuf); - list_del(&iod->ls_list); + list_del(&iod->ls_rcv_list); } kfree(iod); @@ -387,10 +586,10 @@ for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { fc_dma_unmap_single(tgtport->dev, - iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE, + iod->rspdma, sizeof(*iod->rspbuf), DMA_TO_DEVICE); kfree(iod->rqstbuf); - list_del(&iod->ls_list); + list_del(&iod->ls_rcv_list); } kfree(tgtport->iod); } @@ -402,10 +601,10 @@ unsigned long flags; spin_lock_irqsave(&tgtport->lock, flags); - iod = list_first_entry_or_null(&tgtport->ls_list, - struct nvmet_fc_ls_iod, ls_list); + iod = list_first_entry_or_null(&tgtport->ls_rcv_list, + struct nvmet_fc_ls_iod, ls_rcv_list); if (iod) - list_move_tail(&iod->ls_list, &tgtport->ls_busylist); + list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist); spin_unlock_irqrestore(&tgtport->lock, flags); return iod; } @@ -418,7 +617,7 @@ unsigned long flags; spin_lock_irqsave(&tgtport->lock, flags); - list_move(&iod->ls_list, &tgtport->ls_list); + list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list); spin_unlock_irqrestore(&tgtport->lock, flags); } @@ -430,8 +629,6 @@ int i; for (i = 0; i < queue->sqsize; fod++, i++) { - INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work); - INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work); 
INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work); fod->tgtport = tgtport; fod->queue = queue; @@ -509,10 +706,7 @@ fcpreq->hwqid = queue->qid ? ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; - if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR) - queue_work_on(queue->cpu, queue->work_q, &fod->work); - else - nvmet_fc_handle_fcp_rqst(tgtport, fod); + nvmet_fc_handle_fcp_rqst(tgtport, fod); } static void @@ -591,30 +785,6 @@ queue_work(queue->work_q, &fod->defer_work); } -static int -nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid) -{ - int cpu, idx, cnt; - - if (tgtport->ops->max_hw_queues == 1) - return WORK_CPU_UNBOUND; - - /* Simple cpu selection based on qid modulo active cpu count */ - idx = !qid ? 0 : (qid - 1) % num_active_cpus(); - - /* find the n'th active cpu */ - for (cpu = 0, cnt = 0; ; ) { - if (cpu_active(cpu)) { - if (cnt == idx) - break; - cnt++; - } - cpu = (cpu + 1) % num_possible_cpus(); - } - - return cpu; -} - static struct nvmet_fc_tgt_queue * nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, u16 qid, u16 sqsize) @@ -626,9 +796,7 @@ if (qid > NVMET_NR_QUEUES) return NULL; - queue = kzalloc((sizeof(*queue) + - (sizeof(struct nvmet_fc_fcp_iod) * sqsize)), - GFP_KERNEL); + queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL); if (!queue) return NULL; @@ -641,12 +809,9 @@ if (!queue->work_q) goto out_a_put; - queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1]; queue->qid = qid; queue->sqsize = sqsize; queue->assoc = assoc; - queue->port = assoc->tgtport->port; - queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid); INIT_LIST_HEAD(&queue->fod_list); INIT_LIST_HEAD(&queue->avail_defer_list); INIT_LIST_HEAD(&queue->pending_cmd_list); @@ -721,31 +886,33 @@ struct nvmet_fc_fcp_iod *fod = queue->fod; struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr; unsigned long flags; - int i, writedataactive; + int i; bool disconnect; disconnect = atomic_xchg(&queue->connected, 0); + /* if not connected, nothing to do */ + if (!disconnect) + return; + spin_lock_irqsave(&queue->qlock, flags); - /* about outstanding io's */ + /* abort outstanding io's */ for (i = 0; i < queue->sqsize; fod++, i++) { if (fod->active) { spin_lock(&fod->flock); fod->abort = true; - writedataactive = fod->writedataactive; - spin_unlock(&fod->flock); /* * only call lldd abort routine if waiting for * writedata. other outstanding ops should finish * on their own. 
*/ - if (writedataactive) { - spin_lock(&fod->flock); + if (fod->writedataactive) { fod->aborted = true; spin_unlock(&fod->flock); tgtport->ops->fcp_abort( &tgtport->fc_target_port, fod->fcpreq); - } + } else + spin_unlock(&fod->flock); } } @@ -785,8 +952,7 @@ flush_workqueue(queue->work_q); - if (disconnect) - nvmet_sq_destroy(&queue->nvme_sq); + nvmet_sq_destroy(&queue->nvme_sq); nvmet_fc_tgt_q_put(queue); } @@ -821,6 +987,102 @@ } static void +nvmet_fc_hostport_free(struct kref *ref) +{ + struct nvmet_fc_hostport *hostport = + container_of(ref, struct nvmet_fc_hostport, ref); + struct nvmet_fc_tgtport *tgtport = hostport->tgtport; + unsigned long flags; + + spin_lock_irqsave(&tgtport->lock, flags); + list_del(&hostport->host_list); + spin_unlock_irqrestore(&tgtport->lock, flags); + if (tgtport->ops->host_release && hostport->invalid) + tgtport->ops->host_release(hostport->hosthandle); + kfree(hostport); + nvmet_fc_tgtport_put(tgtport); +} + +static void +nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport) +{ + kref_put(&hostport->ref, nvmet_fc_hostport_free); +} + +static int +nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport) +{ + return kref_get_unless_zero(&hostport->ref); +} + +static void +nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport) +{ + /* if LLDD not implemented, leave as NULL */ + if (!hostport || !hostport->hosthandle) + return; + + nvmet_fc_hostport_put(hostport); +} + +static struct nvmet_fc_hostport * +nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) +{ + struct nvmet_fc_hostport *newhost, *host, *match = NULL; + unsigned long flags; + + /* if LLDD not implemented, leave as NULL */ + if (!hosthandle) + return NULL; + + /* take reference for what will be the newly allocated hostport */ + if (!nvmet_fc_tgtport_get(tgtport)) + return ERR_PTR(-EINVAL); + + newhost = kzalloc(sizeof(*newhost), GFP_KERNEL); + if (!newhost) { + spin_lock_irqsave(&tgtport->lock, flags); + list_for_each_entry(host, &tgtport->host_list, host_list) { + if (host->hosthandle == hosthandle && !host->invalid) { + if (nvmet_fc_hostport_get(host)) { + match = host; + break; + } + } + } + spin_unlock_irqrestore(&tgtport->lock, flags); + /* no allocation - release reference */ + nvmet_fc_tgtport_put(tgtport); + return (match) ? match : ERR_PTR(-ENOMEM); + } + + newhost->tgtport = tgtport; + newhost->hosthandle = hosthandle; + INIT_LIST_HEAD(&newhost->host_list); + kref_init(&newhost->ref); + + spin_lock_irqsave(&tgtport->lock, flags); + list_for_each_entry(host, &tgtport->host_list, host_list) { + if (host->hosthandle == hosthandle && !host->invalid) { + if (nvmet_fc_hostport_get(host)) { + match = host; + break; + } + } + } + if (match) { + kfree(newhost); + newhost = NULL; + /* releasing allocation - release reference */ + nvmet_fc_tgtport_put(tgtport); + } else + list_add_tail(&newhost->host_list, &tgtport->host_list); + spin_unlock_irqrestore(&tgtport->lock, flags); + + return (match) ? 
match : newhost; +} + +static void nvmet_fc_delete_assoc(struct work_struct *work) { struct nvmet_fc_tgt_assoc *assoc = @@ -831,7 +1093,7 @@ } static struct nvmet_fc_tgt_assoc * -nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport) +nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) { struct nvmet_fc_tgt_assoc *assoc, *tmpassoc; unsigned long flags; @@ -848,13 +1110,18 @@ goto out_free_assoc; if (!nvmet_fc_tgtport_get(tgtport)) - goto out_ida_put; + goto out_ida; + + assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle); + if (IS_ERR(assoc->hostport)) + goto out_put; assoc->tgtport = tgtport; assoc->a_id = idx; INIT_LIST_HEAD(&assoc->a_list); kref_init(&assoc->ref); INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc); + atomic_set(&assoc->terminating, 0); while (needrandom) { get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID); @@ -862,11 +1129,12 @@ spin_lock_irqsave(&tgtport->lock, flags); needrandom = false; - list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) + list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) { if (ran == tmpassoc->association_id) { needrandom = true; break; } + } if (!needrandom) { assoc->association_id = ran; list_add_tail(&assoc->a_list, &tgtport->assoc_list); @@ -876,7 +1144,9 @@ return assoc; -out_ida_put: +out_put: + nvmet_fc_tgtport_put(tgtport); +out_ida: ida_simple_remove(&tgtport->assoc_cnt, idx); out_free_assoc: kfree(assoc); @@ -889,12 +1159,24 @@ struct nvmet_fc_tgt_assoc *assoc = container_of(ref, struct nvmet_fc_tgt_assoc, ref); struct nvmet_fc_tgtport *tgtport = assoc->tgtport; + struct nvmet_fc_ls_iod *oldls; unsigned long flags; + /* Send Disconnect now that all i/o has completed */ + nvmet_fc_xmt_disconnect_assoc(assoc); + + nvmet_fc_free_hostport(assoc->hostport); spin_lock_irqsave(&tgtport->lock, flags); list_del(&assoc->a_list); + oldls = assoc->rcv_disconn; spin_unlock_irqrestore(&tgtport->lock, flags); + /* if pending Rcv Disconnect Association LS, send rsp now */ + if (oldls) + nvmet_fc_xmt_ls_rsp(tgtport, oldls); ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id); + dev_info(tgtport->dev, + "{%d:%d} Association freed\n", + tgtport->fc_target_port.port_num, assoc->a_id); kfree(assoc); nvmet_fc_tgtport_put(tgtport); } @@ -917,7 +1199,13 @@ struct nvmet_fc_tgtport *tgtport = assoc->tgtport; struct nvmet_fc_tgt_queue *queue; unsigned long flags; - int i; + int i, terminating; + + terminating = atomic_xchg(&assoc->terminating, 1); + + /* if already terminating, do nothing */ + if (terminating) + return; spin_lock_irqsave(&tgtport->lock, flags); for (i = NVMET_NR_QUEUES; i >= 0; i--) { @@ -932,6 +1220,10 @@ } } spin_unlock_irqrestore(&tgtport->lock, flags); + + dev_info(tgtport->dev, + "{%d:%d} Association deleted\n", + tgtport->fc_target_port.port_num, assoc->a_id); nvmet_fc_tgt_a_put(assoc); } @@ -948,7 +1240,8 @@ list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { if (association_id == assoc->association_id) { ret = assoc; - nvmet_fc_tgt_a_get(assoc); + if (!nvmet_fc_tgt_a_get(assoc)) + ret = NULL; break; } } @@ -957,6 +1250,83 @@ return ret; } +static void +nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport, + struct nvmet_fc_port_entry *pe, + struct nvmet_port *port) +{ + lockdep_assert_held(&nvmet_fc_tgtlock); + + pe->tgtport = tgtport; + tgtport->pe = pe; + + pe->port = port; + port->priv = pe; + + pe->node_name = tgtport->fc_target_port.node_name; + pe->port_name = tgtport->fc_target_port.port_name; + INIT_LIST_HEAD(&pe->pe_list); + + list_add_tail(&pe->pe_list, 
&nvmet_fc_portentry_list); +} + +static void +nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe) +{ + unsigned long flags; + + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); + if (pe->tgtport) + pe->tgtport->pe = NULL; + list_del(&pe->pe_list); + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); +} + +/* + * called when a targetport deregisters. Breaks the relationship + * with the nvmet port, but leaves the port_entry in place so that + * re-registration can resume operation. + */ +static void +nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport) +{ + struct nvmet_fc_port_entry *pe; + unsigned long flags; + + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); + pe = tgtport->pe; + if (pe) + pe->tgtport = NULL; + tgtport->pe = NULL; + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); +} + +/* + * called when a new targetport is registered. Looks in the + * existing nvmet port_entries to see if the nvmet layer is + * configured for the targetport's wwn's. (the targetport existed, + * nvmet configured, the lldd unregistered the tgtport, and is now + * reregistering the same targetport). If so, set the nvmet port + * port entry on the targetport. + */ +static void +nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport) +{ + struct nvmet_fc_port_entry *pe; + unsigned long flags; + + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); + list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) { + if (tgtport->fc_target_port.node_name == pe->node_name && + tgtport->fc_target_port.port_name == pe->port_name) { + WARN_ON(pe->tgtport); + tgtport->pe = pe; + pe->tgtport = tgtport; + break; + } + } + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); +} /** * nvme_fc_register_targetport - transport entry point called by an @@ -1014,16 +1384,21 @@ newrec->fc_target_port.node_name = pinfo->node_name; newrec->fc_target_port.port_name = pinfo->port_name; - newrec->fc_target_port.private = &newrec[1]; + if (template->target_priv_sz) + newrec->fc_target_port.private = &newrec[1]; + else + newrec->fc_target_port.private = NULL; newrec->fc_target_port.port_id = pinfo->port_id; newrec->fc_target_port.port_num = idx; INIT_LIST_HEAD(&newrec->tgt_list); newrec->dev = dev; newrec->ops = template; spin_lock_init(&newrec->lock); - INIT_LIST_HEAD(&newrec->ls_list); + INIT_LIST_HEAD(&newrec->ls_rcv_list); + INIT_LIST_HEAD(&newrec->ls_req_list); INIT_LIST_HEAD(&newrec->ls_busylist); INIT_LIST_HEAD(&newrec->assoc_list); + INIT_LIST_HEAD(&newrec->host_list); kref_init(&newrec->ref); ida_init(&newrec->assoc_cnt); newrec->max_sg_cnt = template->max_sgl_segments; @@ -1033,6 +1408,8 @@ ret = -ENOMEM; goto out_free_newrec; } + + nvmet_fc_portentry_rebind_tgt(newrec); spin_lock_irqsave(&nvmet_fc_tgtlock, flags); list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list); @@ -1104,13 +1481,72 @@ &tgtport->assoc_list, a_list) { if (!nvmet_fc_tgt_a_get(assoc)) continue; - spin_unlock_irqrestore(&tgtport->lock, flags); - nvmet_fc_delete_target_assoc(assoc); - nvmet_fc_tgt_a_put(assoc); - spin_lock_irqsave(&tgtport->lock, flags); + if (!schedule_work(&assoc->del_work)) + /* already deleting - release local reference */ + nvmet_fc_tgt_a_put(assoc); } spin_unlock_irqrestore(&tgtport->lock, flags); } + +/** + * nvmet_fc_invalidate_host - transport entry point called by an LLDD + * to remove references to a hosthandle for LS's. + * + * The nvmet-fc layer ensures that any references to the hosthandle + * on the targetport are forgotten (set to NULL). 
The LLDD will + * typically call this when a login with a remote host port has been + * lost, thus LS's for the remote host port are no longer possible. + * + * If an LS request is outstanding to the targetport/hosthandle (or + * issued concurrently with the call to invalidate the host), the + * LLDD is responsible for terminating/aborting the LS and completing + * the LS request. It is recommended that these terminations/aborts + * occur after calling to invalidate the host handle to avoid additional + * retries by the nvmet-fc transport. The nvmet-fc transport may + * continue to reference host handle while it cleans up outstanding + * NVME associations. The nvmet-fc transport will call the + * ops->host_release() callback to notify the LLDD that all references + * are complete and the related host handle can be recovered. + * Note: if there are no references, the callback may be called before + * the invalidate host call returns. + * + * @target_port: pointer to the (registered) target port that a prior + * LS was received on and which supplied the transport the + * hosthandle. + * @hosthandle: the handle (pointer) that represents the host port + * that no longer has connectivity and that LS's should + * no longer be directed to. + */ +void +nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port, + void *hosthandle) +{ + struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); + struct nvmet_fc_tgt_assoc *assoc, *next; + unsigned long flags; + bool noassoc = true; + + spin_lock_irqsave(&tgtport->lock, flags); + list_for_each_entry_safe(assoc, next, + &tgtport->assoc_list, a_list) { + if (!assoc->hostport || + assoc->hostport->hosthandle != hosthandle) + continue; + if (!nvmet_fc_tgt_a_get(assoc)) + continue; + assoc->hostport->invalid = 1; + noassoc = false; + if (!schedule_work(&assoc->del_work)) + /* already deleting - release local reference */ + nvmet_fc_tgt_a_put(assoc); + } + spin_unlock_irqrestore(&tgtport->lock, flags); + + /* if there's nothing to wait for - call the callback */ + if (noassoc && tgtport->ops->host_release) + tgtport->ops->host_release(hosthandle); +} +EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host); /* * nvmet layer has called to terminate an association @@ -1146,7 +1582,9 @@ nvmet_fc_tgtport_put(tgtport); if (found_ctrl) { - schedule_work(&assoc->del_work); + if (!schedule_work(&assoc->del_work)) + /* already deleting - release local reference */ + nvmet_fc_tgt_a_put(assoc); return; } @@ -1159,8 +1597,8 @@ * nvme_fc_unregister_targetport - transport entry point called by an * LLDD to deregister/remove a previously * registered a local NVME subsystem FC port. - * @tgtport: pointer to the (registered) target port that is to be - * deregistered. + * @target_port: pointer to the (registered) target port that is to be + * deregistered. * * Returns: * a completion status. Must be 0 upon success; a negative errno @@ -1171,8 +1609,17 @@ { struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); + nvmet_fc_portentry_unbind_tgt(tgtport); + /* terminate any outstanding associations */ __nvmet_fc_free_assocs(tgtport); + + /* + * should terminate LS's as well. However, LS's will be generated + * at the tail end of association termination, so they likely don't + * exist yet. And even if they did, it's worthwhile to just let + * them finish and targetport ref counting will clean things up. 
+ */ nvmet_fc_tgtport_put(tgtport); @@ -1181,113 +1628,15 @@ EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport); -/* *********************** FC-NVME LS Handling **************************** */ +/* ********************** FC-NVME LS RCV Handling ************************* */ - -static void -nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd) -{ - struct fcnvme_ls_acc_hdr *acc = buf; - - acc->w0.ls_cmd = ls_cmd; - acc->desc_list_len = desc_len; - acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST); - acc->rqst.desc_len = - fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)); - acc->rqst.w0.ls_cmd = rqst_ls_cmd; -} - -static int -nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd, - u8 reason, u8 explanation, u8 vendor) -{ - struct fcnvme_ls_rjt *rjt = buf; - - nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST, - fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)), - ls_cmd); - rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT); - rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt)); - rjt->rjt.reason_code = reason; - rjt->rjt.reason_explanation = explanation; - rjt->rjt.vendor = vendor; - - return sizeof(struct fcnvme_ls_rjt); -} - -/* Validation Error indexes into the string table below */ -enum { - VERR_NO_ERROR = 0, - VERR_CR_ASSOC_LEN = 1, - VERR_CR_ASSOC_RQST_LEN = 2, - VERR_CR_ASSOC_CMD = 3, - VERR_CR_ASSOC_CMD_LEN = 4, - VERR_ERSP_RATIO = 5, - VERR_ASSOC_ALLOC_FAIL = 6, - VERR_QUEUE_ALLOC_FAIL = 7, - VERR_CR_CONN_LEN = 8, - VERR_CR_CONN_RQST_LEN = 9, - VERR_ASSOC_ID = 10, - VERR_ASSOC_ID_LEN = 11, - VERR_NO_ASSOC = 12, - VERR_CONN_ID = 13, - VERR_CONN_ID_LEN = 14, - VERR_NO_CONN = 15, - VERR_CR_CONN_CMD = 16, - VERR_CR_CONN_CMD_LEN = 17, - VERR_DISCONN_LEN = 18, - VERR_DISCONN_RQST_LEN = 19, - VERR_DISCONN_CMD = 20, - VERR_DISCONN_CMD_LEN = 21, - VERR_DISCONN_SCOPE = 22, - VERR_RS_LEN = 23, - VERR_RS_RQST_LEN = 24, - VERR_RS_CMD = 25, - VERR_RS_CMD_LEN = 26, - VERR_RS_RCTL = 27, - VERR_RS_RO = 28, -}; - -static char *validation_errors[] = { - "OK", - "Bad CR_ASSOC Length", - "Bad CR_ASSOC Rqst Length", - "Not CR_ASSOC Cmd", - "Bad CR_ASSOC Cmd Length", - "Bad Ersp Ratio", - "Association Allocation Failed", - "Queue Allocation Failed", - "Bad CR_CONN Length", - "Bad CR_CONN Rqst Length", - "Not Association ID", - "Bad Association ID Length", - "No Association", - "Not Connection ID", - "Bad Connection ID Length", - "No Connection", - "Not CR_CONN Cmd", - "Bad CR_CONN Cmd Length", - "Bad DISCONN Length", - "Bad DISCONN Rqst Length", - "Not DISCONN Cmd", - "Bad DISCONN Cmd Length", - "Bad Disconnect Scope", - "Bad RS Length", - "Bad RS Rqst Length", - "Not RS Cmd", - "Bad RS Cmd Length", - "Bad RS R_CTL", - "Bad RS Relative Offset", -}; static void nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_iod *iod) { - struct fcnvme_ls_cr_assoc_rqst *rqst = - (struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf; - struct fcnvme_ls_cr_assoc_acc *acc = - (struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf; + struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc; + struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc; struct nvmet_fc_tgt_queue *queue; int ret = 0; @@ -1319,14 +1668,17 @@ else { /* new association w/ admin queue */ - iod->assoc = nvmet_fc_alloc_target_assoc(tgtport); + iod->assoc = nvmet_fc_alloc_target_assoc( + tgtport, iod->hosthandle); if (!iod->assoc) ret = VERR_ASSOC_ALLOC_FAIL; else { queue = nvmet_fc_alloc_target_queue(iod->assoc, 0, be16_to_cpu(rqst->assoc_cmd.sqsize)); - if (!queue) + if 
(!queue) { ret = VERR_QUEUE_ALLOC_FAIL; + nvmet_fc_tgt_a_put(iod->assoc); + } } } @@ -1334,8 +1686,8 @@ dev_err(tgtport->dev, "Create Association LS failed: %s\n", validation_errors[ret]); - iod->lsreq->rsplen = nvmet_fc_format_rjt(acc, - NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd, + iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, + sizeof(*acc), rqst->w0.ls_cmd, FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0); return; @@ -1345,11 +1697,15 @@ atomic_set(&queue->connected, 1); queue->sqhd = 0; /* best place to init value */ + dev_info(tgtport->dev, + "{%d:%d} Association created\n", + tgtport->fc_target_port.port_num, iod->assoc->a_id); + /* format a response */ - iod->lsreq->rsplen = sizeof(*acc); + iod->lsrsp->rsplen = sizeof(*acc); - nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, + nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, fcnvme_lsdesc_len( sizeof(struct fcnvme_ls_cr_assoc_acc)), FCNVME_LS_CREATE_ASSOCIATION); @@ -1370,10 +1726,8 @@ nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_iod *iod) { - struct fcnvme_ls_cr_conn_rqst *rqst = - (struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf; - struct fcnvme_ls_cr_conn_acc *acc = - (struct fcnvme_ls_cr_conn_acc *)iod->rspbuf; + struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn; + struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn; struct nvmet_fc_tgt_queue *queue; int ret = 0; @@ -1425,8 +1779,8 @@ dev_err(tgtport->dev, "Create Connection LS failed: %s\n", validation_errors[ret]); - iod->lsreq->rsplen = nvmet_fc_format_rjt(acc, - NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd, + iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, + sizeof(*acc), rqst->w0.ls_cmd, (ret == VERR_NO_ASSOC) ? FCNVME_RJT_RC_INV_ASSOC : FCNVME_RJT_RC_LOGIC, @@ -1440,9 +1794,9 @@ /* format a response */ - iod->lsreq->rsplen = sizeof(*acc); + iod->lsrsp->rsplen = sizeof(*acc); - nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, + nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)), FCNVME_LS_CREATE_CONNECTION); acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); @@ -1454,107 +1808,92 @@ be16_to_cpu(rqst->connect_cmd.qid))); } -static void +/* + * Returns true if the LS response is to be transmit + * Returns false if the LS response is to be delayed + */ +static int nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_iod *iod) { - struct fcnvme_ls_disconnect_rqst *rqst = - (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf; - struct fcnvme_ls_disconnect_acc *acc = - (struct fcnvme_ls_disconnect_acc *)iod->rspbuf; - struct nvmet_fc_tgt_queue *queue = NULL; - struct nvmet_fc_tgt_assoc *assoc; + struct fcnvme_ls_disconnect_assoc_rqst *rqst = + &iod->rqstbuf->rq_dis_assoc; + struct fcnvme_ls_disconnect_assoc_acc *acc = + &iod->rspbuf->rsp_dis_assoc; + struct nvmet_fc_tgt_assoc *assoc = NULL; + struct nvmet_fc_ls_iod *oldls = NULL; + unsigned long flags; int ret = 0; - bool del_assoc = false; memset(acc, 0, sizeof(*acc)); - if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst)) - ret = VERR_DISCONN_LEN; - else if (rqst->desc_list_len != - fcnvme_lsdesc_len( - sizeof(struct fcnvme_ls_disconnect_rqst))) - ret = VERR_DISCONN_RQST_LEN; - else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) - ret = VERR_ASSOC_ID; - else if (rqst->associd.desc_len != - fcnvme_lsdesc_len( - sizeof(struct fcnvme_lsdesc_assoc_id))) - ret = VERR_ASSOC_ID_LEN; - else if (rqst->discon_cmd.desc_tag != - cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD)) - ret = VERR_DISCONN_CMD; - 
else if (rqst->discon_cmd.desc_len != - fcnvme_lsdesc_len( - sizeof(struct fcnvme_lsdesc_disconn_cmd))) - ret = VERR_DISCONN_CMD_LEN; - else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) && - (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION)) - ret = VERR_DISCONN_SCOPE; - else { - /* match an active association */ + ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst); + if (!ret) { + /* match an active association - takes an assoc ref if !NULL */ assoc = nvmet_fc_find_target_assoc(tgtport, be64_to_cpu(rqst->associd.association_id)); iod->assoc = assoc; - if (assoc) { - if (rqst->discon_cmd.scope == - FCNVME_DISCONN_CONNECTION) { - queue = nvmet_fc_find_target_queue(tgtport, - be64_to_cpu( - rqst->discon_cmd.id)); - if (!queue) { - nvmet_fc_tgt_a_put(assoc); - ret = VERR_NO_CONN; - } - } - } else + if (!assoc) ret = VERR_NO_ASSOC; } - if (ret) { + if (ret || !assoc) { dev_err(tgtport->dev, "Disconnect LS failed: %s\n", validation_errors[ret]); - iod->lsreq->rsplen = nvmet_fc_format_rjt(acc, - NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd, + iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, + sizeof(*acc), rqst->w0.ls_cmd, (ret == VERR_NO_ASSOC) ? FCNVME_RJT_RC_INV_ASSOC : - (ret == VERR_NO_CONN) ? - FCNVME_RJT_RC_INV_CONN : - FCNVME_RJT_RC_LOGIC, + FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0); - return; + return true; } /* format a response */ - iod->lsreq->rsplen = sizeof(*acc); + iod->lsrsp->rsplen = sizeof(*acc); - nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, + nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, fcnvme_lsdesc_len( - sizeof(struct fcnvme_ls_disconnect_acc)), - FCNVME_LS_DISCONNECT); - - - /* are we to delete a Connection ID (queue) */ - if (queue) { - int qid = queue->qid; - - nvmet_fc_delete_target_queue(queue); - - /* release the get taken by find_target_queue */ - nvmet_fc_tgt_q_put(queue); - - /* tear association down if io queue terminated */ - if (!qid) - del_assoc = true; - } + sizeof(struct fcnvme_ls_disconnect_assoc_acc)), + FCNVME_LS_DISCONNECT_ASSOC); /* release get taken in nvmet_fc_find_target_assoc */ - nvmet_fc_tgt_a_put(iod->assoc); + nvmet_fc_tgt_a_put(assoc); - if (del_assoc) - nvmet_fc_delete_target_assoc(iod->assoc); + /* + * The rules for LS response says the response cannot + * go back until ABTS's have been sent for all outstanding + * I/O and a Disconnect Association LS has been sent. + * So... save off the Disconnect LS to send the response + * later. If there was a prior LS already saved, replace + * it with the newer one and send a can't perform reject + * on the older one. 
+ */ + spin_lock_irqsave(&tgtport->lock, flags); + oldls = assoc->rcv_disconn; + assoc->rcv_disconn = iod; + spin_unlock_irqrestore(&tgtport->lock, flags); + + nvmet_fc_delete_target_assoc(assoc); + + if (oldls) { + dev_info(tgtport->dev, + "{%d:%d} Multiple Disconnect Association LS's " + "received\n", + tgtport->fc_target_port.port_num, assoc->a_id); + /* overwrite good response with bogus failure */ + oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, + sizeof(*iod->rspbuf), + /* ok to use rqst, LS is same */ + rqst->w0.ls_cmd, + FCNVME_RJT_RC_UNAB, + FCNVME_RJT_EXP_NONE, 0); + nvmet_fc_xmt_ls_rsp(tgtport, oldls); + } + + return false; } @@ -1566,13 +1905,13 @@ static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops; static void -nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq) +nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) { - struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private; + struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private; struct nvmet_fc_tgtport *tgtport = iod->tgtport; fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma, - NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE); + sizeof(*iod->rspbuf), DMA_TO_DEVICE); nvmet_fc_free_ls_iod(tgtport, iod); nvmet_fc_tgtport_put(tgtport); } @@ -1584,11 +1923,11 @@ int ret; fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma, - NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE); + sizeof(*iod->rspbuf), DMA_TO_DEVICE); - ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq); + ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp); if (ret) - nvmet_fc_xmt_ls_rsp_done(iod->lsreq); + nvmet_fc_xmt_ls_rsp_done(iod->lsrsp); } /* @@ -1598,15 +1937,15 @@ nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_iod *iod) { - struct fcnvme_ls_rqst_w0 *w0 = - (struct fcnvme_ls_rqst_w0 *)iod->rqstbuf; + struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0; + bool sendrsp = true; - iod->lsreq->nvmet_fc_private = iod; - iod->lsreq->rspbuf = iod->rspbuf; - iod->lsreq->rspdma = iod->rspdma; - iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done; + iod->lsrsp->nvme_fc_private = iod; + iod->lsrsp->rspbuf = iod->rspbuf; + iod->lsrsp->rspdma = iod->rspdma; + iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done; /* Be preventative. handlers will later set to valid length */ - iod->lsreq->rsplen = 0; + iod->lsrsp->rsplen = 0; iod->assoc = NULL; @@ -1624,17 +1963,18 @@ /* Creates an IO Queue/Connection */ nvmet_fc_ls_create_connection(tgtport, iod); break; - case FCNVME_LS_DISCONNECT: + case FCNVME_LS_DISCONNECT_ASSOC: /* Terminate a Queue/Connection or the Association */ - nvmet_fc_ls_disconnect(tgtport, iod); + sendrsp = nvmet_fc_ls_disconnect(tgtport, iod); break; default: - iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf, - NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd, + iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf, + sizeof(*iod->rspbuf), w0->ls_cmd, FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0); } - nvmet_fc_xmt_ls_rsp(tgtport, iod); + if (sendrsp) + nvmet_fc_xmt_ls_rsp(tgtport, iod); } /* @@ -1661,37 +2001,55 @@ * * If this routine returns error, the LLDD should abort the exchange. * - * @tgtport: pointer to the (registered) target port the LS was + * @target_port: pointer to the (registered) target port the LS was * received on. - * @lsreq: pointer to a lsreq request structure to be used to reference + * @lsrsp: pointer to a lsrsp structure to be used to reference * the exchange corresponding to the LS. 
* @lsreqbuf: pointer to the buffer containing the LS Request * @lsreqbuf_len: length, in bytes, of the received LS request */ int nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port, - struct nvmefc_tgt_ls_req *lsreq, + void *hosthandle, + struct nvmefc_ls_rsp *lsrsp, void *lsreqbuf, u32 lsreqbuf_len) { struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); struct nvmet_fc_ls_iod *iod; + struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; - if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE) + if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { + dev_info(tgtport->dev, + "RCV %s LS failed: payload too large (%d)\n", + (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? + nvmefc_ls_names[w0->ls_cmd] : "", + lsreqbuf_len); return -E2BIG; + } - if (!nvmet_fc_tgtport_get(tgtport)) + if (!nvmet_fc_tgtport_get(tgtport)) { + dev_info(tgtport->dev, + "RCV %s LS failed: target deleting\n", + (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? + nvmefc_ls_names[w0->ls_cmd] : ""); return -ESHUTDOWN; + } iod = nvmet_fc_alloc_ls_iod(tgtport); if (!iod) { + dev_info(tgtport->dev, + "RCV %s LS failed: context allocation failed\n", + (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? + nvmefc_ls_names[w0->ls_cmd] : ""); nvmet_fc_tgtport_put(tgtport); return -ENOENT; } - iod->lsreq = lsreq; + iod->lsrsp = lsrsp; iod->fcpreq = NULL; memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len); iod->rqstdatalen = lsreqbuf_len; + iod->hosthandle = hosthandle; schedule_work(&iod->work); @@ -1798,7 +2156,7 @@ */ rspcnt = atomic_inc_return(&fod->queue->zrspcnt); if (!(rspcnt % fod->queue->ersp_ratio) || - sqe->opcode == nvme_fabrics_command || + nvme_is_fabrics((struct nvme_command *) sqe) || xfr_length != fod->req.transfer_len || (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] || (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) || @@ -2007,7 +2365,7 @@ } /* data transfer complete, resume with nvmet layer */ - nvmet_req_execute(&fod->req); + fod->req.execute(&fod->req); break; case NVMET_FCOP_READDATA: @@ -2058,25 +2416,11 @@ } static void -nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work) -{ - struct nvmet_fc_fcp_iod *fod = - container_of(work, struct nvmet_fc_fcp_iod, done_work); - - nvmet_fc_fod_op_done(fod); -} - -static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq) { struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; - struct nvmet_fc_tgt_queue *queue = fod->queue; - if (fod->tgtport->ops->target_features & NVMET_FCTGTFEAT_OPDONE_IN_ISR) - /* context switch so completion is not in ISR context */ - queue_work_on(queue->cpu, queue->work_q, &fod->done_work); - else - nvmet_fc_fod_op_done(fod); + nvmet_fc_fod_op_done(fod); } /* @@ -2147,7 +2491,7 @@ /* - * Actual processing routine for received FC-NVME LS Requests from the LLD + * Actual processing routine for received FC-NVME I/O Requests from the LLD */ static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, @@ -2183,8 +2527,9 @@ } fod->req.cmd = &fod->cmdiubuf.sqe; - fod->req.rsp = &fod->rspiubuf.cqe; - fod->req.port = fod->queue->port; + fod->req.cqe = &fod->rspiubuf.cqe; + if (tgtport->pe) + fod->req.port = tgtport->pe->port; /* clear any response payload */ memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); @@ -2230,24 +2575,11 @@ * can invoke the nvmet_layer now. 
If read data, cmd completion will * push the data */ - nvmet_req_execute(&fod->req); + fod->req.execute(&fod->req); return; transport_error: nvmet_fc_abort_op(tgtport, fod); -} - -/* - * Actual processing routine for received FC-NVME LS Requests from the LLD - */ -static void -nvmet_fc_handle_fcp_rqst_work(struct work_struct *work) -{ - struct nvmet_fc_fcp_iod *fod = - container_of(work, struct nvmet_fc_fcp_iod, work); - struct nvmet_fc_tgtport *tgtport = fod->tgtport; - - nvmet_fc_handle_fcp_rqst(tgtport, fod); } /** @@ -2311,7 +2643,7 @@ /* validate iu, so the connection id can be used to find the queue */ if ((cmdiubuf_len != sizeof(*cmdiu)) || - (cmdiu->scsi_id != NVME_CMD_SCSI_ID) || + (cmdiu->format_id != NVME_CMD_FORMAT_ID) || (cmdiu->fc_id != NVME_CMD_FC_ID) || (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4))) return -EIO; @@ -2468,7 +2800,7 @@ substring_t wwn = { name, &name[sizeof(name)-1] }; int nnoffset, pnoffset; - /* validate it string one of the 2 allowed formats */ + /* validate if string is one of the 2 allowed formats */ if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], @@ -2508,6 +2840,7 @@ nvmet_fc_add_port(struct nvmet_port *port) { struct nvmet_fc_tgtport *tgtport; + struct nvmet_fc_port_entry *pe; struct nvmet_fc_traddr traddr = { 0L, 0L }; unsigned long flags; int ret; @@ -2524,24 +2857,50 @@ if (ret) return ret; + pe = kzalloc(sizeof(*pe), GFP_KERNEL); + if (!pe) + return -ENOMEM; + ret = -ENXIO; spin_lock_irqsave(&nvmet_fc_tgtlock, flags); list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { if ((tgtport->fc_target_port.node_name == traddr.nn) && (tgtport->fc_target_port.port_name == traddr.pn)) { - tgtport->port = port; - ret = 0; + /* a FC port can only be 1 nvmet port id */ + if (!tgtport->pe) { + nvmet_fc_portentry_bind(tgtport, pe, port); + ret = 0; + } else + ret = -EALREADY; break; } } spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); + + if (ret) + kfree(pe); + return ret; } static void nvmet_fc_remove_port(struct nvmet_port *port) { - /* nothing to do */ + struct nvmet_fc_port_entry *pe = port->priv; + + nvmet_fc_portentry_unbind(pe); + + kfree(pe); +} + +static void +nvmet_fc_discovery_chg(struct nvmet_port *port) +{ + struct nvmet_fc_port_entry *pe = port->priv; + struct nvmet_fc_tgtport *tgtport = pe->tgtport; + + if (tgtport && tgtport->ops->discovery_event) + tgtport->ops->discovery_event(&tgtport->fc_target_port); } static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { @@ -2552,6 +2911,7 @@ .remove_port = nvmet_fc_remove_port, .queue_response = nvmet_fc_fcp_nvme_cmd_done, .delete_ctrl = nvmet_fc_delete_ctrl, + .discovery_chg = nvmet_fc_discovery_chg, }; static int __init nvmet_fc_init_module(void) -- Gitblit v1.6.2
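For reference, a minimal sketch (not part of the patch) of how an LLDD might drive the two transport entry points this patch reworks or adds: nvmet_fc_rcv_ls_req(), which now takes a hosthandle alongside the nvmefc_ls_rsp context, and nvmet_fc_invalidate_host(). The struct my_lldd_rport type and both my_lldd_* functions below are hypothetical placeholders for driver-side state; only the two nvmet_fc_* calls and the host_release() behaviour come from the diff above. A real driver would also track one nvmefc_ls_rsp per outstanding LS rather than a single embedded one.

/*
 * Illustrative sketch only, assuming a hypothetical LLDD remote-port
 * structure.  The hosthandle is an opaque cookie chosen by the LLDD;
 * nvmet-fc merely echoes it back in the template's ls_req() and
 * host_release() callbacks.
 */
#include <linux/types.h>
#include <linux/nvme-fc-driver.h>

struct my_lldd_rport {				/* hypothetical LLDD per-remote-port state */
	struct nvmet_fc_target_port	*targetport;
	struct nvmefc_ls_rsp		ls_rsp;	/* response context handed to nvmet-fc */
};

/* An LS frame arrived from the wire: pass it up with this rport as hosthandle */
static int my_lldd_handle_ls(struct my_lldd_rport *rport,
			     void *lsbuf, u32 lslen)
{
	return nvmet_fc_rcv_ls_req(rport->targetport, rport,
				   &rport->ls_rsp, lsbuf, lslen);
}

/* Login with the remote host port was lost: no further LS's can be sent to it */
static void my_lldd_rport_gone(struct my_lldd_rport *rport)
{
	/*
	 * nvmet-fc tears down the associations tied to this handle and,
	 * if the target template implements host_release(), calls it once
	 * the last reference is dropped (possibly before this returns).
	 */
	nvmet_fc_invalidate_host(rport->targetport, rport);
}

Passing the rport pointer itself as the hosthandle keeps the mapping trivial; drivers that cannot guarantee the rport outlives all transport references would instead hand in a separately refcounted cookie and free it from host_release().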