From 01573e231f18eb2d99162747186f59511f56b64d Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 08 Dec 2023 10:40:48 +0000
Subject: [PATCH] Remove rt
---
kernel/drivers/nvme/target/core.c | 731 +++++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 539 insertions(+), 192 deletions(-)
diff --git a/kernel/drivers/nvme/target/core.c b/kernel/drivers/nvme/target/core.c
index 1a35d73..fa04d97 100644
--- a/kernel/drivers/nvme/target/core.c
+++ b/kernel/drivers/nvme/target/core.c
@@ -1,20 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Common code for the NVMe target.
* Copyright (c) 2015-2016 HGST, a Western Digital Company.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>
+#include <linux/pci-p2pdma.h>
+#include <linux/scatterlist.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
#include "nvmet.h"
@@ -44,40 +41,88 @@
u64 nvmet_ana_chgcnt;
DECLARE_RWSEM(nvmet_ana_sem);
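+/*
+ * Map a POSIX errno to an NVMe status code, recording the offset of the
+ * command field that caused the failure in req->error_loc.
+ */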
+inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
+{
+ u16 status;
+
+ switch (errno) {
+ case 0:
+ status = NVME_SC_SUCCESS;
+ break;
+ case -ENOSPC:
+ req->error_loc = offsetof(struct nvme_rw_command, length);
+ status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
+ break;
+ case -EREMOTEIO:
+ req->error_loc = offsetof(struct nvme_rw_command, slba);
+ status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ break;
+ case -EOPNOTSUPP:
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
+ switch (req->cmd->common.opcode) {
+ case nvme_cmd_dsm:
+ case nvme_cmd_write_zeroes:
+ status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
+ break;
+ default:
+ status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+ break;
+ case -ENODATA:
+ req->error_loc = offsetof(struct nvme_rw_command, nsid);
+ status = NVME_SC_ACCESS_DENIED;
+ break;
+ case -EIO:
+ fallthrough;
+ default:
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
+ status = NVME_SC_INTERNAL | NVME_SC_DNR;
+ }
+
+ return status;
+}
+
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
const char *subsysnqn);
u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
size_t len)
{
- if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
+ if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
+ req->error_loc = offsetof(struct nvme_common_command, dptr);
return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+ }
return 0;
}
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
- if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
+ if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
+ req->error_loc = offsetof(struct nvme_common_command, dptr);
return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+ }
return 0;
}
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{
- if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len)
+ if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
+ req->error_loc = offsetof(struct nvme_common_command, dptr);
return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+ }
return 0;
}
static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
{
- struct nvmet_ns *ns;
+ unsigned long nsid = 0;
+ struct nvmet_ns *cur;
+ unsigned long idx;
- if (list_empty(&subsys->namespaces))
- return 0;
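+	/*
+	 * xa_for_each() visits entries in ascending index order, so the
+	 * last namespace seen carries the highest NSID.
+	 */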
+ xa_for_each(&subsys->namespaces, idx, cur)
+ nsid = cur->nsid;
- ns = list_last_entry(&subsys->namespaces, struct nvmet_ns, dev_link);
- return ns->nsid;
+ return nsid;
}
static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
@@ -85,39 +130,30 @@
return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}
-static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
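+/*
+ * Complete every outstanding AER command with an internal error status,
+ * e.g. when the admin queue is torn down and no requests may remain
+ * outstanding on it.
+ */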
+static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
{
+ u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
struct nvmet_req *req;
- while (1) {
- mutex_lock(&ctrl->lock);
- if (!ctrl->nr_async_event_cmds) {
- mutex_unlock(&ctrl->lock);
- return;
- }
-
+ mutex_lock(&ctrl->lock);
+ while (ctrl->nr_async_event_cmds) {
req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
mutex_unlock(&ctrl->lock);
- nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
+ nvmet_req_complete(req, status);
+ mutex_lock(&ctrl->lock);
}
+ mutex_unlock(&ctrl->lock);
}
-static void nvmet_async_event_work(struct work_struct *work)
+static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
{
- struct nvmet_ctrl *ctrl =
- container_of(work, struct nvmet_ctrl, async_event_work);
struct nvmet_async_event *aen;
struct nvmet_req *req;
- while (1) {
- mutex_lock(&ctrl->lock);
- aen = list_first_entry_or_null(&ctrl->async_events,
- struct nvmet_async_event, entry);
- if (!aen || !ctrl->nr_async_event_cmds) {
- mutex_unlock(&ctrl->lock);
- return;
- }
-
+ mutex_lock(&ctrl->lock);
+ while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
+ aen = list_first_entry(&ctrl->async_events,
+ struct nvmet_async_event, entry);
req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
nvmet_set_result(req, nvmet_async_event_result(aen));
@@ -125,11 +161,34 @@
kfree(aen);
mutex_unlock(&ctrl->lock);
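+		/*
+		 * Complete the request without holding ctrl->lock: completion
+		 * may prompt the host to queue a fresh AER command, whose
+		 * handling takes the same lock.
+		 */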
+ trace_nvmet_async_event(ctrl, req->cqe->result.u32);
nvmet_req_complete(req, 0);
+ mutex_lock(&ctrl->lock);
}
+ mutex_unlock(&ctrl->lock);
}
-static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
+static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
+{
+ struct nvmet_async_event *aen, *tmp;
+
+ mutex_lock(&ctrl->lock);
+ list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
+ list_del(&aen->entry);
+ kfree(aen);
+ }
+ mutex_unlock(&ctrl->lock);
+}
+
+static void nvmet_async_event_work(struct work_struct *work)
+{
+ struct nvmet_ctrl *ctrl =
+ container_of(work, struct nvmet_ctrl, async_event_work);
+
+ nvmet_async_events_process(ctrl);
+}
+
+void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
u8 event_info, u8 log_page)
{
struct nvmet_async_event *aen;
@@ -147,13 +206,6 @@
mutex_unlock(&ctrl->lock);
schedule_work(&ctrl->async_event_work);
-}
-
-static bool nvmet_aen_disabled(struct nvmet_ctrl *ctrl, u32 aen)
-{
- if (!(READ_ONCE(ctrl->aen_enabled) & aen))
- return true;
- return test_and_set_bit(aen, &ctrl->aen_masked);
}
static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
@@ -184,9 +236,11 @@
{
struct nvmet_ctrl *ctrl;
+ lockdep_assert_held(&subsys->lock);
+
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
- if (nvmet_aen_disabled(ctrl, NVME_AEN_CFG_NS_ATTR))
+ if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
continue;
nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
NVME_AER_NOTICE_NS_CHANGED,
@@ -203,7 +257,7 @@
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
if (port && ctrl->port != port)
continue;
- if (nvmet_aen_disabled(ctrl, NVME_AEN_CFG_ANA_CHANGE))
+ if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
continue;
nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
@@ -244,6 +298,18 @@
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
+void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
+{
+ struct nvmet_ctrl *ctrl;
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ if (ctrl->port == port)
+ ctrl->ops->delete_ctrl(ctrl);
+ }
+ mutex_unlock(&subsys->lock);
+}
+
int nvmet_enable_port(struct nvmet_port *port)
{
const struct nvmet_fabrics_ops *ops;
@@ -267,18 +333,32 @@
if (!try_module_get(ops->owner))
return -EINVAL;
- ret = ops->add_port(port);
- if (ret) {
- module_put(ops->owner);
- return ret;
+ /*
+ * If the user requested PI support and the transport isn't pi capable,
+ * don't enable the port.
+ */
+ if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
+ pr_err("T10-PI is not supported by transport type %d\n",
+ port->disc_addr.trtype);
+ ret = -EINVAL;
+ goto out_put;
}
+
+ ret = ops->add_port(port);
+ if (ret)
+ goto out_put;
/* If the transport didn't set inline_data_size, then disable it. */
if (port->inline_data_size < 0)
port->inline_data_size = 0;
port->enabled = true;
+ port->tr_ops = ops;
return 0;
+
+out_put:
+ module_put(ops->owner);
+ return ret;
}
void nvmet_disable_port(struct nvmet_port *port)
@@ -288,6 +368,7 @@
lockdep_assert_held(&nvmet_config_sem);
port->enabled = false;
+ port->tr_ops = NULL;
ops = nvmet_transports[port->disc_addr.trtype];
ops->remove_port(port);
@@ -298,6 +379,15 @@
{
struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
struct nvmet_ctrl, ka_work);
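+	/*
+	 * Traffic-based keep-alive: if any command was received during the
+	 * last keep-alive period (reset_tbkas was set), treat that traffic
+	 * as an implicit keep-alive and re-arm the timer instead of failing
+	 * the controller.
+	 */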
+ bool reset_tbkas = ctrl->reset_tbkas;
+
+ ctrl->reset_tbkas = false;
+ if (reset_tbkas) {
+ pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
+ ctrl->cntlid);
+ schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+ return;
+ }
pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
ctrl->cntlid, ctrl->kato);
@@ -305,7 +395,7 @@
nvmet_ctrl_fatal_error(ctrl);
}
-static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
+void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
if (unlikely(ctrl->kato == 0))
return;
@@ -317,7 +407,7 @@
schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
-static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
+void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
if (unlikely(ctrl->kato == 0))
return;
@@ -327,28 +417,13 @@
cancel_delayed_work_sync(&ctrl->ka_work);
}
-static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
- __le32 nsid)
-{
- struct nvmet_ns *ns;
-
- list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
- if (ns->nsid == le32_to_cpu(nsid))
- return ns;
- }
-
- return NULL;
-}
-
struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
struct nvmet_ns *ns;
- rcu_read_lock();
- ns = __nvmet_find_namespace(ctrl, nsid);
+ ns = xa_load(&ctrl->subsys->namespaces, le32_to_cpu(nsid));
if (ns)
percpu_ref_get(&ns->ref);
- rcu_read_unlock();
return ns;
}
@@ -371,17 +446,121 @@
nvmet_file_ns_disable(ns);
}
+static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
+{
+ int ret;
+ struct pci_dev *p2p_dev;
+
+ if (!ns->use_p2pmem)
+ return 0;
+
+ if (!ns->bdev) {
+ pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
+ return -EINVAL;
+ }
+
+ if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
+ pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
+ ns->device_path);
+ return -EINVAL;
+ }
+
+ if (ns->p2p_dev) {
+ ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
+ if (ret < 0)
+ return -EINVAL;
+ } else {
+ /*
+ * Right now we just check that there is p2pmem available so
+ * we can report an error to the user right away if there
+ * is not. We'll find the actual device to use once we
+		 * set up the controller when the port's device is available.
+ */
+
+ p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
+ if (!p2p_dev) {
+ pr_err("no peer-to-peer memory is available for %s\n",
+ ns->device_path);
+ return -EINVAL;
+ }
+
+ pci_dev_put(p2p_dev);
+ }
+
+ return 0;
+}
+
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
+ struct nvmet_ns *ns)
+{
+ struct device *clients[2];
+ struct pci_dev *p2p_dev;
+ int ret;
+
+ if (!ctrl->p2p_client || !ns->use_p2pmem)
+ return;
+
+ if (ns->p2p_dev) {
+ ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
+ if (ret < 0)
+ return;
+
+ p2p_dev = pci_dev_get(ns->p2p_dev);
+ } else {
+ clients[0] = ctrl->p2p_client;
+ clients[1] = nvmet_ns_dev(ns);
+
+ p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
+ if (!p2p_dev) {
+ pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
+ dev_name(ctrl->p2p_client), ns->device_path);
+ return;
+ }
+ }
+
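+
+	/* remember the chosen device so request allocation can look it up by NSID */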
+ ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
+ if (ret < 0)
+ pci_dev_put(p2p_dev);
+
+ pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
+ ns->nsid);
+}
+
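+/*
+ * Re-read the size of the backing device or file and emit a changed-namespace
+ * event if it differs from what the host last saw.
+ */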
+void nvmet_ns_revalidate(struct nvmet_ns *ns)
+{
+ loff_t oldsize = ns->size;
+
+ if (ns->bdev)
+ nvmet_bdev_ns_revalidate(ns);
+ else
+ nvmet_file_ns_revalidate(ns);
+
+ if (oldsize != ns->size)
+ nvmet_ns_changed(ns->subsys, ns->nsid);
+}
+
int nvmet_ns_enable(struct nvmet_ns *ns)
{
struct nvmet_subsys *subsys = ns->subsys;
+ struct nvmet_ctrl *ctrl;
int ret;
mutex_lock(&subsys->lock);
+ ret = 0;
+
+ if (nvmet_passthru_ctrl(subsys)) {
+ pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
+ goto out_unlock;
+ }
+
+ if (ns->enabled)
+ goto out_unlock;
+
ret = -EMFILE;
if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
- goto out_unlock;
- ret = 0;
- if (ns->enabled)
goto out_unlock;
ret = nvmet_bdev_ns_enable(ns);
@@ -389,6 +568,13 @@
ret = nvmet_file_ns_enable(ns);
if (ret)
goto out_unlock;
+
+ ret = nvmet_p2pmem_ns_enable(ns);
+ if (ret)
+ goto out_dev_disable;
+
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ nvmet_p2pmem_ns_add_p2p(ctrl, ns);
ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
0, GFP_KERNEL);
@@ -398,23 +584,10 @@
if (ns->nsid > subsys->max_nsid)
subsys->max_nsid = ns->nsid;
- /*
- * The namespaces list needs to be sorted to simplify the implementation
- * of the Identify Namespace List subcommand.
- */
- if (list_empty(&subsys->namespaces)) {
- list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
- } else {
- struct nvmet_ns *old;
+ ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
+ if (ret)
+ goto out_restore_subsys_maxnsid;
- list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
- BUG_ON(ns->nsid == old->nsid);
- if (ns->nsid < old->nsid)
- break;
- }
-
- list_add_tail_rcu(&ns->dev_link, &old->dev_link);
- }
subsys->nr_namespaces++;
nvmet_ns_changed(subsys, ns->nsid);
@@ -423,7 +596,14 @@
out_unlock:
mutex_unlock(&subsys->lock);
return ret;
+
+out_restore_subsys_maxnsid:
+ subsys->max_nsid = nvmet_max_nsid(subsys);
+ percpu_ref_exit(&ns->ref);
out_dev_put:
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
+out_dev_disable:
nvmet_ns_dev_disable(ns);
goto out_unlock;
}
@@ -431,15 +611,20 @@
void nvmet_ns_disable(struct nvmet_ns *ns)
{
struct nvmet_subsys *subsys = ns->subsys;
+ struct nvmet_ctrl *ctrl;
mutex_lock(&subsys->lock);
if (!ns->enabled)
goto out_unlock;
ns->enabled = false;
- list_del_rcu(&ns->dev_link);
+ xa_erase(&ns->subsys->namespaces, ns->nsid);
if (ns->nsid == subsys->max_nsid)
subsys->max_nsid = nvmet_max_nsid(subsys);
+
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
+
mutex_unlock(&subsys->lock);
/*
@@ -456,6 +641,7 @@
percpu_ref_exit(&ns->ref);
mutex_lock(&subsys->lock);
+
subsys->nr_namespaces--;
nvmet_ns_changed(subsys, ns->nsid);
nvmet_ns_dev_disable(ns);
@@ -483,7 +669,6 @@
if (!ns)
return NULL;
- INIT_LIST_HEAD(&ns->dev_link);
init_completion(&ns->disable_done);
ns->nsid = nsid;
@@ -500,29 +685,66 @@
return ns;
}
-static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
+static void nvmet_update_sq_head(struct nvmet_req *req)
{
- u32 old_sqhd, new_sqhd;
- u16 sqhd;
-
- if (status)
- nvmet_set_status(req, status);
-
if (req->sq->size) {
+ u32 old_sqhd, new_sqhd;
+
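+		/* advance sqhd locklessly; retry the cmpxchg until it succeeds */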
do {
old_sqhd = req->sq->sqhd;
new_sqhd = (old_sqhd + 1) % req->sq->size;
} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
old_sqhd);
}
- sqhd = req->sq->sqhd & 0x0000FFFF;
- req->rsp->sq_head = cpu_to_le16(sqhd);
- req->rsp->sq_id = cpu_to_le16(req->sq->qid);
- req->rsp->command_id = req->cmd->common.command_id;
+ req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
+}
- if (req->ns)
- nvmet_put_namespace(req->ns);
+static void nvmet_set_error(struct nvmet_req *req, u16 status)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvme_error_slot *new_error_slot;
+ unsigned long flags;
+
+ req->cqe->status = cpu_to_le16(status << 1);
+
+ if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
+ return;
+
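+	/* record the failure in the controller's circular error log */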
+ spin_lock_irqsave(&ctrl->error_lock, flags);
+ ctrl->err_counter++;
+ new_error_slot =
+ &ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];
+
+ new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
+ new_error_slot->sqid = cpu_to_le16(req->sq->qid);
+ new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
+ new_error_slot->status_field = cpu_to_le16(status << 1);
+ new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
+ new_error_slot->lba = cpu_to_le64(req->error_slba);
+ new_error_slot->nsid = req->cmd->common.nsid;
+ spin_unlock_irqrestore(&ctrl->error_lock, flags);
+
+ /* set the more bit for this request */
+ req->cqe->status |= cpu_to_le16(1 << 14);
+}
+
+static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
+{
+ struct nvmet_ns *ns = req->ns;
+
+ if (!req->sq->sqhd_disabled)
+ nvmet_update_sq_head(req);
+ req->cqe->sq_id = cpu_to_le16(req->sq->qid);
+ req->cqe->command_id = req->cmd->common.command_id;
+
+ if (unlikely(status))
+ nvmet_set_error(req, status);
+
+ trace_nvmet_req_complete(req);
+
req->ops->queue_response(req);
+ if (ns)
+ nvmet_put_namespace(ns);
}
void nvmet_req_complete(struct nvmet_req *req, u16 status)
@@ -537,8 +759,6 @@
{
cq->qid = qid;
cq->size = size;
-
- ctrl->cqs[qid] = cq;
}
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
@@ -560,19 +780,28 @@
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
+ struct nvmet_ctrl *ctrl = sq->ctrl;
+
/*
* If this is the admin queue, complete all AERs so that our
* queue doesn't have outstanding requests on it.
*/
- if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
- nvmet_async_events_free(sq->ctrl);
+ if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
+ nvmet_async_events_failall(ctrl);
percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
wait_for_completion(&sq->confirm_done);
wait_for_completion(&sq->free_done);
percpu_ref_exit(&sq->ref);
- if (sq->ctrl) {
- nvmet_ctrl_put(sq->ctrl);
+ if (ctrl) {
+ /*
+ * The teardown flow may take some time, and the host may not
+ * send us keep-alive during this period, hence reset the
+ * traffic based keep-alive timer so we don't trigger a
+ * controller teardown as a result of a keep-alive expiration.
+ */
+ ctrl->reset_tbkas = true;
+ nvmet_ctrl_put(ctrl);
sq->ctrl = NULL; /* allows reusing the queue later */
}
}
@@ -639,15 +868,24 @@
if (unlikely(ret))
return ret;
+ if (nvmet_req_passthru_ctrl(req))
+ return nvmet_parse_passthru_io_cmd(req);
+
req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
- if (unlikely(!req->ns))
+ if (unlikely(!req->ns)) {
+ req->error_loc = offsetof(struct nvme_common_command, nsid);
return NVME_SC_INVALID_NS | NVME_SC_DNR;
+ }
ret = nvmet_check_ana_state(req->port, req->ns);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ req->error_loc = offsetof(struct nvme_common_command, nsid);
return ret;
+ }
ret = nvmet_io_cmd_check_access(req);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ req->error_loc = offsetof(struct nvme_common_command, nsid);
return ret;
+ }
if (req->ns->file)
return nvmet_file_parse_io_cmd(req);
@@ -665,13 +903,20 @@
req->sq = sq;
req->ops = ops;
req->sg = NULL;
+ req->metadata_sg = NULL;
req->sg_cnt = 0;
+ req->metadata_sg_cnt = 0;
req->transfer_len = 0;
- req->rsp->status = 0;
+ req->metadata_len = 0;
+ req->cqe->status = 0;
+ req->cqe->sq_head = 0;
req->ns = NULL;
+ req->error_loc = NVMET_NO_ERROR_LOC;
+ req->error_slba = 0;
/* no support for fused commands yet */
if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
+ req->error_loc = offsetof(struct nvme_common_command, flags);
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
goto fail;
}
@@ -682,29 +927,31 @@
* byte aligned.
*/
if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
+ req->error_loc = offsetof(struct nvme_common_command, flags);
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
goto fail;
}
if (unlikely(!req->sq->ctrl))
- /* will return an error for any Non-connect command: */
+ /* will return an error for any non-connect command: */
status = nvmet_parse_connect_cmd(req);
else if (likely(req->sq->qid != 0))
status = nvmet_parse_io_cmd(req);
- else if (req->cmd->common.opcode == nvme_fabrics_command)
- status = nvmet_parse_fabrics_cmd(req);
- else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
- status = nvmet_parse_discovery_cmd(req);
else
status = nvmet_parse_admin_cmd(req);
if (status)
goto fail;
+ trace_nvmet_req_init(req, req->cmd);
+
if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
goto fail;
}
+
+ if (sq->ctrl)
+ sq->ctrl->reset_tbkas = true;
return true;
@@ -722,14 +969,112 @@
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);
-void nvmet_req_execute(struct nvmet_req *req)
+bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
{
- if (unlikely(req->data_len != req->transfer_len))
+ if (unlikely(len != req->transfer_len)) {
+ req->error_loc = offsetof(struct nvme_common_command, dptr);
nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
- else
- req->execute(req);
+ return false;
+ }
+
+ return true;
}
-EXPORT_SYMBOL_GPL(nvmet_req_execute);
+EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
+
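+/*
+ * Like nvmet_check_transfer_len(), but only requires the expected data to
+ * fit within the transfer length, for commands with variable-size payloads.
+ */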
+bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
+{
+ if (unlikely(data_len > req->transfer_len)) {
+ req->error_loc = offsetof(struct nvme_common_command, dptr);
+ nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
+ return false;
+ }
+
+ return true;
+}
+
+static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
+{
+ return req->transfer_len - req->metadata_len;
+}
+
+static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
+ struct nvmet_req *req)
+{
+ req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
+ nvmet_data_transfer_len(req));
+ if (!req->sg)
+ goto out_err;
+
+ if (req->metadata_len) {
+ req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
+ &req->metadata_sg_cnt, req->metadata_len);
+ if (!req->metadata_sg)
+ goto out_free_sg;
+ }
+
+ req->p2p_dev = p2p_dev;
+
+ return 0;
+out_free_sg:
+ pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
+out_err:
+ return -ENOMEM;
+}
+
+static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
+{
+ if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
+ !req->sq->ctrl || !req->sq->qid || !req->ns)
+ return NULL;
+ return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
+}
+
+int nvmet_req_alloc_sgls(struct nvmet_req *req)
+{
+ struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
+
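+	/* try P2P memory first if a device was mapped for this namespace */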
+ if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
+ return 0;
+
+ req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
+ &req->sg_cnt);
+ if (unlikely(!req->sg))
+ goto out;
+
+ if (req->metadata_len) {
+ req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
+ &req->metadata_sg_cnt);
+ if (unlikely(!req->metadata_sg))
+ goto out_free;
+ }
+
+ return 0;
+out_free:
+ sgl_free(req->sg);
+out:
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);
+
+void nvmet_req_free_sgls(struct nvmet_req *req)
+{
+ if (req->p2p_dev) {
+ pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
+ if (req->metadata_sg)
+ pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
+ req->p2p_dev = NULL;
+ } else {
+ sgl_free(req->sg);
+ if (req->metadata_sg)
+ sgl_free(req->metadata_sg);
+ }
+
+ req->sg = NULL;
+ req->metadata_sg = NULL;
+ req->sg_cnt = 0;
+ req->metadata_sg_cnt = 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
static inline bool nvmet_cc_en(u32 cc)
{
@@ -853,7 +1198,7 @@
if (!subsys) {
pr_warn("connect request for invalid subsystem %s!\n",
subsysnqn);
- req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
}
@@ -874,7 +1219,7 @@
pr_warn("could not find controller %d for subsys %s / host %s\n",
cntlid, subsysnqn, hostnqn);
- req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
out:
@@ -899,12 +1244,16 @@
return 0;
}
-static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
- const char *hostnqn)
+bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
{
struct nvmet_host_link *p;
+ lockdep_assert_held(&nvmet_config_sem);
+
if (subsys->allow_any_host)
+ return true;
+
+ if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
return true;
list_for_each_entry(p, &subsys->hosts, entry) {
@@ -915,28 +1264,36 @@
return false;
}
-static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
- const char *hostnqn)
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
+ struct nvmet_req *req)
{
- struct nvmet_subsys_link *s;
+ struct nvmet_ns *ns;
+ unsigned long idx;
- list_for_each_entry(s, &req->port->subsystems, entry) {
- if (__nvmet_host_allowed(s->subsys, hostnqn))
- return true;
- }
+ if (!req->p2p_client)
+ return;
- return false;
+ ctrl->p2p_client = get_device(req->p2p_client);
+
+ xa_for_each(&ctrl->subsys->namespaces, idx, ns)
+ nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}
-bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
- const char *hostnqn)
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
{
- lockdep_assert_held(&nvmet_config_sem);
+ struct radix_tree_iter iter;
+ void __rcu **slot;
- if (subsys->type == NVME_NQN_DISC)
- return nvmet_host_discovery_allowed(req, hostnqn);
- else
- return __nvmet_host_allowed(subsys, hostnqn);
+ radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
+ pci_dev_put(radix_tree_deref_slot(slot));
+
+ put_device(ctrl->p2p_client);
}
static void nvmet_fatal_error_handler(struct work_struct *work)
@@ -961,16 +1318,16 @@
if (!subsys) {
pr_warn("connect request for invalid subsystem %s!\n",
subsysnqn);
- req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
goto out;
}
status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
down_read(&nvmet_config_sem);
- if (!nvmet_host_allowed(req, subsys, hostnqn)) {
+ if (!nvmet_host_allowed(subsys, hostnqn)) {
pr_info("connect by host %s for subsystem %s not allowed\n",
hostnqn, subsysnqn);
- req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
up_read(&nvmet_config_sem);
status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
goto out_put_subsystem;
@@ -989,6 +1346,7 @@
INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
INIT_LIST_HEAD(&ctrl->async_events);
+ INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
@@ -1003,20 +1361,17 @@
if (!ctrl->changed_ns_list)
goto out_free_ctrl;
- ctrl->cqs = kcalloc(subsys->max_qid + 1,
- sizeof(struct nvmet_cq *),
- GFP_KERNEL);
- if (!ctrl->cqs)
- goto out_free_changed_ns_list;
-
ctrl->sqs = kcalloc(subsys->max_qid + 1,
sizeof(struct nvmet_sq *),
GFP_KERNEL);
if (!ctrl->sqs)
- goto out_free_cqs;
+ goto out_free_changed_ns_list;
+
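+	/* the cntlid range is user-configurable; reject an inverted range */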
+ if (subsys->cntlid_min > subsys->cntlid_max)
+ goto out_free_sqs;
ret = ida_simple_get(&cntlid_ida,
- NVME_CNTLID_MIN, NVME_CNTLID_MAX,
+ subsys->cntlid_min, subsys->cntlid_max,
GFP_KERNEL);
if (ret < 0) {
status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
@@ -1025,46 +1380,32 @@
ctrl->cntlid = ret;
ctrl->ops = req->ops;
- if (ctrl->subsys->type == NVME_NQN_DISC) {
- /* Don't accept keep-alive timeout for discovery controllers */
- if (kato) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
- goto out_remove_ida;
- }
- /*
- * Discovery controllers use some arbitrary high value in order
- * to cleanup stale discovery sessions
- *
- * From the latest base diff RC:
- * "The Keep Alive command is not supported by
- * Discovery controllers. A transport may specify a
- * fixed Discovery controller activity timeout value
- * (e.g., 2 minutes). If no commands are received
- * by a Discovery controller within that time
- * period, the controller may perform the
- * actions for Keep Alive Timer expiration".
- */
- ctrl->kato = NVMET_DISC_KATO;
- } else {
- /* keep-alive timeout in seconds */
- ctrl->kato = DIV_ROUND_UP(kato, 1000);
- }
+ /*
+ * Discovery controllers may use some arbitrary high value
+	 * in order to clean up stale discovery sessions
+ */
+ if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
+ kato = NVMET_DISC_KATO_MS;
+
+ /* keep-alive timeout in seconds */
+ ctrl->kato = DIV_ROUND_UP(kato, 1000);
+
+ ctrl->err_counter = 0;
+ spin_lock_init(&ctrl->error_lock);
+
nvmet_start_keep_alive_timer(ctrl);
mutex_lock(&subsys->lock);
list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
+ nvmet_setup_p2p_ns_map(ctrl, req);
mutex_unlock(&subsys->lock);
*ctrlp = ctrl;
return 0;
-out_remove_ida:
- ida_simple_remove(&cntlid_ida, ctrl->cntlid);
out_free_sqs:
kfree(ctrl->sqs);
-out_free_cqs:
- kfree(ctrl->cqs);
out_free_changed_ns_list:
kfree(ctrl->changed_ns_list);
out_free_ctrl:
@@ -1081,6 +1422,7 @@
struct nvmet_subsys *subsys = ctrl->subsys;
mutex_lock(&subsys->lock);
+ nvmet_release_p2p_ns_map(ctrl);
list_del(&ctrl->subsys_entry);
mutex_unlock(&subsys->lock);
@@ -1091,8 +1433,8 @@
ida_simple_remove(&cntlid_ida, ctrl->cntlid);
+ nvmet_async_events_free(ctrl);
kfree(ctrl->sqs);
- kfree(ctrl->cqs);
kfree(ctrl->changed_ns_list);
kfree(ctrl);
@@ -1123,8 +1465,7 @@
if (!port)
return NULL;
- if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
- NVMF_NQN_SIZE)) {
+ if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
return NULL;
return nvmet_disc_subsys;
@@ -1151,9 +1492,9 @@
subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
if (!subsys)
- return NULL;
+ return ERR_PTR(-ENOMEM);
- subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
+ subsys->ver = NVMET_DEFAULT_VS;
/* generate a random serial number as our controllers are ephemeral: */
get_random_bytes(&subsys->serial, sizeof(subsys->serial));
@@ -1167,20 +1508,21 @@
default:
pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
kfree(subsys);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
subsys->type = type;
subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
GFP_KERNEL);
if (!subsys->subsysnqn) {
kfree(subsys);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
-
+ subsys->cntlid_min = NVME_CNTLID_MIN;
+ subsys->cntlid_max = NVME_CNTLID_MAX;
kref_init(&subsys->ref);
mutex_init(&subsys->lock);
- INIT_LIST_HEAD(&subsys->namespaces);
+ xa_init(&subsys->namespaces);
INIT_LIST_HEAD(&subsys->ctrls);
INIT_LIST_HEAD(&subsys->hosts);
@@ -1192,9 +1534,13 @@
struct nvmet_subsys *subsys =
container_of(ref, struct nvmet_subsys, ref);
- WARN_ON_ONCE(!list_empty(&subsys->namespaces));
+ WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
+
+ xa_destroy(&subsys->namespaces);
+ nvmet_passthru_subsys_free(subsys);
kfree(subsys->subsysnqn);
+ kfree_rcu(subsys->model, rcuhead);
kfree(subsys);
}
@@ -1258,3 +1604,4 @@
module_exit(nvmet_exit);
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);
--
Gitblit v1.6.2