```diff
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Common code for the NVMe target.
  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
 */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/rculist.h>
+#include <linux/pci-p2pdma.h>
+#include <linux/scatterlist.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
 
 #include "nvmet.h"
 
```

```diff
 u64 nvmet_ana_chgcnt;
 DECLARE_RWSEM(nvmet_ana_sem);
 
+inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
+{
+	u16 status;
+
+	switch (errno) {
+	case 0:
+		status = NVME_SC_SUCCESS;
+		break;
+	case -ENOSPC:
+		req->error_loc = offsetof(struct nvme_rw_command, length);
+		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
+		break;
+	case -EREMOTEIO:
+		req->error_loc = offsetof(struct nvme_rw_command, slba);
+		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+		break;
+	case -EOPNOTSUPP:
+		req->error_loc = offsetof(struct nvme_common_command, opcode);
+		switch (req->cmd->common.opcode) {
+		case nvme_cmd_dsm:
+		case nvme_cmd_write_zeroes:
+			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
+			break;
+		default:
+			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		}
+		break;
+	case -ENODATA:
+		req->error_loc = offsetof(struct nvme_rw_command, nsid);
+		status = NVME_SC_ACCESS_DENIED;
+		break;
+	case -EIO:
+		fallthrough;
+	default:
+		req->error_loc = offsetof(struct nvme_common_command, opcode);
+		status = NVME_SC_INTERNAL | NVME_SC_DNR;
+	}
+
+	return status;
+}
+
```
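
Centralizing the errno translation lets every backend report errors the same way. A usage sketch follows; the completion helper below is hypothetical, but the file and bdev backends call the new helper in essentially this shape:

```c
/*
 * Hypothetical backend completion path (not part of this patch):
 * translate a lower-layer errno once; errno_to_nvme_status() also
 * records req->error_loc for the error log page.
 */
static void example_backend_complete(struct nvmet_req *req, int err)
{
	nvmet_req_complete(req, errno_to_nvme_status(req, err));
}
```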

```diff
 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
 		const char *subsysnqn);
 
 u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
 		size_t len)
 {
-	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
+	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
+		req->error_loc = offsetof(struct nvme_common_command, dptr);
 		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+	}
 	return 0;
 }
 
 u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
 {
-	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
+	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
+		req->error_loc = offsetof(struct nvme_common_command, dptr);
 		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+	}
 	return 0;
 }
 
 u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
 {
-	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len)
+	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
+		req->error_loc = offsetof(struct nvme_common_command, dptr);
 		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+	}
 	return 0;
 }
 
 static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
 {
-	struct nvmet_ns *ns;
+	unsigned long nsid = 0;
+	struct nvmet_ns *cur;
+	unsigned long idx;
 
-	if (list_empty(&subsys->namespaces))
-		return 0;
+	xa_for_each(&subsys->namespaces, idx, cur)
+		nsid = cur->nsid;
 
-	ns = list_last_entry(&subsys->namespaces, struct nvmet_ns, dev_link);
-	return ns->nsid;
+	return nsid;
 }
```
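
With namespaces now stored in an xarray keyed by nsid, the highest NSID falls out of an ordered walk. A standalone sketch of the same pattern, for illustration only:

```c
#include <linux/xarray.h>

/*
 * xa_for_each() visits present entries in ascending index order, so the
 * last index seen is the highest one. That is why the loop above can
 * simply overwrite 'nsid' on every iteration.
 */
static unsigned long example_highest_index(struct xarray *xa)
{
	unsigned long idx, last = 0;
	void *entry;

	xa_for_each(xa, idx, entry)
		last = idx;

	return last;
}
```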

```diff
 
 static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
```

```diff
 	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
 }
 
-static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
+static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
 {
+	u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
 	struct nvmet_req *req;
 
-	while (1) {
-		mutex_lock(&ctrl->lock);
-		if (!ctrl->nr_async_event_cmds) {
-			mutex_unlock(&ctrl->lock);
-			return;
-		}
-
+	mutex_lock(&ctrl->lock);
+	while (ctrl->nr_async_event_cmds) {
 		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
 		mutex_unlock(&ctrl->lock);
-		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
+		nvmet_req_complete(req, status);
+		mutex_lock(&ctrl->lock);
 	}
+	mutex_unlock(&ctrl->lock);
 }
 
-static void nvmet_async_event_work(struct work_struct *work)
+static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
 {
-	struct nvmet_ctrl *ctrl =
-		container_of(work, struct nvmet_ctrl, async_event_work);
 	struct nvmet_async_event *aen;
 	struct nvmet_req *req;
 
-	while (1) {
-		mutex_lock(&ctrl->lock);
-		aen = list_first_entry_or_null(&ctrl->async_events,
-				struct nvmet_async_event, entry);
-		if (!aen || !ctrl->nr_async_event_cmds) {
-			mutex_unlock(&ctrl->lock);
-			return;
-		}
-
+	mutex_lock(&ctrl->lock);
+	while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
+		aen = list_first_entry(&ctrl->async_events,
+				struct nvmet_async_event, entry);
 		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
 		nvmet_set_result(req, nvmet_async_event_result(aen));
 
```

```diff
 		kfree(aen);
 
 		mutex_unlock(&ctrl->lock);
+		trace_nvmet_async_event(ctrl, req->cqe->result.u32);
 		nvmet_req_complete(req, 0);
+		mutex_lock(&ctrl->lock);
 	}
+	mutex_unlock(&ctrl->lock);
 }
```
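
Both rewritten loops follow the same drain discipline: pop one item while holding ctrl->lock, drop the lock to call nvmet_req_complete() (which may call back into the transport), then retake the lock before examining the queue again. A generic sketch of that shape, with invented demo_* names:

```c
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_item {
	struct list_head entry;
};

static void demo_process(struct demo_item *item)
{
	kfree(item);		/* stand-in for completion work; may sleep */
}

static void demo_drain(struct mutex *lock, struct list_head *queue)
{
	struct demo_item *item;

	mutex_lock(lock);
	while (!list_empty(queue)) {
		item = list_first_entry(queue, struct demo_item, entry);
		list_del(&item->entry);
		mutex_unlock(lock);	/* never call out with the lock held */
		demo_process(item);
		mutex_lock(lock);	/* re-check the queue under the lock */
	}
	mutex_unlock(lock);
}
```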

```diff
 
-static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
+static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
+{
+	struct nvmet_async_event *aen, *tmp;
+
+	mutex_lock(&ctrl->lock);
+	list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
+		list_del(&aen->entry);
+		kfree(aen);
+	}
+	mutex_unlock(&ctrl->lock);
+}
+
+static void nvmet_async_event_work(struct work_struct *work)
+{
+	struct nvmet_ctrl *ctrl =
+		container_of(work, struct nvmet_ctrl, async_event_work);
+
+	nvmet_async_events_process(ctrl);
+}
+
+void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
 		u8 event_info, u8 log_page)
 {
 	struct nvmet_async_event *aen;
```

```diff
 	mutex_unlock(&ctrl->lock);
 
 	schedule_work(&ctrl->async_event_work);
-}
-
-static bool nvmet_aen_disabled(struct nvmet_ctrl *ctrl, u32 aen)
-{
-	if (!(READ_ONCE(ctrl->aen_enabled) & aen))
-		return true;
-	return test_and_set_bit(aen, &ctrl->aen_masked);
 }
 
 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
```

```diff
 {
 	struct nvmet_ctrl *ctrl;
 
+	lockdep_assert_held(&subsys->lock);
+
 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
 		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
-		if (nvmet_aen_disabled(ctrl, NVME_AEN_CFG_NS_ATTR))
+		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
 			continue;
 		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
 				NVME_AER_NOTICE_NS_CHANGED,
```

```diff
 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
 		if (port && ctrl->port != port)
 			continue;
-		if (nvmet_aen_disabled(ctrl, NVME_AEN_CFG_ANA_CHANGE))
+		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
 			continue;
 		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
 				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
```

```diff
 }
 EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
 
+void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
+{
+	struct nvmet_ctrl *ctrl;
+
+	mutex_lock(&subsys->lock);
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+		if (ctrl->port == port)
+			ctrl->ops->delete_ctrl(ctrl);
+	}
+	mutex_unlock(&subsys->lock);
+}
+
 int nvmet_enable_port(struct nvmet_port *port)
 {
 	const struct nvmet_fabrics_ops *ops;
```

```diff
 	if (!try_module_get(ops->owner))
 		return -EINVAL;
 
-	ret = ops->add_port(port);
-	if (ret) {
-		module_put(ops->owner);
-		return ret;
+	/*
+	 * If the user requested PI support and the transport isn't pi capable,
+	 * don't enable the port.
+	 */
+	if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
+		pr_err("T10-PI is not supported by transport type %d\n",
+		       port->disc_addr.trtype);
+		ret = -EINVAL;
+		goto out_put;
 	}
+
+	ret = ops->add_port(port);
+	if (ret)
+		goto out_put;
 
 	/* If the transport didn't set inline_data_size, then disable it. */
 	if (port->inline_data_size < 0)
 		port->inline_data_size = 0;
 
 	port->enabled = true;
+	port->tr_ops = ops;
 	return 0;
+
+out_put:
+	module_put(ops->owner);
+	return ret;
 }
```
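
A transport opts into T10-PI by advertising it in its ops flags, which the pi_enable check above consumes. Illustration only, with the remaining callbacks elided; the field names besides .flags, .owner and .type are assumptions about the ops structure and not part of this patch:

```c
/* Sketch of a transport advertising metadata/T10-PI capability. */
static const struct nvmet_fabrics_ops example_tr_ops = {
	.owner	= THIS_MODULE,
	.type	= NVMF_TRTYPE_RDMA,
	.flags	= NVMF_METADATA_SUPPORTED,	/* allows port->pi_enable */
	/* .add_port = ..., .remove_port = ..., .queue_response = ..., */
};
```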

```diff
 }
 
 void nvmet_disable_port(struct nvmet_port *port)
```

```diff
 	lockdep_assert_held(&nvmet_config_sem);
 
 	port->enabled = false;
+	port->tr_ops = NULL;
 
 	ops = nvmet_transports[port->disc_addr.trtype];
 	ops->remove_port(port);
```

```diff
 {
 	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
 			struct nvmet_ctrl, ka_work);
+	bool reset_tbkas = ctrl->reset_tbkas;
+
+	ctrl->reset_tbkas = false;
+	if (reset_tbkas) {
+		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
+			 ctrl->cntlid);
+		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+		return;
+	}
 
 	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
 		ctrl->cntlid, ctrl->kato);
```

```diff
 	nvmet_ctrl_fatal_error(ctrl);
 }
```
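
The reset_tbkas flag implements the spec's Traffic Based Keep Alive (TBKAS) idea: command traffic observed since the last expiry counts as an implicit keep-alive. A minimal sketch of both halves of the scheme, with invented example_* names (the producer side corresponds to the reset_tbkas assignment in nvmet_req_init() further down):

```c
#include <linux/workqueue.h>

struct example_ctrl {
	struct delayed_work	ka_work;
	bool			saw_traffic;	/* set by the command path */
	u16			cntlid;
	u32			kato;		/* keep-alive timeout, seconds */
};

/* Command path: any accepted command counts as an implicit keep-alive. */
static void example_mark_traffic(struct example_ctrl *ctrl)
{
	ctrl->saw_traffic = true;
}

/* Timer path: traffic seen since the last expiry re-arms the timer. */
static void example_ka_work(struct work_struct *work)
{
	struct example_ctrl *ctrl = container_of(to_delayed_work(work),
			struct example_ctrl, ka_work);
	bool saw_traffic = ctrl->saw_traffic;

	ctrl->saw_traffic = false;
	if (saw_traffic) {
		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
		return;
	}
	pr_err("ctrl %d keep-alive expired\n", ctrl->cntlid);
	/* ... tear down the controller ... */
}
```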

```diff
 
-static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
+void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
 {
 	if (unlikely(ctrl->kato == 0))
 		return;
```

```diff
 	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
 }
 
-static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
+void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
 {
 	if (unlikely(ctrl->kato == 0))
 		return;
```

```diff
 	cancel_delayed_work_sync(&ctrl->ka_work);
 }
 
-static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
-		__le32 nsid)
-{
-	struct nvmet_ns *ns;
-
-	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
-		if (ns->nsid == le32_to_cpu(nsid))
-			return ns;
-	}
-
-	return NULL;
-}
-
 struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
 {
 	struct nvmet_ns *ns;
 
-	rcu_read_lock();
-	ns = __nvmet_find_namespace(ctrl, nsid);
+	ns = xa_load(&ctrl->subsys->namespaces, le32_to_cpu(nsid));
 	if (ns)
 		percpu_ref_get(&ns->ref);
-	rcu_read_unlock();
 
 	return ns;
 }
```
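
xa_load() gives a lockless lookup, and taking the percpu ref during the lookup pins the namespace until the request completes. A hypothetical caller, mirroring what nvmet_parse_io_cmd() does later in this patch:

```c
/*
 * Usage sketch (invented call site): every successful lookup takes a
 * percpu reference, so the namespace cannot be torn down while a
 * request is using it.  nvmet_put_namespace() drops the reference in
 * __nvmet_req_complete() further down.
 */
static u16 example_handle_io(struct nvmet_req *req, __le32 nsid)
{
	req->ns = nvmet_find_namespace(req->sq->ctrl, nsid);
	if (!req->ns)
		return NVME_SC_INVALID_NS | NVME_SC_DNR;

	/* ... issue the I/O; the ref is dropped at completion time ... */
	return 0;
}
```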

```diff
 		nvmet_file_ns_disable(ns);
 }
 
+static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
+{
+	int ret;
+	struct pci_dev *p2p_dev;
+
+	if (!ns->use_p2pmem)
+		return 0;
+
+	if (!ns->bdev) {
+		pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
+		return -EINVAL;
+	}
+
+	if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
+		pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
+		       ns->device_path);
+		return -EINVAL;
+	}
+
+	if (ns->p2p_dev) {
+		ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
+		if (ret < 0)
+			return -EINVAL;
+	} else {
+		/*
+		 * Right now we just check that there is p2pmem available so
+		 * we can report an error to the user right away if there
+		 * is not. We'll find the actual device to use once we
+		 * setup the controller when the port's device is available.
+		 */
+
+		p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
+		if (!p2p_dev) {
+			pr_err("no peer-to-peer memory is available for %s\n",
+			       ns->device_path);
+			return -EINVAL;
+		}
+
+		pci_dev_put(p2p_dev);
+	}
+
+	return 0;
+}
+
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
+		struct nvmet_ns *ns)
+{
+	struct device *clients[2];
+	struct pci_dev *p2p_dev;
+	int ret;
+
+	if (!ctrl->p2p_client || !ns->use_p2pmem)
+		return;
+
+	if (ns->p2p_dev) {
+		ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
+		if (ret < 0)
+			return;
+
+		p2p_dev = pci_dev_get(ns->p2p_dev);
+	} else {
+		clients[0] = ctrl->p2p_client;
+		clients[1] = nvmet_ns_dev(ns);
+
+		p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
+		if (!p2p_dev) {
+			pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
+			       dev_name(ctrl->p2p_client), ns->device_path);
+			return;
+		}
+	}
+
+	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
+	if (ret < 0)
+		pci_dev_put(p2p_dev);
+
+	pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
+		ns->nsid);
+}
+
+void nvmet_ns_revalidate(struct nvmet_ns *ns)
+{
+	loff_t oldsize = ns->size;
+
+	if (ns->bdev)
+		nvmet_bdev_ns_revalidate(ns);
+	else
+		nvmet_file_ns_revalidate(ns);
+
+	if (oldsize != ns->size)
+		nvmet_ns_changed(ns->subsys, ns->nsid);
+}
+
 int nvmet_ns_enable(struct nvmet_ns *ns)
 {
 	struct nvmet_subsys *subsys = ns->subsys;
+	struct nvmet_ctrl *ctrl;
 	int ret;
 
 	mutex_lock(&subsys->lock);
+	ret = 0;
+
+	if (nvmet_passthru_ctrl(subsys)) {
+		pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
+		goto out_unlock;
+	}
+
+	if (ns->enabled)
+		goto out_unlock;
+
 	ret = -EMFILE;
 	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
-		goto out_unlock;
-	ret = 0;
-	if (ns->enabled)
 		goto out_unlock;
 
 	ret = nvmet_bdev_ns_enable(ns);
```

```diff
 		ret = nvmet_file_ns_enable(ns);
 	if (ret)
 		goto out_unlock;
+
+	ret = nvmet_p2pmem_ns_enable(ns);
+	if (ret)
+		goto out_dev_disable;
+
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
 
 	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
 			0, GFP_KERNEL);
```

```diff
 	if (ns->nsid > subsys->max_nsid)
 		subsys->max_nsid = ns->nsid;
 
-	/*
-	 * The namespaces list needs to be sorted to simplify the implementation
-	 * of the Identify Namespace List subcommand.
-	 */
-	if (list_empty(&subsys->namespaces)) {
-		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
-	} else {
-		struct nvmet_ns *old;
+	ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
+	if (ret)
+		goto out_restore_subsys_maxnsid;
 
-		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
-			BUG_ON(ns->nsid == old->nsid);
-			if (ns->nsid < old->nsid)
-				break;
-		}
-
-		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
-	}
 	subsys->nr_namespaces++;
 
 	nvmet_ns_changed(subsys, ns->nsid);
```

```diff
 out_unlock:
 	mutex_unlock(&subsys->lock);
 	return ret;
+
+out_restore_subsys_maxnsid:
+	subsys->max_nsid = nvmet_max_nsid(subsys);
+	percpu_ref_exit(&ns->ref);
 out_dev_put:
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
+out_dev_disable:
 	nvmet_ns_dev_disable(ns);
 	goto out_unlock;
 }
```

```diff
 void nvmet_ns_disable(struct nvmet_ns *ns)
 {
 	struct nvmet_subsys *subsys = ns->subsys;
+	struct nvmet_ctrl *ctrl;
 
 	mutex_lock(&subsys->lock);
 	if (!ns->enabled)
 		goto out_unlock;
 
 	ns->enabled = false;
-	list_del_rcu(&ns->dev_link);
+	xa_erase(&ns->subsys->namespaces, ns->nsid);
 	if (ns->nsid == subsys->max_nsid)
 		subsys->max_nsid = nvmet_max_nsid(subsys);
+
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
+
 	mutex_unlock(&subsys->lock);
 
 	/*
```

```diff
 	percpu_ref_exit(&ns->ref);
 
 	mutex_lock(&subsys->lock);
+
 	subsys->nr_namespaces--;
 	nvmet_ns_changed(subsys, ns->nsid);
 	nvmet_ns_dev_disable(ns);
```

```diff
 	if (!ns)
 		return NULL;
 
-	INIT_LIST_HEAD(&ns->dev_link);
 	init_completion(&ns->disable_done);
 
 	ns->nsid = nsid;
```

```diff
 	return ns;
 }
 
-static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
+static void nvmet_update_sq_head(struct nvmet_req *req)
 {
-	u32 old_sqhd, new_sqhd;
-	u16 sqhd;
-
-	if (status)
-		nvmet_set_status(req, status);
-
 	if (req->sq->size) {
+		u32 old_sqhd, new_sqhd;
+
 		do {
 			old_sqhd = req->sq->sqhd;
 			new_sqhd = (old_sqhd + 1) % req->sq->size;
 		} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
 				old_sqhd);
 	}
-	sqhd = req->sq->sqhd & 0x0000FFFF;
-	req->rsp->sq_head = cpu_to_le16(sqhd);
-	req->rsp->sq_id = cpu_to_le16(req->sq->qid);
-	req->rsp->command_id = req->cmd->common.command_id;
+	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
+}
```
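
The do/while-cmpxchg() above is the usual lock-free read-modify-write: recompute from a snapshot and publish only if nobody raced in between. The generic form of the same loop, as a sketch:

```c
/* Atomically advance *val by one, wrapping at 'mod' (illustrative). */
static u32 example_inc_mod(u32 *val, u32 mod)
{
	u32 old, new;

	do {
		old = READ_ONCE(*val);		/* snapshot */
		new = (old + 1) % mod;		/* compute the update */
	} while (cmpxchg(val, old, new) != old);	/* retry if raced */

	return new;
}
```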

```diff
 
-	if (req->ns)
-		nvmet_put_namespace(req->ns);
+static void nvmet_set_error(struct nvmet_req *req, u16 status)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	struct nvme_error_slot *new_error_slot;
+	unsigned long flags;
+
+	req->cqe->status = cpu_to_le16(status << 1);
+
+	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
+		return;
+
+	spin_lock_irqsave(&ctrl->error_lock, flags);
+	ctrl->err_counter++;
+	new_error_slot =
+		&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];
+
+	new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
+	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
+	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
+	new_error_slot->status_field = cpu_to_le16(status << 1);
+	new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
+	new_error_slot->lba = cpu_to_le64(req->error_slba);
+	new_error_slot->nsid = req->cmd->common.nsid;
+	spin_unlock_irqrestore(&ctrl->error_lock, flags);
+
+	/* set the more bit for this request */
+	req->cqe->status |= cpu_to_le16(1 << 14);
+}
+
+static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
+{
+	struct nvmet_ns *ns = req->ns;
+
+	if (!req->sq->sqhd_disabled)
+		nvmet_update_sq_head(req);
+	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
+	req->cqe->command_id = req->cmd->common.command_id;
+
+	if (unlikely(status))
+		nvmet_set_error(req, status);
+
+	trace_nvmet_req_complete(req);
+
 	req->ops->queue_response(req);
+	if (ns)
+		nvmet_put_namespace(ns);
 }
 
 void nvmet_req_complete(struct nvmet_req *req, u16 status)
```

```diff
 {
 	cq->qid = qid;
 	cq->size = size;
-
-	ctrl->cqs[qid] = cq;
 }
 
 void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
```

```diff
 
 void nvmet_sq_destroy(struct nvmet_sq *sq)
 {
+	struct nvmet_ctrl *ctrl = sq->ctrl;
+
 	/*
 	 * If this is the admin queue, complete all AERs so that our
 	 * queue doesn't have outstanding requests on it.
 	 */
-	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
-		nvmet_async_events_free(sq->ctrl);
+	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
+		nvmet_async_events_failall(ctrl);
 	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
 	wait_for_completion(&sq->confirm_done);
 	wait_for_completion(&sq->free_done);
 	percpu_ref_exit(&sq->ref);
 
-	if (sq->ctrl) {
-		nvmet_ctrl_put(sq->ctrl);
+	if (ctrl) {
+		/*
+		 * The teardown flow may take some time, and the host may not
+		 * send us keep-alive during this period, hence reset the
+		 * traffic based keep-alive timer so we don't trigger a
+		 * controller teardown as a result of a keep-alive expiration.
+		 */
+		ctrl->reset_tbkas = true;
+		nvmet_ctrl_put(ctrl);
 		sq->ctrl = NULL; /* allows reusing the queue later */
 	}
 }
```

```diff
 	if (unlikely(ret))
 		return ret;
 
+	if (nvmet_req_passthru_ctrl(req))
+		return nvmet_parse_passthru_io_cmd(req);
+
 	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
-	if (unlikely(!req->ns))
+	if (unlikely(!req->ns)) {
+		req->error_loc = offsetof(struct nvme_common_command, nsid);
 		return NVME_SC_INVALID_NS | NVME_SC_DNR;
+	}
 	ret = nvmet_check_ana_state(req->port, req->ns);
-	if (unlikely(ret))
+	if (unlikely(ret)) {
+		req->error_loc = offsetof(struct nvme_common_command, nsid);
 		return ret;
+	}
 	ret = nvmet_io_cmd_check_access(req);
-	if (unlikely(ret))
+	if (unlikely(ret)) {
+		req->error_loc = offsetof(struct nvme_common_command, nsid);
 		return ret;
+	}
 
 	if (req->ns->file)
 		return nvmet_file_parse_io_cmd(req);
```

```diff
 	req->sq = sq;
 	req->ops = ops;
 	req->sg = NULL;
+	req->metadata_sg = NULL;
 	req->sg_cnt = 0;
+	req->metadata_sg_cnt = 0;
 	req->transfer_len = 0;
-	req->rsp->status = 0;
+	req->metadata_len = 0;
+	req->cqe->status = 0;
+	req->cqe->sq_head = 0;
 	req->ns = NULL;
+	req->error_loc = NVMET_NO_ERROR_LOC;
+	req->error_slba = 0;
 
 	/* no support for fused commands yet */
 	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
+		req->error_loc = offsetof(struct nvme_common_command, flags);
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		goto fail;
 	}
```

```diff
 	 * byte aligned.
 	 */
 	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
+		req->error_loc = offsetof(struct nvme_common_command, flags);
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		goto fail;
 	}
 
 	if (unlikely(!req->sq->ctrl))
-		/* will return an error for any Non-connect command: */
+		/* will return an error for any non-connect command: */
 		status = nvmet_parse_connect_cmd(req);
 	else if (likely(req->sq->qid != 0))
 		status = nvmet_parse_io_cmd(req);
-	else if (req->cmd->common.opcode == nvme_fabrics_command)
-		status = nvmet_parse_fabrics_cmd(req);
-	else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
-		status = nvmet_parse_discovery_cmd(req);
 	else
 		status = nvmet_parse_admin_cmd(req);
 
 	if (status)
 		goto fail;
 
+	trace_nvmet_req_init(req, req->cmd);
+
 	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		goto fail;
 	}
+
+	if (sq->ctrl)
+		sq->ctrl->reset_tbkas = true;
 
 	return true;
 
```

```diff
 }
 EXPORT_SYMBOL_GPL(nvmet_req_uninit);
 
-void nvmet_req_execute(struct nvmet_req *req)
+bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
 {
-	if (unlikely(req->data_len != req->transfer_len))
+	if (unlikely(len != req->transfer_len)) {
+		req->error_loc = offsetof(struct nvme_common_command, dptr);
 		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
-	else
-		req->execute(req);
+		return false;
+	}
+
+	return true;
 }
-EXPORT_SYMBOL_GPL(nvmet_req_execute);
+EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
+
+bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
+{
+	if (unlikely(data_len > req->transfer_len)) {
+		req->error_loc = offsetof(struct nvme_common_command, dptr);
+		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
+		return false;
+	}
+
+	return true;
+}
```
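
With nvmet_req_execute() gone, each handler now validates the host-advertised transfer length itself. Typical handler-side usage, sketched with an invented handler name; on mismatch the helper has already completed the request, so the handler just returns:

```c
static void example_execute_identify(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;		/* already completed with SGL_INVALID_DATA */

	/* ... build and copy the identify payload ... */
}
```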

```diff
+
+static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
+{
+	return req->transfer_len - req->metadata_len;
+}
+
+static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
+		struct nvmet_req *req)
+{
+	req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
+			nvmet_data_transfer_len(req));
+	if (!req->sg)
+		goto out_err;
+
+	if (req->metadata_len) {
+		req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
+				&req->metadata_sg_cnt, req->metadata_len);
+		if (!req->metadata_sg)
+			goto out_free_sg;
+	}
+
+	req->p2p_dev = p2p_dev;
+
+	return 0;
+out_free_sg:
+	pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
+out_err:
+	return -ENOMEM;
+}
+
+static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
+{
+	if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
+	    !req->sq->ctrl || !req->sq->qid || !req->ns)
+		return NULL;
+	return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
+}
+
+int nvmet_req_alloc_sgls(struct nvmet_req *req)
+{
+	struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
+
+	if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
+		return 0;
+
+	req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
+			    &req->sg_cnt);
+	if (unlikely(!req->sg))
+		goto out;
+
+	if (req->metadata_len) {
+		req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
+					     &req->metadata_sg_cnt);
+		if (unlikely(!req->metadata_sg))
+			goto out_free;
+	}
+
+	return 0;
+out_free:
+	sgl_free(req->sg);
+out:
+	return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);
+
+void nvmet_req_free_sgls(struct nvmet_req *req)
+{
+	if (req->p2p_dev) {
+		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
+		if (req->metadata_sg)
+			pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
+		req->p2p_dev = NULL;
+	} else {
+		sgl_free(req->sg);
+		if (req->metadata_sg)
+			sgl_free(req->metadata_sg);
+	}
+
+	req->sg = NULL;
+	req->metadata_sg = NULL;
+	req->sg_cnt = 0;
+	req->metadata_sg_cnt = 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
```
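
Transport-side usage, sketched with an invented helper name: allocate the data (and metadata) SGLs once the expected transfer length is known, preferring p2pmem when the namespace map provides a device, and release them after the response goes out:

```c
static int example_map_data(struct nvmet_req *req, u32 transfer_len)
{
	req->transfer_len = transfer_len;
	if (!transfer_len)
		return 0;

	/* tries p2pmem first when eligible, else falls back to sgl_alloc() */
	if (nvmet_req_alloc_sgls(req) < 0)
		return -ENOMEM;

	/* ... DMA-map req->sg / req->metadata_sg for the fabric ... */
	return 0;
}

/* and in the completion path, after the response is sent: */
/*	nvmet_req_free_sgls(req);	*/
```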

```diff
 
 static inline bool nvmet_cc_en(u32 cc)
 {
```

```diff
 	if (!subsys) {
 		pr_warn("connect request for invalid subsystem %s!\n",
 			subsysnqn);
-		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
+		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
 		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 	}
 
```

```diff
 
 	pr_warn("could not find controller %d for subsys %s / host %s\n",
 		cntlid, subsysnqn, hostnqn);
-	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
+	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 
 out:
```

```diff
 	return 0;
 }
 
-static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
-		const char *hostnqn)
+bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
 {
 	struct nvmet_host_link *p;
 
+	lockdep_assert_held(&nvmet_config_sem);
+
 	if (subsys->allow_any_host)
+		return true;
+
+	if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
 		return true;
 
 	list_for_each_entry(p, &subsys->hosts, entry) {
```

```diff
 		return false;
 }
 
-static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
-		const char *hostnqn)
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
+		struct nvmet_req *req)
 {
-	struct nvmet_subsys_link *s;
+	struct nvmet_ns *ns;
+	unsigned long idx;
 
-	list_for_each_entry(s, &req->port->subsystems, entry) {
-		if (__nvmet_host_allowed(s->subsys, hostnqn))
-			return true;
-	}
+	if (!req->p2p_client)
+		return;
 
-	return false;
+	ctrl->p2p_client = get_device(req->p2p_client);
+
+	xa_for_each(&ctrl->subsys->namespaces, idx, ns)
+		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
 }
 
-bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
-		const char *hostnqn)
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
 {
-	lockdep_assert_held(&nvmet_config_sem);
+	struct radix_tree_iter iter;
+	void __rcu **slot;
 
-	if (subsys->type == NVME_NQN_DISC)
-		return nvmet_host_discovery_allowed(req, hostnqn);
-	else
-		return __nvmet_host_allowed(subsys, hostnqn);
+	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
+		pci_dev_put(radix_tree_deref_slot(slot));
+
+	put_device(ctrl->p2p_client);
 }
 
 static void nvmet_fatal_error_handler(struct work_struct *work)
```

```diff
 	if (!subsys) {
 		pr_warn("connect request for invalid subsystem %s!\n",
 			subsysnqn);
-		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
+		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
 		goto out;
 	}
 
 	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 	down_read(&nvmet_config_sem);
-	if (!nvmet_host_allowed(req, subsys, hostnqn)) {
+	if (!nvmet_host_allowed(subsys, hostnqn)) {
 		pr_info("connect by host %s for subsystem %s not allowed\n",
 			hostnqn, subsysnqn);
-		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
+		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
 		up_read(&nvmet_config_sem);
 		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
 		goto out_put_subsystem;
```

```diff
 
 	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
 	INIT_LIST_HEAD(&ctrl->async_events);
+	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
 	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
 
 	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
```

```diff
 	if (!ctrl->changed_ns_list)
 		goto out_free_ctrl;
 
-	ctrl->cqs = kcalloc(subsys->max_qid + 1,
-			sizeof(struct nvmet_cq *),
-			GFP_KERNEL);
-	if (!ctrl->cqs)
-		goto out_free_changed_ns_list;
-
 	ctrl->sqs = kcalloc(subsys->max_qid + 1,
 			sizeof(struct nvmet_sq *),
 			GFP_KERNEL);
 	if (!ctrl->sqs)
-		goto out_free_cqs;
+		goto out_free_changed_ns_list;
+
+	if (subsys->cntlid_min > subsys->cntlid_max)
+		goto out_free_sqs;
 
 	ret = ida_simple_get(&cntlid_ida,
-			NVME_CNTLID_MIN, NVME_CNTLID_MAX,
+			subsys->cntlid_min, subsys->cntlid_max,
 			GFP_KERNEL);
 	if (ret < 0) {
 		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
```

```diff
 	ctrl->cntlid = ret;
 
 	ctrl->ops = req->ops;
-	if (ctrl->subsys->type == NVME_NQN_DISC) {
-		/* Don't accept keep-alive timeout for discovery controllers */
-		if (kato) {
-			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
-			goto out_remove_ida;
-		}
 
-		/*
-		 * Discovery controllers use some arbitrary high value in order
-		 * to cleanup stale discovery sessions
-		 *
-		 * From the latest base diff RC:
-		 * "The Keep Alive command is not supported by
-		 * Discovery controllers. A transport may specify a
-		 * fixed Discovery controller activity timeout value
-		 * (e.g., 2 minutes). If no commands are received
-		 * by a Discovery controller within that time
-		 * period, the controller may perform the
-		 * actions for Keep Alive Timer expiration".
-		 */
-		ctrl->kato = NVMET_DISC_KATO;
-	} else {
-		/* keep-alive timeout in seconds */
-		ctrl->kato = DIV_ROUND_UP(kato, 1000);
-	}
+	/*
+	 * Discovery controllers may use some arbitrary high value
+	 * in order to cleanup stale discovery sessions
+	 */
+	if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
+		kato = NVMET_DISC_KATO_MS;
+
+	/* keep-alive timeout in seconds */
+	ctrl->kato = DIV_ROUND_UP(kato, 1000);
+
+	ctrl->err_counter = 0;
+	spin_lock_init(&ctrl->error_lock);
+
 	nvmet_start_keep_alive_timer(ctrl);
 
 	mutex_lock(&subsys->lock);
 	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
+	nvmet_setup_p2p_ns_map(ctrl, req);
 	mutex_unlock(&subsys->lock);
 
 	*ctrlp = ctrl;
 	return 0;
 
-out_remove_ida:
-	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
 out_free_sqs:
 	kfree(ctrl->sqs);
-out_free_cqs:
-	kfree(ctrl->cqs);
 out_free_changed_ns_list:
 	kfree(ctrl->changed_ns_list);
 out_free_ctrl:
```

```diff
 	struct nvmet_subsys *subsys = ctrl->subsys;
 
 	mutex_lock(&subsys->lock);
+	nvmet_release_p2p_ns_map(ctrl);
 	list_del(&ctrl->subsys_entry);
 	mutex_unlock(&subsys->lock);
 
```

```diff
 
 	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
 
+	nvmet_async_events_free(ctrl);
 	kfree(ctrl->sqs);
-	kfree(ctrl->cqs);
 	kfree(ctrl->changed_ns_list);
 	kfree(ctrl);
 
```

```diff
 	if (!port)
 		return NULL;
 
-	if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
-			NVMF_NQN_SIZE)) {
+	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
 		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
 			return NULL;
 		return nvmet_disc_subsys;
```

```diff
 
 	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
 	if (!subsys)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
-	subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
+	subsys->ver = NVMET_DEFAULT_VS;
 	/* generate a random serial number as our controllers are ephemeral: */
 	get_random_bytes(&subsys->serial, sizeof(subsys->serial));
 
```

```diff
 	default:
 		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
 		kfree(subsys);
-		return NULL;
+		return ERR_PTR(-EINVAL);
 	}
 	subsys->type = type;
 	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
 			GFP_KERNEL);
 	if (!subsys->subsysnqn) {
 		kfree(subsys);
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
-
+	subsys->cntlid_min = NVME_CNTLID_MIN;
+	subsys->cntlid_max = NVME_CNTLID_MAX;
 	kref_init(&subsys->ref);
 
 	mutex_init(&subsys->lock);
-	INIT_LIST_HEAD(&subsys->namespaces);
+	xa_init(&subsys->namespaces);
 	INIT_LIST_HEAD(&subsys->ctrls);
 	INIT_LIST_HEAD(&subsys->hosts);
 
```
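
Since nvmet_subsys_alloc() now returns ERR_PTR() values, callers have to distinguish error codes instead of treating every failure as an allocation failure. A caller-side sketch (the call site and function name are hypothetical):

```c
static int example_make_subsys(const char *subsysnqn)
{
	struct nvmet_subsys *subsys;

	subsys = nvmet_subsys_alloc(subsysnqn, NVME_NQN_NVME);
	if (IS_ERR(subsys))
		return PTR_ERR(subsys);	/* -ENOMEM or -EINVAL, not just NULL */

	/* ... configure and register the subsystem ... */
	return 0;
}
```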

```diff
 	struct nvmet_subsys *subsys =
 		container_of(ref, struct nvmet_subsys, ref);
 
-	WARN_ON_ONCE(!list_empty(&subsys->namespaces));
+	WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
+
+	xa_destroy(&subsys->namespaces);
+	nvmet_passthru_subsys_free(subsys);
 
 	kfree(subsys->subsysnqn);
+	kfree_rcu(subsys->model, rcuhead);
 	kfree(subsys);
 }
 
```

```diff
 module_exit(nvmet_exit);
 
 MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);
```
|---|