2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
--- a/kernel/drivers/nvme/target/core.c
+++ b/kernel/drivers/nvme/target/core.c
@@ -1,20 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Common code for the NVMe target.
  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/rculist.h>
+#include <linux/pci-p2pdma.h>
+#include <linux/scatterlist.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
 
 #include "nvmet.h"
 
@@ -44,40 +41,88 @@
 u64 nvmet_ana_chgcnt;
 DECLARE_RWSEM(nvmet_ana_sem);
 
+inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
+{
+	u16 status;
+
+	switch (errno) {
+	case 0:
+		status = NVME_SC_SUCCESS;
+		break;
+	case -ENOSPC:
+		req->error_loc = offsetof(struct nvme_rw_command, length);
+		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
+		break;
+	case -EREMOTEIO:
+		req->error_loc = offsetof(struct nvme_rw_command, slba);
+		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+		break;
+	case -EOPNOTSUPP:
+		req->error_loc = offsetof(struct nvme_common_command, opcode);
+		switch (req->cmd->common.opcode) {
+		case nvme_cmd_dsm:
+		case nvme_cmd_write_zeroes:
+			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
+			break;
+		default:
+			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		}
+		break;
+	case -ENODATA:
+		req->error_loc = offsetof(struct nvme_rw_command, nsid);
+		status = NVME_SC_ACCESS_DENIED;
+		break;
+	case -EIO:
+		fallthrough;
+	default:
+		req->error_loc = offsetof(struct nvme_common_command, opcode);
+		status = NVME_SC_INTERNAL | NVME_SC_DNR;
+	}
+
+	return status;
+}
+
 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
 		const char *subsysnqn);
 
 u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
 		size_t len)
 {
-	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
+	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
+		req->error_loc = offsetof(struct nvme_common_command, dptr);
 		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+	}
 	return 0;
 }
 
 u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
 {
-	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
+	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
+		req->error_loc = offsetof(struct nvme_common_command, dptr);
 		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+	}
 	return 0;
 }
 
 u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
 {
-	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len)
+	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
+		req->error_loc = offsetof(struct nvme_common_command, dptr);
 		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+	}
 	return 0;
 }
 
 static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
 {
-	struct nvmet_ns *ns;
+	unsigned long nsid = 0;
+	struct nvmet_ns *cur;
+	unsigned long idx;
 
-	if (list_empty(&subsys->namespaces))
-		return 0;
+	xa_for_each(&subsys->namespaces, idx, cur)
+		nsid = cur->nsid;
 
-	ns = list_last_entry(&subsys->namespaces, struct nvmet_ns, dev_link);
-	return ns->nsid;
+	return nsid;
 }
 
 static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
@@ -85,39 +130,30 @@
 	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
 }
 
-static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
+static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
 {
+	u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
 	struct nvmet_req *req;
 
-	while (1) {
-		mutex_lock(&ctrl->lock);
-		if (!ctrl->nr_async_event_cmds) {
-			mutex_unlock(&ctrl->lock);
-			return;
-		}
-
+	mutex_lock(&ctrl->lock);
+	while (ctrl->nr_async_event_cmds) {
 		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
 		mutex_unlock(&ctrl->lock);
-		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
+		nvmet_req_complete(req, status);
+		mutex_lock(&ctrl->lock);
 	}
+	mutex_unlock(&ctrl->lock);
 }
 
-static void nvmet_async_event_work(struct work_struct *work)
+static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
 {
-	struct nvmet_ctrl *ctrl =
-		container_of(work, struct nvmet_ctrl, async_event_work);
 	struct nvmet_async_event *aen;
 	struct nvmet_req *req;
 
-	while (1) {
-		mutex_lock(&ctrl->lock);
-		aen = list_first_entry_or_null(&ctrl->async_events,
-				struct nvmet_async_event, entry);
-		if (!aen || !ctrl->nr_async_event_cmds) {
-			mutex_unlock(&ctrl->lock);
-			return;
-		}
-
+	mutex_lock(&ctrl->lock);
+	while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
+		aen = list_first_entry(&ctrl->async_events,
+				struct nvmet_async_event, entry);
 		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
 		nvmet_set_result(req, nvmet_async_event_result(aen));
 
@@ -125,11 +161,34 @@
 		kfree(aen);
 
 		mutex_unlock(&ctrl->lock);
+		trace_nvmet_async_event(ctrl, req->cqe->result.u32);
 		nvmet_req_complete(req, 0);
+		mutex_lock(&ctrl->lock);
 	}
+	mutex_unlock(&ctrl->lock);
 }
 
-static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
+static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
+{
+	struct nvmet_async_event *aen, *tmp;
+
+	mutex_lock(&ctrl->lock);
+	list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
+		list_del(&aen->entry);
+		kfree(aen);
+	}
+	mutex_unlock(&ctrl->lock);
+}
+
+static void nvmet_async_event_work(struct work_struct *work)
+{
+	struct nvmet_ctrl *ctrl =
+		container_of(work, struct nvmet_ctrl, async_event_work);
+
+	nvmet_async_events_process(ctrl);
+}
+
+void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
 		u8 event_info, u8 log_page)
 {
 	struct nvmet_async_event *aen;
@@ -147,13 +206,6 @@
 	mutex_unlock(&ctrl->lock);
 
 	schedule_work(&ctrl->async_event_work);
-}
-
-static bool nvmet_aen_disabled(struct nvmet_ctrl *ctrl, u32 aen)
-{
-	if (!(READ_ONCE(ctrl->aen_enabled) & aen))
-		return true;
-	return test_and_set_bit(aen, &ctrl->aen_masked);
 }
 
 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
@@ -184,9 +236,11 @@
 {
 	struct nvmet_ctrl *ctrl;
 
+	lockdep_assert_held(&subsys->lock);
+
 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
 		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
-		if (nvmet_aen_disabled(ctrl, NVME_AEN_CFG_NS_ATTR))
+		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
 			continue;
 		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
 				NVME_AER_NOTICE_NS_CHANGED,
@@ -203,7 +257,7 @@
 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
 		if (port && ctrl->port != port)
 			continue;
-		if (nvmet_aen_disabled(ctrl, NVME_AEN_CFG_ANA_CHANGE))
+		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
 			continue;
 		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
 				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
@@ -244,6 +298,18 @@
 }
 EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
 
+void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
+{
+	struct nvmet_ctrl *ctrl;
+
+	mutex_lock(&subsys->lock);
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+		if (ctrl->port == port)
+			ctrl->ops->delete_ctrl(ctrl);
+	}
+	mutex_unlock(&subsys->lock);
+}
+
 int nvmet_enable_port(struct nvmet_port *port)
 {
 	const struct nvmet_fabrics_ops *ops;
@@ -267,18 +333,32 @@
 	if (!try_module_get(ops->owner))
 		return -EINVAL;
 
-	ret = ops->add_port(port);
-	if (ret) {
-		module_put(ops->owner);
-		return ret;
+	/*
+	 * If the user requested PI support and the transport isn't pi capable,
+	 * don't enable the port.
+	 */
+	if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
+		pr_err("T10-PI is not supported by transport type %d\n",
+		       port->disc_addr.trtype);
+		ret = -EINVAL;
+		goto out_put;
 	}
+
+	ret = ops->add_port(port);
+	if (ret)
+		goto out_put;
 
 	/* If the transport didn't set inline_data_size, then disable it. */
 	if (port->inline_data_size < 0)
 		port->inline_data_size = 0;
 
 	port->enabled = true;
+	port->tr_ops = ops;
 	return 0;
+
+out_put:
+	module_put(ops->owner);
+	return ret;
 }
 
 void nvmet_disable_port(struct nvmet_port *port)
@@ -288,6 +368,7 @@
 	lockdep_assert_held(&nvmet_config_sem);
 
 	port->enabled = false;
+	port->tr_ops = NULL;
 
 	ops = nvmet_transports[port->disc_addr.trtype];
 	ops->remove_port(port);
@@ -298,6 +379,15 @@
 {
 	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
 			struct nvmet_ctrl, ka_work);
+	bool reset_tbkas = ctrl->reset_tbkas;
+
+	ctrl->reset_tbkas = false;
+	if (reset_tbkas) {
+		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
+			ctrl->cntlid);
+		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+		return;
+	}
 
 	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
 		ctrl->cntlid, ctrl->kato);
@@ -305,7 +395,7 @@
 	nvmet_ctrl_fatal_error(ctrl);
 }
 
-static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
+void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
 {
 	if (unlikely(ctrl->kato == 0))
 		return;
@@ -317,7 +407,7 @@
 	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
 }
 
-static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
+void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
 {
 	if (unlikely(ctrl->kato == 0))
 		return;
@@ -327,28 +417,13 @@
 	cancel_delayed_work_sync(&ctrl->ka_work);
 }
 
-static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
-		__le32 nsid)
-{
-	struct nvmet_ns *ns;
-
-	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
-		if (ns->nsid == le32_to_cpu(nsid))
-			return ns;
-	}
-
-	return NULL;
-}
-
 struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
 {
 	struct nvmet_ns *ns;
 
-	rcu_read_lock();
-	ns = __nvmet_find_namespace(ctrl, nsid);
+	ns = xa_load(&ctrl->subsys->namespaces, le32_to_cpu(nsid));
 	if (ns)
 		percpu_ref_get(&ns->ref);
-	rcu_read_unlock();
 
 	return ns;
 }
@@ -371,17 +446,121 @@
 	nvmet_file_ns_disable(ns);
 }
 
+static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
+{
+	int ret;
+	struct pci_dev *p2p_dev;
+
+	if (!ns->use_p2pmem)
+		return 0;
+
+	if (!ns->bdev) {
+		pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
+		return -EINVAL;
+	}
+
+	if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
+		pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
+		       ns->device_path);
+		return -EINVAL;
+	}
+
+	if (ns->p2p_dev) {
+		ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
+		if (ret < 0)
+			return -EINVAL;
+	} else {
+		/*
+		 * Right now we just check that there is p2pmem available so
+		 * we can report an error to the user right away if there
+		 * is not. We'll find the actual device to use once we
+		 * setup the controller when the port's device is available.
+		 */
+
+		p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
+		if (!p2p_dev) {
+			pr_err("no peer-to-peer memory is available for %s\n",
+			       ns->device_path);
+			return -EINVAL;
+		}
+
+		pci_dev_put(p2p_dev);
+	}
+
+	return 0;
+}
+
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
+		struct nvmet_ns *ns)
+{
+	struct device *clients[2];
+	struct pci_dev *p2p_dev;
+	int ret;
+
+	if (!ctrl->p2p_client || !ns->use_p2pmem)
+		return;
+
+	if (ns->p2p_dev) {
+		ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
+		if (ret < 0)
+			return;
+
+		p2p_dev = pci_dev_get(ns->p2p_dev);
+	} else {
+		clients[0] = ctrl->p2p_client;
+		clients[1] = nvmet_ns_dev(ns);
+
+		p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
+		if (!p2p_dev) {
+			pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
+			       dev_name(ctrl->p2p_client), ns->device_path);
+			return;
+		}
+	}
+
+	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
+	if (ret < 0)
+		pci_dev_put(p2p_dev);
+
+	pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
+		ns->nsid);
+}
+
+void nvmet_ns_revalidate(struct nvmet_ns *ns)
+{
+	loff_t oldsize = ns->size;
+
+	if (ns->bdev)
+		nvmet_bdev_ns_revalidate(ns);
+	else
+		nvmet_file_ns_revalidate(ns);
+
+	if (oldsize != ns->size)
+		nvmet_ns_changed(ns->subsys, ns->nsid);
+}
+
 int nvmet_ns_enable(struct nvmet_ns *ns)
 {
 	struct nvmet_subsys *subsys = ns->subsys;
+	struct nvmet_ctrl *ctrl;
 	int ret;
 
 	mutex_lock(&subsys->lock);
+	ret = 0;
+
+	if (nvmet_passthru_ctrl(subsys)) {
+		pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
+		goto out_unlock;
+	}
+
+	if (ns->enabled)
+		goto out_unlock;
+
 	ret = -EMFILE;
 	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
-		goto out_unlock;
-	ret = 0;
-	if (ns->enabled)
 		goto out_unlock;
 
 	ret = nvmet_bdev_ns_enable(ns);
@@ -389,6 +568,13 @@
 	ret = nvmet_file_ns_enable(ns);
 	if (ret)
 		goto out_unlock;
+
+	ret = nvmet_p2pmem_ns_enable(ns);
+	if (ret)
+		goto out_dev_disable;
+
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
 
 	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
 				0, GFP_KERNEL);
@@ -398,23 +584,10 @@
 	if (ns->nsid > subsys->max_nsid)
 		subsys->max_nsid = ns->nsid;
 
-	/*
-	 * The namespaces list needs to be sorted to simplify the implementation
-	 * of the Identify Namespace List subcommand.
-	 */
-	if (list_empty(&subsys->namespaces)) {
-		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
-	} else {
-		struct nvmet_ns *old;
+	ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
+	if (ret)
+		goto out_restore_subsys_maxnsid;
 
-		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
-			BUG_ON(ns->nsid == old->nsid);
-			if (ns->nsid < old->nsid)
-				break;
-		}
-
-		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
-	}
 	subsys->nr_namespaces++;
 
 	nvmet_ns_changed(subsys, ns->nsid);
@@ -423,7 +596,14 @@
 out_unlock:
 	mutex_unlock(&subsys->lock);
 	return ret;
+
+out_restore_subsys_maxnsid:
+	subsys->max_nsid = nvmet_max_nsid(subsys);
+	percpu_ref_exit(&ns->ref);
 out_dev_put:
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
+out_dev_disable:
 	nvmet_ns_dev_disable(ns);
 	goto out_unlock;
 }
@@ -431,15 +611,20 @@
 void nvmet_ns_disable(struct nvmet_ns *ns)
 {
 	struct nvmet_subsys *subsys = ns->subsys;
+	struct nvmet_ctrl *ctrl;
 
 	mutex_lock(&subsys->lock);
 	if (!ns->enabled)
 		goto out_unlock;
 
 	ns->enabled = false;
-	list_del_rcu(&ns->dev_link);
+	xa_erase(&ns->subsys->namespaces, ns->nsid);
 	if (ns->nsid == subsys->max_nsid)
 		subsys->max_nsid = nvmet_max_nsid(subsys);
+
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
+
 	mutex_unlock(&subsys->lock);
 
 	/*
@@ -456,6 +641,7 @@
 	percpu_ref_exit(&ns->ref);
 
 	mutex_lock(&subsys->lock);
+
 	subsys->nr_namespaces--;
 	nvmet_ns_changed(subsys, ns->nsid);
 	nvmet_ns_dev_disable(ns);
@@ -483,7 +669,6 @@
 	if (!ns)
 		return NULL;
 
-	INIT_LIST_HEAD(&ns->dev_link);
 	init_completion(&ns->disable_done);
 
 	ns->nsid = nsid;
@@ -500,35 +685,74 @@
 	return ns;
 }
 
-static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
+static void nvmet_update_sq_head(struct nvmet_req *req)
 {
-	u32 old_sqhd, new_sqhd;
-	u16 sqhd;
-
-	if (status)
-		nvmet_set_status(req, status);
-
 	if (req->sq->size) {
+		u32 old_sqhd, new_sqhd;
+
 		do {
 			old_sqhd = req->sq->sqhd;
 			new_sqhd = (old_sqhd + 1) % req->sq->size;
 		} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
 				old_sqhd);
 	}
-	sqhd = req->sq->sqhd & 0x0000FFFF;
-	req->rsp->sq_head = cpu_to_le16(sqhd);
-	req->rsp->sq_id = cpu_to_le16(req->sq->qid);
-	req->rsp->command_id = req->cmd->common.command_id;
+	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
+}
 
-	if (req->ns)
-		nvmet_put_namespace(req->ns);
+static void nvmet_set_error(struct nvmet_req *req, u16 status)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	struct nvme_error_slot *new_error_slot;
+	unsigned long flags;
+
+	req->cqe->status = cpu_to_le16(status << 1);
+
+	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
+		return;
+
+	spin_lock_irqsave(&ctrl->error_lock, flags);
+	ctrl->err_counter++;
+	new_error_slot =
+		&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];
+
+	new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
+	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
+	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
+	new_error_slot->status_field = cpu_to_le16(status << 1);
+	new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
+	new_error_slot->lba = cpu_to_le64(req->error_slba);
+	new_error_slot->nsid = req->cmd->common.nsid;
+	spin_unlock_irqrestore(&ctrl->error_lock, flags);
+
+	/* set the more bit for this request */
+	req->cqe->status |= cpu_to_le16(1 << 14);
+}
+
+static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
+{
+	struct nvmet_ns *ns = req->ns;
+
+	if (!req->sq->sqhd_disabled)
+		nvmet_update_sq_head(req);
+	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
+	req->cqe->command_id = req->cmd->common.command_id;
+
+	if (unlikely(status))
+		nvmet_set_error(req, status);
+
+	trace_nvmet_req_complete(req);
+
 	req->ops->queue_response(req);
+	if (ns)
+		nvmet_put_namespace(ns);
 }
 
 void nvmet_req_complete(struct nvmet_req *req, u16 status)
 {
+	struct nvmet_sq *sq = req->sq;
+
 	__nvmet_req_complete(req, status);
-	percpu_ref_put(&req->sq->ref);
+	percpu_ref_put(&sq->ref);
 }
 EXPORT_SYMBOL_GPL(nvmet_req_complete);
 
@@ -537,8 +761,6 @@
 {
 	cq->qid = qid;
 	cq->size = size;
-
-	ctrl->cqs[qid] = cq;
 }
 
 void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
@@ -560,19 +782,28 @@
 
 void nvmet_sq_destroy(struct nvmet_sq *sq)
 {
+	struct nvmet_ctrl *ctrl = sq->ctrl;
+
 	/*
 	 * If this is the admin queue, complete all AERs so that our
 	 * queue doesn't have outstanding requests on it.
 	 */
-	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
-		nvmet_async_events_free(sq->ctrl);
+	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
+		nvmet_async_events_failall(ctrl);
 	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
 	wait_for_completion(&sq->confirm_done);
 	wait_for_completion(&sq->free_done);
 	percpu_ref_exit(&sq->ref);
 
-	if (sq->ctrl) {
-		nvmet_ctrl_put(sq->ctrl);
+	if (ctrl) {
+		/*
+		 * The teardown flow may take some time, and the host may not
+		 * send us keep-alive during this period, hence reset the
+		 * traffic based keep-alive timer so we don't trigger a
+		 * controller teardown as a result of a keep-alive expiration.
+		 */
+		ctrl->reset_tbkas = true;
+		nvmet_ctrl_put(ctrl);
 		sq->ctrl = NULL; /* allows reusing the queue later */
 	}
 }
@@ -639,15 +870,24 @@
 	if (unlikely(ret))
 		return ret;
 
+	if (nvmet_req_passthru_ctrl(req))
+		return nvmet_parse_passthru_io_cmd(req);
+
 	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
-	if (unlikely(!req->ns))
+	if (unlikely(!req->ns)) {
+		req->error_loc = offsetof(struct nvme_common_command, nsid);
 		return NVME_SC_INVALID_NS | NVME_SC_DNR;
+	}
 	ret = nvmet_check_ana_state(req->port, req->ns);
-	if (unlikely(ret))
+	if (unlikely(ret)) {
+		req->error_loc = offsetof(struct nvme_common_command, nsid);
 		return ret;
+	}
 	ret = nvmet_io_cmd_check_access(req);
-	if (unlikely(ret))
+	if (unlikely(ret)) {
+		req->error_loc = offsetof(struct nvme_common_command, nsid);
 		return ret;
+	}
 
 	if (req->ns->file)
 		return nvmet_file_parse_io_cmd(req);
@@ -665,13 +905,20 @@
 	req->sq = sq;
 	req->ops = ops;
 	req->sg = NULL;
+	req->metadata_sg = NULL;
 	req->sg_cnt = 0;
+	req->metadata_sg_cnt = 0;
 	req->transfer_len = 0;
-	req->rsp->status = 0;
+	req->metadata_len = 0;
+	req->cqe->status = 0;
+	req->cqe->sq_head = 0;
 	req->ns = NULL;
+	req->error_loc = NVMET_NO_ERROR_LOC;
+	req->error_slba = 0;
 
 	/* no support for fused commands yet */
 	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
+		req->error_loc = offsetof(struct nvme_common_command, flags);
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		goto fail;
 	}
@@ -682,29 +929,31 @@
 	 * byte aligned.
 	 */
 	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
+		req->error_loc = offsetof(struct nvme_common_command, flags);
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		goto fail;
 	}
 
 	if (unlikely(!req->sq->ctrl))
-		/* will return an error for any Non-connect command: */
+		/* will return an error for any non-connect command: */
 		status = nvmet_parse_connect_cmd(req);
 	else if (likely(req->sq->qid != 0))
 		status = nvmet_parse_io_cmd(req);
-	else if (req->cmd->common.opcode == nvme_fabrics_command)
-		status = nvmet_parse_fabrics_cmd(req);
-	else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
-		status = nvmet_parse_discovery_cmd(req);
 	else
 		status = nvmet_parse_admin_cmd(req);
 
 	if (status)
 		goto fail;
 
+	trace_nvmet_req_init(req, req->cmd);
+
 	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		goto fail;
 	}
+
+	if (sq->ctrl)
+		sq->ctrl->reset_tbkas = true;
 
 	return true;
 
@@ -722,14 +971,112 @@
 }
 EXPORT_SYMBOL_GPL(nvmet_req_uninit);
 
-void nvmet_req_execute(struct nvmet_req *req)
+bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
 {
-	if (unlikely(req->data_len != req->transfer_len))
+	if (unlikely(len != req->transfer_len)) {
+		req->error_loc = offsetof(struct nvme_common_command, dptr);
 		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
-	else
-		req->execute(req);
+		return false;
+	}
+
+	return true;
 }
-EXPORT_SYMBOL_GPL(nvmet_req_execute);
+EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
+
+bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
+{
+	if (unlikely(data_len > req->transfer_len)) {
+		req->error_loc = offsetof(struct nvme_common_command, dptr);
+		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
+		return false;
+	}
+
+	return true;
+}
+
+static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
+{
+	return req->transfer_len - req->metadata_len;
+}
+
+static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
+		struct nvmet_req *req)
+{
+	req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
+			nvmet_data_transfer_len(req));
+	if (!req->sg)
+		goto out_err;
+
+	if (req->metadata_len) {
+		req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
+				&req->metadata_sg_cnt, req->metadata_len);
+		if (!req->metadata_sg)
+			goto out_free_sg;
+	}
+
+	req->p2p_dev = p2p_dev;
+
+	return 0;
+out_free_sg:
+	pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
+out_err:
+	return -ENOMEM;
+}
+
+static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
+{
+	if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
+	    !req->sq->ctrl || !req->sq->qid || !req->ns)
+		return NULL;
+	return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
+}
+
+int nvmet_req_alloc_sgls(struct nvmet_req *req)
+{
+	struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
+
+	if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
+		return 0;
+
+	req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
+			    &req->sg_cnt);
+	if (unlikely(!req->sg))
+		goto out;
+
+	if (req->metadata_len) {
+		req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
+					     &req->metadata_sg_cnt);
+		if (unlikely(!req->metadata_sg))
+			goto out_free;
+	}
+
+	return 0;
+out_free:
+	sgl_free(req->sg);
+out:
+	return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);
+
+void nvmet_req_free_sgls(struct nvmet_req *req)
+{
+	if (req->p2p_dev) {
+		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
+		if (req->metadata_sg)
+			pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
+		req->p2p_dev = NULL;
+	} else {
+		sgl_free(req->sg);
+		if (req->metadata_sg)
+			sgl_free(req->metadata_sg);
+	}
+
+	req->sg = NULL;
+	req->metadata_sg = NULL;
+	req->sg_cnt = 0;
+	req->metadata_sg_cnt = 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
 
 static inline bool nvmet_cc_en(u32 cc)
 {
@@ -853,7 +1200,7 @@
 	if (!subsys) {
 		pr_warn("connect request for invalid subsystem %s!\n",
 			subsysnqn);
-		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
+		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
 		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 	}
 
@@ -874,7 +1221,7 @@
 
 	pr_warn("could not find controller %d for subsys %s / host %s\n",
 		cntlid, subsysnqn, hostnqn);
-	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
+	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 
 out:
@@ -899,12 +1246,16 @@
 	return 0;
 }
 
-static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
-		const char *hostnqn)
+bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
 {
 	struct nvmet_host_link *p;
 
+	lockdep_assert_held(&nvmet_config_sem);
+
 	if (subsys->allow_any_host)
+		return true;
+
+	if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
 		return true;
 
 	list_for_each_entry(p, &subsys->hosts, entry) {
@@ -915,28 +1266,36 @@
 	return false;
 }
 
-static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
-		const char *hostnqn)
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
+		struct nvmet_req *req)
 {
-	struct nvmet_subsys_link *s;
+	struct nvmet_ns *ns;
+	unsigned long idx;
 
-	list_for_each_entry(s, &req->port->subsystems, entry) {
-		if (__nvmet_host_allowed(s->subsys, hostnqn))
-			return true;
-	}
+	if (!req->p2p_client)
+		return;
 
-	return false;
+	ctrl->p2p_client = get_device(req->p2p_client);
+
+	xa_for_each(&ctrl->subsys->namespaces, idx, ns)
+		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
 }
 
-bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
-		const char *hostnqn)
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
 {
-	lockdep_assert_held(&nvmet_config_sem);
+	struct radix_tree_iter iter;
+	void __rcu **slot;
 
-	if (subsys->type == NVME_NQN_DISC)
-		return nvmet_host_discovery_allowed(req, hostnqn);
-	else
-		return __nvmet_host_allowed(subsys, hostnqn);
+	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
+		pci_dev_put(radix_tree_deref_slot(slot));
+
+	put_device(ctrl->p2p_client);
 }
 
 static void nvmet_fatal_error_handler(struct work_struct *work)
@@ -961,16 +1320,16 @@
 	if (!subsys) {
 		pr_warn("connect request for invalid subsystem %s!\n",
 			subsysnqn);
-		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
+		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
 		goto out;
 	}
 
 	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 	down_read(&nvmet_config_sem);
-	if (!nvmet_host_allowed(req, subsys, hostnqn)) {
+	if (!nvmet_host_allowed(subsys, hostnqn)) {
 		pr_info("connect by host %s for subsystem %s not allowed\n",
 			hostnqn, subsysnqn);
-		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
+		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
 		up_read(&nvmet_config_sem);
 		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
 		goto out_put_subsystem;
@@ -989,6 +1348,7 @@
 
 	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
 	INIT_LIST_HEAD(&ctrl->async_events);
+	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
 	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
 
 	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
@@ -1003,20 +1363,17 @@
 	if (!ctrl->changed_ns_list)
 		goto out_free_ctrl;
 
-	ctrl->cqs = kcalloc(subsys->max_qid + 1,
-			sizeof(struct nvmet_cq *),
-			GFP_KERNEL);
-	if (!ctrl->cqs)
-		goto out_free_changed_ns_list;
-
 	ctrl->sqs = kcalloc(subsys->max_qid + 1,
 			sizeof(struct nvmet_sq *),
 			GFP_KERNEL);
 	if (!ctrl->sqs)
-		goto out_free_cqs;
+		goto out_free_changed_ns_list;
+
+	if (subsys->cntlid_min > subsys->cntlid_max)
+		goto out_free_sqs;
 
 	ret = ida_simple_get(&cntlid_ida,
-			NVME_CNTLID_MIN, NVME_CNTLID_MAX,
+			subsys->cntlid_min, subsys->cntlid_max,
 			GFP_KERNEL);
 	if (ret < 0) {
 		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
@@ -1025,46 +1382,32 @@
 	ctrl->cntlid = ret;
 
 	ctrl->ops = req->ops;
-	if (ctrl->subsys->type == NVME_NQN_DISC) {
-		/* Don't accept keep-alive timeout for discovery controllers */
-		if (kato) {
-			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
-			goto out_remove_ida;
-		}
 
-		/*
-		 * Discovery controllers use some arbitrary high value in order
-		 * to cleanup stale discovery sessions
-		 *
-		 * From the latest base diff RC:
-		 * "The Keep Alive command is not supported by
-		 * Discovery controllers. A transport may specify a
-		 * fixed Discovery controller activity timeout value
-		 * (e.g., 2 minutes). If no commands are received
-		 * by a Discovery controller within that time
-		 * period, the controller may perform the
-		 * actions for Keep Alive Timer expiration".
-		 */
-		ctrl->kato = NVMET_DISC_KATO;
-	} else {
-		/* keep-alive timeout in seconds */
-		ctrl->kato = DIV_ROUND_UP(kato, 1000);
-	}
+	/*
+	 * Discovery controllers may use some arbitrary high value
+	 * in order to cleanup stale discovery sessions
+	 */
+	if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
+		kato = NVMET_DISC_KATO_MS;
+
+	/* keep-alive timeout in seconds */
+	ctrl->kato = DIV_ROUND_UP(kato, 1000);
+
+	ctrl->err_counter = 0;
+	spin_lock_init(&ctrl->error_lock);
+
 	nvmet_start_keep_alive_timer(ctrl);
 
 	mutex_lock(&subsys->lock);
 	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
+	nvmet_setup_p2p_ns_map(ctrl, req);
 	mutex_unlock(&subsys->lock);
 
 	*ctrlp = ctrl;
 	return 0;
 
-out_remove_ida:
-	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
 out_free_sqs:
 	kfree(ctrl->sqs);
-out_free_cqs:
-	kfree(ctrl->cqs);
 out_free_changed_ns_list:
 	kfree(ctrl->changed_ns_list);
 out_free_ctrl:
@@ -1081,6 +1424,7 @@
 	struct nvmet_subsys *subsys = ctrl->subsys;
 
 	mutex_lock(&subsys->lock);
+	nvmet_release_p2p_ns_map(ctrl);
 	list_del(&ctrl->subsys_entry);
 	mutex_unlock(&subsys->lock);
 
@@ -1091,8 +1435,8 @@
 
 	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
 
+	nvmet_async_events_free(ctrl);
 	kfree(ctrl->sqs);
-	kfree(ctrl->cqs);
 	kfree(ctrl->changed_ns_list);
 	kfree(ctrl);
 
@@ -1123,8 +1467,7 @@
 	if (!port)
 		return NULL;
 
-	if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
-			NVMF_NQN_SIZE)) {
+	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
 		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
 			return NULL;
 		return nvmet_disc_subsys;
@@ -1151,9 +1494,9 @@
 
 	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
 	if (!subsys)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
-	subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
+	subsys->ver = NVMET_DEFAULT_VS;
 	/* generate a random serial number as our controllers are ephemeral: */
 	get_random_bytes(&subsys->serial, sizeof(subsys->serial));
 
@@ -1167,20 +1510,21 @@
 	default:
 		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
 		kfree(subsys);
-		return NULL;
+		return ERR_PTR(-EINVAL);
 	}
 	subsys->type = type;
 	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
 			GFP_KERNEL);
 	if (!subsys->subsysnqn) {
 		kfree(subsys);
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
-
+	subsys->cntlid_min = NVME_CNTLID_MIN;
+	subsys->cntlid_max = NVME_CNTLID_MAX;
 	kref_init(&subsys->ref);
 
 	mutex_init(&subsys->lock);
-	INIT_LIST_HEAD(&subsys->namespaces);
+	xa_init(&subsys->namespaces);
 	INIT_LIST_HEAD(&subsys->ctrls);
 	INIT_LIST_HEAD(&subsys->hosts);
 
@@ -1192,9 +1536,13 @@
 	struct nvmet_subsys *subsys =
 		container_of(ref, struct nvmet_subsys, ref);
 
-	WARN_ON_ONCE(!list_empty(&subsys->namespaces));
+	WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
+
+	xa_destroy(&subsys->namespaces);
+	nvmet_passthru_subsys_free(subsys);
 
 	kfree(subsys->subsysnqn);
+	kfree_rcu(subsys->model, rcuhead);
 	kfree(subsys);
 }
 
@@ -1258,3 +1606,4 @@
 module_exit(nvmet_exit);
 
 MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);