forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 cde9070d9970eef1f7ec2360586c802a16230ad8
kernel/drivers/nvme/target/configfs.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Configfs interface for the NVMe target.
  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/kernel.h>
@@ -17,66 +9,82 @@
 #include <linux/slab.h>
 #include <linux/stat.h>
 #include <linux/ctype.h>
+#include <linux/pci.h>
+#include <linux/pci-p2pdma.h>

 #include "nvmet.h"

 static const struct config_item_type nvmet_host_type;
 static const struct config_item_type nvmet_subsys_type;

-static const struct nvmet_transport_name {
+static LIST_HEAD(nvmet_ports_list);
+struct list_head *nvmet_ports = &nvmet_ports_list;
+
+struct nvmet_type_name_map {
 	u8 type;
 	const char *name;
-} nvmet_transport_names[] = {
+};
+
+static struct nvmet_type_name_map nvmet_transport[] = {
 	{ NVMF_TRTYPE_RDMA, "rdma" },
 	{ NVMF_TRTYPE_FC, "fc" },
+	{ NVMF_TRTYPE_TCP, "tcp" },
 	{ NVMF_TRTYPE_LOOP, "loop" },
 };
+
+static const struct nvmet_type_name_map nvmet_addr_family[] = {
+	{ NVMF_ADDR_FAMILY_PCI, "pcie" },
+	{ NVMF_ADDR_FAMILY_IP4, "ipv4" },
+	{ NVMF_ADDR_FAMILY_IP6, "ipv6" },
+	{ NVMF_ADDR_FAMILY_IB, "ib" },
+	{ NVMF_ADDR_FAMILY_FC, "fc" },
+	{ NVMF_ADDR_FAMILY_LOOP, "loop" },
+};
+
+static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
+{
+	if (p->enabled)
+		pr_err("Disable port '%u' before changing attribute in %s\n",
+			le16_to_cpu(p->disc_addr.portid), caller);
+	return p->enabled;
+}

 /*
  * nvmet_port Generic ConfigFS definitions.
  * Used in any place in the ConfigFS tree that refers to an address.
  */
-static ssize_t nvmet_addr_adrfam_show(struct config_item *item,
-		char *page)
+static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
 {
-	switch (to_nvmet_port(item)->disc_addr.adrfam) {
-	case NVMF_ADDR_FAMILY_IP4:
-		return sprintf(page, "ipv4\n");
-	case NVMF_ADDR_FAMILY_IP6:
-		return sprintf(page, "ipv6\n");
-	case NVMF_ADDR_FAMILY_IB:
-		return sprintf(page, "ib\n");
-	case NVMF_ADDR_FAMILY_FC:
-		return sprintf(page, "fc\n");
-	default:
-		return sprintf(page, "\n");
+	u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam;
+	int i;
+
+	for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
+		if (nvmet_addr_family[i].type == adrfam)
+			return sprintf(page, "%s\n", nvmet_addr_family[i].name);
 	}
+
+	return sprintf(page, "\n");
 }

 static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
 		const char *page, size_t count)
 {
 	struct nvmet_port *port = to_nvmet_port(item);
+	int i;

-	if (port->enabled) {
-		pr_err("Cannot modify address while enabled\n");
-		pr_err("Disable the address before modifying\n");
+	if (nvmet_is_port_enabled(port, __func__))
 		return -EACCES;
+
+	for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
+		if (sysfs_streq(page, nvmet_addr_family[i].name))
+			goto found;
 	}

-	if (sysfs_streq(page, "ipv4")) {
-		port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP4;
-	} else if (sysfs_streq(page, "ipv6")) {
-		port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP6;
-	} else if (sysfs_streq(page, "ib")) {
-		port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IB;
-	} else if (sysfs_streq(page, "fc")) {
-		port->disc_addr.adrfam = NVMF_ADDR_FAMILY_FC;
-	} else {
-		pr_err("Invalid value '%s' for adrfam\n", page);
-		return -EINVAL;
-	}
+	pr_err("Invalid value '%s' for adrfam\n", page);
+	return -EINVAL;

+found:
+	port->disc_addr.adrfam = nvmet_addr_family[i].type;
 	return count;
 }

@@ -102,11 +110,9 @@
 		return -EINVAL;
 	}

-	if (port->enabled) {
-		pr_err("Cannot modify address while enabled\n");
-		pr_err("Disable the address before modifying\n");
+	if (nvmet_is_port_enabled(port, __func__))
 		return -EACCES;
-	}
+
 	port->disc_addr.portid = cpu_to_le16(portid);
 	return count;
 }
@@ -132,11 +138,8 @@
 		return -EINVAL;
 	}

-	if (port->enabled) {
-		pr_err("Cannot modify address while enabled\n");
-		pr_err("Disable the address before modifying\n");
+	if (nvmet_is_port_enabled(port, __func__))
 		return -EACCES;
-	}

 	if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
 		return -EINVAL;
@@ -145,43 +148,47 @@

 CONFIGFS_ATTR(nvmet_, addr_traddr);

-static ssize_t nvmet_addr_treq_show(struct config_item *item,
-		char *page)
+static const struct nvmet_type_name_map nvmet_addr_treq[] = {
+	{ NVMF_TREQ_NOT_SPECIFIED, "not specified" },
+	{ NVMF_TREQ_REQUIRED, "required" },
+	{ NVMF_TREQ_NOT_REQUIRED, "not required" },
+};
+
+static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
 {
-	switch (to_nvmet_port(item)->disc_addr.treq) {
-	case NVMF_TREQ_NOT_SPECIFIED:
-		return sprintf(page, "not specified\n");
-	case NVMF_TREQ_REQUIRED:
-		return sprintf(page, "required\n");
-	case NVMF_TREQ_NOT_REQUIRED:
-		return sprintf(page, "not required\n");
-	default:
-		return sprintf(page, "\n");
+	u8 treq = to_nvmet_port(item)->disc_addr.treq &
+		NVME_TREQ_SECURE_CHANNEL_MASK;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
+		if (treq == nvmet_addr_treq[i].type)
+			return sprintf(page, "%s\n", nvmet_addr_treq[i].name);
 	}
+
+	return sprintf(page, "\n");
 }

 static ssize_t nvmet_addr_treq_store(struct config_item *item,
 		const char *page, size_t count)
 {
 	struct nvmet_port *port = to_nvmet_port(item);
+	u8 treq = port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK;
+	int i;

-	if (port->enabled) {
-		pr_err("Cannot modify address while enabled\n");
-		pr_err("Disable the address before modifying\n");
+	if (nvmet_is_port_enabled(port, __func__))
 		return -EACCES;
+
+	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
+		if (sysfs_streq(page, nvmet_addr_treq[i].name))
+			goto found;
 	}

-	if (sysfs_streq(page, "not specified")) {
-		port->disc_addr.treq = NVMF_TREQ_NOT_SPECIFIED;
-	} else if (sysfs_streq(page, "required")) {
-		port->disc_addr.treq = NVMF_TREQ_REQUIRED;
-	} else if (sysfs_streq(page, "not required")) {
-		port->disc_addr.treq = NVMF_TREQ_NOT_REQUIRED;
-	} else {
-		pr_err("Invalid value '%s' for treq\n", page);
-		return -EINVAL;
-	}
+	pr_err("Invalid value '%s' for treq\n", page);
+	return -EINVAL;

+found:
+	treq |= nvmet_addr_treq[i].type;
+	port->disc_addr.treq = treq;
 	return count;
 }

@@ -205,11 +212,8 @@
 		pr_err("Invalid value '%s' for trsvcid\n", page);
 		return -EINVAL;
 	}
-	if (port->enabled) {
-		pr_err("Cannot modify address while enabled\n");
-		pr_err("Disable the address before modifying\n");
+	if (nvmet_is_port_enabled(port, __func__))
 		return -EACCES;
-	}

 	if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
 		return -EINVAL;
@@ -232,11 +236,8 @@
 	struct nvmet_port *port = to_nvmet_port(item);
 	int ret;

-	if (port->enabled) {
-		pr_err("Cannot modify inline_data_size while port enabled\n");
-		pr_err("Disable the port before modifying\n");
+	if (nvmet_is_port_enabled(port, __func__))
 		return -EACCES;
-	}
 	ret = kstrtoint(page, 0, &port->inline_data_size);
 	if (ret) {
 		pr_err("Invalid value '%s' for inline_data_size\n", page);
@@ -247,16 +248,45 @@

 CONFIGFS_ATTR(nvmet_, param_inline_data_size);

+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
+		char *page)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+
+	return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable);
+}
+
+static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_port *port = to_nvmet_port(item);
+	bool val;
+
+	if (strtobool(page, &val))
+		return -EINVAL;
+
+	if (port->enabled) {
+		pr_err("Disable port before setting pi_enable value.\n");
+		return -EACCES;
+	}
+
+	port->pi_enable = val;
+	return count;
+}
+
+CONFIGFS_ATTR(nvmet_, param_pi_enable);
+#endif
+
 static ssize_t nvmet_addr_trtype_show(struct config_item *item,
 		char *page)
 {
 	struct nvmet_port *port = to_nvmet_port(item);
 	int i;

-	for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) {
-		if (port->disc_addr.trtype != nvmet_transport_names[i].type)
-			continue;
-		return sprintf(page, "%s\n", nvmet_transport_names[i].name);
+	for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
+		if (port->disc_addr.trtype == nvmet_transport[i].type)
+			return sprintf(page, "%s\n", nvmet_transport[i].name);
 	}

 	return sprintf(page, "\n");
@@ -275,22 +305,20 @@
 	struct nvmet_port *port = to_nvmet_port(item);
 	int i;

-	if (port->enabled) {
-		pr_err("Cannot modify address while enabled\n");
-		pr_err("Disable the address before modifying\n");
+	if (nvmet_is_port_enabled(port, __func__))
 		return -EACCES;
-	}

-	for (i = 0; i < ARRAY_SIZE(nvmet_transport_names); i++) {
-		if (sysfs_streq(page, nvmet_transport_names[i].name))
+	for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
+		if (sysfs_streq(page, nvmet_transport[i].name))
 			goto found;
 	}

 	pr_err("Invalid value '%s' for trtype\n", page);
 	return -EINVAL;
+
 found:
 	memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
-	port->disc_addr.trtype = nvmet_transport_names[i].type;
+	port->disc_addr.trtype = nvmet_transport[i].type;
 	if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
 		nvmet_port_init_tsas_rdma(port);
 	return count;
@@ -326,7 +354,7 @@

 	kfree(ns->device_path);
 	ret = -ENOMEM;
-	ns->device_path = kstrndup(page, len, GFP_KERNEL);
+	ns->device_path = kmemdup_nul(page, len, GFP_KERNEL);
 	if (!ns->device_path)
 		goto out_unlock;

@@ -340,6 +368,48 @@

 CONFIGFS_ATTR(nvmet_ns_, device_path);

+#ifdef CONFIG_PCI_P2PDMA
+static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
+{
+	struct nvmet_ns *ns = to_nvmet_ns(item);
+
+	return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
+}
+
+static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_ns *ns = to_nvmet_ns(item);
+	struct pci_dev *p2p_dev = NULL;
+	bool use_p2pmem;
+	int ret = count;
+	int error;
+
+	mutex_lock(&ns->subsys->lock);
+	if (ns->enabled) {
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+
+	error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
+	if (error) {
+		ret = error;
+		goto out_unlock;
+	}
+
+	ns->use_p2pmem = use_p2pmem;
+	pci_dev_put(ns->p2p_dev);
+	ns->p2p_dev = p2p_dev;
+
+out_unlock:
+	mutex_unlock(&ns->subsys->lock);
+
+	return ret;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, p2pmem);
+#endif /* CONFIG_PCI_P2PDMA */
+
 static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
 {
 	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
@@ -352,13 +422,11 @@
 	struct nvmet_subsys *subsys = ns->subsys;
 	int ret = 0;

-
 	mutex_lock(&subsys->lock);
 	if (ns->enabled) {
 		ret = -EBUSY;
 		goto out_unlock;
 	}
-

 	if (uuid_parse(page, &ns->uuid))
 		ret = -EINVAL;
@@ -502,6 +570,31 @@

 CONFIGFS_ATTR(nvmet_ns_, buffered_io);

+static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_ns *ns = to_nvmet_ns(item);
+	bool val;
+
+	if (strtobool(page, &val))
+		return -EINVAL;
+
+	if (!val)
+		return -EINVAL;
+
+	mutex_lock(&ns->subsys->lock);
+	if (!ns->enabled) {
+		pr_err("enable ns before revalidate.\n");
+		mutex_unlock(&ns->subsys->lock);
+		return -EINVAL;
+	}
+	nvmet_ns_revalidate(ns);
+	mutex_unlock(&ns->subsys->lock);
+	return count;
+}
+
+CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);
+
 static struct configfs_attribute *nvmet_ns_attrs[] = {
 	&nvmet_ns_attr_device_path,
 	&nvmet_ns_attr_device_nguid,
@@ -509,6 +602,10 @@
 	&nvmet_ns_attr_ana_grpid,
 	&nvmet_ns_attr_enable,
 	&nvmet_ns_attr_buffered_io,
+	&nvmet_ns_attr_revalidate_size,
+#ifdef CONFIG_PCI_P2PDMA
+	&nvmet_ns_attr_p2pmem,
+#endif
 	NULL,
 };

@@ -542,8 +639,10 @@
 		goto out;

 	ret = -EINVAL;
-	if (nsid == 0 || nsid == NVME_NSID_ALL)
+	if (nsid == 0 || nsid == NVME_NSID_ALL) {
+		pr_err("invalid nsid %#x", nsid);
 		goto out;
+	}

 	ret = -ENOMEM;
 	ns = nvmet_ns_alloc(subsys, nsid);
@@ -566,6 +665,103 @@
 	.ct_group_ops = &nvmet_namespaces_group_ops,
 	.ct_owner = THIS_MODULE,
 };
+
+#ifdef CONFIG_NVME_TARGET_PASSTHRU
+
+static ssize_t nvmet_passthru_device_path_show(struct config_item *item,
+		char *page)
+{
+	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+
+	return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path);
+}
+
+static ssize_t nvmet_passthru_device_path_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+	size_t len;
+	int ret;
+
+	mutex_lock(&subsys->lock);
+
+	ret = -EBUSY;
+	if (subsys->passthru_ctrl)
+		goto out_unlock;
+
+	ret = -EINVAL;
+	len = strcspn(page, "\n");
+	if (!len)
+		goto out_unlock;
+
+	kfree(subsys->passthru_ctrl_path);
+	ret = -ENOMEM;
+	subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL);
+	if (!subsys->passthru_ctrl_path)
+		goto out_unlock;
+
+	mutex_unlock(&subsys->lock);
+
+	return count;
+out_unlock:
+	mutex_unlock(&subsys->lock);
+	return ret;
+}
+CONFIGFS_ATTR(nvmet_passthru_, device_path);
+
+static ssize_t nvmet_passthru_enable_show(struct config_item *item,
+		char *page)
+{
+	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+
+	return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0);
+}
+
+static ssize_t nvmet_passthru_enable_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+	bool enable;
+	int ret = 0;
+
+	if (strtobool(page, &enable))
+		return -EINVAL;
+
+	if (enable)
+		ret = nvmet_passthru_ctrl_enable(subsys);
+	else
+		nvmet_passthru_ctrl_disable(subsys);
+
+	return ret ? ret : count;
+}
+CONFIGFS_ATTR(nvmet_passthru_, enable);
+
+static struct configfs_attribute *nvmet_passthru_attrs[] = {
+	&nvmet_passthru_attr_device_path,
+	&nvmet_passthru_attr_enable,
+	NULL,
+};
+
+static const struct config_item_type nvmet_passthru_type = {
+	.ct_attrs = nvmet_passthru_attrs,
+	.ct_owner = THIS_MODULE,
+};
+
+static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
+{
+	config_group_init_type_name(&subsys->passthru_group,
+			"passthru", &nvmet_passthru_type);
+	configfs_add_default_group(&subsys->passthru_group,
+			&subsys->group);
+}
+
+#else /* CONFIG_NVME_TARGET_PASSTHRU */
+
+static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
+{
+}
+
+#endif /* CONFIG_NVME_TARGET_PASSTHRU */

 static int nvmet_port_subsys_allow_link(struct config_item *parent,
 		struct config_item *target)
@@ -599,7 +795,8 @@
 	}

 	list_add_tail(&link->entry, &port->subsystems);
-	nvmet_genctr++;
+	nvmet_port_disc_changed(port, subsys);
+
 	up_write(&nvmet_config_sem);
 	return 0;

@@ -626,7 +823,9 @@

 found:
 	list_del(&p->entry);
-	nvmet_genctr++;
+	nvmet_port_del_ctrls(port, subsys);
+	nvmet_port_disc_changed(port, subsys);
+
 	if (list_empty(&port->subsystems))
 		nvmet_disable_port(port);
 	up_write(&nvmet_config_sem);
@@ -675,7 +874,8 @@
 		goto out_free_link;
 	}
 	list_add_tail(&link->entry, &subsys->hosts);
-	nvmet_genctr++;
+	nvmet_subsys_disc_changed(subsys, host);
+
 	up_write(&nvmet_config_sem);
 	return 0;
 out_free_link:
@@ -701,7 +901,8 @@

 found:
 	list_del(&p->entry);
-	nvmet_genctr++;
+	nvmet_subsys_disc_changed(subsys, host);
+
 	up_write(&nvmet_config_sem);
 	kfree(p);
 }
@@ -740,7 +941,11 @@
 		goto out_unlock;
 	}

-	subsys->allow_any_host = allow_any_host;
+	if (subsys->allow_any_host != allow_any_host) {
+		subsys->allow_any_host = allow_any_host;
+		nvmet_subsys_disc_changed(subsys, NULL);
+	}
+
 out_unlock:
 	up_write(&nvmet_config_sem);
 	return ret ? ret : count;
@@ -754,14 +959,14 @@
 	struct nvmet_subsys *subsys = to_subsys(item);

 	if (NVME_TERTIARY(subsys->ver))
-		return snprintf(page, PAGE_SIZE, "%d.%d.%d\n",
-				(int)NVME_MAJOR(subsys->ver),
-				(int)NVME_MINOR(subsys->ver),
-				(int)NVME_TERTIARY(subsys->ver));
-	else
-		return snprintf(page, PAGE_SIZE, "%d.%d\n",
-				(int)NVME_MAJOR(subsys->ver),
-				(int)NVME_MINOR(subsys->ver));
+		return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n",
+				NVME_MAJOR(subsys->ver),
+				NVME_MINOR(subsys->ver),
+				NVME_TERTIARY(subsys->ver));
+
+	return snprintf(page, PAGE_SIZE, "%llu.%llu\n",
+			NVME_MAJOR(subsys->ver),
+			NVME_MINOR(subsys->ver));
 }

 static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
@@ -771,6 +976,9 @@
 	int major, minor, tertiary = 0;
 	int ret;

+	/* passthru subsystems use the underlying controller's version */
+	if (nvmet_passthru_ctrl(subsys))
+		return -EINVAL;

 	ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
 	if (ret != 2 && ret != 3)
@@ -795,20 +1003,177 @@
 static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
 		const char *page, size_t count)
 {
-	struct nvmet_subsys *subsys = to_subsys(item);
+	u64 serial;
+
+	if (sscanf(page, "%llx\n", &serial) != 1)
+		return -EINVAL;

 	down_write(&nvmet_config_sem);
-	sscanf(page, "%llx\n", &subsys->serial);
+	to_subsys(item)->serial = serial;
 	up_write(&nvmet_config_sem);

 	return count;
 }
 CONFIGFS_ATTR(nvmet_subsys_, attr_serial);

+static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item,
+		char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min);
+}
+
+static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
+		const char *page, size_t cnt)
+{
+	u16 cntlid_min;
+
+	if (sscanf(page, "%hu\n", &cntlid_min) != 1)
+		return -EINVAL;
+
+	if (cntlid_min == 0)
+		return -EINVAL;
+
+	down_write(&nvmet_config_sem);
+	if (cntlid_min >= to_subsys(item)->cntlid_max)
+		goto out_unlock;
+	to_subsys(item)->cntlid_min = cntlid_min;
+	up_write(&nvmet_config_sem);
+	return cnt;
+
+out_unlock:
+	up_write(&nvmet_config_sem);
+	return -EINVAL;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min);
+
+static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item,
+		char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max);
+}
+
+static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
+		const char *page, size_t cnt)
+{
+	u16 cntlid_max;
+
+	if (sscanf(page, "%hu\n", &cntlid_max) != 1)
+		return -EINVAL;
+
+	if (cntlid_max == 0)
+		return -EINVAL;
+
+	down_write(&nvmet_config_sem);
+	if (cntlid_max <= to_subsys(item)->cntlid_min)
+		goto out_unlock;
+	to_subsys(item)->cntlid_max = cntlid_max;
+	up_write(&nvmet_config_sem);
+	return cnt;
+
+out_unlock:
+	up_write(&nvmet_config_sem);
+	return -EINVAL;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);
+
+static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
+		char *page)
+{
+	struct nvmet_subsys *subsys = to_subsys(item);
+	struct nvmet_subsys_model *subsys_model;
+	char *model = NVMET_DEFAULT_CTRL_MODEL;
+	int ret;
+
+	rcu_read_lock();
+	subsys_model = rcu_dereference(subsys->model);
+	if (subsys_model)
+		model = subsys_model->number;
+	ret = snprintf(page, PAGE_SIZE, "%s\n", model);
+	rcu_read_unlock();
+
+	return ret;
+}
+
+/* See Section 1.5 of NVMe 1.4 */
+static bool nvmet_is_ascii(const char c)
+{
+	return c >= 0x20 && c <= 0x7e;
+}
+
+static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_subsys *subsys = to_subsys(item);
+	struct nvmet_subsys_model *new_model;
+	char *new_model_number;
+	int pos = 0, len;
+
+	len = strcspn(page, "\n");
+	if (!len)
+		return -EINVAL;
+
+	for (pos = 0; pos < len; pos++) {
+		if (!nvmet_is_ascii(page[pos]))
+			return -EINVAL;
+	}
+
+	new_model_number = kmemdup_nul(page, len, GFP_KERNEL);
+	if (!new_model_number)
+		return -ENOMEM;
+
+	new_model = kzalloc(sizeof(*new_model) + len + 1, GFP_KERNEL);
+	if (!new_model) {
+		kfree(new_model_number);
+		return -ENOMEM;
+	}
+	memcpy(new_model->number, new_model_number, len);
+
+	down_write(&nvmet_config_sem);
+	mutex_lock(&subsys->lock);
+	new_model = rcu_replace_pointer(subsys->model, new_model,
+					mutex_is_locked(&subsys->lock));
+	mutex_unlock(&subsys->lock);
+	up_write(&nvmet_config_sem);
+
+	kfree_rcu(new_model, rcuhead);
+	kfree(new_model_number);
+
+	return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_model);
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
+		char *page)
+{
+	return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support);
+}
+
+static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct nvmet_subsys *subsys = to_subsys(item);
+	bool pi_enable;
+
+	if (strtobool(page, &pi_enable))
+		return -EINVAL;
+
+	subsys->pi_support = pi_enable;
+	return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
+#endif
+
 static struct configfs_attribute *nvmet_subsys_attrs[] = {
 	&nvmet_subsys_attr_attr_allow_any_host,
 	&nvmet_subsys_attr_attr_version,
 	&nvmet_subsys_attr_attr_serial,
+	&nvmet_subsys_attr_attr_cntlid_min,
+	&nvmet_subsys_attr_attr_cntlid_max,
+	&nvmet_subsys_attr_attr_model,
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+	&nvmet_subsys_attr_attr_pi_enable,
+#endif
 	NULL,
 };

@@ -844,8 +1209,8 @@
 	}

 	subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
-	if (!subsys)
-		return ERR_PTR(-ENOMEM);
+	if (IS_ERR(subsys))
+		return ERR_CAST(subsys);

 	config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);

@@ -857,6 +1222,8 @@
 			"allowed_hosts", &nvmet_allowed_hosts_type);
 	configfs_add_default_group(&subsys->allowed_hosts_group,
 			&subsys->group);
+
+	nvmet_add_passthru_group(subsys);

 	return &subsys->group;
 }
@@ -889,7 +1256,7 @@
 	if (enable)
 		nvmet_referral_enable(parent, port);
 	else
-		nvmet_referral_disable(port);
+		nvmet_referral_disable(parent, port);

 	return count;
 inval:
@@ -913,11 +1280,19 @@
 	NULL,
 };

+static void nvmet_referral_notify(struct config_group *group,
+		struct config_item *item)
+{
+	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
+	struct nvmet_port *port = to_nvmet_port(item);
+
+	nvmet_referral_disable(parent, port);
+}
+
 static void nvmet_referral_release(struct config_item *item)
 {
 	struct nvmet_port *port = to_nvmet_port(item);

-	nvmet_referral_disable(port);
 	kfree(port);
 }

@@ -948,6 +1323,7 @@

 static struct configfs_group_operations nvmet_referral_group_ops = {
 	.make_group = nvmet_referral_make,
+	.disconnect_notify = nvmet_referral_notify,
 };

 static const struct config_item_type nvmet_referrals_type = {
@@ -955,10 +1331,7 @@
 	.ct_group_ops = &nvmet_referral_group_ops,
 };

-static struct {
-	enum nvme_ana_state state;
-	const char *name;
-} nvmet_ana_state_names[] = {
+static struct nvmet_type_name_map nvmet_ana_state[] = {
 	{ NVME_ANA_OPTIMIZED, "optimized" },
 	{ NVME_ANA_NONOPTIMIZED, "non-optimized" },
 	{ NVME_ANA_INACCESSIBLE, "inaccessible" },
@@ -973,10 +1346,9 @@
 	enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
 	int i;

-	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) {
-		if (state != nvmet_ana_state_names[i].state)
-			continue;
-		return sprintf(page, "%s\n", nvmet_ana_state_names[i].name);
+	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
+		if (state == nvmet_ana_state[i].type)
+			return sprintf(page, "%s\n", nvmet_ana_state[i].name);
 	}

 	return sprintf(page, "\n");
@@ -986,10 +1358,11 @@
 		const char *page, size_t count)
 {
 	struct nvmet_ana_group *grp = to_ana_group(item);
+	enum nvme_ana_state *ana_state = grp->port->ana_state;
 	int i;

-	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) {
-		if (sysfs_streq(page, nvmet_ana_state_names[i].name))
+	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
+		if (sysfs_streq(page, nvmet_ana_state[i].name))
 			goto found;
 	}

@@ -998,10 +1371,9 @@

 found:
 	down_write(&nvmet_ana_sem);
-	grp->port->ana_state[grp->grpid] = nvmet_ana_state_names[i].state;
+	ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type;
 	nvmet_ana_chgcnt++;
 	up_write(&nvmet_ana_sem);
-
 	nvmet_port_send_ana_event(grp->port);
 	return count;
 }
@@ -1090,6 +1462,10 @@
 {
 	struct nvmet_port *port = to_nvmet_port(item);

+	/* Let inflight controllers teardown complete */
+	flush_scheduled_work();
+	list_del(&port->global_entry);
+
 	kfree(port->ana_state);
 	kfree(port);
 }
@@ -1101,6 +1477,9 @@
 	&nvmet_attr_addr_trsvcid,
 	&nvmet_attr_addr_trtype,
 	&nvmet_attr_param_inline_data_size,
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+	&nvmet_attr_param_pi_enable,
+#endif
 	NULL,
 };

@@ -1142,12 +1521,16 @@
 		port->ana_state[i] = NVME_ANA_INACCESSIBLE;
 	}

+	list_add(&port->global_entry, &nvmet_ports_list);
+
 	INIT_LIST_HEAD(&port->entry);
 	INIT_LIST_HEAD(&port->subsystems);
 	INIT_LIST_HEAD(&port->referrals);
 	port->inline_data_size = -1; /* < 0 == let the transport choose */

 	port->disc_addr.portid = cpu_to_le16(portid);
+	port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
+	port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
 	config_group_init_type_name(&port->group, name, &nvmet_port_type);

 	config_group_init_type_name(&port->subsys_group,