2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/nvme/target/discovery.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Discovery service for the NVMe over Fabrics target.
  * Copyright (C) 2016 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/slab.h>
@@ -18,7 +10,74 @@
 
 struct nvmet_subsys *nvmet_disc_subsys;
 
-u64 nvmet_genctr;
+static u64 nvmet_genctr;
+
+static void __nvmet_disc_changed(struct nvmet_port *port,
+				 struct nvmet_ctrl *ctrl)
+{
+	if (ctrl->port != port)
+		return;
+
+	if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
+		return;
+
+	nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+			      NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
+}
+
+void nvmet_port_disc_changed(struct nvmet_port *port,
+			     struct nvmet_subsys *subsys)
+{
+	struct nvmet_ctrl *ctrl;
+
+	lockdep_assert_held(&nvmet_config_sem);
+	nvmet_genctr++;
+
+	mutex_lock(&nvmet_disc_subsys->lock);
+	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
+		if (subsys && !nvmet_host_allowed(subsys, ctrl->hostnqn))
+			continue;
+
+		__nvmet_disc_changed(port, ctrl);
+	}
+	mutex_unlock(&nvmet_disc_subsys->lock);
+
+	/* If transport can signal change, notify transport */
+	if (port->tr_ops && port->tr_ops->discovery_chg)
+		port->tr_ops->discovery_chg(port);
+}
+
+static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
+					struct nvmet_subsys *subsys,
+					struct nvmet_host *host)
+{
+	struct nvmet_ctrl *ctrl;
+
+	mutex_lock(&nvmet_disc_subsys->lock);
+	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
+		if (host && strcmp(nvmet_host_name(host), ctrl->hostnqn))
+			continue;
+
+		__nvmet_disc_changed(port, ctrl);
+	}
+	mutex_unlock(&nvmet_disc_subsys->lock);
+}
+
+void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
+			       struct nvmet_host *host)
+{
+	struct nvmet_port *port;
+	struct nvmet_subsys_link *s;
+
+	nvmet_genctr++;
+
+	list_for_each_entry(port, nvmet_ports, global_entry)
+		list_for_each_entry(s, &port->subsystems, entry) {
+			if (s->subsys != subsys)
+				continue;
+			__nvmet_subsys_disc_changed(port, subsys, host);
+		}
+}
 
 void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
 {
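
Note on the hunk above: nvmet_port_disc_changed() bumps the generation counter, queues a Discovery Log Change AEN to every discovery controller watching the port (and allowed to see the subsystem), then gives the transport a chance to react. A transport opts in by filling the new discovery_chg hook; a minimal sketch, assuming the usual nvmet_fabrics_ops wiring that this file only consumes (everything named demo_* is made up for illustration):

/* Sketch only: a fabrics transport opting in to the discovery-change
 * callback invoked above. demo_* names are hypothetical; only the
 * .discovery_chg member comes from this patch. */
static void demo_discovery_chg(struct nvmet_port *port)
{
	pr_info("discovery log for port %d changed\n",
		le16_to_cpu(port->disc_addr.portid));
}

static const struct nvmet_fabrics_ops demo_ops = {
	/* .queue_response, .add_port, ... elided */
	.discovery_chg	= demo_discovery_chg,
};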
@@ -26,18 +85,18 @@
 	if (list_empty(&port->entry)) {
 		list_add_tail(&port->entry, &parent->referrals);
 		port->enabled = true;
-		nvmet_genctr++;
+		nvmet_port_disc_changed(parent, NULL);
 	}
 	up_write(&nvmet_config_sem);
 }
 
-void nvmet_referral_disable(struct nvmet_port *port)
+void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port)
 {
 	down_write(&nvmet_config_sem);
 	if (!list_empty(&port->entry)) {
 		port->enabled = false;
 		list_del_init(&port->entry);
-		nvmet_genctr++;
+		nvmet_port_disc_changed(parent, NULL);
 	}
 	up_write(&nvmet_config_sem);
 }
@@ -81,54 +140,88 @@
 	memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
 }
 
-static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
+static size_t discovery_log_entries(struct nvmet_req *req)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	struct nvmet_subsys_link *p;
+	struct nvmet_port *r;
+	size_t entries = 0;
+
+	list_for_each_entry(p, &req->port->subsystems, entry) {
+		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
+			continue;
+		entries++;
+	}
+	list_for_each_entry(r, &req->port->referrals, entry)
+		entries++;
+	return entries;
+}
+
+static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
 {
 	const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	struct nvmf_disc_rsp_page_hdr *hdr;
+	u64 offset = nvmet_get_log_page_offset(req->cmd);
 	size_t data_len = nvmet_get_log_page_len(req->cmd);
-	size_t alloc_len = max(data_len, sizeof(*hdr));
-	int residual_len = data_len - sizeof(*hdr);
+	size_t alloc_len;
 	struct nvmet_subsys_link *p;
 	struct nvmet_port *r;
 	u32 numrec = 0;
 	u16 status = 0;
+	void *buffer;
+
+	if (!nvmet_check_transfer_len(req, data_len))
+		return;
+
+	if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
+		req->error_loc =
+			offsetof(struct nvme_get_log_page_command, lid);
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		goto out;
+	}
+
+	/* Spec requires dword aligned offsets */
+	if (offset & 0x3) {
+		req->error_loc =
+			offsetof(struct nvme_get_log_page_command, lpo);
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		goto out;
+	}
 
 	/*
 	 * Make sure we're passing at least a buffer of response header size.
 	 * If host provided data len is less than the header size, only the
 	 * number of bytes requested by host will be sent to host.
 	 */
-	hdr = kzalloc(alloc_len, GFP_KERNEL);
-	if (!hdr) {
+	down_read(&nvmet_config_sem);
+	alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
+	buffer = kzalloc(alloc_len, GFP_KERNEL);
+	if (!buffer) {
+		up_read(&nvmet_config_sem);
 		status = NVME_SC_INTERNAL;
 		goto out;
 	}
 
-	down_read(&nvmet_config_sem);
+	hdr = buffer;
 	list_for_each_entry(p, &req->port->subsystems, entry) {
-		if (!nvmet_host_allowed(req, p->subsys, ctrl->hostnqn))
-			continue;
-		if (residual_len >= entry_size) {
-			char traddr[NVMF_TRADDR_SIZE];
+		char traddr[NVMF_TRADDR_SIZE];
 
-			nvmet_set_disc_traddr(req, req->port, traddr);
-			nvmet_format_discovery_entry(hdr, req->port,
-					p->subsys->subsysnqn, traddr,
-					NVME_NQN_NVME, numrec);
-			residual_len -= entry_size;
-		}
+		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
+			continue;
+
+		nvmet_set_disc_traddr(req, req->port, traddr);
+		nvmet_format_discovery_entry(hdr, req->port,
+				p->subsys->subsysnqn, traddr,
+				NVME_NQN_NVME, numrec);
 		numrec++;
 	}
 
 	list_for_each_entry(r, &req->port->referrals, entry) {
-		if (residual_len >= entry_size) {
-			nvmet_format_discovery_entry(hdr, r,
-					NVME_DISC_SUBSYS_NAME,
-					r->disc_addr.traddr,
-					NVME_NQN_DISC, numrec);
-			residual_len -= entry_size;
-		}
+		nvmet_format_discovery_entry(hdr, r,
+				NVME_DISC_SUBSYS_NAME,
+				r->disc_addr.traddr,
+				NVME_NQN_DISC, numrec);
 		numrec++;
 	}
 
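
The rewritten handler above sizes the buffer for the complete log under nvmet_config_sem instead of juggling a residual length, then serves whatever dword-aligned window the host asked for via buffer + offset. In the discovery log format the header and every record are each 1024 bytes, so the sizing is plain arithmetic; a worked example under that assumption:

/* Worked example of the alloc_len computation above: a port with 3
 * visible subsystems plus 1 referral needs a 1 KiB header and 4 x 1 KiB
 * records, i.e. 5120 bytes, which the host can then read in
 * dword-aligned slices via the Get Log Page offset. */
const size_t entry_size = sizeof(struct nvmf_disc_rsp_page_entry); /* 1024 */
const size_t hdr_size = sizeof(struct nvmf_disc_rsp_page_hdr);     /* 1024 */
const size_t alloc_len = hdr_size + entry_size * (3 + 1);          /* 5120 */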
@@ -136,19 +229,31 @@
 	hdr->numrec = cpu_to_le64(numrec);
 	hdr->recfmt = cpu_to_le16(0);
 
+	nvmet_clear_aen_bit(req, NVME_AEN_BIT_DISC_CHANGE);
+
 	up_read(&nvmet_config_sem);
 
-	status = nvmet_copy_to_sgl(req, 0, hdr, data_len);
-	kfree(hdr);
+	status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
+	kfree(buffer);
 out:
 	nvmet_req_complete(req, status);
 }
 
-static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
+static void nvmet_execute_disc_identify(struct nvmet_req *req)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	struct nvme_id_ctrl *id;
+	const char model[] = "Linux";
 	u16 status = 0;
+
+	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
+		return;
+
+	if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
+		req->error_loc = offsetof(struct nvme_identify, cns);
+		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		goto out;
+	}
 
 	id = kzalloc(sizeof(*id), GFP_KERNEL);
 	if (!id) {
@@ -156,8 +261,13 @@
 		goto out;
 	}
 
+	memset(id->sn, ' ', sizeof(id->sn));
+	bin2hex(id->sn, &ctrl->subsys->serial,
+		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
 	memset(id->fr, ' ', sizeof(id->fr));
-	strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));
+	memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
+	memcpy_and_pad(id->fr, sizeof(id->fr),
+		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');
 
 	/* no limit on data transfer sizes for now */
 	id->mdts = 0;
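
The identify changes above matter because NVMe identify strings are ASCII, space padded, and not NUL terminated; strncpy() zero-pads the tail, which memcpy_and_pad() avoids. The serial number is now reported as well, as hex digits of the subsystem serial. A small usage sketch of the helper's semantics (standalone illustration, not from the patch):

/* memcpy_and_pad(dst, dst_len, src, src_len, pad) copies src_len bytes
 * and fills the remainder of dst with the pad byte: */
char fr[8];
memcpy_and_pad(fr, sizeof(fr), "5.4.0", 5, ' ');   /* yields "5.4.0   " */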
@@ -169,18 +279,71 @@
 	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
 
 	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
-	if (ctrl->ops->has_keyed_sgls)
+	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
 		id->sgls |= cpu_to_le32(1 << 2);
 	if (req->port->inline_data_size)
 		id->sgls |= cpu_to_le32(1 << 20);
 
-	strcpy(id->subnqn, ctrl->subsys->subsysnqn);
+	id->oaes = cpu_to_le32(NVMET_DISC_AEN_CFG_OPTIONAL);
+
+	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
 
 	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
 
 	kfree(id);
 out:
 	nvmet_req_complete(req, status);
+}
+
+static void nvmet_execute_disc_set_features(struct nvmet_req *req)
+{
+	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
+	u16 stat;
+
+	if (!nvmet_check_transfer_len(req, 0))
+		return;
+
+	switch (cdw10 & 0xff) {
+	case NVME_FEAT_KATO:
+		stat = nvmet_set_feat_kato(req);
+		break;
+	case NVME_FEAT_ASYNC_EVENT:
+		stat = nvmet_set_feat_async_event(req,
+						  NVMET_DISC_AEN_CFG_OPTIONAL);
+		break;
+	default:
+		req->error_loc =
+			offsetof(struct nvme_common_command, cdw10);
+		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		break;
+	}
+
+	nvmet_req_complete(req, stat);
+}
+
+static void nvmet_execute_disc_get_features(struct nvmet_req *req)
+{
+	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
+	u16 stat = 0;
+
+	if (!nvmet_check_transfer_len(req, 0))
+		return;
+
+	switch (cdw10 & 0xff) {
+	case NVME_FEAT_KATO:
+		nvmet_get_feat_kato(req);
+		break;
+	case NVME_FEAT_ASYNC_EVENT:
+		nvmet_get_feat_async_event(req);
+		break;
+	default:
+		req->error_loc =
+			offsetof(struct nvme_common_command, cdw10);
+		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+		break;
+	}
+
+	nvmet_req_complete(req, stat);
 }
 
 u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
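
The two feature handlers above restrict a discovery controller to the Keep Alive Timer and Asynchronous Event Configuration features, with the AEN bits gated by NVMET_DISC_AEN_CFG_OPTIONAL. The helpers themselves live outside this file; roughly, the set path validates the requested bits against the caller's mask, as in this sketch (an approximation under that assumption, not this patch's code):

/* Approximate shape of nvmet_set_feat_async_event(req, mask): reject
 * AEN bits outside the allowed mask, otherwise latch them. */
static u16 demo_set_feat_async_event(struct nvmet_req *req, u32 mask)
{
	u32 val = le32_to_cpu(req->cmd->common.cdw11);

	if (val & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val);
	nvmet_set_result(req, val);
	return 0;
}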
....@@ -190,50 +353,43 @@
190353 if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
191354 pr_err("got cmd %d while not ready\n",
192355 cmd->common.opcode);
356
+ req->error_loc =
357
+ offsetof(struct nvme_common_command, opcode);
193358 return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
194359 }
195360
196361 switch (cmd->common.opcode) {
362
+ case nvme_admin_set_features:
363
+ req->execute = nvmet_execute_disc_set_features;
364
+ return 0;
365
+ case nvme_admin_get_features:
366
+ req->execute = nvmet_execute_disc_get_features;
367
+ return 0;
368
+ case nvme_admin_async_event:
369
+ req->execute = nvmet_execute_async_event;
370
+ return 0;
371
+ case nvme_admin_keep_alive:
372
+ req->execute = nvmet_execute_keep_alive;
373
+ return 0;
197374 case nvme_admin_get_log_page:
198
- req->data_len = nvmet_get_log_page_len(cmd);
199
-
200
- switch (cmd->get_log_page.lid) {
201
- case NVME_LOG_DISC:
202
- req->execute = nvmet_execute_get_disc_log_page;
203
- return 0;
204
- default:
205
- pr_err("unsupported get_log_page lid %d\n",
206
- cmd->get_log_page.lid);
207
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
208
- }
375
+ req->execute = nvmet_execute_disc_get_log_page;
376
+ return 0;
209377 case nvme_admin_identify:
210
- req->data_len = NVME_IDENTIFY_DATA_SIZE;
211
- switch (cmd->identify.cns) {
212
- case NVME_ID_CNS_CTRL:
213
- req->execute =
214
- nvmet_execute_identify_disc_ctrl;
215
- return 0;
216
- default:
217
- pr_err("unsupported identify cns %d\n",
218
- cmd->identify.cns);
219
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
220
- }
378
+ req->execute = nvmet_execute_disc_identify;
379
+ return 0;
221380 default:
222
- pr_err("unsupported cmd %d\n", cmd->common.opcode);
381
+ pr_err("unhandled cmd %d\n", cmd->common.opcode);
382
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
223383 return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
224384 }
225385
226
- pr_err("unhandled cmd %d\n", cmd->common.opcode);
227
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
228386 }
229387
230388 int __init nvmet_init_discovery(void)
231389 {
232390 nvmet_disc_subsys =
233391 nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
234
- if (!nvmet_disc_subsys)
235
- return -ENOMEM;
236
- return 0;
392
+ return PTR_ERR_OR_ZERO(nvmet_disc_subsys);
237393 }
238394
239395 void nvmet_exit_discovery(void)