2024-05-14 bedbef8ad3e75a304af6361af235302bcc61d06b
kernel/drivers/nvme/target/admin-cmd.c
@@ -1,36 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * NVMe admin command implementation.
  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/module.h>
 #include <linux/rculist.h>
+#include <linux/part_stat.h>
 
 #include <generated/utsrelease.h>
 #include <asm/unaligned.h>
 #include "nvmet.h"
-
-/*
- * This helper allows us to clear the AEN based on the RAE bit,
- * Please use this helper when processing the log pages which are
- * associated with the AEN.
- */
-static inline void nvmet_clear_aen(struct nvmet_req *req, u32 aen_bit)
-{
-	int rae = le32_to_cpu(req->cmd->common.cdw10[0]) & 1 << 15;
-
-	if (!rae)
-		clear_bit(aen_bit, &req->sq->ctrl->aen_masked);
-}
 
 u32 nvmet_get_log_page_len(struct nvme_command *cmd)
 {
@@ -45,9 +25,50 @@
 	return len;
 }
 
+static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
+{
+	switch (cdw10 & 0xff) {
+	case NVME_FEAT_HOST_ID:
+		return sizeof(req->sq->ctrl->hostid);
+	default:
+		return 0;
+	}
+}
+
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
+{
+	return le64_to_cpu(cmd->get_log_page.lpo);
+}
+
 static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
 {
-	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
+	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
+}
+
+static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
+{
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+	unsigned long flags;
+	off_t offset = 0;
+	u64 slot;
+	u64 i;
+
+	spin_lock_irqsave(&ctrl->error_lock, flags);
+	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;
+
+	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
+		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
+				sizeof(struct nvme_error_slot)))
+			break;
+
+		if (slot == 0)
+			slot = NVMET_ERROR_LOG_SLOTS - 1;
+		else
+			slot--;
+		offset += sizeof(struct nvme_error_slot);
+	}
+	spin_unlock_irqrestore(&ctrl->error_lock, flags);
+	nvmet_req_complete(req, 0);
 }
 
 static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
@@ -58,8 +79,9 @@
 
 	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
 	if (!ns) {
-		pr_err("nvmet : Could not find namespace id : %d\n",
+		pr_err("Could not find namespace id : %d\n",
 			le32_to_cpu(req->cmd->get_log_page.nsid));
+		req->error_loc = offsetof(struct nvme_rw_command, nsid);
 		return NVME_SC_INVALID_NS;
 	}
 
@@ -91,11 +113,10 @@
 	u64 data_units_read = 0, data_units_written = 0;
 	struct nvmet_ns *ns;
 	struct nvmet_ctrl *ctrl;
+	unsigned long idx;
 
 	ctrl = req->sq->ctrl;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
+	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
 		/* we don't have the right data for file backed ns */
 		if (!ns->bdev)
 			continue;
@@ -105,9 +126,7 @@
 		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
 		data_units_written += DIV_ROUND_UP(
 			part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
-
 	}
-	rcu_read_unlock();
 
 	put_unaligned_le64(host_reads, &slog->host_reads[0]);
 	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
@@ -121,8 +140,9 @@
 {
 	struct nvme_smart_log *log;
 	u16 status = NVME_SC_INTERNAL;
+	unsigned long flags;
 
-	if (req->data_len != sizeof(*log))
+	if (req->transfer_len != sizeof(*log))
 		goto out;
 
 	log = kzalloc(sizeof(*log), GFP_KERNEL);
@@ -135,6 +155,11 @@
 	status = nvmet_get_smart_log_nsid(req, log);
 	if (status)
 		goto out_free_log;
+
+	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
+	put_unaligned_le64(req->sq->ctrl->err_counter,
+			&log->num_err_log_entries);
+	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);
 
 	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
 out_free_log:
@@ -179,7 +204,7 @@
 	u16 status = NVME_SC_INTERNAL;
 	size_t len;
 
-	if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
+	if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
 		goto out;
 
 	mutex_lock(&ctrl->lock);
@@ -189,9 +214,9 @@
 	len = ctrl->nr_changed_ns * sizeof(__le32);
 	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
 	if (!status)
-		status = nvmet_zero_sgl(req, len, req->data_len - len);
+		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
 	ctrl->nr_changed_ns = 0;
-	nvmet_clear_aen(req, NVME_AEN_CFG_NS_ATTR);
+	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
 	mutex_unlock(&ctrl->lock);
 out:
 	nvmet_req_complete(req, status);
@@ -202,14 +227,13 @@
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	struct nvmet_ns *ns;
+	unsigned long idx;
 	u32 count = 0;
 
 	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
-		rcu_read_lock();
-		list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
+		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
-		rcu_read_unlock();
 	}
 
 	desc->grpid = cpu_to_le32(grpid);
@@ -254,7 +278,7 @@
 
 	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
 	hdr.ngrps = cpu_to_le16(ngrps);
-	nvmet_clear_aen(req, NVME_AEN_CFG_ANA_CHANGE);
+	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
 	up_read(&nvmet_ana_sem);
 
 	kfree(desc);
@@ -265,12 +289,56 @@
 	nvmet_req_complete(req, status);
 }
 
+static void nvmet_execute_get_log_page(struct nvmet_req *req)
+{
+	if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
+		return;
+
+	switch (req->cmd->get_log_page.lid) {
+	case NVME_LOG_ERROR:
+		return nvmet_execute_get_log_page_error(req);
+	case NVME_LOG_SMART:
+		return nvmet_execute_get_log_page_smart(req);
+	case NVME_LOG_FW_SLOT:
+		/*
+		 * We only support a single firmware slot which always is
+		 * active, so we can zero out the whole firmware slot log and
+		 * still claim to fully implement this mandatory log page.
+		 */
+		return nvmet_execute_get_log_page_noop(req);
+	case NVME_LOG_CHANGED_NS:
+		return nvmet_execute_get_log_changed_ns(req);
+	case NVME_LOG_CMD_EFFECTS:
+		return nvmet_execute_get_log_cmd_effects_ns(req);
+	case NVME_LOG_ANA:
+		return nvmet_execute_get_log_page_ana(req);
+	}
+	pr_debug("unhandled lid %d on qid %d\n",
+			req->cmd->get_log_page.lid, req->sq->qid);
+	req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
+	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+}
+
+static void nvmet_id_set_model_number(struct nvme_id_ctrl *id,
+				      struct nvmet_subsys *subsys)
+{
+	const char *model = NVMET_DEFAULT_CTRL_MODEL;
+	struct nvmet_subsys_model *subsys_model;
+
+	rcu_read_lock();
+	subsys_model = rcu_dereference(subsys->model);
+	if (subsys_model)
+		model = subsys_model->number;
+	memcpy_and_pad(id->mn, sizeof(id->mn), model, strlen(model), ' ');
+	rcu_read_unlock();
+}
+
 static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	struct nvme_id_ctrl *id;
+	u32 cmd_capsule_size;
 	u16 status = 0;
-	const char model[] = "Linux";
 
 	id = kzalloc(sizeof(*id), GFP_KERNEL);
 	if (!id) {
@@ -285,7 +353,7 @@
 	memset(id->sn, ' ', sizeof(id->sn));
 	bin2hex(id->sn, &ctrl->subsys->serial,
 		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
-	memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
+	nvmet_id_set_model_number(id, ctrl->subsys);
 	memcpy_and_pad(id->fr, sizeof(id->fr),
 		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');
 
@@ -299,14 +367,19 @@
 	/* we support multiple ports, multiples hosts and ANA: */
 	id->cmic = (1 << 0) | (1 << 1) | (1 << 3);
 
-	/* no limit on data transfer sizes for now */
-	id->mdts = 0;
+	/* Limit MDTS according to transport capability */
+	if (ctrl->ops->get_mdts)
+		id->mdts = ctrl->ops->get_mdts(ctrl);
+	else
+		id->mdts = 0;
+
 	id->cntlid = cpu_to_le16(ctrl->cntlid);
 	id->ver = cpu_to_le32(ctrl->subsys->ver);
 
 	/* XXX: figure out what to do about RTD3R/RTD3 */
 	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
-	id->ctratt = cpu_to_le32(1 << 0);
+	id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
+		NVME_CTRL_ATTR_TBKAS);
 
 	id->oacs = 0;
 
@@ -350,16 +423,22 @@
 	id->awupf = 0;
 
 	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
-	if (ctrl->ops->has_keyed_sgls)
+	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
 		id->sgls |= cpu_to_le32(1 << 2);
 	if (req->port->inline_data_size)
 		id->sgls |= cpu_to_le32(1 << 20);
 
-	strcpy(id->subnqn, ctrl->subsys->subsysnqn);
+	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
 
-	/* Max command capsule size is sqe + single page of in-capsule data */
-	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
-				  req->port->inline_data_size) / 16);
+	/*
+	 * Max command capsule size is sqe + in-capsule data size.
+	 * Disable in-capsule data for Metadata capable controllers.
+	 */
+	cmd_capsule_size = sizeof(struct nvme_command);
+	if (!ctrl->pi_support)
+		cmd_capsule_size += req->port->inline_data_size;
+	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);
+
 	/* Max response capsule size is cqe */
 	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
 
@@ -389,11 +468,12 @@
 
 static void nvmet_execute_identify_ns(struct nvmet_req *req)
 {
-	struct nvmet_ns *ns;
+	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	struct nvme_id_ns *id;
 	u16 status = 0;
 
 	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
+		req->error_loc = offsetof(struct nvme_identify, nsid);
 		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
 		goto out;
 	}
@@ -405,16 +485,21 @@
 	}
 
 	/* return an all zeroed buffer if we can't find an active namespace */
-	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
-	if (!ns)
+	req->ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
+	if (!req->ns) {
+		status = 0;
 		goto done;
+	}
+
+	nvmet_ns_revalidate(req->ns);
 
 	/*
 	 * nuse = ncap = nsze isn't always true, but we have no way to find
 	 * that out from the underlying device.
 	 */
-	id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
-	switch (req->port->ana_state[ns->anagrpid]) {
+	id->ncap = id->nsze =
+		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
+	switch (req->port->ana_state[req->ns->anagrpid]) {
 	case NVME_ANA_INACCESSIBLE:
 	case NVME_ANA_PERSISTENT_LOSS:
 		break;
@@ -422,6 +507,9 @@
 		id->nuse = id->nsze;
 		break;
 	}
+
+	if (req->ns->bdev)
+		nvmet_bdev_set_limits(req->ns->bdev, id);
 
 	/*
 	 * We just provide a single LBA format that matches what the
@@ -435,17 +523,28 @@
 	 * controllers, but also with any other user of the block device.
 	 */
 	id->nmic = (1 << 0);
-	id->anagrpid = cpu_to_le32(ns->anagrpid);
+	id->anagrpid = cpu_to_le32(req->ns->anagrpid);
 
-	memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));
+	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
 
-	id->lbaf[0].ds = ns->blksize_shift;
+	id->lbaf[0].ds = req->ns->blksize_shift;
 
-	if (ns->readonly)
+	if (ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
+		id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
+			  NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
+			  NVME_NS_DPC_PI_TYPE3;
+		id->mc = NVME_MC_EXTENDED_LBA;
+		id->dps = req->ns->pi_type;
+		id->flbas = NVME_NS_FLBAS_META_EXT;
+		id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
+	}
+
+	if (req->ns->readonly)
 		id->nsattr |= (1 << 0);
-	nvmet_put_namespace(ns);
 done:
-	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+	if (!status)
+		status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
 	kfree(id);
 out:
 	nvmet_req_complete(req, status);
@@ -456,6 +555,7 @@
 	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	struct nvmet_ns *ns;
+	unsigned long idx;
 	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
 	__le32 *list;
 	u16 status = 0;
@@ -467,15 +567,13 @@
 		goto out;
 	}
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
+	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
 		if (ns->nsid <= min_nsid)
 			continue;
 		list[i++] = cpu_to_le32(ns->nsid);
 		if (i == buf_size / sizeof(__le32))
 			break;
 	}
-	rcu_read_unlock();
 
 	status = nvmet_copy_to_sgl(req, 0, list, buf_size);
 
@@ -514,6 +612,7 @@
 
 	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
 	if (!ns) {
+		req->error_loc = offsetof(struct nvme_identify, nsid);
 		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
 		goto out;
 	}
@@ -542,6 +641,28 @@
 	nvmet_req_complete(req, status);
 }
 
+static void nvmet_execute_identify(struct nvmet_req *req)
+{
+	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
+		return;
+
+	switch (req->cmd->identify.cns) {
+	case NVME_ID_CNS_NS:
+		return nvmet_execute_identify_ns(req);
+	case NVME_ID_CNS_CTRL:
+		return nvmet_execute_identify_ctrl(req);
+	case NVME_ID_CNS_NS_ACTIVE_LIST:
+		return nvmet_execute_identify_nslist(req);
+	case NVME_ID_CNS_NS_DESC_LIST:
+		return nvmet_execute_identify_desclist(req);
+	}
+
+	pr_debug("unhandled identify cns %d on qid %d\n",
+			req->cmd->identify.cns, req->sq->qid);
+	req->error_loc = offsetof(struct nvme_identify, cns);
+	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+}
+
 /*
  * A "minimum viable" abort implementation: the command is mandatory in the
  * spec, but we are not required to do any useful work. We couldn't really
@@ -551,6 +672,8 @@
  */
 static void nvmet_execute_abort(struct nvmet_req *req)
 {
+	if (!nvmet_check_transfer_len(req, 0))
+		return;
 	nvmet_set_result(req, 1);
 	nvmet_req_complete(req, 0);
 }
@@ -571,13 +694,15 @@
 
 static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
 {
-	u32 write_protect = le32_to_cpu(req->cmd->common.cdw10[1]);
+	u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
 	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
 	u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;
 
 	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
-	if (unlikely(!req->ns))
+	if (unlikely(!req->ns)) {
+		req->error_loc = offsetof(struct nvme_common_command, nsid);
 		return status;
+	}
 
 	mutex_lock(&subsys->lock);
 	switch (write_protect) {
@@ -601,32 +726,62 @@
 	return status;
 }
 
-static void nvmet_execute_set_features(struct nvmet_req *req)
+u16 nvmet_set_feat_kato(struct nvmet_req *req)
+{
+	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
+
+	nvmet_stop_keep_alive_timer(req->sq->ctrl);
+	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
+	nvmet_start_keep_alive_timer(req->sq->ctrl);
+
+	nvmet_set_result(req, req->sq->ctrl->kato);
+
+	return 0;
+}
+
+u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
+{
+	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
+
+	if (val32 & ~mask) {
+		req->error_loc = offsetof(struct nvme_common_command, cdw11);
+		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+	}
+
+	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
+	nvmet_set_result(req, val32);
+
+	return 0;
+}
+
+void nvmet_execute_set_features(struct nvmet_req *req)
 {
 	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
-	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
-	u32 val32;
+	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
+	u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
 	u16 status = 0;
+	u16 nsqr;
+	u16 ncqr;
+
+	if (!nvmet_check_transfer_len(req, 0))
+		return;
 
 	switch (cdw10 & 0xff) {
 	case NVME_FEAT_NUM_QUEUES:
+		ncqr = (cdw11 >> 16) & 0xffff;
+		nsqr = cdw11 & 0xffff;
+		if (ncqr == 0xffff || nsqr == 0xffff) {
+			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+			break;
+		}
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
-		val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
-		req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
-		nvmet_set_result(req, req->sq->ctrl->kato);
+		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
-		val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
-		if (val32 & ~NVMET_AEN_CFG_ALL) {
-			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
-			break;
-		}
-
-		WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
-		nvmet_set_result(req, val32);
+		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
@@ -635,6 +790,7 @@
 		status = nvmet_set_feat_write_protect(req);
 		break;
 	default:
+		req->error_loc = offsetof(struct nvme_common_command, cdw10);
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		break;
 	}
@@ -648,9 +804,10 @@
 	u32 result;
 
 	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
-	if (!req->ns)
+	if (!req->ns) {
+		req->error_loc = offsetof(struct nvme_common_command, nsid);
 		return NVME_SC_INVALID_NS | NVME_SC_DNR;
-
+	}
 	mutex_lock(&subsys->lock);
 	if (req->ns->readonly == true)
 		result = NVME_NS_WRITE_PROTECT;
@@ -662,11 +819,24 @@
 	return 0;
 }
 
-static void nvmet_execute_get_features(struct nvmet_req *req)
+void nvmet_get_feat_kato(struct nvmet_req *req)
+{
+	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
+}
+
+void nvmet_get_feat_async_event(struct nvmet_req *req)
+{
+	nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
+}
+
+void nvmet_execute_get_features(struct nvmet_req *req)
 {
 	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
-	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
+	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
 	u16 status = 0;
+
+	if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
+		return;
 
 	switch (cdw10 & 0xff) {
 	/*
@@ -691,7 +861,7 @@
 		break;
 #endif
 	case NVME_FEAT_ASYNC_EVENT:
-		nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
+		nvmet_get_feat_async_event(req);
 		break;
 	case NVME_FEAT_VOLATILE_WC:
 		nvmet_set_result(req, 1);
@@ -701,11 +871,13 @@
 			(subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
 		break;
 	case NVME_FEAT_KATO:
-		nvmet_set_result(req, req->sq->ctrl->kato * 1000);
+		nvmet_get_feat_kato(req);
 		break;
 	case NVME_FEAT_HOST_ID:
 		/* need 128-bit host identifier flag */
-		if (!(req->cmd->common.cdw10[1] & cpu_to_le32(1 << 0))) {
+		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
+			req->error_loc =
+				offsetof(struct nvme_common_command, cdw11);
 			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 			break;
 		}
@@ -717,6 +889,8 @@
 		status = nvmet_get_feat_write_protect(req);
 		break;
 	default:
+		req->error_loc =
+			offsetof(struct nvme_common_command, cdw10);
 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		break;
 	}
@@ -724,9 +898,12 @@
 	nvmet_req_complete(req, status);
 }
 
-static void nvmet_execute_async_event(struct nvmet_req *req)
+void nvmet_execute_async_event(struct nvmet_req *req)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+
+	if (!nvmet_check_transfer_len(req, 0))
+		return;
 
 	mutex_lock(&ctrl->lock);
 	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
@@ -740,9 +917,12 @@
 	schedule_work(&ctrl->async_event_work);
 }
 
-static void nvmet_execute_keep_alive(struct nvmet_req *req)
+void nvmet_execute_keep_alive(struct nvmet_req *req)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+
+	if (!nvmet_check_transfer_len(req, 0))
+		return;
 
 	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
 		ctrl->cntlid, ctrl->kato);
@@ -756,87 +936,44 @@
 	struct nvme_command *cmd = req->cmd;
 	u16 ret;
 
+	if (nvme_is_fabrics(cmd))
+		return nvmet_parse_fabrics_cmd(req);
+	if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
+		return nvmet_parse_discovery_cmd(req);
+
 	ret = nvmet_check_ctrl_status(req, cmd);
 	if (unlikely(ret))
 		return ret;
 
+	if (nvmet_req_passthru_ctrl(req))
+		return nvmet_parse_passthru_admin_cmd(req);
+
 	switch (cmd->common.opcode) {
 	case nvme_admin_get_log_page:
-		req->data_len = nvmet_get_log_page_len(cmd);
-
-		switch (cmd->get_log_page.lid) {
-		case NVME_LOG_ERROR:
-			/*
-			 * We currently never set the More bit in the status
-			 * field, so all error log entries are invalid and can
-			 * be zeroed out. This is called a minum viable
-			 * implementation (TM) of this mandatory log page.
-			 */
-			req->execute = nvmet_execute_get_log_page_noop;
-			return 0;
-		case NVME_LOG_SMART:
-			req->execute = nvmet_execute_get_log_page_smart;
-			return 0;
-		case NVME_LOG_FW_SLOT:
-			/*
-			 * We only support a single firmware slot which always
-			 * is active, so we can zero out the whole firmware slot
-			 * log and still claim to fully implement this mandatory
-			 * log page.
-			 */
-			req->execute = nvmet_execute_get_log_page_noop;
-			return 0;
-		case NVME_LOG_CHANGED_NS:
-			req->execute = nvmet_execute_get_log_changed_ns;
-			return 0;
-		case NVME_LOG_CMD_EFFECTS:
-			req->execute = nvmet_execute_get_log_cmd_effects_ns;
-			return 0;
-		case NVME_LOG_ANA:
-			req->execute = nvmet_execute_get_log_page_ana;
-			return 0;
-		}
-		break;
+		req->execute = nvmet_execute_get_log_page;
+		return 0;
	case nvme_admin_identify:
-		req->data_len = NVME_IDENTIFY_DATA_SIZE;
-		switch (cmd->identify.cns) {
-		case NVME_ID_CNS_NS:
-			req->execute = nvmet_execute_identify_ns;
-			return 0;
-		case NVME_ID_CNS_CTRL:
-			req->execute = nvmet_execute_identify_ctrl;
-			return 0;
-		case NVME_ID_CNS_NS_ACTIVE_LIST:
-			req->execute = nvmet_execute_identify_nslist;
-			return 0;
-		case NVME_ID_CNS_NS_DESC_LIST:
-			req->execute = nvmet_execute_identify_desclist;
-			return 0;
-		}
-		break;
+		req->execute = nvmet_execute_identify;
+		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
-		req->data_len = 0;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
-		req->data_len = 0;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
-		req->data_len = 0;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
-		req->data_len = 0;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
-		req->data_len = 0;
		return 0;
	}
 
-	pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
+	pr_debug("unhandled cmd %d on qid %d\n", cmd->common.opcode,
		 req->sq->qid);
+	req->error_loc = offsetof(struct nvme_common_command, opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
 }
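
Note: the common thread of this diff is that parse time no longer sets req->data_len; instead each handler validates the host-supplied SGL length itself via nvmet_check_transfer_len() and returns early if it is wrong. A minimal sketch of the resulting handler shape, using only helpers that appear in this diff (the handler name and expected length are illustrative, not from the tree):

	static void nvmet_execute_example(struct nvmet_req *req)
	{
		/*
		 * nvmet_check_transfer_len() compares req->transfer_len
		 * against what this command is defined to carry; on a
		 * mismatch it completes the request with an error itself,
		 * so the handler just returns.
		 */
		if (!nvmet_check_transfer_len(req, sizeof(struct nvme_smart_log)))
			return;

		/* ... build the payload and copy it to the SGL ... */

		nvmet_req_complete(req, 0);
	}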