| .. | .. |
| 1 | +// SPDX-License-Identifier: GPL-2.0 |
| 1 | 2 | /* |
| 2 | 3 | * NVMe admin command implementation. |
| 3 | 4 | * Copyright (c) 2015-2016 HGST, a Western Digital Company. |
| 4 | | - * |
| 5 | | - * This program is free software; you can redistribute it and/or modify it |
| 6 | | - * under the terms and conditions of the GNU General Public License, |
| 7 | | - * version 2, as published by the Free Software Foundation. |
| 8 | | - * |
| 9 | | - * This program is distributed in the hope it will be useful, but WITHOUT |
| 10 | | - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | | - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 12 | | - * more details. |
| 13 | 5 | */ |
| 14 | 6 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 15 | 7 | #include <linux/module.h> |
| 16 | 8 | #include <linux/rculist.h> |
| 9 | +#include <linux/part_stat.h> |
| 17 | 10 | |
| 18 | 11 | #include <generated/utsrelease.h> |
| 19 | 12 | #include <asm/unaligned.h> |
| 20 | 13 | #include "nvmet.h" |
| 21 | | - |
| 22 | | -/* |
| 23 | | - * This helper allows us to clear the AEN based on the RAE bit, |
| 24 | | - * Please use this helper when processing the log pages which are |
| 25 | | - * associated with the AEN. |
| 26 | | - */ |
| 27 | | -static inline void nvmet_clear_aen(struct nvmet_req *req, u32 aen_bit) |
| 28 | | -{ |
| 29 | | - int rae = le32_to_cpu(req->cmd->common.cdw10[0]) & 1 << 15; |
| 30 | | - |
| 31 | | - if (!rae) |
| 32 | | - clear_bit(aen_bit, &req->sq->ctrl->aen_masked); |
| 33 | | -} |
| 34 | 14 | |
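Editor's note on the helper deleted above: bit 15 of Get Log Page CDW10 is RAE (Retain Asynchronous Event). When the host leaves RAE clear, serving the log page also rearms the matching async event, which is what the `clear_bit()` on `aen_masked` implemented; later hunks replace this open-coded helper with `nvmet_clear_aen_bit()`. A minimal userspace sketch of the bit test (the constant name is ours, not a kernel define):

```c
#include <stdint.h>
#include <stdio.h>

/* Bit 15 of Get Log Page CDW10 is RAE (Retain Asynchronous Event).
 * The constant name here is illustrative, not a kernel define. */
#define GET_LOG_PAGE_RAE (1u << 15)

static int rae_is_set(uint32_t cdw10)
{
    return (cdw10 & GET_LOG_PAGE_RAE) != 0;
}

int main(void)
{
    printf("%d\n", rae_is_set(0x00008001)); /* 1: RAE set */
    printf("%d\n", rae_is_set(0x00000001)); /* 0: RAE clear, AEN rearmed */
    return 0;
}
```

Note that `a & 1 << 15` in the removed line parses as `a & (1 << 15)`, since `<<` binds tighter than `&`, so the old code was correct if terse.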
| 35 | 15 | u32 nvmet_get_log_page_len(struct nvme_command *cmd) |
| 36 | 16 | { |
| .. | .. |
| 45 | 25 | return len; |
| 46 | 26 | } |
| 47 | 27 | |
| 28 | +static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10) |
| 29 | +{ |
| 30 | + switch (cdw10 & 0xff) { |
| 31 | + case NVME_FEAT_HOST_ID: |
| 32 | + return sizeof(req->sq->ctrl->hostid); |
| 33 | + default: |
| 34 | + return 0; |
| 35 | + } |
| 36 | +} |
| 37 | + |
| 38 | +u64 nvmet_get_log_page_offset(struct nvme_command *cmd) |
| 39 | +{ |
| 40 | + return le64_to_cpu(cmd->get_log_page.lpo); |
| 41 | +} |
| 42 | + |
| 48 | 43 | static void nvmet_execute_get_log_page_noop(struct nvmet_req *req) |
| 49 | 44 | { |
| 50 | | - nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len)); |
| 45 | + nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len)); |
| 46 | +} |
| 47 | + |
| 48 | +static void nvmet_execute_get_log_page_error(struct nvmet_req *req) |
| 49 | +{ |
| 50 | + struct nvmet_ctrl *ctrl = req->sq->ctrl; |
| 51 | + unsigned long flags; |
| 52 | + off_t offset = 0; |
| 53 | + u64 slot; |
| 54 | + u64 i; |
| 55 | + |
| 56 | + spin_lock_irqsave(&ctrl->error_lock, flags); |
| 57 | + slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS; |
| 58 | + |
| 59 | + for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) { |
| 60 | + if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot], |
| 61 | + sizeof(struct nvme_error_slot))) |
| 62 | + break; |
| 63 | + |
| 64 | + if (slot == 0) |
| 65 | + slot = NVMET_ERROR_LOG_SLOTS - 1; |
| 66 | + else |
| 67 | + slot--; |
| 68 | + offset += sizeof(struct nvme_error_slot); |
| 69 | + } |
| 70 | + spin_unlock_irqrestore(&ctrl->error_lock, flags); |
| 71 | + nvmet_req_complete(req, 0); |
| 51 | 72 | } |
| 52 | 73 | |
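Editor's note: the new error-log handler walks the per-controller slot ring backwards from `err_counter % NVMET_ERROR_LOG_SLOTS`, wrapping at zero, so the slots come out in reverse ring order under the error lock. A standalone sketch of that walk (`SLOTS` stands in for `NVMET_ERROR_LOG_SLOTS`):

```c
#include <stdio.h>

/* Standalone sketch of the slot walk above: start at
 * err_counter % ring size, step backwards, wrap at zero.
 * SLOTS stands in for NVMET_ERROR_LOG_SLOTS. */
#define SLOTS 8

int main(void)
{
    unsigned long long err_counter = 11;
    unsigned long long slot = err_counter % SLOTS; /* 3 */
    int i;

    for (i = 0; i < SLOTS; i++) {
        printf("copy slot %llu\n", slot); /* 3 2 1 0 7 6 5 4 */
        slot = (slot == 0) ? SLOTS - 1 : slot - 1;
    }
    return 0;
}
```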
| 53 | 74 | static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req, |
| .. | .. |
| 58 | 79 | |
| 59 | 80 | ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid); |
| 60 | 81 | if (!ns) { |
| 61 | | - pr_err("nvmet : Could not find namespace id : %d\n", |
| 82 | + pr_err("Could not find namespace id : %d\n", |
| 62 | 83 | le32_to_cpu(req->cmd->get_log_page.nsid)); |
| 84 | + req->error_loc = offsetof(struct nvme_rw_command, nsid); |
| 63 | 85 | return NVME_SC_INVALID_NS; |
| 64 | 86 | } |
| 65 | 87 | |
| .. | .. |
| 91 | 113 | u64 data_units_read = 0, data_units_written = 0; |
| 92 | 114 | struct nvmet_ns *ns; |
| 93 | 115 | struct nvmet_ctrl *ctrl; |
| 116 | + unsigned long idx; |
| 94 | 117 | |
| 95 | 118 | ctrl = req->sq->ctrl; |
| 96 | | - |
| 97 | | - rcu_read_lock(); |
| 98 | | - list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) { |
| 119 | + xa_for_each(&ctrl->subsys->namespaces, idx, ns) { |
| 99 | 120 | /* we don't have the right data for file backed ns */ |
| 100 | 121 | if (!ns->bdev) |
| 101 | 122 | continue; |
| .. | .. |
| 105 | 126 | host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]); |
| 106 | 127 | data_units_written += DIV_ROUND_UP( |
| 107 | 128 | part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000); |
| 108 | | - |
| 109 | 129 | } |
| 110 | | - rcu_read_unlock(); |
| 111 | 130 | |
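Editor's note: the `DIV_ROUND_UP(..., 1000)` kept by this hunk matches the SMART log definition, where one "data unit" is 1000 sectors of 512 bytes. A userspace sketch of the same rounding:

```c
#include <stdio.h>

/* One SMART "data unit" is 1000 sectors of 512 bytes, hence the
 * round-up division by 1000 in the hunk above. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned long long sectors = 123456789ULL;

    printf("%llu sectors -> %llu data units\n",
           sectors, DIV_ROUND_UP(sectors, 1000));
    return 0;
}
```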
| 112 | 131 | put_unaligned_le64(host_reads, &slog->host_reads[0]); |
| 113 | 132 | put_unaligned_le64(data_units_read, &slog->data_units_read[0]); |
| .. | .. |
| 121 | 140 | { |
| 122 | 141 | struct nvme_smart_log *log; |
| 123 | 142 | u16 status = NVME_SC_INTERNAL; |
| 143 | + unsigned long flags; |
| 124 | 144 | |
| 125 | | - if (req->data_len != sizeof(*log)) |
| 145 | + if (req->transfer_len != sizeof(*log)) |
| 126 | 146 | goto out; |
| 127 | 147 | |
| 128 | 148 | log = kzalloc(sizeof(*log), GFP_KERNEL); |
| .. | .. |
| 135 | 155 | status = nvmet_get_smart_log_nsid(req, log); |
| 136 | 156 | if (status) |
| 137 | 157 | goto out_free_log; |
| 158 | + |
| 159 | + spin_lock_irqsave(&req->sq->ctrl->error_lock, flags); |
| 160 | + put_unaligned_le64(req->sq->ctrl->err_counter, |
| 161 | + &log->num_err_log_entries); |
| 162 | + spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags); |
| 138 | 163 | |
| 139 | 164 | status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log)); |
| 140 | 165 | out_free_log: |
| .. | .. |
| 179 | 204 | u16 status = NVME_SC_INTERNAL; |
| 180 | 205 | size_t len; |
| 181 | 206 | |
| 182 | | - if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32)) |
| 207 | + if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32)) |
| 183 | 208 | goto out; |
| 184 | 209 | |
| 185 | 210 | mutex_lock(&ctrl->lock); |
| .. | .. |
| 189 | 214 | len = ctrl->nr_changed_ns * sizeof(__le32); |
| 190 | 215 | status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len); |
| 191 | 216 | if (!status) |
| 192 | | - status = nvmet_zero_sgl(req, len, req->data_len - len); |
| 217 | + status = nvmet_zero_sgl(req, len, req->transfer_len - len); |
| 193 | 218 | ctrl->nr_changed_ns = 0; |
| 194 | | - nvmet_clear_aen(req, NVME_AEN_CFG_NS_ATTR); |
| 219 | + nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR); |
| 195 | 220 | mutex_unlock(&ctrl->lock); |
| 196 | 221 | out: |
| 197 | 222 | nvmet_req_complete(req, status); |
| .. | .. |
| 202 | 227 | { |
| 203 | 228 | struct nvmet_ctrl *ctrl = req->sq->ctrl; |
| 204 | 229 | struct nvmet_ns *ns; |
| 230 | + unsigned long idx; |
| 205 | 231 | u32 count = 0; |
| 206 | 232 | |
| 207 | 233 | if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) { |
| 208 | | - rcu_read_lock(); |
| 209 | | - list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) |
| 234 | + xa_for_each(&ctrl->subsys->namespaces, idx, ns) |
| 210 | 235 | if (ns->anagrpid == grpid) |
| 211 | 236 | desc->nsids[count++] = cpu_to_le32(ns->nsid); |
| 212 | | - rcu_read_unlock(); |
| 213 | 237 | } |
| 214 | 238 | |
| 215 | 239 | desc->grpid = cpu_to_le32(grpid); |
| .. | .. |
| 254 | 278 | |
| 255 | 279 | hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt); |
| 256 | 280 | hdr.ngrps = cpu_to_le16(ngrps); |
| 257 | | - nvmet_clear_aen(req, NVME_AEN_CFG_ANA_CHANGE); |
| 281 | + nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE); |
| 258 | 282 | up_read(&nvmet_ana_sem); |
| 259 | 283 | |
| 260 | 284 | kfree(desc); |
| .. | .. |
| 265 | 289 | nvmet_req_complete(req, status); |
| 266 | 290 | } |
| 267 | 291 | |
| 292 | +static void nvmet_execute_get_log_page(struct nvmet_req *req) |
| 293 | +{ |
| 294 | + if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd))) |
| 295 | + return; |
| 296 | + |
| 297 | + switch (req->cmd->get_log_page.lid) { |
| 298 | + case NVME_LOG_ERROR: |
| 299 | + return nvmet_execute_get_log_page_error(req); |
| 300 | + case NVME_LOG_SMART: |
| 301 | + return nvmet_execute_get_log_page_smart(req); |
| 302 | + case NVME_LOG_FW_SLOT: |
| 303 | + /* |
| 304 | + * We only support a single firmware slot which always is |
| 305 | + * active, so we can zero out the whole firmware slot log and |
| 306 | + * still claim to fully implement this mandatory log page. |
| 307 | + */ |
| 308 | + return nvmet_execute_get_log_page_noop(req); |
| 309 | + case NVME_LOG_CHANGED_NS: |
| 310 | + return nvmet_execute_get_log_changed_ns(req); |
| 311 | + case NVME_LOG_CMD_EFFECTS: |
| 312 | + return nvmet_execute_get_log_cmd_effects_ns(req); |
| 313 | + case NVME_LOG_ANA: |
| 314 | + return nvmet_execute_get_log_page_ana(req); |
| 315 | + } |
| 316 | + pr_debug("unhandled lid %d on qid %d\n", |
| 317 | + req->cmd->get_log_page.lid, req->sq->qid); |
| 318 | + req->error_loc = offsetof(struct nvme_get_log_page_command, lid); |
| 319 | + nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR); |
| 320 | +} |
| 321 | + |
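Editor's note: `nvmet_execute_get_log_page()` above shows the pattern this series converges on: the executor validates the host-advertised transfer length itself (completing the request on mismatch) before dispatching, instead of the old parse-time `req->data_len` bookkeeping. A minimal model of that flow, with stand-in names rather than the real nvmet API:

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-in model of the check-then-dispatch flow; none of these
 * names are the real nvmet API. */
struct req {
    unsigned int transfer_len;
};

static bool check_transfer_len(struct req *req, unsigned int expected)
{
    if (req->transfer_len == expected)
        return true;
    /* the real helper completes the request with an SGL error here */
    fprintf(stderr, "bad transfer length %u, expected %u\n",
            req->transfer_len, expected);
    return false;
}

static void execute_get_log_page(struct req *req, unsigned int page_len)
{
    if (!check_transfer_len(req, page_len))
        return;
    printf("dispatch on lid with a correctly sized buffer\n");
}

int main(void)
{
    struct req r = { .transfer_len = 4096 };

    execute_get_log_page(&r, 4096); /* dispatched */
    execute_get_log_page(&r, 512);  /* rejected up front */
    return 0;
}
```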
| 322 | +static void nvmet_id_set_model_number(struct nvme_id_ctrl *id, |
| 323 | + struct nvmet_subsys *subsys) |
| 324 | +{ |
| 325 | + const char *model = NVMET_DEFAULT_CTRL_MODEL; |
| 326 | + struct nvmet_subsys_model *subsys_model; |
| 327 | + |
| 328 | + rcu_read_lock(); |
| 329 | + subsys_model = rcu_dereference(subsys->model); |
| 330 | + if (subsys_model) |
| 331 | + model = subsys_model->number; |
| 332 | + memcpy_and_pad(id->mn, sizeof(id->mn), model, strlen(model), ' '); |
| 333 | + rcu_read_unlock(); |
| 334 | +} |
| 335 | + |
| 268 | 336 | static void nvmet_execute_identify_ctrl(struct nvmet_req *req) |
| 269 | 337 | { |
| 270 | 338 | struct nvmet_ctrl *ctrl = req->sq->ctrl; |
| 271 | 339 | struct nvme_id_ctrl *id; |
| 340 | + u32 cmd_capsule_size; |
| 272 | 341 | u16 status = 0; |
| 273 | | - const char model[] = "Linux"; |
| 274 | 342 | |
| 275 | 343 | id = kzalloc(sizeof(*id), GFP_KERNEL); |
| 276 | 344 | if (!id) { |
| .. | .. |
| 285 | 353 | memset(id->sn, ' ', sizeof(id->sn)); |
| 286 | 354 | bin2hex(id->sn, &ctrl->subsys->serial, |
| 287 | 355 | min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2)); |
| 288 | | - memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' '); |
| 356 | + nvmet_id_set_model_number(id, ctrl->subsys); |
| 289 | 357 | memcpy_and_pad(id->fr, sizeof(id->fr), |
| 290 | 358 | UTS_RELEASE, strlen(UTS_RELEASE), ' '); |
| 291 | 359 | |
| .. | .. |
| 299 | 367 | /* we support multiple ports, multiples hosts and ANA: */ |
| 300 | 368 | id->cmic = (1 << 0) | (1 << 1) | (1 << 3); |
| 301 | 369 | |
| 302 | | - /* no limit on data transfer sizes for now */ |
| 303 | | - id->mdts = 0; |
| 370 | + /* Limit MDTS according to transport capability */ |
| 371 | + if (ctrl->ops->get_mdts) |
| 372 | + id->mdts = ctrl->ops->get_mdts(ctrl); |
| 373 | + else |
| 374 | + id->mdts = 0; |
| 375 | + |
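Editor's note: MDTS is a power-of-two exponent in units of the controller's minimum memory page size (CAP.MPSMIN), with zero meaning no limit, so a transport's `get_mdts()` return value caps transfer sizes as below. A worked sketch assuming a 4 KiB MPSMIN and an example exponent of 5:

```c
#include <stdio.h>

/* MDTS is 2^mdts units of CAP.MPSMIN; 0 means unlimited.
 * 4 KiB MPSMIN and mdts == 5 are assumed example values. */
int main(void)
{
    unsigned int mpsmin = 4096;
    unsigned char mdts = 5;

    if (mdts)
        printf("max transfer: %u bytes\n", mpsmin << mdts); /* 131072 */
    else
        printf("max transfer: unlimited\n");
    return 0;
}
```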
| 304 | 376 | id->cntlid = cpu_to_le16(ctrl->cntlid); |
| 305 | 377 | id->ver = cpu_to_le32(ctrl->subsys->ver); |
| 306 | 378 | |
| 307 | 379 | /* XXX: figure out what to do about RTD3R/RTD3 */ |
| 308 | 380 | id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL); |
| 309 | | - id->ctratt = cpu_to_le32(1 << 0); |
| 381 | + id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT | |
| 382 | + NVME_CTRL_ATTR_TBKAS); |
| 310 | 383 | |
| 311 | 384 | id->oacs = 0; |
| 312 | 385 | |
| .. | .. |
| 350 | 423 | id->awupf = 0; |
| 351 | 424 | |
| 352 | 425 | id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */ |
| 353 | | - if (ctrl->ops->has_keyed_sgls) |
| 426 | + if (ctrl->ops->flags & NVMF_KEYED_SGLS) |
| 354 | 427 | id->sgls |= cpu_to_le32(1 << 2); |
| 355 | 428 | if (req->port->inline_data_size) |
| 356 | 429 | id->sgls |= cpu_to_le32(1 << 20); |
| 357 | 430 | |
| 358 | | - strcpy(id->subnqn, ctrl->subsys->subsysnqn); |
| 431 | + strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn)); |
| 359 | 432 | |
| 360 | | - /* Max command capsule size is sqe + single page of in-capsule data */ |
| 361 | | - id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) + |
| 362 | | - req->port->inline_data_size) / 16); |
| 433 | + /* |
| 434 | + * Max command capsule size is sqe + in-capsule data size. |
| 435 | + * Disable in-capsule data for Metadata capable controllers. |
| 436 | + */ |
| 437 | + cmd_capsule_size = sizeof(struct nvme_command); |
| 438 | + if (!ctrl->pi_support) |
| 439 | + cmd_capsule_size += req->port->inline_data_size; |
| 440 | + id->ioccsz = cpu_to_le32(cmd_capsule_size / 16); |
| 441 | + |
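Editor's note: IOCCSZ is reported in 16-byte units and an SQE is 64 bytes, so a PI-capable controller that drops in-capsule data reports 4, while (for example) 4 KiB of inline data would give 260. The inline size here is an assumed example, not a fixed nvmet value:

```c
#include <stdio.h>

/* IOCCSZ is in 16-byte units; an SQE is 64 bytes. The 4 KiB
 * in-capsule figure is an example, not a fixed nvmet value. */
int main(void)
{
    unsigned int sqe = 64, inline_data = 4096;

    printf("PI, no in-capsule data: ioccsz = %u\n", sqe / 16);
    printf("4 KiB in-capsule data:  ioccsz = %u\n",
           (sqe + inline_data) / 16);
    return 0;
}
```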
| 363 | 442 | /* Max response capsule size is cqe */ |
| 364 | 443 | id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16); |
| 365 | 444 | |
| .. | .. |
| 389 | 468 | |
| 390 | 469 | static void nvmet_execute_identify_ns(struct nvmet_req *req) |
| 391 | 470 | { |
| 392 | | - struct nvmet_ns *ns; |
| 471 | + struct nvmet_ctrl *ctrl = req->sq->ctrl; |
| 393 | 472 | struct nvme_id_ns *id; |
| 394 | 473 | u16 status = 0; |
| 395 | 474 | |
| 396 | 475 | if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) { |
| 476 | + req->error_loc = offsetof(struct nvme_identify, nsid); |
| 397 | 477 | status = NVME_SC_INVALID_NS | NVME_SC_DNR; |
| 398 | 478 | goto out; |
| 399 | 479 | } |
| .. | .. |
| 405 | 485 | } |
| 406 | 486 | |
| 407 | 487 | /* return an all zeroed buffer if we can't find an active namespace */ |
| 408 | | - ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid); |
| 409 | | - if (!ns) |
| 488 | + req->ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid); |
| 489 | + if (!req->ns) { |
| 490 | + status = 0; |
| 410 | 491 | goto done; |
| 492 | + } |
| 493 | + |
| 494 | + nvmet_ns_revalidate(req->ns); |
| 411 | 495 | |
| 412 | 496 | /* |
| 413 | 497 | * nuse = ncap = nsze isn't always true, but we have no way to find |
| 414 | 498 | * that out from the underlying device. |
| 415 | 499 | */ |
| 416 | | - id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift); |
| 417 | | - switch (req->port->ana_state[ns->anagrpid]) { |
| 500 | + id->ncap = id->nsze = |
| 501 | + cpu_to_le64(req->ns->size >> req->ns->blksize_shift); |
| 502 | + switch (req->port->ana_state[req->ns->anagrpid]) { |
| 418 | 503 | case NVME_ANA_INACCESSIBLE: |
| 419 | 504 | case NVME_ANA_PERSISTENT_LOSS: |
| 420 | 505 | break; |
| .. | .. |
| 422 | 507 | id->nuse = id->nsze; |
| 423 | 508 | break; |
| 424 | 509 | } |
| 510 | + |
| 511 | + if (req->ns->bdev) |
| 512 | + nvmet_bdev_set_limits(req->ns->bdev, id); |
| 425 | 513 | |
| 426 | 514 | /* |
| 427 | 515 | * We just provide a single LBA format that matches what the |
| .. | .. |
| 435 | 523 | * controllers, but also with any other user of the block device. |
| 436 | 524 | */ |
| 437 | 525 | id->nmic = (1 << 0); |
| 438 | | - id->anagrpid = cpu_to_le32(ns->anagrpid); |
| 526 | + id->anagrpid = cpu_to_le32(req->ns->anagrpid); |
| 439 | 527 | |
| 440 | | - memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid)); |
| 528 | + memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid)); |
| 441 | 529 | |
| 442 | | - id->lbaf[0].ds = ns->blksize_shift; |
| 530 | + id->lbaf[0].ds = req->ns->blksize_shift; |
| 443 | 531 | |
| 444 | | - if (ns->readonly) |
| 532 | + if (ctrl->pi_support && nvmet_ns_has_pi(req->ns)) { |
| 533 | + id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST | |
| 534 | + NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 | |
| 535 | + NVME_NS_DPC_PI_TYPE3; |
| 536 | + id->mc = NVME_MC_EXTENDED_LBA; |
| 537 | + id->dps = req->ns->pi_type; |
| 538 | + id->flbas = NVME_NS_FLBAS_META_EXT; |
| 539 | + id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size); |
| 540 | + } |
| 541 | + |
| 542 | + if (req->ns->readonly) |
| 445 | 543 | id->nsattr |= (1 << 0); |
| 446 | | - nvmet_put_namespace(ns); |
| 447 | 544 | done: |
| 448 | | - status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); |
| 545 | + if (!status) |
| 546 | + status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); |
| 547 | + |
| 449 | 548 | kfree(id); |
| 450 | 549 | out: |
| 451 | 550 | nvmet_req_complete(req, status); |
| .. | .. |
| 456 | 555 | static const int buf_size = NVME_IDENTIFY_DATA_SIZE; |
| 457 | 556 | struct nvmet_ctrl *ctrl = req->sq->ctrl; |
| 458 | 557 | struct nvmet_ns *ns; |
| 558 | + unsigned long idx; |
| 459 | 559 | u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid); |
| 460 | 560 | __le32 *list; |
| 461 | 561 | u16 status = 0; |
| .. | .. |
| 467 | 567 | goto out; |
| 468 | 568 | } |
| 469 | 569 | |
| 470 | | - rcu_read_lock(); |
| 471 | | - list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) { |
| 570 | + xa_for_each(&ctrl->subsys->namespaces, idx, ns) { |
| 472 | 571 | if (ns->nsid <= min_nsid) |
| 473 | 572 | continue; |
| 474 | 573 | list[i++] = cpu_to_le32(ns->nsid); |
| 475 | 574 | if (i == buf_size / sizeof(__le32)) |
| 476 | 575 | break; |
| 477 | 576 | } |
| 478 | | - rcu_read_unlock(); |
| 479 | 577 | |
| 480 | 578 | status = nvmet_copy_to_sgl(req, 0, list, buf_size); |
| 481 | 579 | |
| .. | .. |
| 514 | 612 | |
| 515 | 613 | ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid); |
| 516 | 614 | if (!ns) { |
| 615 | + req->error_loc = offsetof(struct nvme_identify, nsid); |
| 517 | 616 | status = NVME_SC_INVALID_NS | NVME_SC_DNR; |
| 518 | 617 | goto out; |
| 519 | 618 | } |
| .. | .. |
| 542 | 641 | nvmet_req_complete(req, status); |
| 543 | 642 | } |
| 544 | 643 | |
| 644 | +static void nvmet_execute_identify(struct nvmet_req *req) |
| 645 | +{ |
| 646 | + if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE)) |
| 647 | + return; |
| 648 | + |
| 649 | + switch (req->cmd->identify.cns) { |
| 650 | + case NVME_ID_CNS_NS: |
| 651 | + return nvmet_execute_identify_ns(req); |
| 652 | + case NVME_ID_CNS_CTRL: |
| 653 | + return nvmet_execute_identify_ctrl(req); |
| 654 | + case NVME_ID_CNS_NS_ACTIVE_LIST: |
| 655 | + return nvmet_execute_identify_nslist(req); |
| 656 | + case NVME_ID_CNS_NS_DESC_LIST: |
| 657 | + return nvmet_execute_identify_desclist(req); |
| 658 | + } |
| 659 | + |
| 660 | + pr_debug("unhandled identify cns %d on qid %d\n", |
| 661 | + req->cmd->identify.cns, req->sq->qid); |
| 662 | + req->error_loc = offsetof(struct nvme_identify, cns); |
| 663 | + nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR); |
| 664 | +} |
| 665 | + |
| 545 | 666 | /* |
| 546 | 667 | * A "minimum viable" abort implementation: the command is mandatory in the |
| 547 | 668 | * spec, but we are not required to do any useful work. We couldn't really |
| .. | .. |
| 551 | 672 | */ |
| 552 | 673 | static void nvmet_execute_abort(struct nvmet_req *req) |
| 553 | 674 | { |
| 675 | + if (!nvmet_check_transfer_len(req, 0)) |
| 676 | + return; |
| 554 | 677 | nvmet_set_result(req, 1); |
| 555 | 678 | nvmet_req_complete(req, 0); |
| 556 | 679 | } |
| .. | .. |
| 571 | 694 | |
| 572 | 695 | static u16 nvmet_set_feat_write_protect(struct nvmet_req *req) |
| 573 | 696 | { |
| 574 | | - u32 write_protect = le32_to_cpu(req->cmd->common.cdw10[1]); |
| 697 | + u32 write_protect = le32_to_cpu(req->cmd->common.cdw11); |
| 575 | 698 | struct nvmet_subsys *subsys = req->sq->ctrl->subsys; |
| 576 | 699 | u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE; |
| 577 | 700 | |
| 578 | 701 | req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid); |
| 579 | | - if (unlikely(!req->ns)) |
| 702 | + if (unlikely(!req->ns)) { |
| 703 | + req->error_loc = offsetof(struct nvme_common_command, nsid); |
| 580 | 704 | return status; |
| 705 | + } |
| 581 | 706 | |
| 582 | 707 | mutex_lock(&subsys->lock); |
| 583 | 708 | switch (write_protect) { |
| .. | .. |
| 601 | 726 | return status; |
| 602 | 727 | } |
| 603 | 728 | |
| 604 | | -static void nvmet_execute_set_features(struct nvmet_req *req) |
| 729 | +u16 nvmet_set_feat_kato(struct nvmet_req *req) |
| 730 | +{ |
| 731 | + u32 val32 = le32_to_cpu(req->cmd->common.cdw11); |
| 732 | + |
| 733 | + nvmet_stop_keep_alive_timer(req->sq->ctrl); |
| 734 | + req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000); |
| 735 | + nvmet_start_keep_alive_timer(req->sq->ctrl); |
| 736 | + |
| 737 | + nvmet_set_result(req, req->sq->ctrl->kato); |
| 738 | + |
| 739 | + return 0; |
| 740 | +} |
| 741 | + |
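Editor's note: the host programs KATO in milliseconds via cdw11; `nvmet_set_feat_kato()` stores whole seconds, rounded up, and `nvmet_get_feat_kato()` further down reports it back multiplied by 1000. A worked example of the round trip:

```c
#include <stdio.h>

/* KATO arrives in milliseconds (cdw11), is stored in whole seconds
 * rounded up, and is reported back scaled by 1000. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned int kato_ms = 15500;
    unsigned int kato_s = DIV_ROUND_UP(kato_ms, 1000);

    printf("%u ms -> stored as %u s -> reported as %u ms\n",
           kato_ms, kato_s, kato_s * 1000);
    return 0;
}
```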
| 742 | +u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask) |
| 743 | +{ |
| 744 | + u32 val32 = le32_to_cpu(req->cmd->common.cdw11); |
| 745 | + |
| 746 | + if (val32 & ~mask) { |
| 747 | + req->error_loc = offsetof(struct nvme_common_command, cdw11); |
| 748 | + return NVME_SC_INVALID_FIELD | NVME_SC_DNR; |
| 749 | + } |
| 750 | + |
| 751 | + WRITE_ONCE(req->sq->ctrl->aen_enabled, val32); |
| 752 | + nvmet_set_result(req, val32); |
| 753 | + |
| 754 | + return 0; |
| 755 | +} |
| 756 | + |
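Editor's note: `nvmet_set_feat_async_event()` rejects any AEN-configuration bit outside the mask the caller supports (`NVMET_AEN_CFG_ALL` at the call site below). A sketch of the `val32 & ~mask` screen; the bit positions here are stand-ins, not the kernel's constants:

```c
#include <stdio.h>

/* Any requested AEN bit outside the supported mask fails the whole
 * Set Features call. Bit positions are stand-ins, not the kernel's
 * NVME_AEN_CFG_* values. */
int main(void)
{
    unsigned int supported = (1u << 8) | (1u << 9) | (1u << 11);
    unsigned int requested = (1u << 8) | (1u << 31);

    if (requested & ~supported)
        printf("rejected, offending bits 0x%08x\n",
               requested & ~supported);
    else
        printf("accepted 0x%08x\n", requested);
    return 0;
}
```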
| 757 | +void nvmet_execute_set_features(struct nvmet_req *req) |
| 605 | 758 | { |
| 606 | 759 | struct nvmet_subsys *subsys = req->sq->ctrl->subsys; |
| 607 | | - u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]); |
| 608 | | - u32 val32; |
| 760 | + u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); |
| 761 | + u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11); |
| 609 | 762 | u16 status = 0; |
| 763 | + u16 nsqr; |
| 764 | + u16 ncqr; |
| 765 | + |
| 766 | + if (!nvmet_check_transfer_len(req, 0)) |
| 767 | + return; |
| 610 | 768 | |
| 611 | 769 | switch (cdw10 & 0xff) { |
| 612 | 770 | case NVME_FEAT_NUM_QUEUES: |
| 771 | + ncqr = (cdw11 >> 16) & 0xffff; |
| 772 | + nsqr = cdw11 & 0xffff; |
| 773 | + if (ncqr == 0xffff || nsqr == 0xffff) { |
| 774 | + status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; |
| 775 | + break; |
| 776 | + } |
| 613 | 777 | nvmet_set_result(req, |
| 614 | 778 | (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16)); |
| 615 | 779 | break; |
| 616 | 780 | case NVME_FEAT_KATO: |
| 617 | | - val32 = le32_to_cpu(req->cmd->common.cdw10[1]); |
| 618 | | - req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000); |
| 619 | | - nvmet_set_result(req, req->sq->ctrl->kato); |
| 781 | + status = nvmet_set_feat_kato(req); |
| 620 | 782 | break; |
| 621 | 783 | case NVME_FEAT_ASYNC_EVENT: |
| 622 | | - val32 = le32_to_cpu(req->cmd->common.cdw10[1]); |
| 623 | | - if (val32 & ~NVMET_AEN_CFG_ALL) { |
| 624 | | - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; |
| 625 | | - break; |
| 626 | | - } |
| 627 | | - |
| 628 | | - WRITE_ONCE(req->sq->ctrl->aen_enabled, val32); |
| 629 | | - nvmet_set_result(req, val32); |
| 784 | + status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL); |
| 630 | 785 | break; |
| 631 | 786 | case NVME_FEAT_HOST_ID: |
| 632 | 787 | status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; |
| .. | .. |
| 635 | 790 | status = nvmet_set_feat_write_protect(req); |
| 636 | 791 | break; |
| 637 | 792 | default: |
| 793 | + req->error_loc = offsetof(struct nvme_common_command, cdw10); |
| 638 | 794 | status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; |
| 639 | 795 | break; |
| 640 | 796 | } |
| .. | .. |
| 648 | 804 | u32 result; |
| 649 | 805 | |
| 650 | 806 | req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid); |
| 651 | | - if (!req->ns) |
| 807 | + if (!req->ns) { |
| 808 | + req->error_loc = offsetof(struct nvme_common_command, nsid); |
| 652 | 809 | return NVME_SC_INVALID_NS | NVME_SC_DNR; |
| 653 | | - |
| 810 | + } |
| 654 | 811 | mutex_lock(&subsys->lock); |
| 655 | 812 | if (req->ns->readonly == true) |
| 656 | 813 | result = NVME_NS_WRITE_PROTECT; |
| .. | .. |
| 662 | 819 | return 0; |
| 663 | 820 | } |
| 664 | 821 | |
| 665 | | -static void nvmet_execute_get_features(struct nvmet_req *req) |
| 822 | +void nvmet_get_feat_kato(struct nvmet_req *req) |
| 823 | +{ |
| 824 | + nvmet_set_result(req, req->sq->ctrl->kato * 1000); |
| 825 | +} |
| 826 | + |
| 827 | +void nvmet_get_feat_async_event(struct nvmet_req *req) |
| 828 | +{ |
| 829 | + nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled)); |
| 830 | +} |
| 831 | + |
| 832 | +void nvmet_execute_get_features(struct nvmet_req *req) |
| 666 | 833 | { |
| 667 | 834 | struct nvmet_subsys *subsys = req->sq->ctrl->subsys; |
| 668 | | - u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]); |
| 835 | + u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); |
| 669 | 836 | u16 status = 0; |
| 837 | + |
| 838 | + if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10))) |
| 839 | + return; |
| 670 | 840 | |
| 671 | 841 | switch (cdw10 & 0xff) { |
| 672 | 842 | /* |
| .. | .. |
| 691 | 861 | break; |
| 692 | 862 | #endif |
| 693 | 863 | case NVME_FEAT_ASYNC_EVENT: |
| 694 | | - nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled)); |
| 864 | + nvmet_get_feat_async_event(req); |
| 695 | 865 | break; |
| 696 | 866 | case NVME_FEAT_VOLATILE_WC: |
| 697 | 867 | nvmet_set_result(req, 1); |
| .. | .. |
| 701 | 871 | (subsys->max_qid-1) | ((subsys->max_qid-1) << 16)); |
| 702 | 872 | break; |
| 703 | 873 | case NVME_FEAT_KATO: |
| 704 | | - nvmet_set_result(req, req->sq->ctrl->kato * 1000); |
| 874 | + nvmet_get_feat_kato(req); |
| 705 | 875 | break; |
| 706 | 876 | case NVME_FEAT_HOST_ID: |
| 707 | 877 | /* need 128-bit host identifier flag */ |
| 708 | | - if (!(req->cmd->common.cdw10[1] & cpu_to_le32(1 << 0))) { |
| 878 | + if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) { |
| 879 | + req->error_loc = |
| 880 | + offsetof(struct nvme_common_command, cdw11); |
| 709 | 881 | status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; |
| 710 | 882 | break; |
| 711 | 883 | } |
| .. | .. |
| 717 | 889 | status = nvmet_get_feat_write_protect(req); |
| 718 | 890 | break; |
| 719 | 891 | default: |
| 892 | + req->error_loc = |
| 893 | + offsetof(struct nvme_common_command, cdw10); |
| 720 | 894 | status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; |
| 721 | 895 | break; |
| 722 | 896 | } |
| .. | .. |
| 724 | 898 | nvmet_req_complete(req, status); |
| 725 | 899 | } |
| 726 | 900 | |
| 727 | | -static void nvmet_execute_async_event(struct nvmet_req *req) |
| 901 | +void nvmet_execute_async_event(struct nvmet_req *req) |
| 728 | 902 | { |
| 729 | 903 | struct nvmet_ctrl *ctrl = req->sq->ctrl; |
| 904 | + |
| 905 | + if (!nvmet_check_transfer_len(req, 0)) |
| 906 | + return; |
| 730 | 907 | |
| 731 | 908 | mutex_lock(&ctrl->lock); |
| 732 | 909 | if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) { |
| .. | .. |
| 740 | 917 | schedule_work(&ctrl->async_event_work); |
| 741 | 918 | } |
| 742 | 919 | |
| 743 | | -static void nvmet_execute_keep_alive(struct nvmet_req *req) |
| 920 | +void nvmet_execute_keep_alive(struct nvmet_req *req) |
| 744 | 921 | { |
| 745 | 922 | struct nvmet_ctrl *ctrl = req->sq->ctrl; |
| 923 | + |
| 924 | + if (!nvmet_check_transfer_len(req, 0)) |
| 925 | + return; |
| 746 | 926 | |
| 747 | 927 | pr_debug("ctrl %d update keep-alive timer for %d secs\n", |
| 748 | 928 | ctrl->cntlid, ctrl->kato); |
| .. | .. |
| 756 | 936 | struct nvme_command *cmd = req->cmd; |
| 757 | 937 | u16 ret; |
| 758 | 938 | |
| 939 | + if (nvme_is_fabrics(cmd)) |
| 940 | + return nvmet_parse_fabrics_cmd(req); |
| 941 | + if (req->sq->ctrl->subsys->type == NVME_NQN_DISC) |
| 942 | + return nvmet_parse_discovery_cmd(req); |
| 943 | + |
| 759 | 944 | ret = nvmet_check_ctrl_status(req, cmd); |
| 760 | 945 | if (unlikely(ret)) |
| 761 | 946 | return ret; |
| 762 | 947 | |
| 948 | + if (nvmet_req_passthru_ctrl(req)) |
| 949 | + return nvmet_parse_passthru_admin_cmd(req); |
| 950 | + |
| 763 | 951 | switch (cmd->common.opcode) { |
| 764 | 952 | case nvme_admin_get_log_page: |
| 765 | | - req->data_len = nvmet_get_log_page_len(cmd); |
| 766 | | - |
| 767 | | - switch (cmd->get_log_page.lid) { |
| 768 | | - case NVME_LOG_ERROR: |
| 769 | | - /* |
| 770 | | - * We currently never set the More bit in the status |
| 771 | | - * field, so all error log entries are invalid and can |
| 772 | | - * be zeroed out. This is called a minum viable |
| 773 | | - * implementation (TM) of this mandatory log page. |
| 774 | | - */ |
| 775 | | - req->execute = nvmet_execute_get_log_page_noop; |
| 776 | | - return 0; |
| 777 | | - case NVME_LOG_SMART: |
| 778 | | - req->execute = nvmet_execute_get_log_page_smart; |
| 779 | | - return 0; |
| 780 | | - case NVME_LOG_FW_SLOT: |
| 781 | | - /* |
| 782 | | - * We only support a single firmware slot which always |
| 783 | | - * is active, so we can zero out the whole firmware slot |
| 784 | | - * log and still claim to fully implement this mandatory |
| 785 | | - * log page. |
| 786 | | - */ |
| 787 | | - req->execute = nvmet_execute_get_log_page_noop; |
| 788 | | - return 0; |
| 789 | | - case NVME_LOG_CHANGED_NS: |
| 790 | | - req->execute = nvmet_execute_get_log_changed_ns; |
| 791 | | - return 0; |
| 792 | | - case NVME_LOG_CMD_EFFECTS: |
| 793 | | - req->execute = nvmet_execute_get_log_cmd_effects_ns; |
| 794 | | - return 0; |
| 795 | | - case NVME_LOG_ANA: |
| 796 | | - req->execute = nvmet_execute_get_log_page_ana; |
| 797 | | - return 0; |
| 798 | | - } |
| 799 | | - break; |
| 953 | + req->execute = nvmet_execute_get_log_page; |
| 954 | + return 0; |
| 800 | 955 | case nvme_admin_identify: |
| 801 | | - req->data_len = NVME_IDENTIFY_DATA_SIZE; |
| 802 | | - switch (cmd->identify.cns) { |
| 803 | | - case NVME_ID_CNS_NS: |
| 804 | | - req->execute = nvmet_execute_identify_ns; |
| 805 | | - return 0; |
| 806 | | - case NVME_ID_CNS_CTRL: |
| 807 | | - req->execute = nvmet_execute_identify_ctrl; |
| 808 | | - return 0; |
| 809 | | - case NVME_ID_CNS_NS_ACTIVE_LIST: |
| 810 | | - req->execute = nvmet_execute_identify_nslist; |
| 811 | | - return 0; |
| 812 | | - case NVME_ID_CNS_NS_DESC_LIST: |
| 813 | | - req->execute = nvmet_execute_identify_desclist; |
| 814 | | - return 0; |
| 815 | | - } |
| 816 | | - break; |
| 956 | + req->execute = nvmet_execute_identify; |
| 957 | + return 0; |
| 817 | 958 | case nvme_admin_abort_cmd: |
| 818 | 959 | req->execute = nvmet_execute_abort; |
| 819 | | - req->data_len = 0; |
| 820 | 960 | return 0; |
| 821 | 961 | case nvme_admin_set_features: |
| 822 | 962 | req->execute = nvmet_execute_set_features; |
| 823 | | - req->data_len = 0; |
| 824 | 963 | return 0; |
| 825 | 964 | case nvme_admin_get_features: |
| 826 | 965 | req->execute = nvmet_execute_get_features; |
| 827 | | - req->data_len = 0; |
| 828 | 966 | return 0; |
| 829 | 967 | case nvme_admin_async_event: |
| 830 | 968 | req->execute = nvmet_execute_async_event; |
| 831 | | - req->data_len = 0; |
| 832 | 969 | return 0; |
| 833 | 970 | case nvme_admin_keep_alive: |
| 834 | 971 | req->execute = nvmet_execute_keep_alive; |
| 835 | | - req->data_len = 0; |
| 836 | 972 | return 0; |
| 837 | 973 | } |
| 838 | 974 | |
| 839 | | - pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode, |
| 975 | + pr_debug("unhandled cmd %d on qid %d\n", cmd->common.opcode, |
| 840 | 976 | req->sq->qid); |
| 977 | + req->error_loc = offsetof(struct nvme_common_command, opcode); |
| 841 | 978 | return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; |
| 842 | 979 | } |