| .. | .. |
| 1 | +// SPDX-License-Identifier: GPL-2.0 |
| 1 | 2 | /* |
| 2 | 3 |  * NVMe I/O command implementation. |
| 3 | 4 |  * Copyright (c) 2015-2016 HGST, a Western Digital Company. |
| 4 | | - * |
| 5 | | - * This program is free software; you can redistribute it and/or modify it |
| 6 | | - * under the terms and conditions of the GNU General Public License, |
| 7 | | - * version 2, as published by the Free Software Foundation. |
| 8 | | - * |
| 9 | | - * This program is distributed in the hope it will be useful, but WITHOUT |
| 10 | | - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | | - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 12 | | - * more details. |
| 13 | 5 |  */ |
| 14 | 6 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 15 | 7 | #include <linux/blkdev.h> |
| 16 | 8 | #include <linux/module.h> |
| 17 | 9 | #include "nvmet.h" |
| 10 | + |
| 11 | +void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id) |
| 12 | +{ |
| 13 | +        const struct queue_limits *ql = &bdev_get_queue(bdev)->limits; |
| 14 | +        /* Number of logical blocks per physical block. */ |
| 15 | +        const u32 lpp = ql->physical_block_size / ql->logical_block_size; |
| 16 | +        /* Logical blocks per physical block, 0's based. */ |
| 17 | +        const __le16 lpp0b = to0based(lpp); |
| 18 | + |
| 19 | +        /* |
| 20 | +         * For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN, |
| 21 | +         * NAWUPF, and NACWU are defined for this namespace and should be |
| 22 | +         * used by the host for this namespace instead of the AWUN, AWUPF, |
| 23 | +         * and ACWU fields in the Identify Controller data structure. If |
| 24 | +         * any of these fields is zero, the corresponding field from the |
| 25 | +         * Identify Controller data structure should be used instead. |
| 26 | +         */ |
| 27 | +        id->nsfeat |= 1 << 1; |
| 28 | +        id->nawun = lpp0b; |
| 29 | +        id->nawupf = lpp0b; |
| 30 | +        id->nacwu = lpp0b; |
| 31 | + |
| 32 | +        /* |
| 33 | +         * Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and |
| 34 | +         * NOWS are defined for this namespace and should be used by |
| 35 | +         * the host for I/O optimization. |
| 36 | +         */ |
| 37 | +        id->nsfeat |= 1 << 4; |
| 38 | +        /* NPWG = Namespace Preferred Write Granularity. 0's based */ |
| 39 | +        id->npwg = lpp0b; |
| 40 | +        /* NPWA = Namespace Preferred Write Alignment. 0's based */ |
| 41 | +        id->npwa = id->npwg; |
| 42 | +        /* NPDG = Namespace Preferred Deallocate Granularity. 0's based */ |
| 43 | +        id->npdg = to0based(ql->discard_granularity / ql->logical_block_size); |
| 44 | +        /* NPDA = Namespace Preferred Deallocate Alignment. 0's based */ |
| 45 | +        id->npda = id->npdg; |
| 46 | +        /* NOWS = Namespace Optimal Write Size */ |
| 47 | +        id->nows = to0based(ql->io_opt / ql->logical_block_size); |
| 48 | +} |
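
All of these fields use NVMe's 0's-based convention: a stored value of N means N + 1 units, so a device with 4096-byte physical and 512-byte logical blocks gives lpp = 8 and lpp0b = 7. The to0based() helper referenced above comes from nvmet.h; a sketch of its expected shape (an assumption about the exact definition), clamping into the 16-bit field before subtracting one:

```c
/* Sketch of the to0based() helper from nvmet.h: clamp to [1, 0xffff],
 * then convert to the 0's-based little-endian value NVMe expects. */
static inline __le16 to0based(u32 a)
{
	return cpu_to_le16(max(1U, min(0xffffU, a)) - 1);
}
```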
| 49 | + |
| 50 | +static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns) |
| 51 | +{ |
| 52 | +        struct blk_integrity *bi = bdev_get_integrity(ns->bdev); |
| 53 | + |
| 54 | +        if (bi) { |
| 55 | +                ns->metadata_size = bi->tuple_size; |
| 56 | +                if (bi->profile == &t10_pi_type1_crc) |
| 57 | +                        ns->pi_type = NVME_NS_DPS_PI_TYPE1; |
| 58 | +                else if (bi->profile == &t10_pi_type3_crc) |
| 59 | +                        ns->pi_type = NVME_NS_DPS_PI_TYPE3; |
| 60 | +                else |
| 61 | +                        /* Unsupported metadata type */ |
| 62 | +                        ns->metadata_size = 0; |
| 63 | +        } |
| 64 | +} |
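
The pi_type and metadata_size recorded here feed the nvmet_ns_has_pi() check used by nvmet_bdev_parse_io_cmd() further down. A sketch of that helper, assuming the nvmet.h definition requires the per-block metadata to be exactly one T10 PI tuple:

```c
#include <linux/t10-pi.h>

/* Sketch: PI is usable only when a supported type was detected and the
 * per-block metadata holds exactly one T10 PI tuple (8 bytes). */
static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
{
	return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
}
```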
| 18 | 65 | |
| 19 | 66 | int nvmet_bdev_ns_enable(struct nvmet_ns *ns) |
| 20 | 67 | { |
| .. | .. |
| 33 | 80 |         } |
| 34 | 81 |         ns->size = i_size_read(ns->bdev->bd_inode); |
| 35 | 82 |         ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev)); |
| 83 | + |
| 84 | +        ns->pi_type = 0; |
| 85 | +        ns->metadata_size = 0; |
| 86 | +        if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10)) |
| 87 | +                nvmet_bdev_ns_enable_integrity(ns); |
| 88 | + |
| 36 | 89 |         return 0; |
| 37 | 90 | } |
| 38 | 91 | |
| .. | .. |
| 44 | 97 |         } |
| 45 | 98 | } |
| 46 | 99 | |
| 100 | +void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns) |
| 101 | +{ |
| 102 | +        ns->size = i_size_read(ns->bdev->bd_inode); |
| 103 | +} |
| 104 | + |
| 105 | +static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts) |
| 106 | +{ |
| 107 | +        u16 status = NVME_SC_SUCCESS; |
| 108 | + |
| 109 | +        if (likely(blk_sts == BLK_STS_OK)) |
| 110 | +                return status; |
| 111 | +        /* |
| 112 | +         * Right now there exists an M : 1 mapping from block layer errors |
| 113 | +         * to NVMe status codes (see nvme_error_status()). For consistency, |
| 114 | +         * when we reverse-map we use the most appropriate NVMe status code |
| 115 | +         * from the group of NVMe status codes used in nvme_error_status(). |
| 116 | +         */ |
| 117 | +        switch (blk_sts) { |
| 118 | +        case BLK_STS_NOSPC: |
| 119 | +                status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR; |
| 120 | +                req->error_loc = offsetof(struct nvme_rw_command, length); |
| 121 | +                break; |
| 122 | +        case BLK_STS_TARGET: |
| 123 | +                status = NVME_SC_LBA_RANGE | NVME_SC_DNR; |
| 124 | +                req->error_loc = offsetof(struct nvme_rw_command, slba); |
| 125 | +                break; |
| 126 | +        case BLK_STS_NOTSUPP: |
| 127 | +                req->error_loc = offsetof(struct nvme_common_command, opcode); |
| 128 | +                switch (req->cmd->common.opcode) { |
| 129 | +                case nvme_cmd_dsm: |
| 130 | +                case nvme_cmd_write_zeroes: |
| 131 | +                        status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR; |
| 132 | +                        break; |
| 133 | +                default: |
| 134 | +                        status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR; |
| 135 | +                } |
| 136 | +                break; |
| 137 | +        case BLK_STS_MEDIUM: |
| 138 | +                status = NVME_SC_ACCESS_DENIED; |
| 139 | +                req->error_loc = offsetof(struct nvme_rw_command, nsid); |
| 140 | +                break; |
| 141 | +        case BLK_STS_IOERR: |
| 142 | +        default: |
| 143 | +                status = NVME_SC_INTERNAL | NVME_SC_DNR; |
| 144 | +                req->error_loc = offsetof(struct nvme_common_command, opcode); |
| 145 | +        } |
| 146 | + |
| 147 | +        switch (req->cmd->common.opcode) { |
| 148 | +        case nvme_cmd_read: |
| 149 | +        case nvme_cmd_write: |
| 150 | +                req->error_slba = le64_to_cpu(req->cmd->rw.slba); |
| 151 | +                break; |
| 152 | +        case nvme_cmd_write_zeroes: |
| 153 | +                req->error_slba = |
| 154 | +                        le64_to_cpu(req->cmd->write_zeroes.slba); |
| 155 | +                break; |
| 156 | +        default: |
| 157 | +                req->error_slba = 0; |
| 158 | +        } |
| 159 | +        return status; |
| 160 | +} |
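
The discard and write-zeroes paths below rely on errno_to_nvme_status() for the same reverse mapping, starting from a kernel errno instead of a blk_status_t. A rough, abbreviated sketch of that counterpart (the real helper lives in core.c and covers more errno values; the name suffix marks this as illustrative):

```c
/* Abbreviated sketch of errno_to_nvme_status(); the full version
 * handles additional errno values. */
static u16 errno_to_nvme_status_sketch(struct nvmet_req *req, int errno)
{
	switch (errno) {
	case 0:
		return NVME_SC_SUCCESS;
	case -ENOSPC:		/* mirrors BLK_STS_NOSPC above */
		req->error_loc = offsetof(struct nvme_rw_command, length);
		return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
	case -EOPNOTSUPP:	/* mirrors BLK_STS_NOTSUPP above */
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
	default:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	}
}
```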
| 161 | + |
| 47 | 162 | static void nvmet_bio_done(struct bio *bio) |
| 48 | 163 | { |
| 49 | 164 |         struct nvmet_req *req = bio->bi_private; |
| 50 | 165 | |
| 51 | | -        nvmet_req_complete(req, |
| 52 | | -                bio->bi_status ? NVME_SC_INTERNAL | NVME_SC_DNR : 0); |
| 53 | | - |
| 166 | +        nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status)); |
| 54 | 167 |         if (bio != &req->b.inline_bio) |
| 55 | 168 |                 bio_put(bio); |
| 56 | 169 | } |
| 57 | 170 | |
| 171 | +#ifdef CONFIG_BLK_DEV_INTEGRITY |
| 172 | +static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio, |
| 173 | +                struct sg_mapping_iter *miter) |
| 174 | +{ |
| 175 | +        struct blk_integrity *bi; |
| 176 | +        struct bio_integrity_payload *bip; |
| 177 | +        struct block_device *bdev = req->ns->bdev; |
| 178 | +        int rc; |
| 179 | +        size_t resid, len; |
| 180 | + |
| 181 | +        bi = bdev_get_integrity(bdev); |
| 182 | +        if (unlikely(!bi)) { |
| 183 | +                pr_err("Unable to locate bio_integrity\n"); |
| 184 | +                return -ENODEV; |
| 185 | +        } |
| 186 | + |
| 187 | +        bip = bio_integrity_alloc(bio, GFP_NOIO, |
| 188 | +                min_t(unsigned int, req->metadata_sg_cnt, BIO_MAX_PAGES)); |
| 189 | +        if (IS_ERR(bip)) { |
| 190 | +                pr_err("Unable to allocate bio_integrity_payload\n"); |
| 191 | +                return PTR_ERR(bip); |
| 192 | +        } |
| 193 | + |
| 194 | +        bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio)); |
| 195 | +        /* virtual start sector must be in integrity interval units */ |
| 196 | +        bip_set_seed(bip, bio->bi_iter.bi_sector >> |
| 197 | +                (bi->interval_exp - SECTOR_SHIFT)); |
| 198 | + |
| 199 | +        resid = bip->bip_iter.bi_size; |
| 200 | +        while (resid > 0 && sg_miter_next(miter)) { |
| 201 | +                len = min_t(size_t, miter->length, resid); |
| 202 | +                rc = bio_integrity_add_page(bio, miter->page, len, |
| 203 | +                        offset_in_page(miter->addr)); |
| 204 | +                if (unlikely(rc != len)) { |
| 205 | +                        pr_err("bio_integrity_add_page() failed; %d\n", rc); |
| 206 | +                        sg_miter_stop(miter); |
| 207 | +                        return -ENOMEM; |
| 208 | +                } |
| 209 | + |
| 210 | +                resid -= len; |
| 211 | +                if (len < miter->length) |
| 212 | +                        miter->consumed -= miter->length - len; |
| 213 | +        } |
| 214 | +        sg_miter_stop(miter); |
| 215 | + |
| 216 | +        return 0; |
| 217 | +} |
| 218 | +#else |
| 219 | +static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio, |
| 220 | +                struct sg_mapping_iter *miter) |
| 221 | +{ |
| 222 | +        return -EINVAL; |
| 223 | +} |
| 224 | +#endif /* CONFIG_BLK_DEV_INTEGRITY */ |
| 225 | + |
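
The seed set through bip_set_seed() is expressed in integrity-interval units rather than 512-byte sectors; the shift converts between the two. A minimal sketch of that conversion with hypothetical values (the helper name is illustrative, not from the source):

```c
/* Sketch of the seed conversion in nvmet_bdev_alloc_bip(). With 4096-byte
 * PI intervals, interval_exp == 12, so 512-byte sector 80 maps to
 * 80 >> (12 - 9) == 10, the tenth PI interval on the device. */
static inline sector_t pi_interval_seed(sector_t bi_sector,
		unsigned char interval_exp)
{
	return bi_sector >> (interval_exp - SECTOR_SHIFT);
}
```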
| 58 | 226 | static void nvmet_bdev_execute_rw(struct nvmet_req *req) |
| 59 | 227 | { |
| 60 | 228 |         int sg_cnt = req->sg_cnt; |
| 61 | | -        struct bio *bio = &req->b.inline_bio; |
| 229 | +        struct bio *bio; |
| 62 | 230 |         struct scatterlist *sg; |
| 231 | +        struct blk_plug plug; |
| 63 | 232 |         sector_t sector; |
| 64 | | -        blk_qc_t cookie; |
| 65 | | -        int op, op_flags = 0, i; |
| 233 | +        int op, i, rc; |
| 234 | +        struct sg_mapping_iter prot_miter; |
| 235 | +        unsigned int iter_flags; |
| 236 | +        unsigned int total_len = nvmet_rw_data_len(req) + req->metadata_len; |
| 237 | + |
| 238 | +        if (!nvmet_check_transfer_len(req, total_len)) |
| 239 | +                return; |
| 66 | 240 | |
| 67 | 241 |         if (!req->sg_cnt) { |
| 68 | 242 |                 nvmet_req_complete(req, 0); |
| .. | .. |
| 70 | 244 |         } |
| 71 | 245 | |
| 72 | 246 |         if (req->cmd->rw.opcode == nvme_cmd_write) { |
| 73 | | -                op = REQ_OP_WRITE; |
| 74 | | -                op_flags = REQ_SYNC | REQ_IDLE; |
| 247 | +                op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE; |
| 75 | 248 |                 if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA)) |
| 76 | | -                        op_flags |= REQ_FUA; |
| 249 | +                        op |= REQ_FUA; |
| 250 | +                iter_flags = SG_MITER_TO_SG; |
| 77 | 251 |         } else { |
| 78 | 252 |                 op = REQ_OP_READ; |
| 253 | +                iter_flags = SG_MITER_FROM_SG; |
| 79 | 254 |         } |
| 80 | 255 | |
| 81 | | -        sector = le64_to_cpu(req->cmd->rw.slba); |
| 82 | | -        sector <<= (req->ns->blksize_shift - 9); |
| 256 | +        if (is_pci_p2pdma_page(sg_page(req->sg))) |
| 257 | +                op |= REQ_NOMERGE; |
| 83 | 258 | |
| 84 | | -        bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); |
| 259 | +        sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba); |
| 260 | + |
| 261 | +        if (nvmet_use_inline_bvec(req)) { |
| 262 | +                bio = &req->b.inline_bio; |
| 263 | +                bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); |
| 264 | +        } else { |
| 265 | +                bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES)); |
| 266 | +        } |
| 85 | 267 |         bio_set_dev(bio, req->ns->bdev); |
| 86 | 268 |         bio->bi_iter.bi_sector = sector; |
| 87 | 269 |         bio->bi_private = req; |
| 88 | 270 |         bio->bi_end_io = nvmet_bio_done; |
| 89 | | -        bio_set_op_attrs(bio, op, op_flags); |
| 271 | +        bio->bi_opf = op; |
| 272 | + |
| 273 | +        blk_start_plug(&plug); |
| 274 | +        if (req->metadata_len) |
| 275 | +                sg_miter_start(&prot_miter, req->metadata_sg, |
| 276 | +                        req->metadata_sg_cnt, iter_flags); |
| 90 | 277 | |
| 91 | 278 |         for_each_sg(req->sg, sg, req->sg_cnt, i) { |
| 92 | 279 |                 while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) |
| 93 | 280 |                                 != sg->length) { |
| 94 | 281 |                         struct bio *prev = bio; |
| 95 | 282 | |
| 283 | +                        if (req->metadata_len) { |
| 284 | +                                rc = nvmet_bdev_alloc_bip(req, bio, |
| 285 | +                                                &prot_miter); |
| 286 | +                                if (unlikely(rc)) { |
| 287 | +                                        bio_io_error(bio); |
| 288 | +                                        return; |
| 289 | +                                } |
| 290 | +                        } |
| 291 | + |
| 96 | 292 |                         bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES)); |
| 97 | 293 |                         bio_set_dev(bio, req->ns->bdev); |
| 98 | 294 |                         bio->bi_iter.bi_sector = sector; |
| 99 | | -                        bio_set_op_attrs(bio, op, op_flags); |
| 295 | +                        bio->bi_opf = op; |
| 100 | 296 | |
| 101 | 297 |                         bio_chain(bio, prev); |
| 102 | 298 |                         submit_bio(prev); |
| .. | .. |
| 106 | 302 |                 sg_cnt--; |
| 107 | 303 |         } |
| 108 | 304 | |
| 109 | | -        cookie = submit_bio(bio); |
| 305 | +        if (req->metadata_len) { |
| 306 | +                rc = nvmet_bdev_alloc_bip(req, bio, &prot_miter); |
| 307 | +                if (unlikely(rc)) { |
| 308 | +                        bio_io_error(bio); |
| 309 | +                        return; |
| 310 | +                } |
| 311 | +        } |
| 110 | 312 | |
| 111 | | -        blk_poll(bdev_get_queue(req->ns->bdev), cookie); |
| 313 | +        submit_bio(bio); |
| 314 | +        blk_finish_plug(&plug); |
| 112 | 315 | } |
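
nvmet_lba_to_sect() replaces the open-coded "slba << (blksize_shift - 9)" conversions that the old code used here and in the discard and write-zeroes paths. A sketch of the helper as expected in nvmet.h (an assumption about the exact definition):

```c
/* Sketch: convert a little-endian namespace LBA (in logical blocks)
 * to a 512-byte kernel sector number. */
static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
{
	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
}
```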
| 113 | 316 | |
| 114 | 317 | static void nvmet_bdev_execute_flush(struct nvmet_req *req) |
| 115 | 318 | { |
| 116 | 319 |         struct bio *bio = &req->b.inline_bio; |
| 320 | + |
| 321 | +        if (!nvmet_check_transfer_len(req, 0)) |
| 322 | +                return; |
| 117 | 323 | |
| 118 | 324 |         bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); |
| 119 | 325 |         bio_set_dev(bio, req->ns->bdev); |
| .. | .. |
| 126 | 332 | |
| 127 | 333 | u16 nvmet_bdev_flush(struct nvmet_req *req) |
| 128 | 334 | { |
| 129 | | -        if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL)) |
| 335 | +        if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL)) |
| 130 | 336 |                 return NVME_SC_INTERNAL | NVME_SC_DNR; |
| 131 | 337 |         return 0; |
| 132 | 338 | } |
| 133 | 339 | |
| 134 | | -static u16 nvmet_bdev_discard_range(struct nvmet_ns *ns, |
| 340 | +static u16 nvmet_bdev_discard_range(struct nvmet_req *req, |
| 135 | 341 |                 struct nvme_dsm_range *range, struct bio **bio) |
| 136 | 342 | { |
| 343 | +        struct nvmet_ns *ns = req->ns; |
| 137 | 344 |         int ret; |
| 138 | 345 | |
| 139 | 346 |         ret = __blkdev_issue_discard(ns->bdev, |
| 140 | | -                        le64_to_cpu(range->slba) << (ns->blksize_shift - 9), |
| 347 | +                        nvmet_lba_to_sect(ns, range->slba), |
| 141 | 348 |                         le32_to_cpu(range->nlb) << (ns->blksize_shift - 9), |
| 142 | 349 |                         GFP_KERNEL, 0, bio); |
| 143 | | -        if (ret && ret != -EOPNOTSUPP) |
| 144 | | -                return NVME_SC_INTERNAL | NVME_SC_DNR; |
| 145 | | -        return 0; |
| 350 | +        if (ret && ret != -EOPNOTSUPP) { |
| 351 | +                req->error_slba = le64_to_cpu(range->slba); |
| 352 | +                return errno_to_nvme_status(req, ret); |
| 353 | +        } |
| 354 | +        return NVME_SC_SUCCESS; |
| 146 | 355 | } |
| 147 | 356 | |
| 148 | 357 | static void nvmet_bdev_execute_discard(struct nvmet_req *req) |
| .. | .. |
| 158 | 367 |                 if (status) |
| 159 | 368 |                         break; |
| 160 | 369 | |
| 161 | | -                status = nvmet_bdev_discard_range(req->ns, &range, &bio); |
| 370 | +                status = nvmet_bdev_discard_range(req, &range, &bio); |
| 162 | 371 |                 if (status) |
| 163 | 372 |                         break; |
| 164 | 373 |         } |
| .. | .. |
| 166 | 375 |         if (bio) { |
| 167 | 376 |                 bio->bi_private = req; |
| 168 | 377 |                 bio->bi_end_io = nvmet_bio_done; |
| 169 | | -                if (status) { |
| 170 | | -                        bio->bi_status = BLK_STS_IOERR; |
| 171 | | -                        bio_endio(bio); |
| 172 | | -                } else { |
| 378 | +                if (status) |
| 379 | +                        bio_io_error(bio); |
| 380 | +                else |
| 173 | 381 |                         submit_bio(bio); |
| 174 | | -                } |
| 175 | 382 |         } else { |
| 176 | 383 |                 nvmet_req_complete(req, status); |
| 177 | 384 |         } |
| .. | .. |
| 179 | 386 | |
| 180 | 387 | static void nvmet_bdev_execute_dsm(struct nvmet_req *req) |
| 181 | 388 | { |
| 389 | +        if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req))) |
| 390 | +                return; |
| 391 | + |
| 182 | 392 |         switch (le32_to_cpu(req->cmd->dsm.attributes)) { |
| 183 | 393 |         case NVME_DSMGMT_AD: |
| 184 | 394 |                 nvmet_bdev_execute_discard(req); |
| .. | .. |
| 196 | 406 | { |
| 197 | 407 |         struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes; |
| 198 | 408 |         struct bio *bio = NULL; |
| 199 | | -        u16 status = NVME_SC_SUCCESS; |
| 200 | 409 |         sector_t sector; |
| 201 | 410 |         sector_t nr_sector; |
| 411 | +        int ret; |
| 202 | 412 | |
| 203 | | -        sector = le64_to_cpu(write_zeroes->slba) << |
| 204 | | -                        (req->ns->blksize_shift - 9); |
| 413 | +        if (!nvmet_check_transfer_len(req, 0)) |
| 414 | +                return; |
| 415 | + |
| 416 | +        sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba); |
| 205 | 417 |         nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) << |
| 206 | 418 |                         (req->ns->blksize_shift - 9)); |
| 207 | 419 | |
| 208 | | -        if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector, |
| 209 | | -                        GFP_KERNEL, &bio, 0)) |
| 210 | | -                status = NVME_SC_INTERNAL | NVME_SC_DNR; |
| 211 | | - |
| 420 | +        ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector, |
| 421 | +                        GFP_KERNEL, &bio, 0); |
| 212 | 422 |         if (bio) { |
| 213 | 423 |                 bio->bi_private = req; |
| 214 | 424 |                 bio->bi_end_io = nvmet_bio_done; |
| 215 | 425 |                 submit_bio(bio); |
| 216 | 426 |         } else { |
| 217 | | -                nvmet_req_complete(req, status); |
| 427 | +                nvmet_req_complete(req, errno_to_nvme_status(req, ret)); |
| 218 | 428 |         } |
| 219 | 429 | } |
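
The write-zeroes length field is a 0's-based count of logical blocks, so the nr_sector computation scales it to 512-byte sectors. A worked sketch with hypothetical values (the helper name is illustrative, not from the source):

```c
/* Sketch of the nr_sector arithmetic above: with 4K blocks
 * (blksize_shift == 12) and length == 7 (0's based, i.e. 8 blocks),
 * the result is (7 + 1) << (12 - 9) == 64 512-byte sectors. */
static inline sector_t wz_nr_sectors(u16 zeroes_len, u8 blksize_shift)
{
	return ((sector_t)zeroes_len + 1) << (blksize_shift - SECTOR_SHIFT);
}
```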
| 220 | 430 | |
| .. | .. |
| 226 | 436 |         case nvme_cmd_read: |
| 227 | 437 |         case nvme_cmd_write: |
| 228 | 438 |                 req->execute = nvmet_bdev_execute_rw; |
| 229 | | -                req->data_len = nvmet_rw_len(req); |
| 439 | +                if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) |
| 440 | +                        req->metadata_len = nvmet_rw_metadata_len(req); |
| 230 | 441 |                 return 0; |
| 231 | 442 |         case nvme_cmd_flush: |
| 232 | 443 |                 req->execute = nvmet_bdev_execute_flush; |
| 233 | | -                req->data_len = 0; |
| 234 | 444 |                 return 0; |
| 235 | 445 |         case nvme_cmd_dsm: |
| 236 | 446 |                 req->execute = nvmet_bdev_execute_dsm; |
| 237 | | -                req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) * |
| 238 | | -                        sizeof(struct nvme_dsm_range); |
| 239 | 447 |                 return 0; |
| 240 | 448 |         case nvme_cmd_write_zeroes: |
| 241 | 449 |                 req->execute = nvmet_bdev_execute_write_zeroes; |
| 242 | | -                req->data_len = 0; |
| 243 | 450 |                 return 0; |
| 244 | 451 |         default: |
| 245 | 452 |                 pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode, |
| 246 | 453 |                         req->sq->qid); |
| 454 | +                req->error_loc = offsetof(struct nvme_common_command, opcode); |
| 247 | 455 |                 return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; |
| 248 | 456 |         } |
| 249 | 457 | } |
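
The transfer length validated at the top of nvmet_bdev_execute_rw() is derived from the command's 0's-based block count. Sketches of the two length helpers, assuming the nvmet.h definitions (data scales by block size, metadata by the per-block PI tuple size):

```c
/* Sketch: data bytes for a read/write command (length is 0's based). */
static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}

/* Sketch: metadata bytes for the same command when PI is enabled. */
static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
			req->ns->metadata_size;
}
```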