2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
--- a/kernel/drivers/nvme/target/io-cmd-bdev.c
+++ b/kernel/drivers/nvme/target/io-cmd-bdev.c
@@ -1,20 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * NVMe I/O command implementation.
  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/blkdev.h>
 #include <linux/module.h>
 #include "nvmet.h"
+
+void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
+{
+	const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;
+	/* Number of logical blocks per physical block. */
+	const u32 lpp = ql->physical_block_size / ql->logical_block_size;
+	/* Logical blocks per physical block, 0's based. */
+	const __le16 lpp0b = to0based(lpp);
+
+	/*
+	 * For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
+	 * NAWUPF, and NACWU are defined for this namespace and should be
+	 * used by the host for this namespace instead of the AWUN, AWUPF,
+	 * and ACWU fields in the Identify Controller data structure. If
+	 * any of these fields are zero that means that the corresponding
+	 * field from the identify controller data structure should be used.
+	 */
+	id->nsfeat |= 1 << 1;
+	id->nawun = lpp0b;
+	id->nawupf = lpp0b;
+	id->nacwu = lpp0b;
+
+	/*
+	 * Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and
+	 * NOWS are defined for this namespace and should be used by
+	 * the host for I/O optimization.
+	 */
+	id->nsfeat |= 1 << 4;
+	/* NPWG = Namespace Preferred Write Granularity. 0's based */
+	id->npwg = lpp0b;
+	/* NPWA = Namespace Preferred Write Alignment. 0's based */
+	id->npwa = id->npwg;
+	/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
+	id->npdg = to0based(ql->discard_granularity / ql->logical_block_size);
+	/* NPDA = Namespace Preferred Deallocate Alignment */
+	id->npda = id->npdg;
+	/* NOWS = Namespace Optimal Write Size */
+	id->nows = to0based(ql->io_opt / ql->logical_block_size);
+}
+
+static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
+{
+	struct blk_integrity *bi = bdev_get_integrity(ns->bdev);
+
+	if (bi) {
+		ns->metadata_size = bi->tuple_size;
+		if (bi->profile == &t10_pi_type1_crc)
+			ns->pi_type = NVME_NS_DPS_PI_TYPE1;
+		else if (bi->profile == &t10_pi_type3_crc)
+			ns->pi_type = NVME_NS_DPS_PI_TYPE3;
+		else
+			/* Unsupported metadata type */
+			ns->metadata_size = 0;
+	}
+}
 
 int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
 {
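
Note on the hunk above: the Identify Namespace fields it fills in are all "0's based", i.e. a raw value of 0 means one unit. A minimal sketch of the to0based() helper the new code leans on, assuming the definition this series carries in nvmet.h (the clamp keeps the result within the 16-bit field):

static inline __le16 to0based(u32 a)
{
	/* Clamp to [1, 65536], then subtract one for the 0's based encoding. */
	return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}

For example, with a 4096-byte physical and 512-byte logical block size, lpp is 8 and NAWUN/NAWUPF/NACWU are reported as 7.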
@@ -33,6 +80,12 @@
 	}
 	ns->size = i_size_read(ns->bdev->bd_inode);
 	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
+
+	ns->pi_type = 0;
+	ns->metadata_size = 0;
+	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10))
+		nvmet_bdev_ns_enable_integrity(ns);
+
 	return 0;
 }
 
@@ -44,25 +97,146 @@
 	}
 }
 
+void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
+{
+	ns->size = i_size_read(ns->bdev->bd_inode);
+}
+
+static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
+{
+	u16 status = NVME_SC_SUCCESS;
+
+	if (likely(blk_sts == BLK_STS_OK))
+		return status;
+	/*
+	 * Right now there exists an M : 1 mapping between block layer errors
+	 * and NVMe status codes (see nvme_error_status()). For consistency,
+	 * when we reverse map we use the most appropriate NVMe status code
+	 * from the group of NVMe status codes used in nvme_error_status().
+	 */
+	switch (blk_sts) {
+	case BLK_STS_NOSPC:
+		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
+		req->error_loc = offsetof(struct nvme_rw_command, length);
+		break;
+	case BLK_STS_TARGET:
+		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+		req->error_loc = offsetof(struct nvme_rw_command, slba);
+		break;
+	case BLK_STS_NOTSUPP:
+		req->error_loc = offsetof(struct nvme_common_command, opcode);
+		switch (req->cmd->common.opcode) {
+		case nvme_cmd_dsm:
+		case nvme_cmd_write_zeroes:
+			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
+			break;
+		default:
+			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+		}
+		break;
+	case BLK_STS_MEDIUM:
+		status = NVME_SC_ACCESS_DENIED;
+		req->error_loc = offsetof(struct nvme_rw_command, nsid);
+		break;
+	case BLK_STS_IOERR:
+	default:
+		status = NVME_SC_INTERNAL | NVME_SC_DNR;
+		req->error_loc = offsetof(struct nvme_common_command, opcode);
+	}
+
+	switch (req->cmd->common.opcode) {
+	case nvme_cmd_read:
+	case nvme_cmd_write:
+		req->error_slba = le64_to_cpu(req->cmd->rw.slba);
+		break;
+	case nvme_cmd_write_zeroes:
+		req->error_slba =
+			le64_to_cpu(req->cmd->write_zeroes.slba);
+		break;
+	default:
+		req->error_slba = 0;
+	}
+	return status;
+}
+
 static void nvmet_bio_done(struct bio *bio)
 {
 	struct nvmet_req *req = bio->bi_private;
 
-	nvmet_req_complete(req,
-		bio->bi_status ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
-
+	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
 	if (bio != &req->b.inline_bio)
 		bio_put(bio);
 }
 
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
+				struct sg_mapping_iter *miter)
+{
+	struct blk_integrity *bi;
+	struct bio_integrity_payload *bip;
+	struct block_device *bdev = req->ns->bdev;
+	int rc;
+	size_t resid, len;
+
+	bi = bdev_get_integrity(bdev);
+	if (unlikely(!bi)) {
+		pr_err("Unable to locate bio_integrity\n");
+		return -ENODEV;
+	}
+
+	bip = bio_integrity_alloc(bio, GFP_NOIO,
+			min_t(unsigned int, req->metadata_sg_cnt, BIO_MAX_PAGES));
+	if (IS_ERR(bip)) {
+		pr_err("Unable to allocate bio_integrity_payload\n");
+		return PTR_ERR(bip);
+	}
+
+	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
+	/* virtual start sector must be in integrity interval units */
+	bip_set_seed(bip, bio->bi_iter.bi_sector >>
+		     (bi->interval_exp - SECTOR_SHIFT));
+
+	resid = bip->bip_iter.bi_size;
+	while (resid > 0 && sg_miter_next(miter)) {
+		len = min_t(size_t, miter->length, resid);
+		rc = bio_integrity_add_page(bio, miter->page, len,
+					    offset_in_page(miter->addr));
+		if (unlikely(rc != len)) {
+			pr_err("bio_integrity_add_page() failed; %d\n", rc);
+			sg_miter_stop(miter);
+			return -ENOMEM;
+		}
+
+		resid -= len;
+		if (len < miter->length)
+			miter->consumed -= miter->length - len;
+	}
+	sg_miter_stop(miter);
+
+	return 0;
+}
+#else
+static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
+				struct sg_mapping_iter *miter)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
 static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 {
 	int sg_cnt = req->sg_cnt;
-	struct bio *bio = &req->b.inline_bio;
+	struct bio *bio;
 	struct scatterlist *sg;
+	struct blk_plug plug;
 	sector_t sector;
-	blk_qc_t cookie;
-	int op, op_flags = 0, i;
+	int op, i, rc;
+	struct sg_mapping_iter prot_miter;
+	unsigned int iter_flags;
+	unsigned int total_len = nvmet_rw_data_len(req) + req->metadata_len;
+
+	if (!nvmet_check_transfer_len(req, total_len))
+		return;
 
 	if (!req->sg_cnt) {
 		nvmet_req_complete(req, 0);
@@ -70,33 +244,55 @@
 	}
 
 	if (req->cmd->rw.opcode == nvme_cmd_write) {
-		op = REQ_OP_WRITE;
-		op_flags = REQ_SYNC | REQ_IDLE;
+		op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
 		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
-			op_flags |= REQ_FUA;
+			op |= REQ_FUA;
+		iter_flags = SG_MITER_TO_SG;
 	} else {
 		op = REQ_OP_READ;
+		iter_flags = SG_MITER_FROM_SG;
 	}
 
-	sector = le64_to_cpu(req->cmd->rw.slba);
-	sector <<= (req->ns->blksize_shift - 9);
+	if (is_pci_p2pdma_page(sg_page(req->sg)))
+		op |= REQ_NOMERGE;
 
-	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+	sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
+
+	if (nvmet_use_inline_bvec(req)) {
+		bio = &req->b.inline_bio;
+		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+	} else {
+		bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
+	}
 	bio_set_dev(bio, req->ns->bdev);
 	bio->bi_iter.bi_sector = sector;
 	bio->bi_private = req;
 	bio->bi_end_io = nvmet_bio_done;
-	bio_set_op_attrs(bio, op, op_flags);
+	bio->bi_opf = op;
+
+	blk_start_plug(&plug);
+	if (req->metadata_len)
+		sg_miter_start(&prot_miter, req->metadata_sg,
+			       req->metadata_sg_cnt, iter_flags);
 
 	for_each_sg(req->sg, sg, req->sg_cnt, i) {
 		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
 				!= sg->length) {
 			struct bio *prev = bio;
 
+			if (req->metadata_len) {
+				rc = nvmet_bdev_alloc_bip(req, bio,
+							  &prot_miter);
+				if (unlikely(rc)) {
+					bio_io_error(bio);
+					return;
+				}
+			}
+
 			bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
 			bio_set_dev(bio, req->ns->bdev);
 			bio->bi_iter.bi_sector = sector;
-			bio_set_op_attrs(bio, op, op_flags);
+			bio->bi_opf = op;
 
 			bio_chain(bio, prev);
 			submit_bio(prev);
@@ -106,14 +302,24 @@
 		sg_cnt--;
 	}
 
-	cookie = submit_bio(bio);
+	if (req->metadata_len) {
+		rc = nvmet_bdev_alloc_bip(req, bio, &prot_miter);
+		if (unlikely(rc)) {
+			bio_io_error(bio);
+			return;
+		}
+	}
 
-	blk_poll(bdev_get_queue(req->ns->bdev), cookie);
+	submit_bio(bio);
+	blk_finish_plug(&plug);
 }
 
 static void nvmet_bdev_execute_flush(struct nvmet_req *req)
 {
 	struct bio *bio = &req->b.inline_bio;
+
+	if (!nvmet_check_transfer_len(req, 0))
+		return;
 
 	bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
 	bio_set_dev(bio, req->ns->bdev);
@@ -126,23 +332,26 @@
 
 u16 nvmet_bdev_flush(struct nvmet_req *req)
 {
-	if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL))
+	if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL))
 		return NVME_SC_INTERNAL | NVME_SC_DNR;
 	return 0;
 }
 
-static u16 nvmet_bdev_discard_range(struct nvmet_ns *ns,
+static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
 		struct nvme_dsm_range *range, struct bio **bio)
 {
+	struct nvmet_ns *ns = req->ns;
 	int ret;
 
 	ret = __blkdev_issue_discard(ns->bdev,
-			le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
+			nvmet_lba_to_sect(ns, range->slba),
 			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
 			GFP_KERNEL, 0, bio);
-	if (ret && ret != -EOPNOTSUPP)
-		return NVME_SC_INTERNAL | NVME_SC_DNR;
-	return 0;
+	if (ret && ret != -EOPNOTSUPP) {
+		req->error_slba = le64_to_cpu(range->slba);
+		return errno_to_nvme_status(req, ret);
+	}
+	return NVME_SC_SUCCESS;
 }
 
 static void nvmet_bdev_execute_discard(struct nvmet_req *req)
@@ -158,7 +367,7 @@
 		if (status)
 			break;
 
-		status = nvmet_bdev_discard_range(req->ns, &range, &bio);
+		status = nvmet_bdev_discard_range(req, &range, &bio);
 		if (status)
 			break;
 	}
@@ -166,12 +375,10 @@
 	if (bio) {
 		bio->bi_private = req;
 		bio->bi_end_io = nvmet_bio_done;
-		if (status) {
-			bio->bi_status = BLK_STS_IOERR;
-			bio_endio(bio);
-		} else {
+		if (status)
+			bio_io_error(bio);
+		else
 			submit_bio(bio);
-		}
 	} else {
 		nvmet_req_complete(req, status);
 	}
@@ -179,6 +386,9 @@
 
 static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
 {
+	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
+		return;
+
 	switch (le32_to_cpu(req->cmd->dsm.attributes)) {
 	case NVME_DSMGMT_AD:
 		nvmet_bdev_execute_discard(req);
@@ -196,25 +406,25 @@
 {
 	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
 	struct bio *bio = NULL;
-	u16 status = NVME_SC_SUCCESS;
 	sector_t sector;
 	sector_t nr_sector;
+	int ret;
 
-	sector = le64_to_cpu(write_zeroes->slba) <<
-		(req->ns->blksize_shift - 9);
+	if (!nvmet_check_transfer_len(req, 0))
+		return;
+
+	sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba);
 	nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
 		(req->ns->blksize_shift - 9));
 
-	if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
-			GFP_KERNEL, &bio, 0))
-		status = NVME_SC_INTERNAL | NVME_SC_DNR;
-
+	ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
+			GFP_KERNEL, &bio, 0);
 	if (bio) {
 		bio->bi_private = req;
 		bio->bi_end_io = nvmet_bio_done;
 		submit_bio(bio);
 	} else {
-		nvmet_req_complete(req, status);
+		nvmet_req_complete(req, errno_to_nvme_status(req, ret));
 	}
 }
 
@@ -226,24 +436,22 @@
 	case nvme_cmd_read:
 	case nvme_cmd_write:
 		req->execute = nvmet_bdev_execute_rw;
-		req->data_len = nvmet_rw_len(req);
+		if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns))
+			req->metadata_len = nvmet_rw_metadata_len(req);
 		return 0;
 	case nvme_cmd_flush:
 		req->execute = nvmet_bdev_execute_flush;
-		req->data_len = 0;
 		return 0;
 	case nvme_cmd_dsm:
 		req->execute = nvmet_bdev_execute_dsm;
-		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
-			sizeof(struct nvme_dsm_range);
 		return 0;
 	case nvme_cmd_write_zeroes:
 		req->execute = nvmet_bdev_execute_write_zeroes;
-		req->data_len = 0;
 		return 0;
 	default:
 		pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
 		       req->sq->qid);
+		req->error_loc = offsetof(struct nvme_common_command, opcode);
 		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
 	}
 }
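
Note: the nvmet_lba_to_sect() calls introduced throughout this patch replace open-coded shifts such as slba << (blksize_shift - 9). A sketch of that helper, assuming the nvmet.h definition added alongside this change (SECTOR_SHIFT is the block layer's 512-byte sector shift, i.e. 9):

static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
{
	/* Convert a namespace LBA to a 512-byte block layer sector. */
	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
}

Taking the __le64 LBA directly keeps the endian conversion in one place rather than repeating le64_to_cpu() at every call site.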