2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/drivers/nvme/target/io-cmd-file.c
@@ -8,10 +8,23 @@
 #include <linux/uio.h>
 #include <linux/falloc.h>
 #include <linux/file.h>
+#include <linux/fs.h>
 #include "nvmet.h"
 
 #define NVMET_MAX_MPOOL_BVEC		16
 #define NVMET_MIN_MPOOL_OBJ		16
+
+int nvmet_file_ns_revalidate(struct nvmet_ns *ns)
+{
+	struct kstat stat;
+	int ret;
+
+	ret = vfs_getattr(&ns->file->f_path, &stat, STATX_SIZE,
+			  AT_STATX_FORCE_SYNC);
+	if (!ret)
+		ns->size = stat.size;
+	return ret;
+}
 
 void nvmet_file_ns_disable(struct nvmet_ns *ns)
 {
@@ -30,7 +43,6 @@
 int nvmet_file_ns_enable(struct nvmet_ns *ns)
 {
 	int flags = O_RDWR | O_LARGEFILE;
-	struct kstat stat;
 	int ret;
 
 	if (!ns->buffered_io)
@@ -45,13 +57,16 @@
 		return ret;
 	}
 
-	ret = vfs_getattr(&ns->file->f_path,
-			&stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
+	ret = nvmet_file_ns_revalidate(ns);
 	if (ret)
 		goto err;
 
-	ns->size = stat.size;
-	ns->blksize_shift = file_inode(ns->file)->i_blkbits;
+	/*
+	 * i_blkbits can be greater than the universally accepted upper bound,
+	 * so make sure we export a sane namespace lba_shift.
+	 */
+	ns->blksize_shift = min_t(u8,
+			file_inode(ns->file)->i_blkbits, 12);
 
 	ns->bvec_cache = kmem_cache_create("nvmet-bvec",
 			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
@@ -77,25 +92,24 @@
 	return ret;
 }
 
-static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter)
+static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
 {
-	bv->bv_page = sg_page_iter_page(iter);
-	bv->bv_offset = iter->sg->offset;
-	bv->bv_len = PAGE_SIZE - iter->sg->offset;
+	bv->bv_page = sg_page(sg);
+	bv->bv_offset = sg->offset;
+	bv->bv_len = sg->length;
 }
 
 static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
-		unsigned long nr_segs, size_t count)
+		unsigned long nr_segs, size_t count, int ki_flags)
 {
 	struct kiocb *iocb = &req->f.iocb;
 	ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
 	struct iov_iter iter;
-	int ki_flags = 0, rw;
-	ssize_t ret;
+	int rw;
 
 	if (req->cmd->rw.opcode == nvme_cmd_write) {
 		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
-			ki_flags = IOCB_DSYNC;
+			ki_flags |= IOCB_DSYNC;
 		call_iter = req->ns->file->f_op->write_iter;
 		rw = WRITE;
 	} else {
@@ -103,23 +117,19 @@
 		rw = READ;
 	}
 
-	iov_iter_bvec(&iter, ITER_BVEC | rw, req->f.bvec, nr_segs, count);
+	iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);
 
 	iocb->ki_pos = pos;
 	iocb->ki_filp = req->ns->file;
 	iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);
 
-	ret = call_iter(iocb, &iter);
-
-	if (ret != -EIOCBQUEUED && iocb->ki_complete)
-		iocb->ki_complete(iocb, ret, 0);
-
-	return ret;
+	return call_iter(iocb, &iter);
 }
 
 static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
 {
 	struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
+	u16 status = NVME_SC_SUCCESS;
 
 	if (req->f.bvec != req->inline_bvec) {
 		if (likely(req->f.mpool_alloc == false))
@@ -128,28 +138,117 @@
 			mempool_free(req->f.bvec, req->ns->bvec_pool);
 	}
 
-	nvmet_req_complete(req, ret != req->data_len ?
-			NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+	if (unlikely(ret != req->transfer_len))
+		status = errno_to_nvme_status(req, ret);
+	nvmet_req_complete(req, status);
 }
 
-static void nvmet_file_execute_rw(struct nvmet_req *req)
+static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
 {
-	ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
-	struct sg_page_iter sg_pg_iter;
+	ssize_t nr_bvec = req->sg_cnt;
 	unsigned long bv_cnt = 0;
 	bool is_sync = false;
 	size_t len = 0, total_len = 0;
 	ssize_t ret = 0;
 	loff_t pos;
+	int i;
+	struct scatterlist *sg;
+
+	if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
+		is_sync = true;
+
+	pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
+	if (unlikely(pos + req->transfer_len > req->ns->size)) {
+		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
+		return true;
+	}
+
+	memset(&req->f.iocb, 0, sizeof(struct kiocb));
+	for_each_sg(req->sg, sg, req->sg_cnt, i) {
+		nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
+		len += req->f.bvec[bv_cnt].bv_len;
+		total_len += req->f.bvec[bv_cnt].bv_len;
+		bv_cnt++;
+
+		WARN_ON_ONCE((nr_bvec - 1) < 0);
+
+		if (unlikely(is_sync) &&
+		    (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
+			ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len, 0);
+			if (ret < 0)
+				goto complete;
+
+			pos += len;
+			bv_cnt = 0;
+			len = 0;
+		}
+		nr_bvec--;
+	}
+
+	if (WARN_ON_ONCE(total_len != req->transfer_len)) {
+		ret = -EIO;
+		goto complete;
+	}
+
+	if (unlikely(is_sync)) {
+		ret = total_len;
+		goto complete;
+	}
+
+	/*
+	 * A NULL ki_complete asks for synchronous execution, which we want
+	 * for the IOCB_NOWAIT case.
+	 */
+	if (!(ki_flags & IOCB_NOWAIT))
+		req->f.iocb.ki_complete = nvmet_file_io_done;
+
+	ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);
+
+	switch (ret) {
+	case -EIOCBQUEUED:
+		return true;
+	case -EAGAIN:
+		if (WARN_ON_ONCE(!(ki_flags & IOCB_NOWAIT)))
+			goto complete;
+		return false;
+	case -EOPNOTSUPP:
+		/*
+		 * For file systems returning error -EOPNOTSUPP, handle
+		 * IOCB_NOWAIT error case separately and retry without
+		 * IOCB_NOWAIT.
+		 */
+		if ((ki_flags & IOCB_NOWAIT))
+			return false;
+		break;
+	}
+
+complete:
+	nvmet_file_io_done(&req->f.iocb, ret, 0);
+	return true;
+}
+
+static void nvmet_file_buffered_io_work(struct work_struct *w)
+{
+	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
+
+	nvmet_file_execute_io(req, 0);
+}
+
+static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
+{
+	INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
+	queue_work(buffered_io_wq, &req->f.work);
+}
+
+static void nvmet_file_execute_rw(struct nvmet_req *req)
+{
+	ssize_t nr_bvec = req->sg_cnt;
+
+	if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
+		return;
 
 	if (!req->sg_cnt || !nr_bvec) {
 		nvmet_req_complete(req, 0);
-		return;
-	}
-
-	pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
-	if (unlikely(pos + req->data_len > req->ns->size)) {
-		nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
 		return;
 	}
 
@@ -159,65 +258,26 @@
 	else
 		req->f.bvec = req->inline_bvec;
 
-	req->f.mpool_alloc = false;
 	if (unlikely(!req->f.bvec)) {
 		/* fallback under memory pressure */
 		req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
 		req->f.mpool_alloc = true;
-		if (nr_bvec > NVMET_MAX_MPOOL_BVEC)
-			is_sync = true;
-	}
+	} else
+		req->f.mpool_alloc = false;
 
-	memset(&req->f.iocb, 0, sizeof(struct kiocb));
-	for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
-		nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
-		len += req->f.bvec[bv_cnt].bv_len;
-		total_len += req->f.bvec[bv_cnt].bv_len;
-		bv_cnt++;
-
-		WARN_ON_ONCE((nr_bvec - 1) < 0);
-
-		if (unlikely(is_sync) &&
-		    (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
-			ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len);
-			if (ret < 0)
-				goto out;
-			pos += len;
-			bv_cnt = 0;
-			len = 0;
-		}
-		nr_bvec--;
-	}
-
-	if (WARN_ON_ONCE(total_len != req->data_len))
-		ret = -EIO;
-out:
-	if (unlikely(is_sync || ret)) {
-		nvmet_file_io_done(&req->f.iocb, ret < 0 ? ret : total_len, 0);
-		return;
-	}
-	req->f.iocb.ki_complete = nvmet_file_io_done;
-	nvmet_file_submit_bvec(req, pos, bv_cnt, total_len);
-}
-
-static void nvmet_file_buffered_io_work(struct work_struct *w)
-{
-	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
-
-	nvmet_file_execute_rw(req);
-}
-
-static void nvmet_file_execute_rw_buffered_io(struct nvmet_req *req)
-{
-	INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
-	queue_work(buffered_io_wq, &req->f.work);
+	if (req->ns->buffered_io) {
+		if (likely(!req->f.mpool_alloc) &&
+		    (req->ns->file->f_mode & FMODE_NOWAIT) &&
+		    nvmet_file_execute_io(req, IOCB_NOWAIT))
+			return;
+		nvmet_file_submit_buffered_io(req);
+	} else
+		nvmet_file_execute_io(req, 0);
 }
 
 u16 nvmet_file_flush(struct nvmet_req *req)
 {
-	if (vfs_fsync(req->ns->file, 1) < 0)
-		return NVME_SC_INTERNAL | NVME_SC_DNR;
-	return 0;
+	return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1));
 }
 
 static void nvmet_file_flush_work(struct work_struct *w)
@@ -229,6 +289,8 @@
 
 static void nvmet_file_execute_flush(struct nvmet_req *req)
 {
+	if (!nvmet_check_transfer_len(req, 0))
+		return;
 	INIT_WORK(&req->f.work, nvmet_file_flush_work);
 	schedule_work(&req->f.work);
 }
@@ -238,30 +300,34 @@
 	int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
 	struct nvme_dsm_range range;
 	loff_t offset, len;
-	u16 ret;
+	u16 status = 0;
+	int ret;
 	int i;
 
 	for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
-		ret = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
+		status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
 				sizeof(range));
-		if (ret)
+		if (status)
 			break;
 
 		offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
 		len = le32_to_cpu(range.nlb);
 		len <<= req->ns->blksize_shift;
 		if (offset + len > req->ns->size) {
-			ret = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+			req->error_slba = le64_to_cpu(range.slba);
+			status = errno_to_nvme_status(req, -ENOSPC);
 			break;
 		}
 
-		if (vfs_fallocate(req->ns->file, mode, offset, len)) {
-			ret = NVME_SC_INTERNAL | NVME_SC_DNR;
+		ret = vfs_fallocate(req->ns->file, mode, offset, len);
+		if (ret && ret != -EOPNOTSUPP) {
+			req->error_slba = le64_to_cpu(range.slba);
+			status = errno_to_nvme_status(req, ret);
 			break;
 		}
 	}
 
-	nvmet_req_complete(req, ret);
+	nvmet_req_complete(req, status);
 }
 
 static void nvmet_file_dsm_work(struct work_struct *w)
@@ -283,6 +349,8 @@
 
 static void nvmet_file_execute_dsm(struct nvmet_req *req)
 {
+	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
+		return;
 	INIT_WORK(&req->f.work, nvmet_file_dsm_work);
 	schedule_work(&req->f.work);
 }
@@ -301,16 +369,18 @@
 			req->ns->blksize_shift);
 
 	if (unlikely(offset + len > req->ns->size)) {
-		nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
+		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
 		return;
 	}
 
 	ret = vfs_fallocate(req->ns->file, mode, offset, len);
-	nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+	nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0);
 }
 
 static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
 {
+	if (!nvmet_check_transfer_len(req, 0))
+		return;
 	INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
 	schedule_work(&req->f.work);
 }
@@ -322,28 +392,21 @@
 	switch (cmd->common.opcode) {
 	case nvme_cmd_read:
 	case nvme_cmd_write:
-		if (req->ns->buffered_io)
-			req->execute = nvmet_file_execute_rw_buffered_io;
-		else
-			req->execute = nvmet_file_execute_rw;
-		req->data_len = nvmet_rw_len(req);
+		req->execute = nvmet_file_execute_rw;
 		return 0;
 	case nvme_cmd_flush:
 		req->execute = nvmet_file_execute_flush;
-		req->data_len = 0;
 		return 0;
 	case nvme_cmd_dsm:
 		req->execute = nvmet_file_execute_dsm;
-		req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
-			sizeof(struct nvme_dsm_range);
 		return 0;
 	case nvme_cmd_write_zeroes:
 		req->execute = nvmet_file_execute_write_zeroes;
-		req->data_len = 0;
 		return 0;
 	default:
 		pr_err("unhandled cmd for file ns %d on qid %d\n",
 			cmd->common.opcode, req->sq->qid);
+		req->error_loc = offsetof(struct nvme_common_command, opcode);
 		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
 	}
 }
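
The rewritten read/write path above tries the I/O with IOCB_NOWAIT first when the backing file advertises FMODE_NOWAIT, and only falls back to the buffered_io_wq workqueue when the non-blocking attempt comes back with -EAGAIN or -EOPNOTSUPP. Below is a minimal userspace sketch of that same try-nonblocking-then-fall-back pattern, using preadv2() with RWF_NOWAIT (the userspace counterpart of IOCB_NOWAIT); the path "/etc/hostname", the buffer size, and the plain blocking retry are illustrative assumptions, not part of the patch.

/* Hypothetical userspace analogue of the IOCB_NOWAIT fallback above:
 * try a non-blocking read first, retry blocking on EAGAIN/EOPNOTSUPP. */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd = open("/etc/hostname", O_RDONLY);	/* example path */
	ssize_t ret;

	if (fd < 0)
		return 1;

	/* First attempt: ask the kernel not to block on I/O or locks. */
	ret = preadv2(fd, &iov, 1, 0, RWF_NOWAIT);
	if (ret < 0 && (errno == EAGAIN || errno == EOPNOTSUPP)) {
		/* Fallback path: in the target this is handed off to
		 * buffered_io_wq; here we simply retry as a blocking read. */
		ret = preadv2(fd, &iov, 1, 0, 0);
	}
	if (ret < 0) {
		fprintf(stderr, "read failed: %s\n", strerror(errno));
		close(fd);
		return 1;
	}
	printf("read %zd bytes\n", ret);
	close(fd);
	return 0;
}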