@@ -8,10 +8,23 @@
 #include <linux/uio.h>
 #include <linux/falloc.h>
 #include <linux/file.h>
+#include <linux/fs.h>
 #include "nvmet.h"

 #define NVMET_MAX_MPOOL_BVEC 16
 #define NVMET_MIN_MPOOL_OBJ 16
+
+int nvmet_file_ns_revalidate(struct nvmet_ns *ns)
+{
+        struct kstat stat;
+        int ret;
+
+        ret = vfs_getattr(&ns->file->f_path, &stat, STATX_SIZE,
+                          AT_STATX_FORCE_SYNC);
+        if (!ret)
+                ns->size = stat.size;
+        return ret;
+}

 void nvmet_file_ns_disable(struct nvmet_ns *ns)
 {
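The hunk above introduces nvmet_file_ns_revalidate(), which re-queries the backing file's size with a forced-sync vfs_getattr() so a resized file is reflected in ns->size; it is non-static, presumably so revalidation paths outside this file can call it too. AT_STATX_FORCE_SYNC asks the filesystem to refresh attributes (relevant for network filesystems) rather than serve cached values. A minimal userspace sketch of the same query, assuming the statx(2) wrapper (glibc 2.28+); the path is illustrative, not target code:

```c
/* Userspace sketch of a forced-sync size query, the analogue of the
 * vfs_getattr(..., STATX_SIZE, AT_STATX_FORCE_SYNC) call above. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

static long long file_size_synced(const char *path)
{
	struct statx stx;

	/* STATX_SIZE: only stx_size is needed; AT_STATX_FORCE_SYNC makes
	 * the filesystem refresh attributes instead of using its cache. */
	if (statx(AT_FDCWD, path, AT_STATX_FORCE_SYNC, STATX_SIZE, &stx) < 0)
		return -1;
	return (long long)stx.stx_size;
}

int main(int argc, char **argv)
{
	/* Illustrative default path only. */
	printf("%lld\n", file_size_synced(argc > 1 ? argv[1] : "/etc/hostname"));
	return 0;
}
```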
@@ -30,7 +43,6 @@
 int nvmet_file_ns_enable(struct nvmet_ns *ns)
 {
         int flags = O_RDWR | O_LARGEFILE;
-        struct kstat stat;
         int ret;

         if (!ns->buffered_io)
@@ -45,13 +57,16 @@
                 return ret;
         }

-        ret = vfs_getattr(&ns->file->f_path,
-                        &stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
+        ret = nvmet_file_ns_revalidate(ns);
         if (ret)
                 goto err;

-        ns->size = stat.size;
-        ns->blksize_shift = file_inode(ns->file)->i_blkbits;
+        /*
+         * i_blkbits can be greater than the universally accepted upper bound,
+         * so make sure we export a sane namespace lba_shift.
+         */
+        ns->blksize_shift = min_t(u8,
+                        file_inode(ns->file)->i_blkbits, 12);

         ns->bvec_cache = kmem_cache_create("nvmet-bvec",
                         NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
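Capping blksize_shift at 12 keeps the exported LBA size at or below 4096 bytes even when the backing filesystem reports a larger i_blkbits. A tiny standalone illustration of the clamp, with a hypothetical 16 KiB-block filesystem:

```c
/* Illustration only: the min_t(u8, i_blkbits, 12) clamp above keeps the
 * exported LBA size at or below 1 << 12 = 4096 bytes. */
#include <stdio.h>

int main(void)
{
	unsigned char i_blkbits = 14;	/* e.g. a 16 KiB-block filesystem */
	unsigned char lba_shift = i_blkbits < 12 ? i_blkbits : 12;

	printf("fs block %u, exported LBA %u\n",
	       1u << i_blkbits, 1u << lba_shift);
	return 0;
}
```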
@@ -77,25 +92,24 @@
         return ret;
 }

-static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter)
+static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
 {
-        bv->bv_page = sg_page_iter_page(iter);
-        bv->bv_offset = iter->sg->offset;
-        bv->bv_len = PAGE_SIZE - iter->sg->offset;
+        bv->bv_page = sg_page(sg);
+        bv->bv_offset = sg->offset;
+        bv->bv_len = sg->length;
 }

 static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
-                unsigned long nr_segs, size_t count)
+                unsigned long nr_segs, size_t count, int ki_flags)
 {
         struct kiocb *iocb = &req->f.iocb;
         ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
         struct iov_iter iter;
-        int ki_flags = 0, rw;
-        ssize_t ret;
+        int rw;

         if (req->cmd->rw.opcode == nvme_cmd_write) {
                 if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
-                        ki_flags = IOCB_DSYNC;
+                        ki_flags |= IOCB_DSYNC;
                 call_iter = req->ns->file->f_op->write_iter;
                 rw = WRITE;
         } else {
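nvmet_file_init_bvec() now maps one scatterlist element to one bvec, taking sg->length directly; the old per-page iteration computed bv_len as PAGE_SIZE minus the offset, which over-counts for entries shorter than a page. It also lets the bvec count simply be req->sg_cnt. As a loose userspace analogue, assuming nothing beyond POSIX writev(2): each data segment becomes exactly one iovec, however many pages it spans:

```c
/* Userspace analogue (illustrative only): one segment, one iovec, just as
 * the rework maps one scatterlist element to one bvec. */
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char a[] = "hello ", b[] = "vectored ", c[] = "world\n";
	struct iovec iov[3] = {
		{ .iov_base = a, .iov_len = strlen(a) },
		{ .iov_base = b, .iov_len = strlen(b) },
		{ .iov_base = c, .iov_len = strlen(c) },
	};

	/* The kernel walks the segments in order, whatever their sizes. */
	if (writev(STDOUT_FILENO, iov, 3) < 0)
		perror("writev");
	return 0;
}
```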
@@ -103,23 +117,19 @@
                 rw = READ;
         }

-        iov_iter_bvec(&iter, ITER_BVEC | rw, req->f.bvec, nr_segs, count);
+        iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);

         iocb->ki_pos = pos;
         iocb->ki_filp = req->ns->file;
         iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);

-        ret = call_iter(iocb, &iter);
-
-        if (ret != -EIOCBQUEUED && iocb->ki_complete)
-                iocb->ki_complete(iocb, ret, 0);
-
-        return ret;
+        return call_iter(iocb, &iter);
 }

 static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
 {
         struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
+        u16 status = NVME_SC_SUCCESS;

         if (req->f.bvec != req->inline_bvec) {
                 if (likely(req->f.mpool_alloc == false))
@@ -128,27 +138,116 @@
                         mempool_free(req->f.bvec, req->ns->bvec_pool);
         }

-        nvmet_req_complete(req, ret != req->data_len ?
-                        NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+        if (unlikely(ret != req->transfer_len))
+                status = errno_to_nvme_status(req, ret);
+        nvmet_req_complete(req, status);
 }

-static void nvmet_file_execute_rw(struct nvmet_req *req)
+static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
 {
-        ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
-        struct sg_page_iter sg_pg_iter;
+        ssize_t nr_bvec = req->sg_cnt;
         unsigned long bv_cnt = 0;
         bool is_sync = false;
         size_t len = 0, total_len = 0;
         ssize_t ret = 0;
         loff_t pos;
+        int i;
+        struct scatterlist *sg;
+
+        if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
+                is_sync = true;
+
+        pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
+        if (unlikely(pos + req->transfer_len > req->ns->size)) {
+                nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
+                return true;
+        }
+
+        memset(&req->f.iocb, 0, sizeof(struct kiocb));
+        for_each_sg(req->sg, sg, req->sg_cnt, i) {
+                nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
+                len += req->f.bvec[bv_cnt].bv_len;
+                total_len += req->f.bvec[bv_cnt].bv_len;
+                bv_cnt++;
+
+                WARN_ON_ONCE((nr_bvec - 1) < 0);
+
+                if (unlikely(is_sync) &&
+                    (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
+                        ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len, 0);
+                        if (ret < 0)
+                                goto complete;
+
+                        pos += len;
+                        bv_cnt = 0;
+                        len = 0;
+                }
+                nr_bvec--;
+        }
+
+        if (WARN_ON_ONCE(total_len != req->transfer_len)) {
+                ret = -EIO;
+                goto complete;
+        }
+
+        if (unlikely(is_sync)) {
+                ret = total_len;
+                goto complete;
+        }
+
+        /*
+         * A NULL ki_complete asks for synchronous execution, which we want
+         * for the IOCB_NOWAIT case.
+         */
+        if (!(ki_flags & IOCB_NOWAIT))
+                req->f.iocb.ki_complete = nvmet_file_io_done;
+
+        ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);
+
+        switch (ret) {
+        case -EIOCBQUEUED:
+                return true;
+        case -EAGAIN:
+                if (WARN_ON_ONCE(!(ki_flags & IOCB_NOWAIT)))
+                        goto complete;
+                return false;
+        case -EOPNOTSUPP:
+                /*
+                 * For file systems returning error -EOPNOTSUPP, handle
+                 * IOCB_NOWAIT error case separately and retry without
+                 * IOCB_NOWAIT.
+                 */
+                if ((ki_flags & IOCB_NOWAIT))
+                        return false;
+                break;
+        }
+
+complete:
+        nvmet_file_io_done(&req->f.iocb, ret, 0);
+        return true;
+}
+
+static void nvmet_file_buffered_io_work(struct work_struct *w)
+{
+        struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
+
+        nvmet_file_execute_io(req, 0);
+}
+
+static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
+{
+        INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
+        queue_work(buffered_io_wq, &req->f.work);
+}
+
+static void nvmet_file_execute_rw(struct nvmet_req *req)
+{
+        ssize_t nr_bvec = req->sg_cnt;
+
+        if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
+                return;

         if (!req->sg_cnt || !nr_bvec) {
                 nvmet_req_complete(req, 0);
-                return;
-        }
-
-        pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
-        if (unlikely(pos + req->data_len > req->ns->size)) {
-                nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
                 return;
         }

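nvmet_file_submit_bvec() now returns the iterator call's result unmodified instead of invoking ki_complete itself, so nvmet_file_execute_io() can interpret it: -EIOCBQUEUED means completion will arrive asynchronously through ki_complete; -EAGAIN from an IOCB_NOWAIT attempt tells the caller to retry from the workqueue; and -EOPNOTSUPP (a filesystem without nowait support) gets the same retry-without-IOCB_NOWAIT treatment. Leaving ki_complete NULL forces synchronous execution, which is exactly what the nowait probe wants. A userspace sketch of the same retry policy, assuming preadv2(2) (Linux 4.6+) and RWF_NOWAIT (Linux 4.14+); illustrative only:

```c
/* Userspace sketch: try a non-blocking read first, fall back to a blocking
 * one on EAGAIN or EOPNOTSUPP, mirroring the nowait fallback above. */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

static ssize_t read_prefer_nowait(int fd, struct iovec *iov, int cnt, off_t pos)
{
	ssize_t ret = preadv2(fd, iov, cnt, pos, RWF_NOWAIT);

	/* EAGAIN: data not immediately available; EOPNOTSUPP: the filesystem
	 * cannot do nowait I/O at all. Retry as ordinary blocking I/O, the
	 * userspace equivalent of punting to the buffered-io workqueue. */
	if (ret < 0 && (errno == EAGAIN || errno == EOPNOTSUPP))
		ret = preadv2(fd, iov, cnt, pos, 0);
	return ret;
}

int main(void)
{
	char buf[64];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd = open("/etc/hostname", O_RDONLY);	/* illustrative path */

	if (fd < 0)
		return 1;
	printf("read %zd bytes\n", read_prefer_nowait(fd, &iov, 1, 0));
	close(fd);
	return 0;
}
```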
@@ -159,65 +258,26 @@
         else
                 req->f.bvec = req->inline_bvec;

-        req->f.mpool_alloc = false;
         if (unlikely(!req->f.bvec)) {
                 /* fallback under memory pressure */
                 req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
                 req->f.mpool_alloc = true;
-                if (nr_bvec > NVMET_MAX_MPOOL_BVEC)
-                        is_sync = true;
-        }
+        } else
+                req->f.mpool_alloc = false;

-        memset(&req->f.iocb, 0, sizeof(struct kiocb));
-        for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
-                nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
-                len += req->f.bvec[bv_cnt].bv_len;
-                total_len += req->f.bvec[bv_cnt].bv_len;
-                bv_cnt++;
-
-                WARN_ON_ONCE((nr_bvec - 1) < 0);
-
-                if (unlikely(is_sync) &&
-                    (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
-                        ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len);
-                        if (ret < 0)
-                                goto out;
-                        pos += len;
-                        bv_cnt = 0;
-                        len = 0;
-                }
-                nr_bvec--;
-        }
-
-        if (WARN_ON_ONCE(total_len != req->data_len))
-                ret = -EIO;
-out:
-        if (unlikely(is_sync || ret)) {
-                nvmet_file_io_done(&req->f.iocb, ret < 0 ? ret : total_len, 0);
-                return;
-        }
-        req->f.iocb.ki_complete = nvmet_file_io_done;
-        nvmet_file_submit_bvec(req, pos, bv_cnt, total_len);
-}
-
-static void nvmet_file_buffered_io_work(struct work_struct *w)
-{
-        struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
-
-        nvmet_file_execute_rw(req);
-}
-
-static void nvmet_file_execute_rw_buffered_io(struct nvmet_req *req)
-{
-        INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
-        queue_work(buffered_io_wq, &req->f.work);
+        if (req->ns->buffered_io) {
+                if (likely(!req->f.mpool_alloc) &&
+                    (req->ns->file->f_mode & FMODE_NOWAIT) &&
+                    nvmet_file_execute_io(req, IOCB_NOWAIT))
+                        return;
+                nvmet_file_submit_buffered_io(req);
+        } else
+                nvmet_file_execute_io(req, 0);
 }

 u16 nvmet_file_flush(struct nvmet_req *req)
 {
-        if (vfs_fsync(req->ns->file, 1) < 0)
-                return NVME_SC_INTERNAL | NVME_SC_DNR;
-        return 0;
+        return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1));
 }

 static void nvmet_file_flush_work(struct work_struct *w)
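The dispatch above tries buffered I/O inline with IOCB_NOWAIT first, but only when the bvec array did not come from the emergency mempool and the file was opened with FMODE_NOWAIT; if that attempt would block, the request falls back to buffered_io_wq. Error reporting changes too: nvmet_file_flush() and the handlers below derive the NVMe status from the actual errno via errno_to_nvme_status() instead of returning a blanket NVME_SC_INTERNAL. A self-contained sketch of the kind of translation involved; the constant values mirror <linux/nvme.h>, but the mapping shown is a simplified assumption (the real helper covers more errnos and also records req->error_loc):

```c
/* Simplified illustration of an errno-to-NVMe-status translation in the
 * spirit of errno_to_nvme_status(); not the exact upstream table. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define NVME_SC_INVALID_OPCODE	0x1	/* values as in <linux/nvme.h> */
#define NVME_SC_INTERNAL	0x6
#define NVME_SC_CAP_EXCEEDED	0x81
#define NVME_SC_DNR		0x4000

static uint16_t demo_errno_to_status(int err)
{
	switch (err) {
	case -ENOSPC:		/* request ran past the namespace capacity */
		return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
	case -EOPNOTSUPP:	/* backend cannot perform the operation */
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	default:		/* catch-all for unexpected backend errors */
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	}
}

int main(void)
{
	printf("0x%x\n", (unsigned)demo_errno_to_status(-ENOSPC));
	return 0;
}
```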
@@ -229,6 +289,8 @@

 static void nvmet_file_execute_flush(struct nvmet_req *req)
 {
+        if (!nvmet_check_transfer_len(req, 0))
+                return;
         INIT_WORK(&req->f.work, nvmet_file_flush_work);
         schedule_work(&req->f.work);
 }
@@ -238,30 +300,34 @@
         int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
         struct nvme_dsm_range range;
         loff_t offset, len;
-        u16 ret;
+        u16 status = 0;
+        int ret;
         int i;

         for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
-                ret = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
+                status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
                                 sizeof(range));
-                if (ret)
+                if (status)
                         break;

                 offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
                 len = le32_to_cpu(range.nlb);
                 len <<= req->ns->blksize_shift;
                 if (offset + len > req->ns->size) {
-                        ret = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+                        req->error_slba = le64_to_cpu(range.slba);
+                        status = errno_to_nvme_status(req, -ENOSPC);
                         break;
                 }

-                if (vfs_fallocate(req->ns->file, mode, offset, len)) {
-                        ret = NVME_SC_INTERNAL | NVME_SC_DNR;
+                ret = vfs_fallocate(req->ns->file, mode, offset, len);
+                if (ret && ret != -EOPNOTSUPP) {
+                        req->error_slba = le64_to_cpu(range.slba);
+                        status = errno_to_nvme_status(req, ret);
                         break;
                 }
         }

-        nvmet_req_complete(req, ret);
+        nvmet_req_complete(req, status);
 }

 static void nvmet_file_dsm_work(struct work_struct *w)
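Each DSM/Deallocate range translates to vfs_fallocate() with FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE. -EOPNOTSUPP is now tolerated, since deallocate is advisory and a filesystem that cannot punch holes should not fail the command, while real failures record the offending SLBA in req->error_slba for error-log reporting. The userspace equivalent for a single range, using fallocate(2); file name and sizes are illustrative:

```c
/* Userspace analogue: punch a hole in a scratch file with fallocate(2),
 * as the DSM/Deallocate handler does per range. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("demo.img", O_RDWR | O_CREAT, 0644);	/* scratch file */

	if (fd < 0 || ftruncate(fd, 1 << 20) < 0)
		return 1;

	/* Deallocate 64 KiB at offset 128 KiB without changing i_size. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      128 << 10, 64 << 10) < 0)
		perror("fallocate");	/* e.g. EOPNOTSUPP on some filesystems */
	close(fd);
	return 0;
}
```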
@@ -283,6 +349,8 @@

 static void nvmet_file_execute_dsm(struct nvmet_req *req)
 {
+        if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
+                return;
         INIT_WORK(&req->f.work, nvmet_file_dsm_work);
         schedule_work(&req->f.work);
 }
@@ -301,16 +369,18 @@
                         req->ns->blksize_shift);

         if (unlikely(offset + len > req->ns->size)) {
-                nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
+                nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
                 return;
         }

         ret = vfs_fallocate(req->ns->file, mode, offset, len);
-        nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+        nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0);
 }

 static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
 {
+        if (!nvmet_check_transfer_len(req, 0))
+                return;
         INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
         schedule_work(&req->f.work);
 }
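Write Zeroes follows the same pattern: both the range check and the vfs_fallocate() result (the zeroing mode is set up in the elided lines above) now report through errno_to_nvme_status(), and the new nvmet_check_transfer_len(req, 0) guard encodes the fact that the command carries no data payload.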
@@ -322,28 +392,21 @@
         switch (cmd->common.opcode) {
         case nvme_cmd_read:
         case nvme_cmd_write:
-                if (req->ns->buffered_io)
-                        req->execute = nvmet_file_execute_rw_buffered_io;
-                else
-                        req->execute = nvmet_file_execute_rw;
-                req->data_len = nvmet_rw_len(req);
+                req->execute = nvmet_file_execute_rw;
                 return 0;
         case nvme_cmd_flush:
                 req->execute = nvmet_file_execute_flush;
-                req->data_len = 0;
                 return 0;
         case nvme_cmd_dsm:
                 req->execute = nvmet_file_execute_dsm;
-                req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
-                                sizeof(struct nvme_dsm_range);
                 return 0;
         case nvme_cmd_write_zeroes:
                 req->execute = nvmet_file_execute_write_zeroes;
-                req->data_len = 0;
                 return 0;
         default:
                 pr_err("unhandled cmd for file ns %d on qid %d\n",
                        cmd->common.opcode, req->sq->qid);
+                req->error_loc = offsetof(struct nvme_common_command, opcode);
                 return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
         }
 }
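The parse hunk is the other half of the transfer-length rework: expected lengths are no longer stashed in req->data_len at parse time; each execute handler validates them itself, with nvmet_check_transfer_len() for exact lengths and nvmet_check_data_len_lte() for DSM, which tolerates an SGL larger than the actual range payload. Buffered-vs-direct dispatch likewise moves out of parse into nvmet_file_execute_rw(), and unknown opcodes now record req->error_loc so error-log entries point at the offending command field.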