2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/lightnvm/pblk-read.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2016 CNEX Labs
  * Initial release: Javier Gonzalez <javier@cnexlabs.com>
@@ -25,8 +26,7 @@
  * issued.
  */
 static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
-                                sector_t lba, struct ppa_addr ppa,
-                                int bio_iter, bool advanced_bio)
+                                sector_t lba, struct ppa_addr ppa)
 {
 #ifdef CONFIG_NVM_PBLK_DEBUG
         /* Callers must ensure that the ppa points to a cache address */
@@ -34,94 +34,100 @@
         BUG_ON(!pblk_addr_in_cache(ppa));
 #endif
 
-        return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
-                                   bio_iter, advanced_bio);
+        return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa);
 }
 
-static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
+static int pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
                                  struct bio *bio, sector_t blba,
-                                 unsigned long *read_bitmap)
+                                 bool *from_cache)
 {
-        struct pblk_sec_meta *meta_list = rqd->meta_list;
-        struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
-        int nr_secs = rqd->nr_ppas;
-        bool advanced_bio = false;
-        int i, j = 0;
-
-        pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);
-
-        for (i = 0; i < nr_secs; i++) {
-                struct ppa_addr p = ppas[i];
-                sector_t lba = blba + i;
+        void *meta_list = rqd->meta_list;
+        int nr_secs, i;
 
 retry:
-                if (pblk_ppa_empty(p)) {
-                        WARN_ON(test_and_set_bit(i, read_bitmap));
-                        meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
+        nr_secs = pblk_lookup_l2p_seq(pblk, rqd->ppa_list, blba, rqd->nr_ppas,
+                                      from_cache);
 
-                        if (unlikely(!advanced_bio)) {
-                                bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
-                                advanced_bio = true;
+        if (!*from_cache)
+                goto end;
+
+        for (i = 0; i < nr_secs; i++) {
+                struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
+                sector_t lba = blba + i;
+
+                if (pblk_ppa_empty(rqd->ppa_list[i])) {
+                        __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
+
+                        meta->lba = addr_empty;
+                } else if (pblk_addr_in_cache(rqd->ppa_list[i])) {
+                        /*
+                         * Try to read from write buffer. The address is later
+                         * checked on the write buffer to prevent retrieving
+                         * overwritten data.
+                         */
+                        if (!pblk_read_from_cache(pblk, bio, lba,
+                                                  rqd->ppa_list[i])) {
+                                if (i == 0) {
+                                        /*
+                                         * We didn't call bio_advance()
+                                         * yet, so we can just retry.
+                                         */
+                                        goto retry;
+                                } else {
+                                        /*
+                                         * We already called bio_advance(),
+                                         * so we cannot retry and must quit
+                                         * this function to let the caller
+                                         * handle splitting the bio at the
+                                         * current sector position.
+                                         */
+                                        nr_secs = i;
+                                        goto end;
+                                }
                         }
-
-                        goto next;
-                }
-
-                /* Try to read from write buffer. The address is later checked
-                 * on the write buffer to prevent retrieving overwritten data.
-                 */
-                if (pblk_addr_in_cache(p)) {
-                        if (!pblk_read_from_cache(pblk, bio, lba, p, i,
-                                                  advanced_bio)) {
-                                pblk_lookup_l2p_seq(pblk, &p, lba, 1);
-                                goto retry;
-                        }
-                        WARN_ON(test_and_set_bit(i, read_bitmap));
-                        meta_list[i].lba = cpu_to_le64(lba);
-                        advanced_bio = true;
+                        meta->lba = cpu_to_le64(lba);
 #ifdef CONFIG_NVM_PBLK_DEBUG
                         atomic_long_inc(&pblk->cache_reads);
 #endif
-                } else {
-                        /* Read from media non-cached sectors */
-                        rqd->ppa_list[j++] = p;
                 }
-
-next:
-                if (advanced_bio)
-                        bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
+                bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
         }
 
+end:
         if (pblk_io_aligned(pblk, nr_secs))
-                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
-        else
-                rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
+                rqd->is_seq = 1;
 
 #ifdef CONFIG_NVM_PBLK_DEBUG
         atomic_long_add(nr_secs, &pblk->inflight_reads);
 #endif
+
+        return nr_secs;
 }
 
 
 static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
                                 sector_t blba)
 {
-        struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
+        void *meta_list = rqd->meta_list;
         int nr_lbas = rqd->nr_ppas;
         int i;
 
+        if (!pblk_is_oob_meta_supported(pblk))
+                return;
+
         for (i = 0; i < nr_lbas; i++) {
-                u64 lba = le64_to_cpu(meta_lba_list[i].lba);
+                struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
+                u64 lba = le64_to_cpu(meta->lba);
 
                 if (lba == ADDR_EMPTY)
                         continue;
 
                 if (lba != blba + i) {
 #ifdef CONFIG_NVM_PBLK_DEBUG
-                        struct ppa_addr *p;
+                        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
 
-                        p = (nr_lbas == 1) ? &rqd->ppa_list[i] : &rqd->ppa_addr;
-                        print_ppa(pblk, p, "seq", i);
+                        print_ppa(pblk, &ppa_list[i], "seq", i);
 #endif
                         pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
                                  lba, (u64)blba + i);
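
The rewritten pblk_read_ppalist_rq() above replaces fixed struct pblk_sec_meta
array indexing with the pblk_get_meta() accessor, so OOB metadata still lines
up on devices whose per-sector metadata entry is larger than the struct. A
minimal sketch of such an accessor, assuming an oob_meta_size field on struct
pblk as in the mainline header; this is illustrative, not a line of this patch:

static inline struct pblk_sec_meta *pblk_get_meta(struct pblk *pblk,
                                                  void *meta, int index)
{
        /* Step by the device's real OOB entry size, never by
         * sizeof(struct pblk_sec_meta) alone.
         */
        return meta + max_t(int, sizeof(struct pblk_sec_meta),
                            pblk->oob_meta_size) * index;
}

The other contract change visible here: pblk_lookup_l2p_seq() now returns how
many consecutive sectors share one source and reports that source through
*from_cache, which is what lets the loop truncate nr_secs and hand the rest
back to the caller for a bio split.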
@@ -136,28 +142,31 @@
 static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
                                  u64 *lba_list, int nr_lbas)
 {
-        struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
+        void *meta_lba_list = rqd->meta_list;
         int i, j;
 
+        if (!pblk_is_oob_meta_supported(pblk))
+                return;
+
         for (i = 0, j = 0; i < nr_lbas; i++) {
+                struct pblk_sec_meta *meta = pblk_get_meta(pblk,
+                                                           meta_lba_list, j);
                 u64 lba = lba_list[i];
                 u64 meta_lba;
 
                 if (lba == ADDR_EMPTY)
                         continue;
 
-                meta_lba = le64_to_cpu(meta_lba_list[j].lba);
+                meta_lba = le64_to_cpu(meta->lba);
 
                 if (lba != meta_lba) {
 #ifdef CONFIG_NVM_PBLK_DEBUG
-                        struct ppa_addr *p;
-                        int nr_ppas = rqd->nr_ppas;
+                        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
 
-                        p = (nr_ppas == 1) ? &rqd->ppa_list[j] : &rqd->ppa_addr;
-                        print_ppa(pblk, p, "seq", j);
+                        print_ppa(pblk, &ppa_list[j], "rnd", j);
 #endif
                         pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
-                                 lba, meta_lba);
+                                 meta_lba, lba);
                         WARN_ON(1);
                 }
 
@@ -167,50 +176,31 @@
         WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
 }
 
-static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
+static void pblk_end_user_read(struct bio *bio, int error)
 {
-        struct ppa_addr *ppa_list;
-        int i;
-
-        ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
-
-        for (i = 0; i < rqd->nr_ppas; i++) {
-                struct ppa_addr ppa = ppa_list[i];
-                struct pblk_line *line;
-
-                line = &pblk->lines[pblk_ppa_to_line(ppa)];
-                kref_put(&line->ref, pblk_line_put_wq);
-        }
-}
-
-static void pblk_end_user_read(struct bio *bio)
-{
-#ifdef CONFIG_NVM_PBLK_DEBUG
-        WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
-#endif
-        bio_endio(bio);
+        if (error && error != NVM_RSP_WARN_HIGHECC)
+                bio_io_error(bio);
+        else
+                bio_endio(bio);
 }
 
 static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
                                bool put_line)
 {
-        struct nvm_tgt_dev *dev = pblk->dev;
         struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
         struct bio *int_bio = rqd->bio;
         unsigned long start_time = r_ctx->start_time;
 
-        generic_end_io_acct(dev->q, REQ_OP_READ, &pblk->disk->part0, start_time);
+        bio_end_io_acct(int_bio, start_time);
 
         if (rqd->error)
                 pblk_log_read_err(pblk, rqd);
 
         pblk_read_check_seq(pblk, rqd, r_ctx->lba);
-
-        if (int_bio)
-                bio_put(int_bio);
+        bio_put(int_bio);
 
         if (put_line)
-                pblk_read_put_rqd_kref(pblk, rqd);
+                pblk_rq_to_line_put(pblk, rqd);
 
 #ifdef CONFIG_NVM_PBLK_DEBUG
         atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
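
Two semantic changes ride along in this hunk: request accounting moves to the
bio_start_io_acct()/bio_end_io_acct() pair, and a high-ECC warning
(NVM_RSP_WARN_HIGHECC) no longer fails the user bio, since the device did
return recovered data. A hedged sketch of the accounting pairing, with
hypothetical helper names; the real driver carries the start time in
pblk_g_ctx:

/* At submission: returns a start time to carry with the request. */
static unsigned long pblk_acct_start(struct bio *bio)
{
        return bio_start_io_acct(bio);  /* replaces generic_start_io_acct() */
}

/* At completion, e.g. in the read end_io path. */
static void pblk_acct_end(struct bio *bio, unsigned long start_time)
{
        bio_end_io_acct(bio, start_time); /* replaces generic_end_io_acct() */
}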
@@ -227,187 +217,17 @@
         struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
         struct bio *bio = (struct bio *)r_ctx->private;
 
-        pblk_end_user_read(bio);
+        pblk_end_user_read(bio, rqd->error);
         __pblk_end_io_read(pblk, rqd, true);
 }
 
-static void pblk_end_partial_read(struct nvm_rq *rqd)
-{
-        struct pblk *pblk = rqd->private;
-        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
-        struct pblk_pr_ctx *pr_ctx = r_ctx->private;
-        struct bio *new_bio = rqd->bio;
-        struct bio *bio = pr_ctx->orig_bio;
-        struct bio_vec src_bv, dst_bv;
-        struct pblk_sec_meta *meta_list = rqd->meta_list;
-        int bio_init_idx = pr_ctx->bio_init_idx;
-        unsigned long *read_bitmap = pr_ctx->bitmap;
-        int nr_secs = pr_ctx->orig_nr_secs;
-        int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
-        __le64 *lba_list_mem, *lba_list_media;
-        void *src_p, *dst_p;
-        int hole, i;
-
-        if (unlikely(nr_holes == 1)) {
-                struct ppa_addr ppa;
-
-                ppa = rqd->ppa_addr;
-                rqd->ppa_list = pr_ctx->ppa_ptr;
-                rqd->dma_ppa_list = pr_ctx->dma_ppa_list;
-                rqd->ppa_list[0] = ppa;
-        }
-
-        /* Re-use allocated memory for intermediate lbas */
-        lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
-        lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);
-
-        for (i = 0; i < nr_secs; i++) {
-                lba_list_media[i] = meta_list[i].lba;
-                meta_list[i].lba = lba_list_mem[i];
-        }
-
-        /* Fill the holes in the original bio */
-        i = 0;
-        hole = find_first_zero_bit(read_bitmap, nr_secs);
-        do {
-                int line_id = pblk_ppa_to_line(rqd->ppa_list[i]);
-                struct pblk_line *line = &pblk->lines[line_id];
-
-                kref_put(&line->ref, pblk_line_put);
-
-                meta_list[hole].lba = lba_list_media[i];
-
-                src_bv = new_bio->bi_io_vec[i++];
-                dst_bv = bio->bi_io_vec[bio_init_idx + hole];
-
-                src_p = kmap_atomic(src_bv.bv_page);
-                dst_p = kmap_atomic(dst_bv.bv_page);
-
-                memcpy(dst_p + dst_bv.bv_offset,
-                       src_p + src_bv.bv_offset,
-                       PBLK_EXPOSED_PAGE_SIZE);
-
-                kunmap_atomic(src_p);
-                kunmap_atomic(dst_p);
-
-                mempool_free(src_bv.bv_page, &pblk->page_bio_pool);
-
-                hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
-        } while (hole < nr_secs);
-
-        bio_put(new_bio);
-        kfree(pr_ctx);
-
-        /* restore original request */
-        rqd->bio = NULL;
-        rqd->nr_ppas = nr_secs;
-
-        bio_endio(bio);
-        __pblk_end_io_read(pblk, rqd, false);
-}
-
-static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
-                                   unsigned int bio_init_idx,
-                                   unsigned long *read_bitmap,
-                                   int nr_holes)
-{
-        struct pblk_sec_meta *meta_list = rqd->meta_list;
-        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
-        struct pblk_pr_ctx *pr_ctx;
-        struct bio *new_bio, *bio = r_ctx->private;
-        __le64 *lba_list_mem;
-        int nr_secs = rqd->nr_ppas;
-        int i;
-
-        /* Re-use allocated memory for intermediate lbas */
-        lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
-
-        new_bio = bio_alloc(GFP_KERNEL, nr_holes);
-
-        if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
-                goto fail_bio_put;
-
-        if (nr_holes != new_bio->bi_vcnt) {
-                WARN_ONCE(1, "pblk: malformed bio\n");
-                goto fail_free_pages;
-        }
-
-        pr_ctx = kmalloc(sizeof(struct pblk_pr_ctx), GFP_KERNEL);
-        if (!pr_ctx)
-                goto fail_free_pages;
-
-        for (i = 0; i < nr_secs; i++)
-                lba_list_mem[i] = meta_list[i].lba;
-
-        new_bio->bi_iter.bi_sector = 0; /* internal bio */
-        bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
-
-        rqd->bio = new_bio;
-        rqd->nr_ppas = nr_holes;
-        rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
-
-        pr_ctx->ppa_ptr = NULL;
-        pr_ctx->orig_bio = bio;
-        bitmap_copy(pr_ctx->bitmap, read_bitmap, NVM_MAX_VLBA);
-        pr_ctx->bio_init_idx = bio_init_idx;
-        pr_ctx->orig_nr_secs = nr_secs;
-        r_ctx->private = pr_ctx;
-
-        if (unlikely(nr_holes == 1)) {
-                pr_ctx->ppa_ptr = rqd->ppa_list;
-                pr_ctx->dma_ppa_list = rqd->dma_ppa_list;
-                rqd->ppa_addr = rqd->ppa_list[0];
-        }
-        return 0;
-
-fail_free_pages:
-        pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
-fail_bio_put:
-        bio_put(new_bio);
-
-        return -ENOMEM;
-}
-
-static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
-                                 unsigned int bio_init_idx,
-                                 unsigned long *read_bitmap, int nr_secs)
-{
-        int nr_holes;
-        int ret;
-
-        nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
-
-        if (pblk_setup_partial_read(pblk, rqd, bio_init_idx, read_bitmap,
-                                    nr_holes))
-                return NVM_IO_ERR;
-
-        rqd->end_io = pblk_end_partial_read;
-
-        ret = pblk_submit_io(pblk, rqd);
-        if (ret) {
-                bio_put(rqd->bio);
-                pblk_err(pblk, "partial read IO submission failed\n");
-                goto err;
-        }
-
-        return NVM_IO_OK;
-
-err:
-        pblk_err(pblk, "failed to perform partial read\n");
-
-        /* Free allocated pages in new bio */
-        pblk_bio_free_pages(pblk, rqd->bio, 0, rqd->bio->bi_vcnt);
-        __pblk_end_io_read(pblk, rqd, false);
-        return NVM_IO_ERR;
-}
-
 static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
-                         sector_t lba, unsigned long *read_bitmap)
+                         sector_t lba, bool *from_cache)
 {
-        struct pblk_sec_meta *meta_list = rqd->meta_list;
+        struct pblk_sec_meta *meta = pblk_get_meta(pblk, rqd->meta_list, 0);
         struct ppa_addr ppa;
 
-        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
+        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);
 
 #ifdef CONFIG_NVM_PBLK_DEBUG
         atomic_long_inc(&pblk->inflight_reads);
@@ -415,8 +235,9 @@
 
 retry:
         if (pblk_ppa_empty(ppa)) {
-                WARN_ON(test_and_set_bit(0, read_bitmap));
-                meta_list[0].lba = cpu_to_le64(ADDR_EMPTY);
+                __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
+
+                meta->lba = addr_empty;
                 return;
         }
 
@@ -424,13 +245,12 @@
          * write buffer to prevent retrieving overwritten data.
          */
         if (pblk_addr_in_cache(ppa)) {
-                if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
-                        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
+                if (!pblk_read_from_cache(pblk, bio, lba, ppa)) {
+                        pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);
                         goto retry;
                 }
 
-                WARN_ON(test_and_set_bit(0, read_bitmap));
-                meta_list[0].lba = cpu_to_le64(lba);
+                meta->lba = cpu_to_le64(lba);
 
 #ifdef CONFIG_NVM_PBLK_DEBUG
                 atomic_long_inc(&pblk->cache_reads);
@@ -438,121 +258,99 @@
         } else {
                 rqd->ppa_addr = ppa;
         }
-
-        rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
 }
 
-int pblk_submit_read(struct pblk *pblk, struct bio *bio)
+void pblk_submit_read(struct pblk *pblk, struct bio *bio)
 {
-        struct nvm_tgt_dev *dev = pblk->dev;
-        struct request_queue *q = dev->q;
         sector_t blba = pblk_get_lba(bio);
         unsigned int nr_secs = pblk_get_secs(bio);
+        bool from_cache;
         struct pblk_g_ctx *r_ctx;
         struct nvm_rq *rqd;
-        unsigned int bio_init_idx;
-        DECLARE_BITMAP(read_bitmap, NVM_MAX_VLBA);
-        int ret = NVM_IO_ERR;
+        struct bio *int_bio, *split_bio;
+        unsigned long start_time;
 
-        /* logic error: lba out-of-bounds. Ignore read request */
-        if (blba >= pblk->rl.nr_secs || nr_secs > PBLK_MAX_REQ_ADDRS) {
-                WARN(1, "pblk: read lba out of bounds (lba:%llu, nr:%d)\n",
-                     (unsigned long long)blba, nr_secs);
-                return NVM_IO_ERR;
-        }
-
-        generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
-                              &pblk->disk->part0);
-
-        bitmap_zero(read_bitmap, nr_secs);
+        start_time = bio_start_io_acct(bio);
 
         rqd = pblk_alloc_rqd(pblk, PBLK_READ);
 
         rqd->opcode = NVM_OP_PREAD;
         rqd->nr_ppas = nr_secs;
-        rqd->bio = NULL; /* cloned bio if needed */
         rqd->private = pblk;
         rqd->end_io = pblk_end_io_read;
 
         r_ctx = nvm_rq_to_pdu(rqd);
-        r_ctx->start_time = jiffies;
+        r_ctx->start_time = start_time;
         r_ctx->lba = blba;
-        r_ctx->private = bio; /* original bio */
 
-        /* Save the index for this bio's start. This is needed in case
-         * we need to fill a partial read.
+        if (pblk_alloc_rqd_meta(pblk, rqd)) {
+                bio_io_error(bio);
+                pblk_free_rqd(pblk, rqd, PBLK_READ);
+                return;
+        }
+
+        /* Clone the read bio to deal internally with:
+         * - read errors when reading from the drive
+         * - bio_advance() calls during cache reads
          */
-        bio_init_idx = pblk_get_bi_idx(bio);
+        int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
 
-        rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
-                                           &rqd->dma_meta_list);
-        if (!rqd->meta_list) {
-                pblk_err(pblk, "not able to allocate ppa list\n");
-                goto fail_rqd_free;
-        }
+        if (nr_secs > 1)
+                nr_secs = pblk_read_ppalist_rq(pblk, rqd, int_bio, blba,
+                                               &from_cache);
+        else
+                pblk_read_rq(pblk, rqd, int_bio, blba, &from_cache);
 
-        if (nr_secs > 1) {
-                rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
-                rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
+split_retry:
+        r_ctx->private = bio; /* original bio */
+        rqd->bio = int_bio; /* internal bio */
 
-                pblk_read_ppalist_rq(pblk, rqd, bio, blba, read_bitmap);
-        } else {
-                pblk_read_rq(pblk, rqd, bio, blba, read_bitmap);
-        }
-
-        if (bitmap_full(read_bitmap, nr_secs)) {
+        if (from_cache && nr_secs == rqd->nr_ppas) {
+                /* All data was read from the cache; complete the IO. */
+                pblk_end_user_read(bio, 0);
                 atomic_inc(&pblk->inflight_io);
                 __pblk_end_io_read(pblk, rqd, false);
-                return NVM_IO_DONE;
-        }
+        } else if (nr_secs != rqd->nr_ppas) {
+                /* The read bio request could be partially filled by the
+                 * write buffer, but there are some holes that need to be
+                 * read from the drive. To handle this, use the block layer
+                 * mechanism to split the request into smaller ones and
+                 * chain them.
+                 */
+                split_bio = bio_split(bio, nr_secs * NR_PHY_IN_LOG, GFP_KERNEL,
+                                      &pblk_bio_set);
+                bio_chain(split_bio, bio);
+                submit_bio_noacct(bio);
 
-        /* All sectors are to be read from the device */
-        if (bitmap_empty(read_bitmap, rqd->nr_ppas)) {
-                struct bio *int_bio = NULL;
+                /* The new bio contains the first N sectors of the previous
+                 * one, so we can keep using the existing rqd, but we need
+                 * to shrink the number of PPAs in it. The new bio is also
+                 * guaranteed to contain only data from the cache or only
+                 * data from the drive, never a mix of them.
+                 */
+                bio = split_bio;
+                rqd->nr_ppas = nr_secs;
+                if (rqd->nr_ppas == 1)
+                        rqd->ppa_addr = rqd->ppa_list[0];
 
-                /* Clone read bio to deal with read errors internally */
+                /* Recreate int_bio - the existing one might already have
+                 * some internal fields modified.
+                 */
+                bio_put(int_bio);
                 int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
-                if (!int_bio) {
-                        pblk_err(pblk, "could not clone read bio\n");
-                        goto fail_end_io;
-                }
-
-                rqd->bio = int_bio;
-
-                if (pblk_submit_io(pblk, rqd)) {
-                        pblk_err(pblk, "read IO submission failed\n");
-                        ret = NVM_IO_ERR;
-                        goto fail_end_io;
-                }
-
-                return NVM_IO_OK;
+                goto split_retry;
+        } else if (pblk_submit_io(pblk, rqd, NULL)) {
+                /* Submitting the IO to the drive failed; report an error */
+                rqd->error = -ENODEV;
+                pblk_end_io_read(rqd);
         }
-
-        /* The read bio request could be partially filled by the write buffer,
-         * but there are some holes that need to be read from the drive.
-         */
-        ret = pblk_partial_read_bio(pblk, rqd, bio_init_idx, read_bitmap,
-                                    nr_secs);
-        if (ret)
-                goto fail_meta_free;
-
-        return NVM_IO_OK;
-
-fail_meta_free:
-        nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
-fail_rqd_free:
-        pblk_free_rqd(pblk, rqd, PBLK_READ);
-        return ret;
-fail_end_io:
-        __pblk_end_io_read(pblk, rqd, false);
-        return ret;
 }
 
 static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
                               struct pblk_line *line, u64 *lba_list,
                               u64 *paddr_list_gc, unsigned int nr_secs)
 {
-        struct ppa_addr ppa_list_l2p[PBLK_MAX_REQ_ADDRS];
+        struct ppa_addr ppa_list_l2p[NVM_MAX_VLBA];
         struct ppa_addr ppa_gc;
         int valid_secs = 0;
         int i;
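
The split_retry loop above is what retires the hole-filling partial-read
machinery deleted earlier in this diff. A condensed sketch of the block-layer
pattern it relies on, assuming a 4 KiB exposed sector so NR_PHY_IN_LOG
512-byte units make up one pblk sector (illustrative, error handling omitted):

/* Serve the first 'secs' sectors now; requeue the remainder. */
struct bio *head = bio_split(bio, secs * NR_PHY_IN_LOG, GFP_KERNEL,
                             &pblk_bio_set);
bio_chain(head, bio);   /* original completes only after both pieces */
submit_bio_noacct(bio); /* remainder re-enters pblk_submit_read() */
bio = head;             /* keep servicing the front piece with this rqd */

This works because pblk_read_ppalist_rq() guarantees the first nr_secs sectors
of a request share a single source (all write buffer or all media), so every
split piece can be completed from the cache or submitted to the drive as a
whole.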
@@ -590,7 +388,7 @@
                 goto out;
 
         /* logic error: lba out-of-bounds */
-        if (lba >= pblk->rl.nr_secs) {
+        if (lba >= pblk->capacity) {
                 WARN(1, "pblk: read lba out of bounds\n");
                 goto out;
         }
@@ -616,24 +414,16 @@
 
 int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
 {
-        struct nvm_tgt_dev *dev = pblk->dev;
-        struct nvm_geo *geo = &dev->geo;
-        struct bio *bio;
         struct nvm_rq rqd;
-        int data_len;
         int ret = NVM_IO_OK;
 
         memset(&rqd, 0, sizeof(struct nvm_rq));
 
-        rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
-                                          &rqd.dma_meta_list);
-        if (!rqd.meta_list)
-                return -ENOMEM;
+        ret = pblk_alloc_rqd_meta(pblk, &rqd);
+        if (ret)
+                return ret;
 
         if (gc_rq->nr_secs > 1) {
-                rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
-                rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
-
                 gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
                                                        gc_rq->lba_list,
                                                        gc_rq->paddr_list,
@@ -649,27 +439,12 @@
         if (!(gc_rq->secs_to_gc))
                 goto out;
 
-        data_len = (gc_rq->secs_to_gc) * geo->csecs;
-        bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
-                                PBLK_VMALLOC_META, GFP_KERNEL);
-        if (IS_ERR(bio)) {
-                pblk_err(pblk, "could not allocate GC bio (%lu)\n",
-                         PTR_ERR(bio));
-                goto err_free_dma;
-        }
-
-        bio->bi_iter.bi_sector = 0; /* internal bio */
-        bio_set_op_attrs(bio, REQ_OP_READ, 0);
-
         rqd.opcode = NVM_OP_PREAD;
         rqd.nr_ppas = gc_rq->secs_to_gc;
-        rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
-        rqd.bio = bio;
 
-        if (pblk_submit_io_sync(pblk, &rqd)) {
+        if (pblk_submit_io_sync(pblk, &rqd, gc_rq->data)) {
                 ret = -EIO;
-                pblk_err(pblk, "GC read request failed\n");
-                goto err_free_bio;
+                goto err_free_dma;
         }
 
         pblk_read_check_rand(pblk, &rqd, gc_rq->lba_list, gc_rq->nr_secs);
@@ -690,12 +465,10 @@
 #endif
 
 out:
-        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
+        pblk_free_rqd_meta(pblk, &rqd);
         return ret;
 
-err_free_bio:
-        bio_put(bio);
 err_free_dma:
-        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
+        pblk_free_rqd_meta(pblk, &rqd);
         return ret;
 }