  .. |  .. |
 227 | 227 | 	const struct merkle_tree_params *params = &vi->tree_params;
 228 | 228 | 	struct ahash_request *req;
 229 | 229 | 	struct bio_vec *bv;
 230 |     | -	int i;
     | 230 | +	struct bvec_iter_all iter_all;
 231 | 231 | 	unsigned long max_ra_pages = 0;
 232 | 232 | 
 233 | 233 | 	/* This allocation never fails, since it's mempool-backed. */
  .. |  .. |
 243 | 243 | 		 * This improves sequential read performance, as it greatly
 244 | 244 | 		 * reduces the number of I/O requests made to the Merkle tree.
 245 | 245 | 		 */
 246 |     | -		bio_for_each_segment_all(bv, bio, i)
     | 246 | +		bio_for_each_segment_all(bv, bio, iter_all)
 247 | 247 | 			max_ra_pages++;
 248 | 248 | 		max_ra_pages /= 4;
 249 | 249 | 	}
 250 | 250 | 
 251 |     | -	bio_for_each_segment_all(bv, bio, i) {
     | 251 | +	bio_for_each_segment_all(bv, bio, iter_all) {
 252 | 252 | 		struct page *page = bv->bv_page;
 253 | 253 | 		unsigned long level0_index = page->index >> params->log_arity;
 254 | 254 | 		unsigned long level0_ra_pages =