| .. | .. |
|---|
| 10 | 10 | |
|---|
| 11 | 11 | #include "blk.h" |
|---|
| 12 | 12 | |
|---|
| 13 | | -static struct bio *next_bio(struct bio *bio, unsigned int nr_pages, |
|---|
| 14 | | - gfp_t gfp) |
|---|
| 13 | +struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp) |
|---|
| 15 | 14 | { |
|---|
| 16 | 15 | struct bio *new = bio_alloc(gfp, nr_pages); |
|---|
| 17 | 16 | |
|---|
| .. | .. |
|---|
| 30 | 29 | struct request_queue *q = bdev_get_queue(bdev); |
|---|
| 31 | 30 | struct bio *bio = *biop; |
|---|
| 32 | 31 | unsigned int op; |
|---|
| 33 | | - sector_t bs_mask; |
|---|
| 32 | + sector_t bs_mask, part_offset = 0; |
|---|
| 34 | 33 | |
|---|
| 35 | 34 | if (!q) |
|---|
| 36 | 35 | return -ENXIO; |
|---|
| .. | .. |
|---|
| 48 | 47 | op = REQ_OP_DISCARD; |
|---|
| 49 | 48 | } |
|---|
| 50 | 49 | |
|---|
| 50 | + /* In case the discard granularity isn't set by a buggy device driver */ |
|---|
| 51 | + if (WARN_ON_ONCE(!q->limits.discard_granularity)) { |
|---|
| 52 | + char dev_name[BDEVNAME_SIZE]; |
|---|
| 53 | + |
|---|
| 54 | + bdevname(bdev, dev_name); |
|---|
| 55 | + pr_err_ratelimited("%s: Error: discard_granularity is 0.\n", dev_name); |
|---|
| 56 | + return -EOPNOTSUPP; |
|---|
| 57 | + } |
|---|
| 58 | + |
|---|
| 51 | 59 | bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1; |
|---|
| 52 | 60 | if ((sector | nr_sects) & bs_mask) |
|---|
| 53 | 61 | return -EINVAL; |
|---|
| .. | .. |
|---|
| 55 | 63 | if (!nr_sects) |
|---|
| 56 | 64 | return -EINVAL; |
|---|
| 57 | 65 | |
|---|
| 66 | + /* In case the discard request is in a partition */ |
|---|
| 67 | + if (bdev_is_partition(bdev)) |
|---|
| 68 | + part_offset = bdev->bd_part->start_sect; |
|---|
| 69 | + |
|---|
| 58 | 70 | while (nr_sects) { |
|---|
| 59 | | - sector_t req_sects = min_t(sector_t, nr_sects, |
|---|
| 60 | | - bio_allowed_max_sectors(q)); |
|---|
| 71 | + sector_t granularity_aligned_lba, req_sects; |
|---|
| 72 | + sector_t sector_mapped = sector + part_offset; |
|---|
| 73 | + |
|---|
| 74 | + granularity_aligned_lba = round_up(sector_mapped, |
|---|
| 75 | + q->limits.discard_granularity >> SECTOR_SHIFT); |
|---|
| 76 | + |
|---|
| 77 | + /* |
|---|
| 78 | + * Check whether the discard bio starts at a discard_granularity |
|---|
| 79 | + * aligned LBA, |
|---|
| 80 | + * - If no: set (granularity_aligned_lba - sector_mapped) to |
|---|
| 81 | + * bi_size of the first split bio, then the second bio will |
|---|
| 82 | + * start at a discard_granularity aligned LBA on the device. |
|---|
| 83 | + * - If yes: use bio_aligned_discard_max_sectors() as the max |
|---|
| 84 | + * possible bi_size of the first split bio. Then when this bio |
|---|
| 85 | + * is split in device drivers, the split ones are very likely |
|---|
| 86 | + * to be aligned to discard_granularity of the device's queue. |
|---|
| 87 | + */ |
|---|
| 88 | + if (granularity_aligned_lba == sector_mapped) |
|---|
| 89 | + req_sects = min_t(sector_t, nr_sects, |
|---|
| 90 | + bio_aligned_discard_max_sectors(q)); |
|---|
| 91 | + else |
|---|
| 92 | + req_sects = min_t(sector_t, nr_sects, |
|---|
| 93 | + granularity_aligned_lba - sector_mapped); |
|---|
| 61 | 94 | |
|---|
| 62 | 95 | WARN_ON_ONCE((req_sects << 9) > UINT_MAX); |
|---|
| 63 | 96 | |
|---|
| 64 | | - bio = next_bio(bio, 0, gfp_mask); |
|---|
| 97 | + bio = blk_next_bio(bio, 0, gfp_mask); |
|---|
| 65 | 98 | bio->bi_iter.bi_sector = sector; |
|---|
| 66 | 99 | bio_set_dev(bio, bdev); |
|---|
| 67 | 100 | bio_set_op_attrs(bio, op, 0); |
|---|
| .. | .. |
|---|
| 155 | 188 | max_write_same_sectors = bio_allowed_max_sectors(q); |
|---|
| 156 | 189 | |
|---|
| 157 | 190 | while (nr_sects) { |
|---|
| 158 | | - bio = next_bio(bio, 1, gfp_mask); |
|---|
| 191 | + bio = blk_next_bio(bio, 1, gfp_mask); |
|---|
| 159 | 192 | bio->bi_iter.bi_sector = sector; |
|---|
| 160 | 193 | bio_set_dev(bio, bdev); |
|---|
| 161 | 194 | bio->bi_vcnt = 1; |
|---|
| .. | .. |
|---|
| 231 | 264 | return -EOPNOTSUPP; |
|---|
| 232 | 265 | |
|---|
| 233 | 266 | while (nr_sects) { |
|---|
| 234 | | - bio = next_bio(bio, 0, gfp_mask); |
|---|
| 267 | + bio = blk_next_bio(bio, 0, gfp_mask); |
|---|
| 235 | 268 | bio->bi_iter.bi_sector = sector; |
|---|
| 236 | 269 | bio_set_dev(bio, bdev); |
|---|
| 237 | 270 | bio->bi_opf = REQ_OP_WRITE_ZEROES; |
|---|
| .. | .. |
|---|
| 282 | 315 | return -EPERM; |
|---|
| 283 | 316 | |
|---|
| 284 | 317 | while (nr_sects != 0) { |
|---|
| 285 | | - bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects), |
|---|
| 286 | | - gfp_mask); |
|---|
| 318 | + bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects), |
|---|
| 319 | + gfp_mask); |
|---|
| 287 | 320 | bio->bi_iter.bi_sector = sector; |
|---|
| 288 | 321 | bio_set_dev(bio, bdev); |
|---|
| 289 | 322 | bio_set_op_attrs(bio, REQ_OP_WRITE, 0); |
|---|