2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c

diff --git a/kernel/drivers/md/dm-zoned-target.c b/kernel/drivers/md/dm-zoned-target.c
--- a/kernel/drivers/md/dm-zoned-target.c
+++ b/kernel/drivers/md/dm-zoned-target.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2017 Western Digital Corporation or its affiliates.
  *
@@ -16,10 +17,10 @@
  * Zone BIO context.
  */
 struct dmz_bioctx {
-        struct dmz_target *target;
+        struct dmz_dev *dev;
         struct dm_zone *zone;
         struct bio *bio;
-        atomic_t ref;
+        refcount_t ref;
 };
 
 /*
@@ -27,7 +28,7 @@
  */
 struct dm_chunk_work {
         struct work_struct work;
-        atomic_t refcount;
+        refcount_t refcount;
         struct dmz_target *target;
         unsigned int chunk;
         struct bio_list bio_list;
@@ -37,18 +38,16 @@
  * Target descriptor.
  */
 struct dmz_target {
-        struct dm_dev *ddev;
+        struct dm_dev **ddev;
+        unsigned int nr_ddevs;
 
-        unsigned long flags;
+        unsigned int flags;
 
         /* Zoned block device information */
         struct dmz_dev *dev;
 
         /* For metadata handling */
         struct dmz_metadata *metadata;
-
-        /* For reclaim */
-        struct dmz_reclaim *reclaim;
 
         /* For chunk work */
         struct radix_tree_root chunk_rxtree;
@@ -75,14 +74,15 @@
  */
 static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
 {
-        struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+        struct dmz_bioctx *bioctx =
+                dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
 
         if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
                 bio->bi_status = status;
-        if (bio->bi_status != BLK_STS_OK)
-                bioctx->target->dev->flags |= DMZ_CHECK_BDEV;
+        if (bioctx->dev && bio->bi_status != BLK_STS_OK)
+                bioctx->dev->flags |= DMZ_CHECK_BDEV;
 
-        if (atomic_dec_and_test(&bioctx->ref)) {
+        if (refcount_dec_and_test(&bioctx->ref)) {
                 struct dm_zone *zone = bioctx->zone;
 
                 if (zone) {
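The endio path above leans on device-mapper's per-bio data: the DM core allocates roughly ti->per_io_data_size extra bytes alongside every BIO it hands to the target, and dm_per_bio_data() returns a pointer to that area, so the completion path needs no allocation or lookup of its own. A minimal sketch of the pattern (my_ctx/my_map are illustrative names, not part of this patch):

    /*
     * Sketch only: how a dm target wires up per-bio data. Assumes the
     * constructor set ti->per_io_data_size = sizeof(struct my_ctx).
     */
    struct my_ctx {
            refcount_t ref;         /* completions still in flight */
    };

    static int my_map(struct dm_target *ti, struct bio *bio)
    {
            struct my_ctx *ctx = dm_per_bio_data(bio, sizeof(struct my_ctx));

            refcount_set(&ctx->ref, 1);     /* dropped at BIO completion */
            return DM_MAPIO_SUBMITTED;
    }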
@@ -117,14 +117,20 @@
                            struct bio *bio, sector_t chunk_block,
                            unsigned int nr_blocks)
 {
-        struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+        struct dmz_bioctx *bioctx =
+                dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+        struct dmz_dev *dev = zone->dev;
         struct bio *clone;
+
+        if (dev->flags & DMZ_BDEV_DYING)
+                return -EIO;
 
         clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
         if (!clone)
                 return -ENOMEM;
 
-        bio_set_dev(clone, dmz->dev->bdev);
+        bio_set_dev(clone, dev->bdev);
+        bioctx->dev = dev;
         clone->bi_iter.bi_sector =
                 dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
         clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
@@ -133,8 +139,8 @@
 
         bio_advance(bio, clone->bi_iter.bi_size);
 
-        atomic_inc(&bioctx->ref);
-        generic_make_request(clone);
+        refcount_inc(&bioctx->ref);
+        submit_bio_noacct(clone);
 
         if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
                 zone->wp_block += nr_blocks;
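submit_bio_noacct() is the v5.9 rename of generic_make_request(), so that part of the hunk is an API-rename fixup rather than a behavioural change. The surrounding code is the usual clone-and-resubmit pattern: each clone covers the head of the parent BIO, the parent's iterator is advanced past it, and an extra context reference is taken per clone so the parent completes only after every clone does. A condensed sketch of the submission step (submit_range() is a hypothetical helper, error handling omitted):

    static void submit_range(struct bio *parent, struct block_device *bdev,
                             sector_t sector, unsigned int size,
                             struct bio_set *bs)
    {
            struct bio *clone = bio_clone_fast(parent, GFP_NOIO, bs);

            bio_set_dev(clone, bdev);
            clone->bi_iter.bi_sector = sector;      /* remapped start */
            clone->bi_iter.bi_size = size;          /* clone covers the head only */

            bio_advance(parent, size);              /* parent now begins at the tail */
            submit_bio_noacct(clone);
    }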
@@ -164,7 +170,8 @@
 static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
                            struct bio *bio)
 {
-        sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
+        struct dmz_metadata *zmd = dmz->metadata;
+        sector_t chunk_block = dmz_chunk_block(zmd, dmz_bio_block(bio));
         unsigned int nr_blocks = dmz_bio_blocks(bio);
         sector_t end_block = chunk_block + nr_blocks;
         struct dm_zone *rzone, *bzone;
@@ -176,19 +183,22 @@
                 return 0;
         }
 
-        dmz_dev_debug(dmz->dev, "READ chunk %llu -> %s zone %u, block %llu, %u blocks",
-                      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
-                      (dmz_is_rnd(zone) ? "RND" : "SEQ"),
-                      dmz_id(dmz->metadata, zone),
-                      (unsigned long long)chunk_block, nr_blocks);
+        DMDEBUG("(%s): READ chunk %llu -> %s zone %u, block %llu, %u blocks",
+                dmz_metadata_label(zmd),
+                (unsigned long long)dmz_bio_chunk(zmd, bio),
+                (dmz_is_rnd(zone) ? "RND" :
+                 (dmz_is_cache(zone) ? "CACHE" : "SEQ")),
+                zone->id,
+                (unsigned long long)chunk_block, nr_blocks);
 
         /* Check block validity to determine the read location */
         bzone = zone->bzone;
         while (chunk_block < end_block) {
                 nr_blocks = 0;
-                if (dmz_is_rnd(zone) || chunk_block < zone->wp_block) {
+                if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
+                    chunk_block < zone->wp_block) {
                         /* Test block validity in the data zone */
-                        ret = dmz_block_valid(dmz->metadata, zone, chunk_block);
+                        ret = dmz_block_valid(zmd, zone, chunk_block);
                         if (ret < 0)
                                 return ret;
                         if (ret > 0) {
@@ -203,7 +213,7 @@
                  * Check the buffer zone, if there is one.
                  */
                 if (!nr_blocks && bzone) {
-                        ret = dmz_block_valid(dmz->metadata, bzone, chunk_block);
+                        ret = dmz_block_valid(zmd, bzone, chunk_block);
                         if (ret < 0)
                                 return ret;
                         if (ret > 0) {
@@ -215,8 +225,10 @@
 
                 if (nr_blocks) {
                         /* Valid blocks found: read them */
-                        nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block);
-                        ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
+                        nr_blocks = min_t(unsigned int, nr_blocks,
+                                          end_block - chunk_block);
+                        ret = dmz_submit_bio(dmz, rzone, bio,
+                                             chunk_block, nr_blocks);
                         if (ret)
                                 return ret;
                         chunk_block += nr_blocks;
@@ -307,25 +319,30 @@
 static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
                             struct bio *bio)
 {
-        sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
+        struct dmz_metadata *zmd = dmz->metadata;
+        sector_t chunk_block = dmz_chunk_block(zmd, dmz_bio_block(bio));
         unsigned int nr_blocks = dmz_bio_blocks(bio);
 
         if (!zone)
                 return -ENOSPC;
 
-        dmz_dev_debug(dmz->dev, "WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
-                      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
-                      (dmz_is_rnd(zone) ? "RND" : "SEQ"),
-                      dmz_id(dmz->metadata, zone),
-                      (unsigned long long)chunk_block, nr_blocks);
+        DMDEBUG("(%s): WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
+                dmz_metadata_label(zmd),
+                (unsigned long long)dmz_bio_chunk(zmd, bio),
+                (dmz_is_rnd(zone) ? "RND" :
+                 (dmz_is_cache(zone) ? "CACHE" : "SEQ")),
+                zone->id,
+                (unsigned long long)chunk_block, nr_blocks);
 
-        if (dmz_is_rnd(zone) || chunk_block == zone->wp_block) {
+        if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
+            chunk_block == zone->wp_block) {
                 /*
                  * zone is a random zone or it is a sequential zone
                  * and the BIO is aligned to the zone write pointer:
                  * direct write the zone.
                  */
-                return dmz_handle_direct_write(dmz, zone, bio, chunk_block, nr_blocks);
+                return dmz_handle_direct_write(dmz, zone, bio,
+                                               chunk_block, nr_blocks);
         }
 
         /*
@@ -344,7 +361,7 @@
         struct dmz_metadata *zmd = dmz->metadata;
         sector_t block = dmz_bio_block(bio);
         unsigned int nr_blocks = dmz_bio_blocks(bio);
-        sector_t chunk_block = dmz_chunk_block(dmz->dev, block);
+        sector_t chunk_block = dmz_chunk_block(zmd, block);
         int ret = 0;
 
         /* For unmapped chunks, there is nothing to do */
@@ -354,16 +371,18 @@
         if (dmz_is_readonly(zone))
                 return -EROFS;
 
-        dmz_dev_debug(dmz->dev, "DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
-                      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
-                      dmz_id(zmd, zone),
-                      (unsigned long long)chunk_block, nr_blocks);
+        DMDEBUG("(%s): DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
+                dmz_metadata_label(dmz->metadata),
+                (unsigned long long)dmz_bio_chunk(zmd, bio),
+                zone->id,
+                (unsigned long long)chunk_block, nr_blocks);
 
         /*
          * Invalidate blocks in the data zone and its
          * buffer zone if one is mapped.
          */
-        if (dmz_is_rnd(zone) || chunk_block < zone->wp_block)
+        if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
+            chunk_block < zone->wp_block)
                 ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
         if (ret == 0 && zone->bzone)
                 ret = dmz_invalidate_blocks(zmd, zone->bzone,
@@ -377,31 +396,20 @@
 static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
                            struct bio *bio)
 {
-        struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+        struct dmz_bioctx *bioctx =
+                dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
         struct dmz_metadata *zmd = dmz->metadata;
         struct dm_zone *zone;
         int ret;
 
-        /*
-         * Write may trigger a zone allocation. So make sure the
-         * allocation can succeed.
-         */
-        if (bio_op(bio) == REQ_OP_WRITE)
-                dmz_schedule_reclaim(dmz->reclaim);
-
         dmz_lock_metadata(zmd);
-
-        if (dmz->dev->flags & DMZ_BDEV_DYING) {
-                ret = -EIO;
-                goto out;
-        }
 
         /*
          * Get the data zone mapping the chunk. There may be no
          * mapping for read and discard. If a mapping is obtained,
          * the zone returned will be set to active state.
          */
-        zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(dmz->dev, bio),
+        zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(zmd, bio),
                                      bio_op(bio));
         if (IS_ERR(zone)) {
                 ret = PTR_ERR(zone);
@@ -412,6 +420,7 @@
         if (zone) {
                 dmz_activate_zone(zone);
                 bioctx->zone = zone;
+                dmz_reclaim_bio_acc(zone->dev->reclaim);
         }
 
         switch (bio_op(bio)) {
@@ -426,8 +435,8 @@
                 ret = dmz_handle_discard(dmz, zone, bio);
                 break;
         default:
-                dmz_dev_err(dmz->dev, "Unsupported BIO operation 0x%x",
-                            bio_op(bio));
+                DMERR("(%s): Unsupported BIO operation 0x%x",
+                      dmz_metadata_label(dmz->metadata), bio_op(bio));
                 ret = -EIO;
         }
 
@@ -448,7 +457,7 @@
  */
 static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
 {
-        atomic_inc(&cw->refcount);
+        refcount_inc(&cw->refcount);
 }
 
 /*
@@ -457,7 +466,7 @@
  */
 static void dmz_put_chunk_work(struct dm_chunk_work *cw)
 {
-        if (atomic_dec_and_test(&cw->refcount)) {
+        if (refcount_dec_and_test(&cw->refcount)) {
                 WARN_ON(!bio_list_empty(&cw->bio_list));
                 radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk);
                 kfree(cw);
@@ -501,7 +510,8 @@
         /* Flush dirty metadata blocks */
         ret = dmz_flush_metadata(dmz->metadata);
         if (ret)
-                dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
+                DMDEBUG("(%s): Metadata flush failed, rc=%d",
+                        dmz_metadata_label(dmz->metadata), ret);
 
         /* Process queued flush requests */
         while (1) {
@@ -524,7 +534,7 @@
  */
 static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
 {
-        unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
+        unsigned int chunk = dmz_bio_chunk(dmz->metadata, bio);
         struct dm_chunk_work *cw;
         int ret = 0;
 
@@ -532,8 +542,9 @@
 
         /* Get the BIO chunk work. If one is not active yet, create one */
         cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
-        if (!cw) {
-
+        if (cw) {
+                dmz_get_chunk_work(cw);
+        } else {
                 /* Create a new chunk work */
                 cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
                 if (unlikely(!cw)) {
@@ -542,7 +553,7 @@
                 }
 
                 INIT_WORK(&cw->work, dmz_chunk_work);
-                atomic_set(&cw->refcount, 0);
+                refcount_set(&cw->refcount, 1);
                 cw->target = dmz;
                 cw->chunk = chunk;
                 bio_list_init(&cw->bio_list);
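The initial value changing from 0 to 1 is the important part of this hunk: refcount_t deliberately rejects increments from zero, since a 0 -> 1 transition usually means an object that may already be freed is being resurrected. The chunk work is therefore created already holding one reference, and that creator reference is dropped explicitly instead of the count starting at zero. A compilable userspace model of the lifecycle, with C11 atomics standing in for the kernel's refcount_t (names hypothetical):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct work {
            atomic_int refcount;    /* stands in for refcount_t */
    };

    static struct work *work_create(void)
    {
            struct work *w = malloc(sizeof(*w));

            atomic_init(&w->refcount, 1);   /* born referenced, as in the patch */
            return w;
    }

    static void work_get(struct work *w) { atomic_fetch_add(&w->refcount, 1); }

    static void work_put(struct work *w)
    {
            /* free on the 1 -> 0 transition, like refcount_dec_and_test() */
            if (atomic_fetch_sub(&w->refcount, 1) == 1) {
                    printf("last reference dropped, freeing\n");
                    free(w);
            }
    }

    int main(void)
    {
            struct work *w = work_create();

            work_get(w);    /* the queued instance holds a reference */
            work_put(w);    /* queue side done */
            work_put(w);    /* creator's reference: object freed here */
            return 0;
    }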
@@ -555,9 +566,7 @@
         }
 
         bio_list_add(&cw->bio_list, bio);
-        dmz_get_chunk_work(cw);
 
-        dmz_reclaim_bio_acc(dmz->reclaim);
         if (queue_work(dmz->chunk_wq, &cw->work))
                 dmz_get_chunk_work(cw);
 out:
@@ -617,23 +626,22 @@
 static int dmz_map(struct dm_target *ti, struct bio *bio)
 {
         struct dmz_target *dmz = ti->private;
-        struct dmz_dev *dev = dmz->dev;
+        struct dmz_metadata *zmd = dmz->metadata;
         struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
         sector_t sector = bio->bi_iter.bi_sector;
         unsigned int nr_sectors = bio_sectors(bio);
         sector_t chunk_sector;
         int ret;
 
-        if (dmz_bdev_is_dying(dmz->dev))
+        if (dmz_dev_is_dying(zmd))
                 return DM_MAPIO_KILL;
 
-        dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
-                      bio_op(bio), (unsigned long long)sector, nr_sectors,
-                      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
-                      (unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)),
-                      (unsigned int)dmz_bio_blocks(bio));
-
-        bio_set_dev(bio, dev->bdev);
+        DMDEBUG("(%s): BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
+                dmz_metadata_label(zmd),
+                bio_op(bio), (unsigned long long)sector, nr_sectors,
+                (unsigned long long)dmz_bio_chunk(zmd, bio),
+                (unsigned long long)dmz_chunk_block(zmd, dmz_bio_block(bio)),
+                (unsigned int)dmz_bio_blocks(bio));
 
         if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
                 return DM_MAPIO_REMAPPED;
@@ -643,10 +651,10 @@
                 return DM_MAPIO_KILL;
 
         /* Initialize the BIO context */
-        bioctx->target = dmz;
+        bioctx->dev = NULL;
         bioctx->zone = NULL;
         bioctx->bio = bio;
-        atomic_set(&bioctx->ref, 1);
+        refcount_set(&bioctx->ref, 1);
 
         /* Set the BIO pending in the flush list */
         if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
@@ -658,17 +666,17 @@
         }
 
         /* Split zone BIOs to fit entirely into a zone */
-        chunk_sector = sector & (dev->zone_nr_sectors - 1);
-        if (chunk_sector + nr_sectors > dev->zone_nr_sectors)
-                dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
+        chunk_sector = sector & (dmz_zone_nr_sectors(zmd) - 1);
+        if (chunk_sector + nr_sectors > dmz_zone_nr_sectors(zmd))
+                dm_accept_partial_bio(bio, dmz_zone_nr_sectors(zmd) - chunk_sector);
 
         /* Now ready to handle this BIO */
         ret = dmz_queue_chunk_work(dmz, bio);
         if (ret) {
-                dmz_dev_debug(dmz->dev,
-                              "BIO op %d, can't process chunk %llu, err %i\n",
-                              bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
-                              ret);
+                DMDEBUG("(%s): BIO op %d, can't process chunk %llu, err %i",
+                        dmz_metadata_label(zmd),
+                        bio_op(bio), (u64)dmz_bio_chunk(zmd, bio),
+                        ret);
                 return DM_MAPIO_REQUEUE;
         }
 
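The split logic above relies on the zone size being a power of two, which lets `sector & (zone_nr_sectors - 1)` stand in for a modulo; dm_accept_partial_bio() then tells the DM core to process only the part of the BIO that fits in the current zone and resubmit the remainder. A small standalone check of that arithmetic (the values are invented for illustration):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint64_t zone_nr_sectors = 1 << 19;       /* 256 MiB zones of 512 B sectors */
            uint64_t sector = 3 * zone_nr_sectors + 1000;   /* BIO starts 1000 sectors into zone 3 */
            uint64_t nr_sectors = zone_nr_sectors;          /* longer than the zone remainder */

            /* Offset within the zone; valid only because the size is a power of two. */
            uint64_t chunk_sector = sector & (zone_nr_sectors - 1);
            assert(chunk_sector == sector % zone_nr_sectors);

            if (chunk_sector + nr_sectors > zone_nr_sectors)
                    printf("accept only %llu sectors, requeue the rest\n",
                           (unsigned long long)(zone_nr_sectors - chunk_sector));
            return 0;
    }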
@@ -678,76 +686,144 @@
 /*
  * Get zoned device information.
  */
-static int dmz_get_zoned_device(struct dm_target *ti, char *path)
+static int dmz_get_zoned_device(struct dm_target *ti, char *path,
+                                int idx, int nr_devs)
 {
         struct dmz_target *dmz = ti->private;
-        struct request_queue *q;
+        struct dm_dev *ddev;
         struct dmz_dev *dev;
-        sector_t aligned_capacity;
         int ret;
+        struct block_device *bdev;
 
         /* Get the target device */
-        ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &dmz->ddev);
+        ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &ddev);
         if (ret) {
                 ti->error = "Get target device failed";
-                dmz->ddev = NULL;
                 return ret;
         }
 
-        dev = kzalloc(sizeof(struct dmz_dev), GFP_KERNEL);
-        if (!dev) {
-                ret = -ENOMEM;
-                goto err;
+        bdev = ddev->bdev;
+        if (bdev_zoned_model(bdev) == BLK_ZONED_NONE) {
+                if (nr_devs == 1) {
+                        ti->error = "Invalid regular device";
+                        goto err;
+                }
+                if (idx != 0) {
+                        ti->error = "First device must be a regular device";
+                        goto err;
+                }
+                if (dmz->ddev[0]) {
+                        ti->error = "Too many regular devices";
+                        goto err;
+                }
+                dev = &dmz->dev[idx];
+                dev->flags = DMZ_BDEV_REGULAR;
+        } else {
+                if (dmz->ddev[idx]) {
+                        ti->error = "Too many zoned devices";
+                        goto err;
+                }
+                if (nr_devs > 1 && idx == 0) {
+                        ti->error = "First device must be a regular device";
+                        goto err;
+                }
+                dev = &dmz->dev[idx];
         }
-
-        dev->bdev = dmz->ddev->bdev;
+        dev->bdev = bdev;
+        dev->dev_idx = idx;
         (void)bdevname(dev->bdev, dev->name);
 
-        if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE) {
-                ti->error = "Not a zoned block device";
-                ret = -EINVAL;
+        dev->capacity = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+        if (ti->begin) {
+                ti->error = "Partial mapping is not supported";
                 goto err;
         }
 
-        q = bdev_get_queue(dev->bdev);
-        dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
-        aligned_capacity = dev->capacity & ~(blk_queue_zone_sectors(q) - 1);
-        if (ti->begin ||
-            ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
-                ti->error = "Partial mapping not supported";
-                ret = -EINVAL;
-                goto err;
-        }
-
-        dev->zone_nr_sectors = blk_queue_zone_sectors(q);
-        dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors);
-
-        dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
-        dev->zone_nr_blocks_shift = ilog2(dev->zone_nr_blocks);
-
-        dev->nr_zones = (dev->capacity + dev->zone_nr_sectors - 1)
-                >> dev->zone_nr_sectors_shift;
-
-        dmz->dev = dev;
+        dmz->ddev[idx] = ddev;
 
         return 0;
 err:
-        dm_put_device(ti, dmz->ddev);
-        kfree(dev);
-
-        return ret;
+        dm_put_device(ti, ddev);
+        return -EINVAL;
 }
 
 /*
  * Cleanup zoned device information.
  */
-static void dmz_put_zoned_device(struct dm_target *ti)
+static void dmz_put_zoned_devices(struct dm_target *ti)
 {
         struct dmz_target *dmz = ti->private;
+        int i;
 
-        dm_put_device(ti, dmz->ddev);
-        kfree(dmz->dev);
-        dmz->dev = NULL;
+        for (i = 0; i < dmz->nr_ddevs; i++)
+                if (dmz->ddev[i])
+                        dm_put_device(ti, dmz->ddev[i]);
+
+        kfree(dmz->ddev);
+}
+
+static int dmz_fixup_devices(struct dm_target *ti)
+{
+        struct dmz_target *dmz = ti->private;
+        struct dmz_dev *reg_dev, *zoned_dev;
+        struct request_queue *q;
+        sector_t zone_nr_sectors = 0;
+        int i;
+
+        /*
+         * When we have more than one device, the first one must be a
+         * regular block device and the others zoned block devices.
+         */
+        if (dmz->nr_ddevs > 1) {
+                reg_dev = &dmz->dev[0];
+                if (!(reg_dev->flags & DMZ_BDEV_REGULAR)) {
+                        ti->error = "Primary disk is not a regular device";
+                        return -EINVAL;
+                }
+                for (i = 1; i < dmz->nr_ddevs; i++) {
+                        zoned_dev = &dmz->dev[i];
+                        if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
+                                ti->error = "Secondary disk is not a zoned device";
+                                return -EINVAL;
+                        }
+                        q = bdev_get_queue(zoned_dev->bdev);
+                        if (zone_nr_sectors &&
+                            zone_nr_sectors != blk_queue_zone_sectors(q)) {
+                                ti->error = "Zone nr sectors mismatch";
+                                return -EINVAL;
+                        }
+                        zone_nr_sectors = blk_queue_zone_sectors(q);
+                        zoned_dev->zone_nr_sectors = zone_nr_sectors;
+                        zoned_dev->nr_zones =
+                                blkdev_nr_zones(zoned_dev->bdev->bd_disk);
+                }
+        } else {
+                reg_dev = NULL;
+                zoned_dev = &dmz->dev[0];
+                if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
+                        ti->error = "Disk is not a zoned device";
+                        return -EINVAL;
+                }
+                q = bdev_get_queue(zoned_dev->bdev);
+                zoned_dev->zone_nr_sectors = blk_queue_zone_sectors(q);
+                zoned_dev->nr_zones = blkdev_nr_zones(zoned_dev->bdev->bd_disk);
+        }
+
+        if (reg_dev) {
+                sector_t zone_offset;
+
+                reg_dev->zone_nr_sectors = zone_nr_sectors;
+                reg_dev->nr_zones =
+                        DIV_ROUND_UP_SECTOR_T(reg_dev->capacity,
+                                              reg_dev->zone_nr_sectors);
+                reg_dev->zone_offset = 0;
+                zone_offset = reg_dev->nr_zones;
+                for (i = 1; i < dmz->nr_ddevs; i++) {
+                        dmz->dev[i].zone_offset = zone_offset;
+                        zone_offset += dmz->dev[i].nr_zones;
+                }
+        }
+        return 0;
 }
 
 /*
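The geometry fixup at the end of dmz_fixup_devices() deserves a worked example: the regular device is carved into pseudo-zones of the same size as the zoned devices' zones (its capacity rounded up), and each subsequent device's zones are indexed after all zones of the devices before it, yielding one flat zone namespace. A standalone model of that arithmetic (device sizes invented; in the patch the zoned devices' zone counts come from blkdev_nr_zones(), which the division below merely imitates):

    #include <stdint.h>
    #include <stdio.h>

    #define NR_DEVS 3

    int main(void)
    {
            const uint64_t zone_nr_sectors = 1 << 19;       /* from the zoned devices */
            uint64_t capacity[NR_DEVS] = { 1000000, 4194304, 8388608 };
            uint64_t nr_zones[NR_DEVS], zone_offset[NR_DEVS];
            int i;

            /* Regular device (index 0): round capacity up to whole pseudo-zones. */
            nr_zones[0] = (capacity[0] + zone_nr_sectors - 1) / zone_nr_sectors;
            zone_offset[0] = 0;

            /* Zoned devices follow, each offset by everything before it. */
            for (i = 1; i < NR_DEVS; i++) {
                    nr_zones[i] = capacity[i] / zone_nr_sectors;
                    zone_offset[i] = zone_offset[i - 1] + nr_zones[i - 1];
            }

            for (i = 0; i < NR_DEVS; i++)
                    printf("dev %d: %llu zones, first global zone id %llu\n", i,
                           (unsigned long long)nr_zones[i],
                           (unsigned long long)zone_offset[i]);
            return 0;
    }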
@@ -756,11 +832,10 @@
 static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
         struct dmz_target *dmz;
-        struct dmz_dev *dev;
-        int ret;
+        int ret, i;
 
         /* Check arguments */
-        if (argc != 1) {
+        if (argc < 1) {
                 ti->error = "Invalid argument count";
                 return -EINVAL;
         }
@@ -771,35 +846,52 @@
                 ti->error = "Unable to allocate the zoned target descriptor";
                 return -ENOMEM;
         }
+        dmz->dev = kcalloc(argc, sizeof(struct dmz_dev), GFP_KERNEL);
+        if (!dmz->dev) {
+                ti->error = "Unable to allocate the zoned device descriptors";
+                kfree(dmz);
+                return -ENOMEM;
+        }
+        dmz->ddev = kcalloc(argc, sizeof(struct dm_dev *), GFP_KERNEL);
+        if (!dmz->ddev) {
+                ti->error = "Unable to allocate the dm device descriptors";
+                ret = -ENOMEM;
+                goto err;
+        }
+        dmz->nr_ddevs = argc;
+
         ti->private = dmz;
 
         /* Get the target zoned block device */
-        ret = dmz_get_zoned_device(ti, argv[0]);
-        if (ret) {
-                dmz->ddev = NULL;
-                goto err;
+        for (i = 0; i < argc; i++) {
+                ret = dmz_get_zoned_device(ti, argv[i], i, argc);
+                if (ret)
+                        goto err_dev;
         }
+        ret = dmz_fixup_devices(ti);
+        if (ret)
+                goto err_dev;
 
         /* Initialize metadata */
-        dev = dmz->dev;
-        ret = dmz_ctr_metadata(dev, &dmz->metadata);
+        ret = dmz_ctr_metadata(dmz->dev, argc, &dmz->metadata,
+                               dm_table_device_name(ti->table));
         if (ret) {
                 ti->error = "Metadata initialization failed";
                 goto err_dev;
         }
 
         /* Set target (no write same support) */
-        ti->max_io_len = dev->zone_nr_sectors;
+        ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata);
         ti->num_flush_bios = 1;
         ti->num_discard_bios = 1;
         ti->num_write_zeroes_bios = 1;
         ti->per_io_data_size = sizeof(struct dmz_bioctx);
         ti->flush_supported = true;
         ti->discards_supported = true;
-        ti->split_discard_bios = true;
 
         /* The exposed capacity is the number of chunks that can be mapped */
-        ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) << dev->zone_nr_sectors_shift;
+        ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) <<
+                dmz_zone_nr_sectors_shift(dmz->metadata);
 
         /* Zone BIO */
         ret = bioset_init(&dmz->bio_set, DMZ_MIN_BIOS, 0, 0);
@@ -811,8 +903,9 @@
         /* Chunk BIO work */
         mutex_init(&dmz->chunk_lock);
         INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
-        dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
-                                        0, dev->name);
+        dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s",
+                                        WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
+                                        dmz_metadata_label(dmz->metadata));
         if (!dmz->chunk_wq) {
                 ti->error = "Create chunk workqueue failed";
                 ret = -ENOMEM;
824917 bio_list_init(&dmz->flush_list);
825918 INIT_DELAYED_WORK(&dmz->flush_work, dmz_flush_work);
826919 dmz->flush_wq = alloc_ordered_workqueue("dmz_fwq_%s", WQ_MEM_RECLAIM,
827
- dev->name);
920
+ dmz_metadata_label(dmz->metadata));
828921 if (!dmz->flush_wq) {
829922 ti->error = "Create flush workqueue failed";
830923 ret = -ENOMEM;
....@@ -833,15 +926,18 @@
833926 mod_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
834927
835928 /* Initialize reclaim */
836
- ret = dmz_ctr_reclaim(dev, dmz->metadata, &dmz->reclaim);
837
- if (ret) {
838
- ti->error = "Zone reclaim initialization failed";
839
- goto err_fwq;
929
+ for (i = 0; i < dmz->nr_ddevs; i++) {
930
+ ret = dmz_ctr_reclaim(dmz->metadata, &dmz->dev[i].reclaim, i);
931
+ if (ret) {
932
+ ti->error = "Zone reclaim initialization failed";
933
+ goto err_fwq;
934
+ }
840935 }
841936
842
- dmz_dev_info(dev, "Target device: %llu 512-byte logical sectors (%llu blocks)",
843
- (unsigned long long)ti->len,
844
- (unsigned long long)dmz_sect2blk(ti->len));
937
+ DMINFO("(%s): Target device: %llu 512-byte logical sectors (%llu blocks)",
938
+ dmz_metadata_label(dmz->metadata),
939
+ (unsigned long long)ti->len,
940
+ (unsigned long long)dmz_sect2blk(ti->len));
845941
846942 return 0;
847943 err_fwq:
@@ -854,8 +950,9 @@
 err_meta:
         dmz_dtr_metadata(dmz->metadata);
 err_dev:
-        dmz_put_zoned_device(ti);
+        dmz_put_zoned_devices(ti);
 err:
+        kfree(dmz->dev);
         kfree(dmz);
 
         return ret;
@@ -867,11 +964,13 @@
 static void dmz_dtr(struct dm_target *ti)
 {
         struct dmz_target *dmz = ti->private;
+        int i;
 
         flush_workqueue(dmz->chunk_wq);
         destroy_workqueue(dmz->chunk_wq);
 
-        dmz_dtr_reclaim(dmz->reclaim);
+        for (i = 0; i < dmz->nr_ddevs; i++)
+                dmz_dtr_reclaim(dmz->dev[i].reclaim);
 
         cancel_delayed_work_sync(&dmz->flush_work);
         destroy_workqueue(dmz->flush_wq);
@@ -882,10 +981,11 @@
 
         bioset_exit(&dmz->bio_set);
 
-        dmz_put_zoned_device(ti);
+        dmz_put_zoned_devices(ti);
 
         mutex_destroy(&dmz->chunk_lock);
 
+        kfree(dmz->dev);
         kfree(dmz);
 }
 
@@ -895,7 +995,7 @@
 static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
         struct dmz_target *dmz = ti->private;
-        unsigned int chunk_sectors = dmz->dev->zone_nr_sectors;
+        unsigned int chunk_sectors = dmz_zone_nr_sectors(dmz->metadata);
 
         limits->logical_block_size = DMZ_BLOCK_SIZE;
         limits->physical_block_size = DMZ_BLOCK_SIZE;
@@ -923,11 +1023,12 @@
 static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
 {
         struct dmz_target *dmz = ti->private;
+        struct dmz_dev *dev = &dmz->dev[0];
 
-        if (!dmz_check_bdev(dmz->dev))
+        if (!dmz_check_bdev(dev))
                 return -EIO;
 
-        *bdev = dmz->dev->bdev;
+        *bdev = dev->bdev;
 
         return 0;
 }
@@ -938,9 +1039,11 @@
 static void dmz_suspend(struct dm_target *ti)
 {
         struct dmz_target *dmz = ti->private;
+        int i;
 
         flush_workqueue(dmz->chunk_wq);
-        dmz_suspend_reclaim(dmz->reclaim);
+        for (i = 0; i < dmz->nr_ddevs; i++)
+                dmz_suspend_reclaim(dmz->dev[i].reclaim);
         cancel_delayed_work_sync(&dmz->flush_work);
 }
 
@@ -950,25 +1053,96 @@
 static void dmz_resume(struct dm_target *ti)
 {
         struct dmz_target *dmz = ti->private;
+        int i;
 
         queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
-        dmz_resume_reclaim(dmz->reclaim);
+        for (i = 0; i < dmz->nr_ddevs; i++)
+                dmz_resume_reclaim(dmz->dev[i].reclaim);
 }
 
 static int dmz_iterate_devices(struct dm_target *ti,
                                iterate_devices_callout_fn fn, void *data)
 {
         struct dmz_target *dmz = ti->private;
-        struct dmz_dev *dev = dmz->dev;
-        sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1);
+        unsigned int zone_nr_sectors = dmz_zone_nr_sectors(dmz->metadata);
+        sector_t capacity;
+        int i, r;
 
-        return fn(ti, dmz->ddev, 0, capacity, data);
+        for (i = 0; i < dmz->nr_ddevs; i++) {
+                capacity = dmz->dev[i].capacity & ~(zone_nr_sectors - 1);
+                r = fn(ti, dmz->ddev[i], 0, capacity, data);
+                if (r)
+                        break;
+        }
+        return r;
+}
+
+static void dmz_status(struct dm_target *ti, status_type_t type,
+                       unsigned int status_flags, char *result,
+                       unsigned int maxlen)
+{
+        struct dmz_target *dmz = ti->private;
+        ssize_t sz = 0;
+        char buf[BDEVNAME_SIZE];
+        struct dmz_dev *dev;
+        int i;
+
+        switch (type) {
+        case STATUSTYPE_INFO:
+                DMEMIT("%u zones %u/%u cache",
+                       dmz_nr_zones(dmz->metadata),
+                       dmz_nr_unmap_cache_zones(dmz->metadata),
+                       dmz_nr_cache_zones(dmz->metadata));
+                for (i = 0; i < dmz->nr_ddevs; i++) {
+                        /*
+                         * For a multi-device setup the first device
+                         * contains only cache zones.
+                         */
+                        if ((i == 0) &&
+                            (dmz_nr_cache_zones(dmz->metadata) > 0))
+                                continue;
+                        DMEMIT(" %u/%u random %u/%u sequential",
+                               dmz_nr_unmap_rnd_zones(dmz->metadata, i),
+                               dmz_nr_rnd_zones(dmz->metadata, i),
+                               dmz_nr_unmap_seq_zones(dmz->metadata, i),
+                               dmz_nr_seq_zones(dmz->metadata, i));
+                }
+                break;
+        case STATUSTYPE_TABLE:
+                dev = &dmz->dev[0];
+                format_dev_t(buf, dev->bdev->bd_dev);
+                DMEMIT("%s", buf);
+                for (i = 1; i < dmz->nr_ddevs; i++) {
+                        dev = &dmz->dev[i];
+                        format_dev_t(buf, dev->bdev->bd_dev);
+                        DMEMIT(" %s", buf);
+                }
+                break;
+        }
+        return;
+}
+
+static int dmz_message(struct dm_target *ti, unsigned int argc, char **argv,
+                       char *result, unsigned int maxlen)
+{
+        struct dmz_target *dmz = ti->private;
+        int r = -EINVAL;
+
+        if (!strcasecmp(argv[0], "reclaim")) {
+                int i;
+
+                for (i = 0; i < dmz->nr_ddevs; i++)
+                        dmz_schedule_reclaim(dmz->dev[i].reclaim);
+                r = 0;
+        } else
+                DMERR("unrecognized message %s", argv[0]);
+        return r;
 }
 
 static struct target_type dmz_type = {
         .name            = "zoned",
-        .version         = {1, 0, 0},
-        .features        = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM,
+        .version         = {2, 0, 0},
+        .features        = DM_TARGET_SINGLETON | DM_TARGET_MIXED_ZONED_MODEL,
         .module          = THIS_MODULE,
         .ctr             = dmz_ctr,
         .dtr             = dmz_dtr,
@@ -978,6 +1152,8 @@
         .postsuspend     = dmz_suspend,
         .resume          = dmz_resume,
         .iterate_devices = dmz_iterate_devices,
+        .status          = dmz_status,
+        .message         = dmz_message,
 };
 
 static int __init dmz_init(void)
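Usage note: with the new status and message hooks in place, `dmsetup status <dev>` reports the zone accounting emitted by dmz_status() (total zones, unmapped/total cache zones, then per-device unmapped/total random and sequential zone counts), `dmsetup table <dev>` lists every backing device, and `dmsetup message <dev> 0 reclaim` kicks reclaim on all devices through dmz_message(). Bumping the target to version 2.0.0 and replacing DM_TARGET_ZONED_HM with DM_TARGET_MIXED_ZONED_MODEL advertises that a single table may now mix a regular device with zoned ones.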