+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2017 Western Digital Corporation or its affiliates.
  *
...
  * Zone BIO context.
  */
 struct dmz_bioctx {
-	struct dmz_target *target;
+	struct dmz_dev *dev;
 	struct dm_zone *zone;
 	struct bio *bio;
-	atomic_t ref;
+	refcount_t ref;
 };
 
 /*
...
  */
 struct dm_chunk_work {
 	struct work_struct work;
-	atomic_t refcount;
+	refcount_t refcount;
 	struct dmz_target *target;
 	unsigned int chunk;
 	struct bio_list bio_list;
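Both `dmz_bioctx.ref` and `dm_chunk_work.refcount` move from `atomic_t` to `refcount_t` here. `refcount_t` saturates instead of wrapping on overflow or underflow and WARNs on suspicious transitions, so a reference-counting bug becomes a loud warning rather than a silent use-after-free. A minimal sketch of the idiom, using hypothetical `my_work` names rather than anything from this driver:

```c
#include <linux/refcount.h>
#include <linux/slab.h>

struct my_work {
	refcount_t refcount;
	/* ... payload ... */
};

static struct my_work *my_work_create(void)
{
	struct my_work *w = kmalloc(sizeof(*w), GFP_NOIO);

	if (w)
		refcount_set(&w->refcount, 1);	/* creator holds the first reference */
	return w;
}

static void my_work_get(struct my_work *w)
{
	refcount_inc(&w->refcount);	/* WARNs and saturates if the count was 0 */
}

static void my_work_put(struct my_work *w)
{
	if (refcount_dec_and_test(&w->refcount))
		kfree(w);		/* last reference dropped */
}
```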
|---|
...
  * Target descriptor.
  */
 struct dmz_target {
-	struct dm_dev *ddev;
+	struct dm_dev **ddev;
+	unsigned int nr_ddevs;
 
-	unsigned long flags;
+	unsigned int flags;
 
 	/* Zoned block device information */
 	struct dmz_dev *dev;
 
 	/* For metadata handling */
 	struct dmz_metadata *metadata;
-
-	/* For reclaim */
-	struct dmz_reclaim *reclaim;
 
 	/* For chunk work */
 	struct radix_tree_root chunk_rxtree;
...
  */
 static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
 {
-	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+	struct dmz_bioctx *bioctx =
+		dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
 
 	if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
 		bio->bi_status = status;
-	if (bio->bi_status != BLK_STS_OK)
-		bioctx->target->dev->flags |= DMZ_CHECK_BDEV;
+	if (bioctx->dev && bio->bi_status != BLK_STS_OK)
+		bioctx->dev->flags |= DMZ_CHECK_BDEV;
 
-	if (atomic_dec_and_test(&bioctx->ref)) {
+	if (refcount_dec_and_test(&bioctx->ref)) {
 		struct dm_zone *zone = bioctx->zone;
 
 		if (zone) {
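`dm_per_bio_data()` returns the per-BIO scratch area that device-mapper reserves alongside every BIO sent to a target; its size is whatever the target declared in `ti->per_io_data_size` at construction time, which is why the same `sizeof(struct dmz_bioctx)` appears at every call site. A minimal sketch of the pattern (the `my_bioctx` type and `my_*` callbacks are illustrative, not from this driver):

```c
#include <linux/device-mapper.h>
#include <linux/refcount.h>

struct my_bioctx {
	struct bio *bio;
	refcount_t ref;
};

static int my_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	/* Reserve room for one struct my_bioctx per BIO. */
	ti->per_io_data_size = sizeof(struct my_bioctx);
	return 0;
}

static int my_map(struct dm_target *ti, struct bio *bio)
{
	/* The size passed here must match per_io_data_size. */
	struct my_bioctx *ctx = dm_per_bio_data(bio, sizeof(struct my_bioctx));

	ctx->bio = bio;
	refcount_set(&ctx->ref, 1);
	/* ... remapping logic omitted ... */
	return DM_MAPIO_REMAPPED;
}
```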
|---|
...
 			  struct bio *bio, sector_t chunk_block,
 			  unsigned int nr_blocks)
 {
-	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+	struct dmz_bioctx *bioctx =
+		dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+	struct dmz_dev *dev = zone->dev;
 	struct bio *clone;
+
+	if (dev->flags & DMZ_BDEV_DYING)
+		return -EIO;
 
 	clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
 	if (!clone)
 		return -ENOMEM;
 
-	bio_set_dev(clone, dmz->dev->bdev);
+	bio_set_dev(clone, dev->bdev);
+	bioctx->dev = dev;
 	clone->bi_iter.bi_sector =
 		dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
 	clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
...
 
 	bio_advance(bio, clone->bi_iter.bi_size);
 
-	atomic_inc(&bioctx->ref);
-	generic_make_request(clone);
+	refcount_inc(&bioctx->ref);
+	submit_bio_noacct(clone);
 
 	if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
 		zone->wp_block += nr_blocks;
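The submission call changes because `generic_make_request()` was renamed `submit_bio_noacct()` in v5.9. The pattern around it is the interesting part: each clone takes a reference on the parent's per-BIO context before being submitted, and the clone's completion handler drops it, so the parent BIO only completes once every clone has finished. A condensed sketch of that clone-and-account pattern, reusing the hypothetical `my_bioctx` from the previous example (propagation of the clone's `bi_status` back to the parent is omitted for brevity):

```c
#include <linux/bio.h>

/* Completion for one clone: drop the parent's reference and, once the
 * last clone finishes, complete the parent BIO itself. */
static void my_clone_endio(struct bio *clone)
{
	struct my_bioctx *ctx = clone->bi_private;

	bio_put(clone);
	if (refcount_dec_and_test(&ctx->ref))
		bio_endio(ctx->bio);
}

/* Clone the head of 'parent', point it at 'bdev', and submit it. */
static int my_submit_clone(struct bio *parent, struct my_bioctx *ctx,
			   struct block_device *bdev, sector_t sector,
			   unsigned int nr_sectors, struct bio_set *bs)
{
	struct bio *clone = bio_clone_fast(parent, GFP_NOIO, bs);

	if (!clone)
		return -ENOMEM;

	bio_set_dev(clone, bdev);
	clone->bi_iter.bi_sector = sector;
	clone->bi_iter.bi_size = nr_sectors << SECTOR_SHIFT;
	clone->bi_end_io = my_clone_endio;
	clone->bi_private = ctx;

	/* Consume the cloned range from the parent iterator ... */
	bio_advance(parent, clone->bi_iter.bi_size);

	/* ... and pin the parent until this clone completes. */
	refcount_inc(&ctx->ref);
	submit_bio_noacct(clone);
	return 0;
}
```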
|---|
...
 static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
 			   struct bio *bio)
 {
-	sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
+	struct dmz_metadata *zmd = dmz->metadata;
+	sector_t chunk_block = dmz_chunk_block(zmd, dmz_bio_block(bio));
 	unsigned int nr_blocks = dmz_bio_blocks(bio);
 	sector_t end_block = chunk_block + nr_blocks;
 	struct dm_zone *rzone, *bzone;
...
 		return 0;
 	}
 
-	dmz_dev_debug(dmz->dev, "READ chunk %llu -> %s zone %u, block %llu, %u blocks",
-		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
-		      (dmz_is_rnd(zone) ? "RND" : "SEQ"),
-		      dmz_id(dmz->metadata, zone),
-		      (unsigned long long)chunk_block, nr_blocks);
+	DMDEBUG("(%s): READ chunk %llu -> %s zone %u, block %llu, %u blocks",
+		dmz_metadata_label(zmd),
+		(unsigned long long)dmz_bio_chunk(zmd, bio),
+		(dmz_is_rnd(zone) ? "RND" :
+		 (dmz_is_cache(zone) ? "CACHE" : "SEQ")),
+		zone->id,
+		(unsigned long long)chunk_block, nr_blocks);
 
 	/* Check block validity to determine the read location */
 	bzone = zone->bzone;
 	while (chunk_block < end_block) {
 		nr_blocks = 0;
-		if (dmz_is_rnd(zone) || chunk_block < zone->wp_block) {
+		if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
+		    chunk_block < zone->wp_block) {
 			/* Test block validity in the data zone */
-			ret = dmz_block_valid(dmz->metadata, zone, chunk_block);
+			ret = dmz_block_valid(zmd, zone, chunk_block);
 			if (ret < 0)
 				return ret;
 			if (ret > 0) {
...
 		 * Check the buffer zone, if there is one.
 		 */
 		if (!nr_blocks && bzone) {
-			ret = dmz_block_valid(dmz->metadata, bzone, chunk_block);
+			ret = dmz_block_valid(zmd, bzone, chunk_block);
 			if (ret < 0)
 				return ret;
 			if (ret > 0) {
...
 
 		if (nr_blocks) {
 			/* Valid blocks found: read them */
-			nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block);
-			ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
+			nr_blocks = min_t(unsigned int, nr_blocks,
+					  end_block - chunk_block);
+			ret = dmz_submit_bio(dmz, rzone, bio,
+					     chunk_block, nr_blocks);
 			if (ret)
 				return ret;
 			chunk_block += nr_blocks;
|---|
...
 static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
 			    struct bio *bio)
 {
-	sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
+	struct dmz_metadata *zmd = dmz->metadata;
+	sector_t chunk_block = dmz_chunk_block(zmd, dmz_bio_block(bio));
 	unsigned int nr_blocks = dmz_bio_blocks(bio);
 
 	if (!zone)
 		return -ENOSPC;
 
-	dmz_dev_debug(dmz->dev, "WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
-		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
-		      (dmz_is_rnd(zone) ? "RND" : "SEQ"),
-		      dmz_id(dmz->metadata, zone),
-		      (unsigned long long)chunk_block, nr_blocks);
+	DMDEBUG("(%s): WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
+		dmz_metadata_label(zmd),
+		(unsigned long long)dmz_bio_chunk(zmd, bio),
+		(dmz_is_rnd(zone) ? "RND" :
+		 (dmz_is_cache(zone) ? "CACHE" : "SEQ")),
+		zone->id,
+		(unsigned long long)chunk_block, nr_blocks);
 
-	if (dmz_is_rnd(zone) || chunk_block == zone->wp_block) {
+	if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
+	    chunk_block == zone->wp_block) {
 		/*
 		 * zone is a random zone or it is a sequential zone
 		 * and the BIO is aligned to the zone write pointer:
 		 * direct write the zone.
 		 */
-		return dmz_handle_direct_write(dmz, zone, bio, chunk_block, nr_blocks);
+		return dmz_handle_direct_write(dmz, zone, bio,
+					       chunk_block, nr_blocks);
 	}
 
 	/*
...
 	struct dmz_metadata *zmd = dmz->metadata;
 	sector_t block = dmz_bio_block(bio);
 	unsigned int nr_blocks = dmz_bio_blocks(bio);
-	sector_t chunk_block = dmz_chunk_block(dmz->dev, block);
+	sector_t chunk_block = dmz_chunk_block(zmd, block);
 	int ret = 0;
 
 	/* For unmapped chunks, there is nothing to do */
...
 	if (dmz_is_readonly(zone))
 		return -EROFS;
 
-	dmz_dev_debug(dmz->dev, "DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
-		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
-		      dmz_id(zmd, zone),
-		      (unsigned long long)chunk_block, nr_blocks);
+	DMDEBUG("(%s): DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
+		dmz_metadata_label(dmz->metadata),
+		(unsigned long long)dmz_bio_chunk(zmd, bio),
+		zone->id,
+		(unsigned long long)chunk_block, nr_blocks);
 
 	/*
 	 * Invalidate blocks in the data zone and its
 	 * buffer zone if one is mapped.
 	 */
-	if (dmz_is_rnd(zone) || chunk_block < zone->wp_block)
+	if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
+	    chunk_block < zone->wp_block)
 		ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
 	if (ret == 0 && zone->bzone)
 		ret = dmz_invalidate_blocks(zmd, zone->bzone,
|---|
...
 static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
 			   struct bio *bio)
 {
-	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+	struct dmz_bioctx *bioctx =
+		dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
 	struct dmz_metadata *zmd = dmz->metadata;
 	struct dm_zone *zone;
 	int ret;
 
-	/*
-	 * Write may trigger a zone allocation. So make sure the
-	 * allocation can succeed.
-	 */
-	if (bio_op(bio) == REQ_OP_WRITE)
-		dmz_schedule_reclaim(dmz->reclaim);
-
 	dmz_lock_metadata(zmd);
-
-	if (dmz->dev->flags & DMZ_BDEV_DYING) {
-		ret = -EIO;
-		goto out;
-	}
 
 	/*
 	 * Get the data zone mapping the chunk. There may be no
 	 * mapping for read and discard. If a mapping is obtained,
 	 * the zone returned will be set to active state.
 	 */
-	zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(dmz->dev, bio),
+	zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(zmd, bio),
 				     bio_op(bio));
 	if (IS_ERR(zone)) {
 		ret = PTR_ERR(zone);
|---|
...
 	if (zone) {
 		dmz_activate_zone(zone);
 		bioctx->zone = zone;
+		dmz_reclaim_bio_acc(zone->dev->reclaim);
 	}
 
 	switch (bio_op(bio)) {
...
 		ret = dmz_handle_discard(dmz, zone, bio);
 		break;
 	default:
-		dmz_dev_err(dmz->dev, "Unsupported BIO operation 0x%x",
-			    bio_op(bio));
+		DMERR("(%s): Unsupported BIO operation 0x%x",
+		      dmz_metadata_label(dmz->metadata), bio_op(bio));
 		ret = -EIO;
 	}
 
...
  */
 static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
 {
-	atomic_inc(&cw->refcount);
+	refcount_inc(&cw->refcount);
 }
 
 /*
...
  */
 static void dmz_put_chunk_work(struct dm_chunk_work *cw)
 {
-	if (atomic_dec_and_test(&cw->refcount)) {
+	if (refcount_dec_and_test(&cw->refcount)) {
 		WARN_ON(!bio_list_empty(&cw->bio_list));
 		radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk);
 		kfree(cw);
...
 	/* Flush dirty metadata blocks */
 	ret = dmz_flush_metadata(dmz->metadata);
 	if (ret)
-		dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
+		DMDEBUG("(%s): Metadata flush failed, rc=%d",
+			dmz_metadata_label(dmz->metadata), ret);
 
 	/* Process queued flush requests */
 	while (1) {
...
  */
 static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
 {
-	unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
+	unsigned int chunk = dmz_bio_chunk(dmz->metadata, bio);
 	struct dm_chunk_work *cw;
 	int ret = 0;
 
...
 
 	/* Get the BIO chunk work. If one is not active yet, create one */
 	cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
-	if (!cw) {
-
+	if (cw) {
+		dmz_get_chunk_work(cw);
+	} else {
 		/* Create a new chunk work */
 		cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
 		if (unlikely(!cw)) {
...
 		}
 
 		INIT_WORK(&cw->work, dmz_chunk_work);
-		atomic_set(&cw->refcount, 0);
+		refcount_set(&cw->refcount, 1);
 		cw->target = dmz;
 		cw->chunk = chunk;
 		bio_list_init(&cw->bio_list);
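Starting the new chunk work at 1 instead of 0 is forced by the `refcount_t` semantics shown earlier: incrementing from zero is treated as a bug, so a freshly created object must be born holding its first reference, and the lookup branch takes its reference explicitly. The lookup-or-create idiom on the radix tree is worth isolating; a stripped-down sketch with hypothetical `my_*` names (the caller is assumed to hold a mutex serializing lookups and insertions, as `dmz->chunk_lock` does in this driver):

```c
#include <linux/radix-tree.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct my_work {
	refcount_t refcount;
	unsigned int chunk;
};

/* Caller must serialize lookups and insertions (e.g. with a mutex). */
static struct my_work *my_work_get_or_create(struct radix_tree_root *root,
					     unsigned int chunk)
{
	struct my_work *w = radix_tree_lookup(root, chunk);

	if (w) {
		refcount_inc(&w->refcount);	/* found: take a reference */
		return w;
	}

	w = kmalloc(sizeof(*w), GFP_NOIO);
	if (!w)
		return NULL;

	refcount_set(&w->refcount, 1);		/* born holding one reference */
	w->chunk = chunk;
	if (radix_tree_insert(root, chunk, w)) {
		kfree(w);
		return NULL;
	}
	return w;
}
```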
|---|
...
 	}
 
 	bio_list_add(&cw->bio_list, bio);
-	dmz_get_chunk_work(cw);
 
-	dmz_reclaim_bio_acc(dmz->reclaim);
 	if (queue_work(dmz->chunk_wq, &cw->work))
 		dmz_get_chunk_work(cw);
 out:
...
 static int dmz_map(struct dm_target *ti, struct bio *bio)
 {
 	struct dmz_target *dmz = ti->private;
-	struct dmz_dev *dev = dmz->dev;
+	struct dmz_metadata *zmd = dmz->metadata;
 	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
 	sector_t sector = bio->bi_iter.bi_sector;
 	unsigned int nr_sectors = bio_sectors(bio);
 	sector_t chunk_sector;
 	int ret;
 
-	if (dmz_bdev_is_dying(dmz->dev))
+	if (dmz_dev_is_dying(zmd))
 		return DM_MAPIO_KILL;
 
-	dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
-		      bio_op(bio), (unsigned long long)sector, nr_sectors,
-		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
-		      (unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)),
-		      (unsigned int)dmz_bio_blocks(bio));
-
-	bio_set_dev(bio, dev->bdev);
+	DMDEBUG("(%s): BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
+		dmz_metadata_label(zmd),
+		bio_op(bio), (unsigned long long)sector, nr_sectors,
+		(unsigned long long)dmz_bio_chunk(zmd, bio),
+		(unsigned long long)dmz_chunk_block(zmd, dmz_bio_block(bio)),
+		(unsigned int)dmz_bio_blocks(bio));
 
 	if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
 		return DM_MAPIO_REMAPPED;
...
 		return DM_MAPIO_KILL;
 
 	/* Initialize the BIO context */
-	bioctx->target = dmz;
+	bioctx->dev = NULL;
 	bioctx->zone = NULL;
 	bioctx->bio = bio;
-	atomic_set(&bioctx->ref, 1);
+	refcount_set(&bioctx->ref, 1);
 
 	/* Set the BIO pending in the flush list */
 	if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
...
 	}
 
 	/* Split zone BIOs to fit entirely into a zone */
-	chunk_sector = sector & (dev->zone_nr_sectors - 1);
-	if (chunk_sector + nr_sectors > dev->zone_nr_sectors)
-		dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
+	chunk_sector = sector & (dmz_zone_nr_sectors(zmd) - 1);
+	if (chunk_sector + nr_sectors > dmz_zone_nr_sectors(zmd))
+		dm_accept_partial_bio(bio, dmz_zone_nr_sectors(zmd) - chunk_sector);
 
 	/* Now ready to handle this BIO */
 	ret = dmz_queue_chunk_work(dmz, bio);
 	if (ret) {
-		dmz_dev_debug(dmz->dev,
-			      "BIO op %d, can't process chunk %llu, err %i\n",
-			      bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
-			      ret);
+		DMDEBUG("(%s): BIO op %d, can't process chunk %llu, err %i",
+			dmz_metadata_label(zmd),
+			bio_op(bio), (u64)dmz_bio_chunk(zmd, bio),
+			ret);
 		return DM_MAPIO_REQUEUE;
 	}
 
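`dm_accept_partial_bio()` tells device-mapper to process only the first `n` sectors of a BIO and resubmit the remainder as a fresh BIO, which is how dm-zoned keeps every BIO inside a single zone. The masking above works because dm-zoned requires a power-of-two zone size (hence the `dmz_zone_nr_sectors_shift()` helper). A tiny self-contained sketch of the arithmetic, in userspace C with illustrative values:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t zone_nr_sectors = 524288;	/* 256 MiB zone, 512 B sectors */
	uint64_t sector = 1048000;		/* BIO start sector */
	uint64_t nr_sectors = 1000;		/* BIO length in sectors */

	/* A power-of-two zone size lets '&' stand in for a modulo. */
	assert((zone_nr_sectors & (zone_nr_sectors - 1)) == 0);

	uint64_t chunk_sector = sector & (zone_nr_sectors - 1);

	if (chunk_sector + nr_sectors > zone_nr_sectors)
		printf("split: accept %llu sectors, requeue the rest\n",
		       (unsigned long long)(zone_nr_sectors - chunk_sector));
	else
		printf("BIO fits entirely in the zone\n");
	return 0;
}
```

With these numbers the BIO starts 523712 sectors into its zone, so only the first 576 sectors are accepted and the tail is resubmitted.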
|---|
...
 /*
  * Get zoned device information.
  */
-static int dmz_get_zoned_device(struct dm_target *ti, char *path)
+static int dmz_get_zoned_device(struct dm_target *ti, char *path,
+				int idx, int nr_devs)
 {
 	struct dmz_target *dmz = ti->private;
-	struct request_queue *q;
+	struct dm_dev *ddev;
 	struct dmz_dev *dev;
-	sector_t aligned_capacity;
 	int ret;
+	struct block_device *bdev;
 
 	/* Get the target device */
-	ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &dmz->ddev);
+	ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &ddev);
 	if (ret) {
 		ti->error = "Get target device failed";
-		dmz->ddev = NULL;
 		return ret;
 	}
 
-	dev = kzalloc(sizeof(struct dmz_dev), GFP_KERNEL);
-	if (!dev) {
-		ret = -ENOMEM;
-		goto err;
+	bdev = ddev->bdev;
+	if (bdev_zoned_model(bdev) == BLK_ZONED_NONE) {
+		if (nr_devs == 1) {
+			ti->error = "Invalid regular device";
+			goto err;
+		}
+		if (idx != 0) {
+			ti->error = "First device must be a regular device";
+			goto err;
+		}
+		if (dmz->ddev[0]) {
+			ti->error = "Too many regular devices";
+			goto err;
+		}
+		dev = &dmz->dev[idx];
+		dev->flags = DMZ_BDEV_REGULAR;
+	} else {
+		if (dmz->ddev[idx]) {
+			ti->error = "Too many zoned devices";
+			goto err;
+		}
+		if (nr_devs > 1 && idx == 0) {
+			ti->error = "First device must be a regular device";
+			goto err;
+		}
+		dev = &dmz->dev[idx];
 	}
-
-	dev->bdev = dmz->ddev->bdev;
+	dev->bdev = bdev;
+	dev->dev_idx = idx;
 	(void)bdevname(dev->bdev, dev->name);
 
-	if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE) {
-		ti->error = "Not a zoned block device";
-		ret = -EINVAL;
+	dev->capacity = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+	if (ti->begin) {
+		ti->error = "Partial mapping is not supported";
 		goto err;
 	}
 
-	q = bdev_get_queue(dev->bdev);
-	dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
-	aligned_capacity = dev->capacity & ~(blk_queue_zone_sectors(q) - 1);
-	if (ti->begin ||
-	    ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
-		ti->error = "Partial mapping not supported";
-		ret = -EINVAL;
-		goto err;
-	}
-
-	dev->zone_nr_sectors = blk_queue_zone_sectors(q);
-	dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors);
-
-	dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
-	dev->zone_nr_blocks_shift = ilog2(dev->zone_nr_blocks);
-
-	dev->nr_zones = (dev->capacity + dev->zone_nr_sectors - 1)
-		>> dev->zone_nr_sectors_shift;
-
-	dmz->dev = dev;
+	dmz->ddev[idx] = ddev;
 
 	return 0;
 err:
-	dm_put_device(ti, dmz->ddev);
-	kfree(dev);
-
-	return ret;
+	dm_put_device(ti, ddev);
+	return -EINVAL;
 }
 
 /*
  * Cleanup zoned device information.
  */
-static void dmz_put_zoned_device(struct dm_target *ti)
+static void dmz_put_zoned_devices(struct dm_target *ti)
 {
 	struct dmz_target *dmz = ti->private;
+	int i;
 
-	dm_put_device(ti, dmz->ddev);
-	kfree(dmz->dev);
-	dmz->dev = NULL;
+	for (i = 0; i < dmz->nr_ddevs; i++)
+		if (dmz->ddev[i])
+			dm_put_device(ti, dmz->ddev[i]);
+
+	kfree(dmz->ddev);
+}
+
+static int dmz_fixup_devices(struct dm_target *ti)
+{
+	struct dmz_target *dmz = ti->private;
+	struct dmz_dev *reg_dev, *zoned_dev;
+	struct request_queue *q;
+	sector_t zone_nr_sectors = 0;
+	int i;
+
+	/*
+	 * When we have more than one device, the first one must be a
+	 * regular block device and the others zoned block devices.
+	 */
+	if (dmz->nr_ddevs > 1) {
+		reg_dev = &dmz->dev[0];
+		if (!(reg_dev->flags & DMZ_BDEV_REGULAR)) {
+			ti->error = "Primary disk is not a regular device";
+			return -EINVAL;
+		}
+		for (i = 1; i < dmz->nr_ddevs; i++) {
+			zoned_dev = &dmz->dev[i];
+			if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
+				ti->error = "Secondary disk is not a zoned device";
+				return -EINVAL;
+			}
+			q = bdev_get_queue(zoned_dev->bdev);
+			if (zone_nr_sectors &&
+			    zone_nr_sectors != blk_queue_zone_sectors(q)) {
+				ti->error = "Zone nr sectors mismatch";
+				return -EINVAL;
+			}
+			zone_nr_sectors = blk_queue_zone_sectors(q);
+			zoned_dev->zone_nr_sectors = zone_nr_sectors;
+			zoned_dev->nr_zones =
+				blkdev_nr_zones(zoned_dev->bdev->bd_disk);
+		}
+	} else {
+		reg_dev = NULL;
+		zoned_dev = &dmz->dev[0];
+		if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
+			ti->error = "Disk is not a zoned device";
+			return -EINVAL;
+		}
+		q = bdev_get_queue(zoned_dev->bdev);
+		zoned_dev->zone_nr_sectors = blk_queue_zone_sectors(q);
+		zoned_dev->nr_zones = blkdev_nr_zones(zoned_dev->bdev->bd_disk);
+	}
+
+	if (reg_dev) {
+		sector_t zone_offset;
+
+		reg_dev->zone_nr_sectors = zone_nr_sectors;
+		reg_dev->nr_zones =
+			DIV_ROUND_UP_SECTOR_T(reg_dev->capacity,
+					      reg_dev->zone_nr_sectors);
+		reg_dev->zone_offset = 0;
+		zone_offset = reg_dev->nr_zones;
+		for (i = 1; i < dmz->nr_ddevs; i++) {
+			dmz->dev[i].zone_offset = zone_offset;
+			zone_offset += dmz->dev[i].nr_zones;
+		}
+	}
+	return 0;
 }
 
 /*
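With a regular device in slot 0, `dmz_fixup_devices()` lays every device out in one linear zone-number space: the regular device is carved into emulated zones of the same size as the zoned devices' zones, starting at zone 0, and each zoned device's zones are appended at the next free zone index via `zone_offset`. A small userspace sketch of that offset computation, with made-up capacities (`DIV_ROUND_UP` stands in for the kernel's `DIV_ROUND_UP_SECTOR_T`, and the per-device zone counts are derived by division here, where the kernel instead queries `blkdev_nr_zones()`):

```c
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct dev_layout {
	uint64_t capacity;	/* in 512 B sectors */
	uint64_t nr_zones;
	uint64_t zone_offset;	/* first zone number of this device */
};

int main(void)
{
	const uint64_t zone_nr_sectors = 524288;	/* 256 MiB zones */
	struct dev_layout dev[3] = {
		{ .capacity = 41943040 },	/* regular device, 20 GiB */
		{ .capacity = 104857600 },	/* zoned device, 50 GiB */
		{ .capacity = 104857600 },	/* zoned device, 50 GiB */
	};
	uint64_t zone_offset;
	int i;

	/* The regular device is carved into emulated zones... */
	dev[0].nr_zones = DIV_ROUND_UP(dev[0].capacity, zone_nr_sectors);
	dev[0].zone_offset = 0;

	/* ...and each zoned device starts where the previous one ended. */
	zone_offset = dev[0].nr_zones;
	for (i = 1; i < 3; i++) {
		dev[i].nr_zones = dev[i].capacity / zone_nr_sectors;
		dev[i].zone_offset = zone_offset;
		zone_offset += dev[i].nr_zones;
	}

	for (i = 0; i < 3; i++)
		printf("dev %d: zones %llu..%llu\n", i,
		       (unsigned long long)dev[i].zone_offset,
		       (unsigned long long)(dev[i].zone_offset +
					    dev[i].nr_zones - 1));
	return 0;
}
```

This prints zones 0..79 for the regular device and 80..279 and 280..479 for the two zoned devices, one contiguous numbering across all three.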
|---|
...
 static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
 	struct dmz_target *dmz;
-	struct dmz_dev *dev;
-	int ret;
+	int ret, i;
 
 	/* Check arguments */
-	if (argc != 1) {
+	if (argc < 1) {
 		ti->error = "Invalid argument count";
 		return -EINVAL;
 	}
...
 		ti->error = "Unable to allocate the zoned target descriptor";
 		return -ENOMEM;
 	}
+	dmz->dev = kcalloc(argc, sizeof(struct dmz_dev), GFP_KERNEL);
+	if (!dmz->dev) {
+		ti->error = "Unable to allocate the zoned device descriptors";
+		kfree(dmz);
+		return -ENOMEM;
+	}
+	dmz->ddev = kcalloc(argc, sizeof(struct dm_dev *), GFP_KERNEL);
+	if (!dmz->ddev) {
+		ti->error = "Unable to allocate the dm device descriptors";
+		ret = -ENOMEM;
+		goto err;
+	}
+	dmz->nr_ddevs = argc;
+
 	ti->private = dmz;
 
 	/* Get the target zoned block device */
-	ret = dmz_get_zoned_device(ti, argv[0]);
-	if (ret) {
-		dmz->ddev = NULL;
-		goto err;
+	for (i = 0; i < argc; i++) {
+		ret = dmz_get_zoned_device(ti, argv[i], i, argc);
+		if (ret)
+			goto err_dev;
 	}
+	ret = dmz_fixup_devices(ti);
+	if (ret)
+		goto err_dev;
 
 	/* Initialize metadata */
-	dev = dmz->dev;
-	ret = dmz_ctr_metadata(dev, &dmz->metadata);
+	ret = dmz_ctr_metadata(dmz->dev, argc, &dmz->metadata,
+			       dm_table_device_name(ti->table));
 	if (ret) {
 		ti->error = "Metadata initialization failed";
 		goto err_dev;
 	}
 
 	/* Set target (no write same support) */
-	ti->max_io_len = dev->zone_nr_sectors;
+	ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata);
 	ti->num_flush_bios = 1;
 	ti->num_discard_bios = 1;
 	ti->num_write_zeroes_bios = 1;
 	ti->per_io_data_size = sizeof(struct dmz_bioctx);
 	ti->flush_supported = true;
 	ti->discards_supported = true;
-	ti->split_discard_bios = true;
 
 	/* The exposed capacity is the number of chunks that can be mapped */
-	ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) << dev->zone_nr_sectors_shift;
+	ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) <<
+		dmz_zone_nr_sectors_shift(dmz->metadata);
 
 	/* Zone BIO */
 	ret = bioset_init(&dmz->bio_set, DMZ_MIN_BIOS, 0, 0);
...
 	/* Chunk BIO work */
 	mutex_init(&dmz->chunk_lock);
 	INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
-	dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
-					0, dev->name);
+	dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s",
+					WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
+					dmz_metadata_label(dmz->metadata));
 	if (!dmz->chunk_wq) {
 		ti->error = "Create chunk workqueue failed";
 		ret = -ENOMEM;
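Both of the target's workqueues carry WQ_MEM_RECLAIM because they sit in the I/O path and must be able to make forward progress under memory pressure; the chunk queue is additionally unbound so chunk works can run concurrently across CPUs, while the flush queue (allocated just below) is ordered so flushes complete in submission order. A minimal sketch of the two allocation styles, with hypothetical names:

```c
#include <linux/workqueue.h>

static struct workqueue_struct *chunk_wq;
static struct workqueue_struct *flush_wq;

static int my_create_workqueues(const char *label)
{
	/* Unbound, reclaim-safe queue: queued works may run concurrently. */
	chunk_wq = alloc_workqueue("my_cwq_%s",
				   WQ_MEM_RECLAIM | WQ_UNBOUND, 0, label);
	if (!chunk_wq)
		return -ENOMEM;

	/* Ordered queue: at most one work item executes at a time. */
	flush_wq = alloc_ordered_workqueue("my_fwq_%s", WQ_MEM_RECLAIM, label);
	if (!flush_wq) {
		destroy_workqueue(chunk_wq);
		return -ENOMEM;
	}
	return 0;
}
```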
|---|
...
 	bio_list_init(&dmz->flush_list);
 	INIT_DELAYED_WORK(&dmz->flush_work, dmz_flush_work);
 	dmz->flush_wq = alloc_ordered_workqueue("dmz_fwq_%s", WQ_MEM_RECLAIM,
-						dev->name);
+						dmz_metadata_label(dmz->metadata));
 	if (!dmz->flush_wq) {
 		ti->error = "Create flush workqueue failed";
 		ret = -ENOMEM;
...
 	mod_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
 
 	/* Initialize reclaim */
-	ret = dmz_ctr_reclaim(dev, dmz->metadata, &dmz->reclaim);
-	if (ret) {
-		ti->error = "Zone reclaim initialization failed";
-		goto err_fwq;
+	for (i = 0; i < dmz->nr_ddevs; i++) {
+		ret = dmz_ctr_reclaim(dmz->metadata, &dmz->dev[i].reclaim, i);
+		if (ret) {
+			ti->error = "Zone reclaim initialization failed";
+			goto err_fwq;
+		}
 	}
 
-	dmz_dev_info(dev, "Target device: %llu 512-byte logical sectors (%llu blocks)",
-		     (unsigned long long)ti->len,
-		     (unsigned long long)dmz_sect2blk(ti->len));
+	DMINFO("(%s): Target device: %llu 512-byte logical sectors (%llu blocks)",
+	       dmz_metadata_label(dmz->metadata),
+	       (unsigned long long)ti->len,
+	       (unsigned long long)dmz_sect2blk(ti->len));
 
 	return 0;
 err_fwq:
...
 err_meta:
 	dmz_dtr_metadata(dmz->metadata);
 err_dev:
-	dmz_put_zoned_device(ti);
+	dmz_put_zoned_devices(ti);
 err:
+	kfree(dmz->dev);
 	kfree(dmz);
 
 	return ret;
...
 static void dmz_dtr(struct dm_target *ti)
 {
 	struct dmz_target *dmz = ti->private;
+	int i;
 
 	flush_workqueue(dmz->chunk_wq);
 	destroy_workqueue(dmz->chunk_wq);
 
-	dmz_dtr_reclaim(dmz->reclaim);
+	for (i = 0; i < dmz->nr_ddevs; i++)
+		dmz_dtr_reclaim(dmz->dev[i].reclaim);
 
 	cancel_delayed_work_sync(&dmz->flush_work);
 	destroy_workqueue(dmz->flush_wq);
...
 
 	bioset_exit(&dmz->bio_set);
 
-	dmz_put_zoned_device(ti);
+	dmz_put_zoned_devices(ti);
 
 	mutex_destroy(&dmz->chunk_lock);
 
+	kfree(dmz->dev);
 	kfree(dmz);
 }
 
...
 static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
 	struct dmz_target *dmz = ti->private;
-	unsigned int chunk_sectors = dmz->dev->zone_nr_sectors;
+	unsigned int chunk_sectors = dmz_zone_nr_sectors(dmz->metadata);
 
 	limits->logical_block_size = DMZ_BLOCK_SIZE;
 	limits->physical_block_size = DMZ_BLOCK_SIZE;
...
 static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
 {
 	struct dmz_target *dmz = ti->private;
+	struct dmz_dev *dev = &dmz->dev[0];
 
-	if (!dmz_check_bdev(dmz->dev))
+	if (!dmz_check_bdev(dev))
 		return -EIO;
 
-	*bdev = dmz->dev->bdev;
+	*bdev = dev->bdev;
 
 	return 0;
 }
...
 static void dmz_suspend(struct dm_target *ti)
 {
 	struct dmz_target *dmz = ti->private;
+	int i;
 
 	flush_workqueue(dmz->chunk_wq);
-	dmz_suspend_reclaim(dmz->reclaim);
+	for (i = 0; i < dmz->nr_ddevs; i++)
+		dmz_suspend_reclaim(dmz->dev[i].reclaim);
 	cancel_delayed_work_sync(&dmz->flush_work);
 }
 
...
 static void dmz_resume(struct dm_target *ti)
 {
 	struct dmz_target *dmz = ti->private;
+	int i;
 
 	queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
-	dmz_resume_reclaim(dmz->reclaim);
+	for (i = 0; i < dmz->nr_ddevs; i++)
+		dmz_resume_reclaim(dmz->dev[i].reclaim);
 }
 
 static int dmz_iterate_devices(struct dm_target *ti,
 			       iterate_devices_callout_fn fn, void *data)
 {
 	struct dmz_target *dmz = ti->private;
-	struct dmz_dev *dev = dmz->dev;
-	sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1);
+	unsigned int zone_nr_sectors = dmz_zone_nr_sectors(dmz->metadata);
+	sector_t capacity;
+	int i, r;
 
-	return fn(ti, dmz->ddev, 0, capacity, data);
+	for (i = 0; i < dmz->nr_ddevs; i++) {
+		capacity = dmz->dev[i].capacity & ~(zone_nr_sectors - 1);
+		r = fn(ti, dmz->ddev[i], 0, capacity, data);
+		if (r)
+			break;
+	}
+	return r;
+}
+
+static void dmz_status(struct dm_target *ti, status_type_t type,
+		       unsigned int status_flags, char *result,
+		       unsigned int maxlen)
+{
+	struct dmz_target *dmz = ti->private;
+	ssize_t sz = 0;
+	char buf[BDEVNAME_SIZE];
+	struct dmz_dev *dev;
+	int i;
+
+	switch (type) {
+	case STATUSTYPE_INFO:
+		DMEMIT("%u zones %u/%u cache",
+		       dmz_nr_zones(dmz->metadata),
+		       dmz_nr_unmap_cache_zones(dmz->metadata),
+		       dmz_nr_cache_zones(dmz->metadata));
+		for (i = 0; i < dmz->nr_ddevs; i++) {
+			/*
+			 * For a multi-device setup the first device
+			 * contains only cache zones.
+			 */
+			if ((i == 0) &&
+			    (dmz_nr_cache_zones(dmz->metadata) > 0))
+				continue;
+			DMEMIT(" %u/%u random %u/%u sequential",
+			       dmz_nr_unmap_rnd_zones(dmz->metadata, i),
+			       dmz_nr_rnd_zones(dmz->metadata, i),
+			       dmz_nr_unmap_seq_zones(dmz->metadata, i),
+			       dmz_nr_seq_zones(dmz->metadata, i));
+		}
+		break;
+	case STATUSTYPE_TABLE:
+		dev = &dmz->dev[0];
+		format_dev_t(buf, dev->bdev->bd_dev);
+		DMEMIT("%s", buf);
+		for (i = 1; i < dmz->nr_ddevs; i++) {
+			dev = &dmz->dev[i];
+			format_dev_t(buf, dev->bdev->bd_dev);
+			DMEMIT(" %s", buf);
+		}
+		break;
+	}
+	return;
+}
+
+static int dmz_message(struct dm_target *ti, unsigned int argc, char **argv,
+		       char *result, unsigned int maxlen)
+{
+	struct dmz_target *dmz = ti->private;
+	int r = -EINVAL;
+
+	if (!strcasecmp(argv[0], "reclaim")) {
+		int i;
+
+		for (i = 0; i < dmz->nr_ddevs; i++)
+			dmz_schedule_reclaim(dmz->dev[i].reclaim);
+		r = 0;
+	} else
+		DMERR("unrecognized message %s", argv[0]);
+	return r;
 }
 
 static struct target_type dmz_type = {
 	.name		 = "zoned",
-	.version	 = {1, 0, 0},
-	.features	 = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM,
+	.version	 = {2, 0, 0},
+	.features	 = DM_TARGET_SINGLETON | DM_TARGET_MIXED_ZONED_MODEL,
 	.module		 = THIS_MODULE,
 	.ctr		 = dmz_ctr,
 	.dtr		 = dmz_dtr,
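The new `dmz_status` and `dmz_message` hooks are what `dmsetup status <dev>` and `dmsetup message <dev> 0 reclaim` talk to. Going by the DMEMIT format strings above, the STATUSTYPE_INFO line starts with `<n> zones <u>/<t> cache` followed by per-device random/sequential counters. A small userspace sketch of parsing that prefix (the example line is constructed from the format strings, with illustrative numbers):

```c
#include <stdio.h>

int main(void)
{
	/* Example status text, shaped like the DMEMIT calls above. */
	const char *line =
		"5220 zones 112/128 cache 68/4976 random 10/116 sequential";
	unsigned int zones, unmap_cache, cache;

	if (sscanf(line, "%u zones %u/%u cache",
		   &zones, &unmap_cache, &cache) == 3)
		printf("%u zones, %u of %u cache zones unmapped\n",
		       zones, unmap_cache, cache);
	return 0;
}
```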
|---|
...
 	.postsuspend	 = dmz_suspend,
 	.resume		 = dmz_resume,
 	.iterate_devices = dmz_iterate_devices,
+	.status		 = dmz_status,
+	.message	 = dmz_message,
 };
 
 static int __init dmz_init(void)
|---|