| .. | .. |
|---|
| 74 | 74 | static bool iot_idle_for(struct io_tracker *iot, unsigned long jifs) |
|---|
| 75 | 75 | { |
|---|
| 76 | 76 | bool r; |
|---|
| 77 | | - unsigned long flags; |
|---|
| 78 | 77 | |
|---|
| 79 | | - spin_lock_irqsave(&iot->lock, flags); |
|---|
| 78 | + spin_lock_irq(&iot->lock); |
|---|
| 80 | 79 | r = __iot_idle_for(iot, jifs); |
|---|
| 81 | | - spin_unlock_irqrestore(&iot->lock, flags); |
|---|
| 80 | + spin_unlock_irq(&iot->lock); |
|---|
| 82 | 81 | |
|---|
| 83 | 82 | return r; |
|---|
| 84 | 83 | } |
|---|
| 85 | 84 | |
|---|
| 86 | 85 | static void iot_io_begin(struct io_tracker *iot, sector_t len) |
|---|
| 87 | 86 | { |
|---|
| 88 | | - unsigned long flags; |
|---|
| 89 | | - |
|---|
| 90 | | - spin_lock_irqsave(&iot->lock, flags); |
|---|
| 87 | + spin_lock_irq(&iot->lock); |
|---|
| 91 | 88 | iot->in_flight += len; |
|---|
| 92 | | - spin_unlock_irqrestore(&iot->lock, flags); |
|---|
| 89 | + spin_unlock_irq(&iot->lock); |
|---|
| 93 | 90 | } |
|---|
| 94 | 91 | |
|---|
| 95 | 92 | static void __iot_io_end(struct io_tracker *iot, sector_t len) |
|---|
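This hunk (and several below that touch defer_bio(), the discard-bitset helpers, check_if_tick_bio_needed() and process_deferred_bios()) swaps spin_lock_irqsave()/spin_unlock_irqrestore() for the plain spin_lock_irq()/spin_unlock_irq() pair. The _irqsave form is only needed when the caller might already be running with interrupts disabled; the conversion relies on these io_tracker, batcher and cache helpers being reached only from process context with interrupts enabled, so saving and restoring the flags word is pointless overhead. A minimal sketch of the two idioms, using illustrative names (`my_lock`, `my_count`) rather than the driver's:

```c
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static unsigned long my_count;

/* Caller's IRQ state unknown: save it, disable, and restore on unlock. */
static void bump_any_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);
	my_count++;
	spin_unlock_irqrestore(&my_lock, flags);
}

/* Caller known to run with IRQs enabled (process context): the cheaper
 * form simply disables IRQs on lock and re-enables them on unlock. */
static void bump_process_context(void)
{
	spin_lock_irq(&my_lock);
	my_count++;
	spin_unlock_irq(&my_lock);
}
```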
| .. | .. |
|---|
| 172 | 169 | { |
|---|
| 173 | 170 | struct batcher *b = container_of(_ws, struct batcher, commit_work); |
|---|
| 174 | 171 | blk_status_t r; |
|---|
| 175 | | - unsigned long flags; |
|---|
| 176 | 172 | struct list_head work_items; |
|---|
| 177 | 173 | struct work_struct *ws, *tmp; |
|---|
| 178 | 174 | struct continuation *k; |
|---|
| .. | .. |
|---|
| 186 | 182 | * We have to grab these before the commit_op to avoid a race |
|---|
| 187 | 183 | * condition. |
|---|
| 188 | 184 | */ |
|---|
| 189 | | - spin_lock_irqsave(&b->lock, flags); |
|---|
| 185 | + spin_lock_irq(&b->lock); |
|---|
| 190 | 186 | list_splice_init(&b->work_items, &work_items); |
|---|
| 191 | 187 | bio_list_merge(&bios, &b->bios); |
|---|
| 192 | 188 | bio_list_init(&b->bios); |
|---|
| 193 | 189 | b->commit_scheduled = false; |
|---|
| 194 | | - spin_unlock_irqrestore(&b->lock, flags); |
|---|
| 190 | + spin_unlock_irq(&b->lock); |
|---|
| 195 | 191 | |
|---|
| 196 | 192 | r = b->commit_op(b->commit_context); |
|---|
| 197 | 193 | |
|---|
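In the commit worker above, the queued work items and bios are detached while b->lock is held, before commit_op() runs; as the in-code comment says, grabbing them first avoids racing with submissions that arrive during the commit, which are simply left for the next one. A sketch of that take-ownership-under-lock pattern, using a hypothetical struct my_batcher rather than the driver's type:

```c
#include <linux/list.h>
#include <linux/spinlock.h>

struct my_batcher {
	spinlock_t lock;
	struct list_head work_items;	/* guarded by lock */
};

/* Move everything queued so far onto a private list. New arrivals land
 * on the (now empty) b->work_items and wait for the next commit. */
static void grab_pending(struct my_batcher *b, struct list_head *out)
{
	INIT_LIST_HEAD(out);

	spin_lock_irq(&b->lock);
	list_splice_init(&b->work_items, out);
	spin_unlock_irq(&b->lock);
}
```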
| .. | .. |
|---|
| 238 | 234 | |
|---|
| 239 | 235 | static void continue_after_commit(struct batcher *b, struct continuation *k) |
|---|
| 240 | 236 | { |
|---|
| 241 | | - unsigned long flags; |
|---|
| 242 | 237 | bool commit_scheduled; |
|---|
| 243 | 238 | |
|---|
| 244 | | - spin_lock_irqsave(&b->lock, flags); |
|---|
| 239 | + spin_lock_irq(&b->lock); |
|---|
| 245 | 240 | commit_scheduled = b->commit_scheduled; |
|---|
| 246 | 241 | list_add_tail(&k->ws.entry, &b->work_items); |
|---|
| 247 | | - spin_unlock_irqrestore(&b->lock, flags); |
|---|
| 242 | + spin_unlock_irq(&b->lock); |
|---|
| 248 | 243 | |
|---|
| 249 | 244 | if (commit_scheduled) |
|---|
| 250 | 245 | async_commit(b); |
|---|
| .. | .. |
|---|
| 255 | 250 | */ |
|---|
| 256 | 251 | static void issue_after_commit(struct batcher *b, struct bio *bio) |
|---|
| 257 | 252 | { |
|---|
| 258 | | - unsigned long flags; |
|---|
| 259 | 253 | bool commit_scheduled; |
|---|
| 260 | 254 | |
|---|
| 261 | | - spin_lock_irqsave(&b->lock, flags); |
|---|
| 255 | + spin_lock_irq(&b->lock); |
|---|
| 262 | 256 | commit_scheduled = b->commit_scheduled; |
|---|
| 263 | 257 | bio_list_add(&b->bios, bio); |
|---|
| 264 | | - spin_unlock_irqrestore(&b->lock, flags); |
|---|
| 258 | + spin_unlock_irq(&b->lock); |
|---|
| 265 | 259 | |
|---|
| 266 | 260 | if (commit_scheduled) |
|---|
| 267 | 261 | async_commit(b); |
|---|
| .. | .. |
|---|
| 273 | 267 | static void schedule_commit(struct batcher *b) |
|---|
| 274 | 268 | { |
|---|
| 275 | 269 | bool immediate; |
|---|
| 276 | | - unsigned long flags; |
|---|
| 277 | 270 | |
|---|
| 278 | | - spin_lock_irqsave(&b->lock, flags); |
|---|
| 271 | + spin_lock_irq(&b->lock); |
|---|
| 279 | 272 | immediate = !list_empty(&b->work_items) || !bio_list_empty(&b->bios); |
|---|
| 280 | 273 | b->commit_scheduled = true; |
|---|
| 281 | | - spin_unlock_irqrestore(&b->lock, flags); |
|---|
| 274 | + spin_unlock_irq(&b->lock); |
|---|
| 282 | 275 | |
|---|
| 283 | 276 | if (immediate) |
|---|
| 284 | 277 | async_commit(b); |
|---|
| .. | .. |
|---|
| 353 | 346 | enum cache_metadata_mode mode; |
|---|
| 354 | 347 | enum cache_io_mode io_mode; |
|---|
| 355 | 348 | unsigned metadata_version; |
|---|
| 349 | + bool discard_passdown:1; |
|---|
| 356 | 350 | }; |
|---|
| 357 | 351 | |
|---|
| 358 | 352 | struct cache_stats { |
|---|
| .. | .. |
|---|
| 426 | 420 | struct bio_list deferred_bios; |
|---|
| 427 | 421 | |
|---|
| 428 | 422 | struct rw_semaphore quiesce_lock; |
|---|
| 429 | | - |
|---|
| 430 | | - struct dm_target_callbacks callbacks; |
|---|
| 431 | 423 | |
|---|
| 432 | 424 | /* |
|---|
| 433 | 425 | * origin_blocks entries, discarded if set. |
|---|
| .. | .. |
|---|
| 629 | 621 | |
|---|
| 630 | 622 | static void defer_bio(struct cache *cache, struct bio *bio) |
|---|
| 631 | 623 | { |
|---|
| 632 | | - unsigned long flags; |
|---|
| 633 | | - |
|---|
| 634 | | - spin_lock_irqsave(&cache->lock, flags); |
|---|
| 624 | + spin_lock_irq(&cache->lock); |
|---|
| 635 | 625 | bio_list_add(&cache->deferred_bios, bio); |
|---|
| 636 | | - spin_unlock_irqrestore(&cache->lock, flags); |
|---|
| 626 | + spin_unlock_irq(&cache->lock); |
|---|
| 637 | 627 | |
|---|
| 638 | 628 | wake_deferred_bio_worker(cache); |
|---|
| 639 | 629 | } |
|---|
| 640 | 630 | |
|---|
| 641 | 631 | static void defer_bios(struct cache *cache, struct bio_list *bios) |
|---|
| 642 | 632 | { |
|---|
| 643 | | - unsigned long flags; |
|---|
| 644 | | - |
|---|
| 645 | | - spin_lock_irqsave(&cache->lock, flags); |
|---|
| 633 | + spin_lock_irq(&cache->lock); |
|---|
| 646 | 634 | bio_list_merge(&cache->deferred_bios, bios); |
|---|
| 647 | 635 | bio_list_init(bios); |
|---|
| 648 | | - spin_unlock_irqrestore(&cache->lock, flags); |
|---|
| 636 | + spin_unlock_irq(&cache->lock); |
|---|
| 649 | 637 | |
|---|
| 650 | 638 | wake_deferred_bio_worker(cache); |
|---|
| 651 | 639 | } |
|---|
| .. | .. |
|---|
| 724 | 712 | return cache->sectors_per_block_shift >= 0; |
|---|
| 725 | 713 | } |
|---|
| 726 | 714 | |
|---|
| 727 | | -/* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */ |
|---|
| 728 | | -#if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6 |
|---|
| 729 | | -__always_inline |
|---|
| 730 | | -#endif |
|---|
| 731 | 715 | static dm_block_t block_div(dm_block_t b, uint32_t n) |
|---|
| 732 | 716 | { |
|---|
| 733 | 717 | do_div(b, n); |
|---|
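The hunk above only drops an old-compiler workaround; block_div() itself is unchanged. For readers unfamiliar with do_div(): it divides its 64-bit first argument in place and returns the remainder, which is why block_div() just returns b afterwards. A tiny illustration with made-up names:

```c
#include <asm/div64.h>
#include <linux/types.h>

/* Round sectors down to whole blocks, do_div() style. */
static u64 whole_blocks(u64 sectors, u32 sectors_per_block)
{
	u32 rem = do_div(sectors, sectors_per_block);	/* divides in place */

	(void) rem;		/* remainder (partial block), unused here */
	return sectors;		/* now holds the quotient */
}
```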
| .. | .. |
|---|
| 755 | 739 | |
|---|
| 756 | 740 | static void set_discard(struct cache *cache, dm_dblock_t b) |
|---|
| 757 | 741 | { |
|---|
| 758 | | - unsigned long flags; |
|---|
| 759 | | - |
|---|
| 760 | 742 | BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks)); |
|---|
| 761 | 743 | atomic_inc(&cache->stats.discard_count); |
|---|
| 762 | 744 | |
|---|
| 763 | | - spin_lock_irqsave(&cache->lock, flags); |
|---|
| 745 | + spin_lock_irq(&cache->lock); |
|---|
| 764 | 746 | set_bit(from_dblock(b), cache->discard_bitset); |
|---|
| 765 | | - spin_unlock_irqrestore(&cache->lock, flags); |
|---|
| 747 | + spin_unlock_irq(&cache->lock); |
|---|
| 766 | 748 | } |
|---|
| 767 | 749 | |
|---|
| 768 | 750 | static void clear_discard(struct cache *cache, dm_dblock_t b) |
|---|
| 769 | 751 | { |
|---|
| 770 | | - unsigned long flags; |
|---|
| 771 | | - |
|---|
| 772 | | - spin_lock_irqsave(&cache->lock, flags); |
|---|
| 752 | + spin_lock_irq(&cache->lock); |
|---|
| 773 | 753 | clear_bit(from_dblock(b), cache->discard_bitset); |
|---|
| 774 | | - spin_unlock_irqrestore(&cache->lock, flags); |
|---|
| 754 | + spin_unlock_irq(&cache->lock); |
|---|
| 775 | 755 | } |
|---|
| 776 | 756 | |
|---|
| 777 | 757 | static bool is_discarded(struct cache *cache, dm_dblock_t b) |
|---|
| 778 | 758 | { |
|---|
| 779 | 759 | int r; |
|---|
| 780 | | - unsigned long flags; |
|---|
| 781 | | - |
|---|
| 782 | | - spin_lock_irqsave(&cache->lock, flags); |
|---|
| 760 | + spin_lock_irq(&cache->lock); |
|---|
| 783 | 761 | r = test_bit(from_dblock(b), cache->discard_bitset); |
|---|
| 784 | | - spin_unlock_irqrestore(&cache->lock, flags); |
|---|
| 762 | + spin_unlock_irq(&cache->lock); |
|---|
| 785 | 763 | |
|---|
| 786 | 764 | return r; |
|---|
| 787 | 765 | } |
|---|
| .. | .. |
|---|
| 789 | 767 | static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b) |
|---|
| 790 | 768 | { |
|---|
| 791 | 769 | int r; |
|---|
| 792 | | - unsigned long flags; |
|---|
| 793 | | - |
|---|
| 794 | | - spin_lock_irqsave(&cache->lock, flags); |
|---|
| 770 | + spin_lock_irq(&cache->lock); |
|---|
| 795 | 771 | r = test_bit(from_dblock(oblock_to_dblock(cache, b)), |
|---|
| 796 | 772 | cache->discard_bitset); |
|---|
| 797 | | - spin_unlock_irqrestore(&cache->lock, flags); |
|---|
| 773 | + spin_unlock_irq(&cache->lock); |
|---|
| 798 | 774 | |
|---|
| 799 | 775 | return r; |
|---|
| 800 | 776 | } |
|---|
| .. | .. |
|---|
| 826 | 802 | |
|---|
| 827 | 803 | static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) |
|---|
| 828 | 804 | { |
|---|
| 829 | | - unsigned long flags; |
|---|
| 830 | 805 | struct per_bio_data *pb; |
|---|
| 831 | 806 | |
|---|
| 832 | | - spin_lock_irqsave(&cache->lock, flags); |
|---|
| 807 | + spin_lock_irq(&cache->lock); |
|---|
| 833 | 808 | if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) && |
|---|
| 834 | 809 | bio_op(bio) != REQ_OP_DISCARD) { |
|---|
| 835 | 810 | pb = get_per_bio_data(bio); |
|---|
| 836 | 811 | pb->tick = true; |
|---|
| 837 | 812 | cache->need_tick_bio = false; |
|---|
| 838 | 813 | } |
|---|
| 839 | | - spin_unlock_irqrestore(&cache->lock, flags); |
|---|
| 814 | + spin_unlock_irq(&cache->lock); |
|---|
| 840 | 815 | } |
|---|
| 841 | 816 | |
|---|
| 842 | 817 | static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio, |
|---|
| .. | .. |
|---|
| 905 | 880 | static void accounted_request(struct cache *cache, struct bio *bio) |
|---|
| 906 | 881 | { |
|---|
| 907 | 882 | accounted_begin(cache, bio); |
|---|
| 908 | | - generic_make_request(bio); |
|---|
| 883 | + submit_bio_noacct(bio); |
|---|
| 909 | 884 | } |
|---|
| 910 | 885 | |
|---|
| 911 | 886 | static void issue_op(struct bio *bio, void *context) |
|---|
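generic_make_request() was renamed to submit_bio_noacct() around v5.9; stacking drivers like dm-cache use it to resubmit a bio they have remapped, without repeating the accounting that submit_bio() performs for new I/O. A sketch of the remap-and-resubmit shape this call sits in (hypothetical helper, not the driver's code):

```c
#include <linux/bio.h>
#include <linux/blkdev.h>

/* Point the bio at a lower device/offset, then hand it back to the
 * block layer so that device can service it. */
static void remap_and_resubmit(struct bio *bio, struct block_device *lower,
			       sector_t new_sector)
{
	bio_set_dev(bio, lower);
	bio->bi_iter.bi_sector = new_sector;
	submit_bio_noacct(bio);
}
```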
| .. | .. |
|---|
| 946 | 921 | |
|---|
| 947 | 922 | static const char *cache_device_name(struct cache *cache) |
|---|
| 948 | 923 | { |
|---|
| 949 | | - return dm_device_name(dm_table_get_md(cache->ti->table)); |
|---|
| 924 | + return dm_table_device_name(cache->ti->table); |
|---|
| 950 | 925 | } |
|---|
| 951 | 926 | |
|---|
| 952 | 927 | static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode) |
|---|
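cache_device_name() now goes through dm_table_device_name(), a small convenience helper added to dm-table. It is presumed to be the same dm_device_name(dm_table_get_md(t)) chain it replaces here; treat the sketch below as an assumption and check dm-table.c in your tree:

```c
#include <linux/device-mapper.h>

/* Presumed equivalent of the helper, not copied from dm-table.c. */
static const char *dm_table_device_name_sketch(struct dm_table *t)
{
	return dm_device_name(dm_table_get_md(t));
}
```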
| .. | .. |
|---|
| 1010 | 985 | if (get_cache_mode(cache) >= CM_READ_ONLY) |
|---|
| 1011 | 986 | return; |
|---|
| 1012 | 987 | |
|---|
| 1013 | | - if (dm_cache_metadata_set_needs_check(cache->cmd)) { |
|---|
| 1014 | | - DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name); |
|---|
| 1015 | | - set_cache_mode(cache, CM_FAIL); |
|---|
| 1016 | | - } |
|---|
| 1017 | | - |
|---|
| 1018 | 988 | DMERR_LIMIT("%s: aborting current metadata transaction", dev_name); |
|---|
| 1019 | 989 | if (dm_cache_metadata_abort(cache->cmd)) { |
|---|
| 1020 | 990 | DMERR("%s: failed to abort metadata transaction", dev_name); |
|---|
| 991 | + set_cache_mode(cache, CM_FAIL); |
|---|
| 992 | + } |
|---|
| 993 | + |
|---|
| 994 | + if (dm_cache_metadata_set_needs_check(cache->cmd)) { |
|---|
| 995 | + DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name); |
|---|
| 1021 | 996 | set_cache_mode(cache, CM_FAIL); |
|---|
| 1022 | 997 | } |
|---|
| 1023 | 998 | } |
|---|
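Note the reordering in abort_transaction(): the metadata transaction is aborted first and the 'needs_check' flag is written afterwards. Setting the flag before the abort is futile, because the abort rolls the metadata back to the previous transaction and would discard the freshly written flag; writing it after the rollback lets it persist so a later activation can force a cache_check run. (Rationale inferred from the new ordering; the corresponding upstream commit message is authoritative.)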
| .. | .. |
|---|
| 1811 | 1786 | bool commit_needed; |
|---|
| 1812 | 1787 | |
|---|
| 1813 | 1788 | if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED) |
|---|
| 1814 | | - generic_make_request(bio); |
|---|
| 1789 | + submit_bio_noacct(bio); |
|---|
| 1815 | 1790 | |
|---|
| 1816 | 1791 | return commit_needed; |
|---|
| 1817 | 1792 | } |
|---|
| .. | .. |
|---|
| 1875 | 1850 | b = to_dblock(from_dblock(b) + 1); |
|---|
| 1876 | 1851 | } |
|---|
| 1877 | 1852 | |
|---|
| 1878 | | - bio_endio(bio); |
|---|
| 1853 | + if (cache->features.discard_passdown) { |
|---|
| 1854 | + remap_to_origin(cache, bio); |
|---|
| 1855 | + submit_bio_noacct(bio); |
|---|
| 1856 | + } else |
|---|
| 1857 | + bio_endio(bio); |
|---|
| 1879 | 1858 | |
|---|
| 1880 | 1859 | return false; |
|---|
| 1881 | 1860 | } |
|---|
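With the new discard_passdown feature enabled (the default; see the constructor hunks further down), a discard is no longer completed as soon as the cache's discard bitset has been updated: the bio is remapped to the origin device and resubmitted so the origin can actually release the space. With no_discard_passdown the previous behaviour of ending the bio immediately, keeping discards purely virtual, is retained.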
| .. | .. |
|---|
| 1884 | 1863 | { |
|---|
| 1885 | 1864 | struct cache *cache = container_of(ws, struct cache, deferred_bio_worker); |
|---|
| 1886 | 1865 | |
|---|
| 1887 | | - unsigned long flags; |
|---|
| 1888 | 1866 | bool commit_needed = false; |
|---|
| 1889 | 1867 | struct bio_list bios; |
|---|
| 1890 | 1868 | struct bio *bio; |
|---|
| 1891 | 1869 | |
|---|
| 1892 | 1870 | bio_list_init(&bios); |
|---|
| 1893 | 1871 | |
|---|
| 1894 | | - spin_lock_irqsave(&cache->lock, flags); |
|---|
| 1872 | + spin_lock_irq(&cache->lock); |
|---|
| 1895 | 1873 | bio_list_merge(&bios, &cache->deferred_bios); |
|---|
| 1896 | 1874 | bio_list_init(&cache->deferred_bios); |
|---|
| 1897 | | - spin_unlock_irqrestore(&cache->lock, flags); |
|---|
| 1875 | + spin_unlock_irq(&cache->lock); |
|---|
| 1898 | 1876 | |
|---|
| 1899 | 1877 | while ((bio = bio_list_pop(&bios))) { |
|---|
| 1900 | 1878 | if (bio->bi_opf & REQ_PREFLUSH) |
|---|
| .. | .. |
|---|
| 1905 | 1883 | |
|---|
| 1906 | 1884 | else |
|---|
| 1907 | 1885 | commit_needed = process_bio(cache, bio) || commit_needed; |
|---|
| 1886 | + cond_resched(); |
|---|
| 1908 | 1887 | } |
|---|
| 1909 | 1888 | |
|---|
| 1910 | 1889 | if (commit_needed) |
|---|
| .. | .. |
|---|
| 1927 | 1906 | while ((bio = bio_list_pop(&bios))) { |
|---|
| 1928 | 1907 | bio->bi_status = BLK_STS_DM_REQUEUE; |
|---|
| 1929 | 1908 | bio_endio(bio); |
|---|
| 1909 | + cond_resched(); |
|---|
| 1930 | 1910 | } |
|---|
| 1931 | 1911 | } |
|---|
| 1932 | 1912 | |
|---|
| .. | .. |
|---|
| 1967 | 1947 | r = mg_start(cache, op, NULL); |
|---|
| 1968 | 1948 | if (r) |
|---|
| 1969 | 1949 | break; |
|---|
| 1950 | + |
|---|
| 1951 | + cond_resched(); |
|---|
| 1970 | 1952 | } |
|---|
| 1971 | 1953 | } |
|---|
| 1972 | 1954 | |
|---|
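The cond_resched() calls added above (in the deferred-bio loop, the requeue loop and the loop issuing migrations via mg_start()) address a practical problem: these workqueue loops can run over a very large number of bios or migrations in a single work item, which on busy or non-preemptible systems can starve other work and trigger stall or soft-lockup reports. Yielding once per iteration keeps the worker cooperative. A minimal sketch of the pattern:

```c
#include <linux/bio.h>
#include <linux/sched.h>

/* Drain a possibly very long private bio list, giving the scheduler a
 * chance to run other tasks between iterations. */
static void drain(struct bio_list *bios)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bios))) {
		bio_endio(bio);
		cond_resched();
	}
}
```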
| .. | .. |
|---|
| 1987 | 1969 | if (cache->prison) |
|---|
| 1988 | 1970 | dm_bio_prison_destroy_v2(cache->prison); |
|---|
| 1989 | 1971 | |
|---|
| 1972 | + cancel_delayed_work_sync(&cache->waker); |
|---|
| 1990 | 1973 | if (cache->wq) |
|---|
| 1991 | 1974 | destroy_workqueue(cache->wq); |
|---|
| 1992 | 1975 | |
|---|
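In the destroy path, the delayed "waker" work is now cancelled synchronously before the workqueue is torn down. The waker periodically re-queues work on cache->wq, so destroying the workqueue while the delayed work can still fire risks a use-after-free. The general teardown shape, with hypothetical names:

```c
#include <linux/workqueue.h>

struct my_ctx {
	struct workqueue_struct *wq;
	struct delayed_work waker;	/* periodically queues work on wq */
};

static void my_ctx_destroy(struct my_ctx *c)
{
	/* Stop the self-rearming delayed work first, waiting for any
	 * in-flight instance to finish... */
	cancel_delayed_work_sync(&c->waker);

	/* ...only then is it safe to destroy the workqueue it targets. */
	if (c->wq)
		destroy_workqueue(c->wq);
}
```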
| .. | .. |
|---|
| 2209 | 2192 | cf->mode = CM_WRITE; |
|---|
| 2210 | 2193 | cf->io_mode = CM_IO_WRITEBACK; |
|---|
| 2211 | 2194 | cf->metadata_version = 1; |
|---|
| 2195 | + cf->discard_passdown = true; |
|---|
| 2212 | 2196 | } |
|---|
| 2213 | 2197 | |
|---|
| 2214 | 2198 | static int parse_features(struct cache_args *ca, struct dm_arg_set *as, |
|---|
| 2215 | 2199 | char **error) |
|---|
| 2216 | 2200 | { |
|---|
| 2217 | 2201 | static const struct dm_arg _args[] = { |
|---|
| 2218 | | - {0, 2, "Invalid number of cache feature arguments"}, |
|---|
| 2202 | + {0, 3, "Invalid number of cache feature arguments"}, |
|---|
| 2219 | 2203 | }; |
|---|
| 2220 | 2204 | |
|---|
| 2221 | 2205 | int r, mode_ctr = 0; |
|---|
| .. | .. |
|---|
| 2249 | 2233 | |
|---|
| 2250 | 2234 | else if (!strcasecmp(arg, "metadata2")) |
|---|
| 2251 | 2235 | cf->metadata_version = 2; |
|---|
| 2236 | + |
|---|
| 2237 | + else if (!strcasecmp(arg, "no_discard_passdown")) |
|---|
| 2238 | + cf->discard_passdown = false; |
|---|
| 2252 | 2239 | |
|---|
| 2253 | 2240 | else { |
|---|
| 2254 | 2241 | *error = "Unrecognised cache feature requested"; |
|---|
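Two constructor changes back the new feature: init_features() defaults discard_passdown to true, and parse_features() accepts the no_discard_passdown keyword. The argument bound grows from 2 to 3 because a table may now combine one io-mode word, metadata2 and no_discard_passdown. In a dmsetup table the keyword goes in the feature-argument section, e.g. a count of 1 followed by no_discard_passdown (illustrative; see the dm-cache documentation for the full table syntax).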
| .. | .. |
|---|
| 2435 | 2422 | cache->cache_size = size; |
|---|
| 2436 | 2423 | } |
|---|
| 2437 | 2424 | |
|---|
| 2438 | | -static int is_congested(struct dm_dev *dev, int bdi_bits) |
|---|
| 2439 | | -{ |
|---|
| 2440 | | - struct request_queue *q = bdev_get_queue(dev->bdev); |
|---|
| 2441 | | - return bdi_congested(q->backing_dev_info, bdi_bits); |
|---|
| 2442 | | -} |
|---|
| 2443 | | - |
|---|
| 2444 | | -static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits) |
|---|
| 2445 | | -{ |
|---|
| 2446 | | - struct cache *cache = container_of(cb, struct cache, callbacks); |
|---|
| 2447 | | - |
|---|
| 2448 | | - return is_congested(cache->origin_dev, bdi_bits) || |
|---|
| 2449 | | - is_congested(cache->cache_dev, bdi_bits); |
|---|
| 2450 | | -} |
|---|
| 2451 | | - |
|---|
| 2452 | 2425 | #define DEFAULT_MIGRATION_THRESHOLD 2048 |
|---|
| 2453 | 2426 | |
|---|
| 2454 | 2427 | static int cache_create(struct cache_args *ca, struct cache **result) |
|---|
| .. | .. |
|---|
| 2472 | 2445 | |
|---|
| 2473 | 2446 | ti->num_discard_bios = 1; |
|---|
| 2474 | 2447 | ti->discards_supported = true; |
|---|
| 2475 | | - ti->split_discard_bios = false; |
|---|
| 2476 | 2448 | |
|---|
| 2477 | 2449 | ti->per_io_data_size = sizeof(struct per_bio_data); |
|---|
| 2478 | 2450 | |
|---|
| .. | .. |
|---|
| 2483 | 2455 | if (r) |
|---|
| 2484 | 2456 | goto bad; |
|---|
| 2485 | 2457 | } |
|---|
| 2486 | | - |
|---|
| 2487 | | - cache->callbacks.congested_fn = cache_is_congested; |
|---|
| 2488 | | - dm_table_add_target_callbacks(ti->table, &cache->callbacks); |
|---|
| 2489 | 2458 | |
|---|
| 2490 | 2459 | cache->metadata_dev = ca->metadata_dev; |
|---|
| 2491 | 2460 | cache->origin_dev = ca->origin_dev; |
|---|
| .. | .. |
|---|
| 3096 | 3065 | do_waker(&cache->waker.work); |
|---|
| 3097 | 3066 | } |
|---|
| 3098 | 3067 | |
|---|
| 3068 | +static void emit_flags(struct cache *cache, char *result, |
|---|
| 3069 | + unsigned maxlen, ssize_t *sz_ptr) |
|---|
| 3070 | +{ |
|---|
| 3071 | + ssize_t sz = *sz_ptr; |
|---|
| 3072 | + struct cache_features *cf = &cache->features; |
|---|
| 3073 | + unsigned count = (cf->metadata_version == 2) + !cf->discard_passdown + 1; |
|---|
| 3074 | + |
|---|
| 3075 | + DMEMIT("%u ", count); |
|---|
| 3076 | + |
|---|
| 3077 | + if (cf->metadata_version == 2) |
|---|
| 3078 | + DMEMIT("metadata2 "); |
|---|
| 3079 | + |
|---|
| 3080 | + if (writethrough_mode(cache)) |
|---|
| 3081 | + DMEMIT("writethrough "); |
|---|
| 3082 | + |
|---|
| 3083 | + else if (passthrough_mode(cache)) |
|---|
| 3084 | + DMEMIT("passthrough "); |
|---|
| 3085 | + |
|---|
| 3086 | + else if (writeback_mode(cache)) |
|---|
| 3087 | + DMEMIT("writeback "); |
|---|
| 3088 | + |
|---|
| 3089 | + else { |
|---|
| 3090 | + DMEMIT("unknown "); |
|---|
| 3091 | + DMERR("%s: internal error: unknown io mode: %d", |
|---|
| 3092 | + cache_device_name(cache), (int) cf->io_mode); |
|---|
| 3093 | + } |
|---|
| 3094 | + |
|---|
| 3095 | + if (!cf->discard_passdown) |
|---|
| 3096 | + DMEMIT("no_discard_passdown "); |
|---|
| 3097 | + |
|---|
| 3098 | + *sz_ptr = sz; |
|---|
| 3099 | +} |
|---|
| 3100 | + |
|---|
| 3099 | 3101 | /* |
|---|
| 3100 | 3102 | * Status format: |
|---|
| 3101 | 3103 | * |
|---|
| .. | .. |
|---|
| 3162 | 3164 | (unsigned) atomic_read(&cache->stats.promotion), |
|---|
| 3163 | 3165 | (unsigned long) atomic_read(&cache->nr_dirty)); |
|---|
| 3164 | 3166 | |
|---|
| 3165 | | - if (cache->features.metadata_version == 2) |
|---|
| 3166 | | - DMEMIT("2 metadata2 "); |
|---|
| 3167 | | - else |
|---|
| 3168 | | - DMEMIT("1 "); |
|---|
| 3169 | | - |
|---|
| 3170 | | - if (writethrough_mode(cache)) |
|---|
| 3171 | | - DMEMIT("writethrough "); |
|---|
| 3172 | | - |
|---|
| 3173 | | - else if (passthrough_mode(cache)) |
|---|
| 3174 | | - DMEMIT("passthrough "); |
|---|
| 3175 | | - |
|---|
| 3176 | | - else if (writeback_mode(cache)) |
|---|
| 3177 | | - DMEMIT("writeback "); |
|---|
| 3178 | | - |
|---|
| 3179 | | - else { |
|---|
| 3180 | | - DMERR("%s: internal error: unknown io mode: %d", |
|---|
| 3181 | | - cache_device_name(cache), (int) cache->features.io_mode); |
|---|
| 3182 | | - goto err; |
|---|
| 3183 | | - } |
|---|
| 3167 | + emit_flags(cache, result, maxlen, &sz); |
|---|
| 3184 | 3168 | |
|---|
| 3185 | 3169 | DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold); |
|---|
| 3186 | 3170 | |
|---|
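emit_flags() consolidates the feature reporting and makes the leading count dynamic: one word for the io mode, plus one if metadata2 is in use, plus one if discard passdown is disabled. Reading the code above, a writeback cache with version-2 metadata and passdown disabled would report `3 metadata2 writeback no_discard_passdown` in the feature section of the status line. An unrecognised io mode also no longer aborts status output via the old goto err; it is emitted as "unknown" and logged instead.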
| .. | .. |
|---|
| 3409 | 3393 | return r; |
|---|
| 3410 | 3394 | } |
|---|
| 3411 | 3395 | |
|---|
| 3396 | +static bool origin_dev_supports_discard(struct block_device *origin_bdev) |
|---|
| 3397 | +{ |
|---|
| 3398 | + struct request_queue *q = bdev_get_queue(origin_bdev); |
|---|
| 3399 | + |
|---|
| 3400 | + return q && blk_queue_discard(q); |
|---|
| 3401 | +} |
|---|
| 3402 | + |
|---|
| 3403 | +/* |
|---|
| 3404 | + * If discard_passdown was enabled verify that the origin device |
|---|
| 3405 | + * supports discards. Disable discard_passdown if not. |
|---|
| 3406 | + */ |
|---|
| 3407 | +static void disable_passdown_if_not_supported(struct cache *cache) |
|---|
| 3408 | +{ |
|---|
| 3409 | + struct block_device *origin_bdev = cache->origin_dev->bdev; |
|---|
| 3410 | + struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits; |
|---|
| 3411 | + const char *reason = NULL; |
|---|
| 3412 | + char buf[BDEVNAME_SIZE]; |
|---|
| 3413 | + |
|---|
| 3414 | + if (!cache->features.discard_passdown) |
|---|
| 3415 | + return; |
|---|
| 3416 | + |
|---|
| 3417 | + if (!origin_dev_supports_discard(origin_bdev)) |
|---|
| 3418 | + reason = "discard unsupported"; |
|---|
| 3419 | + |
|---|
| 3420 | + else if (origin_limits->max_discard_sectors < cache->sectors_per_block) |
|---|
| 3421 | + reason = "max discard sectors smaller than a block"; |
|---|
| 3422 | + |
|---|
| 3423 | + if (reason) { |
|---|
| 3424 | + DMWARN("Origin device (%s) %s: Disabling discard passdown.", |
|---|
| 3425 | + bdevname(origin_bdev, buf), reason); |
|---|
| 3426 | + cache->features.discard_passdown = false; |
|---|
| 3427 | + } |
|---|
| 3428 | +} |
|---|
| 3429 | + |
|---|
| 3412 | 3430 | static void set_discard_limits(struct cache *cache, struct queue_limits *limits) |
|---|
| 3413 | 3431 | { |
|---|
| 3432 | + struct block_device *origin_bdev = cache->origin_dev->bdev; |
|---|
| 3433 | + struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits; |
|---|
| 3434 | + |
|---|
| 3435 | + if (!cache->features.discard_passdown) { |
|---|
| 3436 | + /* No passdown is done so setting own virtual limits */ |
|---|
| 3437 | + limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024, |
|---|
| 3438 | + cache->origin_sectors); |
|---|
| 3439 | + limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT; |
|---|
| 3440 | + return; |
|---|
| 3441 | + } |
|---|
| 3442 | + |
|---|
| 3414 | 3443 | /* |
|---|
| 3415 | | - * FIXME: these limits may be incompatible with the cache device |
|---|
| 3444 | + * cache_iterate_devices() is stacking both origin and fast device limits |
|---|
| 3445 | + * but discards aren't passed to fast device, so inherit origin's limits. |
|---|
| 3416 | 3446 | */ |
|---|
| 3417 | | - limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024, |
|---|
| 3418 | | - cache->origin_sectors); |
|---|
| 3419 | | - limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT; |
|---|
| 3447 | + limits->max_discard_sectors = origin_limits->max_discard_sectors; |
|---|
| 3448 | + limits->max_hw_discard_sectors = origin_limits->max_hw_discard_sectors; |
|---|
| 3449 | + limits->discard_granularity = origin_limits->discard_granularity; |
|---|
| 3450 | + limits->discard_alignment = origin_limits->discard_alignment; |
|---|
| 3451 | + limits->discard_misaligned = origin_limits->discard_misaligned; |
|---|
| 3420 | 3452 | } |
|---|
| 3421 | 3453 | |
|---|
| 3422 | 3454 | static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) |
|---|
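disable_passdown_if_not_supported() quietly falls back to virtual discards when the origin device either does not advertise discard support or cannot accept a discard as large as one cache block, and set_discard_limits() then either inherits the origin's discard limits (passdown) or advertises the target's own virtual limits (no passdown). blk_queue_discard() and bdevname() are simply the block-layer APIs of the kernel this diff targets; on much newer kernels the same capability check is usually expressed with bdev_max_discard_sectors(), an assumption to verify against your tree. A hedged sketch of that newer form:

```c
#include <linux/blkdev.h>

/* Assumes a recent kernel (roughly v5.19+) where bdev_max_discard_sectors()
 * exists; a return of zero means the device does not support discard. */
static bool origin_can_passdown(struct block_device *origin_bdev,
				sector_t cache_block_sectors)
{
	return bdev_max_discard_sectors(origin_bdev) >= cache_block_sectors;
}
```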
| .. | .. |
|---|
| 3433 | 3465 | blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT); |
|---|
| 3434 | 3466 | blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT); |
|---|
| 3435 | 3467 | } |
|---|
| 3468 | + |
|---|
| 3469 | + disable_passdown_if_not_supported(cache); |
|---|
| 3436 | 3470 | set_discard_limits(cache, limits); |
|---|
| 3437 | 3471 | } |
|---|
| 3438 | 3472 | |
|---|
| .. | .. |
|---|
| 3440 | 3474 | |
|---|
| 3441 | 3475 | static struct target_type cache_target = { |
|---|
| 3442 | 3476 | .name = "cache", |
|---|
| 3443 | | - .version = {2, 0, 0}, |
|---|
| 3477 | + .version = {2, 2, 0}, |
|---|
| 3444 | 3478 | .module = THIS_MODULE, |
|---|
| 3445 | 3479 | .ctr = cache_ctr, |
|---|
| 3446 | 3480 | .dtr = cache_dtr, |
|---|