@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
  *
@@ -53,14 +54,7 @@
 {
	unsigned char *mappage;
 
-	if (page >= bitmap->pages) {
-		/* This can happen if bitmap_start_sync goes beyond
-		 * End-of-device while looking for a whole page.
-		 * It is harmless.
-		 */
-		return -EINVAL;
-	}
-
+	WARN_ON_ONCE(page >= bitmap->pages);
	if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
		return 0;
 
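The -EINVAL bounds check dropped here is not lost: a later hunk in this diff adds the same page-range guard to md_bitmap_get_counter(), the function that calls md_bitmap_checkpage(), so checkpage itself only needs to complain. A hedged sketch of the resulting split, with simplified stand-in types and signatures rather than the real prototypes from md-bitmap.h:

```c
#include <linux/bug.h>		/* WARN_ON_ONCE() */
#include <linux/errno.h>

/* Stand-in for the relevant part of struct bitmap_counts. */
struct counts_sketch {
	unsigned long pages;
};

/* md_bitmap_checkpage() now only reports a bad page: WARN_ON_ONCE() prints a
 * backtrace the first time its condition is true and never alters control
 * flow, so rejecting the page is the caller's job. */
static int checkpage_sketch(struct counts_sketch *counts, unsigned long page)
{
	WARN_ON_ONCE(page >= counts->pages);
	return 0;
}

/* ...and md_bitmap_get_counter(), its caller, filters the out-of-range case
 * first (see the hunk at old line 1366 further down). */
static int lookup_sketch(struct counts_sketch *counts, unsigned long page)
{
	if (page >= counts->pages)	/* e.g. an oversized sysfs bitmap_set_bits */
		return -EINVAL;
	return checkpage_sketch(counts, page);
}
```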
@@ -323,14 +317,6 @@
	wake_up(&bitmap->write_wait);
 }
 
-/* copied from buffer.c */
-static void
-__clear_page_buffers(struct page *page)
-{
-	ClearPagePrivate(page);
-	set_page_private(page, 0);
-	put_page(page);
-}
 static void free_buffers(struct page *page)
 {
	struct buffer_head *bh;
@@ -344,7 +330,7 @@
		free_buffer_head(bh);
		bh = next;
	}
-	__clear_page_buffers(page);
+	detach_page_private(page);
	put_page(page);
 }
 
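The removed __clear_page_buffers() open-coded what the generic page-private helpers already do, so free_buffers() can call detach_page_private() directly. For orientation, a hedged approximation of the two helpers this diff switches to, modelled on include/linux/pagemap.h of this kernel era rather than copied from the patch:

```c
#include <linux/mm.h>		/* get_page(), put_page(), page flag helpers */

/* Rough equivalents of attach_page_private()/detach_page_private(); the real
 * ones live in include/linux/pagemap.h. */
static inline void attach_page_private_sketch(struct page *page, void *data)
{
	get_page(page);				/* private data pins the page */
	set_page_private(page, (unsigned long)data);
	SetPagePrivate(page);
}

static inline void *detach_page_private_sketch(struct page *page)
{
	void *data = (void *)page_private(page);

	if (!PagePrivate(page))
		return NULL;
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);				/* drop the reference from attach */

	return data;
}
```

The net behaviour of free_buffers() is unchanged: the buffer_head list is released, PG_private is cleared, and both page references are dropped (one by detach_page_private(), one by the explicit put_page() that follows it).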
@@ -363,33 +349,38 @@
	int ret = 0;
	struct inode *inode = file_inode(file);
	struct buffer_head *bh;
-	sector_t block;
+	sector_t block, blk_cur;
+	unsigned long blocksize = i_blocksize(inode);
 
	pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
		 (unsigned long long)index << PAGE_SHIFT);
 
-	bh = alloc_page_buffers(page, 1<<inode->i_blkbits, false);
+	bh = alloc_page_buffers(page, blocksize, false);
	if (!bh) {
		ret = -ENOMEM;
		goto out;
	}
-	attach_page_buffers(page, bh);
-	block = index << (PAGE_SHIFT - inode->i_blkbits);
+	attach_page_private(page, bh);
+	blk_cur = index << (PAGE_SHIFT - inode->i_blkbits);
	while (bh) {
+		block = blk_cur;
+
		if (count == 0)
			bh->b_blocknr = 0;
		else {
-			bh->b_blocknr = bmap(inode, block);
-			if (bh->b_blocknr == 0) {
-				/* Cannot use this file! */
+			ret = bmap(inode, &block);
+			if (ret || !block) {
				ret = -EINVAL;
+				bh->b_blocknr = 0;
				goto out;
			}
+
+			bh->b_blocknr = block;
			bh->b_bdev = inode->i_sb->s_bdev;
-			if (count < (1<<inode->i_blkbits))
+			if (count < blocksize)
				count = 0;
			else
-				count -= (1<<inode->i_blkbits);
+				count -= blocksize;
 
			bh->b_end_io = end_bitmap_write;
			bh->b_private = bitmap;
@@ -398,7 +389,7 @@
			set_buffer_mapped(bh);
			submit_bh(REQ_OP_READ, 0, bh);
		}
-		block++;
+		blk_cur++;
		bh = bh->b_this_page;
	}
	page->index = index;
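This hunk tracks the interface change in which bmap() stopped returning the block number and started returning an error code, writing the mapping back through its sector_t pointer; read_page() therefore keeps a separate blk_cur cursor, because the in/out argument is overwritten on every call. A hedged illustration of the newer calling convention (not code from the patch):

```c
#include <linux/fs.h>		/* struct inode, bmap() */
#include <linux/errno.h>

/* bmap() now takes the logical block in *block and, on success, replaces it
 * with the physical block (0 meaning "unmapped"); a negative return such as
 * -EINVAL (no ->bmap on this filesystem) reports failure. */
static int map_one_block_sketch(struct inode *inode, sector_t logical,
				sector_t *physical)
{
	sector_t block = logical;
	int ret;

	ret = bmap(inode, &block);
	if (ret)
		return ret;		/* filesystem cannot map blocks */
	if (!block)
		return -EINVAL;		/* hole: md cannot use such a file */

	*physical = block;
	return 0;
}
```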
@@ -488,22 +479,22 @@
	sb = kmap_atomic(bitmap->storage.sb_page);
	pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
	pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic));
-	pr_debug(" version: %d\n", le32_to_cpu(sb->version));
+	pr_debug(" version: %u\n", le32_to_cpu(sb->version));
	pr_debug(" uuid: %08x.%08x.%08x.%08x\n",
-		 le32_to_cpu(*(__u32 *)(sb->uuid+0)),
-		 le32_to_cpu(*(__u32 *)(sb->uuid+4)),
-		 le32_to_cpu(*(__u32 *)(sb->uuid+8)),
-		 le32_to_cpu(*(__u32 *)(sb->uuid+12)));
+		 le32_to_cpu(*(__le32 *)(sb->uuid+0)),
+		 le32_to_cpu(*(__le32 *)(sb->uuid+4)),
+		 le32_to_cpu(*(__le32 *)(sb->uuid+8)),
+		 le32_to_cpu(*(__le32 *)(sb->uuid+12)));
	pr_debug(" events: %llu\n",
		 (unsigned long long) le64_to_cpu(sb->events));
	pr_debug("events cleared: %llu\n",
		 (unsigned long long) le64_to_cpu(sb->events_cleared));
	pr_debug(" state: %08x\n", le32_to_cpu(sb->state));
-	pr_debug(" chunksize: %d B\n", le32_to_cpu(sb->chunksize));
-	pr_debug(" daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
+	pr_debug(" chunksize: %u B\n", le32_to_cpu(sb->chunksize));
+	pr_debug(" daemon sleep: %us\n", le32_to_cpu(sb->daemon_sleep));
	pr_debug(" sync size: %llu KB\n",
		 (unsigned long long)le64_to_cpu(sb->sync_size)/2);
-	pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
+	pr_debug("max write behind: %u\n", le32_to_cpu(sb->write_behind));
	kunmap_atomic(sb);
 }
 
@@ -608,8 +599,8 @@
	if (bitmap->cluster_slot >= 0) {
		sector_t bm_blocks = bitmap->mddev->resync_max_sectors;
 
-		sector_div(bm_blocks,
-			   bitmap->mddev->bitmap_info.chunksize >> 9);
+		bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks,
+			   (bitmap->mddev->bitmap_info.chunksize >> 9));
		/* bits to bytes */
		bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
		/* to 4k blocks */
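sector_div() divides its first argument in place and truncates toward zero (its value is the remainder), so the old code could under-count bitmap chunks when resync_max_sectors is not a multiple of the chunk size; DIV_ROUND_UP_SECTOR_T() rounds up instead, guaranteeing the tail sectors get a bit. A small illustrative comparison, with invented values and the caveat that the header placement of these helpers varies slightly between kernel versions:

```c
#include <linux/kernel.h>	/* DIV_ROUND_UP_SECTOR_T() */
#include <linux/blkdev.h>	/* sector_t, sector_div() */

/* Illustration only: 1000 sectors covered by 512-sector chunks. */
static sector_t chunk_count_difference_sketch(void)
{
	sector_t truncated = 1000;
	sector_t rounded_up;

	sector_div(truncated, 512);			/* truncated  == 1 */
	rounded_up = DIV_ROUND_UP_SECTOR_T(1000, 512);	/* rounded_up == 2 */

	return rounded_up - truncated;	/* the last partial chunk was lost before */
}
```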
@@ -641,14 +632,6 @@
	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
	write_behind = le32_to_cpu(sb->write_behind);
	sectors_reserved = le32_to_cpu(sb->sectors_reserved);
-	/* Setup nodes/clustername only if bitmap version is
-	 * cluster-compatible
-	 */
-	if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
-		nodes = le32_to_cpu(sb->nodes);
-		strlcpy(bitmap->mddev->bitmap_info.cluster_name,
-			sb->cluster_name, 64);
-	}
 
	/* verify that the bitmap-specific fields are valid */
	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
@@ -668,6 +651,16 @@
		pr_warn("%s: invalid bitmap file superblock: %s\n",
			bmname(bitmap), reason);
		goto out;
+	}
+
+	/*
+	 * Setup nodes/clustername only if bitmap version is
+	 * cluster-compatible
+	 */
+	if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
+		nodes = le32_to_cpu(sb->nodes);
+		strlcpy(bitmap->mddev->bitmap_info.cluster_name,
+			sb->cluster_name, 64);
	}
 
	/* keep the array size field of the bitmap superblock up to date */
@@ -702,9 +695,9 @@
 
 out:
	kunmap_atomic(sb);
-	/* Assigning chunksize is required for "re_read" */
-	bitmap->mddev->bitmap_info.chunksize = chunksize;
	if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
+		/* Assigning chunksize is required for "re_read" */
+		bitmap->mddev->bitmap_info.chunksize = chunksize;
		err = md_setup_cluster(bitmap->mddev, nodes);
		if (err) {
			pr_warn("%s: Could not setup cluster service (%d)\n",
@@ -715,18 +708,18 @@
		goto re_read;
	}
 
-
 out_no_sb:
-	if (test_bit(BITMAP_STALE, &bitmap->flags))
-		bitmap->events_cleared = bitmap->mddev->events;
-	bitmap->mddev->bitmap_info.chunksize = chunksize;
-	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
-	bitmap->mddev->bitmap_info.max_write_behind = write_behind;
-	bitmap->mddev->bitmap_info.nodes = nodes;
-	if (bitmap->mddev->bitmap_info.space == 0 ||
-	    bitmap->mddev->bitmap_info.space > sectors_reserved)
-		bitmap->mddev->bitmap_info.space = sectors_reserved;
-	if (err) {
+	if (err == 0) {
+		if (test_bit(BITMAP_STALE, &bitmap->flags))
+			bitmap->events_cleared = bitmap->mddev->events;
+		bitmap->mddev->bitmap_info.chunksize = chunksize;
+		bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
+		bitmap->mddev->bitmap_info.max_write_behind = write_behind;
+		bitmap->mddev->bitmap_info.nodes = nodes;
+		if (bitmap->mddev->bitmap_info.space == 0 ||
+		    bitmap->mddev->bitmap_info.space > sectors_reserved)
+			bitmap->mddev->bitmap_info.space = sectors_reserved;
+	} else {
		md_bitmap_print_sb(bitmap);
		if (bitmap->cluster_slot < 0)
			md_cluster_stop(bitmap->mddev);
@@ -1018,8 +1011,6 @@
	/* look at each page to see if there are any set bits that need to be
	 * flushed out to disk */
	for (i = 0; i < bitmap->storage.file_pages; i++) {
-		if (!bitmap->storage.filemap)
-			return;
		dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
		need_write = test_and_clear_page_attr(bitmap, i,
						      BITMAP_PAGE_NEEDWRITE);
@@ -1337,7 +1328,8 @@
				   BITMAP_PAGE_DIRTY))
			/* bitmap_unplug will handle the rest */
			break;
-		if (test_and_clear_page_attr(bitmap, j,
+		if (bitmap->storage.filemap &&
+		    test_and_clear_page_attr(bitmap, j,
					     BITMAP_PAGE_NEEDWRITE)) {
			write_page(bitmap, bitmap->storage.filemap[j], 0);
		}
@@ -1366,6 +1358,14 @@
	sector_t csize;
	int err;
 
+	if (page >= bitmap->pages) {
+		/*
+		 * This can happen if bitmap_start_sync goes beyond
+		 * End-of-device while looking for a whole page or
+		 * user set a huge number to sysfs bitmap_set_bits.
+		 */
+		return NULL;
+	}
	err = md_bitmap_checkpage(bitmap, page, create, 0);
 
	if (bitmap->bp[page].hijacked ||
@@ -1437,7 +1437,7 @@
		case 0:
			md_bitmap_file_set_bit(bitmap, offset);
			md_bitmap_count_page(&bitmap->counts, offset, 1);
-			/* fall through */
+			fallthrough;
		case 1:
			*bmc = 2;
		}
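The /* fall through */ comment becomes the fallthrough pseudo-keyword, a macro (defined in include/linux/compiler_attributes.h) that expands to __attribute__((__fallthrough__)) on compilers that support it, so -Wimplicit-fallthrough can verify the intent instead of parsing comments. A trivial hedged example of the annotation, not code from this file:

```c
#include <linux/compiler.h>	/* fallthrough (via compiler_attributes.h) */

/* Illustration only: the annotation documents that the missing "break"
 * before "case 1" is deliberate, so the fallthrough warning stays quiet
 * here while still catching genuinely forgotten breaks elsewhere. */
static int counter_transition_sketch(int bmc)
{
	switch (bmc) {
	case 0:
		/* first writer: the on-disk bit would be set here too */
		fallthrough;
	case 1:
		bmc = 2;
		break;
	default:
		break;
	}
	return bmc;
}
```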
@@ -1635,7 +1635,7 @@
		s += blocks;
	}
	bitmap->last_end_sync = jiffies;
-	sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
+	sysfs_notify_dirent_safe(bitmap->mddev->sysfs_completed);
 }
 EXPORT_SYMBOL(md_bitmap_cond_end_sync);
 
@@ -1791,6 +1791,8 @@
		return;
 
	md_bitmap_wait_behind_writes(mddev);
+	if (!mddev->serialize_policy)
+		mddev_destroy_serial_pool(mddev, NULL, true);
 
	mutex_lock(&mddev->bitmap_info.mutex);
	spin_lock(&mddev->lock);
@@ -1901,9 +1903,13 @@
	sector_t start = 0;
	sector_t sector = 0;
	struct bitmap *bitmap = mddev->bitmap;
+	struct md_rdev *rdev;
 
	if (!bitmap)
		goto out;
+
+	rdev_for_each(rdev, mddev)
+		mddev_create_serial_pool(mddev, rdev, true);
 
	if (mddev_is_clustered(mddev))
		md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
@@ -1949,6 +1955,7 @@
 }
 EXPORT_SYMBOL_GPL(md_bitmap_load);
 
+/* caller need to free returned bitmap with md_bitmap_free() */
 struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
 {
	int rv = 0;
@@ -2012,6 +2019,7 @@
	md_bitmap_unplug(mddev->bitmap);
	*low = lo;
	*high = hi;
+	md_bitmap_free(bitmap);
 
	return rv;
 }
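The new comment above get_bitmap_from_slot() and the md_bitmap_free() call added here are two halves of the same ownership rule: the slot bitmap returned by the getter is a private copy that the caller must release, which md_bitmap_copy_from_slot() previously failed to do. A hedged sketch of the expected calling pattern (a hypothetical caller, not a function from this file, assuming the usual md.h/md-bitmap.h context):

```c
#include <linux/err.h>		/* IS_ERR(), PTR_ERR() */
#include "md.h"
#include "md-bitmap.h"

/* Hypothetical caller shown only to illustrate the get/free pairing. */
static int peek_peer_bitmap_sketch(struct mddev *mddev, int slot)
{
	struct bitmap *bitmap;

	bitmap = get_bitmap_from_slot(mddev, slot);
	if (IS_ERR(bitmap))
		return PTR_ERR(bitmap);

	/* ... inspect or copy the peer node's dirty bits here ... */

	md_bitmap_free(bitmap);		/* matches the allocation in the getter */
	return 0;
}
```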
@@ -2099,7 +2107,8 @@
			bytes = DIV_ROUND_UP(chunks, 8);
			if (!bitmap->mddev->bitmap_info.external)
				bytes += sizeof(bitmap_super_t);
-		} while (bytes > (space << 9));
+		} while (bytes > (space << 9) && (chunkshift + BITMAP_BLOCK_SHIFT) <
+			(BITS_PER_BYTE * sizeof(((bitmap_super_t *)0)->chunksize) - 1));
	} else
		chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;
 
@@ -2144,7 +2153,7 @@
	bitmap->counts.missing_pages = pages;
	bitmap->counts.chunkshift = chunkshift;
	bitmap->counts.chunks = chunks;
-	bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift +
+	bitmap->mddev->bitmap_info.chunksize = 1UL << (chunkshift +
						     BITMAP_BLOCK_SHIFT);
 
	blocks = min(old_counts.chunks << old_counts.chunkshift,
@@ -2170,8 +2179,8 @@
			bitmap->counts.missing_pages = old_counts.pages;
			bitmap->counts.chunkshift = old_counts.chunkshift;
			bitmap->counts.chunks = old_counts.chunks;
-			bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
-								     BITMAP_BLOCK_SHIFT);
+			bitmap->mddev->bitmap_info.chunksize =
+				1UL << (old_counts.chunkshift + BITMAP_BLOCK_SHIFT);
			blocks = old_counts.chunks << old_counts.chunkshift;
			pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
			break;
@@ -2189,20 +2198,23 @@
 
		if (set) {
			bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
-			if (*bmc_new == 0) {
-				/* need to set on-disk bits too. */
-				sector_t end = block + new_blocks;
-				sector_t start = block >> chunkshift;
-				start <<= chunkshift;
-				while (start < end) {
-					md_bitmap_file_set_bit(bitmap, block);
-					start += 1 << chunkshift;
+			if (bmc_new) {
+				if (*bmc_new == 0) {
+					/* need to set on-disk bits too. */
+					sector_t end = block + new_blocks;
+					sector_t start = block >> chunkshift;
+
+					start <<= chunkshift;
+					while (start < end) {
+						md_bitmap_file_set_bit(bitmap, block);
+						start += 1 << chunkshift;
+					}
+					*bmc_new = 2;
+					md_bitmap_count_page(&bitmap->counts, block, 1);
+					md_bitmap_set_pending(&bitmap->counts, block);
				}
-				*bmc_new = 2;
-				md_bitmap_count_page(&bitmap->counts, block, 1);
-				md_bitmap_set_pending(&bitmap->counts, block);
+				*bmc_new |= NEEDED_MASK;
			}
-			*bmc_new |= NEEDED_MASK;
			if (new_blocks < old_blocks)
				old_blocks = new_blocks;
		}
@@ -2290,9 +2302,9 @@
			goto out;
		}
		if (mddev->pers) {
-			mddev->pers->quiesce(mddev, 1);
+			mddev_suspend(mddev);
			md_bitmap_destroy(mddev);
-			mddev->pers->quiesce(mddev, 0);
+			mddev_resume(mddev);
		}
		mddev->bitmap_info.offset = 0;
		if (mddev->bitmap_info.file) {
@@ -2329,8 +2341,8 @@
			mddev->bitmap_info.offset = offset;
			if (mddev->pers) {
				struct bitmap *bitmap;
-				mddev->pers->quiesce(mddev, 1);
				bitmap = md_bitmap_create(mddev, -1);
+				mddev_suspend(mddev);
				if (IS_ERR(bitmap))
					rv = PTR_ERR(bitmap);
				else {
@@ -2339,11 +2351,12 @@
					if (rv)
						mddev->bitmap_info.offset = 0;
				}
-				mddev->pers->quiesce(mddev, 0);
				if (rv) {
					md_bitmap_destroy(mddev);
+					mddev_resume(mddev);
					goto out;
				}
+				mddev_resume(mddev);
			}
		}
	}
@@ -2462,12 +2475,50 @@
 backlog_store(struct mddev *mddev, const char *buf, size_t len)
 {
	unsigned long backlog;
+	unsigned long old_mwb = mddev->bitmap_info.max_write_behind;
+	struct md_rdev *rdev;
+	bool has_write_mostly = false;
	int rv = kstrtoul(buf, 10, &backlog);
	if (rv)
		return rv;
	if (backlog > COUNTER_MAX)
		return -EINVAL;
+
+	rv = mddev_lock(mddev);
+	if (rv)
+		return rv;
+
+	/*
+	 * Without write mostly device, it doesn't make sense to set
+	 * backlog for max_write_behind.
+	 */
+	rdev_for_each(rdev, mddev) {
+		if (test_bit(WriteMostly, &rdev->flags)) {
+			has_write_mostly = true;
+			break;
+		}
+	}
+	if (!has_write_mostly) {
+		pr_warn_ratelimited("%s: can't set backlog, no write mostly device available\n",
+				    mdname(mddev));
+		mddev_unlock(mddev);
+		return -EINVAL;
+	}
+
	mddev->bitmap_info.max_write_behind = backlog;
+	if (!backlog && mddev->serial_info_pool) {
+		/* serial_info_pool is not needed if backlog is zero */
+		if (!mddev->serialize_policy)
+			mddev_destroy_serial_pool(mddev, NULL, false);
+	} else if (backlog && !mddev->serial_info_pool) {
+		/* serial_info_pool is needed since backlog is not zero */
+		rdev_for_each(rdev, mddev)
+			mddev_create_serial_pool(mddev, rdev, false);
+	}
+	if (old_mwb != backlog)
+		md_bitmap_update_sb(mddev->bitmap);
+
+	mddev_unlock(mddev);
	return len;
 }
 
@@ -2494,6 +2545,9 @@
	if (csize < 512 ||
	    !is_power_of_2(csize))
		return -EINVAL;
+	if (BITS_PER_LONG > 32 && csize >= (1ULL << (BITS_PER_BYTE *
+		sizeof(((bitmap_super_t *)0)->chunksize))))
+		return -EOVERFLOW;
	mddev->bitmap_info.chunksize = csize;
	return len;
 }
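The reason for the new -EOVERFLOW check is that the on-disk superblock stores the chunk size in a 32-bit field (bitmap_super_t.chunksize is a __le32, so BITS_PER_BYTE * sizeof(...) is 32): on 64-bit builds an unsigned long csize of 2^32 or more would be accepted and then silently truncated when written back, which is also why the resize loop a few hunks up now caps chunkshift against the same field width. The BITS_PER_LONG > 32 guard merely skips the comparison on 32-bit builds, where csize can never reach 2^32. A hedged restatement of the bound, not code from the patch:

```c
#include <linux/kernel.h>	/* BITS_PER_BYTE */
#include <linux/types.h>	/* __le32, bool */

/* Illustration only: a value fits the superblock's chunksize field when it
 * is below 2^32; since chunk sizes are powers of two, the largest value
 * that survives a round trip through the superblock is 2^31. */
static bool chunksize_fits_sb_sketch(unsigned long long csize)
{
	const unsigned int field_bits = BITS_PER_BYTE * sizeof(__le32);	/* 32 */

	return csize < (1ULL << field_bits);
}
```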
@@ -2600,4 +2654,3 @@
	.name = "bitmap",
	.attrs = md_bitmap_attrs,
 };
-