hc
2024-01-31 f9004dbfff8a3fbbd7e2a88c8a4327c7f2f8e5b2
kernel/drivers/md/md-bitmap.c
@@ -54,14 +54,7 @@
 {
 	unsigned char *mappage;
 
-	if (page >= bitmap->pages) {
-		/* This can happen if bitmap_start_sync goes beyond
-		 * End-of-device while looking for a whole page.
-		 * It is harmless.
-		 */
-		return -EINVAL;
-	}
-
+	WARN_ON_ONCE(page >= bitmap->pages);
 	if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
 		return 0;
 
....@@ -486,7 +479,7 @@
486479 sb = kmap_atomic(bitmap->storage.sb_page);
487480 pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
488481 pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic));
489
- pr_debug(" version: %d\n", le32_to_cpu(sb->version));
482
+ pr_debug(" version: %u\n", le32_to_cpu(sb->version));
490483 pr_debug(" uuid: %08x.%08x.%08x.%08x\n",
491484 le32_to_cpu(*(__le32 *)(sb->uuid+0)),
492485 le32_to_cpu(*(__le32 *)(sb->uuid+4)),
@@ -497,11 +490,11 @@
 	pr_debug("events cleared: %llu\n",
 		 (unsigned long long) le64_to_cpu(sb->events_cleared));
 	pr_debug(" state: %08x\n", le32_to_cpu(sb->state));
-	pr_debug(" chunksize: %d B\n", le32_to_cpu(sb->chunksize));
-	pr_debug(" daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
+	pr_debug(" chunksize: %u B\n", le32_to_cpu(sb->chunksize));
+	pr_debug(" daemon sleep: %us\n", le32_to_cpu(sb->daemon_sleep));
 	pr_debug(" sync size: %llu KB\n",
 		 (unsigned long long)le64_to_cpu(sb->sync_size)/2);
-	pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
+	pr_debug("max write behind: %u\n", le32_to_cpu(sb->write_behind));
 	kunmap_atomic(sb);
 }
 
@@ -1365,6 +1358,14 @@
 	sector_t csize;
 	int err;
 
+	if (page >= bitmap->pages) {
+		/*
+		 * This can happen if bitmap_start_sync goes beyond
+		 * End-of-device while looking for a whole page or
+		 * user set a huge number to sysfs bitmap_set_bits.
+		 */
+		return NULL;
+	}
 	err = md_bitmap_checkpage(bitmap, page, create, 0);
 
 	if (bitmap->bp[page].hijacked ||
@@ -2106,7 +2107,8 @@
 			bytes = DIV_ROUND_UP(chunks, 8);
 			if (!bitmap->mddev->bitmap_info.external)
 				bytes += sizeof(bitmap_super_t);
-		} while (bytes > (space << 9));
+		} while (bytes > (space << 9) && (chunkshift + BITMAP_BLOCK_SHIFT) <
+			(BITS_PER_BYTE * sizeof(((bitmap_super_t *)0)->chunksize) - 1));
 	} else
 		chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;
 
....@@ -2151,7 +2153,7 @@
21512153 bitmap->counts.missing_pages = pages;
21522154 bitmap->counts.chunkshift = chunkshift;
21532155 bitmap->counts.chunks = chunks;
2154
- bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift +
2156
+ bitmap->mddev->bitmap_info.chunksize = 1UL << (chunkshift +
21552157 BITMAP_BLOCK_SHIFT);
21562158
21572159 blocks = min(old_counts.chunks << old_counts.chunkshift,
@@ -2177,8 +2179,8 @@
 				bitmap->counts.missing_pages = old_counts.pages;
 				bitmap->counts.chunkshift = old_counts.chunkshift;
 				bitmap->counts.chunks = old_counts.chunks;
-				bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
-									     BITMAP_BLOCK_SHIFT);
+				bitmap->mddev->bitmap_info.chunksize =
+					1UL << (old_counts.chunkshift + BITMAP_BLOCK_SHIFT);
 				blocks = old_counts.chunks << old_counts.chunkshift;
 				pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
 				break;
@@ -2196,20 +2198,23 @@
 
 		if (set) {
 			bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
-			if (*bmc_new == 0) {
-				/* need to set on-disk bits too. */
-				sector_t end = block + new_blocks;
-				sector_t start = block >> chunkshift;
-				start <<= chunkshift;
-				while (start < end) {
-					md_bitmap_file_set_bit(bitmap, block);
-					start += 1 << chunkshift;
+			if (bmc_new) {
+				if (*bmc_new == 0) {
+					/* need to set on-disk bits too. */
+					sector_t end = block + new_blocks;
+					sector_t start = block >> chunkshift;
+
+					start <<= chunkshift;
+					while (start < end) {
+						md_bitmap_file_set_bit(bitmap, block);
+						start += 1 << chunkshift;
+					}
+					*bmc_new = 2;
+					md_bitmap_count_page(&bitmap->counts, block, 1);
+					md_bitmap_set_pending(&bitmap->counts, block);
 				}
-				*bmc_new = 2;
-				md_bitmap_count_page(&bitmap->counts, block, 1);
-				md_bitmap_set_pending(&bitmap->counts, block);
+				*bmc_new |= NEEDED_MASK;
 			}
-			*bmc_new |= NEEDED_MASK;
 			if (new_blocks < old_blocks)
 				old_blocks = new_blocks;
 		}
@@ -2471,11 +2476,35 @@
 {
 	unsigned long backlog;
 	unsigned long old_mwb = mddev->bitmap_info.max_write_behind;
+	struct md_rdev *rdev;
+	bool has_write_mostly = false;
 	int rv = kstrtoul(buf, 10, &backlog);
 	if (rv)
 		return rv;
 	if (backlog > COUNTER_MAX)
 		return -EINVAL;
+
+	rv = mddev_lock(mddev);
+	if (rv)
+		return rv;
+
+	/*
+	 * Without write mostly device, it doesn't make sense to set
+	 * backlog for max_write_behind.
+	 */
+	rdev_for_each(rdev, mddev) {
+		if (test_bit(WriteMostly, &rdev->flags)) {
+			has_write_mostly = true;
+			break;
+		}
+	}
+	if (!has_write_mostly) {
+		pr_warn_ratelimited("%s: can't set backlog, no write mostly device available\n",
+				    mdname(mddev));
+		mddev_unlock(mddev);
+		return -EINVAL;
+	}
+
 	mddev->bitmap_info.max_write_behind = backlog;
 	if (!backlog && mddev->serial_info_pool) {
 		/* serial_info_pool is not needed if backlog is zero */
@@ -2483,13 +2512,13 @@
 		mddev_destroy_serial_pool(mddev, NULL, false);
 	} else if (backlog && !mddev->serial_info_pool) {
 		/* serial_info_pool is needed since backlog is not zero */
-		struct md_rdev *rdev;
-
 		rdev_for_each(rdev, mddev)
 			mddev_create_serial_pool(mddev, rdev, false);
 	}
 	if (old_mwb != backlog)
 		md_bitmap_update_sb(mddev->bitmap);
+
+	mddev_unlock(mddev);
 	return len;
 }
 
....@@ -2516,6 +2545,9 @@
25162545 if (csize < 512 ||
25172546 !is_power_of_2(csize))
25182547 return -EINVAL;
2548
+ if (BITS_PER_LONG > 32 && csize >= (1ULL << (BITS_PER_BYTE *
2549
+ sizeof(((bitmap_super_t *)0)->chunksize))))
2550
+ return -EOVERFLOW;
25192551 mddev->bitmap_info.chunksize = csize;
25202552 return len;
25212553 }