hc
2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/md/dm-thin.c
@@ -231,6 +231,7 @@
 	struct dm_target *ti;	/* Only set if a pool target is bound */
 
 	struct mapped_device *pool_md;
+	struct block_device *data_dev;
 	struct block_device *md_dev;
 	struct dm_pool_metadata *pmd;
 
@@ -281,6 +282,8 @@
 	struct dm_bio_prison_cell **cell_sort_array;
 
 	mempool_t mapping_pool;
+
+	struct bio flush_bio;
 };
 
 static void metadata_operation_failed(struct pool *pool, const char *op, int r);
@@ -323,7 +326,6 @@
 	struct pool *pool;
 	struct dm_dev *data_dev;
 	struct dm_dev *metadata_dev;
-	struct dm_target_callbacks callbacks;
 
 	dm_block_t low_water_blocks;
 	struct pool_features requested_pf;	/* Features requested during table load */
@@ -355,7 +357,7 @@
 	 * Ensures the thin is not destroyed until the worker has finished
	 * iterating the active_thins list.
 	 */
-	atomic_t refcount;
+	refcount_t refcount;
 	struct completion can_destroy;
 };
 
@@ -609,13 +611,12 @@
 		blk_status_t error)
 {
 	struct bio_list bios;
-	unsigned long flags;
 
 	bio_list_init(&bios);
 
-	spin_lock_irqsave(&tc->lock, flags);
+	spin_lock_irq(&tc->lock);
 	__merge_bio_list(&bios, master);
-	spin_unlock_irqrestore(&tc->lock, flags);
+	spin_unlock_irq(&tc->lock);
 
 	error_bio_list(&bios, error);
 }
@@ -623,15 +624,14 @@
 static void requeue_deferred_cells(struct thin_c *tc)
 {
 	struct pool *pool = tc->pool;
-	unsigned long flags;
 	struct list_head cells;
 	struct dm_bio_prison_cell *cell, *tmp;
 
 	INIT_LIST_HEAD(&cells);
 
-	spin_lock_irqsave(&tc->lock, flags);
+	spin_lock_irq(&tc->lock);
 	list_splice_init(&tc->deferred_cells, &cells);
-	spin_unlock_irqrestore(&tc->lock, flags);
+	spin_unlock_irq(&tc->lock);
 
 	list_for_each_entry_safe(cell, tmp, &cells, user_list)
 		cell_requeue(pool, cell);
@@ -640,14 +640,13 @@
 static void requeue_io(struct thin_c *tc)
 {
 	struct bio_list bios;
-	unsigned long flags;
 
 	bio_list_init(&bios);
 
-	spin_lock_irqsave(&tc->lock, flags);
+	spin_lock_irq(&tc->lock);
 	__merge_bio_list(&bios, &tc->deferred_bio_list);
 	__merge_bio_list(&bios, &tc->retry_on_resume_list);
-	spin_unlock_irqrestore(&tc->lock, flags);
+	spin_unlock_irq(&tc->lock);
 
 	error_bio_list(&bios, BLK_STS_DM_REQUEUE);
 	requeue_deferred_cells(tc);
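Note on the locking conversions in this patch: every spin_lock_irqsave() -> spin_lock_irq() change above and below is only valid because the affected paths run in process context with interrupts enabled, so there is no interrupt state worth saving. A minimal sketch of the two forms, using hypothetical example_* names that are not part of dm-thin:

#include <linux/bio.h>
#include <linux/spinlock.h>

/* Illustrative only -- not dm-thin code. */
struct example_ctx {
	spinlock_t lock;		/* protects @deferred */
	struct bio_list deferred;	/* bios waiting for a worker */
};

/* Process context with interrupts enabled: the plain _irq pair suffices. */
static void example_defer_bio(struct example_ctx *ctx, struct bio *bio)
{
	spin_lock_irq(&ctx->lock);
	bio_list_add(&ctx->deferred, bio);
	spin_unlock_irq(&ctx->lock);
}

/*
 * A caller that may already run with interrupts disabled (e.g. a bio
 * completion path) still needs the irqsave form, which is presumably why
 * cell_defer_no_holder() below keeps it.
 */
static void example_defer_bio_any_context(struct example_ctx *ctx, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	bio_list_add(&ctx->deferred, bio);
	spin_unlock_irqrestore(&ctx->lock, flags);
}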
@@ -756,10 +755,9 @@
 static void issue(struct thin_c *tc, struct bio *bio)
 {
 	struct pool *pool = tc->pool;
-	unsigned long flags;
 
 	if (!bio_triggers_commit(tc, bio)) {
-		generic_make_request(bio);
+		submit_bio_noacct(bio);
 		return;
 	}
 
@@ -777,9 +775,9 @@
 	 * Batch together any bios that trigger commits and then issue a
 	 * single commit for them in process_deferred_bios().
 	 */
-	spin_lock_irqsave(&pool->lock, flags);
+	spin_lock_irq(&pool->lock);
 	bio_list_add(&pool->deferred_flush_bios, bio);
-	spin_unlock_irqrestore(&pool->lock, flags);
+	spin_unlock_irq(&pool->lock);
 }
 
 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
@@ -886,12 +884,15 @@
 {
 	struct pool *pool = tc->pool;
 	unsigned long flags;
+	int has_work;
 
 	spin_lock_irqsave(&tc->lock, flags);
 	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
+	has_work = !bio_list_empty(&tc->deferred_bio_list);
 	spin_unlock_irqrestore(&tc->lock, flags);
 
-	wake_worker(pool);
+	if (has_work)
+		wake_worker(pool);
 }
 
 static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
@@ -960,7 +961,6 @@
 static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
 {
 	struct pool *pool = tc->pool;
-	unsigned long flags;
 
 	/*
 	 * If the bio has the REQ_FUA flag set we must commit the metadata
@@ -985,9 +985,9 @@
 	 * Batch together any bios that trigger commits and then issue a
 	 * single commit for them in process_deferred_bios().
 	 */
-	spin_lock_irqsave(&pool->lock, flags);
+	spin_lock_irq(&pool->lock);
 	bio_list_add(&pool->deferred_flush_completions, bio);
-	spin_unlock_irqrestore(&pool->lock, flags);
+	spin_unlock_irq(&pool->lock);
 }
 
 static void process_prepared_mapping(struct dm_thin_new_mapping *m)
@@ -1226,14 +1226,13 @@
 static void process_prepared(struct pool *pool, struct list_head *head,
 			     process_mapping_fn *fn)
 {
-	unsigned long flags;
 	struct list_head maps;
 	struct dm_thin_new_mapping *m, *tmp;
 
 	INIT_LIST_HEAD(&maps);
-	spin_lock_irqsave(&pool->lock, flags);
+	spin_lock_irq(&pool->lock);
 	list_splice_init(head, &maps);
-	spin_unlock_irqrestore(&pool->lock, flags);
+	spin_unlock_irq(&pool->lock);
 
 	list_for_each_entry_safe(m, tmp, &maps, list)
 		(*fn)(m);
@@ -1510,14 +1509,12 @@
 
 static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
 {
-	unsigned long flags;
-
 	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
 		DMWARN("%s: reached low water mark for data device: sending event.",
 		       dm_device_name(pool->pool_md));
-		spin_lock_irqsave(&pool->lock, flags);
+		spin_lock_irq(&pool->lock);
 		pool->low_water_triggered = true;
-		spin_unlock_irqrestore(&pool->lock, flags);
+		spin_unlock_irq(&pool->lock);
 		dm_table_event(pool->ti->table);
 	}
 }
@@ -1593,11 +1590,10 @@
 {
 	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 	struct thin_c *tc = h->tc;
-	unsigned long flags;
 
-	spin_lock_irqsave(&tc->lock, flags);
+	spin_lock_irq(&tc->lock);
 	bio_list_add(&tc->retry_on_resume_list, bio);
-	spin_unlock_irqrestore(&tc->lock, flags);
+	spin_unlock_irq(&tc->lock);
 }
 
 static blk_status_t should_error_unserviceable_bio(struct pool *pool)
@@ -2170,7 +2166,6 @@
 static void process_thin_deferred_bios(struct thin_c *tc)
 {
 	struct pool *pool = tc->pool;
-	unsigned long flags;
 	struct bio *bio;
 	struct bio_list bios;
 	struct blk_plug plug;
@@ -2184,10 +2179,10 @@
 
 	bio_list_init(&bios);
 
-	spin_lock_irqsave(&tc->lock, flags);
+	spin_lock_irq(&tc->lock);
 
 	if (bio_list_empty(&tc->deferred_bio_list)) {
-		spin_unlock_irqrestore(&tc->lock, flags);
+		spin_unlock_irq(&tc->lock);
 		return;
 	}
 
@@ -2196,7 +2191,7 @@
 	bio_list_merge(&bios, &tc->deferred_bio_list);
 	bio_list_init(&tc->deferred_bio_list);
 
-	spin_unlock_irqrestore(&tc->lock, flags);
+	spin_unlock_irq(&tc->lock);
 
 	blk_start_plug(&plug);
 	while ((bio = bio_list_pop(&bios))) {
@@ -2206,10 +2201,10 @@
 		 * prepared mappings to process.
 		 */
 		if (ensure_next_mapping(pool)) {
-			spin_lock_irqsave(&tc->lock, flags);
+			spin_lock_irq(&tc->lock);
 			bio_list_add(&tc->deferred_bio_list, bio);
 			bio_list_merge(&tc->deferred_bio_list, &bios);
-			spin_unlock_irqrestore(&tc->lock, flags);
+			spin_unlock_irq(&tc->lock);
 			break;
 		}
 
@@ -2222,6 +2217,7 @@
 			throttle_work_update(&pool->throttle);
 			dm_pool_issue_prefetches(pool->pmd);
 		}
+		cond_resched();
 	}
 	blk_finish_plug(&plug);
 }
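The cond_resched() added above, and its twin in process_thin_deferred_cells() below, simply yield the CPU between iterations so that draining a very large backlog in the pool worker cannot starve other tasks. A hedged, self-contained sketch of the pattern with made-up names:

#include <linux/bio.h>
#include <linux/sched.h>

/* Illustrative only: drain a private list, yielding the CPU periodically. */
static void example_drain(struct bio_list *bios)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bios))) {
		bio_endio(bio);		/* stand-in for the real per-bio work */
		cond_resched();		/* let other tasks run on long backlogs */
	}
}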
@@ -2264,16 +2260,15 @@
 static void process_thin_deferred_cells(struct thin_c *tc)
 {
 	struct pool *pool = tc->pool;
-	unsigned long flags;
 	struct list_head cells;
 	struct dm_bio_prison_cell *cell;
 	unsigned i, j, count;
 
 	INIT_LIST_HEAD(&cells);
 
-	spin_lock_irqsave(&tc->lock, flags);
+	spin_lock_irq(&tc->lock);
 	list_splice_init(&tc->deferred_cells, &cells);
-	spin_unlock_irqrestore(&tc->lock, flags);
+	spin_unlock_irq(&tc->lock);
 
 	if (list_empty(&cells))
 		return;
@@ -2294,9 +2289,9 @@
 				for (j = i; j < count; j++)
 					list_add(&pool->cell_sort_array[j]->user_list, &cells);
 
-				spin_lock_irqsave(&tc->lock, flags);
+				spin_lock_irq(&tc->lock);
 				list_splice(&cells, &tc->deferred_cells);
-				spin_unlock_irqrestore(&tc->lock, flags);
+				spin_unlock_irq(&tc->lock);
 				return;
 			}
 
@@ -2305,6 +2300,7 @@
 			else
 				pool->process_cell(tc, cell);
 		}
+		cond_resched();
 	} while (!list_empty(&cells));
 }
 
@@ -2349,7 +2345,6 @@
 
 static void process_deferred_bios(struct pool *pool)
 {
-	unsigned long flags;
 	struct bio *bio;
 	struct bio_list bios, bio_completions;
 	struct thin_c *tc;
@@ -2368,13 +2363,13 @@
 	bio_list_init(&bios);
 	bio_list_init(&bio_completions);
 
-	spin_lock_irqsave(&pool->lock, flags);
+	spin_lock_irq(&pool->lock);
 	bio_list_merge(&bios, &pool->deferred_flush_bios);
 	bio_list_init(&pool->deferred_flush_bios);
 
 	bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
 	bio_list_init(&pool->deferred_flush_completions);
-	spin_unlock_irqrestore(&pool->lock, flags);
+	spin_unlock_irq(&pool->lock);
 
 	if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
 	    !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
@@ -2392,8 +2387,16 @@
 	while ((bio = bio_list_pop(&bio_completions)))
 		bio_endio(bio);
 
-	while ((bio = bio_list_pop(&bios)))
-		generic_make_request(bio);
+	while ((bio = bio_list_pop(&bios))) {
+		/*
+		 * The data device was flushed as part of metadata commit,
+		 * so complete redundant flushes immediately.
+		 */
+		if (bio->bi_opf & REQ_PREFLUSH)
+			bio_endio(bio);
+		else
+			submit_bio_noacct(bio);
+	}
 }
 
 static void do_worker(struct work_struct *ws)
@@ -2657,12 +2660,11 @@
  */
 static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
 {
-	unsigned long flags;
 	struct pool *pool = tc->pool;
 
-	spin_lock_irqsave(&tc->lock, flags);
+	spin_lock_irq(&tc->lock);
 	bio_list_add(&tc->deferred_bio_list, bio);
-	spin_unlock_irqrestore(&tc->lock, flags);
+	spin_unlock_irq(&tc->lock);
 
 	wake_worker(pool);
 }
@@ -2678,13 +2680,12 @@
 
 static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 {
-	unsigned long flags;
 	struct pool *pool = tc->pool;
 
 	throttle_lock(&pool->throttle);
-	spin_lock_irqsave(&tc->lock, flags);
+	spin_lock_irq(&tc->lock);
 	list_add_tail(&cell->user_list, &tc->deferred_cells);
-	spin_unlock_irqrestore(&tc->lock, flags);
+	spin_unlock_irq(&tc->lock);
 	throttle_unlock(&pool->throttle);
 
 	wake_worker(pool);
@@ -2796,29 +2797,16 @@
 	}
 }
 
-static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
-{
-	struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
-	struct request_queue *q;
-
-	if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
-		return 1;
-
-	q = bdev_get_queue(pt->data_dev->bdev);
-	return bdi_congested(q->backing_dev_info, bdi_bits);
-}
-
 static void requeue_bios(struct pool *pool)
 {
-	unsigned long flags;
 	struct thin_c *tc;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(tc, &pool->active_thins, list) {
-		spin_lock_irqsave(&tc->lock, flags);
+		spin_lock_irq(&tc->lock);
 		bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
 		bio_list_init(&tc->retry_on_resume_list);
-		spin_unlock_irqrestore(&tc->lock, flags);
+		spin_unlock_irq(&tc->lock);
 	}
 	rcu_read_unlock();
 }
@@ -2921,12 +2909,15 @@
 	dm_bio_prison_destroy(pool->prison);
 	dm_kcopyd_client_destroy(pool->copier);
 
+	cancel_delayed_work_sync(&pool->waker);
+	cancel_delayed_work_sync(&pool->no_space_timeout);
 	if (pool->wq)
 		destroy_workqueue(pool->wq);
 
 	if (pool->next_mapping)
 		mempool_free(pool->next_mapping, &pool->mapping_pool);
 	mempool_exit(&pool->mapping_pool);
+	bio_uninit(&pool->flush_bio);
 	dm_deferred_set_destroy(pool->shared_read_ds);
 	dm_deferred_set_destroy(pool->all_io_ds);
 	kfree(pool);
@@ -2936,6 +2927,7 @@
 
 static struct pool *pool_create(struct mapped_device *pool_md,
 				struct block_device *metadata_dev,
+				struct block_device *data_dev,
 				unsigned long block_size,
 				int read_only, char **error)
 {
@@ -3006,6 +2998,7 @@
 	pool->low_water_triggered = false;
 	pool->suspended = true;
 	pool->out_of_data_space = false;
+	bio_init(&pool->flush_bio, NULL, 0);
 
 	pool->shared_read_ds = dm_deferred_set_create();
 	if (!pool->shared_read_ds) {
@@ -3043,6 +3036,7 @@
 	pool->last_commit_jiffies = jiffies;
 	pool->pool_md = pool_md;
 	pool->md_dev = metadata_dev;
+	pool->data_dev = data_dev;
 	__pool_table_insert(pool);
 
 	return pool;
@@ -3084,6 +3078,7 @@
 
 static struct pool *__pool_find(struct mapped_device *pool_md,
 				struct block_device *metadata_dev,
+				struct block_device *data_dev,
 				unsigned long block_size, int read_only,
 				char **error, int *created)
 {
@@ -3094,19 +3089,23 @@
 			*error = "metadata device already in use by a pool";
 			return ERR_PTR(-EBUSY);
 		}
+		if (pool->data_dev != data_dev) {
+			*error = "data device already in use by a pool";
+			return ERR_PTR(-EBUSY);
+		}
 		__pool_inc(pool);
 
 	} else {
 		pool = __pool_table_lookup(pool_md);
 		if (pool) {
-			if (pool->md_dev != metadata_dev) {
+			if (pool->md_dev != metadata_dev || pool->data_dev != data_dev) {
 				*error = "different pool cannot replace a pool";
 				return ERR_PTR(-EINVAL);
 			}
 			__pool_inc(pool);
 
 		} else {
-			pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
+			pool = pool_create(pool_md, metadata_dev, data_dev, block_size, read_only, error);
 			*created = 1;
 		}
 	}
@@ -3190,6 +3189,29 @@
 		       dm_device_name(pool->pool_md));
 
 	dm_table_event(pool->ti->table);
+}
+
+/*
+ * We need to flush the data device **before** committing the metadata.
+ *
+ * This ensures that the data blocks of any newly inserted mappings are
+ * properly written to non-volatile storage and won't be lost in case of a
+ * crash.
+ *
+ * Failure to do so can result in data corruption in the case of internal or
+ * external snapshots and in the case of newly provisioned blocks, when block
+ * zeroing is enabled.
+ */
+static int metadata_pre_commit_callback(void *context)
+{
+	struct pool *pool = context;
+	struct bio *flush_bio = &pool->flush_bio;
+
+	bio_reset(flush_bio);
+	bio_set_dev(flush_bio, pool->data_dev);
+	flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+
+	return submit_bio_wait(flush_bio);
 }
 
 static sector_t get_dev_size(struct block_device *bdev)
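Together with the new pool->flush_bio field, its bio_init() in pool_create() and the bio_uninit() in __pool_destroy() above, this callback gives the pool one embedded flush bio that is reset and reused before every metadata commit, so the data device is made durable first. A minimal sketch of that lifecycle, with hypothetical example_* names and assuming the single-argument bio_reset()/three-argument bio_init() of this kernel generation:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Illustrative only -- not dm-thin code. */
struct example_pool {
	struct block_device *data_dev;
	struct bio flush_bio;
};

static void example_pool_init(struct example_pool *ep, struct block_device *data_dev)
{
	ep->data_dev = data_dev;
	bio_init(&ep->flush_bio, NULL, 0);	/* no bvecs: an empty flush */
}

/* Run before each metadata commit: make the data blocks durable first. */
static int example_flush_data_dev(struct example_pool *ep)
{
	struct bio *flush_bio = &ep->flush_bio;

	bio_reset(flush_bio);
	bio_set_dev(flush_bio, ep->data_dev);
	flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	return submit_bio_wait(flush_bio);
}

static void example_pool_exit(struct example_pool *ep)
{
	bio_uninit(&ep->flush_bio);
}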
@@ -3335,7 +3357,7 @@
 		goto out;
 	}
 
-	pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
+	pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, data_dev->bdev,
 			   block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
 	if (IS_ERR(pool)) {
 		r = PTR_ERR(pool);
@@ -3361,6 +3383,7 @@
 	pt->low_water_blocks = low_water_blocks;
 	pt->adjusted_pf = pt->requested_pf = pf;
 	ti->num_flush_bios = 1;
+	ti->limit_swap_bios = true;
 
 	/*
 	 * Only need to enable discards if the pool should pass
@@ -3383,11 +3406,13 @@
 						calc_metadata_threshold(pt),
 						metadata_low_callback,
 						pool);
-	if (r)
+	if (r) {
+		ti->error = "Error registering metadata threshold";
 		goto out_flags_changed;
+	}
 
-	pt->callbacks.congested_fn = pool_is_congested;
-	dm_table_add_target_callbacks(ti->table, &pt->callbacks);
+	dm_pool_register_pre_commit_callback(pool->pmd,
+					     metadata_pre_commit_callback, pool);
 
 	mutex_unlock(&dm_thin_pool_table.mutex);
 
@@ -3412,15 +3437,14 @@
 	int r;
 	struct pool_c *pt = ti->private;
 	struct pool *pool = pt->pool;
-	unsigned long flags;
 
 	/*
 	 * As this is a singleton target, ti->begin is always zero.
 	 */
-	spin_lock_irqsave(&pool->lock, flags);
+	spin_lock_irq(&pool->lock);
 	bio_set_dev(bio, pt->data_dev->bdev);
 	r = DM_MAPIO_REMAPPED;
-	spin_unlock_irqrestore(&pool->lock, flags);
+	spin_unlock_irq(&pool->lock);
 
 	return r;
 }
@@ -3547,20 +3571,28 @@
 	 */
 	r = bind_control_target(pool, ti);
 	if (r)
-		return r;
+		goto out;
 
 	r = maybe_resize_data_dev(ti, &need_commit1);
 	if (r)
-		return r;
+		goto out;
 
 	r = maybe_resize_metadata_dev(ti, &need_commit2);
 	if (r)
-		return r;
+		goto out;
 
 	if (need_commit1 || need_commit2)
 		(void) commit(pool);
+out:
+	/*
+	 * When a thin-pool is PM_FAIL, it cannot be rebuilt if
+	 * bio is in deferred list. Therefore need to return 0
+	 * to allow pool_resume() to flush IO.
+	 */
+	if (r && get_pool_mode(pool) == PM_FAIL)
+		r = 0;
 
-	return 0;
+	return r;
 }
 
 static void pool_suspend_active_thins(struct pool *pool)
@@ -3591,7 +3623,6 @@
 {
 	struct pool_c *pt = ti->private;
 	struct pool *pool = pt->pool;
-	unsigned long flags;
 
 	/*
 	 * Must requeue active_thins' bios and then resume
@@ -3600,10 +3631,10 @@
 	requeue_bios(pool);
 	pool_resume_active_thins(pool);
 
-	spin_lock_irqsave(&pool->lock, flags);
+	spin_lock_irq(&pool->lock);
 	pool->low_water_triggered = false;
 	pool->suspended = false;
-	spin_unlock_irqrestore(&pool->lock, flags);
+	spin_unlock_irq(&pool->lock);
 
 	do_waker(&pool->waker.work);
 }
@@ -3612,11 +3643,10 @@
 {
 	struct pool_c *pt = ti->private;
 	struct pool *pool = pt->pool;
-	unsigned long flags;
 
-	spin_lock_irqsave(&pool->lock, flags);
+	spin_lock_irq(&pool->lock);
 	pool->suspended = true;
-	spin_unlock_irqrestore(&pool->lock, flags);
+	spin_unlock_irq(&pool->lock);
 
 	pool_suspend_active_thins(pool);
 }
@@ -3625,13 +3655,12 @@
 {
 	struct pool_c *pt = ti->private;
 	struct pool *pool = pt->pool;
-	unsigned long flags;
 
 	pool_resume_active_thins(pool);
 
-	spin_lock_irqsave(&pool->lock, flags);
+	spin_lock_irq(&pool->lock);
 	pool->suspended = false;
-	spin_unlock_irqrestore(&pool->lock, flags);
+	spin_unlock_irq(&pool->lock);
 }
 
 static void pool_postsuspend(struct dm_target *ti)
@@ -4077,7 +4106,7 @@
 	.name = "thin-pool",
 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
 		    DM_TARGET_IMMUTABLE,
-	.version = {1, 20, 0},
+	.version = {1, 22, 0},
 	.module = THIS_MODULE,
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,
@@ -4098,23 +4127,22 @@
  *--------------------------------------------------------------*/
 static void thin_get(struct thin_c *tc)
 {
-	atomic_inc(&tc->refcount);
+	refcount_inc(&tc->refcount);
 }
 
 static void thin_put(struct thin_c *tc)
 {
-	if (atomic_dec_and_test(&tc->refcount))
+	if (refcount_dec_and_test(&tc->refcount))
 		complete(&tc->can_destroy);
 }
 
 static void thin_dtr(struct dm_target *ti)
 {
 	struct thin_c *tc = ti->private;
-	unsigned long flags;
 
-	spin_lock_irqsave(&tc->pool->lock, flags);
+	spin_lock_irq(&tc->pool->lock);
 	list_del_rcu(&tc->list);
-	spin_unlock_irqrestore(&tc->pool->lock, flags);
+	spin_unlock_irq(&tc->pool->lock);
 	synchronize_rcu();
 
 	thin_put(tc);
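The refcount_t conversion above keeps the existing lifetime scheme -- the creator holds one reference, workers take extra ones via thin_get(), and the final thin_put() completes can_destroy, which the destructor waits on before freeing -- while adding overflow/underflow sanity checking. A hedged sketch of that pattern with hypothetical names:

#include <linux/completion.h>
#include <linux/refcount.h>

/* Illustrative only -- not dm-thin code. */
struct example_obj {
	refcount_t refcount;
	struct completion can_destroy;
};

static void example_init(struct example_obj *obj)
{
	refcount_set(&obj->refcount, 1);	/* creator's reference */
	init_completion(&obj->can_destroy);
}

static void example_get(struct example_obj *obj)
{
	refcount_inc(&obj->refcount);
}

static void example_put(struct example_obj *obj)
{
	if (refcount_dec_and_test(&obj->refcount))
		complete(&obj->can_destroy);
}

/* Teardown: drop the creator's reference, then wait out remaining users. */
static void example_destroy(struct example_obj *obj)
{
	example_put(obj);
	wait_for_completion(&obj->can_destroy);
	/* now safe to free @obj */
}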
@@ -4150,7 +4178,6 @@
 	struct thin_c *tc;
 	struct dm_dev *pool_dev, *origin_dev;
 	struct mapped_device *pool_md;
-	unsigned long flags;
 
 	mutex_lock(&dm_thin_pool_table.mutex);
 
@@ -4233,6 +4260,7 @@
 		goto bad;
 
 	ti->num_flush_bios = 1;
+	ti->limit_swap_bios = true;
 	ti->flush_supported = true;
 	ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);
 
@@ -4240,23 +4268,22 @@
 	if (tc->pool->pf.discard_enabled) {
 		ti->discards_supported = true;
 		ti->num_discard_bios = 1;
-		ti->split_discard_bios = false;
 	}
 
 	mutex_unlock(&dm_thin_pool_table.mutex);
 
-	spin_lock_irqsave(&tc->pool->lock, flags);
+	spin_lock_irq(&tc->pool->lock);
 	if (tc->pool->suspended) {
-		spin_unlock_irqrestore(&tc->pool->lock, flags);
+		spin_unlock_irq(&tc->pool->lock);
 		mutex_lock(&dm_thin_pool_table.mutex); /* reacquire for __pool_dec */
 		ti->error = "Unable to activate thin device while pool is suspended";
 		r = -EINVAL;
 		goto bad;
 	}
-	atomic_set(&tc->refcount, 1);
+	refcount_set(&tc->refcount, 1);
 	init_completion(&tc->can_destroy);
 	list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
-	spin_unlock_irqrestore(&tc->pool->lock, flags);
+	spin_unlock_irq(&tc->pool->lock);
 	/*
 	 * This synchronize_rcu() call is needed here otherwise we risk a
 	 * wake_worker() call finding no bios to process (because the newly
@@ -4457,7 +4484,7 @@
 
 static struct target_type thin_target = {
 	.name = "thin",
-	.version = {1, 20, 0},
+	.version = {1, 22, 0},
 	.module = THIS_MODULE,
 	.ctr = thin_ctr,
 	.dtr = thin_dtr,