forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 95099d4622f8cb224d94e314c7a8e0df60b13f87
diff --git a/kernel/drivers/md/dm-thin.c b/kernel/drivers/md/dm-thin.c
--- a/kernel/drivers/md/dm-thin.c
+++ b/kernel/drivers/md/dm-thin.c
@@ -231,6 +231,7 @@
 	struct dm_target *ti;	/* Only set if a pool target is bound */
 
 	struct mapped_device *pool_md;
+	struct block_device *data_dev;
 	struct block_device *md_dev;
 	struct dm_pool_metadata *pmd;
 
@@ -281,6 +282,8 @@
 	struct dm_bio_prison_cell **cell_sort_array;
 
 	mempool_t mapping_pool;
+
+	struct bio flush_bio;
 };
 
 static void metadata_operation_failed(struct pool *pool, const char *op, int r);
@@ -323,7 +326,6 @@
 	struct pool *pool;
 	struct dm_dev *data_dev;
 	struct dm_dev *metadata_dev;
-	struct dm_target_callbacks callbacks;
 
 	dm_block_t low_water_blocks;
 	struct pool_features requested_pf; /* Features requested during table load */
@@ -355,7 +357,7 @@
 	 * Ensures the thin is not destroyed until the worker has finished
 	 * iterating the active_thins list.
 	 */
-	atomic_t refcount;
+	refcount_t refcount;
 	struct completion can_destroy;
 };
 
@@ -609,13 +611,12 @@
 			    blk_status_t error)
 {
 	struct bio_list bios;
-	unsigned long flags;
 
 	bio_list_init(&bios);
 
-	spin_lock_irqsave(&tc->lock, flags);
+	spin_lock_irq(&tc->lock);
 	__merge_bio_list(&bios, master);
-	spin_unlock_irqrestore(&tc->lock, flags);
+	spin_unlock_irq(&tc->lock);
 
 	error_bio_list(&bios, error);
 }
@@ -623,15 +624,14 @@
 static void requeue_deferred_cells(struct thin_c *tc)
 {
 	struct pool *pool = tc->pool;
-	unsigned long flags;
 	struct list_head cells;
 	struct dm_bio_prison_cell *cell, *tmp;
 
 	INIT_LIST_HEAD(&cells);
 
-	spin_lock_irqsave(&tc->lock, flags);
+	spin_lock_irq(&tc->lock);
 	list_splice_init(&tc->deferred_cells, &cells);
-	spin_unlock_irqrestore(&tc->lock, flags);
+	spin_unlock_irq(&tc->lock);
 
 	list_for_each_entry_safe(cell, tmp, &cells, user_list)
 		cell_requeue(pool, cell);
@@ -640,14 +640,13 @@
 static void requeue_io(struct thin_c *tc)
 {
 	struct bio_list bios;
-	unsigned long flags;
 
 	bio_list_init(&bios);
 
-	spin_lock_irqsave(&tc->lock, flags);
+	spin_lock_irq(&tc->lock);
 	__merge_bio_list(&bios, &tc->deferred_bio_list);
 	__merge_bio_list(&bios, &tc->retry_on_resume_list);
-	spin_unlock_irqrestore(&tc->lock, flags);
+	spin_unlock_irq(&tc->lock);
 
 	error_bio_list(&bios, BLK_STS_DM_REQUEUE);
 	requeue_deferred_cells(tc);
@@ -756,10 +755,9 @@
 static void issue(struct thin_c *tc, struct bio *bio)
 {
 	struct pool *pool = tc->pool;
-	unsigned long flags;
 
 	if (!bio_triggers_commit(tc, bio)) {
-		generic_make_request(bio);
+		submit_bio_noacct(bio);
 		return;
 	}
 
@@ -777,9 +775,9 @@
 	 * Batch together any bios that trigger commits and then issue a
 	 * single commit for them in process_deferred_bios().
 	 */
-	spin_lock_irqsave(&pool->lock, flags);
+	spin_lock_irq(&pool->lock);
 	bio_list_add(&pool->deferred_flush_bios, bio);
-	spin_unlock_irqrestore(&pool->lock, flags);
+	spin_unlock_irq(&pool->lock);
 }
 
 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
@@ -886,12 +884,15 @@
 {
 	struct pool *pool = tc->pool;
 	unsigned long flags;
+	int has_work;
 
 	spin_lock_irqsave(&tc->lock, flags);
 	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
+	has_work = !bio_list_empty(&tc->deferred_bio_list);
 	spin_unlock_irqrestore(&tc->lock, flags);
 
-	wake_worker(pool);
+	if (has_work)
+		wake_worker(pool);
 }
 
 static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
@@ -960,7 +961,6 @@
 static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
 {
 	struct pool *pool = tc->pool;
-	unsigned long flags;
 
 	/*
 	 * If the bio has the REQ_FUA flag set we must commit the metadata
985985 * Batch together any bios that trigger commits and then issue a
986986 * single commit for them in process_deferred_bios().
987987 */
988
- spin_lock_irqsave(&pool->lock, flags);
988
+ spin_lock_irq(&pool->lock);
989989 bio_list_add(&pool->deferred_flush_completions, bio);
990
- spin_unlock_irqrestore(&pool->lock, flags);
990
+ spin_unlock_irq(&pool->lock);
991991 }
992992
993993 static void process_prepared_mapping(struct dm_thin_new_mapping *m)
@@ -1226,14 +1226,13 @@
 static void process_prepared(struct pool *pool, struct list_head *head,
 			     process_mapping_fn *fn)
 {
-	unsigned long flags;
 	struct list_head maps;
 	struct dm_thin_new_mapping *m, *tmp;
 
 	INIT_LIST_HEAD(&maps);
-	spin_lock_irqsave(&pool->lock, flags);
+	spin_lock_irq(&pool->lock);
 	list_splice_init(head, &maps);
-	spin_unlock_irqrestore(&pool->lock, flags);
+	spin_unlock_irq(&pool->lock);
 
 	list_for_each_entry_safe(m, tmp, &maps, list)
 		(*fn)(m);
@@ -1510,14 +1509,12 @@
 
 static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
 {
-	unsigned long flags;
-
 	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
 		DMWARN("%s: reached low water mark for data device: sending event.",
 		       dm_device_name(pool->pool_md));
-		spin_lock_irqsave(&pool->lock, flags);
+		spin_lock_irq(&pool->lock);
 		pool->low_water_triggered = true;
-		spin_unlock_irqrestore(&pool->lock, flags);
+		spin_unlock_irq(&pool->lock);
 		dm_table_event(pool->ti->table);
 	}
 }
@@ -1593,11 +1590,10 @@
 {
 	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 	struct thin_c *tc = h->tc;
-	unsigned long flags;
 
-	spin_lock_irqsave(&tc->lock, flags);
+	spin_lock_irq(&tc->lock);
 	bio_list_add(&tc->retry_on_resume_list, bio);
-	spin_unlock_irqrestore(&tc->lock, flags);
+	spin_unlock_irq(&tc->lock);
 }
 
 static blk_status_t should_error_unserviceable_bio(struct pool *pool)
@@ -2170,7 +2166,6 @@
 static void process_thin_deferred_bios(struct thin_c *tc)
 {
 	struct pool *pool = tc->pool;
-	unsigned long flags;
 	struct bio *bio;
 	struct bio_list bios;
 	struct blk_plug plug;
@@ -2184,10 +2179,10 @@
 
 	bio_list_init(&bios);
 
-	spin_lock_irqsave(&tc->lock, flags);
+	spin_lock_irq(&tc->lock);
 
 	if (bio_list_empty(&tc->deferred_bio_list)) {
-		spin_unlock_irqrestore(&tc->lock, flags);
+		spin_unlock_irq(&tc->lock);
 		return;
 	}
 
@@ -2196,7 +2191,7 @@
 	bio_list_merge(&bios, &tc->deferred_bio_list);
 	bio_list_init(&tc->deferred_bio_list);
 
-	spin_unlock_irqrestore(&tc->lock, flags);
+	spin_unlock_irq(&tc->lock);
 
 	blk_start_plug(&plug);
 	while ((bio = bio_list_pop(&bios))) {
@@ -2206,10 +2201,10 @@
 		 * prepared mappings to process.
 		 */
 		if (ensure_next_mapping(pool)) {
-			spin_lock_irqsave(&tc->lock, flags);
+			spin_lock_irq(&tc->lock);
 			bio_list_add(&tc->deferred_bio_list, bio);
 			bio_list_merge(&tc->deferred_bio_list, &bios);
-			spin_unlock_irqrestore(&tc->lock, flags);
+			spin_unlock_irq(&tc->lock);
 			break;
 		}
 
@@ -2264,16 +2259,15 @@
 static void process_thin_deferred_cells(struct thin_c *tc)
 {
 	struct pool *pool = tc->pool;
-	unsigned long flags;
 	struct list_head cells;
 	struct dm_bio_prison_cell *cell;
 	unsigned i, j, count;
 
 	INIT_LIST_HEAD(&cells);
 
-	spin_lock_irqsave(&tc->lock, flags);
+	spin_lock_irq(&tc->lock);
 	list_splice_init(&tc->deferred_cells, &cells);
-	spin_unlock_irqrestore(&tc->lock, flags);
+	spin_unlock_irq(&tc->lock);
 
 	if (list_empty(&cells))
 		return;
@@ -2294,9 +2288,9 @@
 			for (j = i; j < count; j++)
 				list_add(&pool->cell_sort_array[j]->user_list, &cells);
 
-			spin_lock_irqsave(&tc->lock, flags);
+			spin_lock_irq(&tc->lock);
 			list_splice(&cells, &tc->deferred_cells);
-			spin_unlock_irqrestore(&tc->lock, flags);
+			spin_unlock_irq(&tc->lock);
 			return;
 		}
 
@@ -2349,7 +2343,6 @@
 
 static void process_deferred_bios(struct pool *pool)
 {
-	unsigned long flags;
 	struct bio *bio;
 	struct bio_list bios, bio_completions;
 	struct thin_c *tc;
@@ -2368,13 +2361,13 @@
 	bio_list_init(&bios);
 	bio_list_init(&bio_completions);
 
-	spin_lock_irqsave(&pool->lock, flags);
+	spin_lock_irq(&pool->lock);
 	bio_list_merge(&bios, &pool->deferred_flush_bios);
 	bio_list_init(&pool->deferred_flush_bios);
 
 	bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
 	bio_list_init(&pool->deferred_flush_completions);
-	spin_unlock_irqrestore(&pool->lock, flags);
+	spin_unlock_irq(&pool->lock);
 
 	if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
 	    !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
@@ -2392,8 +2385,16 @@
 	while ((bio = bio_list_pop(&bio_completions)))
 		bio_endio(bio);
 
-	while ((bio = bio_list_pop(&bios)))
-		generic_make_request(bio);
+	while ((bio = bio_list_pop(&bios))) {
+		/*
+		 * The data device was flushed as part of metadata commit,
+		 * so complete redundant flushes immediately.
+		 */
+		if (bio->bi_opf & REQ_PREFLUSH)
+			bio_endio(bio);
+		else
+			submit_bio_noacct(bio);
+	}
 }
 
 static void do_worker(struct work_struct *ws)
@@ -2657,12 +2658,11 @@
  */
 static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
 {
-	unsigned long flags;
 	struct pool *pool = tc->pool;
 
-	spin_lock_irqsave(&tc->lock, flags);
+	spin_lock_irq(&tc->lock);
 	bio_list_add(&tc->deferred_bio_list, bio);
-	spin_unlock_irqrestore(&tc->lock, flags);
+	spin_unlock_irq(&tc->lock);
 
 	wake_worker(pool);
 }
@@ -2678,13 +2678,12 @@
 
 static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 {
-	unsigned long flags;
 	struct pool *pool = tc->pool;
 
 	throttle_lock(&pool->throttle);
-	spin_lock_irqsave(&tc->lock, flags);
+	spin_lock_irq(&tc->lock);
 	list_add_tail(&cell->user_list, &tc->deferred_cells);
-	spin_unlock_irqrestore(&tc->lock, flags);
+	spin_unlock_irq(&tc->lock);
 	throttle_unlock(&pool->throttle);
 
 	wake_worker(pool);
@@ -2796,29 +2795,16 @@
 	}
 }
 
-static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
-{
-	struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
-	struct request_queue *q;
-
-	if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
-		return 1;
-
-	q = bdev_get_queue(pt->data_dev->bdev);
-	return bdi_congested(q->backing_dev_info, bdi_bits);
-}
-
 static void requeue_bios(struct pool *pool)
 {
-	unsigned long flags;
 	struct thin_c *tc;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(tc, &pool->active_thins, list) {
-		spin_lock_irqsave(&tc->lock, flags);
+		spin_lock_irq(&tc->lock);
 		bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
 		bio_list_init(&tc->retry_on_resume_list);
-		spin_unlock_irqrestore(&tc->lock, flags);
+		spin_unlock_irq(&tc->lock);
 	}
 	rcu_read_unlock();
 }
@@ -2927,6 +2913,7 @@
 	if (pool->next_mapping)
 		mempool_free(pool->next_mapping, &pool->mapping_pool);
 	mempool_exit(&pool->mapping_pool);
+	bio_uninit(&pool->flush_bio);
 	dm_deferred_set_destroy(pool->shared_read_ds);
 	dm_deferred_set_destroy(pool->all_io_ds);
 	kfree(pool);
@@ -2936,6 +2923,7 @@
 
 static struct pool *pool_create(struct mapped_device *pool_md,
 				struct block_device *metadata_dev,
+				struct block_device *data_dev,
 				unsigned long block_size,
 				int read_only, char **error)
 {
@@ -3006,6 +2994,7 @@
 	pool->low_water_triggered = false;
 	pool->suspended = true;
 	pool->out_of_data_space = false;
+	bio_init(&pool->flush_bio, NULL, 0);
 
 	pool->shared_read_ds = dm_deferred_set_create();
 	if (!pool->shared_read_ds) {
@@ -3043,6 +3032,7 @@
 	pool->last_commit_jiffies = jiffies;
 	pool->pool_md = pool_md;
 	pool->md_dev = metadata_dev;
+	pool->data_dev = data_dev;
 	__pool_table_insert(pool);
 
 	return pool;
@@ -3084,6 +3074,7 @@
 
 static struct pool *__pool_find(struct mapped_device *pool_md,
 				struct block_device *metadata_dev,
+				struct block_device *data_dev,
 				unsigned long block_size, int read_only,
 				char **error, int *created)
 {
@@ -3094,19 +3085,23 @@
 			*error = "metadata device already in use by a pool";
 			return ERR_PTR(-EBUSY);
 		}
+		if (pool->data_dev != data_dev) {
+			*error = "data device already in use by a pool";
+			return ERR_PTR(-EBUSY);
+		}
 		__pool_inc(pool);
 
 	} else {
 		pool = __pool_table_lookup(pool_md);
 		if (pool) {
-			if (pool->md_dev != metadata_dev) {
+			if (pool->md_dev != metadata_dev || pool->data_dev != data_dev) {
 				*error = "different pool cannot replace a pool";
 				return ERR_PTR(-EINVAL);
 			}
 			__pool_inc(pool);
 
 		} else {
-			pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
+			pool = pool_create(pool_md, metadata_dev, data_dev, block_size, read_only, error);
 			*created = 1;
 		}
 	}
@@ -3190,6 +3185,29 @@
 		       dm_device_name(pool->pool_md));
 
 	dm_table_event(pool->ti->table);
+}
+
+/*
+ * We need to flush the data device **before** committing the metadata.
+ *
+ * This ensures that the data blocks of any newly inserted mappings are
+ * properly written to non-volatile storage and won't be lost in case of a
+ * crash.
+ *
+ * Failure to do so can result in data corruption in the case of internal or
+ * external snapshots and in the case of newly provisioned blocks, when block
+ * zeroing is enabled.
+ */
+static int metadata_pre_commit_callback(void *context)
+{
+	struct pool *pool = context;
+	struct bio *flush_bio = &pool->flush_bio;
+
+	bio_reset(flush_bio);
+	bio_set_dev(flush_bio, pool->data_dev);
+	flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+
+	return submit_bio_wait(flush_bio);
 }
 
 static sector_t get_dev_size(struct block_device *bdev)
@@ -3335,7 +3353,7 @@
 		goto out;
 	}
 
-	pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
+	pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, data_dev->bdev,
 			   block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
 	if (IS_ERR(pool)) {
 		r = PTR_ERR(pool);
@@ -3383,11 +3401,13 @@
 						calc_metadata_threshold(pt),
 						metadata_low_callback,
 						pool);
-	if (r)
+	if (r) {
+		ti->error = "Error registering metadata threshold";
 		goto out_flags_changed;
+	}
 
-	pt->callbacks.congested_fn = pool_is_congested;
-	dm_table_add_target_callbacks(ti->table, &pt->callbacks);
+	dm_pool_register_pre_commit_callback(pool->pmd,
+					     metadata_pre_commit_callback, pool);
 
 	mutex_unlock(&dm_thin_pool_table.mutex);
 
@@ -3412,15 +3432,14 @@
 	int r;
 	struct pool_c *pt = ti->private;
 	struct pool *pool = pt->pool;
-	unsigned long flags;
 
 	/*
 	 * As this is a singleton target, ti->begin is always zero.
 	 */
-	spin_lock_irqsave(&pool->lock, flags);
+	spin_lock_irq(&pool->lock);
 	bio_set_dev(bio, pt->data_dev->bdev);
 	r = DM_MAPIO_REMAPPED;
-	spin_unlock_irqrestore(&pool->lock, flags);
+	spin_unlock_irq(&pool->lock);
 
 	return r;
 }
@@ -3591,7 +3610,6 @@
 {
 	struct pool_c *pt = ti->private;
 	struct pool *pool = pt->pool;
-	unsigned long flags;
 
 	/*
 	 * Must requeue active_thins' bios and then resume
@@ -3600,10 +3618,10 @@
 	requeue_bios(pool);
 	pool_resume_active_thins(pool);
 
-	spin_lock_irqsave(&pool->lock, flags);
+	spin_lock_irq(&pool->lock);
 	pool->low_water_triggered = false;
 	pool->suspended = false;
-	spin_unlock_irqrestore(&pool->lock, flags);
+	spin_unlock_irq(&pool->lock);
 
 	do_waker(&pool->waker.work);
 }
@@ -3612,11 +3630,10 @@
 {
 	struct pool_c *pt = ti->private;
 	struct pool *pool = pt->pool;
-	unsigned long flags;
 
-	spin_lock_irqsave(&pool->lock, flags);
+	spin_lock_irq(&pool->lock);
 	pool->suspended = true;
-	spin_unlock_irqrestore(&pool->lock, flags);
+	spin_unlock_irq(&pool->lock);
 
 	pool_suspend_active_thins(pool);
 }
@@ -3625,13 +3642,12 @@
 {
 	struct pool_c *pt = ti->private;
 	struct pool *pool = pt->pool;
-	unsigned long flags;
 
 	pool_resume_active_thins(pool);
 
-	spin_lock_irqsave(&pool->lock, flags);
+	spin_lock_irq(&pool->lock);
 	pool->suspended = false;
-	spin_unlock_irqrestore(&pool->lock, flags);
+	spin_unlock_irq(&pool->lock);
 }
 
 static void pool_postsuspend(struct dm_target *ti)
@@ -4077,7 +4093,7 @@
 	.name = "thin-pool",
 	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
 		    DM_TARGET_IMMUTABLE,
-	.version = {1, 20, 0},
+	.version = {1, 22, 0},
 	.module = THIS_MODULE,
 	.ctr = pool_ctr,
 	.dtr = pool_dtr,
@@ -4098,23 +4114,22 @@
  *--------------------------------------------------------------*/
 static void thin_get(struct thin_c *tc)
 {
-	atomic_inc(&tc->refcount);
+	refcount_inc(&tc->refcount);
 }
 
 static void thin_put(struct thin_c *tc)
 {
-	if (atomic_dec_and_test(&tc->refcount))
+	if (refcount_dec_and_test(&tc->refcount))
 		complete(&tc->can_destroy);
 }
 
 static void thin_dtr(struct dm_target *ti)
 {
 	struct thin_c *tc = ti->private;
-	unsigned long flags;
 
-	spin_lock_irqsave(&tc->pool->lock, flags);
+	spin_lock_irq(&tc->pool->lock);
 	list_del_rcu(&tc->list);
-	spin_unlock_irqrestore(&tc->pool->lock, flags);
+	spin_unlock_irq(&tc->pool->lock);
 	synchronize_rcu();
 
 	thin_put(tc);
@@ -4150,7 +4165,6 @@
 	struct thin_c *tc;
 	struct dm_dev *pool_dev, *origin_dev;
 	struct mapped_device *pool_md;
-	unsigned long flags;
 
 	mutex_lock(&dm_thin_pool_table.mutex);
 
@@ -4240,23 +4254,22 @@
 	if (tc->pool->pf.discard_enabled) {
 		ti->discards_supported = true;
 		ti->num_discard_bios = 1;
-		ti->split_discard_bios = false;
 	}
 
 	mutex_unlock(&dm_thin_pool_table.mutex);
 
-	spin_lock_irqsave(&tc->pool->lock, flags);
+	spin_lock_irq(&tc->pool->lock);
 	if (tc->pool->suspended) {
-		spin_unlock_irqrestore(&tc->pool->lock, flags);
+		spin_unlock_irq(&tc->pool->lock);
 		mutex_lock(&dm_thin_pool_table.mutex); /* reacquire for __pool_dec */
 		ti->error = "Unable to activate thin device while pool is suspended";
 		r = -EINVAL;
 		goto bad;
 	}
-	atomic_set(&tc->refcount, 1);
+	refcount_set(&tc->refcount, 1);
 	init_completion(&tc->can_destroy);
 	list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
-	spin_unlock_irqrestore(&tc->pool->lock, flags);
+	spin_unlock_irq(&tc->pool->lock);
 	/*
 	 * This synchronize_rcu() call is needed here otherwise we risk a
 	 * wake_worker() call finding no bios to process (because the newly
@@ -4457,7 +4470,7 @@
 
 static struct target_type thin_target = {
 	.name = "thin",
-	.version = {1, 20, 0},
+	.version = {1, 22, 0},
 	.module = THIS_MODULE,
 	.ctr = thin_ctr,
 	.dtr = thin_dtr,