commit 102a0743326a03cd1a1202ceda21e175b7d3575c (2024-02-20)
kernel/drivers/md/dm-snap.c
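Squashed diff against dm-snap.c (vendor tree path above). Two reworks land together: the exception hash tables move from list_head buckets guarded by one per-snapshot mutex to bit-locked hlist_bl buckets under a rw_semaphore, eliminating the single-lock bottleneck on the I/O path; and the snapshot target gains two optional features, discard_zeroes_cow and discard_passdown_origin, reported through status and advertised via a new io_hints hook. The target versions at the bottom bump to snapshot 1.16.0 and snapshot-merge 1.5.0 accordingly.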
@@ -1,6 +1,4 @@
 /*
- * dm-snapshot.c
- *
  * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
  *
  * This file is released under the GPL.
@@ -13,6 +11,7 @@
 #include <linux/init.h>
 #include <linux/kdev_t.h>
 #include <linux/list.h>
+#include <linux/list_bl.h>
 #include <linux/mempool.h>
 #include <linux/module.h>
 #include <linux/slab.h>
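The new include pulls in the bit-locked list primitives that replace the plain list_head buckets below. An hlist_bl_head is a single pointer whose least significant bit doubles as a spinlock, so every hash bucket carries its own lock at no extra memory cost. Roughly, simplified from <linux/list_bl.h> (not the verbatim kernel definition):

        struct hlist_bl_head {
                struct hlist_bl_node *first;    /* bit 0 doubles as the lock bit */
        };

        static inline void hlist_bl_lock(struct hlist_bl_head *b)
        {
                bit_spin_lock(0, (unsigned long *)b);   /* spin until bit 0 clears, then set it */
        }

This works because list entries are at least pointer-aligned, so bit 0 of any valid node address is always zero and free to carry the lock.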
@@ -43,11 +42,11 @@
 struct dm_exception_table {
 	uint32_t hash_mask;
 	unsigned hash_shift;
-	struct list_head *table;
+	struct hlist_bl_head *table;
 };
 
 struct dm_snapshot {
-	struct mutex lock;
+	struct rw_semaphore lock;
 
 	struct dm_dev *origin;
 	struct dm_dev *cow;
@@ -75,7 +74,9 @@
 
 	atomic_t pending_exceptions_count;
 
-	/* Protected by "lock" */
+	spinlock_t pe_allocation_lock;
+
+	/* Protected by "pe_allocation_lock" */
 	sector_t exception_start_sequence;
 
 	/* Protected by kcopyd single-threaded callback */
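Two locks now share the work the single mutex used to do. The rw_semaphore lets many bios hold s->lock for read concurrently, while the small pe_allocation_lock spinlock serializes only allocation-side state: the exception-store allocation and the sequence counter. A sketch of the intended split, assuming the reader/writer roles established later in this diff:

        down_read(&s->lock);                    /* I/O fast path: shared */
        ...
        spin_lock(&s->pe_allocation_lock);      /* short exclusive section */
        seq = s->exception_start_sequence++;
        spin_unlock(&s->pe_allocation_lock);
        ...
        up_read(&s->lock);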
@@ -130,7 +131,10 @@
 	 * - I/O error while merging
 	 *   => stop merging; set merge_failed; process I/O normally.
 	 */
-	int merge_failed;
+	bool merge_failed:1;
+
+	bool discard_zeroes_cow:1;
+	bool discard_passdown_origin:1;
 
 	/*
 	 * Incoming bios that overlap with chunks being merged must wait
@@ -461,9 +465,9 @@
 		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
 			continue;
 
-		mutex_lock(&s->lock);
+		down_read(&s->lock);
 		active = s->active;
-		mutex_unlock(&s->lock);
+		up_read(&s->lock);
 
 		if (active) {
 			if (snap_src)
@@ -622,6 +626,36 @@
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
+static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk);
+
+/* Lock to protect access to the completed and pending exception hash tables. */
+struct dm_exception_table_lock {
+	struct hlist_bl_head *complete_slot;
+	struct hlist_bl_head *pending_slot;
+};
+
+static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk,
+					 struct dm_exception_table_lock *lock)
+{
+	struct dm_exception_table *complete = &s->complete;
+	struct dm_exception_table *pending = &s->pending;
+
+	lock->complete_slot = &complete->table[exception_hash(complete, chunk)];
+	lock->pending_slot = &pending->table[exception_hash(pending, chunk)];
+}
+
+static void dm_exception_table_lock(struct dm_exception_table_lock *lock)
+{
+	hlist_bl_lock(lock->complete_slot);
+	hlist_bl_lock(lock->pending_slot);
+}
+
+static void dm_exception_table_unlock(struct dm_exception_table_lock *lock)
+{
+	hlist_bl_unlock(lock->pending_slot);
+	hlist_bl_unlock(lock->complete_slot);
+}
+
 static int dm_exception_table_init(struct dm_exception_table *et,
 				   uint32_t size, unsigned hash_shift)
 {
@@ -629,12 +663,12 @@
 
 	et->hash_shift = hash_shift;
 	et->hash_mask = size - 1;
-	et->table = dm_vcalloc(size, sizeof(struct list_head));
+	et->table = dm_vcalloc(size, sizeof(struct hlist_bl_head));
 	if (!et->table)
 		return -ENOMEM;
 
 	for (i = 0; i < size; i++)
-		INIT_LIST_HEAD(et->table + i);
+		INIT_HLIST_BL_HEAD(et->table + i);
 
 	return 0;
 }
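Callers never touch the bucket bit-locks directly. For a given chunk they resolve both bucket addresses once (the completed and pending tables may be sized differently, hence two hashes), then take the locks in a fixed order, completed slot first and pending slot second, and release them in reverse. The fixed order is what keeps two CPUs working on the same chunk deadlock-free. Typical usage, sketched from the helpers above:

        struct dm_exception_table_lock lock;

        dm_exception_table_lock_init(s, chunk, &lock);
        dm_exception_table_lock(&lock);
        /* ... look up / insert / remove exceptions for this chunk ... */
        dm_exception_table_unlock(&lock);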
@@ -642,15 +676,16 @@
 static void dm_exception_table_exit(struct dm_exception_table *et,
 				    struct kmem_cache *mem)
 {
-	struct list_head *slot;
-	struct dm_exception *ex, *next;
+	struct hlist_bl_head *slot;
+	struct dm_exception *ex;
+	struct hlist_bl_node *pos, *n;
 	int i, size;
 
 	size = et->hash_mask + 1;
 	for (i = 0; i < size; i++) {
 		slot = et->table + i;
 
-		list_for_each_entry_safe (ex, next, slot, hash_list)
+		hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list)
 			kmem_cache_free(mem, ex);
 	}
 
@@ -664,7 +699,7 @@
 
 static void dm_remove_exception(struct dm_exception *e)
 {
-	list_del(&e->hash_list);
+	hlist_bl_del(&e->hash_list);
 }
 
 /*
@@ -674,11 +709,12 @@
 static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
 						chunk_t chunk)
 {
-	struct list_head *slot;
+	struct hlist_bl_head *slot;
+	struct hlist_bl_node *pos;
 	struct dm_exception *e;
 
 	slot = &et->table[exception_hash(et, chunk)];
-	list_for_each_entry (e, slot, hash_list)
+	hlist_bl_for_each_entry(e, pos, slot, hash_list)
 		if (chunk >= e->old_chunk &&
 		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
 			return e;
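The hlist_bl iterators need an explicit struct hlist_bl_node cursor, which is all the extra pos (and n, for the _safe variant) locals are for. The range test itself is unchanged: one table entry can stand for a run of consecutive chunks. For instance, an entry with old_chunk == 10 and dm_consecutive_chunk_count() == 3 covers chunks 10 through 13, so a lookup of chunk 12 matches it:

        /* illustrative values only */
        chunk >= e->old_chunk                                   /* 12 >= 10 */
        chunk <= e->old_chunk + dm_consecutive_chunk_count(e)   /* 12 <= 13 */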
@@ -725,7 +761,8 @@
 static void dm_insert_exception(struct dm_exception_table *eh,
 				struct dm_exception *new_e)
 {
-	struct list_head *l;
+	struct hlist_bl_head *l;
+	struct hlist_bl_node *pos;
 	struct dm_exception *e = NULL;
 
 	l = &eh->table[exception_hash(eh, new_e->old_chunk)];
@@ -735,7 +772,7 @@
 		goto out;
 
 	/* List is ordered by old_chunk */
-	list_for_each_entry_reverse(e, l, hash_list) {
+	hlist_bl_for_each_entry(e, pos, l, hash_list) {
 		/* Insert after an existing chunk? */
 		if (new_e->old_chunk == (e->old_chunk +
 					 dm_consecutive_chunk_count(e) + 1) &&
@@ -756,12 +793,24 @@
 			return;
 		}
 
-		if (new_e->old_chunk > e->old_chunk)
+		if (new_e->old_chunk < e->old_chunk)
 			break;
 	}
 
 out:
-	list_add(&new_e->hash_list, e ? &e->hash_list : l);
+	if (!e) {
+		/*
+		 * Either the table doesn't support consecutive chunks or slot
+		 * l is empty.
+		 */
+		hlist_bl_add_head(&new_e->hash_list, l);
+	} else if (new_e->old_chunk < e->old_chunk) {
+		/* Add before an existing exception */
+		hlist_bl_add_before(&new_e->hash_list, &e->hash_list);
+	} else {
+		/* Add to l's tail: e is the last exception in this slot */
+		hlist_bl_add_behind(&new_e->hash_list, &e->hash_list);
+	}
 }
 
 /*
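hlist_bl is singly linked, so there is no _reverse iterator; the slot is now kept and walked in ascending old_chunk order, and the comparison flips from > to <. The single list_add() could express "insert relative to where the reverse scan stopped", but the forward walk needs the three explicit cases above: an empty slot (or a store without consecutive-chunk support), inserting in front of a larger neighbour, or appending behind the last entry. A concrete walk, with hypothetical chunk numbers: a slot holding 3 -> 5 -> 9 and a new exception for chunk 7 passes 3 and 5, stops at 9 (since 7 < 9), and lands between 5 and 9 via hlist_bl_add_before(). The merge-with-neighbour logic for consecutive chunks is untouched.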
@@ -770,6 +819,7 @@
 */
 static int dm_add_exception(void *context, chunk_t old, chunk_t new)
 {
+	struct dm_exception_table_lock lock;
 	struct dm_snapshot *s = context;
 	struct dm_exception *e;
 
@@ -782,7 +832,17 @@
 	/* Consecutive_count is implicitly initialised to zero */
 	e->new_chunk = new;
 
+	/*
+	 * Although there is no need to lock access to the exception tables
+	 * here, if we don't then hlist_bl_add_head(), called by
+	 * dm_insert_exception(), will complain about accessing the
+	 * corresponding list without locking it first.
+	 */
+	dm_exception_table_lock_init(s, old, &lock);
+
+	dm_exception_table_lock(&lock);
 	dm_insert_exception(&s->complete, e);
+	dm_exception_table_unlock(&lock);
 
 	return 0;
 }
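dm_add_exception() runs strictly single-threaded: it is the callback the exception-store backend invokes once per on-disk exception while the snapshot is loaded at construction time, before any I/O is mapped. The bucket locking here exists only to keep list_bl's debug checks quiet, exactly as the comment says. For orientation, the constructor wires it up roughly like this (the call site is not part of this diff, so treat the exact line as an assumption):

        r = s->store->type->read_metadata(s->store, dm_add_exception, (void *)s);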
@@ -811,7 +871,7 @@
 {
 	/* use a fixed size of 2MB */
 	unsigned long mem = 2 * 1024 * 1024;
-	mem /= sizeof(struct list_head);
+	mem /= sizeof(struct hlist_bl_head);
 
 	return mem;
 }
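A quiet capacity win rides along here: a struct list_head is two pointers, an hlist_bl_head is one, so the same fixed 2MB budget now buys twice as many hash buckets on a 64-bit kernel:

        /* 64-bit: sizeof(struct list_head) == 16, sizeof(struct hlist_bl_head) == 8 */
        2 * 1024 * 1024 / 16 == 131072   /* max buckets before */
        2 * 1024 * 1024 /  8 == 262144   /* max buckets after  */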
@@ -931,7 +991,7 @@
 	int r;
 	chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
 
-	mutex_lock(&s->lock);
+	down_write(&s->lock);
 
 	/*
 	 * Process chunks (and associated exceptions) in reverse order
@@ -946,7 +1006,7 @@
 	b = __release_queued_bios_after_merge(s);
 
 out:
-	mutex_unlock(&s->lock);
+	up_write(&s->lock);
 	if (b)
 		flush_bios(b);
 
@@ -1005,9 +1065,9 @@
 		if (linear_chunks < 0) {
 			DMERR("Read error in exception store: "
 			      "shutting down merge");
-			mutex_lock(&s->lock);
-			s->merge_failed = 1;
-			mutex_unlock(&s->lock);
+			down_write(&s->lock);
+			s->merge_failed = true;
+			up_write(&s->lock);
 		}
 		goto shut;
 	}
@@ -1048,16 +1108,17 @@
 		previous_count = read_pending_exceptions_done_count();
 	}
 
-	mutex_lock(&s->lock);
+	down_write(&s->lock);
 	s->first_merging_chunk = old_chunk;
 	s->num_merging_chunks = linear_chunks;
-	mutex_unlock(&s->lock);
+	up_write(&s->lock);
 
 	/* Wait until writes to all 'linear_chunks' drain */
 	for (i = 0; i < linear_chunks; i++)
 		__check_for_conflicting_io(s, old_chunk + i);
 
-	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
+	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 1 << DM_KCOPYD_SNAP_MERGE,
+		       merge_callback, s);
 	return;
 
 shut:
@@ -1109,10 +1170,10 @@
 	return;
 
 shut:
-	mutex_lock(&s->lock);
-	s->merge_failed = 1;
+	down_write(&s->lock);
+	s->merge_failed = true;
 	b = __release_queued_bios_after_merge(s);
-	mutex_unlock(&s->lock);
+	up_write(&s->lock);
 	error_bios(b);
 
 	merge_shutdown(s);
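All the merge-side and failure paths in these hunks take the semaphore in write mode, preserving the exclusivity the mutex gave them; only the per-bio mapping paths further down relax to down_read(). The kcopyd call also changes: merge copies are now submitted with 1 << DM_KCOPYD_SNAP_MERGE rather than a bare 0, so kcopyd can recognise snapshot-merge jobs. The flag itself is defined in the dm-kcopyd header, outside this diff.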
@@ -1134,12 +1195,64 @@
 	clear_bit(SHUTDOWN_MERGE, &s->state_bits);
 }
 
+static int parse_snapshot_features(struct dm_arg_set *as, struct dm_snapshot *s,
+				   struct dm_target *ti)
+{
+	int r;
+	unsigned argc;
+	const char *arg_name;
+
+	static const struct dm_arg _args[] = {
+		{0, 2, "Invalid number of feature arguments"},
+	};
+
+	/*
+	 * No feature arguments supplied.
+	 */
+	if (!as->argc)
+		return 0;
+
+	r = dm_read_arg_group(_args, as, &argc, &ti->error);
+	if (r)
+		return -EINVAL;
+
+	while (argc && !r) {
+		arg_name = dm_shift_arg(as);
+		argc--;
+
+		if (!strcasecmp(arg_name, "discard_zeroes_cow"))
+			s->discard_zeroes_cow = true;
+
+		else if (!strcasecmp(arg_name, "discard_passdown_origin"))
+			s->discard_passdown_origin = true;
+
+		else {
+			ti->error = "Unrecognised feature requested";
+			r = -EINVAL;
+			break;
+		}
+	}
+
+	if (!s->discard_zeroes_cow && s->discard_passdown_origin) {
+		/*
+		 * TODO: really these are disjoint.. but ti->num_discard_bios
+		 * and dm_bio_get_target_bio_nr() require rigid constraints.
+		 */
+		ti->error = "discard_passdown_origin feature depends on discard_zeroes_cow";
+		r = -EINVAL;
+	}
+
+	return r;
+}
+
 /*
- * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size>
+ * Construct a snapshot mapping:
+ * <origin_dev> <COW-dev> <p|po|n> <chunk-size> [<# feature args> [<arg>]*]
 */
 static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
 	struct dm_snapshot *s;
+	struct dm_arg_set as;
 	int i;
 	int r = -EINVAL;
 	char *origin_path, *cow_path;
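Feature parsing follows the usual device-mapper convention: after the four positional parameters comes an optional argument count followed by that many feature words, validated here against a maximum of two by dm_read_arg_group(). A hypothetical table line enabling both features (device paths and sizes invented for illustration) would be:

        0 2097152 snapshot /dev/vg0/base /dev/vg0/cow P 8 2 discard_zeroes_cow discard_passdown_origin

In snapshot_ctr() below, as.argc/as.argv are seeded with the raw arguments and dm_consume_args(&as, 4) skips the positional ones, so parse_snapshot_features() only ever sees the optional tail.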
@@ -1147,8 +1260,8 @@
 	unsigned args_used, num_flush_bios = 1;
 	fmode_t origin_mode = FMODE_READ;
 
-	if (argc != 4) {
-		ti->error = "requires exactly 4 arguments";
+	if (argc < 4) {
+		ti->error = "requires 4 or more arguments";
 		r = -EINVAL;
 		goto bad;
 	}
@@ -1164,6 +1277,13 @@
 		r = -ENOMEM;
 		goto bad;
 	}
+
+	as.argc = argc;
+	as.argv = argv;
+	dm_consume_args(&as, 4);
+	r = parse_snapshot_features(&as, s, ti);
+	if (r)
+		goto bad_features;
 
 	origin_path = argv[0];
 	argv++;
@@ -1208,14 +1328,15 @@
 	s->snapshot_overflowed = 0;
 	s->active = 0;
 	atomic_set(&s->pending_exceptions_count, 0);
+	spin_lock_init(&s->pe_allocation_lock);
 	s->exception_start_sequence = 0;
 	s->exception_complete_sequence = 0;
 	s->out_of_order_tree = RB_ROOT;
-	mutex_init(&s->lock);
+	init_rwsem(&s->lock);
 	INIT_LIST_HEAD(&s->list);
 	spin_lock_init(&s->pe_lock);
 	s->state_bits = 0;
-	s->merge_failed = 0;
+	s->merge_failed = false;
 	s->first_merging_chunk = 0;
 	s->num_merging_chunks = 0;
 	bio_list_init(&s->bios_queued_during_merge);
@@ -1250,6 +1371,8 @@
 
 	ti->private = s;
 	ti->num_flush_bios = num_flush_bios;
+	if (s->discard_zeroes_cow)
+		ti->num_discard_bios = (s->discard_passdown_origin ? 2 : 1);
 	ti->per_io_data_size = sizeof(struct dm_snap_tracked_chunk);
 
 	/* Add snapshot to the list of snapshots for this origin */
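Setting num_discard_bios to 2 asks DM core to clone each incoming discard twice for this target, and dm_bio_get_target_bio_nr() later tells the clones apart. The dispatch this enables, sketched with the logic that appears in snapshot_map() further down:

        if (bio_op(bio) == REQ_OP_DISCARD) {
                if (s->discard_passdown_origin && dm_bio_get_target_bio_nr(bio)) {
                        /* clone nr 1: pass the discard down to the origin device */
                } else {
                        /* clone nr 0: zero the chunk's COW exception, if one exists */
                }
        }

This is also why discard_passdown_origin cannot stand alone: as the constructor wires it, the second bio only exists when discard_zeroes_cow set num_discard_bios in the first place.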
@@ -1298,29 +1421,22 @@
 
 bad_read_metadata:
 	unregister_snapshot(s);
-
 bad_load_and_register:
 	mempool_exit(&s->pending_pool);
-
 bad_pending_pool:
 	dm_kcopyd_client_destroy(s->kcopyd_client);
-
 bad_kcopyd:
 	dm_exception_table_exit(&s->pending, pending_cache);
 	dm_exception_table_exit(&s->complete, exception_cache);
-
 bad_hash_tables:
 	dm_exception_store_destroy(s->store);
-
 bad_store:
 	dm_put_device(ti, s->cow);
-
 bad_cow:
 	dm_put_device(ti, s->origin);
-
 bad_origin:
+bad_features:
 	kfree(s);
-
 bad:
 	return r;
 }
@@ -1379,9 +1495,9 @@
 	/* Check whether exception handover must be cancelled */
 	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
 	if (snap_src && snap_dest && (s == snap_src)) {
-		mutex_lock(&snap_dest->lock);
+		down_write(&snap_dest->lock);
 		snap_dest->valid = 0;
-		mutex_unlock(&snap_dest->lock);
+		up_write(&snap_dest->lock);
 		DMERR("Cancelling snapshot handover.");
 	}
 	up_read(&_origins_lock);
@@ -1411,8 +1527,6 @@
 	mempool_exit(&s->pending_pool);
 
 	dm_exception_store_destroy(s->store);
-
-	mutex_destroy(&s->lock);
 
 	bio_uninit(&s->flush_bio);
 
@@ -1480,7 +1594,7 @@
 	while (bio) {
 		n = bio->bi_next;
 		bio->bi_next = NULL;
-		generic_make_request(bio);
+		submit_bio_noacct(bio);
 		bio = n;
 	}
 }
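Two housekeeping changes surface in these hunks. The blank lines between error labels are dropped and a bad_features target is added so the new feature-parsing failure unwinds through the same ladder. And generic_make_request() becomes submit_bio_noacct(), the name the block layer gave that function in v5.9; behaviour is unchanged (the same substitution recurs in the call sites below). The deleted mutex_destroy() has no rw_semaphore counterpart, so the dtr simply loses the call.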
@@ -1500,7 +1614,7 @@
 		bio->bi_next = NULL;
 		r = do_origin(s->origin, bio, false);
 		if (r == DM_MAPIO_REMAPPED)
-			generic_make_request(bio);
+			submit_bio_noacct(bio);
 		bio = n;
 	}
 }
@@ -1538,6 +1652,13 @@
 	dm_table_event(s->ti->table);
 }
 
+static void invalidate_snapshot(struct dm_snapshot *s, int err)
+{
+	down_write(&s->lock);
+	__invalidate_snapshot(s, err);
+	up_write(&s->lock);
+}
+
 static void pending_complete(void *context, int success)
 {
 	struct dm_snap_pending_exception *pe = context;
@@ -1546,51 +1667,69 @@
 	struct bio *origin_bios = NULL;
 	struct bio *snapshot_bios = NULL;
 	struct bio *full_bio = NULL;
+	struct dm_exception_table_lock lock;
 	int error = 0;
+
+	dm_exception_table_lock_init(s, pe->e.old_chunk, &lock);
 
 	if (!success) {
 		/* Read/write error - snapshot is unusable */
-		mutex_lock(&s->lock);
-		__invalidate_snapshot(s, -EIO);
+		invalidate_snapshot(s, -EIO);
 		error = 1;
+
+		dm_exception_table_lock(&lock);
 		goto out;
 	}
 
 	e = alloc_completed_exception(GFP_NOIO);
 	if (!e) {
-		mutex_lock(&s->lock);
-		__invalidate_snapshot(s, -ENOMEM);
+		invalidate_snapshot(s, -ENOMEM);
 		error = 1;
+
+		dm_exception_table_lock(&lock);
 		goto out;
 	}
 	*e = pe->e;
 
-	mutex_lock(&s->lock);
+	down_read(&s->lock);
+	dm_exception_table_lock(&lock);
 	if (!s->valid) {
+		up_read(&s->lock);
 		free_completed_exception(e);
 		error = 1;
+
 		goto out;
 	}
 
-	/* Check for conflicting reads */
-	__check_for_conflicting_io(s, pe->e.old_chunk);
-
 	/*
-	 * Add a proper exception, and remove the
-	 * in-flight exception from the list.
+	 * Add a proper exception. After inserting the completed exception all
+	 * subsequent snapshot reads to this chunk will be redirected to the
+	 * COW device. This ensures that we do not starve. Moreover, as long
+	 * as the pending exception exists, neither origin writes nor snapshot
+	 * merging can overwrite the chunk in origin.
	 */
 	dm_insert_exception(&s->complete, e);
+	up_read(&s->lock);
+
+	/* Wait for conflicting reads to drain */
+	if (__chunk_is_tracked(s, pe->e.old_chunk)) {
+		dm_exception_table_unlock(&lock);
+		__check_for_conflicting_io(s, pe->e.old_chunk);
+		dm_exception_table_lock(&lock);
+	}
 
 out:
+	/* Remove the in-flight exception from the list */
 	dm_remove_exception(&pe->e);
+
+	dm_exception_table_unlock(&lock);
+
 	snapshot_bios = bio_list_get(&pe->snapshot_bios);
 	origin_bios = bio_list_get(&pe->origin_bios);
 	full_bio = pe->full_bio;
 	if (full_bio)
 		full_bio->bi_end_io = pe->full_bio_end_io;
 	increment_pending_exceptions_done_count();
-
-	mutex_unlock(&s->lock);
 
 	/* Submit any pending write bios */
 	if (error) {
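This rework of pending_complete() is the heart of the locking series. The order of operations it enforces, sketched step by step with the names used above:

        down_read(&s->lock);                    /* 1: hold off invalidation/handover */
        dm_exception_table_lock(&lock);         /* 2: own this chunk's two buckets   */
        dm_insert_exception(&s->complete, e);   /* 3: new reads now go to the COW    */
        up_read(&s->lock);                      /* 4: big lock no longer needed      */
        ...                                     /* 5: drain tracked reads, unlocked  */
        dm_remove_exception(&pe->e);            /* 6: retire the pending exception   */
        dm_exception_table_unlock(&lock);

Note the drain in step 5 runs with the bucket locks dropped (and only if __chunk_is_tracked() says there is anything to wait for), so readers of other exceptions in the same buckets are never stalled behind it. The error paths take the bucket lock before jumping to out: purely so the unlock after dm_remove_exception() is unconditional.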
@@ -1716,7 +1855,7 @@
 	bio->bi_end_io = full_bio_end_io;
 	bio->bi_private = callback_data;
 
-	generic_make_request(bio);
+	submit_bio_noacct(bio);
 }
 
 static struct dm_snap_pending_exception *
@@ -1731,12 +1870,43 @@
 }
 
 /*
+ * Inserts a pending exception into the pending table.
+ *
+ * NOTE: a write lock must be held on the chunk's pending exception table slot
+ * before calling this.
+ */
+static struct dm_snap_pending_exception *
+__insert_pending_exception(struct dm_snapshot *s,
+			   struct dm_snap_pending_exception *pe, chunk_t chunk)
+{
+	pe->e.old_chunk = chunk;
+	bio_list_init(&pe->origin_bios);
+	bio_list_init(&pe->snapshot_bios);
+	pe->started = 0;
+	pe->full_bio = NULL;
+
+	spin_lock(&s->pe_allocation_lock);
+	if (s->store->type->prepare_exception(s->store, &pe->e)) {
+		spin_unlock(&s->pe_allocation_lock);
+		free_pending_exception(pe);
+		return NULL;
+	}
+
+	pe->exception_sequence = s->exception_start_sequence++;
+	spin_unlock(&s->pe_allocation_lock);
+
+	dm_insert_exception(&s->pending, &pe->e);
+
+	return pe;
+}
+
+/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
- * NOTE: a write lock must be held on snap->lock before calling
- * this.
+ * NOTE: a write lock must be held on the chunk's pending exception table slot
+ * before calling this.
 */
 static struct dm_snap_pending_exception *
 __find_pending_exception(struct dm_snapshot *s,
@@ -1750,22 +1920,7 @@
 		return pe2;
 	}
 
-	pe->e.old_chunk = chunk;
-	bio_list_init(&pe->origin_bios);
-	bio_list_init(&pe->snapshot_bios);
-	pe->started = 0;
-	pe->full_bio = NULL;
-
-	if (s->store->type->prepare_exception(s->store, &pe->e)) {
-		free_pending_exception(pe);
-		return NULL;
-	}
-
-	pe->exception_sequence = s->exception_start_sequence++;
-
-	dm_insert_exception(&s->pending, &pe->e);
-
-	return pe;
+	return __insert_pending_exception(s, pe, chunk);
 }
17791934 }
17801935
1936
+static void zero_callback(int read_err, unsigned long write_err, void *context)
1937
+{
1938
+ struct bio *bio = context;
1939
+ struct dm_snapshot *s = bio->bi_private;
1940
+
1941
+ account_end_copy(s);
1942
+ bio->bi_status = write_err ? BLK_STS_IOERR : 0;
1943
+ bio_endio(bio);
1944
+}
1945
+
1946
+static void zero_exception(struct dm_snapshot *s, struct dm_exception *e,
1947
+ struct bio *bio, chunk_t chunk)
1948
+{
1949
+ struct dm_io_region dest;
1950
+
1951
+ dest.bdev = s->cow->bdev;
1952
+ dest.sector = bio->bi_iter.bi_sector;
1953
+ dest.count = s->store->chunk_size;
1954
+
1955
+ account_start_copy(s);
1956
+ WARN_ON_ONCE(bio->bi_private);
1957
+ bio->bi_private = s;
1958
+ dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio);
1959
+}
1960
+
1961
+static bool io_overlaps_chunk(struct dm_snapshot *s, struct bio *bio)
1962
+{
1963
+ return bio->bi_iter.bi_size ==
1964
+ (s->store->chunk_size << SECTOR_SHIFT);
1965
+}
1966
+
17811967 static int snapshot_map(struct dm_target *ti, struct bio *bio)
17821968 {
17831969 struct dm_exception *e;
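zero_exception() implements the discard_zeroes_cow side: rather than issuing the discard anywhere, the chunk-sized region the exception occupies in the COW device is overwritten with zeroes through kcopyd, and the original bio completes from zero_callback() with BLK_STS_IOERR only if the zeroing write failed. The helper is the stock kcopyd entry point (signature per the dm-kcopyd header):

        dm_kcopyd_zero(kc, 1 /* num_dests */, &dest, 0 /* flags */, zero_callback, bio);

io_overlaps_chunk() is the guard that makes this safe: zeroing is attempted only when the bio covers exactly one full chunk, which the io_hints hook at the bottom of the diff guarantees by splitting all discards on chunk boundaries.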
@@ -1785,6 +1971,7 @@
 	int r = DM_MAPIO_REMAPPED;
 	chunk_t chunk;
 	struct dm_snap_pending_exception *pe = NULL;
+	struct dm_exception_table_lock lock;
 
 	init_tracked_chunk(bio);
 
@@ -1794,6 +1981,7 @@
 	}
 
 	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
+	dm_exception_table_lock_init(s, chunk, &lock);
 
 	/* Full snapshots are not usable */
 	/* To get here the table must be live so s->active is always set. */
@@ -1805,7 +1993,8 @@
 		; /* wait_for_in_progress() has slept */
 	}
 
-	mutex_lock(&s->lock);
+	down_read(&s->lock);
+	dm_exception_table_lock(&lock);
 
 	if (!s->valid || (unlikely(s->snapshot_overflowed) &&
 	    bio_data_dir(bio) == WRITE)) {
@@ -1813,10 +2002,43 @@
 		goto out_unlock;
 	}
 
+	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
+		if (s->discard_passdown_origin && dm_bio_get_target_bio_nr(bio)) {
+			/*
+			 * passdown discard to origin (without triggering
+			 * snapshot exceptions via do_origin; doing so would
+			 * defeat the goal of freeing space in origin that is
+			 * implied by the "discard_passdown_origin" feature)
+			 */
+			bio_set_dev(bio, s->origin->bdev);
+			track_chunk(s, bio, chunk);
+			goto out_unlock;
+		}
+		/* discard to snapshot (target_bio_nr == 0) zeroes exceptions */
+	}
+
 	/* If the block is already remapped - use that, else remap it */
 	e = dm_lookup_exception(&s->complete, chunk);
 	if (e) {
 		remap_exception(s, e, bio, chunk);
+		if (unlikely(bio_op(bio) == REQ_OP_DISCARD) &&
+		    io_overlaps_chunk(s, bio)) {
+			dm_exception_table_unlock(&lock);
+			up_read(&s->lock);
+			zero_exception(s, e, bio, chunk);
+			r = DM_MAPIO_SUBMITTED; /* discard is not issued */
+			goto out;
+		}
+		goto out_unlock;
+	}
+
+	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
+		/*
+		 * If no exception exists, complete discard immediately
+		 * otherwise it'll trigger copy-out.
+		 */
+		bio_endio(bio);
+		r = DM_MAPIO_SUBMITTED;
 		goto out_unlock;
 	}
 
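Three discard outcomes are now possible in snapshot_map(), none of which ever triggers copy-on-write:

        /* REQ_OP_DISCARD dispatch, per the hunk above:
         * 1. passdown clone (target_bio_nr != 0): remapped to the origin,
         *    bypassing do_origin() so no exception is triggered;
         * 2. the chunk already has an exception and the bio spans the whole
         *    chunk: zero_exception() rewrites the COW chunk via kcopyd;
         * 3. no exception yet: completed immediately, since allocating an
         *    exception only to discard it would be pointless copy-out work.
         */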
@@ -1828,15 +2050,9 @@
 	if (bio_data_dir(bio) == WRITE) {
 		pe = __lookup_pending_exception(s, chunk);
 		if (!pe) {
-			mutex_unlock(&s->lock);
+			dm_exception_table_unlock(&lock);
 			pe = alloc_pending_exception(s);
-			mutex_lock(&s->lock);
-
-			if (!s->valid || s->snapshot_overflowed) {
-				free_pending_exception(pe);
-				r = DM_MAPIO_KILL;
-				goto out_unlock;
-			}
+			dm_exception_table_lock(&lock);
 
 			e = dm_lookup_exception(&s->complete, chunk);
 			if (e) {
@@ -1847,13 +2063,22 @@
 
 			pe = __find_pending_exception(s, pe, chunk);
 			if (!pe) {
+				dm_exception_table_unlock(&lock);
+				up_read(&s->lock);
+
+				down_write(&s->lock);
+
 				if (s->store->userspace_supports_overflow) {
-					s->snapshot_overflowed = 1;
-					DMERR("Snapshot overflowed: Unable to allocate exception.");
+					if (s->valid && !s->snapshot_overflowed) {
+						s->snapshot_overflowed = 1;
+						DMERR("Snapshot overflowed: Unable to allocate exception.");
+					}
 				} else
 					__invalidate_snapshot(s, -ENOMEM);
+				up_write(&s->lock);
+
 				r = DM_MAPIO_KILL;
-				goto out_unlock;
+				goto out;
 			}
 		}
 
@@ -1861,11 +2086,12 @@
 
 	r = DM_MAPIO_SUBMITTED;
 
-	if (!pe->started &&
-	    bio->bi_iter.bi_size ==
-	    (s->store->chunk_size << SECTOR_SHIFT)) {
+	if (!pe->started && io_overlaps_chunk(s, bio)) {
 		pe->started = 1;
-		mutex_unlock(&s->lock);
+
+		dm_exception_table_unlock(&lock);
+		up_read(&s->lock);
+
 		start_full_bio(pe, bio);
 		goto out;
 	}
@@ -1873,9 +2099,12 @@
 	bio_list_add(&pe->snapshot_bios, bio);
 
 	if (!pe->started) {
-		/* this is protected by snap->lock */
+		/* this is protected by the exception table lock */
 		pe->started = 1;
-		mutex_unlock(&s->lock);
+
+		dm_exception_table_unlock(&lock);
+		up_read(&s->lock);
+
 		start_copy(pe);
 		goto out;
 	}
@@ -1885,7 +2114,8 @@
 }
 
 out_unlock:
-	mutex_unlock(&s->lock);
+	dm_exception_table_unlock(&lock);
+	up_read(&s->lock);
 out:
 	return r;
 }
@@ -1919,9 +2149,15 @@
 		return DM_MAPIO_REMAPPED;
 	}
 
+	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
+		/* Once merging, discards no longer effect change */
+		bio_endio(bio);
+		return DM_MAPIO_SUBMITTED;
+	}
+
 	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
 
-	mutex_lock(&s->lock);
+	down_write(&s->lock);
 
 	/* Full merging snapshots are redirected to the origin */
 	if (!s->valid)
@@ -1952,12 +2188,12 @@
 	bio_set_dev(bio, s->origin->bdev);
 
 	if (bio_data_dir(bio) == WRITE) {
-		mutex_unlock(&s->lock);
+		up_write(&s->lock);
 		return do_origin(s->origin, bio, false);
 	}
 
 out_unlock:
-	mutex_unlock(&s->lock);
+	up_write(&s->lock);
 
 	return r;
 }
@@ -1989,7 +2225,7 @@
 	down_read(&_origins_lock);
 	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
 	if (snap_src && snap_dest) {
-		mutex_lock(&snap_src->lock);
+		down_read(&snap_src->lock);
 		if (s == snap_src) {
 			DMERR("Unable to resume snapshot source until "
 			      "handover completes.");
@@ -1999,7 +2235,7 @@
 			      "source is suspended.");
 			r = -EINVAL;
 		}
-		mutex_unlock(&snap_src->lock);
+		up_read(&snap_src->lock);
 	}
 	up_read(&_origins_lock);
 
@@ -2045,11 +2281,11 @@
 
 	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
 	if (snap_src && snap_dest) {
-		mutex_lock(&snap_src->lock);
-		mutex_lock_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
+		down_write(&snap_src->lock);
+		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
 		__handover_exceptions(snap_src, snap_dest);
-		mutex_unlock(&snap_dest->lock);
-		mutex_unlock(&snap_src->lock);
+		up_write(&snap_dest->lock);
+		up_write(&snap_src->lock);
 	}
 
 	up_read(&_origins_lock);
@@ -2064,9 +2300,9 @@
 	/* Now we have correct chunk size, reregister */
 	reregister_snapshot(s);
 
-	mutex_lock(&s->lock);
+	down_write(&s->lock);
 	s->active = 1;
-	mutex_unlock(&s->lock);
+	up_write(&s->lock);
 }
 
 static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
@@ -2102,11 +2338,12 @@
 {
 	unsigned sz = 0;
 	struct dm_snapshot *snap = ti->private;
+	unsigned num_features;
 
 	switch (type) {
 	case STATUSTYPE_INFO:
 
-		mutex_lock(&snap->lock);
+		down_write(&snap->lock);
 
 		if (!snap->valid)
 			DMEMIT("Invalid");
@@ -2131,7 +2368,7 @@
 			DMEMIT("Unknown");
 		}
 
-		mutex_unlock(&snap->lock);
+		up_write(&snap->lock);
 
 		break;
 
@@ -2142,8 +2379,16 @@
 		 * make sense.
 		 */
 		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
-		snap->store->type->status(snap->store, type, result + sz,
-					  maxlen - sz);
+		sz += snap->store->type->status(snap->store, type, result + sz,
+						maxlen - sz);
+		num_features = snap->discard_zeroes_cow + snap->discard_passdown_origin;
+		if (num_features) {
+			DMEMIT(" %u", num_features);
+			if (snap->discard_zeroes_cow)
+				DMEMIT(" discard_zeroes_cow");
+			if (snap->discard_passdown_origin)
+				DMEMIT(" discard_passdown_origin");
+		}
 		break;
 	}
 }
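The status path now accumulates sz, so the feature report can append after whatever the exception store wrote, and the features echo back in the canonical count-then-words form. A full STATUSTYPE_TABLE line for a snapshot with both features on might look like this (the store output format and device names are illustrative assumptions, not taken from this diff):

        /dev/vg0/base /dev/vg0/cow P 8 2 discard_zeroes_cow discard_passdown_origin

Also note STATUSTYPE_INFO takes the semaphore in write mode, keeping status readout exclusive exactly as the mutex did.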
@@ -2162,6 +2407,26 @@
 	return r;
 }
 
+static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+	struct dm_snapshot *snap = ti->private;
+
+	if (snap->discard_zeroes_cow) {
+		struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
+
+		down_read(&_origins_lock);
+
+		(void) __find_snapshots_sharing_cow(snap, &snap_src, &snap_dest, NULL);
+		if (snap_src && snap_dest)
+			snap = snap_src;
+
+		/* All discards are split on chunk_size boundary */
+		limits->discard_granularity = snap->store->chunk_size;
+		limits->max_discard_sectors = snap->store->chunk_size;
+
+		up_read(&_origins_lock);
+	}
+}
 
 /*-----------------------------------------------------------------
 * Origin methods
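snapshot_io_hints() is what makes the whole-chunk assumption in io_overlaps_chunk() hold: with discard_granularity and max_discard_sectors both pinned to the chunk size, the block layer splits every discard into bios covering exactly one chunk. When a handover pair exists, the limits come from the handover source, since that is the snapshot whose store (and chunk size) will survive the handover.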
@@ -2183,9 +2448,10 @@
 	int r = DM_MAPIO_REMAPPED;
 	struct dm_snapshot *snap;
 	struct dm_exception *e;
-	struct dm_snap_pending_exception *pe;
+	struct dm_snap_pending_exception *pe, *pe2;
 	struct dm_snap_pending_exception *pe_to_start_now = NULL;
 	struct dm_snap_pending_exception *pe_to_start_last = NULL;
+	struct dm_exception_table_lock lock;
 	chunk_t chunk;
 
 	/* Do all the snapshots on this origin */
@@ -2197,52 +2463,59 @@
 		if (dm_target_is_snapshot_merge(snap->ti))
 			continue;
 
-		mutex_lock(&snap->lock);
-
-		/* Only deal with valid and active snapshots */
-		if (!snap->valid || !snap->active)
-			goto next_snapshot;
-
 		/* Nothing to do if writing beyond end of snapshot */
 		if (sector >= dm_table_get_size(snap->ti->table))
-			goto next_snapshot;
+			continue;
 
 		/*
 		 * Remember, different snapshots can have
 		 * different chunk sizes.
 		 */
 		chunk = sector_to_chunk(snap->store, sector);
+		dm_exception_table_lock_init(snap, chunk, &lock);
 
-		/*
-		 * Check exception table to see if block
-		 * is already remapped in this snapshot
-		 * and trigger an exception if not.
-		 */
-		e = dm_lookup_exception(&snap->complete, chunk);
-		if (e)
+		down_read(&snap->lock);
+		dm_exception_table_lock(&lock);
+
+		/* Only deal with valid and active snapshots */
+		if (!snap->valid || !snap->active)
 			goto next_snapshot;
 
 		pe = __lookup_pending_exception(snap, chunk);
 		if (!pe) {
-			mutex_unlock(&snap->lock);
-			pe = alloc_pending_exception(snap);
-			mutex_lock(&snap->lock);
-
-			if (!snap->valid) {
-				free_pending_exception(pe);
-				goto next_snapshot;
-			}
-
+			/*
+			 * Check exception table to see if block is already
+			 * remapped in this snapshot and trigger an exception
+			 * if not.
+			 */
 			e = dm_lookup_exception(&snap->complete, chunk);
-			if (e) {
-				free_pending_exception(pe);
+			if (e)
 				goto next_snapshot;
-			}
 
-			pe = __find_pending_exception(snap, pe, chunk);
-			if (!pe) {
-				__invalidate_snapshot(snap, -ENOMEM);
-				goto next_snapshot;
+			dm_exception_table_unlock(&lock);
+			pe = alloc_pending_exception(snap);
+			dm_exception_table_lock(&lock);
+
+			pe2 = __lookup_pending_exception(snap, chunk);
+
+			if (!pe2) {
+				e = dm_lookup_exception(&snap->complete, chunk);
+				if (e) {
+					free_pending_exception(pe);
+					goto next_snapshot;
+				}
+
+				pe = __insert_pending_exception(snap, pe, chunk);
+				if (!pe) {
+					dm_exception_table_unlock(&lock);
+					up_read(&snap->lock);
+
+					invalidate_snapshot(snap, -ENOMEM);
+					continue;
+				}
+			} else {
+				free_pending_exception(pe);
+				pe = pe2;
 			}
 		}
 
@@ -2269,7 +2542,8 @@
 		}
 
 next_snapshot:
-		mutex_unlock(&snap->lock);
+		dm_exception_table_unlock(&lock);
+		up_read(&snap->lock);
 
 		if (pe_to_start_now) {
 			start_copy(pe_to_start_now);
@@ -2423,13 +2697,6 @@
 	return do_origin(o->dev, bio, true);
 }
 
-static long origin_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
-		long nr_pages, void **kaddr, pfn_t *pfn)
-{
-	DMWARN("device does not support dax.");
-	return -EIO;
-}
-
 /*
 * Set the target "max_io_len" field to the minimum of all the snapshots'
 * chunk sizes.
@@ -2489,12 +2756,11 @@
 	.postsuspend = origin_postsuspend,
 	.status = origin_status,
 	.iterate_devices = origin_iterate_devices,
-	.direct_access = origin_dax_direct_access,
 };
 
 static struct target_type snapshot_target = {
 	.name = "snapshot",
-	.version = {1, 15, 0},
+	.version = {1, 16, 0},
 	.module = THIS_MODULE,
 	.ctr = snapshot_ctr,
 	.dtr = snapshot_dtr,
@@ -2504,11 +2770,12 @@
 	.resume = snapshot_resume,
 	.status = snapshot_status,
 	.iterate_devices = snapshot_iterate_devices,
+	.io_hints = snapshot_io_hints,
 };
 
 static struct target_type merge_target = {
 	.name = dm_snapshot_merge_target_name,
-	.version = {1, 4, 0},
+	.version = {1, 5, 0},
 	.module = THIS_MODULE,
 	.ctr = snapshot_ctr,
 	.dtr = snapshot_dtr,
@@ -2519,6 +2786,7 @@
 	.resume = snapshot_merge_resume,
 	.status = snapshot_status,
 	.iterate_devices = snapshot_iterate_devices,
+	.io_hints = snapshot_io_hints,
 };
 
 static int __init dm_snapshot_init(void)