From 748e4f3d702def1a4bff191e0cf93b6a05340f01 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 07:41:34 +0000
Subject: [PATCH] dm snapshot: scale exception table locking and add optional discard support
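
This squashes the dm-snapshot scalability and optional discard work
into kernel/drivers/md/dm-snap.c:

- Replace the per-snapshot mutex with an rw_semaphore and protect the
  completed and pending exception hash tables with per-bucket hlist_bl
  bit-spinlocks, so snapshot_map() and origin writes to different
  chunks no longer serialize on a single lock.
- Serialize exception allocation with the new pe_allocation_lock
  spinlock and factor __insert_pending_exception() out of
  __find_pending_exception().
- Add the optional "discard_zeroes_cow" and "discard_passdown_origin"
  feature arguments to the snapshot constructor, report them in
  snapshot_status(), zero already-allocated COW chunks on discard via
  dm_kcopyd_zero(), and set the discard granularity in
  snapshot_io_hints().
- Convert generic_make_request() calls to submit_bio_noacct() and drop
  the stub origin_dax_direct_access().
- Bump the snapshot target to version 1.16.0 and the snapshot-merge
  target to 1.5.0.

For illustration only (the device names and chunk size below are
placeholders, not part of this patch), a snapshot table line using
both new feature arguments would take the form:

  snapshot <origin_dev> <COW-dev> p 8 2 discard_zeroes_cow discard_passdown_origin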
---
kernel/drivers/md/dm-snap.c | 584 ++++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 426 insertions(+), 158 deletions(-)
diff --git a/kernel/drivers/md/dm-snap.c b/kernel/drivers/md/dm-snap.c
index 52101e5..b2d8b37 100644
--- a/kernel/drivers/md/dm-snap.c
+++ b/kernel/drivers/md/dm-snap.c
@@ -1,6 +1,4 @@
/*
- * dm-snapshot.c
- *
* Copyright (C) 2001-2002 Sistina Software (UK) Limited.
*
* This file is released under the GPL.
@@ -13,6 +11,7 @@
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
+#include <linux/list_bl.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -43,11 +42,11 @@
struct dm_exception_table {
uint32_t hash_mask;
unsigned hash_shift;
- struct list_head *table;
+ struct hlist_bl_head *table;
};
struct dm_snapshot {
- struct mutex lock;
+ struct rw_semaphore lock;
struct dm_dev *origin;
struct dm_dev *cow;
@@ -75,7 +74,9 @@
atomic_t pending_exceptions_count;
- /* Protected by "lock" */
+ spinlock_t pe_allocation_lock;
+
+ /* Protected by "pe_allocation_lock" */
sector_t exception_start_sequence;
/* Protected by kcopyd single-threaded callback */
@@ -130,7 +131,10 @@
* - I/O error while merging
* => stop merging; set merge_failed; process I/O normally.
*/
- int merge_failed;
+ bool merge_failed:1;
+
+ bool discard_zeroes_cow:1;
+ bool discard_passdown_origin:1;
/*
* Incoming bios that overlap with chunks being merged must wait
@@ -461,9 +465,9 @@
if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
continue;
- mutex_lock(&s->lock);
+ down_read(&s->lock);
active = s->active;
- mutex_unlock(&s->lock);
+ up_read(&s->lock);
if (active) {
if (snap_src)
@@ -622,6 +626,36 @@
* The lowest hash_shift bits of the chunk number are ignored, allowing
* some consecutive chunks to be grouped together.
*/
+static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk);
+
+/* Lock to protect access to the completed and pending exception hash tables. */
+struct dm_exception_table_lock {
+ struct hlist_bl_head *complete_slot;
+ struct hlist_bl_head *pending_slot;
+};
+
+static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk,
+ struct dm_exception_table_lock *lock)
+{
+ struct dm_exception_table *complete = &s->complete;
+ struct dm_exception_table *pending = &s->pending;
+
+ lock->complete_slot = &complete->table[exception_hash(complete, chunk)];
+ lock->pending_slot = &pending->table[exception_hash(pending, chunk)];
+}
+
+static void dm_exception_table_lock(struct dm_exception_table_lock *lock)
+{
+ hlist_bl_lock(lock->complete_slot);
+ hlist_bl_lock(lock->pending_slot);
+}
+
+static void dm_exception_table_unlock(struct dm_exception_table_lock *lock)
+{
+ hlist_bl_unlock(lock->pending_slot);
+ hlist_bl_unlock(lock->complete_slot);
+}
+
static int dm_exception_table_init(struct dm_exception_table *et,
uint32_t size, unsigned hash_shift)
{
@@ -629,12 +663,12 @@
et->hash_shift = hash_shift;
et->hash_mask = size - 1;
- et->table = dm_vcalloc(size, sizeof(struct list_head));
+ et->table = dm_vcalloc(size, sizeof(struct hlist_bl_head));
if (!et->table)
return -ENOMEM;
for (i = 0; i < size; i++)
- INIT_LIST_HEAD(et->table + i);
+ INIT_HLIST_BL_HEAD(et->table + i);
return 0;
}
@@ -642,15 +676,16 @@
static void dm_exception_table_exit(struct dm_exception_table *et,
struct kmem_cache *mem)
{
- struct list_head *slot;
- struct dm_exception *ex, *next;
+ struct hlist_bl_head *slot;
+ struct dm_exception *ex;
+ struct hlist_bl_node *pos, *n;
int i, size;
size = et->hash_mask + 1;
for (i = 0; i < size; i++) {
slot = et->table + i;
- list_for_each_entry_safe (ex, next, slot, hash_list)
+ hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list)
kmem_cache_free(mem, ex);
}
@@ -664,7 +699,7 @@
static void dm_remove_exception(struct dm_exception *e)
{
- list_del(&e->hash_list);
+ hlist_bl_del(&e->hash_list);
}
/*
@@ -674,11 +709,12 @@
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
chunk_t chunk)
{
- struct list_head *slot;
+ struct hlist_bl_head *slot;
+ struct hlist_bl_node *pos;
struct dm_exception *e;
slot = &et->table[exception_hash(et, chunk)];
- list_for_each_entry (e, slot, hash_list)
+ hlist_bl_for_each_entry(e, pos, slot, hash_list)
if (chunk >= e->old_chunk &&
chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
return e;
@@ -725,7 +761,8 @@
static void dm_insert_exception(struct dm_exception_table *eh,
struct dm_exception *new_e)
{
- struct list_head *l;
+ struct hlist_bl_head *l;
+ struct hlist_bl_node *pos;
struct dm_exception *e = NULL;
l = &eh->table[exception_hash(eh, new_e->old_chunk)];
@@ -735,7 +772,7 @@
goto out;
/* List is ordered by old_chunk */
- list_for_each_entry_reverse(e, l, hash_list) {
+ hlist_bl_for_each_entry(e, pos, l, hash_list) {
/* Insert after an existing chunk? */
if (new_e->old_chunk == (e->old_chunk +
dm_consecutive_chunk_count(e) + 1) &&
@@ -756,12 +793,24 @@
return;
}
- if (new_e->old_chunk > e->old_chunk)
+ if (new_e->old_chunk < e->old_chunk)
break;
}
out:
- list_add(&new_e->hash_list, e ? &e->hash_list : l);
+ if (!e) {
+ /*
+ * Either the table doesn't support consecutive chunks or slot
+ * l is empty.
+ */
+ hlist_bl_add_head(&new_e->hash_list, l);
+ } else if (new_e->old_chunk < e->old_chunk) {
+ /* Add before an existing exception */
+ hlist_bl_add_before(&new_e->hash_list, &e->hash_list);
+ } else {
+ /* Add to l's tail: e is the last exception in this slot */
+ hlist_bl_add_behind(&new_e->hash_list, &e->hash_list);
+ }
}
/*
@@ -770,6 +819,7 @@
*/
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
+ struct dm_exception_table_lock lock;
struct dm_snapshot *s = context;
struct dm_exception *e;
@@ -782,7 +832,17 @@
/* Consecutive_count is implicitly initialised to zero */
e->new_chunk = new;
+ /*
+ * Although there is no need to lock access to the exception tables
+ * here, if we don't then hlist_bl_add_head(), called by
+ * dm_insert_exception(), will complain about accessing the
+ * corresponding list without locking it first.
+ */
+ dm_exception_table_lock_init(s, old, &lock);
+
+ dm_exception_table_lock(&lock);
dm_insert_exception(&s->complete, e);
+ dm_exception_table_unlock(&lock);
return 0;
}
@@ -811,7 +871,7 @@
{
/* use a fixed size of 2MB */
unsigned long mem = 2 * 1024 * 1024;
- mem /= sizeof(struct list_head);
+ mem /= sizeof(struct hlist_bl_head);
return mem;
}
@@ -931,7 +991,7 @@
int r;
chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
- mutex_lock(&s->lock);
+ down_write(&s->lock);
/*
* Process chunks (and associated exceptions) in reverse order
@@ -946,7 +1006,7 @@
b = __release_queued_bios_after_merge(s);
out:
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
if (b)
flush_bios(b);
@@ -1005,9 +1065,9 @@
if (linear_chunks < 0) {
DMERR("Read error in exception store: "
"shutting down merge");
- mutex_lock(&s->lock);
- s->merge_failed = 1;
- mutex_unlock(&s->lock);
+ down_write(&s->lock);
+ s->merge_failed = true;
+ up_write(&s->lock);
}
goto shut;
}
@@ -1048,16 +1108,17 @@
previous_count = read_pending_exceptions_done_count();
}
- mutex_lock(&s->lock);
+ down_write(&s->lock);
s->first_merging_chunk = old_chunk;
s->num_merging_chunks = linear_chunks;
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
/* Wait until writes to all 'linear_chunks' drain */
for (i = 0; i < linear_chunks; i++)
__check_for_conflicting_io(s, old_chunk + i);
- dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
+ dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 1 << DM_KCOPYD_SNAP_MERGE,
+ merge_callback, s);
return;
shut:
@@ -1109,10 +1170,10 @@
return;
shut:
- mutex_lock(&s->lock);
- s->merge_failed = 1;
+ down_write(&s->lock);
+ s->merge_failed = true;
b = __release_queued_bios_after_merge(s);
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
error_bios(b);
merge_shutdown(s);
@@ -1134,12 +1195,64 @@
clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}
+static int parse_snapshot_features(struct dm_arg_set *as, struct dm_snapshot *s,
+ struct dm_target *ti)
+{
+ int r;
+ unsigned argc;
+ const char *arg_name;
+
+ static const struct dm_arg _args[] = {
+ {0, 2, "Invalid number of feature arguments"},
+ };
+
+ /*
+ * No feature arguments supplied.
+ */
+ if (!as->argc)
+ return 0;
+
+ r = dm_read_arg_group(_args, as, &argc, &ti->error);
+ if (r)
+ return -EINVAL;
+
+ while (argc && !r) {
+ arg_name = dm_shift_arg(as);
+ argc--;
+
+ if (!strcasecmp(arg_name, "discard_zeroes_cow"))
+ s->discard_zeroes_cow = true;
+
+ else if (!strcasecmp(arg_name, "discard_passdown_origin"))
+ s->discard_passdown_origin = true;
+
+ else {
+ ti->error = "Unrecognised feature requested";
+ r = -EINVAL;
+ break;
+ }
+ }
+
+ if (!s->discard_zeroes_cow && s->discard_passdown_origin) {
+ /*
+ * TODO: really these are disjoint.. but ti->num_discard_bios
+ * and dm_bio_get_target_bio_nr() require rigid constraints.
+ */
+ ti->error = "discard_passdown_origin feature depends on discard_zeroes_cow";
+ r = -EINVAL;
+ }
+
+ return r;
+}
+
/*
- * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size>
+ * Construct a snapshot mapping:
+ * <origin_dev> <COW-dev> <p|po|n> <chunk-size> [<# feature args> [<arg>]*]
*/
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct dm_snapshot *s;
+ struct dm_arg_set as;
int i;
int r = -EINVAL;
char *origin_path, *cow_path;
@@ -1147,8 +1260,8 @@
unsigned args_used, num_flush_bios = 1;
fmode_t origin_mode = FMODE_READ;
- if (argc != 4) {
- ti->error = "requires exactly 4 arguments";
+ if (argc < 4) {
+ ti->error = "requires 4 or more arguments";
r = -EINVAL;
goto bad;
}
@@ -1164,6 +1277,13 @@
r = -ENOMEM;
goto bad;
}
+
+ as.argc = argc;
+ as.argv = argv;
+ dm_consume_args(&as, 4);
+ r = parse_snapshot_features(&as, s, ti);
+ if (r)
+ goto bad_features;
origin_path = argv[0];
argv++;
@@ -1208,14 +1328,15 @@
s->snapshot_overflowed = 0;
s->active = 0;
atomic_set(&s->pending_exceptions_count, 0);
+ spin_lock_init(&s->pe_allocation_lock);
s->exception_start_sequence = 0;
s->exception_complete_sequence = 0;
s->out_of_order_tree = RB_ROOT;
- mutex_init(&s->lock);
+ init_rwsem(&s->lock);
INIT_LIST_HEAD(&s->list);
spin_lock_init(&s->pe_lock);
s->state_bits = 0;
- s->merge_failed = 0;
+ s->merge_failed = false;
s->first_merging_chunk = 0;
s->num_merging_chunks = 0;
bio_list_init(&s->bios_queued_during_merge);
@@ -1250,6 +1371,8 @@
ti->private = s;
ti->num_flush_bios = num_flush_bios;
+ if (s->discard_zeroes_cow)
+ ti->num_discard_bios = (s->discard_passdown_origin ? 2 : 1);
ti->per_io_data_size = sizeof(struct dm_snap_tracked_chunk);
/* Add snapshot to the list of snapshots for this origin */
@@ -1298,29 +1421,22 @@
bad_read_metadata:
unregister_snapshot(s);
-
bad_load_and_register:
mempool_exit(&s->pending_pool);
-
bad_pending_pool:
dm_kcopyd_client_destroy(s->kcopyd_client);
-
bad_kcopyd:
dm_exception_table_exit(&s->pending, pending_cache);
dm_exception_table_exit(&s->complete, exception_cache);
-
bad_hash_tables:
dm_exception_store_destroy(s->store);
-
bad_store:
dm_put_device(ti, s->cow);
-
bad_cow:
dm_put_device(ti, s->origin);
-
bad_origin:
+bad_features:
kfree(s);
-
bad:
return r;
}
@@ -1379,9 +1495,9 @@
/* Check whether exception handover must be cancelled */
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest && (s == snap_src)) {
- mutex_lock(&snap_dest->lock);
+ down_write(&snap_dest->lock);
snap_dest->valid = 0;
- mutex_unlock(&snap_dest->lock);
+ up_write(&snap_dest->lock);
DMERR("Cancelling snapshot handover.");
}
up_read(&_origins_lock);
@@ -1411,8 +1527,6 @@
mempool_exit(&s->pending_pool);
dm_exception_store_destroy(s->store);
-
- mutex_destroy(&s->lock);
bio_uninit(&s->flush_bio);
@@ -1480,7 +1594,7 @@
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = n;
}
}
@@ -1500,7 +1614,7 @@
bio->bi_next = NULL;
r = do_origin(s->origin, bio, false);
if (r == DM_MAPIO_REMAPPED)
- generic_make_request(bio);
+ submit_bio_noacct(bio);
bio = n;
}
}
@@ -1538,6 +1652,13 @@
dm_table_event(s->ti->table);
}
+static void invalidate_snapshot(struct dm_snapshot *s, int err)
+{
+ down_write(&s->lock);
+ __invalidate_snapshot(s, err);
+ up_write(&s->lock);
+}
+
static void pending_complete(void *context, int success)
{
struct dm_snap_pending_exception *pe = context;
@@ -1546,51 +1667,69 @@
struct bio *origin_bios = NULL;
struct bio *snapshot_bios = NULL;
struct bio *full_bio = NULL;
+ struct dm_exception_table_lock lock;
int error = 0;
+
+ dm_exception_table_lock_init(s, pe->e.old_chunk, &lock);
if (!success) {
/* Read/write error - snapshot is unusable */
- mutex_lock(&s->lock);
- __invalidate_snapshot(s, -EIO);
+ invalidate_snapshot(s, -EIO);
error = 1;
+
+ dm_exception_table_lock(&lock);
goto out;
}
e = alloc_completed_exception(GFP_NOIO);
if (!e) {
- mutex_lock(&s->lock);
- __invalidate_snapshot(s, -ENOMEM);
+ invalidate_snapshot(s, -ENOMEM);
error = 1;
+
+ dm_exception_table_lock(&lock);
goto out;
}
*e = pe->e;
- mutex_lock(&s->lock);
+ down_read(&s->lock);
+ dm_exception_table_lock(&lock);
if (!s->valid) {
+ up_read(&s->lock);
free_completed_exception(e);
error = 1;
+
goto out;
}
- /* Check for conflicting reads */
- __check_for_conflicting_io(s, pe->e.old_chunk);
-
/*
- * Add a proper exception, and remove the
- * in-flight exception from the list.
+ * Add a proper exception. After inserting the completed exception all
+ * subsequent snapshot reads to this chunk will be redirected to the
+ * COW device. This ensures that we do not starve. Moreover, as long
+ * as the pending exception exists, neither origin writes nor snapshot
+ * merging can overwrite the chunk in origin.
*/
dm_insert_exception(&s->complete, e);
+ up_read(&s->lock);
+
+ /* Wait for conflicting reads to drain */
+ if (__chunk_is_tracked(s, pe->e.old_chunk)) {
+ dm_exception_table_unlock(&lock);
+ __check_for_conflicting_io(s, pe->e.old_chunk);
+ dm_exception_table_lock(&lock);
+ }
out:
+ /* Remove the in-flight exception from the list */
dm_remove_exception(&pe->e);
+
+ dm_exception_table_unlock(&lock);
+
snapshot_bios = bio_list_get(&pe->snapshot_bios);
origin_bios = bio_list_get(&pe->origin_bios);
full_bio = pe->full_bio;
if (full_bio)
full_bio->bi_end_io = pe->full_bio_end_io;
increment_pending_exceptions_done_count();
-
- mutex_unlock(&s->lock);
/* Submit any pending write bios */
if (error) {
@@ -1716,7 +1855,7 @@
bio->bi_end_io = full_bio_end_io;
bio->bi_private = callback_data;
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
static struct dm_snap_pending_exception *
@@ -1731,12 +1870,43 @@
}
/*
+ * Inserts a pending exception into the pending table.
+ *
+ * NOTE: a write lock must be held on the chunk's pending exception table slot
+ * before calling this.
+ */
+static struct dm_snap_pending_exception *
+__insert_pending_exception(struct dm_snapshot *s,
+ struct dm_snap_pending_exception *pe, chunk_t chunk)
+{
+ pe->e.old_chunk = chunk;
+ bio_list_init(&pe->origin_bios);
+ bio_list_init(&pe->snapshot_bios);
+ pe->started = 0;
+ pe->full_bio = NULL;
+
+ spin_lock(&s->pe_allocation_lock);
+ if (s->store->type->prepare_exception(s->store, &pe->e)) {
+ spin_unlock(&s->pe_allocation_lock);
+ free_pending_exception(pe);
+ return NULL;
+ }
+
+ pe->exception_sequence = s->exception_start_sequence++;
+ spin_unlock(&s->pe_allocation_lock);
+
+ dm_insert_exception(&s->pending, &pe->e);
+
+ return pe;
+}
+
+/*
* Looks to see if this snapshot already has a pending exception
* for this chunk, otherwise it allocates a new one and inserts
* it into the pending table.
*
- * NOTE: a write lock must be held on snap->lock before calling
- * this.
+ * NOTE: a write lock must be held on the chunk's pending exception table slot
+ * before calling this.
*/
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
@@ -1750,22 +1920,7 @@
return pe2;
}
- pe->e.old_chunk = chunk;
- bio_list_init(&pe->origin_bios);
- bio_list_init(&pe->snapshot_bios);
- pe->started = 0;
- pe->full_bio = NULL;
-
- if (s->store->type->prepare_exception(s->store, &pe->e)) {
- free_pending_exception(pe);
- return NULL;
- }
-
- pe->exception_sequence = s->exception_start_sequence++;
-
- dm_insert_exception(&s->pending, &pe->e);
-
- return pe;
+ return __insert_pending_exception(s, pe, chunk);
}
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
@@ -1778,6 +1933,37 @@
(bio->bi_iter.bi_sector & s->store->chunk_mask);
}
+static void zero_callback(int read_err, unsigned long write_err, void *context)
+{
+ struct bio *bio = context;
+ struct dm_snapshot *s = bio->bi_private;
+
+ account_end_copy(s);
+ bio->bi_status = write_err ? BLK_STS_IOERR : 0;
+ bio_endio(bio);
+}
+
+static void zero_exception(struct dm_snapshot *s, struct dm_exception *e,
+ struct bio *bio, chunk_t chunk)
+{
+ struct dm_io_region dest;
+
+ dest.bdev = s->cow->bdev;
+ dest.sector = bio->bi_iter.bi_sector;
+ dest.count = s->store->chunk_size;
+
+ account_start_copy(s);
+ WARN_ON_ONCE(bio->bi_private);
+ bio->bi_private = s;
+ dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio);
+}
+
+static bool io_overlaps_chunk(struct dm_snapshot *s, struct bio *bio)
+{
+ return bio->bi_iter.bi_size ==
+ (s->store->chunk_size << SECTOR_SHIFT);
+}
+
static int snapshot_map(struct dm_target *ti, struct bio *bio)
{
struct dm_exception *e;
@@ -1785,6 +1971,7 @@
int r = DM_MAPIO_REMAPPED;
chunk_t chunk;
struct dm_snap_pending_exception *pe = NULL;
+ struct dm_exception_table_lock lock;
init_tracked_chunk(bio);
@@ -1794,6 +1981,7 @@
}
chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
+ dm_exception_table_lock_init(s, chunk, &lock);
/* Full snapshots are not usable */
/* To get here the table must be live so s->active is always set. */
@@ -1805,7 +1993,8 @@
; /* wait_for_in_progress() has slept */
}
- mutex_lock(&s->lock);
+ down_read(&s->lock);
+ dm_exception_table_lock(&lock);
if (!s->valid || (unlikely(s->snapshot_overflowed) &&
bio_data_dir(bio) == WRITE)) {
@@ -1813,10 +2002,43 @@
goto out_unlock;
}
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
+ if (s->discard_passdown_origin && dm_bio_get_target_bio_nr(bio)) {
+ /*
+ * passdown discard to origin (without triggering
+ * snapshot exceptions via do_origin; doing so would
+ * defeat the goal of freeing space in origin that is
+ * implied by the "discard_passdown_origin" feature)
+ */
+ bio_set_dev(bio, s->origin->bdev);
+ track_chunk(s, bio, chunk);
+ goto out_unlock;
+ }
+ /* discard to snapshot (target_bio_nr == 0) zeroes exceptions */
+ }
+
/* If the block is already remapped - use that, else remap it */
e = dm_lookup_exception(&s->complete, chunk);
if (e) {
remap_exception(s, e, bio, chunk);
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD) &&
+ io_overlaps_chunk(s, bio)) {
+ dm_exception_table_unlock(&lock);
+ up_read(&s->lock);
+ zero_exception(s, e, bio, chunk);
+ r = DM_MAPIO_SUBMITTED; /* discard is not issued */
+ goto out;
+ }
+ goto out_unlock;
+ }
+
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
+ /*
+ * If no exception exists, complete discard immediately
+ * otherwise it'll trigger copy-out.
+ */
+ bio_endio(bio);
+ r = DM_MAPIO_SUBMITTED;
goto out_unlock;
}
@@ -1828,15 +2050,9 @@
if (bio_data_dir(bio) == WRITE) {
pe = __lookup_pending_exception(s, chunk);
if (!pe) {
- mutex_unlock(&s->lock);
+ dm_exception_table_unlock(&lock);
pe = alloc_pending_exception(s);
- mutex_lock(&s->lock);
-
- if (!s->valid || s->snapshot_overflowed) {
- free_pending_exception(pe);
- r = DM_MAPIO_KILL;
- goto out_unlock;
- }
+ dm_exception_table_lock(&lock);
e = dm_lookup_exception(&s->complete, chunk);
if (e) {
@@ -1847,13 +2063,22 @@
pe = __find_pending_exception(s, pe, chunk);
if (!pe) {
+ dm_exception_table_unlock(&lock);
+ up_read(&s->lock);
+
+ down_write(&s->lock);
+
if (s->store->userspace_supports_overflow) {
- s->snapshot_overflowed = 1;
- DMERR("Snapshot overflowed: Unable to allocate exception.");
+ if (s->valid && !s->snapshot_overflowed) {
+ s->snapshot_overflowed = 1;
+ DMERR("Snapshot overflowed: Unable to allocate exception.");
+ }
} else
__invalidate_snapshot(s, -ENOMEM);
+ up_write(&s->lock);
+
r = DM_MAPIO_KILL;
- goto out_unlock;
+ goto out;
}
}
@@ -1861,11 +2086,12 @@
r = DM_MAPIO_SUBMITTED;
- if (!pe->started &&
- bio->bi_iter.bi_size ==
- (s->store->chunk_size << SECTOR_SHIFT)) {
+ if (!pe->started && io_overlaps_chunk(s, bio)) {
pe->started = 1;
- mutex_unlock(&s->lock);
+
+ dm_exception_table_unlock(&lock);
+ up_read(&s->lock);
+
start_full_bio(pe, bio);
goto out;
}
@@ -1873,9 +2099,12 @@
bio_list_add(&pe->snapshot_bios, bio);
if (!pe->started) {
- /* this is protected by snap->lock */
+ /* this is protected by the exception table lock */
pe->started = 1;
- mutex_unlock(&s->lock);
+
+ dm_exception_table_unlock(&lock);
+ up_read(&s->lock);
+
start_copy(pe);
goto out;
}
@@ -1885,7 +2114,8 @@
}
out_unlock:
- mutex_unlock(&s->lock);
+ dm_exception_table_unlock(&lock);
+ up_read(&s->lock);
out:
return r;
}
@@ -1919,9 +2149,15 @@
return DM_MAPIO_REMAPPED;
}
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
+ /* Once merging, discards no longer effect change */
+ bio_endio(bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
- mutex_lock(&s->lock);
+ down_write(&s->lock);
/* Full merging snapshots are redirected to the origin */
if (!s->valid)
@@ -1952,12 +2188,12 @@
bio_set_dev(bio, s->origin->bdev);
if (bio_data_dir(bio) == WRITE) {
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
return do_origin(s->origin, bio, false);
}
out_unlock:
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
return r;
}
@@ -1989,7 +2225,7 @@
down_read(&_origins_lock);
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
- mutex_lock(&snap_src->lock);
+ down_read(&snap_src->lock);
if (s == snap_src) {
DMERR("Unable to resume snapshot source until "
"handover completes.");
@@ -1999,7 +2235,7 @@
"source is suspended.");
r = -EINVAL;
}
- mutex_unlock(&snap_src->lock);
+ up_read(&snap_src->lock);
}
up_read(&_origins_lock);
@@ -2045,11 +2281,11 @@
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
- mutex_lock(&snap_src->lock);
- mutex_lock_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
+ down_write(&snap_src->lock);
+ down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
__handover_exceptions(snap_src, snap_dest);
- mutex_unlock(&snap_dest->lock);
- mutex_unlock(&snap_src->lock);
+ up_write(&snap_dest->lock);
+ up_write(&snap_src->lock);
}
up_read(&_origins_lock);
@@ -2064,9 +2300,9 @@
/* Now we have correct chunk size, reregister */
reregister_snapshot(s);
- mutex_lock(&s->lock);
+ down_write(&s->lock);
s->active = 1;
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
}
static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
@@ -2102,11 +2338,12 @@
{
unsigned sz = 0;
struct dm_snapshot *snap = ti->private;
+ unsigned num_features;
switch (type) {
case STATUSTYPE_INFO:
- mutex_lock(&snap->lock);
+ down_write(&snap->lock);
if (!snap->valid)
DMEMIT("Invalid");
@@ -2131,7 +2368,7 @@
DMEMIT("Unknown");
}
- mutex_unlock(&snap->lock);
+ up_write(&snap->lock);
break;
@@ -2142,8 +2379,16 @@
* make sense.
*/
DMEMIT("%s %s", snap->origin->name, snap->cow->name);
- snap->store->type->status(snap->store, type, result + sz,
- maxlen - sz);
+ sz += snap->store->type->status(snap->store, type, result + sz,
+ maxlen - sz);
+ num_features = snap->discard_zeroes_cow + snap->discard_passdown_origin;
+ if (num_features) {
+ DMEMIT(" %u", num_features);
+ if (snap->discard_zeroes_cow)
+ DMEMIT(" discard_zeroes_cow");
+ if (snap->discard_passdown_origin)
+ DMEMIT(" discard_passdown_origin");
+ }
break;
}
}
@@ -2162,6 +2407,26 @@
return r;
}
+static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+ struct dm_snapshot *snap = ti->private;
+
+ if (snap->discard_zeroes_cow) {
+ struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
+
+ down_read(&_origins_lock);
+
+ (void) __find_snapshots_sharing_cow(snap, &snap_src, &snap_dest, NULL);
+ if (snap_src && snap_dest)
+ snap = snap_src;
+
+ /* All discards are split on chunk_size boundary */
+ limits->discard_granularity = snap->store->chunk_size;
+ limits->max_discard_sectors = snap->store->chunk_size;
+
+ up_read(&_origins_lock);
+ }
+}
/*-----------------------------------------------------------------
* Origin methods
@@ -2183,9 +2448,10 @@
int r = DM_MAPIO_REMAPPED;
struct dm_snapshot *snap;
struct dm_exception *e;
- struct dm_snap_pending_exception *pe;
+ struct dm_snap_pending_exception *pe, *pe2;
struct dm_snap_pending_exception *pe_to_start_now = NULL;
struct dm_snap_pending_exception *pe_to_start_last = NULL;
+ struct dm_exception_table_lock lock;
chunk_t chunk;
/* Do all the snapshots on this origin */
@@ -2197,52 +2463,59 @@
if (dm_target_is_snapshot_merge(snap->ti))
continue;
- mutex_lock(&snap->lock);
-
- /* Only deal with valid and active snapshots */
- if (!snap->valid || !snap->active)
- goto next_snapshot;
-
/* Nothing to do if writing beyond end of snapshot */
if (sector >= dm_table_get_size(snap->ti->table))
- goto next_snapshot;
+ continue;
/*
* Remember, different snapshots can have
* different chunk sizes.
*/
chunk = sector_to_chunk(snap->store, sector);
+ dm_exception_table_lock_init(snap, chunk, &lock);
- /*
- * Check exception table to see if block
- * is already remapped in this snapshot
- * and trigger an exception if not.
- */
- e = dm_lookup_exception(&snap->complete, chunk);
- if (e)
+ down_read(&snap->lock);
+ dm_exception_table_lock(&lock);
+
+ /* Only deal with valid and active snapshots */
+ if (!snap->valid || !snap->active)
goto next_snapshot;
pe = __lookup_pending_exception(snap, chunk);
if (!pe) {
- mutex_unlock(&snap->lock);
- pe = alloc_pending_exception(snap);
- mutex_lock(&snap->lock);
-
- if (!snap->valid) {
- free_pending_exception(pe);
- goto next_snapshot;
- }
-
+ /*
+ * Check exception table to see if block is already
+ * remapped in this snapshot and trigger an exception
+ * if not.
+ */
e = dm_lookup_exception(&snap->complete, chunk);
- if (e) {
- free_pending_exception(pe);
+ if (e)
goto next_snapshot;
- }
- pe = __find_pending_exception(snap, pe, chunk);
- if (!pe) {
- __invalidate_snapshot(snap, -ENOMEM);
- goto next_snapshot;
+ dm_exception_table_unlock(&lock);
+ pe = alloc_pending_exception(snap);
+ dm_exception_table_lock(&lock);
+
+ pe2 = __lookup_pending_exception(snap, chunk);
+
+ if (!pe2) {
+ e = dm_lookup_exception(&snap->complete, chunk);
+ if (e) {
+ free_pending_exception(pe);
+ goto next_snapshot;
+ }
+
+ pe = __insert_pending_exception(snap, pe, chunk);
+ if (!pe) {
+ dm_exception_table_unlock(&lock);
+ up_read(&snap->lock);
+
+ invalidate_snapshot(snap, -ENOMEM);
+ continue;
+ }
+ } else {
+ free_pending_exception(pe);
+ pe = pe2;
}
}
@@ -2269,7 +2542,8 @@
}
next_snapshot:
- mutex_unlock(&snap->lock);
+ dm_exception_table_unlock(&lock);
+ up_read(&snap->lock);
if (pe_to_start_now) {
start_copy(pe_to_start_now);
@@ -2423,13 +2697,6 @@
return do_origin(o->dev, bio, true);
}
-static long origin_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
-{
- DMWARN("device does not support dax.");
- return -EIO;
-}
-
/*
* Set the target "max_io_len" field to the minimum of all the snapshots'
* chunk sizes.
@@ -2489,12 +2756,11 @@
.postsuspend = origin_postsuspend,
.status = origin_status,
.iterate_devices = origin_iterate_devices,
- .direct_access = origin_dax_direct_access,
};
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 15, 0},
+ .version = {1, 16, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
@@ -2504,11 +2770,12 @@
.resume = snapshot_resume,
.status = snapshot_status,
.iterate_devices = snapshot_iterate_devices,
+ .io_hints = snapshot_io_hints,
};
static struct target_type merge_target = {
.name = dm_snapshot_merge_target_name,
- .version = {1, 4, 0},
+ .version = {1, 5, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
@@ -2519,6 +2786,7 @@
.resume = snapshot_merge_resume,
.status = snapshot_status,
.iterate_devices = snapshot_iterate_devices,
+ .io_hints = snapshot_io_hints,
};
static int __init dm_snapshot_init(void)
--
Gitblit v1.6.2