2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/md/dm-core.h
@@ -11,7 +11,9 @@
 
 #include <linux/kthread.h>
 #include <linux/ktime.h>
+#include <linux/genhd.h>
 #include <linux/blk-mq.h>
+#include <linux/keyslot-manager.h>
 
 #include <trace/events/block.h>
 
@@ -25,9 +27,11 @@
 };
 
 /*
- * DM core internal structure that used directly by dm.c and dm-rq.c
- * DM targets must _not_ deference a mapped_device to directly access its members!
+ * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
+ * DM targets must _not_ deference a mapped_device or dm_table to directly
+ * access their members!
  */
+
 struct mapped_device {
 	struct mutex suspend_lock;
 
@@ -65,7 +69,6 @@
 	 */
 	struct work_struct work;
 	wait_queue_head_t wait;
-	atomic_t pending[2];
 	spinlock_t deferred_lock;
 	struct bio_list deferred;
 
@@ -94,11 +97,6 @@
 	 */
 	struct workqueue_struct *wq;
 
-	/*
-	 * freeze/thaw support require holding onto a super block
-	 */
-	struct super_block *frozen_sb;
-
 	/* forced geometry settings */
 	struct hd_geometry geometry;
 
@@ -107,33 +105,76 @@
 
 	struct block_device *bdev;
 
-	/* zero-length flush that will be cloned and submitted to targets */
-	struct bio flush_bio;
+	int swap_bios;
+	struct semaphore swap_bios_semaphore;
+	struct mutex swap_bios_lock;
 
 	struct dm_stats stats;
 
-	struct kthread_worker kworker;
-	struct task_struct *kworker_task;
-
-	/* for request-based merge heuristic in dm_request_fn() */
-	unsigned seq_rq_merge_deadline_usecs;
-	int last_rq_rw;
-	sector_t last_rq_pos;
-	ktime_t last_rq_start_time;
-
 	/* for blk-mq request-based DM support */
 	struct blk_mq_tag_set *tag_set;
-	bool use_blk_mq:1;
 	bool init_tio_pdu:1;
 
 	struct srcu_struct io_barrier;
 };
 
-int md_in_flight(struct mapped_device *md);
 void disable_discard(struct mapped_device *md);
 void disable_write_same(struct mapped_device *md);
 void disable_write_zeroes(struct mapped_device *md);
 
+static inline sector_t dm_get_size(struct mapped_device *md)
+{
+	return get_capacity(md->disk);
+}
+
+static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
+{
+	return &md->stats;
+}
+
+#define DM_TABLE_MAX_DEPTH 16
+
+struct dm_table {
+	struct mapped_device *md;
+	enum dm_queue_mode type;
+
+	/* btree table */
+	unsigned int depth;
+	unsigned int counts[DM_TABLE_MAX_DEPTH];	/* in nodes */
+	sector_t *index[DM_TABLE_MAX_DEPTH];
+
+	unsigned int num_targets;
+	unsigned int num_allocated;
+	sector_t *highs;
+	struct dm_target *targets;
+
+	struct target_type *immutable_target_type;
+
+	bool integrity_supported:1;
+	bool singleton:1;
+	unsigned integrity_added:1;
+
+	/*
+	 * Indicates the rw permissions for the new logical
+	 * device.  This should be a combination of FMODE_READ
+	 * and FMODE_WRITE.
+	 */
+	fmode_t mode;
+
+	/* a list of devices used by this table */
+	struct list_head devices;
+
+	/* events get handed up using this callback */
+	void (*event_fn)(void *);
+	void *event_context;
+
+	struct dm_md_mempools *mempools;
+
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+	struct blk_keyslot_manager *ksm;
+#endif
+};
+
 static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
 {
 	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
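
Note on the swap_bios fields added in the last hunk: upstream they back a per-device cap on in-flight swap bios, a deadlock-avoidance measure when swapping to a dm device. Below is a minimal sketch of that pattern, written against the dm-core.h above; the helper names and the REQ_SWAP check are modeled on the upstream dm change and are illustrative, not declared in this header.

#include <linux/bio.h>
#include "dm-core.h"

/*
 * Sketch only: sema_init(&md->swap_bios_semaphore, md->swap_bios) is
 * assumed to happen at device creation time.
 */
static void start_swap_bios_acct(struct mapped_device *md, struct bio *bio)
{
	/* only swap I/O is throttled; everything else passes through */
	if (unlikely(bio->bi_opf & REQ_SWAP))
		down(&md->swap_bios_semaphore);	/* sleeps once md->swap_bios are in flight */
}

static void end_swap_bios_acct(struct mapped_device *md, struct bio *bio)
{
	if (unlikely(bio->bi_opf & REQ_SWAP))
		up(&md->swap_bios_semaphore);	/* release one slot */
}

The swap_bios_lock mutex then serializes any later resize of the limit, so the semaphore count and md->swap_bios cannot drift apart while bios are in flight.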
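
With struct dm_table now exposed in this header, the btree fields are worth a gloss: highs[] holds the last sector covered by each target, and index[l] points at the packed node keys for level l, with the bottom level indexing into highs. A sector-to-target lookup descends one node per level and ends with an index into targets[]. The sketch below is modeled on dm-table.c; KEYS_PER_NODE, get_child() and get_node() are assumptions taken from there, not declarations in this header.

#include <linux/cache.h>
#include "dm-core.h"

#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;	/* child selected by key k of node n */
}

static inline sector_t *get_node(struct dm_table *t, unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);	/* keys of node n at level l */
}

static struct dm_target *find_target_sketch(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		/* first key >= sector picks the child to descend into */
		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}

counts[] (nodes per level) and DM_TABLE_MAX_DEPTH only matter while the index is being built; the lookup itself touches depth, index[] and targets[].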