2024-05-10 9999e48639b3cecb08ffb37358bcba3b48161b29
--- a/kernel/fs/btrfs/delayed-ref.h
+++ b/kernel/fs/btrfs/delayed-ref.h
@@ -79,7 +79,7 @@
 	struct mutex mutex;
 
 	spinlock_t lock;
-	struct rb_root ref_tree;
+	struct rb_root_cached ref_tree;
 	/* accumulate add BTRFS_ADD_DELAYED_REF nodes to this ref_add_list. */
 	struct list_head ref_add_list;
 
@@ -101,17 +101,6 @@
 	 * on disk reference count + ref_mod is accurate.
 	 */
 	int ref_mod;
-
-	/*
-	 * For qgroup reserved space freeing.
-	 *
-	 * ref_root and reserved will be recorded after
-	 * BTRFS_ADD_DELAYED_EXTENT is called.
-	 * And will be used to free reserved qgroup space at
-	 * run_delayed_refs() time.
-	 */
-	u64 qgroup_ref_root;
-	u64 qgroup_reserved;
 
 	/*
 	 * when a new extent is allocated, it is just reserved in memory
@@ -148,7 +137,7 @@
 
 struct btrfs_delayed_ref_root {
 	/* head ref rbtree */
-	struct rb_root href_root;
+	struct rb_root_cached href_root;
 
 	/* dirty extent records */
 	struct rb_root dirty_extent_root;
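
Both ref_tree (per ref head) and href_root (per delayed-ref root) move from rb_root to rb_root_cached above, so the lowest-bytenr entry can be fetched without walking the left spine of the tree each time. Below is a minimal sketch, not part of this patch, of the insert/first pattern the .c side is then expected to follow; demo_node, demo_insert and demo_first are made-up names for illustration.

#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_node {
	struct rb_node node;
	u64 bytenr;
};

static struct rb_root_cached demo_root = RB_ROOT_CACHED;

static void demo_insert(struct demo_node *new)
{
	struct rb_node **p = &demo_root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	while (*p) {
		struct demo_node *cur = rb_entry(*p, struct demo_node, node);

		parent = *p;
		if (new->bytenr < cur->bytenr) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	rb_link_node(&new->node, parent, p);
	/* Also maintains the cached leftmost pointer. */
	rb_insert_color_cached(&new->node, &demo_root, leftmost);
}

/* O(1) "lowest bytenr" lookup, which is the point of the switch. */
static struct demo_node *demo_first(void)
{
	struct rb_node *n = rb_first_cached(&demo_root);

	return n ? rb_entry(n, struct demo_node, node) : NULL;
}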
@@ -187,6 +176,83 @@
 	u64 qgroup_to_skip;
 };
 
+enum btrfs_ref_type {
+	BTRFS_REF_NOT_SET,
+	BTRFS_REF_DATA,
+	BTRFS_REF_METADATA,
+	BTRFS_REF_LAST,
+};
+
+struct btrfs_data_ref {
+	/* For EXTENT_DATA_REF */
+
+	/* Root which refers to this data extent */
+	u64 ref_root;
+
+	/* Inode which refers to this data extent */
+	u64 ino;
+
+	/*
+	 * file_offset - extent_offset
+	 *
+	 * file_offset is the key.offset of the EXTENT_DATA key.
+	 * extent_offset is btrfs_file_extent_offset() of the EXTENT_DATA data.
+	 */
+	u64 offset;
+};
+
+struct btrfs_tree_ref {
+	/*
+	 * Level of this tree block
+	 *
+	 * Shared for skinny (TREE_BLOCK_REF) and normal tree ref.
+	 */
+	int level;
+
+	/*
+	 * Root which refers to this tree block.
+	 *
+	 * For TREE_BLOCK_REF (skinny metadata, either inline or keyed)
+	 */
+	u64 root;
+
+	/* For non-skinny metadata, no special member needed */
+};
+
+struct btrfs_ref {
+	enum btrfs_ref_type type;
+	int action;
+
+	/*
+	 * Whether this extent should go through qgroup record.
+	 *
+	 * Normally false, but for certain cases like delayed subtree scan,
+	 * setting this flag can hugely reduce qgroup overhead.
+	 */
+	bool skip_qgroup;
+
+	/*
+	 * Optional. For which root is this modification.
+	 * Mostly used for qgroup optimization.
+	 *
+	 * When unset, data/tree ref init code will populate it.
+	 * In certain cases, we're modifying reference for a different root.
+	 * E.g. COW fs tree blocks for balance.
+	 * In that case, tree_ref::root will be fs tree, but we're doing this
+	 * for reloc tree, then we should set @real_root to reloc tree.
+	 */
+	u64 real_root;
+	u64 bytenr;
+	u64 len;
+
+	/* Bytenr of the parent tree block */
+	u64 parent;
+	union {
+		struct btrfs_data_ref data_ref;
+		struct btrfs_tree_ref tree_ref;
+	};
+};
+
 extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
 extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
 extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
@@ -194,6 +260,38 @@
 
 int __init btrfs_delayed_ref_init(void);
 void __cold btrfs_delayed_ref_exit(void);
+
+static inline void btrfs_init_generic_ref(struct btrfs_ref *generic_ref,
+				int action, u64 bytenr, u64 len, u64 parent)
+{
+	generic_ref->action = action;
+	generic_ref->bytenr = bytenr;
+	generic_ref->len = len;
+	generic_ref->parent = parent;
+}
+
+static inline void btrfs_init_tree_ref(struct btrfs_ref *generic_ref,
+				int level, u64 root)
+{
+	/* If @real_root not set, use @root as fallback */
+	if (!generic_ref->real_root)
+		generic_ref->real_root = root;
+	generic_ref->tree_ref.level = level;
+	generic_ref->tree_ref.root = root;
+	generic_ref->type = BTRFS_REF_METADATA;
+}
+
+static inline void btrfs_init_data_ref(struct btrfs_ref *generic_ref,
+				u64 ref_root, u64 ino, u64 offset)
+{
+	/* If @real_root not set, use @root as fallback */
+	if (!generic_ref->real_root)
+		generic_ref->real_root = ref_root;
+	generic_ref->data_ref.ref_root = ref_root;
+	generic_ref->data_ref.ino = ino;
+	generic_ref->data_ref.offset = offset;
+	generic_ref->type = BTRFS_REF_DATA;
+}
 
 static inline struct btrfs_delayed_extent_op *
 btrfs_alloc_delayed_extent_op(void)
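
With the btrfs_ref structures and initializers in place, a caller builds a stack-allocated ref and hands it to the delayed-ref code. A hedged sketch of the expected tree-block pattern, using the reworked btrfs_add_delayed_tree_ref() prototype further below; trans, buf, root and parent are placeholders for the caller's own state, not names from this patch.

	struct btrfs_ref generic_ref = { 0 };	/* zeroed so the real_root fallback applies */
	int ret;

	btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_REF,
			       buf->start, buf->len, parent);
	btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf),
			    root->root_key.objectid);
	ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL);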
@@ -228,6 +326,16 @@
 	}
 }
 
+static inline u64 btrfs_ref_head_to_space_flags(
+				struct btrfs_delayed_ref_head *head_ref)
+{
+	if (head_ref->is_data)
+		return BTRFS_BLOCK_GROUP_DATA;
+	else if (head_ref->is_system)
+		return BTRFS_BLOCK_GROUP_SYSTEM;
+	return BTRFS_BLOCK_GROUP_METADATA;
+}
+
 static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *head)
 {
 	if (refcount_dec_and_test(&head->refs))
@@ -235,17 +343,12 @@
 }
 
 int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
-			       u64 bytenr, u64 num_bytes, u64 parent,
-			       u64 ref_root, int level, int action,
-			       struct btrfs_delayed_extent_op *extent_op,
-			       int *old_ref_mod, int *new_ref_mod);
+			       struct btrfs_ref *generic_ref,
+			       struct btrfs_delayed_extent_op *extent_op);
 int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
-			       u64 bytenr, u64 num_bytes,
-			       u64 parent, u64 ref_root,
-			       u64 owner, u64 offset, u64 reserved, int action,
-			       int *old_ref_mod, int *new_ref_mod);
-int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
-				struct btrfs_trans_handle *trans,
+			       struct btrfs_ref *generic_ref,
+			       u64 reserved);
+int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
 				u64 bytenr, u64 num_bytes,
 				struct btrfs_delayed_extent_op *extent_op);
 void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
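
For data extents only the qgroup-reserved byte count stays as a separate argument; everything else now travels in struct btrfs_ref. A hedged sketch of a hypothetical allocation-time caller; ins, owner, offset and ram_bytes are placeholder values, not names from this patch.

	struct btrfs_ref generic_ref = { 0 };
	int ret;

	btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT,
			       ins->objectid, ins->offset, 0);
	btrfs_init_data_ref(&generic_ref, root->root_key.objectid, owner, offset);
	ret = btrfs_add_delayed_data_ref(trans, &generic_ref, ram_bytes);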
@@ -255,19 +358,30 @@
 struct btrfs_delayed_ref_head *
 btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
 			    u64 bytenr);
-int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
+int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
 			   struct btrfs_delayed_ref_head *head);
 static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
 {
 	mutex_unlock(&head->mutex);
 }
+void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+			   struct btrfs_delayed_ref_head *head);
 
-
-struct btrfs_delayed_ref_head *
-btrfs_select_ref_head(struct btrfs_trans_handle *trans);
+struct btrfs_delayed_ref_head *btrfs_select_ref_head(
+		struct btrfs_delayed_ref_root *delayed_refs);
 
 int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq);
 
+void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr);
+void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans);
+int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
+				  enum btrfs_reserve_flush_enum flush);
+void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
+				       struct btrfs_block_rsv *src,
+				       u64 num_bytes);
+int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans);
+bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);
+
 /*
  * helper functions to cast a node into its container
  */
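
Taken together, the reworked prototypes above imply a consumer loop roughly like the sketch below. This is a hedged outline only; the real loop in extent-tree.c additionally merges refs, runs extent ops and handles qgroup accounting.

	struct btrfs_delayed_ref_root *delayed_refs =
			&trans->transaction->delayed_refs;
	struct btrfs_delayed_ref_head *head;
	int ret;

	spin_lock(&delayed_refs->lock);
	head = btrfs_select_ref_head(delayed_refs);
	if (!head) {
		spin_unlock(&delayed_refs->lock);
		return 0;
	}
	/* May return -EAGAIN if the head mutex is contended. */
	ret = btrfs_delayed_ref_lock(delayed_refs, head);
	spin_unlock(&delayed_refs->lock);
	if (ret)
		return ret;

	/* ... run the refs queued on @head ... */

	spin_lock(&delayed_refs->lock);
	btrfs_delete_ref_head(delayed_refs, head);
	spin_unlock(&delayed_refs->lock);
	btrfs_delayed_ref_unlock(head);
	btrfs_put_delayed_ref_head(head);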