2024-05-10 ee930fffee469d076998274a2ca55e13dc1efb67
--- a/kernel/fs/xfs/xfs_refcount_item.c
+++ b/kernel/fs/xfs/xfs_refcount_item.c
@@ -14,28 +14,31 @@
 #include "xfs_defer.h"
 #include "xfs_trans.h"
 #include "xfs_trans_priv.h"
-#include "xfs_buf_item.h"
 #include "xfs_refcount_item.h"
 #include "xfs_log.h"
 #include "xfs_refcount.h"
-
+#include "xfs_error.h"
+#include "xfs_log_priv.h"
+#include "xfs_log_recover.h"
 
 kmem_zone_t	*xfs_cui_zone;
 kmem_zone_t	*xfs_cud_zone;
+
+static const struct xfs_item_ops xfs_cui_item_ops;
 
 static inline struct xfs_cui_log_item *CUI_ITEM(struct xfs_log_item *lip)
 {
 	return container_of(lip, struct xfs_cui_log_item, cui_item);
 }
 
-void
+STATIC void
 xfs_cui_item_free(
 	struct xfs_cui_log_item	*cuip)
 {
 	if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
 		kmem_free(cuip);
 	else
-		kmem_zone_free(xfs_cui_zone, cuip);
+		kmem_cache_free(xfs_cui_zone, cuip);
 }
 
 /*
@@ -45,13 +48,13 @@
  * committed vs unpin operations in bulk insert operations. Hence the reference
  * count to ensure only the last caller frees the CUI.
  */
-void
+STATIC void
 xfs_cui_release(
 	struct xfs_cui_log_item	*cuip)
 {
 	ASSERT(atomic_read(&cuip->cui_refcount) > 0);
 	if (atomic_dec_and_test(&cuip->cui_refcount)) {
-		xfs_trans_ail_remove(&cuip->cui_item, SHUTDOWN_LOG_IO_ERROR);
+		xfs_trans_ail_delete(&cuip->cui_item, SHUTDOWN_LOG_IO_ERROR);
 		xfs_cui_item_free(cuip);
 	}
 }
@@ -95,15 +98,6 @@
 }
 
 /*
- * Pinning has no meaning for an cui item, so just return.
- */
-STATIC void
-xfs_cui_item_pin(
-	struct xfs_log_item	*lip)
-{
-}
-
-/*
  * The unpin operation is the last place an CUI is manipulated in the log. It is
  * either inserted in the AIL or aborted in the event of a log I/O error. In
  * either case, the CUI transaction has been successfully committed to make it
@@ -122,77 +116,21 @@
 }
 
 /*
- * CUI items have no locking or pushing. However, since CUIs are pulled from
- * the AIL when their corresponding CUDs are committed to disk, their situation
- * is very similar to being pinned. Return XFS_ITEM_PINNED so that the caller
- * will eventually flush the log. This should help in getting the CUI out of
- * the AIL.
- */
-STATIC uint
-xfs_cui_item_push(
-	struct xfs_log_item	*lip,
-	struct list_head	*buffer_list)
-{
-	return XFS_ITEM_PINNED;
-}
-
-/*
  * The CUI has been either committed or aborted if the transaction has been
  * cancelled. If the transaction was cancelled, an CUD isn't going to be
  * constructed and thus we free the CUI here directly.
  */
 STATIC void
-xfs_cui_item_unlock(
+xfs_cui_item_release(
 	struct xfs_log_item	*lip)
 {
-	if (test_bit(XFS_LI_ABORTED, &lip->li_flags))
-		xfs_cui_release(CUI_ITEM(lip));
+	xfs_cui_release(CUI_ITEM(lip));
 }
-
-/*
- * The CUI is logged only once and cannot be moved in the log, so simply return
- * the lsn at which it's been logged.
- */
-STATIC xfs_lsn_t
-xfs_cui_item_committed(
-	struct xfs_log_item	*lip,
-	xfs_lsn_t		lsn)
-{
-	return lsn;
-}
-
-/*
- * The CUI dependency tracking op doesn't do squat. It can't because
- * it doesn't know where the free extent is coming from. The dependency
- * tracking has to be handled by the "enclosing" metadata object. For
- * example, for inodes, the inode is locked throughout the extent freeing
- * so the dependency should be recorded there.
- */
-STATIC void
-xfs_cui_item_committing(
-	struct xfs_log_item	*lip,
-	xfs_lsn_t		lsn)
-{
-}
-
-/*
- * This is the ops vector shared by all cui log items.
- */
-static const struct xfs_item_ops xfs_cui_item_ops = {
-	.iop_size	= xfs_cui_item_size,
-	.iop_format	= xfs_cui_item_format,
-	.iop_pin	= xfs_cui_item_pin,
-	.iop_unpin	= xfs_cui_item_unpin,
-	.iop_unlock	= xfs_cui_item_unlock,
-	.iop_committed	= xfs_cui_item_committed,
-	.iop_push	= xfs_cui_item_push,
-	.iop_committing	= xfs_cui_item_committing,
-};
 
 /*
  * Allocate and initialize an cui item with the given number of extents.
  */
-struct xfs_cui_log_item *
+STATIC struct xfs_cui_log_item *
 xfs_cui_init(
 	struct xfs_mount		*mp,
 	uint				nextents)
@@ -203,9 +141,10 @@
 	ASSERT(nextents > 0);
 	if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
 		cuip = kmem_zalloc(xfs_cui_log_item_sizeof(nextents),
-				KM_SLEEP);
+				0);
 	else
-		cuip = kmem_zone_zalloc(xfs_cui_zone, KM_SLEEP);
+		cuip = kmem_cache_zalloc(xfs_cui_zone,
+				GFP_KERNEL | __GFP_NOFAIL);
 
 	xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops);
 	cuip->cui_format.cui_nextents = nextents;
@@ -254,152 +193,255 @@
 }
 
 /*
- * Pinning has no meaning for an cud item, so just return.
- */
-STATIC void
-xfs_cud_item_pin(
-	struct xfs_log_item	*lip)
-{
-}
-
-/*
- * Since pinning has no meaning for an cud item, unpinning does
- * not either.
- */
-STATIC void
-xfs_cud_item_unpin(
-	struct xfs_log_item	*lip,
-	int			remove)
-{
-}
-
-/*
- * There isn't much you can do to push on an cud item. It is simply stuck
- * waiting for the log to be flushed to disk.
- */
-STATIC uint
-xfs_cud_item_push(
-	struct xfs_log_item	*lip,
-	struct list_head	*buffer_list)
-{
-	return XFS_ITEM_PINNED;
-}
-
-/*
  * The CUD is either committed or aborted if the transaction is cancelled. If
  * the transaction is cancelled, drop our reference to the CUI and free the
  * CUD.
  */
 STATIC void
-xfs_cud_item_unlock(
+xfs_cud_item_release(
 	struct xfs_log_item	*lip)
 {
 	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);
 
-	if (test_bit(XFS_LI_ABORTED, &lip->li_flags)) {
-		xfs_cui_release(cudp->cud_cuip);
-		kmem_zone_free(xfs_cud_zone, cudp);
-	}
-}
-
-/*
- * When the cud item is committed to disk, all we need to do is delete our
- * reference to our partner cui item and then free ourselves. Since we're
- * freeing ourselves we must return -1 to keep the transaction code from
- * further referencing this item.
- */
-STATIC xfs_lsn_t
-xfs_cud_item_committed(
-	struct xfs_log_item	*lip,
-	xfs_lsn_t		lsn)
-{
-	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);
-
-	/*
-	 * Drop the CUI reference regardless of whether the CUD has been
-	 * aborted. Once the CUD transaction is constructed, it is the sole
-	 * responsibility of the CUD to release the CUI (even if the CUI is
-	 * aborted due to log I/O error).
-	 */
 	xfs_cui_release(cudp->cud_cuip);
-	kmem_zone_free(xfs_cud_zone, cudp);
-
-	return (xfs_lsn_t)-1;
+	kmem_cache_free(xfs_cud_zone, cudp);
 }
 
-/*
- * The CUD dependency tracking op doesn't do squat. It can't because
- * it doesn't know where the free extent is coming from. The dependency
- * tracking has to be handled by the "enclosing" metadata object. For
- * example, for inodes, the inode is locked throughout the extent freeing
- * so the dependency should be recorded there.
- */
-STATIC void
-xfs_cud_item_committing(
-	struct xfs_log_item	*lip,
-	xfs_lsn_t		lsn)
-{
-}
-
-/*
- * This is the ops vector shared by all cud log items.
- */
 static const struct xfs_item_ops xfs_cud_item_ops = {
+	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED,
 	.iop_size	= xfs_cud_item_size,
 	.iop_format	= xfs_cud_item_format,
-	.iop_pin	= xfs_cud_item_pin,
-	.iop_unpin	= xfs_cud_item_unpin,
-	.iop_unlock	= xfs_cud_item_unlock,
-	.iop_committed	= xfs_cud_item_committed,
-	.iop_push	= xfs_cud_item_push,
-	.iop_committing	= xfs_cud_item_committing,
+	.iop_release	= xfs_cud_item_release,
 };
 
-/*
- * Allocate and initialize an cud item with the given number of extents.
- */
-struct xfs_cud_log_item *
-xfs_cud_init(
-	struct xfs_mount		*mp,
+static struct xfs_cud_log_item *
+xfs_trans_get_cud(
+	struct xfs_trans		*tp,
 	struct xfs_cui_log_item		*cuip)
-
 {
-	struct xfs_cud_log_item	*cudp;
+	struct xfs_cud_log_item		*cudp;
 
-	cudp = kmem_zone_zalloc(xfs_cud_zone, KM_SLEEP);
-	xfs_log_item_init(mp, &cudp->cud_item, XFS_LI_CUD, &xfs_cud_item_ops);
+	cudp = kmem_cache_zalloc(xfs_cud_zone, GFP_KERNEL | __GFP_NOFAIL);
+	xfs_log_item_init(tp->t_mountp, &cudp->cud_item, XFS_LI_CUD,
+			  &xfs_cud_item_ops);
 	cudp->cud_cuip = cuip;
 	cudp->cud_format.cud_cui_id = cuip->cui_format.cui_id;
 
+	xfs_trans_add_item(tp, &cudp->cud_item);
 	return cudp;
 }
+
+/*
+ * Finish an refcount update and log it to the CUD. Note that the
+ * transaction is marked dirty regardless of whether the refcount
+ * update succeeds or fails to support the CUI/CUD lifecycle rules.
+ */
+static int
+xfs_trans_log_finish_refcount_update(
+	struct xfs_trans		*tp,
+	struct xfs_cud_log_item		*cudp,
+	enum xfs_refcount_intent_type	type,
+	xfs_fsblock_t			startblock,
+	xfs_extlen_t			blockcount,
+	xfs_fsblock_t			*new_fsb,
+	xfs_extlen_t			*new_len,
+	struct xfs_btree_cur		**pcur)
+{
+	int				error;
+
+	error = xfs_refcount_finish_one(tp, type, startblock,
+			blockcount, new_fsb, new_len, pcur);
+
+	/*
+	 * Mark the transaction dirty, even on error. This ensures the
+	 * transaction is aborted, which:
+	 *
+	 * 1.) releases the CUI and frees the CUD
+	 * 2.) shuts down the filesystem
+	 */
+	tp->t_flags |= XFS_TRANS_DIRTY;
+	set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);
+
+	return error;
+}
+
+/* Sort refcount intents by AG. */
+static int
+xfs_refcount_update_diff_items(
+	void				*priv,
+	struct list_head		*a,
+	struct list_head		*b)
+{
+	struct xfs_mount		*mp = priv;
+	struct xfs_refcount_intent	*ra;
+	struct xfs_refcount_intent	*rb;
+
+	ra = container_of(a, struct xfs_refcount_intent, ri_list);
+	rb = container_of(b, struct xfs_refcount_intent, ri_list);
+	return XFS_FSB_TO_AGNO(mp, ra->ri_startblock) -
+		XFS_FSB_TO_AGNO(mp, rb->ri_startblock);
+}
+
+/* Set the phys extent flags for this reverse mapping. */
+static void
+xfs_trans_set_refcount_flags(
+	struct xfs_phys_extent		*refc,
+	enum xfs_refcount_intent_type	type)
+{
+	refc->pe_flags = 0;
+	switch (type) {
+	case XFS_REFCOUNT_INCREASE:
+	case XFS_REFCOUNT_DECREASE:
+	case XFS_REFCOUNT_ALLOC_COW:
+	case XFS_REFCOUNT_FREE_COW:
+		refc->pe_flags |= type;
+		break;
+	default:
+		ASSERT(0);
+	}
+}
+
+/* Log refcount updates in the intent item. */
+STATIC void
+xfs_refcount_update_log_item(
+	struct xfs_trans		*tp,
+	struct xfs_cui_log_item		*cuip,
+	struct xfs_refcount_intent	*refc)
+{
+	uint				next_extent;
+	struct xfs_phys_extent		*ext;
+
+	tp->t_flags |= XFS_TRANS_DIRTY;
+	set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);
+
+	/*
+	 * atomic_inc_return gives us the value after the increment;
+	 * we want to use it as an array index so we need to subtract 1 from
+	 * it.
+	 */
+	next_extent = atomic_inc_return(&cuip->cui_next_extent) - 1;
+	ASSERT(next_extent < cuip->cui_format.cui_nextents);
+	ext = &cuip->cui_format.cui_extents[next_extent];
+	ext->pe_startblock = refc->ri_startblock;
+	ext->pe_len = refc->ri_blockcount;
+	xfs_trans_set_refcount_flags(ext, refc->ri_type);
+}
+
+static struct xfs_log_item *
+xfs_refcount_update_create_intent(
+	struct xfs_trans		*tp,
+	struct list_head		*items,
+	unsigned int			count,
+	bool				sort)
+{
+	struct xfs_mount		*mp = tp->t_mountp;
+	struct xfs_cui_log_item		*cuip = xfs_cui_init(mp, count);
+	struct xfs_refcount_intent	*refc;
+
+	ASSERT(count > 0);
+
+	xfs_trans_add_item(tp, &cuip->cui_item);
+	if (sort)
+		list_sort(mp, items, xfs_refcount_update_diff_items);
+	list_for_each_entry(refc, items, ri_list)
+		xfs_refcount_update_log_item(tp, cuip, refc);
+	return &cuip->cui_item;
+}
+
+/* Get an CUD so we can process all the deferred refcount updates. */
+static struct xfs_log_item *
+xfs_refcount_update_create_done(
+	struct xfs_trans		*tp,
+	struct xfs_log_item		*intent,
+	unsigned int			count)
+{
+	return &xfs_trans_get_cud(tp, CUI_ITEM(intent))->cud_item;
+}
+
+/* Process a deferred refcount update. */
+STATIC int
+xfs_refcount_update_finish_item(
+	struct xfs_trans		*tp,
+	struct xfs_log_item		*done,
+	struct list_head		*item,
+	struct xfs_btree_cur		**state)
+{
+	struct xfs_refcount_intent	*refc;
+	xfs_fsblock_t			new_fsb;
+	xfs_extlen_t			new_aglen;
+	int				error;
+
+	refc = container_of(item, struct xfs_refcount_intent, ri_list);
+	error = xfs_trans_log_finish_refcount_update(tp, CUD_ITEM(done),
+			refc->ri_type, refc->ri_startblock, refc->ri_blockcount,
+			&new_fsb, &new_aglen, state);
+
+	/* Did we run out of reservation? Requeue what we didn't finish. */
+	if (!error && new_aglen > 0) {
+		ASSERT(refc->ri_type == XFS_REFCOUNT_INCREASE ||
+		       refc->ri_type == XFS_REFCOUNT_DECREASE);
+		refc->ri_startblock = new_fsb;
+		refc->ri_blockcount = new_aglen;
+		return -EAGAIN;
+	}
+	kmem_free(refc);
+	return error;
+}
+
+/* Abort all pending CUIs. */
+STATIC void
+xfs_refcount_update_abort_intent(
+	struct xfs_log_item		*intent)
+{
+	xfs_cui_release(CUI_ITEM(intent));
+}
+
+/* Cancel a deferred refcount update. */
+STATIC void
+xfs_refcount_update_cancel_item(
+	struct list_head		*item)
+{
+	struct xfs_refcount_intent	*refc;
+
+	refc = container_of(item, struct xfs_refcount_intent, ri_list);
+	kmem_free(refc);
+}
+
+const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
+	.max_items	= XFS_CUI_MAX_FAST_EXTENTS,
+	.create_intent	= xfs_refcount_update_create_intent,
+	.abort_intent	= xfs_refcount_update_abort_intent,
+	.create_done	= xfs_refcount_update_create_done,
+	.finish_item	= xfs_refcount_update_finish_item,
+	.finish_cleanup = xfs_refcount_finish_one_cleanup,
+	.cancel_item	= xfs_refcount_update_cancel_item,
+};
 
 /*
  * Process a refcount update intent item that was recovered from the log.
  * We need to update the refcountbt.
  */
-int
-xfs_cui_recover(
-	struct xfs_trans		*parent_tp,
-	struct xfs_cui_log_item		*cuip)
+STATIC int
+xfs_cui_item_recover(
	struct xfs_log_item		*lip,
+	struct list_head		*capture_list)
 {
-	int				i;
-	int				error = 0;
-	unsigned int			refc_type;
+	struct xfs_bmbt_irec		irec;
+	struct xfs_cui_log_item		*cuip = CUI_ITEM(lip);
 	struct xfs_phys_extent		*refc;
-	xfs_fsblock_t			startblock_fsb;
-	bool				op_ok;
 	struct xfs_cud_log_item		*cudp;
 	struct xfs_trans		*tp;
 	struct xfs_btree_cur		*rcur = NULL;
-	enum xfs_refcount_intent_type	type;
+	struct xfs_mount		*mp = lip->li_mountp;
+	xfs_fsblock_t			startblock_fsb;
 	xfs_fsblock_t			new_fsb;
 	xfs_extlen_t			new_len;
-	struct xfs_bmbt_irec		irec;
+	unsigned int			refc_type;
+	bool				op_ok;
 	bool				requeue_only = false;
-	struct xfs_mount		*mp = parent_tp->t_mountp;
-
-	ASSERT(!test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags));
+	enum xfs_refcount_intent_type	type;
+	int				i;
+	int				error = 0;
 
 	/*
 	 * First check the validity of the extents described by the
@@ -425,15 +467,8 @@
 		    refc->pe_len == 0 ||
 		    startblock_fsb >= mp->m_sb.sb_dblocks ||
 		    refc->pe_len >= mp->m_sb.sb_agblocks ||
-		    (refc->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS)) {
-			/*
-			 * This will pull the CUI from the AIL and
-			 * free the memory associated with it.
-			 */
-			set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
-			xfs_cui_release(cuip);
-			return -EIO;
-		}
+		    (refc->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS))
+			return -EFSCORRUPTED;
 	}
 
 	/*
@@ -444,7 +479,7 @@
 	 * transaction. Normally, any work that needs to be deferred
 	 * gets attached to the same defer_ops that scheduled the
 	 * refcount update. However, we're in log recovery here, so we
-	 * we use the passed in defer_ops and to finish up any work that
+	 * use the passed in defer_ops and to finish up any work that
 	 * doesn't fit. We need to reserve enough blocks to handle a
 	 * full btree split on either end of the refcount range.
 	 */
@@ -452,12 +487,7 @@
 			mp->m_refc_maxlevels * 2, 0, XFS_TRANS_RESERVE, &tp);
 	if (error)
 		return error;
-	/*
-	 * Recovery stashes all deferred ops during intent processing and
-	 * finishes them on completion. Transfer current dfops state to this
-	 * transaction and transfer the result back before we return.
-	 */
-	xfs_defer_move(tp, parent_tp);
+
 	cudp = xfs_trans_get_cud(tp, cuip);
 
 	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
@@ -471,6 +501,7 @@
 			type = refc_type;
 			break;
 		default:
+			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
 			error = -EFSCORRUPTED;
 			goto abort_error;
 		}
@@ -490,39 +521,174 @@
 			irec.br_blockcount = new_len;
 			switch (type) {
 			case XFS_REFCOUNT_INCREASE:
-				error = xfs_refcount_increase_extent(tp, &irec);
+				xfs_refcount_increase_extent(tp, &irec);
 				break;
 			case XFS_REFCOUNT_DECREASE:
-				error = xfs_refcount_decrease_extent(tp, &irec);
+				xfs_refcount_decrease_extent(tp, &irec);
 				break;
 			case XFS_REFCOUNT_ALLOC_COW:
-				error = xfs_refcount_alloc_cow_extent(tp,
+				xfs_refcount_alloc_cow_extent(tp,
 						irec.br_startblock,
 						irec.br_blockcount);
 				break;
 			case XFS_REFCOUNT_FREE_COW:
-				error = xfs_refcount_free_cow_extent(tp,
+				xfs_refcount_free_cow_extent(tp,
 						irec.br_startblock,
 						irec.br_blockcount);
 				break;
 			default:
 				ASSERT(0);
 			}
-			if (error)
-				goto abort_error;
 			requeue_only = true;
 		}
 	}
 
 	xfs_refcount_finish_one_cleanup(tp, rcur, error);
-	set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
-	xfs_defer_move(parent_tp, tp);
-	error = xfs_trans_commit(tp);
-	return error;
+	return xfs_defer_ops_capture_and_commit(tp, NULL, capture_list);
 
 abort_error:
 	xfs_refcount_finish_one_cleanup(tp, rcur, error);
-	xfs_defer_move(parent_tp, tp);
 	xfs_trans_cancel(tp);
 	return error;
 }
+
+STATIC bool
+xfs_cui_item_match(
+	struct xfs_log_item	*lip,
+	uint64_t		intent_id)
+{
+	return CUI_ITEM(lip)->cui_format.cui_id == intent_id;
+}
+
+/* Relog an intent item to push the log tail forward. */
+static struct xfs_log_item *
+xfs_cui_item_relog(
+	struct xfs_log_item	*intent,
+	struct xfs_trans	*tp)
+{
+	struct xfs_cud_log_item	*cudp;
+	struct xfs_cui_log_item	*cuip;
+	struct xfs_phys_extent	*extp;
+	unsigned int		count;
+
+	count = CUI_ITEM(intent)->cui_format.cui_nextents;
+	extp = CUI_ITEM(intent)->cui_format.cui_extents;
+
+	tp->t_flags |= XFS_TRANS_DIRTY;
+	cudp = xfs_trans_get_cud(tp, CUI_ITEM(intent));
+	set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);
+
+	cuip = xfs_cui_init(tp->t_mountp, count);
+	memcpy(cuip->cui_format.cui_extents, extp, count * sizeof(*extp));
+	atomic_set(&cuip->cui_next_extent, count);
+	xfs_trans_add_item(tp, &cuip->cui_item);
+	set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);
+	return &cuip->cui_item;
+}
+
+static const struct xfs_item_ops xfs_cui_item_ops = {
+	.iop_size	= xfs_cui_item_size,
+	.iop_format	= xfs_cui_item_format,
+	.iop_unpin	= xfs_cui_item_unpin,
+	.iop_release	= xfs_cui_item_release,
+	.iop_recover	= xfs_cui_item_recover,
+	.iop_match	= xfs_cui_item_match,
+	.iop_relog	= xfs_cui_item_relog,
+};
+
+/*
+ * Copy an CUI format buffer from the given buf, and into the destination
+ * CUI format structure. The CUI/CUD items were designed not to need any
+ * special alignment handling.
+ */
+static int
+xfs_cui_copy_format(
+	struct xfs_log_iovec		*buf,
+	struct xfs_cui_log_format	*dst_cui_fmt)
+{
+	struct xfs_cui_log_format	*src_cui_fmt;
+	uint				len;
+
+	src_cui_fmt = buf->i_addr;
+	len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents);
+
+	if (buf->i_len == len) {
+		memcpy(dst_cui_fmt, src_cui_fmt, len);
+		return 0;
+	}
+	XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
+	return -EFSCORRUPTED;
+}
+
+/*
+ * This routine is called to create an in-core extent refcount update
+ * item from the cui format structure which was logged on disk.
+ * It allocates an in-core cui, copies the extents from the format
+ * structure into it, and adds the cui to the AIL with the given
+ * LSN.
+ */
+STATIC int
+xlog_recover_cui_commit_pass2(
+	struct xlog			*log,
+	struct list_head		*buffer_list,
+	struct xlog_recover_item	*item,
+	xfs_lsn_t			lsn)
+{
+	int				error;
+	struct xfs_mount		*mp = log->l_mp;
+	struct xfs_cui_log_item		*cuip;
+	struct xfs_cui_log_format	*cui_formatp;
+
+	cui_formatp = item->ri_buf[0].i_addr;
+
+	cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
+	error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format);
+	if (error) {
+		xfs_cui_item_free(cuip);
+		return error;
+	}
+	atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
+	/*
+	 * Insert the intent into the AIL directly and drop one reference so
+	 * that finishing or canceling the work will drop the other.
+	 */
+	xfs_trans_ail_insert(log->l_ailp, &cuip->cui_item, lsn);
+	xfs_cui_release(cuip);
+	return 0;
+}
+
+const struct xlog_recover_item_ops xlog_cui_item_ops = {
+	.item_type		= XFS_LI_CUI,
+	.commit_pass2		= xlog_recover_cui_commit_pass2,
+};
+
+/*
+ * This routine is called when an CUD format structure is found in a committed
+ * transaction in the log. Its purpose is to cancel the corresponding CUI if it
+ * was still in the log. To do this it searches the AIL for the CUI with an id
+ * equal to that in the CUD format structure. If we find it we drop the CUD
+ * reference, which removes the CUI from the AIL and frees it.
+ */
+STATIC int
+xlog_recover_cud_commit_pass2(
+	struct xlog			*log,
+	struct list_head		*buffer_list,
+	struct xlog_recover_item	*item,
+	xfs_lsn_t			lsn)
+{
+	struct xfs_cud_log_format	*cud_formatp;
+
+	cud_formatp = item->ri_buf[0].i_addr;
+	if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
+		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
+		return -EFSCORRUPTED;
+	}
+
+	xlog_recover_release_intent(log, XFS_LI_CUI, cud_formatp->cud_cui_id);
+	return 0;
+}
+
+const struct xlog_recover_item_ops xlog_cud_item_ops = {
+	.item_type		= XFS_LI_CUD,
+	.commit_pass2		= xlog_recover_cud_commit_pass2,
+};
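
The defer-ops vector added by this patch follows a fixed lifecycle: log an intent item (CUI), do the work in ->finish_item, requeue with -EAGAIN when the transaction reservation runs out (as xfs_refcount_update_finish_item does via new_fsb/new_aglen), and log a done item (CUD) once everything is finished. Below is a minimal user-space sketch of that loop, for illustration only; the toy_* names are hypothetical stand-ins and not part of the patch or the kernel API.

/*
 * Illustrative sketch -- not kernel code. Models the intent/done
 * lifecycle: log intent, finish work in bounded batches, requeue on
 * -EAGAIN, then log the done item and release the intent.
 */
#include <errno.h>
#include <stdio.h>

struct toy_intent {
	unsigned int	remaining;	/* units of work still pending */
};

/* Model of ->finish_item: do a bounded amount of work per transaction. */
static int
toy_finish_item(
	struct toy_intent	*intent,
	unsigned int		batch)
{
	unsigned int	done = batch < intent->remaining ?
					batch : intent->remaining;

	intent->remaining -= done;
	if (intent->remaining > 0)
		return -EAGAIN;		/* partial progress: requeue */
	return 0;
}

int
main(void)
{
	struct toy_intent	intent = { .remaining = 5 };
	int			error;

	printf("log intent item\n");		/* like logging the CUI */
	do {
		error = toy_finish_item(&intent, 2);
		printf("finish_item: error=%d remaining=%u\n",
				error, intent.remaining);
	} while (error == -EAGAIN);
	printf("log done item, release intent\n");	/* like the CUD */
	return error;
}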