2024-05-10 10ebd8556b7990499c896a550e3d416b444211e6
kernel/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -9,18 +9,15 @@
 #include "xfs_format.h"
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
-#include "xfs_bit.h"
 #include "xfs_sb.h"
 #include "xfs_mount.h"
-#include "xfs_defer.h"
-#include "xfs_inode.h"
 #include "xfs_trans.h"
 #include "xfs_alloc.h"
 #include "xfs_btree.h"
+#include "xfs_btree_staging.h"
 #include "xfs_rmap.h"
 #include "xfs_rmap_btree.h"
 #include "xfs_trace.h"
-#include "xfs_cksum.h"
 #include "xfs_error.h"
 #include "xfs_extent_busy.h"
 #include "xfs_ag_resv.h"
@@ -55,7 +52,7 @@
 	struct xfs_btree_cur	*cur)
 {
 	return xfs_rmapbt_init_cursor(cur->bc_mp, cur->bc_tp,
-			cur->bc_private.a.agbp, cur->bc_private.a.agno);
+			cur->bc_ag.agbp, cur->bc_ag.agno);
 }
 
 STATIC void
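
Note: the bc_private.a to bc_ag conversions throughout this patch assume that struct xfs_btree_cur now carries its per-AG state in a named union member. A minimal sketch of the assumed layout (not the full kernel definition):

	/* Sketch only: per-AG cursor state assumed by the bc_ag conversions. */
	struct xfs_btree_cur {
		struct xfs_trans	*bc_tp;	/* transaction we're in, if any */
		struct xfs_mount	*bc_mp;	/* filesystem mount struct */
		/* ... other cursor fields ... */
		union {
			struct {
				struct xfs_buf		*agbp;	/* AGF buffer */
				struct xbtree_afakeroot	*afake;	/* fake root for staging */
				xfs_agnumber_t		agno;	/* AG number */
			} bc_ag;	/* AG-rooted btrees: bno, cnt, ino, rmap, refcount */
			/* ... inode-rooted btrees use a bc_ino member ... */
		};
	};
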
@@ -64,18 +61,16 @@
 	union xfs_btree_ptr	*ptr,
 	int			inc)
 {
-	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
-	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
+	struct xfs_buf		*agbp = cur->bc_ag.agbp;
+	struct xfs_agf		*agf = agbp->b_addr;
 	int			btnum = cur->bc_btnum;
-	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);
+	struct xfs_perag	*pag = agbp->b_pag;
 
 	ASSERT(ptr->s != 0);
 
 	agf->agf_roots[btnum] = ptr->s;
 	be32_add_cpu(&agf->agf_levels[btnum], inc);
 	pag->pagf_levels[btnum] += inc;
-	xfs_perag_put(pag);
 
 	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
 }
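
Note: dropping the xfs_perag_get()/xfs_perag_put() pair relies on the AGF buffer holding a per-AG reference in b_pag for the buffer's own lifetime, so the cursor can borrow it while agbp is held. A before/after sketch of the pattern:

	/* Before: take (and later drop) a temporary perag reference. */
	struct xfs_perag *pag = xfs_perag_get(cur->bc_mp,
					      be32_to_cpu(agf->agf_seqno));
	pag->pagf_levels[btnum] += inc;
	xfs_perag_put(pag);

	/* After: borrow the reference the AGF buffer already owns. */
	struct xfs_perag *pag = agbp->b_pag;
	pag->pagf_levels[btnum] += inc;
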
@@ -87,25 +82,25 @@
 	union xfs_btree_ptr	*new,
 	int			*stat)
 {
-	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
+	struct xfs_buf		*agbp = cur->bc_ag.agbp;
+	struct xfs_agf		*agf = agbp->b_addr;
 	int			error;
 	xfs_agblock_t		bno;
 
 	/* Allocate the new block from the freelist. If we can't, give up. */
-	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
+	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_ag.agbp,
 				       &bno, 1);
 	if (error)
 		return error;
 
-	trace_xfs_rmapbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
+	trace_xfs_rmapbt_alloc_block(cur->bc_mp, cur->bc_ag.agno,
 			bno, 1);
 	if (bno == NULLAGBLOCK) {
 		*stat = 0;
 		return 0;
 	}
 
-	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1,
+	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agno, bno, 1,
 			false);
 
 	xfs_trans_agbtree_delta(cur->bc_tp, 1);
@@ -113,7 +108,7 @@
 	be32_add_cpu(&agf->agf_rmap_blocks, 1);
 	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
 
-	xfs_ag_resv_rmapbt_alloc(cur->bc_mp, cur->bc_private.a.agno);
+	xfs_ag_resv_rmapbt_alloc(cur->bc_mp, cur->bc_ag.agno);
 
 	*stat = 1;
 	return 0;
@@ -124,13 +119,14 @@
 	struct xfs_btree_cur	*cur,
 	struct xfs_buf		*bp)
 {
-	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
+	struct xfs_buf		*agbp = cur->bc_ag.agbp;
+	struct xfs_agf		*agf = agbp->b_addr;
+	struct xfs_perag	*pag;
 	xfs_agblock_t		bno;
 	int			error;
 
 	bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
-	trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
+	trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_ag.agno,
 			bno, 1);
 	be32_add_cpu(&agf->agf_rmap_blocks, -1);
 	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS);
@@ -142,8 +138,8 @@
 			      XFS_EXTENT_BUSY_SKIP_DISCARD);
 	xfs_trans_agbtree_delta(cur->bc_tp, -1);
 
-	xfs_ag_resv_rmapbt_free(cur->bc_mp, cur->bc_private.a.agno);
-
+	pag = cur->bc_ag.agbp->b_pag;
+	xfs_ag_resv_free_extent(pag, XFS_AG_RESV_RMAPBT, NULL, 1);
 	return 0;
 }
 
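
Note: xfs_ag_resv_rmapbt_free() goes away because it was only a perag lookup wrapped around xfs_ag_resv_free_extent(); with b_pag available the lookup is redundant. Assuming the pre-patch header, the removed helper was roughly:

	/* Sketch of the removed helper (pre-patch xfs_ag_resv.h). */
	static inline void
	xfs_ag_resv_rmapbt_free(
		struct xfs_mount	*mp,
		xfs_agnumber_t		agno)
	{
		struct xfs_perag	*pag;

		pag = xfs_perag_get(mp, agno);
		xfs_ag_resv_free_extent(pag, XFS_AG_RESV_RMAPBT, NULL, 1);
		xfs_perag_put(pag);
	}
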
@@ -219,9 +215,9 @@
 	struct xfs_btree_cur	*cur,
 	union xfs_btree_ptr	*ptr)
 {
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
+	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;
 
-	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
+	ASSERT(cur->bc_ag.agno == be32_to_cpu(agf->agf_seqno));
 
 	ptr->s = agf->agf_roots[cur->bc_btnum];
 }
@@ -292,7 +288,7 @@
 xfs_rmapbt_verify(
 	struct xfs_buf		*bp)
 {
-	struct xfs_mount	*mp = bp->b_target->bt_mount;
+	struct xfs_mount	*mp = bp->b_mount;
 	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
 	struct xfs_perag	*pag = bp->b_pag;
 	xfs_failaddr_t		fa;
@@ -310,7 +306,7 @@
 	 * from the on disk AGF. Again, we can only check against maximum limits
 	 * in this case.
 	 */
-	if (block->bb_magic != cpu_to_be32(XFS_RMAP_CRC_MAGIC))
+	if (!xfs_verify_magic(bp, block->bb_magic))
 		return __this_address;
 
 	if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
@@ -365,6 +361,7 @@
 
 const struct xfs_buf_ops xfs_rmapbt_buf_ops = {
 	.name			= "xfs_rmapbt",
+	.magic			= { 0, cpu_to_be32(XFS_RMAP_CRC_MAGIC) },
 	.verify_read		= xfs_rmapbt_read_verify,
 	.verify_write		= xfs_rmapbt_write_verify,
 	.verify_struct		= xfs_rmapbt_verify,
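
Note: the verifier hunk above swaps the open-coded magic comparison for xfs_verify_magic(), which indexes the new b_ops->magic[] table by whether the superblock has CRCs enabled; the rmapbt exists only on V5 (CRC) filesystems, hence the zero in slot 0. The helper is assumed to look roughly like this (cf. fs/xfs/xfs_buf.c):

	/* Sketch of the table-driven magic check this hunk relies on. */
	bool
	xfs_verify_magic(
		struct xfs_buf		*bp,
		__be32			dgt)
	{
		struct xfs_mount	*mp = bp->b_mount;
		int			idx;

		idx = xfs_sb_version_hascrc(&mp->m_sb);
		if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx]))
			return false;
		return dgt == bp->b_ops->magic[idx];
	}
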
@@ -451,9 +448,29 @@
 	.recs_inorder		= xfs_rmapbt_recs_inorder,
 };
 
-/*
- * Allocate a new allocation btree cursor.
- */
+static struct xfs_btree_cur *
+xfs_rmapbt_init_common(
+	struct xfs_mount	*mp,
+	struct xfs_trans	*tp,
+	xfs_agnumber_t		agno)
+{
+	struct xfs_btree_cur	*cur;
+
+	cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL);
+	cur->bc_tp = tp;
+	cur->bc_mp = mp;
+	/* Overlapping btree; 2 keys per pointer. */
+	cur->bc_btnum = XFS_BTNUM_RMAP;
+	cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
+	cur->bc_blocklog = mp->m_sb.sb_blocklog;
+	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
+	cur->bc_ag.agno = agno;
+	cur->bc_ops = &xfs_rmapbt_ops;
+
+	return cur;
+}
+
+/* Create a new reverse mapping btree cursor. */
 struct xfs_btree_cur *
 xfs_rmapbt_init_cursor(
 	struct xfs_mount	*mp,
@@ -461,24 +478,50 @@
 	struct xfs_buf		*agbp,
 	xfs_agnumber_t		agno)
 {
-	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
+	struct xfs_agf		*agf = agbp->b_addr;
 	struct xfs_btree_cur	*cur;
 
-	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
-	cur->bc_tp = tp;
-	cur->bc_mp = mp;
-	/* Overlapping btree; 2 keys per pointer. */
-	cur->bc_btnum = XFS_BTNUM_RMAP;
-	cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
-	cur->bc_blocklog = mp->m_sb.sb_blocklog;
-	cur->bc_ops = &xfs_rmapbt_ops;
+	cur = xfs_rmapbt_init_common(mp, tp, agno);
 	cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
-	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
-
-	cur->bc_private.a.agbp = agbp;
-	cur->bc_private.a.agno = agno;
-
+	cur->bc_ag.agbp = agbp;
 	return cur;
+}
+
+/* Create a new reverse mapping btree cursor with a fake root for staging. */
+struct xfs_btree_cur *
+xfs_rmapbt_stage_cursor(
+	struct xfs_mount	*mp,
+	struct xbtree_afakeroot	*afake,
+	xfs_agnumber_t		agno)
+{
+	struct xfs_btree_cur	*cur;
+
+	cur = xfs_rmapbt_init_common(mp, NULL, agno);
+	xfs_btree_stage_afakeroot(cur, afake);
+	return cur;
+}
+
+/*
+ * Install a new reverse mapping btree root.  Caller is responsible for
+ * invalidating and freeing the old btree blocks.
+ */
+void
+xfs_rmapbt_commit_staged_btree(
+	struct xfs_btree_cur	*cur,
+	struct xfs_trans	*tp,
+	struct xfs_buf		*agbp)
+{
+	struct xfs_agf		*agf = agbp->b_addr;
+	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;
+
+	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
+
+	agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
+	agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
+	agf->agf_rmap_blocks = cpu_to_be32(afake->af_blocks);
+	xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS |
+			  XFS_AGF_RMAP_BLOCKS);
+	xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_rmapbt_ops);
 }
 
 /*
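
Note: the staging entry points are intended for btree rebuilds (e.g. online repair): records are bulk-loaded into a shadow tree hung off the fake root, then the new root is swapped in atomically. A hedged usage sketch; xfs_btree_bload_compute_geometry() and xfs_btree_bload() are assumed from xfs_btree_staging.h, and error handling is elided:

	/* Hypothetical rebuild flow using the staging cursor. */
	struct xbtree_afakeroot	afake = { 0 };
	struct xfs_btree_bload	bload = { 0 };
	struct xfs_btree_cur	*cur;
	int			error;

	/* Callers must fill in bload's get_record/claim_block callbacks. */
	cur = xfs_rmapbt_stage_cursor(mp, &afake, agno);
	error = xfs_btree_bload_compute_geometry(cur, &bload, nr_records);
	/* ...reserve bload.nr_blocks worth of space, then... */
	error = xfs_btree_bload(cur, &bload, priv);
	xfs_rmapbt_commit_staged_btree(cur, tp, agbp);
	xfs_btree_del_cursor(cur, error);
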
@@ -572,11 +615,20 @@
 	if (error)
 		return error;
 
-	agf = XFS_BUF_TO_AGF(agbp);
+	agf = agbp->b_addr;
 	agblocks = be32_to_cpu(agf->agf_length);
 	tree_len = be32_to_cpu(agf->agf_rmap_blocks);
 	xfs_trans_brelse(tp, agbp);
 
+	/*
+	 * The log is permanently allocated, so the space it occupies will
+	 * never be available for the kinds of things that would require btree
+	 * expansion.  We therefore can pretend the space isn't there.
+	 */
+	if (mp->m_sb.sb_logstart &&
+	    XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == agno)
+		agblocks -= mp->m_sb.sb_logblocks;
+
 	/* Reserve 1% of the AG or enough for 1 block per record. */
 	*ask += max(agblocks / 100, xfs_rmapbt_max_size(mp, agblocks));
 	*used += tree_len;
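
Note: a worked example of the adjusted reservation math, with made-up geometry: for a 65536-block AG that also contains an 8192-block internal log,

	agblocks = 65536 - 8192;	/* = 57344 */
	*ask += max(57344 / 100, xfs_rmapbt_max_size(mp, 57344));

so the 1% floor drops from 655 to 573 blocks, matching the space that can actually be allocated to the rmapbt in that AG.
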