.. | .. |
---|
9 | 9 | #include "xfs_format.h" |
---|
10 | 10 | #include "xfs_log_format.h" |
---|
11 | 11 | #include "xfs_trans_resv.h" |
---|
12 | | -#include "xfs_bit.h" |
---|
13 | 12 | #include "xfs_sb.h" |
---|
14 | 13 | #include "xfs_mount.h" |
---|
15 | | -#include "xfs_defer.h" |
---|
16 | | -#include "xfs_inode.h" |
---|
17 | 14 | #include "xfs_trans.h" |
---|
18 | 15 | #include "xfs_alloc.h" |
---|
19 | 16 | #include "xfs_btree.h" |
---|
| 17 | +#include "xfs_btree_staging.h" |
---|
20 | 18 | #include "xfs_rmap.h" |
---|
21 | 19 | #include "xfs_rmap_btree.h" |
---|
22 | 20 | #include "xfs_trace.h" |
---|
23 | | -#include "xfs_cksum.h" |
---|
24 | 21 | #include "xfs_error.h" |
---|
25 | 22 | #include "xfs_extent_busy.h" |
---|
26 | 23 | #include "xfs_ag_resv.h" |
---|
.. | .. |
---|
55 | 52 | struct xfs_btree_cur *cur) |
---|
56 | 53 | { |
---|
57 | 54 | return xfs_rmapbt_init_cursor(cur->bc_mp, cur->bc_tp, |
---|
58 | | - cur->bc_private.a.agbp, cur->bc_private.a.agno); |
---|
| 55 | + cur->bc_ag.agbp, cur->bc_ag.agno); |
---|
59 | 56 | } |
---|
60 | 57 | |
---|
61 | 58 | STATIC void |
---|
.. | .. |
---|
64 | 61 | union xfs_btree_ptr *ptr, |
---|
65 | 62 | int inc) |
---|
66 | 63 | { |
---|
67 | | - struct xfs_buf *agbp = cur->bc_private.a.agbp; |
---|
68 | | - struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp); |
---|
69 | | - xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno); |
---|
| 64 | + struct xfs_buf *agbp = cur->bc_ag.agbp; |
---|
| 65 | + struct xfs_agf *agf = agbp->b_addr; |
---|
70 | 66 | int btnum = cur->bc_btnum; |
---|
71 | | - struct xfs_perag *pag = xfs_perag_get(cur->bc_mp, seqno); |
---|
| 67 | + struct xfs_perag *pag = agbp->b_pag; |
---|
72 | 68 | |
---|
73 | 69 | ASSERT(ptr->s != 0); |
---|
74 | 70 | |
---|
75 | 71 | agf->agf_roots[btnum] = ptr->s; |
---|
76 | 72 | be32_add_cpu(&agf->agf_levels[btnum], inc); |
---|
77 | 73 | pag->pagf_levels[btnum] += inc; |
---|
78 | | - xfs_perag_put(pag); |
---|
79 | 74 | |
---|
80 | 75 | xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS); |
---|
81 | 76 | } |
---|
.. | .. |
---|
87 | 82 | union xfs_btree_ptr *new, |
---|
88 | 83 | int *stat) |
---|
89 | 84 | { |
---|
90 | | - struct xfs_buf *agbp = cur->bc_private.a.agbp; |
---|
91 | | - struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp); |
---|
| 85 | + struct xfs_buf *agbp = cur->bc_ag.agbp; |
---|
| 86 | + struct xfs_agf *agf = agbp->b_addr; |
---|
92 | 87 | int error; |
---|
93 | 88 | xfs_agblock_t bno; |
---|
94 | 89 | |
---|
95 | 90 | /* Allocate the new block from the freelist. If we can't, give up. */ |
---|
96 | | - error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp, |
---|
| 91 | + error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_ag.agbp, |
---|
97 | 92 | &bno, 1); |
---|
98 | 93 | if (error) |
---|
99 | 94 | return error; |
---|
100 | 95 | |
---|
101 | | - trace_xfs_rmapbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno, |
---|
| 96 | + trace_xfs_rmapbt_alloc_block(cur->bc_mp, cur->bc_ag.agno, |
---|
102 | 97 | bno, 1); |
---|
103 | 98 | if (bno == NULLAGBLOCK) { |
---|
104 | 99 | *stat = 0; |
---|
105 | 100 | return 0; |
---|
106 | 101 | } |
---|
107 | 102 | |
---|
108 | | - xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1, |
---|
| 103 | + xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agno, bno, 1, |
---|
109 | 104 | false); |
---|
110 | 105 | |
---|
111 | 106 | xfs_trans_agbtree_delta(cur->bc_tp, 1); |
---|
.. | .. |
---|
113 | 108 | be32_add_cpu(&agf->agf_rmap_blocks, 1); |
---|
114 | 109 | xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS); |
---|
115 | 110 | |
---|
116 | | - xfs_ag_resv_rmapbt_alloc(cur->bc_mp, cur->bc_private.a.agno); |
---|
| 111 | + xfs_ag_resv_rmapbt_alloc(cur->bc_mp, cur->bc_ag.agno); |
---|
117 | 112 | |
---|
118 | 113 | *stat = 1; |
---|
119 | 114 | return 0; |
---|
.. | .. |
---|
124 | 119 | struct xfs_btree_cur *cur, |
---|
125 | 120 | struct xfs_buf *bp) |
---|
126 | 121 | { |
---|
127 | | - struct xfs_buf *agbp = cur->bc_private.a.agbp; |
---|
128 | | - struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp); |
---|
| 122 | + struct xfs_buf *agbp = cur->bc_ag.agbp; |
---|
| 123 | + struct xfs_agf *agf = agbp->b_addr; |
---|
| 124 | + struct xfs_perag *pag; |
---|
129 | 125 | xfs_agblock_t bno; |
---|
130 | 126 | int error; |
---|
131 | 127 | |
---|
132 | 128 | bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp)); |
---|
133 | | - trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_private.a.agno, |
---|
| 129 | + trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_ag.agno, |
---|
134 | 130 | bno, 1); |
---|
135 | 131 | be32_add_cpu(&agf->agf_rmap_blocks, -1); |
---|
136 | 132 | xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS); |
---|
.. | .. |
---|
142 | 138 | XFS_EXTENT_BUSY_SKIP_DISCARD); |
---|
143 | 139 | xfs_trans_agbtree_delta(cur->bc_tp, -1); |
---|
144 | 140 | |
---|
145 | | - xfs_ag_resv_rmapbt_free(cur->bc_mp, cur->bc_private.a.agno); |
---|
146 | | - |
---|
| 141 | + pag = cur->bc_ag.agbp->b_pag; |
---|
| 142 | + xfs_ag_resv_free_extent(pag, XFS_AG_RESV_RMAPBT, NULL, 1); |
---|
147 | 143 | return 0; |
---|
148 | 144 | } |
---|
149 | 145 | |
---|
.. | .. |
---|
219 | 215 | struct xfs_btree_cur *cur, |
---|
220 | 216 | union xfs_btree_ptr *ptr) |
---|
221 | 217 | { |
---|
222 | | - struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); |
---|
| 218 | + struct xfs_agf *agf = cur->bc_ag.agbp->b_addr; |
---|
223 | 219 | |
---|
224 | | - ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno)); |
---|
| 220 | + ASSERT(cur->bc_ag.agno == be32_to_cpu(agf->agf_seqno)); |
---|
225 | 221 | |
---|
226 | 222 | ptr->s = agf->agf_roots[cur->bc_btnum]; |
---|
227 | 223 | } |
---|
.. | .. |
---|
292 | 288 | xfs_rmapbt_verify( |
---|
293 | 289 | struct xfs_buf *bp) |
---|
294 | 290 | { |
---|
295 | | - struct xfs_mount *mp = bp->b_target->bt_mount; |
---|
| 291 | + struct xfs_mount *mp = bp->b_mount; |
---|
296 | 292 | struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); |
---|
297 | 293 | struct xfs_perag *pag = bp->b_pag; |
---|
298 | 294 | xfs_failaddr_t fa; |
---|
.. | .. |
---|
310 | 306 | * from the on disk AGF. Again, we can only check against maximum limits |
---|
311 | 307 | * in this case. |
---|
312 | 308 | */ |
---|
313 | | - if (block->bb_magic != cpu_to_be32(XFS_RMAP_CRC_MAGIC)) |
---|
| 309 | + if (!xfs_verify_magic(bp, block->bb_magic)) |
---|
314 | 310 | return __this_address; |
---|
315 | 311 | |
---|
316 | 312 | if (!xfs_sb_version_hasrmapbt(&mp->m_sb)) |
---|
.. | .. |
---|
365 | 361 | |
---|
366 | 362 | const struct xfs_buf_ops xfs_rmapbt_buf_ops = { |
---|
367 | 363 | .name = "xfs_rmapbt", |
---|
| 364 | + .magic = { 0, cpu_to_be32(XFS_RMAP_CRC_MAGIC) }, |
---|
368 | 365 | .verify_read = xfs_rmapbt_read_verify, |
---|
369 | 366 | .verify_write = xfs_rmapbt_write_verify, |
---|
370 | 367 | .verify_struct = xfs_rmapbt_verify, |
---|
.. | .. |
---|
451 | 448 | .recs_inorder = xfs_rmapbt_recs_inorder, |
---|
452 | 449 | }; |
---|
453 | 450 | |
---|
454 | | -/* |
---|
455 | | - * Allocate a new allocation btree cursor. |
---|
456 | | - */ |
---|
/*
 * Initialize the parts of an rmap btree cursor that are common to both
 * regular (on-disk root) and staging (fake root) cursors: mount and
 * transaction context, btree type/flags, geometry, stats offset, AG
 * number, and the ops vector.  The caller supplies whichever root the
 * cursor will use: bc_ag.agbp for a regular cursor, or a fake root via
 * xfs_btree_stage_afakeroot() for staging.
 *
 * Allocation cannot fail: GFP_NOFS | __GFP_NOFAIL retries forever
 * rather than returning NULL, and NOFS avoids recursing into the
 * filesystem during reclaim.
 */
static struct xfs_btree_cur *
xfs_rmapbt_init_common(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,	/* may be NULL for staging cursors */
	xfs_agnumber_t		agno)
{
	struct xfs_btree_cur	*cur;

	cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL);
	cur->bc_tp = tp;
	cur->bc_mp = mp;
	/* Overlapping btree; 2 keys per pointer. */
	cur->bc_btnum = XFS_BTNUM_RMAP;
	cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
	cur->bc_ag.agno = agno;
	cur->bc_ops = &xfs_rmapbt_ops;

	return cur;
}
---|
| 472 | + |
---|
| 473 | +/* Create a new reverse mapping btree cursor. */ |
---|
457 | 474 | struct xfs_btree_cur * |
---|
458 | 475 | xfs_rmapbt_init_cursor( |
---|
459 | 476 | struct xfs_mount *mp, |
---|
.. | .. |
---|
461 | 478 | struct xfs_buf *agbp, |
---|
462 | 479 | xfs_agnumber_t agno) |
---|
463 | 480 | { |
---|
464 | | - struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp); |
---|
| 481 | + struct xfs_agf *agf = agbp->b_addr; |
---|
465 | 482 | struct xfs_btree_cur *cur; |
---|
466 | 483 | |
---|
467 | | - cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS); |
---|
468 | | - cur->bc_tp = tp; |
---|
469 | | - cur->bc_mp = mp; |
---|
470 | | - /* Overlapping btree; 2 keys per pointer. */ |
---|
471 | | - cur->bc_btnum = XFS_BTNUM_RMAP; |
---|
472 | | - cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING; |
---|
473 | | - cur->bc_blocklog = mp->m_sb.sb_blocklog; |
---|
474 | | - cur->bc_ops = &xfs_rmapbt_ops; |
---|
| 484 | + cur = xfs_rmapbt_init_common(mp, tp, agno); |
---|
475 | 485 | cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]); |
---|
476 | | - cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2); |
---|
477 | | - |
---|
478 | | - cur->bc_private.a.agbp = agbp; |
---|
479 | | - cur->bc_private.a.agno = agno; |
---|
480 | | - |
---|
| 486 | + cur->bc_ag.agbp = agbp; |
---|
481 | 487 | return cur; |
---|
| 488 | +} |
---|
| 489 | + |
---|
/*
 * Create a new reverse mapping btree cursor with a fake root for staging.
 *
 * The returned cursor points at an in-memory fake root (@afake) instead
 * of an on-disk AGF root, so btree contents can be bulk-loaded without
 * touching the live filesystem.  Commit the staged tree with
 * xfs_rmapbt_commit_staged_btree() when construction is complete.
 */
struct xfs_btree_cur *
xfs_rmapbt_stage_cursor(
	struct xfs_mount	*mp,
	struct xbtree_afakeroot	*afake,
	xfs_agnumber_t		agno)
{
	struct xfs_btree_cur	*cur;

	/* No transaction: a staging cursor never modifies on-disk state. */
	cur = xfs_rmapbt_init_common(mp, NULL, agno);
	xfs_btree_stage_afakeroot(cur, afake);
	return cur;
}
---|
| 503 | + |
---|
/*
 * Install a new reverse mapping btree root.  Caller is responsible for
 * invalidating and freeing the old btree blocks.
 *
 * Copies the staged root pointer, level count, and block count from the
 * fake root into the on-disk AGF, logs those AGF fields in @tp, then
 * converts @cur from a staging cursor into a regular cursor backed by
 * @agbp.  Must only be called on a cursor created by
 * xfs_rmapbt_stage_cursor().
 */
void
xfs_rmapbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	/* Publish the staged geometry in the AGF and log all three fields. */
	agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
	agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
	agf->agf_rmap_blocks = cpu_to_be32(afake->af_blocks);
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS |
				    XFS_AGF_RMAP_BLOCKS);
	/* Detach the fake root; cursor now uses the real on-disk root. */
	xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_rmapbt_ops);
}
---|
483 | 526 | |
---|
484 | 527 | /* |
---|
.. | .. |
---|
572 | 615 | if (error) |
---|
573 | 616 | return error; |
---|
574 | 617 | |
---|
575 | | - agf = XFS_BUF_TO_AGF(agbp); |
---|
| 618 | + agf = agbp->b_addr; |
---|
576 | 619 | agblocks = be32_to_cpu(agf->agf_length); |
---|
577 | 620 | tree_len = be32_to_cpu(agf->agf_rmap_blocks); |
---|
578 | 621 | xfs_trans_brelse(tp, agbp); |
---|
579 | 622 | |
---|
| 623 | + /* |
---|
| 624 | + * The log is permanently allocated, so the space it occupies will |
---|
| 625 | + * never be available for the kinds of things that would require btree |
---|
| 626 | + * expansion. We therefore can pretend the space isn't there. |
---|
| 627 | + */ |
---|
| 628 | + if (mp->m_sb.sb_logstart && |
---|
| 629 | + XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == agno) |
---|
| 630 | + agblocks -= mp->m_sb.sb_logblocks; |
---|
| 631 | + |
---|
580 | 632 | /* Reserve 1% of the AG or enough for 1 block per record. */ |
---|
581 | 633 | *ask += max(agblocks / 100, xfs_rmapbt_max_size(mp, agblocks)); |
---|
582 | 634 | *used += tree_len; |
---|