@@ -12,12 +12,12 @@
 #include "xfs_sb.h"
 #include "xfs_mount.h"
 #include "xfs_btree.h"
+#include "xfs_btree_staging.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_alloc.h"
 #include "xfs_extent_busy.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
-#include "xfs_cksum.h"
 #include "xfs_trans.h"
 
 
@@ -26,7 +26,7 @@
 	struct xfs_btree_cur *cur)
 {
 	return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
-			cur->bc_private.a.agbp, cur->bc_private.a.agno,
+			cur->bc_ag.agbp, cur->bc_ag.agno,
 			cur->bc_btnum);
 }
 
@@ -36,18 +36,16 @@
 	union xfs_btree_ptr *ptr,
 	int inc)
 {
-	struct xfs_buf *agbp = cur->bc_private.a.agbp;
-	struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
-	xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
+	struct xfs_buf *agbp = cur->bc_ag.agbp;
+	struct xfs_agf *agf = agbp->b_addr;
 	int btnum = cur->bc_btnum;
-	struct xfs_perag *pag = xfs_perag_get(cur->bc_mp, seqno);
+	struct xfs_perag *pag = agbp->b_pag;
 
 	ASSERT(ptr->s != 0);
 
 	agf->agf_roots[btnum] = ptr->s;
 	be32_add_cpu(&agf->agf_levels[btnum], inc);
 	pag->pagf_levels[btnum] += inc;
-	xfs_perag_put(pag);
 
 	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
 }
@@ -63,7 +61,7 @@
 	xfs_agblock_t bno;
 
 	/* Allocate the new block from the freelist. If we can't, give up. */
-	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
+	error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_ag.agbp,
 				       &bno, 1);
 	if (error)
 		return error;
@@ -73,7 +71,7 @@
 		return 0;
 	}
 
-	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1, false);
+	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agno, bno, 1, false);
 
 	xfs_trans_agbtree_delta(cur->bc_tp, 1);
 	new->s = cpu_to_be32(bno);
@@ -87,8 +85,8 @@
 	struct xfs_btree_cur *cur,
 	struct xfs_buf *bp)
 {
-	struct xfs_buf *agbp = cur->bc_private.a.agbp;
-	struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+	struct xfs_buf *agbp = cur->bc_ag.agbp;
+	struct xfs_agf *agf = agbp->b_addr;
 	xfs_agblock_t bno;
 	int error;
 
@@ -114,8 +112,7 @@
 	int ptr,
 	int reason)
 {
-	struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
-	xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
+	struct xfs_agf *agf = cur->bc_ag.agbp->b_addr;
 	struct xfs_perag *pag;
 	__be32 len;
 	int numrecs;
@@ -160,10 +157,9 @@
 	}
 
 	agf->agf_longest = len;
-	pag = xfs_perag_get(cur->bc_mp, seqno);
+	pag = cur->bc_ag.agbp->b_pag;
 	pag->pagf_longest = be32_to_cpu(len);
-	xfs_perag_put(pag);
-	xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
+	xfs_alloc_log_agf(cur->bc_tp, cur->bc_ag.agbp, XFS_AGF_LONGEST);
 }
 
 STATIC int
@@ -227,9 +223,9 @@
 	struct xfs_btree_cur *cur,
 	union xfs_btree_ptr *ptr)
 {
-	struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
+	struct xfs_agf *agf = cur->bc_ag.agbp->b_addr;
 
-	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
+	ASSERT(cur->bc_ag.agno == be32_to_cpu(agf->agf_seqno));
 
 	ptr->s = agf->agf_roots[cur->bc_btnum];
 }
@@ -292,53 +288,39 @@
 xfs_allocbt_verify(
 	struct xfs_buf *bp)
 {
-	struct xfs_mount *mp = bp->b_target->bt_mount;
+	struct xfs_mount *mp = bp->b_mount;
 	struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
 	struct xfs_perag *pag = bp->b_pag;
 	xfs_failaddr_t fa;
 	unsigned int level;
+	xfs_btnum_t btnum = XFS_BTNUM_BNOi;
+
+	if (!xfs_verify_magic(bp, block->bb_magic))
+		return __this_address;
+
+	if (xfs_sb_version_hascrc(&mp->m_sb)) {
+		fa = xfs_btree_sblock_v5hdr_verify(bp);
+		if (fa)
+			return fa;
+	}
 
 	/*
-	 * magic number and level verification
+	 * The perag may not be attached during grow operations or fully
+	 * initialized from the AGF during log recovery. Therefore we can only
+	 * check against maximum tree depth from those contexts.
 	 *
-	 * During growfs operations, we can't verify the exact level or owner as
-	 * the perag is not fully initialised and hence not attached to the
-	 * buffer. In this case, check against the maximum tree depth.
-	 *
-	 * Similarly, during log recovery we will have a perag structure
-	 * attached, but the agf information will not yet have been initialised
-	 * from the on disk AGF. Again, we can only check against maximum limits
-	 * in this case.
+	 * Otherwise check against the per-tree limit. Peek at one of the
+	 * verifier magic values to determine the type of tree we're verifying
+	 * against.
 	 */
 	level = be16_to_cpu(block->bb_level);
-	switch (block->bb_magic) {
-	case cpu_to_be32(XFS_ABTB_CRC_MAGIC):
-		fa = xfs_btree_sblock_v5hdr_verify(bp);
-		if (fa)
-			return fa;
-		/* fall through */
-	case cpu_to_be32(XFS_ABTB_MAGIC):
-		if (pag && pag->pagf_init) {
-			if (level >= pag->pagf_levels[XFS_BTNUM_BNOi])
-				return __this_address;
-		} else if (level >= mp->m_ag_maxlevels)
+	if (bp->b_ops->magic[0] == cpu_to_be32(XFS_ABTC_MAGIC))
+		btnum = XFS_BTNUM_CNTi;
+	if (pag && pag->pagf_init) {
+		if (level >= pag->pagf_levels[btnum])
 			return __this_address;
-		break;
-	case cpu_to_be32(XFS_ABTC_CRC_MAGIC):
-		fa = xfs_btree_sblock_v5hdr_verify(bp);
-		if (fa)
-			return fa;
-		/* fall through */
-	case cpu_to_be32(XFS_ABTC_MAGIC):
-		if (pag && pag->pagf_init) {
-			if (level >= pag->pagf_levels[XFS_BTNUM_CNTi])
-				return __this_address;
-		} else if (level >= mp->m_ag_maxlevels)
-			return __this_address;
-		break;
-	default:
+	} else if (level >= mp->m_ag_maxlevels)
 		return __this_address;
-	}
 
 	return xfs_btree_sblock_verify(bp, mp->m_alloc_mxr[level != 0]);
 }
@@ -377,13 +359,23 @@
 
 }
 
-const struct xfs_buf_ops xfs_allocbt_buf_ops = {
-	.name = "xfs_allocbt",
+const struct xfs_buf_ops xfs_bnobt_buf_ops = {
+	.name = "xfs_bnobt",
+	.magic = { cpu_to_be32(XFS_ABTB_MAGIC),
+		   cpu_to_be32(XFS_ABTB_CRC_MAGIC) },
 	.verify_read = xfs_allocbt_read_verify,
 	.verify_write = xfs_allocbt_write_verify,
 	.verify_struct = xfs_allocbt_verify,
 };
 
+const struct xfs_buf_ops xfs_cntbt_buf_ops = {
+	.name = "xfs_cntbt",
+	.magic = { cpu_to_be32(XFS_ABTC_MAGIC),
+		   cpu_to_be32(XFS_ABTC_CRC_MAGIC) },
+	.verify_read = xfs_allocbt_read_verify,
+	.verify_write = xfs_allocbt_write_verify,
+	.verify_struct = xfs_allocbt_verify,
+};
 
 STATIC int
 xfs_bnobt_keys_inorder(
@@ -448,7 +440,7 @@
 	.init_rec_from_cur = xfs_allocbt_init_rec_from_cur,
 	.init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur,
 	.key_diff = xfs_bnobt_key_diff,
-	.buf_ops = &xfs_allocbt_buf_ops,
+	.buf_ops = &xfs_bnobt_buf_ops,
 	.diff_two_keys = xfs_bnobt_diff_two_keys,
 	.keys_inorder = xfs_bnobt_keys_inorder,
 	.recs_inorder = xfs_bnobt_recs_inorder,
@@ -470,11 +462,48 @@
 	.init_rec_from_cur = xfs_allocbt_init_rec_from_cur,
 	.init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur,
 	.key_diff = xfs_cntbt_key_diff,
-	.buf_ops = &xfs_allocbt_buf_ops,
+	.buf_ops = &xfs_cntbt_buf_ops,
 	.diff_two_keys = xfs_cntbt_diff_two_keys,
 	.keys_inorder = xfs_cntbt_keys_inorder,
 	.recs_inorder = xfs_cntbt_recs_inorder,
 };
+
+/* Allocate most of a new allocation btree cursor. */
+STATIC struct xfs_btree_cur *
+xfs_allocbt_init_common(
+	struct xfs_mount *mp,
+	struct xfs_trans *tp,
+	xfs_agnumber_t agno,
+	xfs_btnum_t btnum)
+{
+	struct xfs_btree_cur *cur;
+
+	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
+
+	cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL);
+
+	cur->bc_tp = tp;
+	cur->bc_mp = mp;
+	cur->bc_btnum = btnum;
+	cur->bc_blocklog = mp->m_sb.sb_blocklog;
+
+	if (btnum == XFS_BTNUM_CNT) {
+		cur->bc_ops = &xfs_cntbt_ops;
+		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
+		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
+	} else {
+		cur->bc_ops = &xfs_bnobt_ops;
+		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
+	}
+
+	cur->bc_ag.agno = agno;
+	cur->bc_ag.abt.active = false;
+
+	if (xfs_sb_version_hascrc(&mp->m_sb))
+		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
+
+	return cur;
+}
 
 /*
  * Allocate a new allocation btree cursor.
@@ -487,38 +516,62 @@
 	xfs_agnumber_t agno,	/* allocation group number */
 	xfs_btnum_t btnum)	/* btree identifier */
 {
-	struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+	struct xfs_agf *agf = agbp->b_addr;
 	struct xfs_btree_cur *cur;
 
-	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
-
-	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
-
-	cur->bc_tp = tp;
-	cur->bc_mp = mp;
-	cur->bc_btnum = btnum;
-	cur->bc_blocklog = mp->m_sb.sb_blocklog;
-
-	if (btnum == XFS_BTNUM_CNT) {
-		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
-		cur->bc_ops = &xfs_cntbt_ops;
+	cur = xfs_allocbt_init_common(mp, tp, agno, btnum);
+	if (btnum == XFS_BTNUM_CNT)
 		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
-		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
-	} else {
-		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
-		cur->bc_ops = &xfs_bnobt_ops;
+	else
 		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
-	}
 
-	cur->bc_private.a.agbp = agbp;
-	cur->bc_private.a.agno = agno;
-
-	if (xfs_sb_version_hascrc(&mp->m_sb))
-		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
+	cur->bc_ag.agbp = agbp;
 
 	return cur;
 }
 
+/* Create a free space btree cursor with a fake root for staging. */
+struct xfs_btree_cur *
+xfs_allocbt_stage_cursor(
+	struct xfs_mount *mp,
+	struct xbtree_afakeroot *afake,
+	xfs_agnumber_t agno,
+	xfs_btnum_t btnum)
+{
+	struct xfs_btree_cur *cur;
+
+	cur = xfs_allocbt_init_common(mp, NULL, agno, btnum);
+	xfs_btree_stage_afakeroot(cur, afake);
+	return cur;
+}
+
+/*
+ * Install a new free space btree root.  Caller is responsible for invalidating
+ * and freeing the old btree blocks.
+ */
+void
+xfs_allocbt_commit_staged_btree(
+	struct xfs_btree_cur *cur,
+	struct xfs_trans *tp,
+	struct xfs_buf *agbp)
+{
+	struct xfs_agf *agf = agbp->b_addr;
+	struct xbtree_afakeroot *afake = cur->bc_ag.afake;
+
+	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
+
+	agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
+	agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
+	xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
+
+	if (cur->bc_btnum == XFS_BTNUM_BNO) {
+		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_bnobt_ops);
+	} else {
+		cur->bc_flags |= XFS_BTREE_LASTREC_UPDATE;
+		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_cntbt_ops);
+	}
+}
+
 /*
  * Calculate number of records in an alloc btree block.
  */