| Old | New | |
|---|---|---|
| .. | .. | |
| 9 | 9 | #include "xfs_format.h" |
|---|
| 10 | 10 | #include "xfs_trans_resv.h" |
|---|
| 11 | 11 | #include "xfs_mount.h" |
|---|
| 12 | | -#include "xfs_defer.h" |
|---|
| 13 | 12 | #include "xfs_btree.h" |
|---|
| 14 | | -#include "xfs_bit.h" |
|---|
| 15 | 13 | #include "xfs_log_format.h" |
|---|
| 16 | 14 | #include "xfs_trans.h" |
|---|
| 17 | | -#include "xfs_sb.h" |
|---|
| 18 | 15 | #include "xfs_inode.h" |
|---|
| 19 | | -#include "xfs_alloc.h" |
|---|
| 20 | 16 | #include "xfs_ialloc.h" |
|---|
| 21 | 17 | #include "xfs_ialloc_btree.h" |
|---|
| 22 | 18 | #include "xfs_icache.h" |
|---|
| 23 | 19 | #include "xfs_rmap.h" |
|---|
| 24 | | -#include "xfs_log.h" |
|---|
| 25 | | -#include "xfs_trans_priv.h" |
|---|
| 26 | | -#include "scrub/xfs_scrub.h" |
|---|
| 27 | 20 | #include "scrub/scrub.h" |
|---|
| 28 | 21 | #include "scrub/common.h" |
|---|
| 29 | 22 | #include "scrub/btree.h" |
|---|
| .. | .. |
|---|
| 39 | 32 | struct xfs_scrub *sc, |
|---|
| 40 | 33 | struct xfs_inode *ip) |
|---|
| 41 | 34 | { |
|---|
| 42 | | - return xchk_setup_ag_btree(sc, ip, sc->try_harder); |
|---|
| 35 | + return xchk_setup_ag_btree(sc, ip, sc->flags & XCHK_TRY_HARDER); |
|---|
| 43 | 36 | } |
|---|
| 44 | 37 | |
|---|
| 45 | 38 | /* Inode btree scrubber. */ |
|---|
| 39 | + |
|---|
| 40 | +struct xchk_iallocbt { |
|---|
| 41 | + /* Number of inodes we see while scanning inobt. */ |
|---|
| 42 | + unsigned long long inodes; |
|---|
| 43 | + |
|---|
| 44 | + /* Expected next startino, for big block filesystems. */ |
|---|
| 45 | + xfs_agino_t next_startino; |
|---|
| 46 | + |
|---|
| 47 | + /* Expected end of the current inode cluster. */ |
|---|
| 48 | + xfs_agino_t next_cluster_ino; |
|---|
| 49 | +}; |
|---|
| 46 | 50 | |
|---|
| 47 | 51 | /* |
|---|
| 48 | 52 | * If we're checking the finobt, cross-reference with the inobt. |
|---|
| .. | .. |
|---|
| 82 | 86 | xfs_agblock_t agbno, |
|---|
| 83 | 87 | xfs_extlen_t len) |
|---|
| 84 | 88 | { |
|---|
| 85 | | - struct xfs_owner_info oinfo; |
|---|
| 86 | | - |
|---|
| 87 | 89 | if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) |
|---|
| 88 | 90 | return; |
|---|
| 89 | 91 | |
|---|
| 90 | 92 | xchk_xref_is_used_space(sc, agbno, len); |
|---|
| 91 | 93 | xchk_iallocbt_chunk_xref_other(sc, irec, agino); |
|---|
| 92 | | - xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES); |
|---|
| 93 | | - xchk_xref_is_owned_by(sc, agbno, len, &oinfo); |
|---|
| 94 | + xchk_xref_is_owned_by(sc, agbno, len, &XFS_RMAP_OINFO_INODES); |
|---|
| 94 | 95 | xchk_xref_is_not_shared(sc, agbno, len); |
|---|
| 95 | 96 | } |
|---|
| 96 | 97 | |
|---|
| .. | .. |
|---|
| 103 | 104 | xfs_extlen_t len) |
|---|
| 104 | 105 | { |
|---|
| 105 | 106 | struct xfs_mount *mp = bs->cur->bc_mp; |
|---|
| 106 | | - xfs_agnumber_t agno = bs->cur->bc_private.a.agno; |
|---|
| 107 | + xfs_agnumber_t agno = bs->cur->bc_ag.agno; |
|---|
| 107 | 108 | xfs_agblock_t bno; |
|---|
| 108 | 109 | |
|---|
| 109 | 110 | bno = XFS_AGINO_TO_AGBNO(mp, agino); |
|---|
| .. | .. |
|---|
| 126 | 127 | return hweight64(freemask); |
|---|
| 127 | 128 | } |
|---|
| 128 | 129 | |
|---|
| 129 | | -/* Check a particular inode with ir_free. */ |
|---|
| 130 | +/* |
|---|
| 131 | + * Check that an inode's allocation status matches ir_free in the inobt |
|---|
| 132 | + * record. First we try querying the in-core inode state, and if the inode |
|---|
| 133 | + * isn't loaded we examine the on-disk inode directly. |
|---|
| 134 | + * |
|---|
| 135 | + * Since there can be 1:M and M:1 mappings between inobt records and inode |
|---|
| 136 | + * clusters, we pass in the inode location information as an inobt record; |
|---|
| 137 | + * the index of an inode cluster within the inobt record (as well as the |
|---|
| 138 | + * cluster buffer itself); and the index of the inode within the cluster. |
|---|
| 139 | + * |
|---|
| 140 | + * @irec is the inobt record. |
|---|
| 141 | + * @irec_ino is the inode offset from the start of the record. |
|---|
| 142 | + * @dip is the on-disk inode. |
|---|
| 143 | + */ |
|---|
| 130 | 144 | STATIC int |
|---|
| 131 | | -xchk_iallocbt_check_cluster_freemask( |
|---|
| 145 | +xchk_iallocbt_check_cluster_ifree( |
|---|
| 132 | 146 | struct xchk_btree *bs, |
|---|
| 133 | | - xfs_ino_t fsino, |
|---|
| 134 | | - xfs_agino_t chunkino, |
|---|
| 135 | | - xfs_agino_t clusterino, |
|---|
| 136 | 147 | struct xfs_inobt_rec_incore *irec, |
|---|
| 137 | | - struct xfs_buf *bp) |
|---|
| 148 | + unsigned int irec_ino, |
|---|
| 149 | + struct xfs_dinode *dip) |
|---|
| 138 | 150 | { |
|---|
| 139 | | - struct xfs_dinode *dip; |
|---|
| 140 | 151 | struct xfs_mount *mp = bs->cur->bc_mp; |
|---|
| 141 | | - bool inode_is_free = false; |
|---|
| 152 | + xfs_ino_t fsino; |
|---|
| 153 | + xfs_agino_t agino; |
|---|
| 154 | + bool irec_free; |
|---|
| 155 | + bool ino_inuse; |
|---|
| 142 | 156 | bool freemask_ok; |
|---|
| 143 | | - bool inuse; |
|---|
| 144 | 157 | int error = 0; |
|---|
| 145 | 158 | |
|---|
| 146 | 159 | if (xchk_should_terminate(bs->sc, &error)) |
|---|
| 147 | 160 | return error; |
|---|
| 148 | 161 | |
|---|
| 149 | | - dip = xfs_buf_offset(bp, clusterino * mp->m_sb.sb_inodesize); |
|---|
| 162 | + /* |
|---|
| 163 | + * Given an inobt record and the offset of an inode from the start of |
|---|
| 164 | + * the record, compute which fs inode we're talking about. |
|---|
| 165 | + */ |
|---|
| 166 | + agino = irec->ir_startino + irec_ino; |
|---|
| 167 | + fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_ag.agno, agino); |
|---|
| 168 | + irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino)); |
|---|
| 169 | + |
|---|
| 150 | 170 | if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC || |
|---|
| 151 | | - (dip->di_version >= 3 && |
|---|
| 152 | | - be64_to_cpu(dip->di_ino) != fsino + clusterino)) { |
|---|
| 171 | + (dip->di_version >= 3 && be64_to_cpu(dip->di_ino) != fsino)) { |
|---|
| 153 | 172 | xchk_btree_set_corrupt(bs->sc, bs->cur, 0); |
|---|
| 154 | 173 | goto out; |
|---|
| 155 | 174 | } |
|---|
| 156 | 175 | |
|---|
| 157 | | - if (irec->ir_free & XFS_INOBT_MASK(chunkino + clusterino)) |
|---|
| 158 | | - inode_is_free = true; |
|---|
| 159 | | - error = xfs_icache_inode_is_allocated(mp, bs->cur->bc_tp, |
|---|
| 160 | | - fsino + clusterino, &inuse); |
|---|
| 176 | + error = xfs_icache_inode_is_allocated(mp, bs->cur->bc_tp, fsino, |
|---|
| 177 | + &ino_inuse); |
|---|
| 161 | 178 | if (error == -ENODATA) { |
|---|
| 162 | 179 | /* Not cached, just read the disk buffer */ |
|---|
| 163 | | - freemask_ok = inode_is_free ^ !!(dip->di_mode); |
|---|
| 164 | | - if (!bs->sc->try_harder && !freemask_ok) |
|---|
| 180 | + freemask_ok = irec_free ^ !!(dip->di_mode); |
|---|
| 181 | + if (!(bs->sc->flags & XCHK_TRY_HARDER) && !freemask_ok) |
|---|
| 165 | 182 | return -EDEADLOCK; |
|---|
| 166 | 183 | } else if (error < 0) { |
|---|
| 167 | 184 | /* |
|---|
| .. | .. |
|---|
| 172 | 189 | goto out; |
|---|
| 173 | 190 | } else { |
|---|
| 174 | 191 | /* Inode is all there. */ |
|---|
| 175 | | - freemask_ok = inode_is_free ^ inuse; |
|---|
| 192 | + freemask_ok = irec_free ^ ino_inuse; |
|---|
| 176 | 193 | } |
|---|
| 177 | 194 | if (!freemask_ok) |
|---|
| 178 | 195 | xchk_btree_set_corrupt(bs->sc, bs->cur, 0); |
|---|
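The hunk above boils the free-status comparison down to an exclusive-or: the inobt's free bit and the inode's in-use state must disagree (`freemask_ok = irec_free ^ ino_inuse`, or `^ !!(dip->di_mode)` when the inode isn't cached). Here is a minimal standalone sketch of that predicate, with helper names standing in for the kernel's XFS_INOBT_MASK() and icache lookups — illustration only, not kernel code:

```c
/*
 * Illustration only, not kernel code.  record_says_free() stands in for
 * "irec->ir_free & XFS_INOBT_MASK(irec_ino)"; inode_in_use stands in for
 * the icache answer (or !!(dip->di_mode) when the inode isn't cached).
 */
#include <stdbool.h>
#include <stdint.h>

static bool record_says_free(uint64_t ir_free, unsigned int irec_ino)
{
	return (ir_free & (1ULL << irec_ino)) != 0;
}

static bool freemask_consistent(uint64_t ir_free, unsigned int irec_ino,
				bool inode_in_use)
{
	/*
	 * Exactly one of "marked free" and "in use" must hold.  An inode
	 * marked free but still in use, or marked allocated but idle,
	 * makes this false and the scrubber flags corruption.
	 */
	return record_says_free(ir_free, irec_ino) ^ inode_in_use;
}
```

The two failing combinations are an inode marked free that something is still using, and an inode marked allocated that nothing is using.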
| .. | .. |
|---|
| 180 | 197 | return 0; |
|---|
| 181 | 198 | } |
|---|
| 182 | 199 | |
|---|
| 183 | | -/* Make sure the free mask is consistent with what the inodes think. */ |
|---|
| 200 | +/* |
|---|
| 201 | + * Check that the holemask and freemask of a hypothetical inode cluster match |
|---|
| 202 | + * what's actually on disk. If sparse inodes are enabled, the cluster does |
|---|
| 203 | + * not actually have to map to inodes if the corresponding holemask bit is set. |
|---|
| 204 | + * |
|---|
| 205 | + * @cluster_base is the first inode in the cluster within the @irec. |
|---|
| 206 | + */ |
|---|
| 184 | 207 | STATIC int |
|---|
| 185 | | -xchk_iallocbt_check_freemask( |
|---|
| 208 | +xchk_iallocbt_check_cluster( |
|---|
| 186 | 209 | struct xchk_btree *bs, |
|---|
| 187 | | - struct xfs_inobt_rec_incore *irec) |
|---|
| 210 | + struct xfs_inobt_rec_incore *irec, |
|---|
| 211 | + unsigned int cluster_base) |
|---|
| 188 | 212 | { |
|---|
| 189 | | - struct xfs_owner_info oinfo; |
|---|
| 190 | 213 | struct xfs_imap imap; |
|---|
| 191 | 214 | struct xfs_mount *mp = bs->cur->bc_mp; |
|---|
| 192 | 215 | struct xfs_dinode *dip; |
|---|
| 193 | | - struct xfs_buf *bp; |
|---|
| 194 | | - xfs_ino_t fsino; |
|---|
| 195 | | - xfs_agino_t nr_inodes; |
|---|
| 196 | | - xfs_agino_t agino; |
|---|
| 197 | | - xfs_agino_t chunkino; |
|---|
| 198 | | - xfs_agino_t clusterino; |
|---|
| 216 | + struct xfs_buf *cluster_bp; |
|---|
| 217 | + unsigned int nr_inodes; |
|---|
| 218 | + xfs_agnumber_t agno = bs->cur->bc_ag.agno; |
|---|
| 199 | 219 | xfs_agblock_t agbno; |
|---|
| 200 | | - int blks_per_cluster; |
|---|
| 201 | | - uint16_t holemask; |
|---|
| 220 | + unsigned int cluster_index; |
|---|
| 221 | + uint16_t cluster_mask = 0; |
|---|
| 202 | 222 | uint16_t ir_holemask; |
|---|
| 203 | 223 | int error = 0; |
|---|
| 204 | 224 | |
|---|
| 205 | | - /* Make sure the freemask matches the inode records. */ |
|---|
| 206 | | - blks_per_cluster = xfs_icluster_size_fsb(mp); |
|---|
| 207 | | - nr_inodes = XFS_OFFBNO_TO_AGINO(mp, blks_per_cluster, 0); |
|---|
| 208 | | - xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES); |
|---|
| 225 | + nr_inodes = min_t(unsigned int, XFS_INODES_PER_CHUNK, |
|---|
| 226 | + M_IGEO(mp)->inodes_per_cluster); |
|---|
| 209 | 227 | |
|---|
| 210 | | - for (agino = irec->ir_startino; |
|---|
| 211 | | - agino < irec->ir_startino + XFS_INODES_PER_CHUNK; |
|---|
| 212 | | - agino += blks_per_cluster * mp->m_sb.sb_inopblock) { |
|---|
| 213 | | - fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino); |
|---|
| 214 | | - chunkino = agino - irec->ir_startino; |
|---|
| 215 | | - agbno = XFS_AGINO_TO_AGBNO(mp, agino); |
|---|
| 228 | + /* Map this inode cluster */ |
|---|
| 229 | + agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino + cluster_base); |
|---|
| 216 | 230 | |
|---|
| 217 | | - /* Compute the holemask mask for this cluster. */ |
|---|
| 218 | | - for (clusterino = 0, holemask = 0; clusterino < nr_inodes; |
|---|
| 219 | | - clusterino += XFS_INODES_PER_HOLEMASK_BIT) |
|---|
| 220 | | - holemask |= XFS_INOBT_MASK((chunkino + clusterino) / |
|---|
| 221 | | - XFS_INODES_PER_HOLEMASK_BIT); |
|---|
| 231 | + /* Compute a bitmask for this cluster that can be used for holemask. */ |
|---|
| 232 | + for (cluster_index = 0; |
|---|
| 233 | + cluster_index < nr_inodes; |
|---|
| 234 | + cluster_index += XFS_INODES_PER_HOLEMASK_BIT) |
|---|
| 235 | + cluster_mask |= XFS_INOBT_MASK((cluster_base + cluster_index) / |
|---|
| 236 | + XFS_INODES_PER_HOLEMASK_BIT); |
|---|
| 222 | 237 | |
|---|
| 223 | | - /* The whole cluster must be a hole or not a hole. */ |
|---|
| 224 | | - ir_holemask = (irec->ir_holemask & holemask); |
|---|
| 225 | | - if (ir_holemask != holemask && ir_holemask != 0) { |
|---|
| 238 | + /* |
|---|
| 239 | + * Map the first inode of this cluster to a buffer and offset. |
|---|
| 240 | + * Be careful about inobt records that don't align with the start of |
|---|
| 241 | + * the inode buffer when block sizes are large enough to hold multiple |
|---|
| 242 | + * inode chunks. When this happens, cluster_base will be zero but |
|---|
| 243 | + * ir_startino can be large enough to make im_boffset nonzero. |
|---|
| 244 | + */ |
|---|
| 245 | + ir_holemask = (irec->ir_holemask & cluster_mask); |
|---|
| 246 | + imap.im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno); |
|---|
| 247 | + imap.im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster); |
|---|
| 248 | + imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino) << |
|---|
| 249 | + mp->m_sb.sb_inodelog; |
|---|
| 250 | + |
|---|
| 251 | + if (imap.im_boffset != 0 && cluster_base != 0) { |
|---|
| 252 | + ASSERT(imap.im_boffset == 0 || cluster_base == 0); |
|---|
| 253 | + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); |
|---|
| 254 | + return 0; |
|---|
| 255 | + } |
|---|
| 256 | + |
|---|
| 257 | + trace_xchk_iallocbt_check_cluster(mp, agno, irec->ir_startino, |
|---|
| 258 | + imap.im_blkno, imap.im_len, cluster_base, nr_inodes, |
|---|
| 259 | + cluster_mask, ir_holemask, |
|---|
| 260 | + XFS_INO_TO_OFFSET(mp, irec->ir_startino + |
|---|
| 261 | + cluster_base)); |
|---|
| 262 | + |
|---|
| 263 | + /* The whole cluster must be a hole or not a hole. */ |
|---|
| 264 | + if (ir_holemask != cluster_mask && ir_holemask != 0) { |
|---|
| 265 | + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); |
|---|
| 266 | + return 0; |
|---|
| 267 | + } |
|---|
| 268 | + |
|---|
| 269 | + /* If any part of this is a hole, skip it. */ |
|---|
| 270 | + if (ir_holemask) { |
|---|
| 271 | + xchk_xref_is_not_owned_by(bs->sc, agbno, |
|---|
| 272 | + M_IGEO(mp)->blocks_per_cluster, |
|---|
| 273 | + &XFS_RMAP_OINFO_INODES); |
|---|
| 274 | + return 0; |
|---|
| 275 | + } |
|---|
| 276 | + |
|---|
| 277 | + xchk_xref_is_owned_by(bs->sc, agbno, M_IGEO(mp)->blocks_per_cluster, |
|---|
| 278 | + &XFS_RMAP_OINFO_INODES); |
|---|
| 279 | + |
|---|
| 280 | + /* Grab the inode cluster buffer. */ |
|---|
| 281 | + error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &dip, &cluster_bp, 0); |
|---|
| 282 | + if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error)) |
|---|
| 283 | + return error; |
|---|
| 284 | + |
|---|
| 285 | + /* Check free status of each inode within this cluster. */ |
|---|
| 286 | + for (cluster_index = 0; cluster_index < nr_inodes; cluster_index++) { |
|---|
| 287 | + struct xfs_dinode *dip; |
|---|
| 288 | + |
|---|
| 289 | + if (imap.im_boffset >= BBTOB(cluster_bp->b_length)) { |
|---|
| 226 | 290 | xchk_btree_set_corrupt(bs->sc, bs->cur, 0); |
|---|
| 227 | | - continue; |
|---|
| 291 | + break; |
|---|
| 228 | 292 | } |
|---|
| 229 | 293 | |
|---|
| 230 | | - /* If any part of this is a hole, skip it. */ |
|---|
| 231 | | - if (ir_holemask) { |
|---|
| 232 | | - xchk_xref_is_not_owned_by(bs->sc, agbno, |
|---|
| 233 | | - blks_per_cluster, &oinfo); |
|---|
| 234 | | - continue; |
|---|
| 235 | | - } |
|---|
| 294 | + dip = xfs_buf_offset(cluster_bp, imap.im_boffset); |
|---|
| 295 | + error = xchk_iallocbt_check_cluster_ifree(bs, irec, |
|---|
| 296 | + cluster_base + cluster_index, dip); |
|---|
| 297 | + if (error) |
|---|
| 298 | + break; |
|---|
| 299 | + imap.im_boffset += mp->m_sb.sb_inodesize; |
|---|
| 300 | + } |
|---|
| 236 | 301 | |
|---|
| 237 | | - xchk_xref_is_owned_by(bs->sc, agbno, blks_per_cluster, |
|---|
| 238 | | - &oinfo); |
|---|
| 302 | + xfs_trans_brelse(bs->cur->bc_tp, cluster_bp); |
|---|
| 303 | + return error; |
|---|
| 304 | +} |
|---|
| 239 | 305 | |
|---|
| 240 | | - /* Grab the inode cluster buffer. */ |
|---|
| 241 | | - imap.im_blkno = XFS_AGB_TO_DADDR(mp, bs->cur->bc_private.a.agno, |
|---|
| 242 | | - agbno); |
|---|
| 243 | | - imap.im_len = XFS_FSB_TO_BB(mp, blks_per_cluster); |
|---|
| 244 | | - imap.im_boffset = 0; |
|---|
| 306 | +/* |
|---|
| 307 | + * For all the inode clusters that could map to this inobt record, make sure |
|---|
| 308 | + * that the holemask makes sense and that the allocation status of each inode |
|---|
| 309 | + * matches the freemask. |
|---|
| 310 | + */ |
|---|
| 311 | +STATIC int |
|---|
| 312 | +xchk_iallocbt_check_clusters( |
|---|
| 313 | + struct xchk_btree *bs, |
|---|
| 314 | + struct xfs_inobt_rec_incore *irec) |
|---|
| 315 | +{ |
|---|
| 316 | + unsigned int cluster_base; |
|---|
| 317 | + int error = 0; |
|---|
| 245 | 318 | |
|---|
| 246 | | - error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, |
|---|
| 247 | | - &dip, &bp, 0, 0); |
|---|
| 248 | | - if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, |
|---|
| 249 | | - &error)) |
|---|
| 250 | | - continue; |
|---|
| 251 | | - |
|---|
| 252 | | - /* Which inodes are free? */ |
|---|
| 253 | | - for (clusterino = 0; clusterino < nr_inodes; clusterino++) { |
|---|
| 254 | | - error = xchk_iallocbt_check_cluster_freemask(bs, |
|---|
| 255 | | - fsino, chunkino, clusterino, irec, bp); |
|---|
| 256 | | - if (error) { |
|---|
| 257 | | - xfs_trans_brelse(bs->cur->bc_tp, bp); |
|---|
| 258 | | - return error; |
|---|
| 259 | | - } |
|---|
| 260 | | - } |
|---|
| 261 | | - |
|---|
| 262 | | - xfs_trans_brelse(bs->cur->bc_tp, bp); |
|---|
| 319 | + /* |
|---|
| 320 | + * For the common case where this inobt record maps to multiple inode |
|---|
| 321 | + * clusters this will call _check_cluster for each cluster. |
|---|
| 322 | + * |
|---|
| 323 | + * For the case that multiple inobt records map to a single cluster, |
|---|
| 324 | + * this will call _check_cluster once. |
|---|
| 325 | + */ |
|---|
| 326 | + for (cluster_base = 0; |
|---|
| 327 | + cluster_base < XFS_INODES_PER_CHUNK; |
|---|
| 328 | + cluster_base += M_IGEO(bs->sc->mp)->inodes_per_cluster) { |
|---|
| 329 | + error = xchk_iallocbt_check_cluster(bs, irec, cluster_base); |
|---|
| 330 | + if (error) |
|---|
| 331 | + break; |
|---|
| 263 | 332 | } |
|---|
| 264 | 333 | |
|---|
| 265 | 334 | return error; |
|---|
| 335 | +} |
|---|
| 336 | + |
|---|
| 337 | +/* |
|---|
| 338 | + * Make sure this inode btree record is aligned properly. Because a fs block |
|---|
| 339 | + * contains multiple inodes, we check that the inobt record is aligned to the |
|---|
| 340 | + * correct inode, not just the correct block on disk. This results in a finer |
|---|
| 341 | + * grained corruption check. |
|---|
| 342 | + */ |
|---|
| 343 | +STATIC void |
|---|
| 344 | +xchk_iallocbt_rec_alignment( |
|---|
| 345 | + struct xchk_btree *bs, |
|---|
| 346 | + struct xfs_inobt_rec_incore *irec) |
|---|
| 347 | +{ |
|---|
| 348 | + struct xfs_mount *mp = bs->sc->mp; |
|---|
| 349 | + struct xchk_iallocbt *iabt = bs->private; |
|---|
| 350 | + struct xfs_ino_geometry *igeo = M_IGEO(mp); |
|---|
| 351 | + |
|---|
| 352 | + /* |
|---|
| 353 | + * finobt records have different positioning requirements than inobt |
|---|
| 354 | + * records: each finobt record must have a corresponding inobt record. |
|---|
| 355 | + * That is checked in the xref function, so for now we only catch the |
|---|
| 356 | + * obvious case where the record isn't at all aligned properly. |
|---|
| 357 | + * |
|---|
| 358 | + * Note that if a fs block contains more than a single chunk of inodes, |
|---|
| 359 | + * we will have finobt records only for those chunks containing free |
|---|
| 360 | + * inodes, and therefore expect chunk alignment of finobt records. |
|---|
| 361 | + * Otherwise, we expect that the finobt record is aligned to the |
|---|
| 362 | + * cluster alignment as told by the superblock. |
|---|
| 363 | + */ |
|---|
| 364 | + if (bs->cur->bc_btnum == XFS_BTNUM_FINO) { |
|---|
| 365 | + unsigned int imask; |
|---|
| 366 | + |
|---|
| 367 | + imask = min_t(unsigned int, XFS_INODES_PER_CHUNK, |
|---|
| 368 | + igeo->cluster_align_inodes) - 1; |
|---|
| 369 | + if (irec->ir_startino & imask) |
|---|
| 370 | + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); |
|---|
| 371 | + return; |
|---|
| 372 | + } |
|---|
| 373 | + |
|---|
| 374 | + if (iabt->next_startino != NULLAGINO) { |
|---|
| 375 | + /* |
|---|
| 376 | + * We're midway through a cluster of inodes that is mapped by |
|---|
| 377 | + * multiple inobt records. Did we get the record for the next |
|---|
| 378 | + * irec in the sequence? |
|---|
| 379 | + */ |
|---|
| 380 | + if (irec->ir_startino != iabt->next_startino) { |
|---|
| 381 | + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); |
|---|
| 382 | + return; |
|---|
| 383 | + } |
|---|
| 384 | + |
|---|
| 385 | + iabt->next_startino += XFS_INODES_PER_CHUNK; |
|---|
| 386 | + |
|---|
| 387 | + /* Are we done with the cluster? */ |
|---|
| 388 | + if (iabt->next_startino >= iabt->next_cluster_ino) { |
|---|
| 389 | + iabt->next_startino = NULLAGINO; |
|---|
| 390 | + iabt->next_cluster_ino = NULLAGINO; |
|---|
| 391 | + } |
|---|
| 392 | + return; |
|---|
| 393 | + } |
|---|
| 394 | + |
|---|
| 395 | + /* inobt records must be aligned to cluster and inoalignment size. */ |
|---|
| 396 | + if (irec->ir_startino & (igeo->cluster_align_inodes - 1)) { |
|---|
| 397 | + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); |
|---|
| 398 | + return; |
|---|
| 399 | + } |
|---|
| 400 | + |
|---|
| 401 | + if (irec->ir_startino & (igeo->inodes_per_cluster - 1)) { |
|---|
| 402 | + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); |
|---|
| 403 | + return; |
|---|
| 404 | + } |
|---|
| 405 | + |
|---|
| 406 | + if (igeo->inodes_per_cluster <= XFS_INODES_PER_CHUNK) |
|---|
| 407 | + return; |
|---|
| 408 | + |
|---|
| 409 | + /* |
|---|
| 410 | + * If this is the start of an inode cluster that can be mapped by |
|---|
| 411 | + * multiple inobt records, the next inobt record must follow exactly |
|---|
| 412 | + * after this one. |
|---|
| 413 | + */ |
|---|
| 414 | + iabt->next_startino = irec->ir_startino + XFS_INODES_PER_CHUNK; |
|---|
| 415 | + iabt->next_cluster_ino = irec->ir_startino + igeo->inodes_per_cluster; |
|---|
| 266 | 416 | } |
|---|
| 267 | 417 | |
|---|
| 268 | 418 | /* Scrub an inobt/finobt record. */ |
|---|
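xchk_iallocbt_check_cluster() builds `cluster_mask`, the slice of the 16-bit holemask that covers one inode cluster, and then insists that `ir_holemask & cluster_mask` is either zero or the whole slice. A worked sketch of that mask computation, assuming the usual 64-inode chunks with one holemask bit per 4 inodes (XFS_INODES_PER_HOLEMASK_BIT); the constants are spelled out here purely for illustration:

```c
/*
 * Illustration only, not kernel code.  Assumes the standard layout:
 * 64 inodes per chunk, a 16-bit holemask, one holemask bit per 4 inodes
 * (XFS_INODES_PER_HOLEMASK_BIT).
 */
#include <stdint.h>
#include <stdio.h>

#define INODES_PER_CHUNK		64
#define INODES_PER_HOLEMASK_BIT		4

static uint16_t cluster_holemask(unsigned int cluster_base,
				 unsigned int nr_inodes)
{
	uint16_t mask = 0;
	unsigned int i;

	/* Mirrors the cluster_mask loop in xchk_iallocbt_check_cluster(). */
	for (i = 0; i < nr_inodes; i += INODES_PER_HOLEMASK_BIT)
		mask |= 1U << ((cluster_base + i) / INODES_PER_HOLEMASK_BIT);
	return mask;
}

int main(void)
{
	/* First 32-inode cluster of a chunk: holemask bits 0-7. */
	printf("0x%x\n", (unsigned int)cluster_holemask(0, 32));  /* 0xff */
	/* Second 32-inode cluster: holemask bits 8-15. */
	printf("0x%x\n", (unsigned int)cluster_holemask(32, 32)); /* 0xff00 */
	return 0;
}
```

With 32-inode clusters, the first cluster of a chunk maps to holemask bits 0-7 and the second to bits 8-15, so a partially set slice can only mean a corrupt record.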
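The `im_boffset` computation handles filesystems whose blocks are big enough to hold several 64-inode chunks: the record's first inode may start partway into the inode buffer, which is why cluster_base can be zero while im_boffset is not (and why both being nonzero is flagged as corruption). A back-of-the-envelope example; the geometry below (64 KiB blocks, 512-byte inodes) is an assumption for illustration, not taken from the patch:

```c
/*
 * Illustration only; the geometry is assumed (64 KiB blocks, 512-byte
 * inodes => 128 inodes per block, i.e. two 64-inode chunks per block).
 */
#include <stdio.h>

int main(void)
{
	unsigned int inodelog = 9;		/* log2 of the inode size */
	unsigned int inodes_per_block = 128;	/* 64 KiB / 512 */
	unsigned int ir_startino = 192;		/* second chunk in its block */

	/* XFS_INO_TO_OFFSET(): the inode's index within its fs block. */
	unsigned int offset = ir_startino % inodes_per_block;	/* 64 */

	/* im_boffset: byte offset of the chunk's first inode in the buffer. */
	printf("im_boffset = %u bytes\n", offset << inodelog);	/* 32768 */
	return 0;
}
```

A chunk that shares its block with another chunk always fits inside a single cluster buffer, so cluster_base stays zero in this case, matching the comment in the patch.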
| .. | .. |
|---|
| 272 | 422 | union xfs_btree_rec *rec) |
|---|
| 273 | 423 | { |
|---|
| 274 | 424 | struct xfs_mount *mp = bs->cur->bc_mp; |
|---|
| 275 | | - xfs_filblks_t *inode_blocks = bs->private; |
|---|
| 425 | + struct xchk_iallocbt *iabt = bs->private; |
|---|
| 276 | 426 | struct xfs_inobt_rec_incore irec; |
|---|
| 277 | 427 | uint64_t holes; |
|---|
| 278 | | - xfs_agnumber_t agno = bs->cur->bc_private.a.agno; |
|---|
| 428 | + xfs_agnumber_t agno = bs->cur->bc_ag.agno; |
|---|
| 279 | 429 | xfs_agino_t agino; |
|---|
| 280 | | - xfs_agblock_t agbno; |
|---|
| 281 | 430 | xfs_extlen_t len; |
|---|
| 282 | 431 | int holecount; |
|---|
| 283 | 432 | int i; |
|---|
| .. | .. |
|---|
| 304 | 453 | goto out; |
|---|
| 305 | 454 | } |
|---|
| 306 | 455 | |
|---|
| 307 | | - /* Make sure this record is aligned to cluster and inoalignmnt size. */ |
|---|
| 308 | | - agbno = XFS_AGINO_TO_AGBNO(mp, irec.ir_startino); |
|---|
| 309 | | - if ((agbno & (xfs_ialloc_cluster_alignment(mp) - 1)) || |
|---|
| 310 | | - (agbno & (xfs_icluster_size_fsb(mp) - 1))) |
|---|
| 311 | | - xchk_btree_set_corrupt(bs->sc, bs->cur, 0); |
|---|
| 456 | + xchk_iallocbt_rec_alignment(bs, &irec); |
|---|
| 457 | + if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) |
|---|
| 458 | + goto out; |
|---|
| 312 | 459 | |
|---|
| 313 | | - *inode_blocks += XFS_B_TO_FSB(mp, |
|---|
| 314 | | - irec.ir_count * mp->m_sb.sb_inodesize); |
|---|
| 460 | + iabt->inodes += irec.ir_count; |
|---|
| 315 | 461 | |
|---|
| 316 | 462 | /* Handle non-sparse inodes */ |
|---|
| 317 | 463 | if (!xfs_inobt_issparse(irec.ir_holemask)) { |
|---|
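xchk_iallocbt_rec_alignment(), called from the record scrubber above, also has to cope with the opposite case: an inode cluster that holds more inodes than one chunk and is therefore described by several consecutive inobt records. The `next_startino`/`next_cluster_ino` fields remember where the next record must begin. A simplified sketch of that bookkeeping follows; the real function additionally checks the cluster-alignment masks, which is omitted here, and the constants are assumptions for illustration:

```c
/*
 * Illustration only, not kernel code.  Sketches the next_startino /
 * next_cluster_ino bookkeeping for clusters that span multiple inobt
 * records; the alignment-mask checks of the real function are omitted.
 */
#include <stdbool.h>
#include <stdint.h>

#define INODES_PER_CHUNK	64
#define NULLAGINO		((uint32_t)-1)

struct walk_state {
	uint32_t next_startino;		/* expected start of next record */
	uint32_t next_cluster_ino;	/* first inode past the cluster */
};

/* Returns false if the record breaks the expected tiling of the cluster. */
static bool check_record(struct walk_state *ws, uint32_t ir_startino,
			 uint32_t inodes_per_cluster)
{
	if (ws->next_startino != NULLAGINO) {
		/* Mid-cluster: this record must continue the previous one. */
		if (ir_startino != ws->next_startino)
			return false;
		ws->next_startino += INODES_PER_CHUNK;
		if (ws->next_startino >= ws->next_cluster_ino) {
			ws->next_startino = NULLAGINO;
			ws->next_cluster_ino = NULLAGINO;
		}
		return true;
	}
	if (inodes_per_cluster <= INODES_PER_CHUNK)
		return true;		/* one record never spans clusters */
	/* This record starts a multi-record cluster; remember the rest. */
	ws->next_startino = ir_startino + INODES_PER_CHUNK;
	ws->next_cluster_ino = ir_startino + inodes_per_cluster;
	return true;
}
```

For example, with 128-inode clusters a record at startino 0 primes next_startino = 64; the following record must start at exactly 64, after which both fields reset to NULLAGINO.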
| .. | .. |
|---|
| 322 | 468 | |
|---|
| 323 | 469 | if (!xchk_iallocbt_chunk(bs, &irec, agino, len)) |
|---|
| 324 | 470 | goto out; |
|---|
| 325 | | - goto check_freemask; |
|---|
| 471 | + goto check_clusters; |
|---|
| 326 | 472 | } |
|---|
| 327 | 473 | |
|---|
| 328 | 474 | /* Check each chunk of a sparse inode cluster. */ |
|---|
| .. | .. |
|---|
| 348 | 494 | holecount + irec.ir_count != XFS_INODES_PER_CHUNK) |
|---|
| 349 | 495 | xchk_btree_set_corrupt(bs->sc, bs->cur, 0); |
|---|
| 350 | 496 | |
|---|
| 351 | | -check_freemask: |
|---|
| 352 | | - error = xchk_iallocbt_check_freemask(bs, &irec); |
|---|
| 497 | +check_clusters: |
|---|
| 498 | + error = xchk_iallocbt_check_clusters(bs, &irec); |
|---|
| 353 | 499 | if (error) |
|---|
| 354 | 500 | goto out; |
|---|
| 355 | 501 | |
|---|
| .. | .. |
|---|
| 366 | 512 | struct xfs_scrub *sc, |
|---|
| 367 | 513 | int which) |
|---|
| 368 | 514 | { |
|---|
| 369 | | - struct xfs_owner_info oinfo; |
|---|
| 370 | 515 | xfs_filblks_t blocks; |
|---|
| 371 | 516 | xfs_extlen_t inobt_blocks = 0; |
|---|
| 372 | 517 | xfs_extlen_t finobt_blocks = 0; |
|---|
| .. | .. |
|---|
| 388 | 533 | return; |
|---|
| 389 | 534 | } |
|---|
| 390 | 535 | |
|---|
| 391 | | - xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT); |
|---|
| 392 | | - error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo, |
|---|
| 393 | | - &blocks); |
|---|
| 536 | + error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, |
|---|
| 537 | + &XFS_RMAP_OINFO_INOBT, &blocks); |
|---|
| 394 | 538 | if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur)) |
|---|
| 395 | 539 | return; |
|---|
| 396 | 540 | if (blocks != inobt_blocks + finobt_blocks) |
|---|
| .. | .. |
|---|
| 405 | 549 | xchk_iallocbt_xref_rmap_inodes( |
|---|
| 406 | 550 | struct xfs_scrub *sc, |
|---|
| 407 | 551 | int which, |
|---|
| 408 | | - xfs_filblks_t inode_blocks) |
|---|
| 552 | + unsigned long long inodes) |
|---|
| 409 | 553 | { |
|---|
| 410 | | - struct xfs_owner_info oinfo; |
|---|
| 411 | 554 | xfs_filblks_t blocks; |
|---|
| 555 | + xfs_filblks_t inode_blocks; |
|---|
| 412 | 556 | int error; |
|---|
| 413 | 557 | |
|---|
| 414 | 558 | if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm)) |
|---|
| 415 | 559 | return; |
|---|
| 416 | 560 | |
|---|
| 417 | 561 | /* Check that we saw as many inode blocks as the rmap knows about. */ |
|---|
| 418 | | - xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES); |
|---|
| 419 | | - error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo, |
|---|
| 420 | | - &blocks); |
|---|
| 562 | + error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, |
|---|
| 563 | + &XFS_RMAP_OINFO_INODES, &blocks); |
|---|
| 421 | 564 | if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur)) |
|---|
| 422 | 565 | return; |
|---|
| 566 | + inode_blocks = XFS_B_TO_FSB(sc->mp, inodes * sc->mp->m_sb.sb_inodesize); |
|---|
| 423 | 567 | if (blocks != inode_blocks) |
|---|
| 424 | 568 | xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0); |
|---|
| 425 | 569 | } |
|---|
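Instead of accumulating block counts during the walk, the scrubber now counts inodes and converts to filesystem blocks only when cross-referencing the rmap, via `XFS_B_TO_FSB(mp, inodes * sb_inodesize)`. A small worked example with assumed geometry (4 KiB blocks, 512-byte inodes); XFS_B_TO_FSB rounds up, and the plain shift below matches it only because this byte count is block-aligned:

```c
/*
 * Illustration only; geometry is assumed (4 KiB blocks, 512-byte inodes).
 * XFS_B_TO_FSB() rounds up to whole blocks; the plain shift below agrees
 * with it because this byte count is already block-aligned.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long inodes = 256;	/* four 64-inode chunks seen */
	unsigned int inodesize = 512;		/* bytes per inode */
	unsigned int blocklog = 12;		/* log2 of a 4 KiB block */

	unsigned long long bytes = inodes * inodesize;	/* 131072 */
	unsigned long long blocks = bytes >> blocklog;	/* 32 */

	printf("%llu inodes -> %llu fs blocks\n", inodes, blocks);
	return 0;
}
```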
| .. | .. |
|---|
| 431 | 575 | xfs_btnum_t which) |
|---|
| 432 | 576 | { |
|---|
| 433 | 577 | struct xfs_btree_cur *cur; |
|---|
| 434 | | - struct xfs_owner_info oinfo; |
|---|
| 435 | | - xfs_filblks_t inode_blocks = 0; |
|---|
| 578 | + struct xchk_iallocbt iabt = { |
|---|
| 579 | + .inodes = 0, |
|---|
| 580 | + .next_startino = NULLAGINO, |
|---|
| 581 | + .next_cluster_ino = NULLAGINO, |
|---|
| 582 | + }; |
|---|
| 436 | 583 | int error; |
|---|
| 437 | 584 | |
|---|
| 438 | | - xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT); |
|---|
| 439 | 585 | cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur; |
|---|
| 440 | | - error = xchk_btree(sc, cur, xchk_iallocbt_rec, &oinfo, |
|---|
| 441 | | - &inode_blocks); |
|---|
| 586 | + error = xchk_btree(sc, cur, xchk_iallocbt_rec, &XFS_RMAP_OINFO_INOBT, |
|---|
| 587 | + &iabt); |
|---|
| 442 | 588 | if (error) |
|---|
| 443 | 589 | return error; |
|---|
| 444 | 590 | |
|---|
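The walk state moves from a bare `xfs_filblks_t` to a `struct xchk_iallocbt` that is stack-allocated here and handed to xchk_btree() as the opaque private pointer; the record callback recovers it through `bs->private`. A minimal sketch of that plumbing, with hypothetical names rather than the xchk_btree() API:

```c
/*
 * Illustration only, with hypothetical names; not the xchk_btree() API.
 * The caller stacks a state struct, passes its address as the walker's
 * opaque private pointer, and the per-record callback casts it back.
 */
struct walk_ctx {
	unsigned long long inodes;	/* running count of inodes seen */
};

struct walker {
	void *private;			/* opaque per-walk state */
};

static int count_record(struct walker *w, unsigned int ir_count)
{
	struct walk_ctx *ctx = w->private;

	ctx->inodes += ir_count;	/* mirrors iabt->inodes += ir_count */
	return 0;
}

static unsigned long long walk_two_chunks(void)
{
	struct walk_ctx ctx = { .inodes = 0 };
	struct walker w = { .private = &ctx };

	/* A real walk would invoke the callback once per inobt record. */
	count_record(&w, 64);
	count_record(&w, 64);
	return ctx.inodes;		/* 128 */
}
```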
| .. | .. |
|---|
| 452 | 598 | * to inode chunks with free inodes. |
|---|
| 453 | 599 | */ |
|---|
| 454 | 600 | if (which == XFS_BTNUM_INO) |
|---|
| 455 | | - xchk_iallocbt_xref_rmap_inodes(sc, which, inode_blocks); |
|---|
| 601 | + xchk_iallocbt_xref_rmap_inodes(sc, which, iabt.inodes); |
|---|
| 456 | 602 | |
|---|
| 457 | 603 | return error; |
|---|
| 458 | 604 | } |
|---|