// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs_platform.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_log.h"
#include "xfs_trans_priv.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2_priv.h"
#include "xfs_dir2.h"
#include "xfs_attr.h"
#include "xfs_reflink.h"
#include "xfs_ag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_exchmaps.h"
#include "xfs_rtbitmap.h"
#include "xfs_rtgroup.h"
#include "xfs_rtrmap_btree.h"
#include "xfs_bmap_util.h"
#include "xfs_rtrefcount_btree.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/health.h"
#include "scrub/tempfile.h"

/* Common code for the metadata scrubbers. */

/*
 * Handling operational errors.
 *
 * The *_process_error() family of functions are used to process error return
 * codes from functions called as part of a scrub operation.
 *
 * If there's no error, we return true to tell the caller that it's ok
 * to move on to the next check in its list.
 *
 * For non-verifier errors (e.g. ENOMEM) we return false to tell the
 * caller that something bad happened, and we preserve *error so that
 * the caller can return the *error up the stack to userspace.
 *
 * Verifier errors (EFSBADCRC/EFSCORRUPTED) are recorded by setting
 * OFLAG_CORRUPT in sm_flags and the *error is cleared. In other words,
 * we track verifier errors (and failed scrub checks) via OFLAG_CORRUPT,
 * not via return codes. We return false to tell the caller that
 * something bad happened. Since the error has been cleared, the caller
 * will (presumably) return that zero and scrubbing will move on to
 * whatever's next.
 *
 * ftrace can be used to record the precise metadata location and the
 * approximate code location of the failed operation.
 */

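/*
 * Illustrative caller pattern (a sketch only, not used by this file; the
 * btree lookup shown is hypothetical): a scrubber performs a metadata
 * operation, hands the return code to the process_error helper, and only
 * keeps going if the helper returns true:
 *
 *	error = xfs_alloc_lookup_le(cur, agbno, len, &has_rec);
 *	if (!xchk_process_error(sc, agno, agbno, &error))
 *		return error;
 *
 * On verifier failures the helper sets OFLAG_CORRUPT, clears *error, and
 * returns false, so the caller returns zero and the scrub moves on.
 */
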
/* Check for operational errors. */
static bool
__xchk_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error,
	__u32			errflag,
	void			*ret_ip)
{
	switch (*error) {
	case 0:
		return true;
	case -EDEADLOCK:
	case -ECHRNG:
		/* Used to restart an op with deadlock avoidance. */
		trace_xchk_deadlock_retry(
				sc->ip ? sc->ip : XFS_I(file_inode(sc->file)),
				sc->sm, *error);
		break;
	case -ECANCELED:
		/*
		 * ECANCELED here means that the caller set one of the scrub
		 * outcome flags (corrupt, xfail, xcorrupt) and wants to exit
		 * quickly. Set error to zero and do not continue.
		 */
		trace_xchk_op_error(sc, agno, bno, *error, ret_ip);
		*error = 0;
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
	case -EIO:
	case -ENODATA:
		/* Note the badness but don't abort. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		fallthrough;
	default:
		trace_xchk_op_error(sc, agno, bno, *error, ret_ip);
		break;
	}
	return false;
}

bool
xchk_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error)
{
	return __xchk_process_error(sc, agno, bno, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xchk_process_rt_error(
	struct xfs_scrub	*sc,
	xfs_rgnumber_t		rgno,
	xfs_rgblock_t		rgbno,
	int			*error)
{
	return __xchk_process_error(sc, rgno, rgbno, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xchk_xref_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error)
{
	return __xchk_process_error(sc, agno, bno, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}

/* Check for operational errors for a file offset. */
static bool
__xchk_fblock_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error,
	__u32			errflag,
	void			*ret_ip)
{
	switch (*error) {
	case 0:
		return true;
	case -EDEADLOCK:
	case -ECHRNG:
		/* Used to restart an op with deadlock avoidance. */
		trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
		break;
	case -ECANCELED:
		/*
		 * ECANCELED here means that the caller set one of the scrub
		 * outcome flags (corrupt, xfail, xcorrupt) and wants to exit
		 * quickly. Set error to zero and do not continue.
		 */
		trace_xchk_file_op_error(sc, whichfork, offset, *error,
				ret_ip);
		*error = 0;
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
	case -EIO:
	case -ENODATA:
		/* Note the badness but don't abort. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		fallthrough;
	default:
		trace_xchk_file_op_error(sc, whichfork, offset, *error,
				ret_ip);
		break;
	}
	return false;
}

bool
xchk_fblock_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error)
{
	return __xchk_fblock_process_error(sc, whichfork, offset, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xchk_fblock_xref_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error)
{
	return __xchk_fblock_process_error(sc, whichfork, offset, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}

/*
 * Handling scrub corruption/optimization/warning checks.
 *
 * The *_set_{corrupt,preen,warning}() family of functions are used to
 * record the presence of metadata that is incorrect (corrupt), could be
 * optimized somehow (preen), or should be flagged for administrative
 * review but is not incorrect (warn).
 *
 * ftrace can be used to record the precise metadata location and
 * approximate code location of the failed check.
 */

/* Record a block which could be optimized. */
void
xchk_block_set_preen(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xchk_block_preen(sc, xfs_buf_daddr(bp), __return_address);
}

/*
 * Record an inode which could be optimized. The trace data will
 * include the block given by bp if bp is given; otherwise it will use
 * the block location of the inode record itself.
 */
void
xchk_ino_set_preen(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xchk_ino_preen(sc, ino, __return_address);
}

/* Record something being wrong with the filesystem primary superblock. */
void
xchk_set_corrupt(
	struct xfs_scrub	*sc)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_fs_error(sc, 0, __return_address);
}

/* Record a corrupt block. */
void
xchk_block_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_block_error(sc, xfs_buf_daddr(bp), __return_address);
}

#ifdef CONFIG_XFS_QUOTA
/* Record a corrupt quota counter. */
void
xchk_qcheck_set_corrupt(
	struct xfs_scrub	*sc,
	unsigned int		dqtype,
	xfs_dqid_t		id)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_qcheck_error(sc, dqtype, id, __return_address);
}
#endif

/* Record a corruption while cross-referencing. */
void
xchk_block_xref_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_block_error(sc, xfs_buf_daddr(bp), __return_address);
}

/*
 * Record a corrupt inode. The trace data will include the block given
 * by bp if bp is given; otherwise it will use the block location of the
 * inode record itself.
 */
void
xchk_ino_set_corrupt(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_ino_error(sc, ino, __return_address);
}

/* Record a corruption while cross-referencing with an inode. */
void
xchk_ino_xref_set_corrupt(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_ino_error(sc, ino, __return_address);
}

/* Record corruption in a block indexed by a file fork. */
void
xchk_fblock_set_corrupt(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}

/* Record a corruption while cross-referencing a fork block. */
void
xchk_fblock_xref_set_corrupt(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}

/*
 * Warn about inodes that need administrative review but are not
 * incorrect.
 */
void
xchk_ino_set_warning(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xchk_ino_warning(sc, ino, __return_address);
}

/* Warn about a block indexed by a file fork that needs review. */
void
xchk_fblock_set_warning(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xchk_fblock_warning(sc, whichfork, offset, __return_address);
}

/* Signal an incomplete scrub. */
void
xchk_set_incomplete(
	struct xfs_scrub	*sc)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE;
	trace_xchk_incomplete(sc, __return_address);
}

/*
 * rmap scrubbing -- compute the number of blocks with a given owner,
 * at least according to the reverse mapping data.
 */

struct xchk_rmap_ownedby_info {
	const struct xfs_owner_info	*oinfo;
	xfs_filblks_t			*blocks;
};

STATIC int
xchk_count_rmap_ownedby_irec(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	struct xchk_rmap_ownedby_info	*sroi = priv;
	bool				irec_attr;
	bool				oinfo_attr;

	irec_attr = rec->rm_flags & XFS_RMAP_ATTR_FORK;
	oinfo_attr = sroi->oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK;

	if (rec->rm_owner != sroi->oinfo->oi_owner)
		return 0;

	if (XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) || irec_attr == oinfo_attr)
		(*sroi->blocks) += rec->rm_blockcount;

	return 0;
}

/*
 * Calculate the number of blocks the rmap thinks are owned by something.
 * The caller should pass us an rmapbt cursor.
 */
int
xchk_count_rmap_ownedby_ag(
	struct xfs_scrub		*sc,
	struct xfs_btree_cur		*cur,
	const struct xfs_owner_info	*oinfo,
	xfs_filblks_t			*blocks)
{
	struct xchk_rmap_ownedby_info	sroi = {
		.oinfo			= oinfo,
		.blocks			= blocks,
	};

	*blocks = 0;
	return xfs_rmap_query_all(cur, xchk_count_rmap_ownedby_irec,
			&sroi);
}

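/*
 * Illustrative use of the helper above (a sketch only, not called from this
 * file): count how many blocks the rmapbt attributes to AG metadata, given an
 * rmapbt cursor the caller has already set up in the scrub AG context.
 *
 *	struct xfs_owner_info	oinfo;
 *	xfs_filblks_t		blocks;
 *
 *	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
 *	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
 *			&blocks);
 */
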
/*
 * AG scrubbing
 *
 * These helpers facilitate locking an allocation group's header
 * buffers, setting up cursors for all btrees that are present, and
 * cleaning everything up once we're through.
 */

/* Decide if we want to return an AG header read failure. */
static inline bool
want_ag_read_header_failure(
	struct xfs_scrub	*sc,
	unsigned int		type)
{
	/* Return all AG header read failures when scanning btrees. */
	if (sc->sm->sm_type != XFS_SCRUB_TYPE_AGF &&
	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGFL &&
	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGI)
		return true;
	/*
	 * If we're scanning a given type of AG header, we only want to
	 * see read failures from that specific header. We'd like the
	 * other headers to cross-check them, but this isn't required.
	 */
	if (sc->sm->sm_type == type)
		return true;
	return false;
}

/*
 * Grab the AG header buffers for the attached perag structure.
 *
 * The headers should be released by xchk_ag_free, but as a fail safe we attach
 * all the buffers we grab to the scrub transaction so they'll all be freed
 * when we cancel it.
 */
static inline int
xchk_perag_read_headers(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	int			error;

	error = xfs_ialloc_read_agi(sa->pag, sc->tp, 0, &sa->agi_bp);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
		return error;

	error = xfs_alloc_read_agf(sa->pag, sc->tp, 0, &sa->agf_bp);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGF))
		return error;

	return 0;
}

/*
 * Grab the AG headers for the attached perag structure and wait for pending
 * intents to drain.
 */
int
xchk_perag_drain_and_lock(
	struct xfs_scrub	*sc)
{
	struct xchk_ag		*sa = &sc->sa;
	int			error = 0;

	ASSERT(sa->pag != NULL);
	ASSERT(sa->agi_bp == NULL);
	ASSERT(sa->agf_bp == NULL);

	do {
		if (xchk_should_terminate(sc, &error))
			return error;

		error = xchk_perag_read_headers(sc, sa);
		if (error)
			return error;

		/*
		 * If we've grabbed an inode for scrubbing then we assume that
		 * holding its ILOCK will suffice to coordinate with any intent
		 * chains involving this inode.
		 */
		if (sc->ip)
			return 0;

		/*
		 * Decide if this AG is quiet enough for all metadata to be
		 * consistent with each other. XFS allows the AG header buffer
		 * locks to cycle across transaction rolls while processing
		 * chains of deferred ops, which means that there could be
		 * other threads in the middle of processing a chain of
		 * deferred ops. For regular operations we are careful about
		 * ordering operations to prevent collisions between threads
		 * (which is why we don't need a per-AG lock), but scrub and
		 * repair have to serialize against chained operations.
		 *
		 * We just locked all the AG header buffers; now take a look
		 * to see if there are any intents in progress. If there are,
		 * drop the AG headers and wait for the intents to drain.
		 * Since we hold all the AG header locks for the duration of
		 * the scrub, this is the only time we have to sample the
		 * intents counter; any threads increasing it after this point
		 * can't possibly be in the middle of a chain of AG metadata
		 * updates.
		 *
		 * Obviously, this should be slanted against scrub and in favor
		 * of runtime threads.
		 */
		if (!xfs_group_intent_busy(pag_group(sa->pag)))
			return 0;

		if (sa->agf_bp) {
			xfs_trans_brelse(sc->tp, sa->agf_bp);
			sa->agf_bp = NULL;
		}

		if (sa->agi_bp) {
			xfs_trans_brelse(sc->tp, sa->agi_bp);
			sa->agi_bp = NULL;
		}

		if (!(sc->flags & XCHK_FSGATES_DRAIN))
			return -ECHRNG;
		error = xfs_group_intent_drain(pag_group(sa->pag));
		if (error == -ERESTARTSYS)
			error = -EINTR;
	} while (!error);

	return error;
}

/*
 * Grab the per-AG structure, grab all AG header buffers, and wait until there
 * aren't any pending intents. Returns -ENOENT if we can't grab the perag
 * structure.
 */
int
xchk_ag_read_headers(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xchk_ag		*sa)
{
	struct xfs_mount	*mp = sc->mp;

	ASSERT(!sa->pag);
	sa->pag = xfs_perag_get(mp, agno);
	if (!sa->pag)
		return -ENOENT;

	return xchk_perag_drain_and_lock(sc);
}

/* Release all the AG btree cursors. */
void
xchk_ag_btcur_free(
	struct xchk_ag		*sa)
{
	if (sa->refc_cur)
		xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR);
	if (sa->rmap_cur)
		xfs_btree_del_cursor(sa->rmap_cur, XFS_BTREE_ERROR);
	if (sa->fino_cur)
		xfs_btree_del_cursor(sa->fino_cur, XFS_BTREE_ERROR);
	if (sa->ino_cur)
		xfs_btree_del_cursor(sa->ino_cur, XFS_BTREE_ERROR);
	if (sa->cnt_cur)
		xfs_btree_del_cursor(sa->cnt_cur, XFS_BTREE_ERROR);
	if (sa->bno_cur)
		xfs_btree_del_cursor(sa->bno_cur, XFS_BTREE_ERROR);

	sa->refc_cur = NULL;
	sa->rmap_cur = NULL;
	sa->fino_cur = NULL;
	sa->ino_cur = NULL;
	sa->bno_cur = NULL;
	sa->cnt_cur = NULL;
}

/* Initialize all the btree cursors for an AG. */
void
xchk_ag_btcur_init(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	struct xfs_mount	*mp = sc->mp;

	if (sa->agf_bp) {
		/* Set up a bnobt cursor for cross-referencing. */
		sa->bno_cur = xfs_bnobt_init_cursor(mp, sc->tp, sa->agf_bp,
				sa->pag);
		xchk_ag_btree_del_cursor_if_sick(sc, &sa->bno_cur,
				XFS_SCRUB_TYPE_BNOBT);

		/* Set up a cntbt cursor for cross-referencing. */
		sa->cnt_cur = xfs_cntbt_init_cursor(mp, sc->tp, sa->agf_bp,
				sa->pag);
		xchk_ag_btree_del_cursor_if_sick(sc, &sa->cnt_cur,
				XFS_SCRUB_TYPE_CNTBT);

		/* Set up a rmapbt cursor for cross-referencing. */
		if (xfs_has_rmapbt(mp)) {
			sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp,
					sa->agf_bp, sa->pag);
			xchk_ag_btree_del_cursor_if_sick(sc, &sa->rmap_cur,
					XFS_SCRUB_TYPE_RMAPBT);
		}

		/* Set up a refcountbt cursor for cross-referencing. */
		if (xfs_has_reflink(mp)) {
			sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
					sa->agf_bp, sa->pag);
			xchk_ag_btree_del_cursor_if_sick(sc, &sa->refc_cur,
					XFS_SCRUB_TYPE_REFCNTBT);
		}
	}

	if (sa->agi_bp) {
		/* Set up an inobt cursor for cross-referencing. */
		sa->ino_cur = xfs_inobt_init_cursor(sa->pag, sc->tp,
				sa->agi_bp);
		xchk_ag_btree_del_cursor_if_sick(sc, &sa->ino_cur,
				XFS_SCRUB_TYPE_INOBT);

		/* Set up a finobt cursor for cross-referencing. */
		if (xfs_has_finobt(mp)) {
			sa->fino_cur = xfs_finobt_init_cursor(sa->pag, sc->tp,
					sa->agi_bp);
			xchk_ag_btree_del_cursor_if_sick(sc, &sa->fino_cur,
					XFS_SCRUB_TYPE_FINOBT);
		}
	}
}

/* Release the AG header context and btree cursors. */
void
xchk_ag_free(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	xchk_ag_btcur_free(sa);
	xrep_reset_perag_resv(sc);
	if (sa->agf_bp) {
		xfs_trans_brelse(sc->tp, sa->agf_bp);
		sa->agf_bp = NULL;
	}
	if (sa->agi_bp) {
		xfs_trans_brelse(sc->tp, sa->agi_bp);
		sa->agi_bp = NULL;
	}
	if (sa->pag) {
		xfs_perag_put(sa->pag);
		sa->pag = NULL;
	}
}

/*
 * For scrub, grab the perag structure, the AGI, and the AGF headers, in that
 * order. Locking order requires us to get the AGI before the AGF. We use the
 * transaction to avoid deadlocking on crosslinked metadata buffers; either the
 * caller passes one in (bmap scrub) or we have to create a transaction
 * ourselves. Returns ENOENT if the perag struct cannot be grabbed.
 */
int
xchk_ag_init(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xchk_ag		*sa)
{
	int			error;

	error = xchk_ag_read_headers(sc, agno, sa);
	if (error)
		return error;

	xchk_ag_btcur_init(sc, sa);
	return 0;
}

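/*
 * Illustrative setup sequence (a sketch only, not part of this file): a
 * per-AG scrubber typically initializes its context like this, after which
 * sc->sa holds the AGI/AGF buffers and any btree cursors, and the scrub
 * teardown path calls xchk_ag_free() on its behalf:
 *
 *	error = xchk_ag_init(sc, sc->sm->sm_agno, &sc->sa);
 *	if (error)
 *		return error;
 */
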
#ifdef CONFIG_XFS_RT
/*
 * For scrubbing a realtime group, grab all the in-core resources we'll need to
 * check the metadata, which means taking the ILOCK of the realtime group's
 * metadata inodes. Callers must not join these inodes to the transaction with
 * non-zero lockflags or concurrency problems will result. The @rtglock_flags
 * argument takes XFS_RTGLOCK_* flags.
 */
int
xchk_rtgroup_init(
	struct xfs_scrub	*sc,
	xfs_rgnumber_t		rgno,
	struct xchk_rt		*sr)
{
	ASSERT(sr->rtg == NULL);
	ASSERT(sr->rtlock_flags == 0);

	sr->rtg = xfs_rtgroup_get(sc->mp, rgno);
	if (!sr->rtg)
		return -ENOENT;
	return 0;
}

/* Lock all the rt group metadata inode ILOCKs and wait for intents. */
int
xchk_rtgroup_lock(
	struct xfs_scrub	*sc,
	struct xchk_rt		*sr,
	unsigned int		rtglock_flags)
{
	int			error = 0;

	ASSERT(sr->rtg != NULL);

	/*
	 * If we're /only/ locking the rtbitmap in shared mode, then we're
	 * obviously not trying to compare records in two metadata inodes.
	 * There's no need to drain intents here because the caller (most
	 * likely the rgsuper scanner) doesn't need that level of consistency.
	 */
	if (rtglock_flags == XFS_RTGLOCK_BITMAP_SHARED) {
		xfs_rtgroup_lock(sr->rtg, rtglock_flags);
		sr->rtlock_flags = rtglock_flags;
		return 0;
	}

	do {
		if (xchk_should_terminate(sc, &error))
			return error;

		xfs_rtgroup_lock(sr->rtg, rtglock_flags);

		/*
		 * If we've grabbed a non-metadata file for scrubbing, we
		 * assume that holding its ILOCK will suffice to coordinate
		 * with any rt intent chains involving this inode.
		 */
		if (sc->ip && !xfs_is_internal_inode(sc->ip))
			break;

		/*
		 * Decide if the rt group is quiet enough for all metadata to
		 * be consistent with each other. Regular file IO doesn't get
		 * to lock all the rt inodes at the same time, which means that
		 * there could be other threads in the middle of processing a
		 * chain of deferred ops.
		 *
		 * We just locked all the metadata inodes for this rt group;
		 * now take a look to see if there are any intents in progress.
		 * If there are, drop the rt group inode locks and wait for the
		 * intents to drain. Since we hold the rt group inode locks
		 * for the duration of the scrub, this is the only time we have
		 * to sample the intents counter; any threads increasing it
		 * after this point can't possibly be in the middle of a chain
		 * of rt metadata updates.
		 *
		 * Obviously, this should be slanted against scrub and in favor
		 * of runtime threads.
		 */
		if (!xfs_group_intent_busy(rtg_group(sr->rtg)))
			break;

		xfs_rtgroup_unlock(sr->rtg, rtglock_flags);

		if (!(sc->flags & XCHK_FSGATES_DRAIN))
			return -ECHRNG;
		error = xfs_group_intent_drain(rtg_group(sr->rtg));
		if (error) {
			if (error == -ERESTARTSYS)
				error = -EINTR;
			return error;
		}
	} while (1);

	sr->rtlock_flags = rtglock_flags;

	if (xfs_has_rtrmapbt(sc->mp) && (rtglock_flags & XFS_RTGLOCK_RMAP))
		sr->rmap_cur = xfs_rtrmapbt_init_cursor(sc->tp, sr->rtg);

	if (xfs_has_rtreflink(sc->mp) && (rtglock_flags & XFS_RTGLOCK_REFCOUNT))
		sr->refc_cur = xfs_rtrefcountbt_init_cursor(sc->tp, sr->rtg);

	return 0;
}

/*
 * Free all the btree cursors and other incore data relating to the realtime
 * group. This has to be done /before/ committing (or cancelling) the scrub
 * transaction.
 */
void
xchk_rtgroup_btcur_free(
	struct xchk_rt		*sr)
{
	if (sr->rmap_cur)
		xfs_btree_del_cursor(sr->rmap_cur, XFS_BTREE_ERROR);
	if (sr->refc_cur)
		xfs_btree_del_cursor(sr->refc_cur, XFS_BTREE_ERROR);

	sr->refc_cur = NULL;
	sr->rmap_cur = NULL;
}

/*
 * Unlock the realtime group. This must be done /after/ committing (or
 * cancelling) the scrub transaction.
 */
void
xchk_rtgroup_unlock(
	struct xchk_rt		*sr)
{
	ASSERT(sr->rtg != NULL);

	if (sr->rtlock_flags) {
		xfs_rtgroup_unlock(sr->rtg, sr->rtlock_flags);
		sr->rtlock_flags = 0;
	}
}

/*
 * Unlock the realtime group and release its resources. This must be done
 * /after/ committing (or cancelling) the scrub transaction.
 */
void
xchk_rtgroup_free(
	struct xfs_scrub	*sc,
	struct xchk_rt		*sr)
{
	ASSERT(sr->rtg != NULL);

	xchk_rtgroup_unlock(sr);

	xfs_rtgroup_put(sr->rtg);
	sr->rtg = NULL;
}
#endif /* CONFIG_XFS_RT */

/* Per-scrubber setup functions */

void
xchk_trans_cancel(
	struct xfs_scrub	*sc)
{
	xfs_trans_cancel(sc->tp);
	sc->tp = NULL;
}

void
xchk_trans_alloc_empty(
	struct xfs_scrub	*sc)
{
	sc->tp = xfs_trans_alloc_empty(sc->mp);
}

/*
 * Grab an empty transaction so that we can re-grab locked buffers if
 * one of our btrees turns out to be cyclic.
 *
 * If we're going to repair something, we need to ask for the largest possible
 * log reservation so that we can handle the worst case scenario for metadata
 * updates while rebuilding a metadata item. We also need to reserve as many
 * blocks in the head transaction as we think we're going to need to rebuild
 * the metadata object.
 */
int
xchk_trans_alloc(
	struct xfs_scrub	*sc,
	uint			resblks)
{
	if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
		return xfs_trans_alloc(sc->mp, &M_RES(sc->mp)->tr_itruncate,
				resblks, 0, 0, &sc->tp);

	xchk_trans_alloc_empty(sc);
	return 0;
}

/* Set us up with a transaction and an empty context. */
int
xchk_setup_fs(
	struct xfs_scrub	*sc)
{
	uint			resblks;

	resblks = xrep_calc_ag_resblks(sc);
	return xchk_trans_alloc(sc, resblks);
}

/* Set us up with a transaction and an empty context to repair rt metadata. */
int
xchk_setup_rt(
	struct xfs_scrub	*sc)
{
	return xchk_trans_alloc(sc, xrep_calc_rtgroup_resblks(sc));
}

/* Set us up with AG headers and btree cursors. */
int
xchk_setup_ag_btree(
	struct xfs_scrub	*sc,
	bool			force_log)
{
	struct xfs_mount	*mp = sc->mp;
	int			error;

	/*
	 * If the caller asks us to checkpoint the log, do so. This
	 * expensive operation should be performed infrequently and only
	 * as a last resort. Any caller that sets force_log should
	 * document why they need to do so.
	 */
	if (force_log) {
		error = xchk_checkpoint_log(mp);
		if (error)
			return error;
	}

	error = xchk_setup_fs(sc);
	if (error)
		return error;

	return xchk_ag_init(sc, sc->sm->sm_agno, &sc->sa);
}

/* Push everything out of the log onto disk. */
int
xchk_checkpoint_log(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;
	xfs_ail_push_all_sync(mp->m_ail);
	return 0;
}

/* Verify that an inode is allocated ondisk, then return its cached inode. */
int
xchk_iget(
	struct xfs_scrub	*sc,
	xfs_ino_t		inum,
	struct xfs_inode	**ipp)
{
	ASSERT(sc->tp != NULL);

	return xfs_iget(sc->mp, sc->tp, inum, XCHK_IGET_FLAGS, 0, ipp);
}

This 985 * can include -EINVAL and -ENOENT for invalid inode numbers or inodes that are 986 * no longer allocated; or any other corruption or runtime error. 987 * 988 * If the AGI read fails, return the error, a NULL AGI, and NULL inode. 989 * 990 * If a fatal signal is pending, return -EINTR, a NULL AGI, and a NULL inode. 991 */ 992int 993xchk_iget_agi( 994 struct xfs_scrub *sc, 995 xfs_ino_t inum, 996 struct xfs_buf **agi_bpp, 997 struct xfs_inode **ipp) 998{ 999 struct xfs_mount *mp = sc->mp; 1000 struct xfs_trans *tp = sc->tp; 1001 struct xfs_perag *pag; 1002 int error; 1003 1004 ASSERT(sc->tp != NULL); 1005 1006again: 1007 *agi_bpp = NULL; 1008 *ipp = NULL; 1009 error = 0; 1010 1011 if (xchk_should_terminate(sc, &error)) 1012 return error; 1013 1014 /* 1015 * Attach the AGI buffer to the scrub transaction to avoid deadlocks 1016 * in the iget cache miss path. 1017 */ 1018 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum)); 1019 error = xfs_ialloc_read_agi(pag, tp, 0, agi_bpp); 1020 xfs_perag_put(pag); 1021 if (error) 1022 return error; 1023 1024 error = xfs_iget(mp, tp, inum, XFS_IGET_NORETRY | XCHK_IGET_FLAGS, 0, 1025 ipp); 1026 if (error == -EAGAIN) { 1027 /* 1028 * The inode may be in core but temporarily unavailable and may 1029 * require the AGI buffer before it can be returned. Drop the 1030 * AGI buffer and retry the lookup. 1031 * 1032 * Incore lookup will fail with EAGAIN on a cache hit if the 1033 * inode is queued to the inactivation list. The inactivation 1034 * worker may remove the inode from the unlinked list and hence 1035 * needs the AGI. 1036 * 1037 * Hence xchk_iget_agi() needs to drop the AGI lock on EAGAIN 1038 * to allow inodegc to make progress and move the inode to 1039 * IRECLAIMABLE state where xfs_iget will be able to return it 1040 * again if it can lock the inode. 1041 */ 1042 xfs_trans_brelse(tp, *agi_bpp); 1043 delay(1); 1044 goto again; 1045 } 1046 if (error) 1047 return error; 1048 1049 /* We got the inode, so we can release the AGI. */ 1050 ASSERT(*ipp != NULL); 1051 xfs_trans_brelse(tp, *agi_bpp); 1052 *agi_bpp = NULL; 1053 return 0; 1054} 1055 1056#ifdef CONFIG_XFS_QUOTA 1057/* 1058 * Try to attach dquots to this inode if we think we might want to repair it. 1059 * Callers must not hold any ILOCKs. If the dquots are broken and cannot be 1060 * attached, a quotacheck will be scheduled. 1061 */ 1062int 1063xchk_ino_dqattach( 1064 struct xfs_scrub *sc) 1065{ 1066 ASSERT(sc->tp != NULL); 1067 ASSERT(sc->ip != NULL); 1068 1069 if (!xchk_could_repair(sc)) 1070 return 0; 1071 1072 return xrep_ino_dqattach(sc); 1073} 1074#endif 1075 1076/* Install an inode that we opened by handle for scrubbing. */ 1077int 1078xchk_install_handle_inode( 1079 struct xfs_scrub *sc, 1080 struct xfs_inode *ip) 1081{ 1082 if (VFS_I(ip)->i_generation != sc->sm->sm_gen) { 1083 xchk_irele(sc, ip); 1084 return -ENOENT; 1085 } 1086 1087 sc->ip = ip; 1088 return 0; 1089} 1090 1091/* 1092 * Install an already-referenced inode for scrubbing. Get our own reference to 1093 * the inode to make disposal simpler. The inode must not be in I_FREEING or 1094 * I_WILL_FREE state! 
/*
 * Install an already-referenced inode for scrubbing. Get our own reference to
 * the inode to make disposal simpler. The inode must not be in I_FREEING or
 * I_WILL_FREE state!
 */
int
xchk_install_live_inode(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	if (!igrab(VFS_I(ip))) {
		xchk_ino_set_corrupt(sc, ip->i_ino);
		return -EFSCORRUPTED;
	}

	sc->ip = ip;
	return 0;
}

/*
 * In preparation to scrub metadata structures that hang off of an inode,
 * grab either the inode referenced in the scrub control structure or the
 * inode passed in. If the inumber does not reference an allocated inode
 * record, the function returns ENOENT to end the scrub early. The inode
 * is not locked.
 */
int
xchk_iget_for_scrubbing(
	struct xfs_scrub	*sc)
{
	struct xfs_imap		imap;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_perag	*pag;
	struct xfs_buf		*agi_bp;
	struct xfs_inode	*ip_in = XFS_I(file_inode(sc->file));
	struct xfs_inode	*ip = NULL;
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, sc->sm->sm_ino);
	int			error;

	ASSERT(sc->tp == NULL);

	/* We want to scan the inode we already had opened. */
	if (sc->sm->sm_ino == 0 || sc->sm->sm_ino == ip_in->i_ino)
		return xchk_install_live_inode(sc, ip_in);

	/*
	 * On pre-metadir filesystems, reject internal metadata files. For
	 * metadir filesystems, limited scrubbing of any file in the metadata
	 * directory tree by handle is allowed, because that is the only way to
	 * validate the lack of parent pointers in the sb-root metadata inodes.
	 */
	if (!xfs_has_metadir(mp) && xfs_is_sb_inum(mp, sc->sm->sm_ino))
		return -ENOENT;
	/* Reject obviously bad inode numbers. */
	if (!xfs_verify_ino(sc->mp, sc->sm->sm_ino))
		return -ENOENT;

	/* Try a safe untrusted iget. */
	error = xchk_iget_safe(sc, sc->sm->sm_ino, &ip);
	if (!error)
		return xchk_install_handle_inode(sc, ip);
	if (error == -ENOENT)
		return error;
	if (error != -EINVAL)
		goto out_error;

	/*
	 * EINVAL with IGET_UNTRUSTED probably means one of several things:
	 * userspace gave us an inode number that doesn't correspond to fs
	 * space; the inode btree lacks a record for this inode; or there is a
	 * record, and it says this inode is free.
	 *
	 * We want to look up this inode in the inobt to distinguish two
	 * scenarios: (1) the inobt says the inode is free, in which case
	 * there's nothing to do; and (2) the inobt says the inode is
	 * allocated, but loading it failed due to corruption.
	 *
	 * Allocate a transaction and grab the AGI to prevent inobt activity
	 * in this AG. Retry the iget in case someone allocated a new inode
	 * after the first iget failed.
	 */
	error = xchk_trans_alloc(sc, 0);
	if (error)
		goto out_error;

	error = xchk_iget_agi(sc, sc->sm->sm_ino, &agi_bp, &ip);
	if (error == 0) {
		/* Actually got the inode, so install it. */
		xchk_trans_cancel(sc);
		return xchk_install_handle_inode(sc, ip);
	}
	if (error == -ENOENT)
		goto out_gone;
	if (error != -EINVAL)
		goto out_cancel;

	/* Ensure that we have protected against inode allocation/freeing. */
	if (agi_bp == NULL) {
		ASSERT(agi_bp != NULL);
		error = -ECANCELED;
		goto out_cancel;
	}

	/*
	 * Untrusted iget failed a second time. Let's try an inobt lookup.
	 * If the inobt thinks this inode either cannot exist inside the
	 * filesystem or is not allocated, return ENOENT to signal that the
	 * check can be skipped.
	 *
	 * If the lookup returns corruption, we'll mark this inode corrupt and
	 * exit to userspace. There's little chance of fixing anything until
	 * the inobt is straightened out, but there's nothing we can do here.
	 *
	 * If the lookup encounters any other error, exit to userspace.
	 *
	 * If the lookup succeeds, something else must be very wrong in the fs
	 * such that setting up the incore inode failed in some strange way.
	 * Treat those as corruptions.
	 */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sc->sm->sm_ino));
	if (!pag) {
		error = -EFSCORRUPTED;
		goto out_cancel;
	}

	error = xfs_imap(pag, sc->tp, sc->sm->sm_ino, &imap,
			XFS_IGET_UNTRUSTED);
	xfs_perag_put(pag);
	if (error == -EINVAL || error == -ENOENT)
		goto out_gone;
	if (!error)
		error = -EFSCORRUPTED;

out_cancel:
	xchk_trans_cancel(sc);
out_error:
	trace_xchk_op_error(sc, agno, XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino),
			error, __return_address);
	return error;
out_gone:
	/* The file is gone, so there's nothing to check. */
	xchk_trans_cancel(sc);
	return -ENOENT;
}

/* Release an inode, possibly dropping it in the process. */
void
xchk_irele(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	if (sc->tp) {
		/*
		 * If we are in a transaction, we /cannot/ drop the inode
		 * ourselves, because the VFS will trigger writeback, which
		 * can require a transaction. Clear DONTCACHE to force the
		 * inode to the LRU, where someone else can take care of
		 * dropping it.
		 *
		 * Note that when we grabbed our reference to the inode, it
		 * could have had an active ref and DONTCACHE set if a sysadmin
		 * is trying to coerce a change in file access mode. icache
		 * hits do not clear DONTCACHE, so we must do it here.
		 */
		spin_lock(&VFS_I(ip)->i_lock);
		inode_state_clear(VFS_I(ip), I_DONTCACHE);
		spin_unlock(&VFS_I(ip)->i_lock);
	}

	xfs_irele(ip);
}

/*
 * Set us up to scrub metadata mapped by a file's fork. Callers must not use
 * this to operate on user-accessible regular file data because the MMAPLOCK is
 * not taken.
 */
int
xchk_setup_inode_contents(
	struct xfs_scrub	*sc,
	unsigned int		resblks)
{
	int			error;

	error = xchk_iget_for_scrubbing(sc);
	if (error)
		return error;

	error = xrep_tempfile_adjust_directory_tree(sc);
	if (error)
		return error;

	/* Lock the inode so the VFS cannot touch this file. */
	xchk_ilock(sc, XFS_IOLOCK_EXCL);

	error = xchk_trans_alloc(sc, resblks);
	if (error)
		goto out;

	error = xchk_ino_dqattach(sc);
	if (error)
		goto out;

	xchk_ilock(sc, XFS_ILOCK_EXCL);
out:
	/* scrub teardown will unlock and release the inode for us */
	return error;
}

void
xchk_ilock(
	struct xfs_scrub	*sc,
	unsigned int		ilock_flags)
{
	xfs_ilock(sc->ip, ilock_flags);
	sc->ilock_flags |= ilock_flags;
}

bool
xchk_ilock_nowait(
	struct xfs_scrub	*sc,
	unsigned int		ilock_flags)
{
	if (xfs_ilock_nowait(sc->ip, ilock_flags)) {
		sc->ilock_flags |= ilock_flags;
		return true;
	}

	return false;
}

void
xchk_iunlock(
	struct xfs_scrub	*sc,
	unsigned int		ilock_flags)
{
	sc->ilock_flags &= ~ilock_flags;
	xfs_iunlock(sc->ip, ilock_flags);
}

/*
 * Predicate that decides if we need to evaluate the cross-reference check.
 * If there was an error accessing the cross-reference btree, just delete
 * the cursor and skip the check.
 */
bool
xchk_should_check_xref(
	struct xfs_scrub	*sc,
	int			*error,
	struct xfs_btree_cur	**curpp)
{
	/* No point in xref if we already know we're corrupt. */
	if (xchk_skip_xref(sc->sm))
		return false;

	if (*error == 0)
		return true;

	if (curpp) {
		/* If we've already given up on xref, just bail out. */
		if (!*curpp)
			return false;

		/* xref error, delete cursor and bail out. */
		xfs_btree_del_cursor(*curpp, XFS_BTREE_ERROR);
		*curpp = NULL;
	}

	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
	trace_xchk_xref_error(sc, *error, __return_address);

	/*
	 * Errors encountered during cross-referencing with another
	 * data structure should not cause this scrubber to abort.
	 */
	*error = 0;
	return false;
}

/* Run the structure verifiers on in-memory buffers to detect bad memory. */
void
xchk_buffer_recheck(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	xfs_failaddr_t		fa;

	if (bp->b_ops == NULL) {
		xchk_block_set_corrupt(sc, bp);
		return;
	}
	if (bp->b_ops->verify_struct == NULL) {
		xchk_set_incomplete(sc);
		return;
	}
	fa = bp->b_ops->verify_struct(bp);
	if (!fa)
		return;
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_block_error(sc, xfs_buf_daddr(bp), fa);
}

static inline int
xchk_metadata_inode_subtype(
	struct xfs_scrub	*sc,
	unsigned int		scrub_type)
{
	struct xfs_scrub_subord	*sub;
	int			error;

	sub = xchk_scrub_create_subord(sc, scrub_type);
	if (!sub)
		return -ENOMEM;

	error = sub->sc.ops->scrub(&sub->sc);
	xchk_scrub_free_subord(sub);
	return error;
}

/*
 * Scrub the attr/data forks of a metadata inode. The metadata inode must be
 * pointed to by sc->ip and the ILOCK must be held.
 */
int
xchk_metadata_inode_forks(
	struct xfs_scrub	*sc)
{
	bool			shared;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return 0;

	/* Check the inode record. */
	error = xchk_metadata_inode_subtype(sc, XFS_SCRUB_TYPE_INODE);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Metadata inodes don't live on the rt device. */
	if (sc->ip->i_diflags & XFS_DIFLAG_REALTIME) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		return 0;
	}

	/* They should never participate in reflink. */
	if (xfs_is_reflink_inode(sc->ip)) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		return 0;
	}

	/* Invoke the data fork scrubber. */
	error = xchk_metadata_inode_subtype(sc, XFS_SCRUB_TYPE_BMBTD);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Look for incorrect shared blocks. */
	if (xfs_has_reflink(sc->mp)) {
		error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
				&shared);
		if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0,
				&error))
			return error;
		if (shared)
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
	}

	/*
	 * Metadata files can only have extended attributes on metadir
	 * filesystems, either for parent pointers or for actual xattr data.
	 */
	if (xfs_inode_hasattr(sc->ip)) {
		if (!xfs_has_metadir(sc->mp)) {
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
			return 0;
		}

		error = xchk_metadata_inode_subtype(sc, XFS_SCRUB_TYPE_BMBTA);
		if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
			return error;
	}

	return 0;
}

/*
 * Enable filesystem hooks (i.e. runtime code patching) before starting a scrub
 * operation. Callers must not hold any locks that intersect with the CPU
 * hotplug lock (e.g. writeback locks) because code patching must halt the CPUs
 * to change kernel code.
 */
void
xchk_fsgates_enable(
	struct xfs_scrub	*sc,
	unsigned int		scrub_fsgates)
{
	ASSERT(!(scrub_fsgates & ~XCHK_FSGATES_ALL));
	ASSERT(!(sc->flags & scrub_fsgates));

	trace_xchk_fsgates_enable(sc, scrub_fsgates);

	if (scrub_fsgates & XCHK_FSGATES_DRAIN)
		xfs_defer_drain_wait_enable();

	if (scrub_fsgates & XCHK_FSGATES_QUOTA)
		xfs_dqtrx_hook_enable();

	if (scrub_fsgates & XCHK_FSGATES_DIRENTS)
		xfs_dir_hook_enable();

	if (scrub_fsgates & XCHK_FSGATES_RMAP)
		xfs_rmap_hook_enable();

	sc->flags |= scrub_fsgates;
}

/*
 * Decide if this is a cached inode that's also allocated. The caller must
 * hold a reference to an AG and the AGI buffer lock to prevent inodes from
 * being allocated or freed.
 *
 * Look up an inode by number in the given file system. If the inode number
 * is invalid, return -EINVAL. If the inode is not in cache, return -ENODATA.
 * If the inode is being reclaimed, return -ENODATA because we know the inode
 * cache cannot be updating the ondisk metadata.
 *
 * Otherwise, the incore inode is the one we want, and it is either live,
 * somewhere in the inactivation machinery, or reclaimable. The inode is
 * allocated if i_mode is nonzero. In all three cases, the cached inode will
 * be more up to date than the ondisk inode buffer, so we must use the incore
 * i_mode.
 */
int
xchk_inode_is_allocated(
	struct xfs_scrub	*sc,
	xfs_agino_t		agino,
	bool			*inuse)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_perag	*pag = sc->sa.pag;
	xfs_ino_t		ino;
	struct xfs_inode	*ip;
	int			error;

	/* caller must hold perag reference */
	if (pag == NULL) {
		ASSERT(pag != NULL);
		return -EINVAL;
	}

	/* caller must have AGI buffer */
	if (sc->sa.agi_bp == NULL) {
		ASSERT(sc->sa.agi_bp != NULL);
		return -EINVAL;
	}

	/* reject inode numbers outside existing AGs */
	ino = xfs_agino_to_ino(pag, agino);
	if (!xfs_verify_ino(mp, ino))
		return -EINVAL;

	error = -ENODATA;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
	if (!ip) {
		/* cache miss */
		goto out_rcu;
	}

	/*
	 * If the inode number doesn't match, the incore inode got reused
	 * during an RCU grace period and the radix tree hasn't been updated.
	 * This isn't the inode we want.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino)
		goto out_skip;

	trace_xchk_inode_is_allocated(ip);

	/*
	 * We have an incore inode that matches the inode we want, and the
	 * caller holds the perag structure and the AGI buffer. Let's check
	 * our assumptions below:
	 */

#ifdef DEBUG
	/*
	 * (1) If the incore inode is live (i.e. referenced from the dcache),
	 * it will not be INEW, nor will it be in the inactivation or reclaim
	 * machinery. The ondisk inode had better be allocated. This is the
	 * most trivial case.
	 */
	if (!(ip->i_flags & (XFS_NEED_INACTIVE | XFS_INEW | XFS_IRECLAIMABLE |
			     XFS_INACTIVATING))) {
		/* live inode */
		ASSERT(VFS_I(ip)->i_mode != 0);
	}

	/*
	 * If the incore inode is INEW, there are several possibilities:
	 *
	 * (2) For a file that is being created, note that we allocate the
	 * ondisk inode before allocating, initializing, and adding the incore
	 * inode to the radix tree.
	 *
	 * (3) If the incore inode is being recycled, the inode has to be
	 * allocated because we don't allow freed inodes to be recycled.
	 * Recycling doesn't touch i_mode.
	 */
	if (ip->i_flags & XFS_INEW) {
		/* created on disk already or recycling */
		ASSERT(VFS_I(ip)->i_mode != 0);
	}

	/*
	 * (4) If the inode is queued for inactivation (NEED_INACTIVE) but
	 * inactivation has not started (!INACTIVATING), it is still allocated.
	 */
	if ((ip->i_flags & XFS_NEED_INACTIVE) &&
	    !(ip->i_flags & XFS_INACTIVATING)) {
		/* definitely before difree */
		ASSERT(VFS_I(ip)->i_mode != 0);
	}
#endif

	/*
	 * If the incore inode is undergoing inactivation (INACTIVATING), there
	 * are two possibilities:
	 *
	 * (5) It is before the point where it would get freed ondisk, in which
	 * case i_mode is still nonzero.
	 *
	 * (6) It has already been freed, in which case i_mode is zero.
	 *
	 * We don't take the ILOCK here, but difree and dialloc update the AGI,
	 * and we've taken the AGI buffer lock, which prevents that from
	 * happening.
	 */

	/*
	 * (7) Inodes undergoing inactivation (INACTIVATING) or queued for
	 * reclaim (IRECLAIMABLE) could be allocated or free. i_mode still
	 * reflects the ondisk state.
	 */

	/*
	 * (8) If the inode is in IFLUSHING, it's safe to query i_mode because
	 * the flush code uses i_mode to format the ondisk inode.
	 */

	/*
	 * (9) If the inode is in IRECLAIM and was reachable via the radix
	 * tree, it still has the same i_mode as it did before it entered
	 * reclaim. The inode object is still alive because we hold the RCU
	 * read lock.
	 */

	*inuse = VFS_I(ip)->i_mode != 0;
	error = 0;

out_skip:
	spin_unlock(&ip->i_flags_lock);
out_rcu:
	rcu_read_unlock();
	return error;
}

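/*
 * Illustrative caller pattern for the helper above (a sketch only, not part
 * of this file): with sc->sa.pag and the locked AGI buffer already held, a
 * scrubber can ask whether a cached inode is allocated; -ENODATA means the
 * inode is not in cache, so the caller falls back to the ondisk inobt or
 * inode cluster buffer to make the determination:
 *
 *	bool	inuse;
 *
 *	error = xchk_inode_is_allocated(sc, agino, &inuse);
 *	if (error && error != -ENODATA)
 *		return error;
 */
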
/* Is this inode a root directory for either tree? */
bool
xchk_inode_is_dirtree_root(const struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	return ip == mp->m_rootip ||
	       (xfs_has_metadir(mp) && ip == mp->m_metadirip);
}

/* Does the superblock point down to this inode? */
bool
xchk_inode_is_sb_rooted(const struct xfs_inode *ip)
{
	return xchk_inode_is_dirtree_root(ip) ||
	       xfs_is_sb_inum(ip->i_mount, ip->i_ino);
}

/* What is the root directory inumber for this inode? */
xfs_ino_t
xchk_inode_rootdir_inum(const struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (xfs_is_metadir_inode(ip))
		return mp->m_metadirip->i_ino;
	return mp->m_rootip->i_ino;
}

static int
xchk_meta_btree_count_blocks(
	struct xfs_scrub	*sc,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_btree_cur	*cur;
	int			error;

	if (!sc->sr.rtg) {
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	switch (sc->ip->i_metatype) {
	case XFS_METAFILE_RTRMAP:
		cur = xfs_rtrmapbt_init_cursor(sc->tp, sc->sr.rtg);
		break;
	case XFS_METAFILE_RTREFCOUNT:
		cur = xfs_rtrefcountbt_init_cursor(sc->tp, sc->sr.rtg);
		break;
	default:
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	error = xfs_btree_count_blocks(cur, count);
	xfs_btree_del_cursor(cur, error);
	if (!error) {
		*nextents = 0;
		(*count)--;	/* don't count the btree iroot */
	}
	return error;
}

/* Count the blocks used by a file, even if it's a metadata inode. */
int
xchk_inode_count_blocks(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(sc->ip, whichfork);

	if (!ifp) {
		*nextents = 0;
		*count = 0;
		return 0;
	}

	if (ifp->if_format == XFS_DINODE_FMT_META_BTREE) {
		ASSERT(whichfork == XFS_DATA_FORK);
		return xchk_meta_btree_count_blocks(sc, nextents, count);
	}

	return xfs_bmap_count_blocks(sc->tp, sc->ip, whichfork, nextents,
			count);
}