Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
fs/xfs/xfs_log_recover.c at v3.0 · 3854 lines · 107 kB
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log_recover.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_rw.h"
#include "xfs_utils.h"
#include "xfs_trace.h"

STATIC int	xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
STATIC int	xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
#if defined(DEBUG)
STATIC void	xlog_recover_check_summary(xlog_t *);
#else
#define	xlog_recover_check_summary(log)
#endif

/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
	xfs_daddr_t		bc_blkno;
	uint			bc_len;
	int			bc_refcount;
	struct list_head	bc_list;
};

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify the given count of basic blocks is valid number of blocks
 * to specify for an operation involving the given XFS log buffer.
 * Returns nonzero if the count is valid, 0 otherwise.
 */

static inline int
xlog_buf_bbcount_valid(
	xlog_t		*log,
	int		bbcount)
{
	return bbcount > 0 && bbcount <= log->l_logBBsize;
}

/*
 * Allocate a buffer to hold log data.  The buffer needs to be able
 * to map to a range of nbblks basic blocks at any valid (basic
 * block) offset within the log.
 */
STATIC xfs_buf_t *
xlog_get_bp(
	xlog_t		*log,
	int		nbblks)
{
	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2
	 * multiple of the basic block size), so we round up the
	 * requested size to accommodate the basic blocks required
	 * for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-
	 * aligned block offset, in which case an I/O of the
	 * requested size could extend beyond the end of the
	 * buffer.  If the requested size is only 1 basic block it
	 * will never straddle a sector boundary, so this won't be
	 * an issue.  Nor will this be a problem if the log I/O is
	 * done in basic blocks (sector size 1).  But otherwise we
	 * extend the buffer by one extra log sector to ensure
	 * there's space to accommodate this possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);

	return xfs_buf_get_uncached(log->l_mp->m_logdev_targp,
					BBTOB(nbblks), 0);
}

STATIC void
xlog_put_bp(
	xfs_buf_t	*bp)
{
	xfs_buf_free(bp);
}
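The rounding in xlog_get_bp() is easiest to see with concrete numbers. Below is a minimal userspace sketch of the same arithmetic, assuming 512-byte basic blocks and re-implementing round_up()/BBTOB() purely for illustration; it is not part of xfs_log_recover.c, where the kernel supplies its own versions.

/* Illustrative sketch only -- not part of xfs_log_recover.c. */
#include <stdio.h>

#define BBSHIFT		9			/* 512-byte basic blocks */
#define BBTOB(bbs)	((bbs) << BBSHIFT)
/* round_up() for a power-of-2 'align', matching the kernel macro's behavior */
#define round_up(x, align)	(((x) + (align) - 1) & ~((align) - 1))

int
main(void)
{
	int	nbblks = 3;		/* caller wants 3 basic blocks */
	int	sectBBsize = 4;		/* log sector = 4 basic blocks */

	/* same steps as xlog_get_bp() */
	if (nbblks > 1 && sectBBsize > 1)
		nbblks += sectBBsize;			/* 3 + 4 = 7 */
	nbblks = round_up(nbblks, sectBBsize);		/* 7 -> 8 */

	printf("allocate %d bytes\n", BBTOB(nbblks));	/* 4096 */
	return 0;
}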
/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
STATIC xfs_caddr_t
xlog_align(
	xlog_t		*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	xfs_buf_t	*bp)
{
	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);

	ASSERT(BBTOB(offset + nbblks) <= XFS_BUF_SIZE(bp));
	return XFS_BUF_PTR(bp) + BBTOB(offset);
}


/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
STATIC int
xlog_bread_noalign(
	xlog_t		*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	xfs_buf_t	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_READ(bp);
	XFS_BUF_BUSY(bp);
	XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
	XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);

	xfsbdstrat(log->l_mp, bp);
	error = xfs_buf_iowait(bp);
	if (error)
		xfs_ioerror_alert("xlog_bread", log->l_mp,
				  bp, XFS_BUF_ADDR(bp));
	return error;
}

STATIC int
xlog_bread(
	xlog_t		*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	xfs_buf_t	*bp,
	xfs_caddr_t	*offset)
{
	int		error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
	if (error)
		return error;

	*offset = xlog_align(log, blk_no, nbblks, bp);
	return 0;
}
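To see how xlog_bread() and xlog_align() cooperate on an unaligned read, here is a worked example as a hedged userspace sketch; round_down()/round_up() are re-implemented for illustration and the xlog fields are plain variables here, not the kernel types.

/* Illustrative sketch only -- not part of xfs_log_recover.c. */
#include <stdio.h>

#define BBSHIFT		9
#define BBTOB(bbs)	((bbs) << BBSHIFT)
#define round_down(x, align)	((x) & ~((align) - 1))
#define round_up(x, align)	(((x) + (align) - 1) & ~((align) - 1))

int
main(void)
{
	long	blk_no = 21;	/* caller wants basic block 21 */
	int	nbblks = 1;
	int	sectBBsize = 8;	/* log sector = 8 basic blocks */

	/* xlog_bread_noalign() widens the I/O to sector boundaries */
	long	io_start = round_down(blk_no, sectBBsize);	/* 16 */
	int	io_len = round_up(nbblks, sectBBsize);		/* 8  */

	/* xlog_align() then points the caller back at block 21 */
	long	offset = blk_no & (sectBBsize - 1);		/* 5 */

	printf("read blocks %ld..%ld, data at buffer + %d bytes\n",
		io_start, io_start + io_len - 1, (int)BBTOB(offset));
	return 0;
}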
/*
 * Read at an offset into the buffer.  Returns with the buffer in its original
 * state regardless of the result of the read.
 */
STATIC int
xlog_bread_offset(
	xlog_t		*log,
	xfs_daddr_t	blk_no,		/* block to read from */
	int		nbblks,		/* blocks to read */
	xfs_buf_t	*bp,
	xfs_caddr_t	offset)
{
	xfs_caddr_t	orig_offset = XFS_BUF_PTR(bp);
	int		orig_len = bp->b_buffer_length;
	int		error, error2;

	error = XFS_BUF_SET_PTR(bp, offset, BBTOB(nbblks));
	if (error)
		return error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);

	/* must reset buffer pointer even on error */
	error2 = XFS_BUF_SET_PTR(bp, orig_offset, orig_len);
	if (error)
		return error;
	return error2;
}

/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
	xlog_t		*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	xfs_buf_t	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_ZEROFLAGS(bp);
	XFS_BUF_BUSY(bp);
	XFS_BUF_HOLD(bp);
	XFS_BUF_PSEMA(bp, PRIBIO);
	XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
	XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);

	if ((error = xfs_bwrite(log->l_mp, bp)))
		xfs_ioerror_alert("xlog_bwrite", log->l_mp,
				  bp, XFS_BUF_ADDR(bp));
	return error;
}
#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d\n",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, " log : uuid = %pU, fmt = %d\n",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN).  This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (unlikely(be32_to_cpu(head->h_fmt) != XLOG_FMT)) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);

	if (uuid_is_nil(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is nil, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "nil uuid in log - IRIX style log");
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_mount",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	return 0;
}

STATIC void
xlog_recover_iodone(
	struct xfs_buf	*bp)
{
	if (XFS_BUF_GETERROR(bp)) {
		/*
		 * We're not going to bother about retrying
		 * this during recovery. One strike!
		 */
		xfs_ioerror_alert("xlog_recover_iodone",
					bp->b_target->bt_mount, bp,
					XFS_BUF_ADDR(bp));
		xfs_force_shutdown(bp->b_target->bt_mount,
					SHUTDOWN_META_IO_ERROR);
	}
	XFS_BUF_CLR_IODONE_FUNC(bp);
	xfs_buf_ioend(bp, 0);
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm can not be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
	xlog_t		*log,
	xfs_buf_t	*bp,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	xfs_caddr_t	offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, bp, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}
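The loop above maintains the invariant that the cycle at first_blk differs from the target while the cycle at *last_blk matches it, shrinking the window until the two indices are adjacent. A minimal in-memory model of the same invariant, with cycle numbers in a plain array instead of on disk, might look like this; it is an illustration, not kernel code.

/* Illustrative sketch only -- not part of xfs_log_recover.c. */
#include <assert.h>
#include <stdio.h>

#define BLK_AVG(a, b)	(((a) + (b)) >> 1)

/*
 * Find the first index whose cycle equals 'cycle', assuming
 * cycles[first] != cycle and cycles[last] == cycle on entry.
 */
static long
find_cycle_start(const unsigned *cycles, long first, long last, unsigned cycle)
{
	long	mid = BLK_AVG(first, last);

	while (mid != first && mid != last) {
		if (cycles[mid] == cycle)
			last = mid;	/* target is at mid or before it */
		else
			first = mid;	/* target is after mid */
		mid = BLK_AVG(first, last);
	}
	return last;
}

int
main(void)
{
	/* cycle 8 wrapped partway around this toy "log" of cycle 7 */
	unsigned cycles[] = { 8, 8, 8, 7, 7, 7, 7, 7 };

	assert(find_cycle_start(cycles, 0, 7, 7) == 3);
	printf("first block of cycle 7: index 3\n");
	return 0;
}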
/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
	xlog_t		*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	xfs_buf_t	*bp;
	xfs_daddr_t	bufblks;
	xfs_caddr_t	buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining.  If that fails,
	 * try a smaller size.  We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(nbblks);
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, bp, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	xlog_put_bp(bp);
	return error;
}

/*
 * Potentially back up over a partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
	xlog_t			*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	xfs_buf_t		*bp;
	xfs_caddr_t		offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	if (!(bp = xlog_get_bp(log, num_blks))) {
		if (!(bp = xlog_get_bp(log, 1)))
			return ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = XFS_ERROR(EIO);
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(head->h_magicno))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = -1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head), so we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		uint	h_size = be32_to_cpu(head->h_size);

		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
			xhdrs++;
	} else {
		xhdrs = 1;
	}

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	xlog_put_bp(bp);
	return error;
}
/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	xlog_t 		*log,
	xfs_daddr_t	*return_head_blk)
{
	xfs_buf_t	*bp;
	xfs_caddr_t	offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	} else if (error) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;

	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct.  If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary search
		 * for the first occurrence of last_half_cycle.  The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us.  If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log.  The cases we're looking for look
		 * like
		 *                               v binary search stopped here
		 *        x + 1 ... | x | x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 * or
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *                           ^ we want to locate this spot
		 */
		stop_on_cycle = last_half_cycle;
		if ((error = xlog_find_cycle_start(log, bp, first_blk,
						&head_blk, last_half_cycle)))
			goto bp_err;
	}

	/*
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.  The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log.  The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle.  If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found.  This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                       ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks.  We need to skip past those since that is
		 * certainly not the head of the log.  By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto bp_err;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		if ((error = xlog_find_verify_log_record(log, start_blk,
							&head_blk, 0)) == -1) {
			error = XFS_ERROR(EIO);
			goto bp_err;
		} else if (error)
			goto bp_err;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_log_record(log, start_blk,
							&head_blk, 0)) == -1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			if ((error = xlog_find_verify_log_record(log,
							start_blk, &new_blk,
							(int)head_blk)) == -1) {
				error = XFS_ERROR(EIO);
				goto bp_err;
			} else if (error)
				goto bp_err;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto bp_err;
	}

	xlog_put_bp(bp);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number.  Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1.  In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

 bp_err:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to find log head");
	return error;
}

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
STATIC int
xlog_find_tail(
	xlog_t			*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk)
{
	xlog_rec_header_t	*rhead;
	xlog_op_header_t	*op_head;
	xfs_caddr_t		offset = NULL;
	xfs_buf_t		*bp;
	int			error, i, found;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	xfs_lsn_t		tail_lsn;
	int			hblks;

	found = 0;

	/*
	 * Find previous log record
	 */
	if ((error = xlog_find_head(log, head_blk)))
		return error;

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;
	if (*head_blk == 0) {				/* special case */
		error = xlog_bread(log, 0, 1, bp, &offset);
		if (error)
			goto done;

		if (xlog_get_cycle(offset) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto done;
		}
	}

	/*
	 * Search backwards looking for log record header block
	 */
	ASSERT(*head_blk < INT_MAX);
	for (i = (int)(*head_blk) - 1; i >= 0; i--) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto done;

		if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) {
			found = 1;
			break;
		}
	}
	/*
	 * If we haven't found the log record header block, start looking
	 * again from the end of the physical log.  XXXmiken: There should be
	 * a check here to make sure we didn't search more than N blocks in
	 * the previous code.
	 */
	if (!found) {
		for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto done;

			if (XLOG_HEADER_MAGIC_NUM ==
			    be32_to_cpu(*(__be32 *)offset)) {
				found = 2;
				break;
			}
		}
	}
	if (!found) {
		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
		ASSERT(0);
		return XFS_ERROR(EIO);
	}

	/* find blk_no of tail of log */
	rhead = (xlog_rec_header_t *)offset;
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

	/*
	 * Reset log values according to the state of the log when we
	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
	 * one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record.  At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for.  Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
	log->l_prev_block = i;
	log->l_curr_block = (int)*head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (found == 2)
		log->l_curr_cycle++;
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
	xlog_assign_grant_head(&log->l_grant_reserve_head, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
	xlog_assign_grant_head(&log->l_grant_write_head, log->l_curr_cycle,
					BBTOB(log->l_curr_block));

	/*
	 * Look for unmount record.  If we find it, then we know there
	 * was a clean unmount.  Since 'i' could be the last block in
	 * the physical log, we convert to a log block before comparing
	 * to the head_blk.
	 *
	 * Save the current tail lsn to use to pass to
	 * xlog_clear_stale_blocks() below.  We won't want to clear the
	 * unmount record if there is one, so we pass the lsn of the
	 * unmount record rather than the block after it.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		int	h_size = be32_to_cpu(rhead->h_size);
		int	h_version = be32_to_cpu(rhead->h_version);

		if ((h_version & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
		} else {
			hblks = 1;
		}
	} else {
		hblks = 1;
	}
	after_umount_blk = (i + hblks + (int)
		BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
	tail_lsn = atomic64_read(&log->l_tail_lsn);
	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = (i + hblks) % log->l_logBBsize;
		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
		if (error)
			goto done;

		op_head = (xlog_op_header_t *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written
			 * log records will point recovery to after the
			 * current unmount record.
			 */
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
					log->l_curr_cycle, after_umount_blk);
			*tail_blk = after_umount_blk;

			/*
			 * Note that the unmount was clean. If the unmount
			 * was not clean, we need to know this to rebuild the
			 * superblock counters from the perag headers if we
			 * have a filesystem using non-persistent counters.
			 */
			log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
		}
	}

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head.  This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem
	 *
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
		error = xlog_clear_stale_blocks(log, tail_lsn);

done:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to locate log tail");
	return error;
}
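xlog_find_tail() leans on the fact that an LSN packs a cycle number and a block number into one 64-bit value, so BLOCK_LSN() and the xlog_assign_*lsn() helpers reduce to shifts and masks. A hedged sketch of that packing follows; the real macros live in the XFS headers, and these userspace re-implementations are for illustration only.

/* Illustrative sketch only -- not part of xfs_log_recover.c. */
#include <stdint.h>
#include <stdio.h>

/* an LSN is (cycle << 32) | block, as in the XFS log record headers */
static uint64_t
assign_lsn(uint32_t cycle, uint32_t block)
{
	return ((uint64_t)cycle << 32) | block;
}

static uint32_t cycle_lsn(uint64_t lsn) { return (uint32_t)(lsn >> 32); }
static uint32_t block_lsn(uint64_t lsn) { return (uint32_t)lsn; }

int
main(void)
{
	uint64_t lsn = assign_lsn(7, 1234);

	printf("cycle %u, block %u\n", cycle_lsn(lsn), block_lsn(lsn));
	return 0;
}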
/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *	0  => the log is completely written to
 *	-1 => use *blk_no as the first block of the log
 *	>0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
	xlog_t		*log,
	xfs_daddr_t	*blk_no)
{
	xfs_buf_t	*bp;
	xfs_caddr_t	offset;
	uint		first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t	num_scan_bblks;
	int		error, log_bbnum = log->l_logBBsize;

	*blk_no = 0;

	/* check totally zeroed log */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;
	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		xlog_put_bp(bp);
		return -1;
	}

	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
		xlog_put_bp(bp);
		return 0;
	} else if (first_cycle != 1) {
		/*
		 * If the cycle of the last block is zero, the cycle of
		 * the first block must be 1. If it's not, maybe we're
		 * not looking at a log... Bail out.
		 */
		xfs_warn(log->l_mp,
			"Log inconsistent or not a log (last==0, first!=1)");
		return XFS_ERROR(EINVAL);
	}

	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
		goto bp_err;

	/*
	 * Validate the answer.  Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks.  At this point, the maximum
	 * is not chosen to mean anything special.   XXXmiken
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head.  What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					(int)num_scan_bblks, 0, &new_blk)))
		goto bp_err;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially back up over a partial log record write.  We don't need
	 * to search the end of the log because we know it is zero.
	 */
	if ((error = xlog_find_verify_log_record(log, start_blk,
				&last_blk, 0)) == -1) {
		error = XFS_ERROR(EIO);
		goto bp_err;
	} else if (error)
		goto bp_err;

	*blk_no = last_blk;
bp_err:
	xlog_put_bp(bp);
	if (error)
		return error;
	return -1;
}
/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	xlog_t			*log,
	xfs_caddr_t		buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}

STATIC int
xlog_write_log_records(
	xlog_t		*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	xfs_caddr_t	offset;
	xfs_buf_t	*bp;
	int		balign, ealign;
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written.  If that fails, try
	 * a smaller size.  We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(blocks);
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, bp);
		if (error)
			goto out_put_bp;

		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			offset = XFS_BUF_PTR(bp) + BBTOB(ealign - start_block);
			error = xlog_bread_offset(log, ealign, sectbb,
							bp, offset);
			if (error)
				break;

		}

		offset = xlog_align(log, start_block, endcount, bp);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, bp);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

 out_put_bp:
	xlog_put_bp(bp);
	return error;
}
/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	xlog_t		*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		}
		tail_distance = tail_block - head_block;
	}

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = MIN(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log.  Just do it
		 * in a single write.  Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *     n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks.  Do it in two separate
		 * I/Os.  The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}
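The two distance computations above are plain modular arithmetic on the circular log. A hedged toy calculation with made-up sizes, showing both the wrapped and unwrapped cases:

/* Illustrative sketch only -- not part of xfs_log_recover.c. */
#include <stdio.h>

int
main(void)
{
	int	logBBsize = 1000;	/* toy log size in basic blocks */
	int	head_block, tail_block;

	/* same cycle: tail is physically behind the head, so wrap */
	head_block = 950, tail_block = 100;
	printf("wrapped tail_distance = %d\n",
		tail_block + (logBBsize - head_block));	/* 150 */

	/* head one cycle behind the tail: plain difference */
	head_block = 100, tail_block = 950;
	printf("unwrapped tail_distance = %d\n",
		tail_block - head_block);		/* 850 */
	return 0;
}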
/******************************************************************************
 *
 *		Log recover routines
 *
 ******************************************************************************
 */

STATIC xlog_recover_t *
xlog_recover_find_tid(
	struct hlist_head	*head,
	xlog_tid_t		tid)
{
	xlog_recover_t		*trans;
	struct hlist_node	*n;

	hlist_for_each_entry(trans, n, head, r_list) {
		if (trans->r_log_tid == tid)
			return trans;
	}
	return NULL;
}

STATIC void
xlog_recover_new_tid(
	struct hlist_head	*head,
	xlog_tid_t		tid,
	xfs_lsn_t		lsn)
{
	xlog_recover_t		*trans;

	trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
	trans->r_log_tid = tid;
	trans->r_lsn = lsn;
	INIT_LIST_HEAD(&trans->r_itemq);

	INIT_HLIST_NODE(&trans->r_list);
	hlist_add_head(&trans->r_list, head);
}

STATIC void
xlog_recover_add_item(
	struct list_head	*head)
{
	xlog_recover_item_t	*item;

	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
	INIT_LIST_HEAD(&item->ri_list);
	list_add_tail(&item->ri_list, head);
}
STATIC int
xlog_recover_add_to_cont_trans(
	struct log		*log,
	xlog_recover_t		*trans,
	xfs_caddr_t		dp,
	int			len)
{
	xlog_recover_item_t	*item;
	xfs_caddr_t		ptr, old_ptr;
	int			old_len;

	if (list_empty(&trans->r_itemq)) {
		/* finish copying rest of trans header */
		xlog_recover_add_item(&trans->r_itemq);
		ptr = (xfs_caddr_t) &trans->r_theader +
				sizeof(xfs_trans_header_t) - len;
		memcpy(ptr, dp, len); /* d, s, l */
		return 0;
	}
	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);

	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
	old_len = item->ri_buf[item->ri_cnt-1].i_len;

	ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0u);
	memcpy(&ptr[old_len], dp, len); /* d, s, l */
	item->ri_buf[item->ri_cnt-1].i_len += len;
	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
	return 0;
}

/*
 * The next region to add is the start of a new region.  It could be
 * a whole region or it could be the first part of a new region.  Because
 * of this, the assumption here is that the type and size fields of all
 * format structures fit into the first 32 bits of the structure.
 *
 * This works because all regions must be 32 bit aligned.  Therefore, we
 * either have both fields or we have neither field.  In the case we have
 * neither field, the data part of the region is zero length.  We only have
 * a log_op_header and can throw away the header since a new one will appear
 * later.  If we have at least 4 bytes, then we can determine how many regions
 * will appear in the current log item.
 */
STATIC int
xlog_recover_add_to_trans(
	struct log		*log,
	xlog_recover_t		*trans,
	xfs_caddr_t		dp,
	int			len)
{
	xfs_inode_log_format_t	*in_f;			/* any will do */
	xlog_recover_item_t	*item;
	xfs_caddr_t		ptr;

	if (!len)
		return 0;
	if (list_empty(&trans->r_itemq)) {
		/* we need to catch log corruptions here */
		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
			xfs_warn(log->l_mp, "%s: bad header magic number",
				__func__);
			ASSERT(0);
			return XFS_ERROR(EIO);
		}
		if (len == sizeof(xfs_trans_header_t))
			xlog_recover_add_item(&trans->r_itemq);
		memcpy(&trans->r_theader, dp, len); /* d, s, l */
		return 0;
	}

	ptr = kmem_alloc(len, KM_SLEEP);
	memcpy(ptr, dp, len);
	in_f = (xfs_inode_log_format_t *)ptr;

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
	if (item->ri_total != 0 &&
	     item->ri_total == item->ri_cnt) {
		/* tail item is in use, get a new one */
		xlog_recover_add_item(&trans->r_itemq);
		item = list_entry(trans->r_itemq.prev,
					xlog_recover_item_t, ri_list);
	}

	if (item->ri_total == 0) {		/* first region to be added */
		if (in_f->ilf_size == 0 ||
		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
			xfs_warn(log->l_mp,
		"bad number of regions (%d) in inode log format",
				  in_f->ilf_size);
			ASSERT(0);
			return XFS_ERROR(EIO);
		}

		item->ri_total = in_f->ilf_size;
		item->ri_buf =
			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
				    KM_SLEEP);
	}
	ASSERT(item->ri_total > item->ri_cnt);
	/* Description region is ri_buf[0] */
	item->ri_buf[item->ri_cnt].i_addr = ptr;
	item->ri_buf[item->ri_cnt].i_len  = len;
	item->ri_cnt++;
	trace_xfs_log_recover_item_add(log, trans, item, 0);
	return 0;
}
/*
 * Sort the log items in the transaction.  Cancelled buffers need
 * to be put first so they are processed before any items that might
 * modify the buffers.  If they are cancelled, then the modifications
 * don't need to be replayed.
 */
STATIC int
xlog_recover_reorder_trans(
	struct log		*log,
	xlog_recover_t		*trans,
	int			pass)
{
	xlog_recover_item_t	*item, *n;
	LIST_HEAD(sort_list);

	list_splice_init(&trans->r_itemq, &sort_list);
	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;

		switch (ITEM_TYPE(item)) {
		case XFS_LI_BUF:
			if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
				trace_xfs_log_recover_item_reorder_head(log,
							trans, item, pass);
				list_move(&item->ri_list, &trans->r_itemq);
				break;
			}
		case XFS_LI_INODE:
		case XFS_LI_DQUOT:
		case XFS_LI_QUOTAOFF:
		case XFS_LI_EFD:
		case XFS_LI_EFI:
			trace_xfs_log_recover_item_reorder_tail(log,
							trans, item, pass);
			list_move_tail(&item->ri_list, &trans->r_itemq);
			break;
		default:
			xfs_warn(log->l_mp,
				"%s: unrecognized type of log operation",
				__func__);
			ASSERT(0);
			return XFS_ERROR(EIO);
		}
	}
	ASSERT(list_empty(&sort_list));
	return 0;
}
/*
 * Build up the table of buf cancel records so that we don't replay
 * cancelled data in the second pass.  For buffer records that are
 * not cancel records, there is nothing to do here so we just return.
 *
 * If we get a cancel record which is already in the table, this indicates
 * that the buffer was cancelled multiple times.  In order to ensure
 * that during pass 2 we keep the record in the table until we reach its
 * last occurrence in the log, we keep a reference count in the cancel
 * record in the table to tell us how many times we expect to see this
 * record during the second pass.
 */
STATIC int
xlog_recover_buffer_pass1(
	struct log		*log,
	xlog_recover_item_t	*item)
{
	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;

	/*
	 * If this isn't a cancel buffer item, then just return.
	 */
	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
		return 0;
	}

	/*
	 * Insert an xfs_buf_cancel record into the hash table of them.
	 * If there is already an identical record, bump its reference count.
	 */
	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == buf_f->blf_blkno &&
		    bcp->bc_len == buf_f->blf_len) {
			bcp->bc_refcount++;
			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
			return 0;
		}
	}

	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
	bcp->bc_blkno = buf_f->blf_blkno;
	bcp->bc_len = buf_f->blf_len;
	bcp->bc_refcount = 1;
	list_add_tail(&bcp->bc_list, bucket);

	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
	return 0;
}
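The reference count is what lets pass 2 drop a cancel entry at exactly its last occurrence in the log. A hedged userspace model of that bookkeeping, with a single entry standing in for the hash table:

/* Illustrative sketch only -- not part of xfs_log_recover.c. */
#include <assert.h>

struct cancel_entry {
	int	refcount;	/* times the cancel appears in the log */
	int	present;	/* still in the "table"? */
};

/* pass 1: one bump per cancel record seen */
static void pass1_see_cancel(struct cancel_entry *e)
{
	if (!e->present) {
		e->present = 1;
		e->refcount = 1;
	} else
		e->refcount++;
}

/* pass 2: drop the entry only at its last occurrence */
static void pass2_see_cancel(struct cancel_entry *e)
{
	if (--e->refcount == 0)
		e->present = 0;
}

int
main(void)
{
	struct cancel_entry e = { 0, 0 };

	pass1_see_cancel(&e);	/* buffer cancelled ... */
	pass1_see_cancel(&e);	/* ... and cancelled again later */

	pass2_see_cancel(&e);
	assert(e.present);	/* still cancelled after 1st occurrence */
	pass2_see_cancel(&e);
	assert(!e.present);	/* reuse after this point replays again */
	return 0;
}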
/*
 * Check to see whether the buffer being recovered has a corresponding
 * entry in the buffer cancel record table. If it does then return 1
 * so that it will be cancelled, otherwise return 0. If the buffer is
 * actually a buffer cancel item (XFS_BLF_CANCEL is set), then decrement
 * the refcount on the entry in the table and remove it from the table
 * if this is the last reference.
 *
 * We remove the cancel record from the table when we encounter its
 * last occurrence in the log so that if the same buffer is re-used
 * again after its last cancellation we actually replay the changes
 * made at that point.
 */
STATIC int
xlog_check_buffer_cancelled(
	struct log		*log,
	xfs_daddr_t		blkno,
	uint			len,
	ushort			flags)
{
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;

	if (log->l_buf_cancel_table == NULL) {
		/*
		 * There is nothing in the table built in pass one,
		 * so this buffer must not be cancelled.
		 */
		ASSERT(!(flags & XFS_BLF_CANCEL));
		return 0;
	}

	/*
	 * Search for an entry in the cancel table that matches our buffer.
	 */
	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
			goto found;
	}

	/*
	 * We didn't find a corresponding entry in the table, so return 0 so
	 * that the buffer is NOT cancelled.
	 */
	ASSERT(!(flags & XFS_BLF_CANCEL));
	return 0;

found:
	/*
	 * We've got a match, so return 1 so that the recovery of this buffer
	 * is cancelled.  If this buffer is actually a buffer cancel log
	 * item, then decrement the refcount on the one in the table and
	 * remove it if this is the last reference.
	 */
	if (flags & XFS_BLF_CANCEL) {
		if (--bcp->bc_refcount == 0) {
			list_del(&bcp->bc_list);
			kmem_free(bcp);
		}
	}
	return 1;
}
/*
 * Perform recovery for a buffer full of inodes.  In these buffers, the only
 * data which should be recovered is that which corresponds to the
 * di_next_unlinked pointers in the on disk inode structures.  The rest of the
 * data for the inodes is always logged through the inodes themselves rather
 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
 *
 * The only time when buffers full of inodes are fully recovered is when the
 * buffer is full of newly allocated inodes.  In this case the buffer will
 * not be marked as an inode buffer and so will be sent to
 * xlog_recover_do_reg_buffer() below during recovery.
 */
STATIC int
xlog_recover_do_inode_buffer(
	struct xfs_mount	*mp,
	xlog_recover_item_t	*item,
	struct xfs_buf		*bp,
	xfs_buf_log_format_t	*buf_f)
{
	int			i;
	int			item_index = 0;
	int			bit = 0;
	int			nbits = 0;
	int			reg_buf_offset = 0;
	int			reg_buf_bytes = 0;
	int			next_unlinked_offset;
	int			inodes_per_buf;
	xfs_agino_t		*logged_nextp;
	xfs_agino_t		*buffer_nextp;

	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);

	inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
	for (i = 0; i < inodes_per_buf; i++) {
		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
			offsetof(xfs_dinode_t, di_next_unlinked);

		while (next_unlinked_offset >=
		       (reg_buf_offset + reg_buf_bytes)) {
			/*
			 * The next di_next_unlinked field is beyond
			 * the current logged region.  Find the next
			 * logged region that contains or is beyond
			 * the current di_next_unlinked field.
			 */
			bit += nbits;
			bit = xfs_next_bit(buf_f->blf_data_map,
					   buf_f->blf_map_size, bit);

			/*
			 * If there are no more logged regions in the
			 * buffer, then we're done.
			 */
			if (bit == -1)
				return 0;

			nbits = xfs_contig_bits(buf_f->blf_data_map,
						buf_f->blf_map_size, bit);
			ASSERT(nbits > 0);
			reg_buf_offset = bit << XFS_BLF_SHIFT;
			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
			item_index++;
		}

		/*
		 * If the current logged region starts after the current
		 * di_next_unlinked field, then move on to the next
		 * di_next_unlinked field.
		 */
		if (next_unlinked_offset < reg_buf_offset)
			continue;

		ASSERT(item->ri_buf[item_index].i_addr != NULL);
		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
		ASSERT((reg_buf_offset + reg_buf_bytes) <= XFS_BUF_COUNT(bp));

		/*
		 * The current logged region contains a copy of the
		 * current di_next_unlinked field.  Extract its value
		 * and copy it to the buffer copy.
		 */
		logged_nextp = item->ri_buf[item_index].i_addr +
				next_unlinked_offset - reg_buf_offset;
		if (unlikely(*logged_nextp == 0)) {
			xfs_alert(mp,
		"Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
		"Trying to replay bad (0) inode di_next_unlinked field.",
				item, bp);
			XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
					 XFS_ERRLEVEL_LOW, mp);
			return XFS_ERROR(EFSCORRUPTED);
		}

		buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
					      next_unlinked_offset);
		*buffer_nextp = *logged_nextp;
	}

	return 0;
}
/*
 * Perform a 'normal' buffer recovery.  Each logged region of the
 * buffer should be copied over the corresponding region in the
 * given buffer.  The bitmap in the buf log format structure indicates
 * where to place the logged data.
 */
STATIC void
xlog_recover_do_reg_buffer(
	struct xfs_mount	*mp,
	xlog_recover_item_t	*item,
	struct xfs_buf		*bp,
	xfs_buf_log_format_t	*buf_f)
{
	int			i;
	int			bit;
	int			nbits;
	int			error;

	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);

	bit = 0;
	i = 1;  /* 0 is the buf format structure */
	while (1) {
		bit = xfs_next_bit(buf_f->blf_data_map,
				   buf_f->blf_map_size, bit);
		if (bit == -1)
			break;
		nbits = xfs_contig_bits(buf_f->blf_data_map,
					buf_f->blf_map_size, bit);
		ASSERT(nbits > 0);
		ASSERT(item->ri_buf[i].i_addr != NULL);
		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
		ASSERT(XFS_BUF_COUNT(bp) >=
		       ((uint)bit << XFS_BLF_SHIFT)+(nbits<<XFS_BLF_SHIFT));

		/*
		 * Do a sanity check if this is a dquot buffer. Just checking
		 * the first dquot in the buffer should do. XXXThis is
		 * probably a good thing to do for other buf types also.
		 */
		error = 0;
		if (buf_f->blf_flags &
		   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
			if (item->ri_buf[i].i_addr == NULL) {
				xfs_alert(mp,
					"XFS: NULL dquot in %s.", __func__);
				goto next;
			}
			if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
				xfs_alert(mp,
					"XFS: dquot too small (%d) in %s.",
					item->ri_buf[i].i_len, __func__);
				goto next;
			}
			error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr,
					       -1, 0, XFS_QMOPT_DOWARN,
					       "dquot_buf_recover");
			if (error)
				goto next;
		}

		memcpy(xfs_buf_offset(bp,
			(uint)bit << XFS_BLF_SHIFT),	/* dest */
			item->ri_buf[i].i_addr,		/* source */
			nbits<<XFS_BLF_SHIFT);		/* length */
 next:
		i++;
		bit += nbits;
	}

	/* Shouldn't be any more regions */
	ASSERT(i == item->ri_total);
}
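The dirty-region bitmap walked above tracks the buffer in XFS_BLF_CHUNK-sized (128-byte) pieces, so bit << XFS_BLF_SHIFT converts a bit index into a byte offset. A hedged userspace model of the same walk, with toy next_bit()/contig_bits() helpers standing in for xfs_next_bit()/xfs_contig_bits():

/* Illustrative sketch only -- not part of xfs_log_recover.c. */
#include <stdio.h>

#define XFS_BLF_SHIFT	7	/* one bit per 128-byte chunk */

/* toy stand-ins for xfs_next_bit()/xfs_contig_bits() on a single word */
static int next_bit(unsigned map, int bit)
{
	for (; bit < 32; bit++)
		if (map & (1u << bit))
			return bit;
	return -1;
}

static int contig_bits(unsigned map, int bit)
{
	int	n = 0;

	while (bit + n < 32 && (map & (1u << (bit + n))))
		n++;
	return n;
}

int
main(void)
{
	unsigned map = 0x3a;	/* bits 1, 3, 4, 5 are dirty */
	int	bit = 0, nbits;

	while ((bit = next_bit(map, bit)) != -1) {
		nbits = contig_bits(map, bit);
		printf("copy %d bytes at offset %d\n",
			nbits << XFS_BLF_SHIFT, bit << XFS_BLF_SHIFT);
		bit += nbits;
	}
	return 0;
}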
1885 */
1886 error = 0;
1887 if (buf_f->blf_flags &
1888 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
1889 if (item->ri_buf[i].i_addr == NULL) {
1890 xfs_alert(mp,
1891 "XFS: NULL dquot in %s.", __func__);
1892 goto next;
1893 }
1894 if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
1895 xfs_alert(mp,
1896 "XFS: dquot too small (%d) in %s.",
1897 item->ri_buf[i].i_len, __func__);
1898 goto next;
1899 }
1900 error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr,
1901 -1, 0, XFS_QMOPT_DOWARN,
1902 "dquot_buf_recover");
1903 if (error)
1904 goto next;
1905 }
1906
1907 memcpy(xfs_buf_offset(bp,
1908 (uint)bit << XFS_BLF_SHIFT), /* dest */
1909 item->ri_buf[i].i_addr, /* source */
1910 nbits<<XFS_BLF_SHIFT); /* length */
1911 next:
1912 i++;
1913 bit += nbits;
1914 }
1915
1916 /* Shouldn't be any more regions */
1917 ASSERT(i == item->ri_total);
1918}
1919
1920/*
1921 * Do some primitive error checking on on-disk dquot data structures.
1922 */
1923int
1924xfs_qm_dqcheck(
1925 struct xfs_mount *mp,
1926 xfs_disk_dquot_t *ddq,
1927 xfs_dqid_t id,
1928 uint type, /* used only when XFS_QMOPT_DQREPAIR is set */
1929 uint flags,
1930 char *str)
1931{
1932 xfs_dqblk_t *d = (xfs_dqblk_t *)ddq;
1933 int errs = 0;
1934
1935 /*
1936 * We can encounter an uninitialized dquot buffer for 2 reasons:
1937 * 1. If we crash while deleting the quotainode(s), and those blks got
1938 * used for user data. This is because we take the path of regular
1939 * file deletion; however, the size field of quotainodes is never
1940 * updated, so all the tricks that we play in itruncate_finish
1941 * don't quite matter.
1942 *
1943 * 2. We don't replay the quota buffers when there's a quotaoff logitem.
1944 * But the allocation will be replayed so we'll end up with an
1945 * uninitialized quota block.
1946 *
1947 * This is all fine; things are still consistent, and we haven't lost
1948 * any quota information. Just don't complain about bad dquot blks.
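 *
 * Hypothetical repair-mode call for contrast (not taken from this
 * file): a quotacheck-style caller would pass a real id, a type and
 * the repair flag,
 *
 *	error = xfs_qm_dqcheck(mp, ddq, id, XFS_DQ_USER,
 *			XFS_QMOPT_DQREPAIR | XFS_QMOPT_DOWARN, "qcheck");
 *
 * whereas log recovery above passes id == -1 and no
 * XFS_QMOPT_DQREPAIR, so a bad dquot is only reported here, never
 * rewritten.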
1949 */ 1950 if (be16_to_cpu(ddq->d_magic) != XFS_DQUOT_MAGIC) { 1951 if (flags & XFS_QMOPT_DOWARN) 1952 xfs_alert(mp, 1953 "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x", 1954 str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC); 1955 errs++; 1956 } 1957 if (ddq->d_version != XFS_DQUOT_VERSION) { 1958 if (flags & XFS_QMOPT_DOWARN) 1959 xfs_alert(mp, 1960 "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x", 1961 str, id, ddq->d_version, XFS_DQUOT_VERSION); 1962 errs++; 1963 } 1964 1965 if (ddq->d_flags != XFS_DQ_USER && 1966 ddq->d_flags != XFS_DQ_PROJ && 1967 ddq->d_flags != XFS_DQ_GROUP) { 1968 if (flags & XFS_QMOPT_DOWARN) 1969 xfs_alert(mp, 1970 "%s : XFS dquot ID 0x%x, unknown flags 0x%x", 1971 str, id, ddq->d_flags); 1972 errs++; 1973 } 1974 1975 if (id != -1 && id != be32_to_cpu(ddq->d_id)) { 1976 if (flags & XFS_QMOPT_DOWARN) 1977 xfs_alert(mp, 1978 "%s : ondisk-dquot 0x%p, ID mismatch: " 1979 "0x%x expected, found id 0x%x", 1980 str, ddq, id, be32_to_cpu(ddq->d_id)); 1981 errs++; 1982 } 1983 1984 if (!errs && ddq->d_id) { 1985 if (ddq->d_blk_softlimit && 1986 be64_to_cpu(ddq->d_bcount) >= 1987 be64_to_cpu(ddq->d_blk_softlimit)) { 1988 if (!ddq->d_btimer) { 1989 if (flags & XFS_QMOPT_DOWARN) 1990 xfs_alert(mp, 1991 "%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED", 1992 str, (int)be32_to_cpu(ddq->d_id), ddq); 1993 errs++; 1994 } 1995 } 1996 if (ddq->d_ino_softlimit && 1997 be64_to_cpu(ddq->d_icount) >= 1998 be64_to_cpu(ddq->d_ino_softlimit)) { 1999 if (!ddq->d_itimer) { 2000 if (flags & XFS_QMOPT_DOWARN) 2001 xfs_alert(mp, 2002 "%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED", 2003 str, (int)be32_to_cpu(ddq->d_id), ddq); 2004 errs++; 2005 } 2006 } 2007 if (ddq->d_rtb_softlimit && 2008 be64_to_cpu(ddq->d_rtbcount) >= 2009 be64_to_cpu(ddq->d_rtb_softlimit)) { 2010 if (!ddq->d_rtbtimer) { 2011 if (flags & XFS_QMOPT_DOWARN) 2012 xfs_alert(mp, 2013 "%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED", 2014 str, (int)be32_to_cpu(ddq->d_id), ddq); 2015 errs++; 2016 } 2017 } 2018 } 2019 2020 if (!errs || !(flags & XFS_QMOPT_DQREPAIR)) 2021 return errs; 2022 2023 if (flags & XFS_QMOPT_DOWARN) 2024 xfs_notice(mp, "Re-initializing dquot ID 0x%x", id); 2025 2026 /* 2027 * Typically, a repair is only requested by quotacheck. 2028 */ 2029 ASSERT(id != -1); 2030 ASSERT(flags & XFS_QMOPT_DQREPAIR); 2031 memset(d, 0, sizeof(xfs_dqblk_t)); 2032 2033 d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC); 2034 d->dd_diskdq.d_version = XFS_DQUOT_VERSION; 2035 d->dd_diskdq.d_flags = type; 2036 d->dd_diskdq.d_id = cpu_to_be32(id); 2037 2038 return errs; 2039} 2040 2041/* 2042 * Perform a dquot buffer recovery. 2043 * Simple algorithm: if we have found a QUOTAOFF logitem of the same type 2044 * (ie. USR or GRP), then just toss this buffer away; don't recover it. 2045 * Else, treat it as a regular buffer and do recovery. 2046 */ 2047STATIC void 2048xlog_recover_do_dquot_buffer( 2049 xfs_mount_t *mp, 2050 xlog_t *log, 2051 xlog_recover_item_t *item, 2052 xfs_buf_t *bp, 2053 xfs_buf_log_format_t *buf_f) 2054{ 2055 uint type; 2056 2057 trace_xfs_log_recover_buf_dquot_buf(log, buf_f); 2058 2059 /* 2060 * Filesystems are required to send in quota flags at mount time. 
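 *
 * e.g. a filesystem mounted with no quota options at all has
 * mp->m_qflags == 0, so the check below skips dquot buffer recovery
 * entirely.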
2061 */
2062 if (mp->m_qflags == 0) {
2063 return;
2064 }
2065
2066 type = 0;
2067 if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2068 type |= XFS_DQ_USER;
2069 if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2070 type |= XFS_DQ_PROJ;
2071 if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2072 type |= XFS_DQ_GROUP;
2073 /*
2074 * This type of quota was turned off, so ignore this buffer
2075 */
2076 if (log->l_quotaoffs_flag & type)
2077 return;
2078
2079 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2080}
2081
2082/*
2083 * This routine replays a modification made to a buffer at runtime.
2084 * There are actually two types of buffer, regular and inode, which
2085 * are handled differently. Inode buffers differ in that we only
2086 * recover a specific set of data from them, namely
2087 * the inode di_next_unlinked fields. This is because all other inode
2088 * data is actually logged via inode records and any data we replay
2089 * here which overlaps that may be stale.
2090 *
2091 * When meta-data buffers are freed at run time we log a buffer item
2092 * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2093 * of the buffer in the log should not be replayed at recovery time.
2094 * This is so that if the blocks covered by the buffer are reused for
2095 * file data before we crash we don't end up replaying old, freed
2096 * meta-data into a user's file.
2097 *
2098 * To handle the cancellation of buffer log items, we make two passes
2099 * over the log during recovery. During the first we build a table of
2100 * those buffers which have been cancelled, and during the second we
2101 * only replay those buffers which do not have corresponding cancel
2102 * records in the table. See xlog_recover_buffer_pass1() and
2103 * xlog_check_buffer_cancelled() for more details on the implementation
2104 * of the table of cancel records.
2104 */
2105STATIC int
2106xlog_recover_buffer_pass2(
2107 xlog_t *log,
2108 xlog_recover_item_t *item)
2109{
2110 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
2111 xfs_mount_t *mp = log->l_mp;
2112 xfs_buf_t *bp;
2113 int error;
2114 uint buf_flags;
2115
2116 /*
2117 * In this pass we only want to recover all the buffers which have
2118 * not been cancelled and are not cancellation buffers themselves.
2119 */
2120 if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2121 buf_f->blf_len, buf_f->blf_flags)) {
2122 trace_xfs_log_recover_buf_cancel(log, buf_f);
2123 return 0;
2124 }
2125
2126 trace_xfs_log_recover_buf_recover(log, buf_f);
2127
2128 buf_flags = XBF_LOCK;
2129 if (!(buf_f->blf_flags & XFS_BLF_INODE_BUF))
2130 buf_flags |= XBF_MAPPED;
2131
2132 bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2133 buf_flags);
2134 if (XFS_BUF_ISERROR(bp)) {
2135 xfs_ioerror_alert("xlog_recover_do..(read#1)", mp,
2136 bp, buf_f->blf_blkno);
2137 error = XFS_BUF_GETERROR(bp);
2138 xfs_buf_relse(bp);
2139 return error;
2140 }
2141
2142 error = 0;
2143 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2144 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2145 } else if (buf_f->blf_flags &
2146 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2147 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2148 } else {
2149 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2150 }
2151 if (error)
2152 return XFS_ERROR(error);
2153
2154 /*
2155 * Perform delayed write on the buffer. Asynchronous writes will be
2156 * slower when taking into account all the buffers to be flushed.
2157 *
2158 * Also make sure that only inode buffers with good sizes stay in
2159 * the buffer cache. The kernel moves inodes in buffers of 1 block
2160 * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger. The inode
2161 * buffers in the log can be a different size if the log was generated
2162 * by an older kernel using unclustered inode buffers or a newer kernel
2163 * running with a different inode cluster size. Regardless, if
2164 * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
2165 * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
2166 * the buffer out of the buffer cache so that the buffer won't
2167 * overlap with future reads of those inodes. (e.g. with 4k blocks
 * and an 8k inode cluster, only 8k inode buffers are kept; a
 * mismatched one is marked stale so it does not linger in the cache.)
2168 */
2169 if (XFS_DINODE_MAGIC ==
2170 be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2171 (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize,
2172 (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2173 XFS_BUF_STALE(bp);
2174 error = xfs_bwrite(mp, bp);
2175 } else {
2176 ASSERT(bp->b_target->bt_mount == mp);
2177 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2178 xfs_bdwrite(mp, bp);
2179 }
2180
2181 return (error);
2182}
2183
2184STATIC int
2185xlog_recover_inode_pass2(
2186 xlog_t *log,
2187 xlog_recover_item_t *item)
2188{
2189 xfs_inode_log_format_t *in_f;
2190 xfs_mount_t *mp = log->l_mp;
2191 xfs_buf_t *bp;
2192 xfs_dinode_t *dip;
2193 int len;
2194 xfs_caddr_t src;
2195 xfs_caddr_t dest;
2196 int error;
2197 int attr_index;
2198 uint fields;
2199 xfs_icdinode_t *dicp;
2200 int need_free = 0;
2201
2202 if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2203 in_f = item->ri_buf[0].i_addr;
2204 } else {
2205 in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
2206 need_free = 1;
2207 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2208 if (error)
2209 goto error;
2210 }
2211
2212 /*
2213 * Inode buffers can be freed; look out for that case
2214 * and do not replay the inode.
2215 */
2216 if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2217 in_f->ilf_len, 0)) {
2218 error = 0;
2219 trace_xfs_log_recover_inode_cancel(log, in_f);
2220 goto error;
2221 }
2222 trace_xfs_log_recover_inode_recover(log, in_f);
2223
2224 bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
2225 XBF_LOCK);
2226 if (XFS_BUF_ISERROR(bp)) {
2227 xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
2228 bp, in_f->ilf_blkno);
2229 error = XFS_BUF_GETERROR(bp);
2230 xfs_buf_relse(bp);
2231 goto error;
2232 }
2233 error = 0;
2234 ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2235 dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);
2236
2237 /*
2238 * Make sure the place we're flushing out to really looks
2239 * like an inode!
2240 */ 2241 if (unlikely(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC)) { 2242 xfs_buf_relse(bp); 2243 xfs_alert(mp, 2244 "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld", 2245 __func__, dip, bp, in_f->ilf_ino); 2246 XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)", 2247 XFS_ERRLEVEL_LOW, mp); 2248 error = EFSCORRUPTED; 2249 goto error; 2250 } 2251 dicp = item->ri_buf[1].i_addr; 2252 if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) { 2253 xfs_buf_relse(bp); 2254 xfs_alert(mp, 2255 "%s: Bad inode log record, rec ptr 0x%p, ino %Ld", 2256 __func__, item, in_f->ilf_ino); 2257 XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)", 2258 XFS_ERRLEVEL_LOW, mp); 2259 error = EFSCORRUPTED; 2260 goto error; 2261 } 2262 2263 /* Skip replay when the on disk inode is newer than the log one */ 2264 if (dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) { 2265 /* 2266 * Deal with the wrap case, DI_MAX_FLUSH is less 2267 * than smaller numbers 2268 */ 2269 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH && 2270 dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) { 2271 /* do nothing */ 2272 } else { 2273 xfs_buf_relse(bp); 2274 trace_xfs_log_recover_inode_skip(log, in_f); 2275 error = 0; 2276 goto error; 2277 } 2278 } 2279 /* Take the opportunity to reset the flush iteration count */ 2280 dicp->di_flushiter = 0; 2281 2282 if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) { 2283 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) && 2284 (dicp->di_format != XFS_DINODE_FMT_BTREE)) { 2285 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)", 2286 XFS_ERRLEVEL_LOW, mp, dicp); 2287 xfs_buf_relse(bp); 2288 xfs_alert(mp, 2289 "%s: Bad regular inode log record, rec ptr 0x%p, " 2290 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", 2291 __func__, item, dip, bp, in_f->ilf_ino); 2292 error = EFSCORRUPTED; 2293 goto error; 2294 } 2295 } else if (unlikely((dicp->di_mode & S_IFMT) == S_IFDIR)) { 2296 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) && 2297 (dicp->di_format != XFS_DINODE_FMT_BTREE) && 2298 (dicp->di_format != XFS_DINODE_FMT_LOCAL)) { 2299 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)", 2300 XFS_ERRLEVEL_LOW, mp, dicp); 2301 xfs_buf_relse(bp); 2302 xfs_alert(mp, 2303 "%s: Bad dir inode log record, rec ptr 0x%p, " 2304 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", 2305 __func__, item, dip, bp, in_f->ilf_ino); 2306 error = EFSCORRUPTED; 2307 goto error; 2308 } 2309 } 2310 if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){ 2311 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)", 2312 XFS_ERRLEVEL_LOW, mp, dicp); 2313 xfs_buf_relse(bp); 2314 xfs_alert(mp, 2315 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, " 2316 "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld", 2317 __func__, item, dip, bp, in_f->ilf_ino, 2318 dicp->di_nextents + dicp->di_anextents, 2319 dicp->di_nblocks); 2320 error = EFSCORRUPTED; 2321 goto error; 2322 } 2323 if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) { 2324 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)", 2325 XFS_ERRLEVEL_LOW, mp, dicp); 2326 xfs_buf_relse(bp); 2327 xfs_alert(mp, 2328 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, " 2329 "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__, 2330 item, dip, bp, in_f->ilf_ino, dicp->di_forkoff); 2331 error = EFSCORRUPTED; 2332 goto error; 2333 } 2334 if (unlikely(item->ri_buf[1].i_len > sizeof(struct xfs_icdinode))) { 2335 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)", 2336 XFS_ERRLEVEL_LOW, mp, dicp); 2337 xfs_buf_relse(bp); 2338 xfs_alert(mp, 2339 "%s: Bad inode log record 
length %d, rec ptr 0x%p", 2340 __func__, item->ri_buf[1].i_len, item); 2341 error = EFSCORRUPTED; 2342 goto error; 2343 } 2344 2345 /* The core is in in-core format */ 2346 xfs_dinode_to_disk(dip, item->ri_buf[1].i_addr); 2347 2348 /* the rest is in on-disk format */ 2349 if (item->ri_buf[1].i_len > sizeof(struct xfs_icdinode)) { 2350 memcpy((xfs_caddr_t) dip + sizeof(struct xfs_icdinode), 2351 item->ri_buf[1].i_addr + sizeof(struct xfs_icdinode), 2352 item->ri_buf[1].i_len - sizeof(struct xfs_icdinode)); 2353 } 2354 2355 fields = in_f->ilf_fields; 2356 switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) { 2357 case XFS_ILOG_DEV: 2358 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev); 2359 break; 2360 case XFS_ILOG_UUID: 2361 memcpy(XFS_DFORK_DPTR(dip), 2362 &in_f->ilf_u.ilfu_uuid, 2363 sizeof(uuid_t)); 2364 break; 2365 } 2366 2367 if (in_f->ilf_size == 2) 2368 goto write_inode_buffer; 2369 len = item->ri_buf[2].i_len; 2370 src = item->ri_buf[2].i_addr; 2371 ASSERT(in_f->ilf_size <= 4); 2372 ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK)); 2373 ASSERT(!(fields & XFS_ILOG_DFORK) || 2374 (len == in_f->ilf_dsize)); 2375 2376 switch (fields & XFS_ILOG_DFORK) { 2377 case XFS_ILOG_DDATA: 2378 case XFS_ILOG_DEXT: 2379 memcpy(XFS_DFORK_DPTR(dip), src, len); 2380 break; 2381 2382 case XFS_ILOG_DBROOT: 2383 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len, 2384 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip), 2385 XFS_DFORK_DSIZE(dip, mp)); 2386 break; 2387 2388 default: 2389 /* 2390 * There are no data fork flags set. 2391 */ 2392 ASSERT((fields & XFS_ILOG_DFORK) == 0); 2393 break; 2394 } 2395 2396 /* 2397 * If we logged any attribute data, recover it. There may or 2398 * may not have been any other non-core data logged in this 2399 * transaction. 2400 */ 2401 if (in_f->ilf_fields & XFS_ILOG_AFORK) { 2402 if (in_f->ilf_fields & XFS_ILOG_DFORK) { 2403 attr_index = 3; 2404 } else { 2405 attr_index = 2; 2406 } 2407 len = item->ri_buf[attr_index].i_len; 2408 src = item->ri_buf[attr_index].i_addr; 2409 ASSERT(len == in_f->ilf_asize); 2410 2411 switch (in_f->ilf_fields & XFS_ILOG_AFORK) { 2412 case XFS_ILOG_ADATA: 2413 case XFS_ILOG_AEXT: 2414 dest = XFS_DFORK_APTR(dip); 2415 ASSERT(len <= XFS_DFORK_ASIZE(dip, mp)); 2416 memcpy(dest, src, len); 2417 break; 2418 2419 case XFS_ILOG_ABROOT: 2420 dest = XFS_DFORK_APTR(dip); 2421 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, 2422 len, (xfs_bmdr_block_t*)dest, 2423 XFS_DFORK_ASIZE(dip, mp)); 2424 break; 2425 2426 default: 2427 xfs_warn(log->l_mp, "%s: Invalid flag", __func__); 2428 ASSERT(0); 2429 xfs_buf_relse(bp); 2430 error = EIO; 2431 goto error; 2432 } 2433 } 2434 2435write_inode_buffer: 2436 ASSERT(bp->b_target->bt_mount == mp); 2437 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); 2438 xfs_bdwrite(mp, bp); 2439error: 2440 if (need_free) 2441 kmem_free(in_f); 2442 return XFS_ERROR(error); 2443} 2444 2445/* 2446 * Recover QUOTAOFF records. We simply make a note of it in the xlog_t 2447 * structure, so that we know not to do any dquot item or dquot buffer recovery, 2448 * of that type. 2449 */ 2450STATIC int 2451xlog_recover_quotaoff_pass1( 2452 xlog_t *log, 2453 xlog_recover_item_t *item) 2454{ 2455 xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr; 2456 ASSERT(qoff_f); 2457 2458 /* 2459 * The logitem format's flag tells us if this was user quotaoff, 2460 * group/project quotaoff or both. 
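 *
 * e.g. a combined user+group quotaoff logs qf_flags with both
 * XFS_UQUOTA_ACCT and XFS_GQUOTA_ACCT set, so both XFS_DQ_USER and
 * XFS_DQ_GROUP end up in l_quotaoffs_flag and neither type of dquot
 * is replayed in pass 2.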
2461 */
2462 if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
2463 log->l_quotaoffs_flag |= XFS_DQ_USER;
2464 if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
2465 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
2466 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
2467 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
2468
2469 return (0);
2470}
2471
2472/*
2473 * Recover a dquot record
2474 */
2475STATIC int
2476xlog_recover_dquot_pass2(
2477 xlog_t *log,
2478 xlog_recover_item_t *item)
2479{
2480 xfs_mount_t *mp = log->l_mp;
2481 xfs_buf_t *bp;
2482 struct xfs_disk_dquot *ddq, *recddq;
2483 int error;
2484 xfs_dq_logformat_t *dq_f;
2485 uint type;
2486
2487
2488 /*
2489 * Filesystems are required to send in quota flags at mount time.
2490 */
2491 if (mp->m_qflags == 0)
2492 return (0);
2493
2494 recddq = item->ri_buf[1].i_addr;
2495 if (recddq == NULL) {
2496 xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
2497 return XFS_ERROR(EIO);
2498 }
2499 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
2500 xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
2501 item->ri_buf[1].i_len, __func__);
2502 return XFS_ERROR(EIO);
2503 }
2504
2505 /*
2506 * This type of quota was turned off, so ignore this record.
2507 */
2508 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
2509 ASSERT(type);
2510 if (log->l_quotaoffs_flag & type)
2511 return (0);
2512
2513 /*
2514 * At this point we know that quota was _not_ turned off.
2515 * Since the mount flags are not indicating to us otherwise, this
2516 * must mean that quota is on, and the dquot needs to be replayed.
2517 * Remember that we may not have fully recovered the superblock yet,
2518 * so we can't do the usual trick of looking at the SB quota bits.
2519 *
2520 * The other possibility, of course, is that the quota subsystem was
2521 * removed since the last mount - ENOSYS.
2522 */
2523 dq_f = item->ri_buf[0].i_addr;
2524 ASSERT(dq_f);
2525 error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2526 "xlog_recover_dquot_pass2 (log copy)");
2527 if (error)
2528 return XFS_ERROR(EIO);
2529 ASSERT(dq_f->qlf_len == 1);
2530
2531 error = xfs_read_buf(mp, mp->m_ddev_targp,
2532 dq_f->qlf_blkno,
2533 XFS_FSB_TO_BB(mp, dq_f->qlf_len),
2534 0, &bp);
2535 if (error) {
2536 xfs_ioerror_alert("xlog_recover_do..(read#3)", mp,
2537 bp, dq_f->qlf_blkno);
2538 return error;
2539 }
2540 ASSERT(bp);
2541 ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
2542
2543 /*
2544 * At least the magic num portion should be on disk because this
2545 * was among a chunk of dquots created earlier, and we did some
2546 * minimal initialization then.
2547 */
2548 error = xfs_qm_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2549 "xlog_recover_dquot_pass2");
2550 if (error) {
2551 xfs_buf_relse(bp);
2552 return XFS_ERROR(EIO);
2553 }
2554
2555 memcpy(ddq, recddq, item->ri_buf[1].i_len);
2556
2557 ASSERT(dq_f->qlf_size == 2);
2558 ASSERT(bp->b_target->bt_mount == mp);
2559 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2560 xfs_bdwrite(mp, bp);
2561
2562 return (0);
2563}
2564
2565/*
2566 * This routine is called to create an in-core extent free intent
2567 * item from the efi format structure which was logged on disk.
2568 * It allocates an in-core efi, copies the extents from the format
2569 * structure into it, and adds the efi to the AIL with the given
2570 * LSN.
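 *
 * Size sketch (illustrative, mirroring the assert in
 * xlog_recover_efd_pass2() below): an on-disk efi/efd format with
 * n extents occupies
 *
 *	sizeof(format struct) + (n - 1) * sizeof(extent)
 *
 * bytes, because the format structure already embeds one extent.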
2571 */
2572STATIC int
2573xlog_recover_efi_pass2(
2574 xlog_t *log,
2575 xlog_recover_item_t *item,
2576 xfs_lsn_t lsn)
2577{
2578 int error;
2579 xfs_mount_t *mp = log->l_mp;
2580 xfs_efi_log_item_t *efip;
2581 xfs_efi_log_format_t *efi_formatp;
2582
2583 efi_formatp = item->ri_buf[0].i_addr;
2584
2585 efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
2586 if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
2587 &(efip->efi_format)))) {
2588 xfs_efi_item_free(efip);
2589 return error;
2590 }
2591 atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
2592
2593 spin_lock(&log->l_ailp->xa_lock);
2594 /*
2595 * xfs_trans_ail_update() drops the AIL lock.
2596 */
2597 xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
2598 return 0;
2599}
2600
2601
2602/*
2603 * This routine is called when an efd format structure is found in
2604 * a committed transaction in the log. Its purpose is to cancel
2605 * the corresponding efi if it was still in the log. To do this
2606 * it searches the AIL for the efi with an id equal to that in the
2607 * efd format structure. If we find it, we remove the efi from the
2608 * AIL and free it.
2609 */
2610STATIC int
2611xlog_recover_efd_pass2(
2612 xlog_t *log,
2613 xlog_recover_item_t *item)
2614{
2615 xfs_efd_log_format_t *efd_formatp;
2616 xfs_efi_log_item_t *efip = NULL;
2617 xfs_log_item_t *lip;
2618 __uint64_t efi_id;
2619 struct xfs_ail_cursor cur;
2620 struct xfs_ail *ailp = log->l_ailp;
2621
2622 efd_formatp = item->ri_buf[0].i_addr;
2623 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
2624 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
2625 (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
2626 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
2627 efi_id = efd_formatp->efd_efi_id;
2628
2629 /*
2630 * Search for the efi with the id in the efd format structure
2631 * in the AIL.
2632 */
2633 spin_lock(&ailp->xa_lock);
2634 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2635 while (lip != NULL) {
2636 if (lip->li_type == XFS_LI_EFI) {
2637 efip = (xfs_efi_log_item_t *)lip;
2638 if (efip->efi_format.efi_id == efi_id) {
2639 /*
2640 * xfs_trans_ail_delete() drops the
2641 * AIL lock.
2642 */
2643 xfs_trans_ail_delete(ailp, lip);
2644 xfs_efi_item_free(efip);
2645 spin_lock(&ailp->xa_lock);
2646 break;
2647 }
2648 }
2649 lip = xfs_trans_ail_cursor_next(ailp, &cur);
2650 }
2651 xfs_trans_ail_cursor_done(ailp, &cur);
2652 spin_unlock(&ailp->xa_lock);
2653
2654 return 0;
2655}
2656
2657/*
2658 * Free up any resources allocated by the transaction
2659 *
2660 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2661 */
2662STATIC void
2663xlog_recover_free_trans(
2664 struct xlog_recover *trans)
2665{
2666 xlog_recover_item_t *item, *n;
2667 int i;
2668
2669 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
2670 /* Free the regions in the item.
*/ 2671 list_del(&item->ri_list); 2672 for (i = 0; i < item->ri_cnt; i++) 2673 kmem_free(item->ri_buf[i].i_addr); 2674 /* Free the item itself */ 2675 kmem_free(item->ri_buf); 2676 kmem_free(item); 2677 } 2678 /* Free the transaction recover structure */ 2679 kmem_free(trans); 2680} 2681 2682STATIC int 2683xlog_recover_commit_pass1( 2684 struct log *log, 2685 struct xlog_recover *trans, 2686 xlog_recover_item_t *item) 2687{ 2688 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1); 2689 2690 switch (ITEM_TYPE(item)) { 2691 case XFS_LI_BUF: 2692 return xlog_recover_buffer_pass1(log, item); 2693 case XFS_LI_QUOTAOFF: 2694 return xlog_recover_quotaoff_pass1(log, item); 2695 case XFS_LI_INODE: 2696 case XFS_LI_EFI: 2697 case XFS_LI_EFD: 2698 case XFS_LI_DQUOT: 2699 /* nothing to do in pass 1 */ 2700 return 0; 2701 default: 2702 xfs_warn(log->l_mp, "%s: invalid item type (%d)", 2703 __func__, ITEM_TYPE(item)); 2704 ASSERT(0); 2705 return XFS_ERROR(EIO); 2706 } 2707} 2708 2709STATIC int 2710xlog_recover_commit_pass2( 2711 struct log *log, 2712 struct xlog_recover *trans, 2713 xlog_recover_item_t *item) 2714{ 2715 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2); 2716 2717 switch (ITEM_TYPE(item)) { 2718 case XFS_LI_BUF: 2719 return xlog_recover_buffer_pass2(log, item); 2720 case XFS_LI_INODE: 2721 return xlog_recover_inode_pass2(log, item); 2722 case XFS_LI_EFI: 2723 return xlog_recover_efi_pass2(log, item, trans->r_lsn); 2724 case XFS_LI_EFD: 2725 return xlog_recover_efd_pass2(log, item); 2726 case XFS_LI_DQUOT: 2727 return xlog_recover_dquot_pass2(log, item); 2728 case XFS_LI_QUOTAOFF: 2729 /* nothing to do in pass2 */ 2730 return 0; 2731 default: 2732 xfs_warn(log->l_mp, "%s: invalid item type (%d)", 2733 __func__, ITEM_TYPE(item)); 2734 ASSERT(0); 2735 return XFS_ERROR(EIO); 2736 } 2737} 2738 2739/* 2740 * Perform the transaction. 2741 * 2742 * If the transaction modifies a buffer or inode, do it now. Otherwise, 2743 * EFIs and EFDs get queued up by adding entries into the AIL for them. 2744 */ 2745STATIC int 2746xlog_recover_commit_trans( 2747 struct log *log, 2748 struct xlog_recover *trans, 2749 int pass) 2750{ 2751 int error = 0; 2752 xlog_recover_item_t *item; 2753 2754 hlist_del(&trans->r_list); 2755 2756 error = xlog_recover_reorder_trans(log, trans, pass); 2757 if (error) 2758 return error; 2759 2760 list_for_each_entry(item, &trans->r_itemq, ri_list) { 2761 if (pass == XLOG_RECOVER_PASS1) 2762 error = xlog_recover_commit_pass1(log, trans, item); 2763 else 2764 error = xlog_recover_commit_pass2(log, trans, item); 2765 if (error) 2766 return error; 2767 } 2768 2769 xlog_recover_free_trans(trans); 2770 return 0; 2771} 2772 2773STATIC int 2774xlog_recover_unmount_trans( 2775 struct log *log, 2776 xlog_recover_t *trans) 2777{ 2778 /* Do nothing now */ 2779 xfs_warn(log->l_mp, "%s: Unmount LR", __func__); 2780 return 0; 2781} 2782 2783/* 2784 * There are two valid states of the r_state field. 0 indicates that the 2785 * transaction structure is in a normal state. We have either seen the 2786 * start of the transaction or the last operation we added was not a partial 2787 * operation. If the last operation we added to the transaction was a 2788 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS. 2789 * 2790 * NOTE: skip LRs with 0 data length. 
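 *
 * Example op sequence for a single transaction (flag values assumed
 * for illustration); the dispatch below sees, in order:
 *
 *	XLOG_START_TRANS	new tid added to rhash
 *	0			xlog_recover_add_to_trans()
 *	XLOG_CONTINUE_TRANS	xlog_recover_add_to_trans()
 *	XLOG_WAS_CONT_TRANS	xlog_recover_add_to_cont_trans()
 *	XLOG_COMMIT_TRANS	xlog_recover_commit_trans()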
2791 */ 2792STATIC int 2793xlog_recover_process_data( 2794 xlog_t *log, 2795 struct hlist_head rhash[], 2796 xlog_rec_header_t *rhead, 2797 xfs_caddr_t dp, 2798 int pass) 2799{ 2800 xfs_caddr_t lp; 2801 int num_logops; 2802 xlog_op_header_t *ohead; 2803 xlog_recover_t *trans; 2804 xlog_tid_t tid; 2805 int error; 2806 unsigned long hash; 2807 uint flags; 2808 2809 lp = dp + be32_to_cpu(rhead->h_len); 2810 num_logops = be32_to_cpu(rhead->h_num_logops); 2811 2812 /* check the log format matches our own - else we can't recover */ 2813 if (xlog_header_check_recover(log->l_mp, rhead)) 2814 return (XFS_ERROR(EIO)); 2815 2816 while ((dp < lp) && num_logops) { 2817 ASSERT(dp + sizeof(xlog_op_header_t) <= lp); 2818 ohead = (xlog_op_header_t *)dp; 2819 dp += sizeof(xlog_op_header_t); 2820 if (ohead->oh_clientid != XFS_TRANSACTION && 2821 ohead->oh_clientid != XFS_LOG) { 2822 xfs_warn(log->l_mp, "%s: bad clientid 0x%x", 2823 __func__, ohead->oh_clientid); 2824 ASSERT(0); 2825 return (XFS_ERROR(EIO)); 2826 } 2827 tid = be32_to_cpu(ohead->oh_tid); 2828 hash = XLOG_RHASH(tid); 2829 trans = xlog_recover_find_tid(&rhash[hash], tid); 2830 if (trans == NULL) { /* not found; add new tid */ 2831 if (ohead->oh_flags & XLOG_START_TRANS) 2832 xlog_recover_new_tid(&rhash[hash], tid, 2833 be64_to_cpu(rhead->h_lsn)); 2834 } else { 2835 if (dp + be32_to_cpu(ohead->oh_len) > lp) { 2836 xfs_warn(log->l_mp, "%s: bad length 0x%x", 2837 __func__, be32_to_cpu(ohead->oh_len)); 2838 WARN_ON(1); 2839 return (XFS_ERROR(EIO)); 2840 } 2841 flags = ohead->oh_flags & ~XLOG_END_TRANS; 2842 if (flags & XLOG_WAS_CONT_TRANS) 2843 flags &= ~XLOG_CONTINUE_TRANS; 2844 switch (flags) { 2845 case XLOG_COMMIT_TRANS: 2846 error = xlog_recover_commit_trans(log, 2847 trans, pass); 2848 break; 2849 case XLOG_UNMOUNT_TRANS: 2850 error = xlog_recover_unmount_trans(log, trans); 2851 break; 2852 case XLOG_WAS_CONT_TRANS: 2853 error = xlog_recover_add_to_cont_trans(log, 2854 trans, dp, 2855 be32_to_cpu(ohead->oh_len)); 2856 break; 2857 case XLOG_START_TRANS: 2858 xfs_warn(log->l_mp, "%s: bad transaction", 2859 __func__); 2860 ASSERT(0); 2861 error = XFS_ERROR(EIO); 2862 break; 2863 case 0: 2864 case XLOG_CONTINUE_TRANS: 2865 error = xlog_recover_add_to_trans(log, trans, 2866 dp, be32_to_cpu(ohead->oh_len)); 2867 break; 2868 default: 2869 xfs_warn(log->l_mp, "%s: bad flag 0x%x", 2870 __func__, flags); 2871 ASSERT(0); 2872 error = XFS_ERROR(EIO); 2873 break; 2874 } 2875 if (error) 2876 return error; 2877 } 2878 dp += be32_to_cpu(ohead->oh_len); 2879 num_logops--; 2880 } 2881 return 0; 2882} 2883 2884/* 2885 * Process an extent free intent item that was recovered from 2886 * the log. We need to free the extents that it describes. 2887 */ 2888STATIC int 2889xlog_recover_process_efi( 2890 xfs_mount_t *mp, 2891 xfs_efi_log_item_t *efip) 2892{ 2893 xfs_efd_log_item_t *efdp; 2894 xfs_trans_t *tp; 2895 int i; 2896 int error = 0; 2897 xfs_extent_t *extp; 2898 xfs_fsblock_t startblock_fsb; 2899 2900 ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)); 2901 2902 /* 2903 * First check the validity of the extents described by the 2904 * EFI. If any are bad, then assume that all are bad and 2905 * just toss the EFI. 
2906 */ 2907 for (i = 0; i < efip->efi_format.efi_nextents; i++) { 2908 extp = &(efip->efi_format.efi_extents[i]); 2909 startblock_fsb = XFS_BB_TO_FSB(mp, 2910 XFS_FSB_TO_DADDR(mp, extp->ext_start)); 2911 if ((startblock_fsb == 0) || 2912 (extp->ext_len == 0) || 2913 (startblock_fsb >= mp->m_sb.sb_dblocks) || 2914 (extp->ext_len >= mp->m_sb.sb_agblocks)) { 2915 /* 2916 * This will pull the EFI from the AIL and 2917 * free the memory associated with it. 2918 */ 2919 xfs_efi_release(efip, efip->efi_format.efi_nextents); 2920 return XFS_ERROR(EIO); 2921 } 2922 } 2923 2924 tp = xfs_trans_alloc(mp, 0); 2925 error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0); 2926 if (error) 2927 goto abort_error; 2928 efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents); 2929 2930 for (i = 0; i < efip->efi_format.efi_nextents; i++) { 2931 extp = &(efip->efi_format.efi_extents[i]); 2932 error = xfs_free_extent(tp, extp->ext_start, extp->ext_len); 2933 if (error) 2934 goto abort_error; 2935 xfs_trans_log_efd_extent(tp, efdp, extp->ext_start, 2936 extp->ext_len); 2937 } 2938 2939 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags); 2940 error = xfs_trans_commit(tp, 0); 2941 return error; 2942 2943abort_error: 2944 xfs_trans_cancel(tp, XFS_TRANS_ABORT); 2945 return error; 2946} 2947 2948/* 2949 * When this is called, all of the EFIs which did not have 2950 * corresponding EFDs should be in the AIL. What we do now 2951 * is free the extents associated with each one. 2952 * 2953 * Since we process the EFIs in normal transactions, they 2954 * will be removed at some point after the commit. This prevents 2955 * us from just walking down the list processing each one. 2956 * We'll use a flag in the EFI to skip those that we've already 2957 * processed and use the AIL iteration mechanism's generation 2958 * count to try to speed this up at least a bit. 2959 * 2960 * When we start, we know that the EFIs are the only things in 2961 * the AIL. As we process them, however, other items are added 2962 * to the AIL. Since everything added to the AIL must come after 2963 * everything already in the AIL, we stop processing as soon as 2964 * we see something other than an EFI in the AIL. 2965 */ 2966STATIC int 2967xlog_recover_process_efis( 2968 xlog_t *log) 2969{ 2970 xfs_log_item_t *lip; 2971 xfs_efi_log_item_t *efip; 2972 int error = 0; 2973 struct xfs_ail_cursor cur; 2974 struct xfs_ail *ailp; 2975 2976 ailp = log->l_ailp; 2977 spin_lock(&ailp->xa_lock); 2978 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); 2979 while (lip != NULL) { 2980 /* 2981 * We're done when we see something other than an EFI. 2982 * There should be no EFIs left in the AIL now. 2983 */ 2984 if (lip->li_type != XFS_LI_EFI) { 2985#ifdef DEBUG 2986 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur)) 2987 ASSERT(lip->li_type != XFS_LI_EFI); 2988#endif 2989 break; 2990 } 2991 2992 /* 2993 * Skip EFIs that we've already processed. 
2994 */ 2995 efip = (xfs_efi_log_item_t *)lip; 2996 if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) { 2997 lip = xfs_trans_ail_cursor_next(ailp, &cur); 2998 continue; 2999 } 3000 3001 spin_unlock(&ailp->xa_lock); 3002 error = xlog_recover_process_efi(log->l_mp, efip); 3003 spin_lock(&ailp->xa_lock); 3004 if (error) 3005 goto out; 3006 lip = xfs_trans_ail_cursor_next(ailp, &cur); 3007 } 3008out: 3009 xfs_trans_ail_cursor_done(ailp, &cur); 3010 spin_unlock(&ailp->xa_lock); 3011 return error; 3012} 3013 3014/* 3015 * This routine performs a transaction to null out a bad inode pointer 3016 * in an agi unlinked inode hash bucket. 3017 */ 3018STATIC void 3019xlog_recover_clear_agi_bucket( 3020 xfs_mount_t *mp, 3021 xfs_agnumber_t agno, 3022 int bucket) 3023{ 3024 xfs_trans_t *tp; 3025 xfs_agi_t *agi; 3026 xfs_buf_t *agibp; 3027 int offset; 3028 int error; 3029 3030 tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET); 3031 error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp), 3032 0, 0, 0); 3033 if (error) 3034 goto out_abort; 3035 3036 error = xfs_read_agi(mp, tp, agno, &agibp); 3037 if (error) 3038 goto out_abort; 3039 3040 agi = XFS_BUF_TO_AGI(agibp); 3041 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); 3042 offset = offsetof(xfs_agi_t, agi_unlinked) + 3043 (sizeof(xfs_agino_t) * bucket); 3044 xfs_trans_log_buf(tp, agibp, offset, 3045 (offset + sizeof(xfs_agino_t) - 1)); 3046 3047 error = xfs_trans_commit(tp, 0); 3048 if (error) 3049 goto out_error; 3050 return; 3051 3052out_abort: 3053 xfs_trans_cancel(tp, XFS_TRANS_ABORT); 3054out_error: 3055 xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno); 3056 return; 3057} 3058 3059STATIC xfs_agino_t 3060xlog_recover_process_one_iunlink( 3061 struct xfs_mount *mp, 3062 xfs_agnumber_t agno, 3063 xfs_agino_t agino, 3064 int bucket) 3065{ 3066 struct xfs_buf *ibp; 3067 struct xfs_dinode *dip; 3068 struct xfs_inode *ip; 3069 xfs_ino_t ino; 3070 int error; 3071 3072 ino = XFS_AGINO_TO_INO(mp, agno, agino); 3073 error = xfs_iget(mp, NULL, ino, 0, 0, &ip); 3074 if (error) 3075 goto fail; 3076 3077 /* 3078 * Get the on disk inode to find the next inode in the bucket. 3079 */ 3080 error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XBF_LOCK); 3081 if (error) 3082 goto fail_iput; 3083 3084 ASSERT(ip->i_d.di_nlink == 0); 3085 ASSERT(ip->i_d.di_mode != 0); 3086 3087 /* setup for the next pass */ 3088 agino = be32_to_cpu(dip->di_next_unlinked); 3089 xfs_buf_relse(ibp); 3090 3091 /* 3092 * Prevent any DMAPI event from being sent when the reference on 3093 * the inode is dropped. 3094 */ 3095 ip->i_d.di_dmevmask = 0; 3096 3097 IRELE(ip); 3098 return agino; 3099 3100 fail_iput: 3101 IRELE(ip); 3102 fail: 3103 /* 3104 * We can't read in the inode this bucket points to, or this inode 3105 * is messed up. Just ditch this bucket of inodes. We will lose 3106 * some inodes and space, but at least we won't hang. 3107 * 3108 * Call xlog_recover_clear_agi_bucket() to perform a transaction to 3109 * clear the inode pointer in the bucket. 3110 */ 3111 xlog_recover_clear_agi_bucket(mp, agno, bucket); 3112 return NULLAGINO; 3113} 3114 3115/* 3116 * xlog_iunlink_recover 3117 * 3118 * This is called during recovery to process any inodes which 3119 * we unlinked but not freed when the system crashed. These 3120 * inodes will be on the lists in the AGI blocks. What we do 3121 * here is scan all the AGIs and fully truncate and free any 3122 * inodes found on the lists. 
Each inode is removed from the 3123 * lists when it has been fully truncated and is freed. The 3124 * freeing of the inode and its removal from the list must be 3125 * atomic. 3126 */ 3127STATIC void 3128xlog_recover_process_iunlinks( 3129 xlog_t *log) 3130{ 3131 xfs_mount_t *mp; 3132 xfs_agnumber_t agno; 3133 xfs_agi_t *agi; 3134 xfs_buf_t *agibp; 3135 xfs_agino_t agino; 3136 int bucket; 3137 int error; 3138 uint mp_dmevmask; 3139 3140 mp = log->l_mp; 3141 3142 /* 3143 * Prevent any DMAPI event from being sent while in this function. 3144 */ 3145 mp_dmevmask = mp->m_dmevmask; 3146 mp->m_dmevmask = 0; 3147 3148 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { 3149 /* 3150 * Find the agi for this ag. 3151 */ 3152 error = xfs_read_agi(mp, NULL, agno, &agibp); 3153 if (error) { 3154 /* 3155 * AGI is b0rked. Don't process it. 3156 * 3157 * We should probably mark the filesystem as corrupt 3158 * after we've recovered all the ag's we can.... 3159 */ 3160 continue; 3161 } 3162 agi = XFS_BUF_TO_AGI(agibp); 3163 3164 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) { 3165 agino = be32_to_cpu(agi->agi_unlinked[bucket]); 3166 while (agino != NULLAGINO) { 3167 /* 3168 * Release the agi buffer so that it can 3169 * be acquired in the normal course of the 3170 * transaction to truncate and free the inode. 3171 */ 3172 xfs_buf_relse(agibp); 3173 3174 agino = xlog_recover_process_one_iunlink(mp, 3175 agno, agino, bucket); 3176 3177 /* 3178 * Reacquire the agibuffer and continue around 3179 * the loop. This should never fail as we know 3180 * the buffer was good earlier on. 3181 */ 3182 error = xfs_read_agi(mp, NULL, agno, &agibp); 3183 ASSERT(error == 0); 3184 agi = XFS_BUF_TO_AGI(agibp); 3185 } 3186 } 3187 3188 /* 3189 * Release the buffer for the current agi so we can 3190 * go on to the next one. 
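 *
 * Bucket walk sketch (hypothetical inode numbers): with
 * agi_unlinked[bucket] = 103 and an on-disk chain of
 * 103 -> 57 -> NULLAGINO, the inner loop above calls
 * xlog_recover_process_one_iunlink() twice, each time following
 * di_next_unlinked to the next agino, and stops at NULLAGINO.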
3191 */ 3192 xfs_buf_relse(agibp); 3193 } 3194 3195 mp->m_dmevmask = mp_dmevmask; 3196} 3197 3198 3199#ifdef DEBUG 3200STATIC void 3201xlog_pack_data_checksum( 3202 xlog_t *log, 3203 xlog_in_core_t *iclog, 3204 int size) 3205{ 3206 int i; 3207 __be32 *up; 3208 uint chksum = 0; 3209 3210 up = (__be32 *)iclog->ic_datap; 3211 /* divide length by 4 to get # words */ 3212 for (i = 0; i < (size >> 2); i++) { 3213 chksum ^= be32_to_cpu(*up); 3214 up++; 3215 } 3216 iclog->ic_header.h_chksum = cpu_to_be32(chksum); 3217} 3218#else 3219#define xlog_pack_data_checksum(log, iclog, size) 3220#endif 3221 3222/* 3223 * Stamp cycle number in every block 3224 */ 3225void 3226xlog_pack_data( 3227 xlog_t *log, 3228 xlog_in_core_t *iclog, 3229 int roundoff) 3230{ 3231 int i, j, k; 3232 int size = iclog->ic_offset + roundoff; 3233 __be32 cycle_lsn; 3234 xfs_caddr_t dp; 3235 3236 xlog_pack_data_checksum(log, iclog, size); 3237 3238 cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn); 3239 3240 dp = iclog->ic_datap; 3241 for (i = 0; i < BTOBB(size) && 3242 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) { 3243 iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp; 3244 *(__be32 *)dp = cycle_lsn; 3245 dp += BBSIZE; 3246 } 3247 3248 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { 3249 xlog_in_core_2_t *xhdr = iclog->ic_data; 3250 3251 for ( ; i < BTOBB(size); i++) { 3252 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 3253 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 3254 xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp; 3255 *(__be32 *)dp = cycle_lsn; 3256 dp += BBSIZE; 3257 } 3258 3259 for (i = 1; i < log->l_iclog_heads; i++) { 3260 xhdr[i].hic_xheader.xh_cycle = cycle_lsn; 3261 } 3262 } 3263} 3264 3265STATIC void 3266xlog_unpack_data( 3267 xlog_rec_header_t *rhead, 3268 xfs_caddr_t dp, 3269 xlog_t *log) 3270{ 3271 int i, j, k; 3272 3273 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) && 3274 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) { 3275 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i]; 3276 dp += BBSIZE; 3277 } 3278 3279 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { 3280 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead; 3281 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) { 3282 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 3283 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 3284 *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k]; 3285 dp += BBSIZE; 3286 } 3287 } 3288} 3289 3290STATIC int 3291xlog_valid_rec_header( 3292 xlog_t *log, 3293 xlog_rec_header_t *rhead, 3294 xfs_daddr_t blkno) 3295{ 3296 int hlen; 3297 3298 if (unlikely(be32_to_cpu(rhead->h_magicno) != XLOG_HEADER_MAGIC_NUM)) { 3299 XFS_ERROR_REPORT("xlog_valid_rec_header(1)", 3300 XFS_ERRLEVEL_LOW, log->l_mp); 3301 return XFS_ERROR(EFSCORRUPTED); 3302 } 3303 if (unlikely( 3304 (!rhead->h_version || 3305 (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) { 3306 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).", 3307 __func__, be32_to_cpu(rhead->h_version)); 3308 return XFS_ERROR(EIO); 3309 } 3310 3311 /* LR body must have data or it wouldn't have been written */ 3312 hlen = be32_to_cpu(rhead->h_len); 3313 if (unlikely( hlen <= 0 || hlen > INT_MAX )) { 3314 XFS_ERROR_REPORT("xlog_valid_rec_header(2)", 3315 XFS_ERRLEVEL_LOW, log->l_mp); 3316 return XFS_ERROR(EFSCORRUPTED); 3317 } 3318 if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) { 3319 XFS_ERROR_REPORT("xlog_valid_rec_header(3)", 3320 XFS_ERRLEVEL_LOW, log->l_mp); 3321 return XFS_ERROR(EFSCORRUPTED); 3322 } 3323 return 0; 3324} 3325 3326/* 3327 * Read the log from tail to 
head and process the log records found. 3328 * Handle the two cases where the tail and head are in the same cycle 3329 * and where the active portion of the log wraps around the end of 3330 * the physical log separately. The pass parameter is passed through 3331 * to the routines called to process the data and is not looked at 3332 * here. 3333 */ 3334STATIC int 3335xlog_do_recovery_pass( 3336 xlog_t *log, 3337 xfs_daddr_t head_blk, 3338 xfs_daddr_t tail_blk, 3339 int pass) 3340{ 3341 xlog_rec_header_t *rhead; 3342 xfs_daddr_t blk_no; 3343 xfs_caddr_t offset; 3344 xfs_buf_t *hbp, *dbp; 3345 int error = 0, h_size; 3346 int bblks, split_bblks; 3347 int hblks, split_hblks, wrapped_hblks; 3348 struct hlist_head rhash[XLOG_RHASH_SIZE]; 3349 3350 ASSERT(head_blk != tail_blk); 3351 3352 /* 3353 * Read the header of the tail block and get the iclog buffer size from 3354 * h_size. Use this to tell how many sectors make up the log header. 3355 */ 3356 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { 3357 /* 3358 * When using variable length iclogs, read first sector of 3359 * iclog header and extract the header size from it. Get a 3360 * new hbp that is the correct size. 3361 */ 3362 hbp = xlog_get_bp(log, 1); 3363 if (!hbp) 3364 return ENOMEM; 3365 3366 error = xlog_bread(log, tail_blk, 1, hbp, &offset); 3367 if (error) 3368 goto bread_err1; 3369 3370 rhead = (xlog_rec_header_t *)offset; 3371 error = xlog_valid_rec_header(log, rhead, tail_blk); 3372 if (error) 3373 goto bread_err1; 3374 h_size = be32_to_cpu(rhead->h_size); 3375 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) && 3376 (h_size > XLOG_HEADER_CYCLE_SIZE)) { 3377 hblks = h_size / XLOG_HEADER_CYCLE_SIZE; 3378 if (h_size % XLOG_HEADER_CYCLE_SIZE) 3379 hblks++; 3380 xlog_put_bp(hbp); 3381 hbp = xlog_get_bp(log, hblks); 3382 } else { 3383 hblks = 1; 3384 } 3385 } else { 3386 ASSERT(log->l_sectBBsize == 1); 3387 hblks = 1; 3388 hbp = xlog_get_bp(log, 1); 3389 h_size = XLOG_BIG_RECORD_BSIZE; 3390 } 3391 3392 if (!hbp) 3393 return ENOMEM; 3394 dbp = xlog_get_bp(log, BTOBB(h_size)); 3395 if (!dbp) { 3396 xlog_put_bp(hbp); 3397 return ENOMEM; 3398 } 3399 3400 memset(rhash, 0, sizeof(rhash)); 3401 if (tail_blk <= head_blk) { 3402 for (blk_no = tail_blk; blk_no < head_blk; ) { 3403 error = xlog_bread(log, blk_no, hblks, hbp, &offset); 3404 if (error) 3405 goto bread_err2; 3406 3407 rhead = (xlog_rec_header_t *)offset; 3408 error = xlog_valid_rec_header(log, rhead, blk_no); 3409 if (error) 3410 goto bread_err2; 3411 3412 /* blocks in data section */ 3413 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); 3414 error = xlog_bread(log, blk_no + hblks, bblks, dbp, 3415 &offset); 3416 if (error) 3417 goto bread_err2; 3418 3419 xlog_unpack_data(rhead, offset, log); 3420 if ((error = xlog_recover_process_data(log, 3421 rhash, rhead, offset, pass))) 3422 goto bread_err2; 3423 blk_no += bblks + hblks; 3424 } 3425 } else { 3426 /* 3427 * Perform recovery around the end of the physical log. 3428 * When the head is not on the same cycle number as the tail, 3429 * we can't do a sequential recovery as above. 
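 *
 * Worked example (assumed geometry): with l_logBBsize = 100,
 * tail_blk = 90 and head_blk = 10, the code below replays records
 * beginning in blocks 90..99, splicing together any record that
 * straddles the 99/0 boundary, then wraps and continues from block
 * 0 up to head_blk.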
3430 */ 3431 blk_no = tail_blk; 3432 while (blk_no < log->l_logBBsize) { 3433 /* 3434 * Check for header wrapping around physical end-of-log 3435 */ 3436 offset = XFS_BUF_PTR(hbp); 3437 split_hblks = 0; 3438 wrapped_hblks = 0; 3439 if (blk_no + hblks <= log->l_logBBsize) { 3440 /* Read header in one read */ 3441 error = xlog_bread(log, blk_no, hblks, hbp, 3442 &offset); 3443 if (error) 3444 goto bread_err2; 3445 } else { 3446 /* This LR is split across physical log end */ 3447 if (blk_no != log->l_logBBsize) { 3448 /* some data before physical log end */ 3449 ASSERT(blk_no <= INT_MAX); 3450 split_hblks = log->l_logBBsize - (int)blk_no; 3451 ASSERT(split_hblks > 0); 3452 error = xlog_bread(log, blk_no, 3453 split_hblks, hbp, 3454 &offset); 3455 if (error) 3456 goto bread_err2; 3457 } 3458 3459 /* 3460 * Note: this black magic still works with 3461 * large sector sizes (non-512) only because: 3462 * - we increased the buffer size originally 3463 * by 1 sector giving us enough extra space 3464 * for the second read; 3465 * - the log start is guaranteed to be sector 3466 * aligned; 3467 * - we read the log end (LR header start) 3468 * _first_, then the log start (LR header end) 3469 * - order is important. 3470 */ 3471 wrapped_hblks = hblks - split_hblks; 3472 error = xlog_bread_offset(log, 0, 3473 wrapped_hblks, hbp, 3474 offset + BBTOB(split_hblks)); 3475 if (error) 3476 goto bread_err2; 3477 } 3478 rhead = (xlog_rec_header_t *)offset; 3479 error = xlog_valid_rec_header(log, rhead, 3480 split_hblks ? blk_no : 0); 3481 if (error) 3482 goto bread_err2; 3483 3484 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); 3485 blk_no += hblks; 3486 3487 /* Read in data for log record */ 3488 if (blk_no + bblks <= log->l_logBBsize) { 3489 error = xlog_bread(log, blk_no, bblks, dbp, 3490 &offset); 3491 if (error) 3492 goto bread_err2; 3493 } else { 3494 /* This log record is split across the 3495 * physical end of log */ 3496 offset = XFS_BUF_PTR(dbp); 3497 split_bblks = 0; 3498 if (blk_no != log->l_logBBsize) { 3499 /* some data is before the physical 3500 * end of log */ 3501 ASSERT(!wrapped_hblks); 3502 ASSERT(blk_no <= INT_MAX); 3503 split_bblks = 3504 log->l_logBBsize - (int)blk_no; 3505 ASSERT(split_bblks > 0); 3506 error = xlog_bread(log, blk_no, 3507 split_bblks, dbp, 3508 &offset); 3509 if (error) 3510 goto bread_err2; 3511 } 3512 3513 /* 3514 * Note: this black magic still works with 3515 * large sector sizes (non-512) only because: 3516 * - we increased the buffer size originally 3517 * by 1 sector giving us enough extra space 3518 * for the second read; 3519 * - the log start is guaranteed to be sector 3520 * aligned; 3521 * - we read the log end (LR header start) 3522 * _first_, then the log start (LR header end) 3523 * - order is important. 
3524 */ 3525 error = xlog_bread_offset(log, 0, 3526 bblks - split_bblks, hbp, 3527 offset + BBTOB(split_bblks)); 3528 if (error) 3529 goto bread_err2; 3530 } 3531 xlog_unpack_data(rhead, offset, log); 3532 if ((error = xlog_recover_process_data(log, rhash, 3533 rhead, offset, pass))) 3534 goto bread_err2; 3535 blk_no += bblks; 3536 } 3537 3538 ASSERT(blk_no >= log->l_logBBsize); 3539 blk_no -= log->l_logBBsize; 3540 3541 /* read first part of physical log */ 3542 while (blk_no < head_blk) { 3543 error = xlog_bread(log, blk_no, hblks, hbp, &offset); 3544 if (error) 3545 goto bread_err2; 3546 3547 rhead = (xlog_rec_header_t *)offset; 3548 error = xlog_valid_rec_header(log, rhead, blk_no); 3549 if (error) 3550 goto bread_err2; 3551 3552 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); 3553 error = xlog_bread(log, blk_no+hblks, bblks, dbp, 3554 &offset); 3555 if (error) 3556 goto bread_err2; 3557 3558 xlog_unpack_data(rhead, offset, log); 3559 if ((error = xlog_recover_process_data(log, rhash, 3560 rhead, offset, pass))) 3561 goto bread_err2; 3562 blk_no += bblks + hblks; 3563 } 3564 } 3565 3566 bread_err2: 3567 xlog_put_bp(dbp); 3568 bread_err1: 3569 xlog_put_bp(hbp); 3570 return error; 3571} 3572 3573/* 3574 * Do the recovery of the log. We actually do this in two phases. 3575 * The two passes are necessary in order to implement the function 3576 * of cancelling a record written into the log. The first pass 3577 * determines those things which have been cancelled, and the 3578 * second pass replays log items normally except for those which 3579 * have been cancelled. The handling of the replay and cancellations 3580 * takes place in the log item type specific routines. 3581 * 3582 * The table of items which have cancel records in the log is allocated 3583 * and freed at this level, since only here do we know when all of 3584 * the log recovery has been completed. 3585 */ 3586STATIC int 3587xlog_do_log_recovery( 3588 xlog_t *log, 3589 xfs_daddr_t head_blk, 3590 xfs_daddr_t tail_blk) 3591{ 3592 int error, i; 3593 3594 ASSERT(head_blk != tail_blk); 3595 3596 /* 3597 * First do a pass to find all of the cancelled buf log items. 3598 * Store them in the buf_cancel_table for use in the second pass. 3599 */ 3600 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE * 3601 sizeof(struct list_head), 3602 KM_SLEEP); 3603 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++) 3604 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]); 3605 3606 error = xlog_do_recovery_pass(log, head_blk, tail_blk, 3607 XLOG_RECOVER_PASS1); 3608 if (error != 0) { 3609 kmem_free(log->l_buf_cancel_table); 3610 log->l_buf_cancel_table = NULL; 3611 return error; 3612 } 3613 /* 3614 * Then do a second pass to actually recover the items in the log. 3615 * When it is complete free the table of buf cancel items. 3616 */ 3617 error = xlog_do_recovery_pass(log, head_blk, tail_blk, 3618 XLOG_RECOVER_PASS2); 3619#ifdef DEBUG 3620 if (!error) { 3621 int i; 3622 3623 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++) 3624 ASSERT(list_empty(&log->l_buf_cancel_table[i])); 3625 } 3626#endif /* DEBUG */ 3627 3628 kmem_free(log->l_buf_cancel_table); 3629 log->l_buf_cancel_table = NULL; 3630 3631 return error; 3632} 3633 3634/* 3635 * Do the actual recovery 3636 */ 3637STATIC int 3638xlog_do_recover( 3639 xlog_t *log, 3640 xfs_daddr_t head_blk, 3641 xfs_daddr_t tail_blk) 3642{ 3643 int error; 3644 xfs_buf_t *bp; 3645 xfs_sb_t *sbp; 3646 3647 /* 3648 * First replay the images in the log. 
3649 */ 3650 error = xlog_do_log_recovery(log, head_blk, tail_blk); 3651 if (error) { 3652 return error; 3653 } 3654 3655 XFS_bflush(log->l_mp->m_ddev_targp); 3656 3657 /* 3658 * If IO errors happened during recovery, bail out. 3659 */ 3660 if (XFS_FORCED_SHUTDOWN(log->l_mp)) { 3661 return (EIO); 3662 } 3663 3664 /* 3665 * We now update the tail_lsn since much of the recovery has completed 3666 * and there may be space available to use. If there were no extent 3667 * or iunlinks, we can free up the entire log and set the tail_lsn to 3668 * be the last_sync_lsn. This was set in xlog_find_tail to be the 3669 * lsn of the last known good LR on disk. If there are extent frees 3670 * or iunlinks they will have some entries in the AIL; so we look at 3671 * the AIL to determine how to set the tail_lsn. 3672 */ 3673 xlog_assign_tail_lsn(log->l_mp); 3674 3675 /* 3676 * Now that we've finished replaying all buffer and inode 3677 * updates, re-read in the superblock. 3678 */ 3679 bp = xfs_getsb(log->l_mp, 0); 3680 XFS_BUF_UNDONE(bp); 3681 ASSERT(!(XFS_BUF_ISWRITE(bp))); 3682 ASSERT(!(XFS_BUF_ISDELAYWRITE(bp))); 3683 XFS_BUF_READ(bp); 3684 XFS_BUF_UNASYNC(bp); 3685 xfsbdstrat(log->l_mp, bp); 3686 error = xfs_buf_iowait(bp); 3687 if (error) { 3688 xfs_ioerror_alert("xlog_do_recover", 3689 log->l_mp, bp, XFS_BUF_ADDR(bp)); 3690 ASSERT(0); 3691 xfs_buf_relse(bp); 3692 return error; 3693 } 3694 3695 /* Convert superblock from on-disk format */ 3696 sbp = &log->l_mp->m_sb; 3697 xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp)); 3698 ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC); 3699 ASSERT(xfs_sb_good_version(sbp)); 3700 xfs_buf_relse(bp); 3701 3702 /* We've re-read the superblock so re-initialize per-cpu counters */ 3703 xfs_icsb_reinit_counters(log->l_mp); 3704 3705 xlog_recover_check_summary(log); 3706 3707 /* Normal transactions can now occur */ 3708 log->l_flags &= ~XLOG_ACTIVE_RECOVERY; 3709 return 0; 3710} 3711 3712/* 3713 * Perform recovery and re-initialize some log variables in xlog_find_tail. 3714 * 3715 * Return error or zero. 3716 */ 3717int 3718xlog_recover( 3719 xlog_t *log) 3720{ 3721 xfs_daddr_t head_blk, tail_blk; 3722 int error; 3723 3724 /* find the tail of the log */ 3725 if ((error = xlog_find_tail(log, &head_blk, &tail_blk))) 3726 return error; 3727 3728 if (tail_blk != head_blk) { 3729 /* There used to be a comment here: 3730 * 3731 * disallow recovery on read-only mounts. note -- mount 3732 * checks for ENOSPC and turns it into an intelligent 3733 * error message. 3734 * ...but this is no longer true. Now, unless you specify 3735 * NORECOVERY (in which case this function would never be 3736 * called), we just go ahead and recover. We do this all 3737 * under the vfs layer, so we can get away with it unless 3738 * the device itself is read-only, in which case we fail. 3739 */ 3740 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) { 3741 return error; 3742 } 3743 3744 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)", 3745 log->l_mp->m_logname ? log->l_mp->m_logname 3746 : "internal"); 3747 3748 error = xlog_do_recover(log, head_blk, tail_blk); 3749 log->l_flags |= XLOG_RECOVERY_NEEDED; 3750 } 3751 return error; 3752} 3753 3754/* 3755 * In the first part of recovery we replay inodes and buffers and build 3756 * up the list of extent free items which need to be processed. Here 3757 * we process the extent free items and clean up the on disk unlinked 3758 * inode lists. 
This is separated from the first part of recovery so 3759 * that the root and real-time bitmap inodes can be read in from disk in 3760 * between the two stages. This is necessary so that we can free space 3761 * in the real-time portion of the file system. 3762 */ 3763int 3764xlog_recover_finish( 3765 xlog_t *log) 3766{ 3767 /* 3768 * Now we're ready to do the transactions needed for the 3769 * rest of recovery. Start with completing all the extent 3770 * free intent records and then process the unlinked inode 3771 * lists. At this point, we essentially run in normal mode 3772 * except that we're still performing recovery actions 3773 * rather than accepting new requests. 3774 */ 3775 if (log->l_flags & XLOG_RECOVERY_NEEDED) { 3776 int error; 3777 error = xlog_recover_process_efis(log); 3778 if (error) { 3779 xfs_alert(log->l_mp, "Failed to recover EFIs"); 3780 return error; 3781 } 3782 /* 3783 * Sync the log to get all the EFIs out of the AIL. 3784 * This isn't absolutely necessary, but it helps in 3785 * case the unlink transactions would have problems 3786 * pushing the EFIs out of the way. 3787 */ 3788 xfs_log_force(log->l_mp, XFS_LOG_SYNC); 3789 3790 xlog_recover_process_iunlinks(log); 3791 3792 xlog_recover_check_summary(log); 3793 3794 xfs_notice(log->l_mp, "Ending recovery (logdev: %s)", 3795 log->l_mp->m_logname ? log->l_mp->m_logname 3796 : "internal"); 3797 log->l_flags &= ~XLOG_RECOVERY_NEEDED; 3798 } else { 3799 xfs_info(log->l_mp, "Ending clean mount"); 3800 } 3801 return 0; 3802} 3803 3804 3805#if defined(DEBUG) 3806/* 3807 * Read all of the agf and agi counters and check that they 3808 * are consistent with the superblock counters. 3809 */ 3810void 3811xlog_recover_check_summary( 3812 xlog_t *log) 3813{ 3814 xfs_mount_t *mp; 3815 xfs_agf_t *agfp; 3816 xfs_buf_t *agfbp; 3817 xfs_buf_t *agibp; 3818 xfs_agnumber_t agno; 3819 __uint64_t freeblks; 3820 __uint64_t itotal; 3821 __uint64_t ifree; 3822 int error; 3823 3824 mp = log->l_mp; 3825 3826 freeblks = 0LL; 3827 itotal = 0LL; 3828 ifree = 0LL; 3829 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { 3830 error = xfs_read_agf(mp, NULL, agno, 0, &agfbp); 3831 if (error) { 3832 xfs_alert(mp, "%s agf read failed agno %d error %d", 3833 __func__, agno, error); 3834 } else { 3835 agfp = XFS_BUF_TO_AGF(agfbp); 3836 freeblks += be32_to_cpu(agfp->agf_freeblks) + 3837 be32_to_cpu(agfp->agf_flcount); 3838 xfs_buf_relse(agfbp); 3839 } 3840 3841 error = xfs_read_agi(mp, NULL, agno, &agibp); 3842 if (error) { 3843 xfs_alert(mp, "%s agi read failed agno %d error %d", 3844 __func__, agno, error); 3845 } else { 3846 struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp); 3847 3848 itotal += be32_to_cpu(agi->agi_count); 3849 ifree += be32_to_cpu(agi->agi_freecount); 3850 xfs_buf_relse(agibp); 3851 } 3852 } 3853} 3854#endif /* DEBUG */
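
/*
 * Illustrative userspace-style sketch (not kernel code; a 512 byte
 * basic block and a record that fits in XLOG_HEADER_CYCLE_SIZE are
 * assumed): the round trip performed by xlog_pack_data() and
 * xlog_unpack_data() above.
 *
 *	void pack(uint32_t *saved, char *dp, int nbb, uint32_t cycle_lsn)
 *	{
 *		int i;
 *
 *		for (i = 0; i < nbb; i++) {
 *			// save the first word of each block, then
 *			// stamp the cycle number over it
 *			saved[i] = *(uint32_t *)dp;
 *			*(uint32_t *)dp = cycle_lsn;
 *			dp += 512;
 *		}
 *	}
 *
 *	void unpack(uint32_t *saved, char *dp, int nbb)
 *	{
 *		int i;
 *
 *		for (i = 0; i < nbb; i++) {
 *			// restore the saved word on read-back
 *			*(uint32_t *)dp = saved[i];
 *			dp += 512;
 *		}
 *	}
 *
 * Stamping every basic block with the cycle number is what lets the
 * head/tail search code distinguish blocks written in the current
 * cycle from stale blocks left over from the previous pass around
 * the log.
 */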