xfs: remove xfs_flush_space

The only thing we need to do now when we get an ENOSPC condition during delayed
allocation reservation is flush all the other inodes with delalloc blocks on
them and retry without EOF preallocation. Remove the unneeded mess that is
xfs_flush_space() and just call xfs_flush_inodes() directly from
xfs_iomap_write_delay().

Also, change the location of the retry label to avoid trying to do EOF
preallocation because we don't want to do that at ENOSPC. This enables us to
remove the BMAPI_SYNC flag as it is no longer used.

Signed-off-by: Dave Chinner <david@fromorbit.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>

Authored by Dave Chinner, committed by Christoph Hellwig.
Commit: 8de2bf93 (parent: 153fec43)

+16 -48
+15 -46
fs/xfs/xfs_iomap.c
··· 338 } 339 340 STATIC int 341 - xfs_flush_space( 342 - xfs_inode_t *ip, 343 - int *fsynced, 344 - int *ioflags) 345 - { 346 - switch (*fsynced) { 347 - case 0: 348 - if (ip->i_delayed_blks) { 349 - xfs_iunlock(ip, XFS_ILOCK_EXCL); 350 - delay(1); 351 - xfs_ilock(ip, XFS_ILOCK_EXCL); 352 - *fsynced = 1; 353 - } else { 354 - *ioflags |= BMAPI_SYNC; 355 - *fsynced = 2; 356 - } 357 - return 0; 358 - case 1: 359 - *fsynced = 2; 360 - *ioflags |= BMAPI_SYNC; 361 - return 0; 362 - case 2: 363 - xfs_iunlock(ip, XFS_ILOCK_EXCL); 364 - xfs_flush_inodes(ip); 365 - xfs_ilock(ip, XFS_ILOCK_EXCL); 366 - *fsynced = 3; 367 - return 0; 368 - } 369 - return 1; 370 - } 371 - 372 - STATIC int 373 xfs_cmn_err_fsblock_zero( 374 xfs_inode_t *ip, 375 xfs_bmbt_irec_t *imap) ··· 506 } 507 508 /* 509 - * If the caller is doing a write at the end of the file, 510 - * then extend the allocation out to the file system's write 511 - * iosize. We clean up any extra space left over when the 512 - * file is closed in xfs_inactive(). 513 - * 514 - * For sync writes, we are flushing delayed allocate space to 515 - * try to make additional space available for allocation near 516 - * the filesystem full boundary - preallocation hurts in that 517 - * situation, of course. 
518 */ 519 STATIC int 520 xfs_iomap_eof_want_preallocate( ··· 527 int n, error, imaps; 528 529 *prealloc = 0; 530 - if ((ioflag & BMAPI_SYNC) || (offset + count) <= ip->i_size) 531 return 0; 532 533 /* ··· 573 xfs_extlen_t extsz; 574 int nimaps; 575 xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS]; 576 - int prealloc, fsynced = 0; 577 int error; 578 579 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ··· 589 extsz = xfs_get_extsz_hint(ip); 590 offset_fsb = XFS_B_TO_FSBT(mp, offset); 591 592 - retry: 593 error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count, 594 ioflag, imap, XFS_WRITE_IMAPS, &prealloc); 595 if (error) 596 return error; 597 598 if (prealloc) { 599 aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1)); 600 ioalign = XFS_B_TO_FSBT(mp, aligned_offset); ··· 621 622 /* 623 * If bmapi returned us nothing, and if we didn't get back EDQUOT, 624 - * then we must have run out of space - flush delalloc, and retry.. 625 */ 626 if (nimaps == 0) { 627 xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE, 628 ip, offset, count); 629 - if (xfs_flush_space(ip, &fsynced, &ioflag)) 630 return XFS_ERROR(ENOSPC); 631 632 error = 0; 633 goto retry; 634 } 635
··· 338 } 339 340 STATIC int 341 xfs_cmn_err_fsblock_zero( 342 xfs_inode_t *ip, 343 xfs_bmbt_irec_t *imap) ··· 538 } 539 540 /* 541 + * If the caller is doing a write at the end of the file, then extend the 542 + * allocation out to the file system's write iosize. We clean up any extra 543 + * space left over when the file is closed in xfs_inactive(). 544 */ 545 STATIC int 546 xfs_iomap_eof_want_preallocate( ··· 565 int n, error, imaps; 566 567 *prealloc = 0; 568 + if ((offset + count) <= ip->i_size) 569 return 0; 570 571 /* ··· 611 xfs_extlen_t extsz; 612 int nimaps; 613 xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS]; 614 + int prealloc, flushed = 0; 615 int error; 616 617 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ··· 627 extsz = xfs_get_extsz_hint(ip); 628 offset_fsb = XFS_B_TO_FSBT(mp, offset); 629 630 error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count, 631 ioflag, imap, XFS_WRITE_IMAPS, &prealloc); 632 if (error) 633 return error; 634 635 + retry: 636 if (prealloc) { 637 aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1)); 638 ioalign = XFS_B_TO_FSBT(mp, aligned_offset); ··· 659 660 /* 661 * If bmapi returned us nothing, and if we didn't get back EDQUOT, 662 + * then we must have run out of space - flush all other inodes with 663 + * delalloc blocks and retry without EOF preallocation. 664 */ 665 if (nimaps == 0) { 666 xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE, 667 ip, offset, count); 668 + if (flushed) 669 return XFS_ERROR(ENOSPC); 670 671 + xfs_iunlock(ip, XFS_ILOCK_EXCL); 672 + xfs_flush_inodes(ip); 673 + xfs_ilock(ip, XFS_ILOCK_EXCL); 674 + 675 + flushed = 1; 676 error = 0; 677 + prealloc = 0; 678 goto retry; 679 } 680
+1 -2
fs/xfs/xfs_iomap.h
··· 40 BMAPI_IGNSTATE = (1 << 4), /* ignore unwritten state on read */ 41 BMAPI_DIRECT = (1 << 5), /* direct instead of buffered write */ 42 BMAPI_MMAP = (1 << 6), /* allocate for mmap write */ 43 - BMAPI_SYNC = (1 << 7), /* sync write to flush delalloc space */ 44 - BMAPI_TRYLOCK = (1 << 8), /* non-blocking request */ 45 } bmapi_flags_t; 46 47
··· 40 BMAPI_IGNSTATE = (1 << 4), /* ignore unwritten state on read */ 41 BMAPI_DIRECT = (1 << 5), /* direct instead of buffered write */ 42 BMAPI_MMAP = (1 << 6), /* allocate for mmap write */ 43 + BMAPI_TRYLOCK = (1 << 7), /* non-blocking request */ 44 } bmapi_flags_t; 45 46