Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

vfs: add hooks for ext4's delayed allocation support

Export mpage_bio_submit() and __mpage_writepage() for the benefit of
ext4's delayed allocation support. Also change __block_write_full_page
so that, for buffers that have the BH_Delay flag set, it will call
get_block() to get the physical block allocated, just as in the
!BH_Mapped case.

Signed-off-by: Alex Tomas <alex@clusterfs.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>

Authored by Alex Tomas and committed by Theodore Ts'o.
29a814d2 87c89c23

+20 -11
+5 -2
fs/buffer.c
···
1691 1691 		 */
1692 1692 		clear_buffer_dirty(bh);
1693 1693 		set_buffer_uptodate(bh);
1694      -	} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
     1694 +	} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
     1695 +		   buffer_dirty(bh)) {
1695 1696 		WARN_ON(bh->b_size != blocksize);
1696 1697 		err = get_block(inode, block, bh, 1);
1697 1698 		if (err)
1698 1699 			goto recover;
     1700 +		clear_buffer_delay(bh);
1699 1701 		if (buffer_new(bh)) {
1700 1702 			/* blockdev mappings never come here */
1701 1703 			clear_buffer_new(bh);
···
1776 1774 	bh = head;
1777 1775 	/* Recovery: lock and submit the mapped buffers */
1778 1776 	do {
1779      -		if (buffer_mapped(bh) && buffer_dirty(bh)) {
     1777 +		if (buffer_mapped(bh) && buffer_dirty(bh) &&
     1778 +		    !buffer_delay(bh)) {
1780 1779 			lock_buffer(bh);
1781 1780 			mark_buffer_async_write(bh);
1782 1781 		} else {
+5 -9
fs/mpage.c
···
  82   82 	bio_put(bio);
  83   83 }
  84   84
  85      -static struct bio *mpage_bio_submit(int rw, struct bio *bio)
       85 +struct bio *mpage_bio_submit(int rw, struct bio *bio)
  86   86 {
  87   87 	bio->bi_end_io = mpage_end_io_read;
  88   88 	if (rw == WRITE)
···
  90   90 	submit_bio(rw, bio);
  91   91 	return NULL;
  92   92 }
       93 +EXPORT_SYMBOL(mpage_bio_submit);
  93   94
  94   95 static struct bio *
  95   96 mpage_alloc(struct block_device *bdev,
···
 436  435  * written, so it can intelligently allocate a suitably-sized BIO.  For now,
 437  436  * just allocate full-size (16-page) BIOs.
 438  437  */
 439      -struct mpage_data {
 440      -	struct bio *bio;
 441      -	sector_t last_block_in_bio;
 442      -	get_block_t *get_block;
 443      -	unsigned use_writepage;
 444      -};
 445  438
 446      -static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
 447      -			void *data)
      439 +int __mpage_writepage(struct page *page, struct writeback_control *wbc,
      440 +		void *data)
 448  441 {
 449  442 	struct mpage_data *mpd = data;
 450  443 	struct bio *bio = mpd->bio;
···
 646  651 	mpd->bio = bio;
 647  652 	return ret;
 648  653 }
      654 +EXPORT_SYMBOL(__mpage_writepage);
 649  655
 650  656 /**
 651  657  * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
+10
include/linux/mpage.h
···
  11   11  */
  12   12 #ifdef CONFIG_BLOCK
  13   13
       14 +struct mpage_data {
       15 +	struct bio *bio;
       16 +	sector_t last_block_in_bio;
       17 +	get_block_t *get_block;
       18 +	unsigned use_writepage;
       19 +};
       20 +
  14   21 struct writeback_control;
  15   22
       23 +struct bio *mpage_bio_submit(int rw, struct bio *bio);
  16   24 int mpage_readpages(struct address_space *mapping, struct list_head *pages,
  17   25 		unsigned nr_pages, get_block_t get_block);
  18   26 int mpage_readpage(struct page *page, get_block_t get_block);
       27 +int __mpage_writepage(struct page *page, struct writeback_control *wbc,
       28 +		void *data);
  19   29 int mpage_writepages(struct address_space *mapping,
  20   30 		struct writeback_control *wbc, get_block_t get_block);
  21   31 int mpage_writepage(struct page *page, get_block_t *get_block,