Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ext4: Properly initialize the buffer_head state

These struct buffer_heads are allocated on the stack (and hence are
initialized with stack garbage). They are only used to call a
get_blocks() function, so that's mostly OK, but b_state must be
initialized to be 0 so we don't have any unexpected BH_* flags set by
accident, such as BH_Unwritten or BH_Delay.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>

Authored by Aneesh Kumar K.V and committed by Theodore Ts'o.
79ffab34 9fa7eb28

+19 -3
+1
fs/ext4/extents.c
···
3150 3150        ret = PTR_ERR(handle);
3151 3151        break;
3152 3152    }
     3153 +  map_bh.b_state = 0;
3153 3154    ret = ext4_get_blocks_wrap(handle, inode, block,
3154 3155                               max_blocks, &map_bh,
3155 3156                               EXT4_CREATE_UNINITIALIZED_EXT, 0, 0);
+14 -1
fs/ext4/inode.c
···
2055 2055    if ((mpd->b_state & (1 << BH_Mapped)) &&
2056 2056        !(mpd->b_state & (1 << BH_Delay)))
2057 2057        return 0;
2058      -  new.b_state = mpd->b_state;
     2058 +  /*
     2059 +   * We need to make sure the BH_Delay flag is passed down to
     2060 +   * ext4_da_get_block_write(), since it calls
     2061 +   * ext4_get_blocks_wrap() with the EXT4_DELALLOC_RSVED flag.
     2062 +   * This flag causes ext4_get_blocks_wrap() to call
     2063 +   * ext4_da_update_reserve_space() if the passed buffer head
     2064 +   * has the BH_Delay flag set.  In the future, once we clean up
     2065 +   * the interfaces to ext4_get_blocks_wrap(), we should pass in
     2066 +   * a separate flag which requests that the delayed allocation
     2067 +   * statistics should be updated, instead of depending on the
     2068 +   * state information getting passed down via the map_bh's
     2069 +   * state bitmasks plus the magic EXT4_DELALLOC_RSVED flag.
     2070 +   */
     2071 +  new.b_state = mpd->b_state & (1 << BH_Delay);
2059 2072    new.b_blocknr = 0;
2060 2073    new.b_size = mpd->b_size;
2061 2074    next = mpd->b_blocknr;
+4 -2
fs/mpage.c
···
379  379     struct buffer_head map_bh;
380  380     unsigned long first_logical_block = 0;
381  381
382       -  clear_buffer_mapped(&map_bh);
     382  +  map_bh.b_state = 0;
     383  +  map_bh.b_size = 0;
383  384     for (page_idx = 0; page_idx < nr_pages; page_idx++) {
384  385         struct page *page = list_entry(pages->prev, struct page, lru);
385  386
···
413  412     struct buffer_head map_bh;
414  413     unsigned long first_logical_block = 0;
415  414
416       -  clear_buffer_mapped(&map_bh);
     415  +  map_bh.b_state = 0;
     416  +  map_bh.b_size = 0;
417  417     bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
418  418                             &map_bh, &first_logical_block, get_block);
419  419     if (bio)