Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ext4: refactor duplicated block placement code

I found that ext4_ext_find_goal() and ext4_find_near()
share the same code for returning a coloured start block
based on i_block_group.

We can refactor this into a common function so that they
don't diverge in the future.

Note that this slightly changes the goal returned by
ext4_ext_find_goal(): the old code returned
bg_start + colour + block, while the common function returns the
coloured group start without the "+ block" offset.

Thanks to adilger for suggesting the new function name.

Signed-off-by: Eric Sandeen <sandeen@redhat.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>

authored by

Eric Sandeen and committed by
Theodore Ts'o
f86186b4 dae1e52c

+51 -63
+48
fs/ext4/balloc.c
/**
 * ext4_inode_to_goal_block - return a hint for block allocation
 * @inode: inode for block allocation
 *
 * Return the ideal location to start allocating blocks for a
 * newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_group_t block_group;
	ext4_grpblk_t colour;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;

	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	/*
	 * Spread concurrent allocators within the group by a PID-derived
	 * "colour" offset; clamp the stride when the group is truncated
	 * by the end of the filesystem.
	 */
	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}
+1
fs/ext4/ext4.h
··· 1743 1743 struct ext4_group_desc *desc); 1744 1744 #define ext4_free_blocks_after_init(sb, group, desc) \ 1745 1745 ext4_init_block_bitmap(sb, NULL, group, desc) 1746 + ext4_fsblk_t ext4_inode_to_goal_block(struct inode *); 1746 1747 1747 1748 /* dir.c */ 1748 1749 extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
+1 -36
fs/ext4/extents.c
··· 114 114 struct ext4_ext_path *path, 115 115 ext4_lblk_t block) 116 116 { 117 - struct ext4_inode_info *ei = EXT4_I(inode); 118 - ext4_fsblk_t bg_start; 119 - ext4_fsblk_t last_block; 120 - ext4_grpblk_t colour; 121 - ext4_group_t block_group; 122 - int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb)); 123 117 int depth; 124 118 125 119 if (path) { ··· 155 161 } 156 162 157 163 /* OK. use inode's group */ 158 - block_group = ei->i_block_group; 159 - if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) { 160 - /* 161 - * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME 162 - * block groups per flexgroup, reserve the first block 163 - * group for directories and special files. Regular 164 - * files will start at the second block group. This 165 - * tends to speed up directory access and improves 166 - * fsck times. 167 - */ 168 - block_group &= ~(flex_size-1); 169 - if (S_ISREG(inode->i_mode)) 170 - block_group++; 171 - } 172 - bg_start = ext4_group_first_block_no(inode->i_sb, block_group); 173 - last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1; 174 - 175 - /* 176 - * If we are doing delayed allocation, we don't need take 177 - * colour into account. 178 - */ 179 - if (test_opt(inode->i_sb, DELALLOC)) 180 - return bg_start; 181 - 182 - if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block) 183 - colour = (current->pid % 16) * 184 - (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16); 185 - else 186 - colour = (current->pid % 16) * ((last_block - bg_start) / 16); 187 - return bg_start + colour + block; 164 + return ext4_inode_to_goal_block(inode); 188 165 } 189 166 190 167 /*
+1 -27
fs/ext4/indirect.c
··· 207 207 struct ext4_inode_info *ei = EXT4_I(inode); 208 208 __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data; 209 209 __le32 *p; 210 - ext4_fsblk_t bg_start; 211 - ext4_fsblk_t last_block; 212 - ext4_grpblk_t colour; 213 - ext4_group_t block_group; 214 - int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb)); 215 210 216 211 /* Try to find previous block */ 217 212 for (p = ind->p - 1; p >= start; p--) { ··· 222 227 * It is going to be referred to from the inode itself? OK, just put it 223 228 * into the same cylinder group then. 224 229 */ 225 - block_group = ei->i_block_group; 226 - if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) { 227 - block_group &= ~(flex_size-1); 228 - if (S_ISREG(inode->i_mode)) 229 - block_group++; 230 - } 231 - bg_start = ext4_group_first_block_no(inode->i_sb, block_group); 232 - last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1; 233 - 234 - /* 235 - * If we are doing delayed allocation, we don't need take 236 - * colour into account. 237 - */ 238 - if (test_opt(inode->i_sb, DELALLOC)) 239 - return bg_start; 240 - 241 - if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block) 242 - colour = (current->pid % 16) * 243 - (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16); 244 - else 245 - colour = (current->pid % 16) * ((last_block - bg_start) / 16); 246 - return bg_start + colour; 230 + return ext4_inode_to_goal_block(inode); 247 231 } 248 232 249 233 /**