Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
ext4: add missing ext4_journal_stop()
ext4: ext4_find_next_zero_bit needs an aligned address on some arch
ext4: set EXT4_EXTENTS_FL only for directory and regular files
ext4: Don't mark filesystem error if fallocate fails
ext4: Fix BUG when writing to an uninitialized extent
ext4: Don't use ext4_dec_count() if not needed
ext4: modify block allocation algorithm for the last group
ext4: Don't claim block from group which has corrupt bitmap
ext4: Get journal write access before modifying the extent tree
ext4: Fix memory and buffer head leak in callers to ext4_ext_find_extent()
ext4: Don't leave behind a half-created inode if ext4_mkdir() fails
ext4: Fix kernel BUG at fs/ext4/mballoc.c:910!
ext4: Fix locking hierarchy violation in ext4_fallocate()
Remove incorrect BKL comments in ext4

+177 -67
+1 -1
fs/ext4/dir.c
··· 46 46 #ifdef CONFIG_COMPAT 47 47 .compat_ioctl = ext4_compat_ioctl, 48 48 #endif 49 - .fsync = ext4_sync_file, /* BKL held */ 49 + .fsync = ext4_sync_file, 50 50 .release = ext4_release_dir, 51 51 }; 52 52
+40 -19
fs/ext4/extents.c
··· 148 148 { 149 149 struct ext4_inode_info *ei = EXT4_I(inode); 150 150 ext4_fsblk_t bg_start; 151 + ext4_fsblk_t last_block; 151 152 ext4_grpblk_t colour; 152 153 int depth; 153 154 ··· 170 169 /* OK. use inode's group */ 171 170 bg_start = (ei->i_block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) + 172 171 le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block); 173 - colour = (current->pid % 16) * 172 + last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1; 173 + 174 + if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block) 175 + colour = (current->pid % 16) * 174 176 (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16); 177 + else 178 + colour = (current->pid % 16) * ((last_block - bg_start) / 16); 175 179 return bg_start + colour + block; 176 180 } 177 181 ··· 355 349 #define ext4_ext_show_leaf(inode,path) 356 350 #endif 357 351 358 - static void ext4_ext_drop_refs(struct ext4_ext_path *path) 352 + void ext4_ext_drop_refs(struct ext4_ext_path *path) 359 353 { 360 354 int depth = path->p_depth; 361 355 int i; ··· 2174 2168 newblock = iblock - ee_block + ext_pblock(ex); 2175 2169 ex2 = ex; 2176 2170 2171 + err = ext4_ext_get_access(handle, inode, path + depth); 2172 + if (err) 2173 + goto out; 2174 + 2177 2175 /* ex1: ee_block to iblock - 1 : uninitialized */ 2178 2176 if (iblock > ee_block) { 2179 2177 ex1 = ex; ··· 2210 2200 newdepth = ext_depth(inode); 2211 2201 if (newdepth != depth) { 2212 2202 depth = newdepth; 2213 - path = ext4_ext_find_extent(inode, iblock, NULL); 2203 + ext4_ext_drop_refs(path); 2204 + path = ext4_ext_find_extent(inode, iblock, path); 2214 2205 if (IS_ERR(path)) { 2215 2206 err = PTR_ERR(path); 2216 - path = NULL; 2217 2207 goto out; 2218 2208 } 2219 2209 eh = path[depth].p_hdr; 2220 2210 ex = path[depth].p_ext; 2221 2211 if (ex2 != &newex) 2222 2212 ex2 = ex; 2213 + 2214 + err = ext4_ext_get_access(handle, inode, path + depth); 2215 + if (err) 2216 + goto out; 2223 2217 } 2224 2218 allocated = max_blocks; 2225 2219 } ··· 
2244 2230 ex2->ee_len = cpu_to_le16(allocated); 2245 2231 if (ex2 != ex) 2246 2232 goto insert; 2247 - err = ext4_ext_get_access(handle, inode, path + depth); 2248 - if (err) 2249 - goto out; 2250 2233 /* 2251 2234 * New (initialized) extent starts from the first block 2252 2235 * in the current extent. i.e., ex2 == ex ··· 2287 2276 } 2288 2277 2289 2278 /* 2279 + * Block allocation/map/preallocation routine for extents based files 2280 + * 2281 + * 2290 2282 * Need to be called with 2291 2283 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block 2292 2284 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem) 2285 + * 2286 + * return > 0, number of of blocks already mapped/allocated 2287 + * if create == 0 and these are pre-allocated blocks 2288 + * buffer head is unmapped 2289 + * otherwise blocks are mapped 2290 + * 2291 + * return = 0, if plain look up failed (blocks have not been allocated) 2292 + * buffer head is unmapped 2293 + * 2294 + * return < 0, error case. 2293 2295 */ 2294 2296 int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, 2295 2297 ext4_lblk_t iblock, ··· 2647 2623 * modify 1 super block, 1 block bitmap and 1 group descriptor. 
2648 2624 */ 2649 2625 credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + 3; 2650 - down_write((&EXT4_I(inode)->i_data_sem)); 2626 + mutex_lock(&inode->i_mutex); 2651 2627 retry: 2652 2628 while (ret >= 0 && ret < max_blocks) { 2653 2629 block = block + ret; ··· 2658 2634 break; 2659 2635 } 2660 2636 2661 - ret = ext4_ext_get_blocks(handle, inode, block, 2637 + ret = ext4_get_blocks_wrap(handle, inode, block, 2662 2638 max_blocks, &map_bh, 2663 2639 EXT4_CREATE_UNINITIALIZED_EXT, 0); 2664 - WARN_ON(ret <= 0); 2665 2640 if (ret <= 0) { 2666 - ext4_error(inode->i_sb, "ext4_fallocate", 2667 - "ext4_ext_get_blocks returned error: " 2668 - "inode#%lu, block=%u, max_blocks=%lu", 2641 + #ifdef EXT4FS_DEBUG 2642 + WARN_ON(ret <= 0); 2643 + printk(KERN_ERR "%s: ext4_ext_get_blocks " 2644 + "returned error inode#%lu, block=%u, " 2645 + "max_blocks=%lu", __func__, 2669 2646 inode->i_ino, block, max_blocks); 2670 - ret = -EIO; 2647 + #endif 2671 2648 ext4_mark_inode_dirty(handle, inode); 2672 2649 ret2 = ext4_journal_stop(handle); 2673 2650 break; ··· 2705 2680 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 2706 2681 goto retry; 2707 2682 2708 - up_write((&EXT4_I(inode)->i_data_sem)); 2709 2683 /* 2710 2684 * Time to update the file size. 2711 2685 * Update only when preallocation was requested beyond the file size. 
··· 2716 2692 * if no error, we assume preallocation succeeded 2717 2693 * completely 2718 2694 */ 2719 - mutex_lock(&inode->i_mutex); 2720 2695 i_size_write(inode, offset + len); 2721 2696 EXT4_I(inode)->i_disksize = i_size_read(inode); 2722 - mutex_unlock(&inode->i_mutex); 2723 2697 } else if (ret < 0 && nblocks) { 2724 2698 /* Handle partial allocation scenario */ 2725 2699 loff_t newsize; 2726 2700 2727 - mutex_lock(&inode->i_mutex); 2728 2701 newsize = (nblocks << blkbits) + i_size_read(inode); 2729 2702 i_size_write(inode, EXT4_BLOCK_ALIGN(newsize, blkbits)); 2730 2703 EXT4_I(inode)->i_disksize = i_size_read(inode); 2731 - mutex_unlock(&inode->i_mutex); 2732 2704 } 2733 2705 } 2734 2706 2707 + mutex_unlock(&inode->i_mutex); 2735 2708 return ret > 0 ? ret2 : ret; 2736 2709 }
+15 -7
fs/ext4/ialloc.c
··· 702 702 ei->i_dir_start_lookup = 0; 703 703 ei->i_disksize = 0; 704 704 705 - ei->i_flags = EXT4_I(dir)->i_flags & ~EXT4_INDEX_FL; 705 + /* 706 + * Don't inherit extent flag from directory. We set extent flag on 707 + * newly created directory and file only if -o extent mount option is 708 + * specified 709 + */ 710 + ei->i_flags = EXT4_I(dir)->i_flags & ~(EXT4_INDEX_FL|EXT4_EXTENTS_FL); 706 711 if (S_ISLNK(mode)) 707 712 ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL); 708 713 /* dirsync only applies to directories */ ··· 750 745 goto fail_free_drop; 751 746 } 752 747 if (test_opt(sb, EXTENTS)) { 753 - EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL; 754 - ext4_ext_tree_init(handle, inode); 755 - err = ext4_update_incompat_feature(handle, sb, 756 - EXT4_FEATURE_INCOMPAT_EXTENTS); 757 - if (err) 758 - goto fail; 748 + /* set extent flag only for directory and file */ 749 + if (S_ISDIR(mode) || S_ISREG(mode)) { 750 + EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL; 751 + ext4_ext_tree_init(handle, inode); 752 + err = ext4_update_incompat_feature(handle, sb, 753 + EXT4_FEATURE_INCOMPAT_EXTENTS); 754 + if (err) 755 + goto fail; 756 + } 759 757 } 760 758 761 759 ext4_debug("allocating inode %lu\n", inode->i_ino);
+51 -5
fs/ext4/inode.c
··· 403 403 __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data; 404 404 __le32 *p; 405 405 ext4_fsblk_t bg_start; 406 + ext4_fsblk_t last_block; 406 407 ext4_grpblk_t colour; 407 408 408 409 /* Try to find previous block */ ··· 421 420 * into the same cylinder group then. 422 421 */ 423 422 bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group); 424 - colour = (current->pid % 16) * 423 + last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1; 424 + 425 + if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block) 426 + colour = (current->pid % 16) * 425 427 (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16); 428 + else 429 + colour = (current->pid % 16) * ((last_block - bg_start) / 16); 426 430 return bg_start + colour; 427 431 } 428 432 ··· 774 768 * 775 769 * `handle' can be NULL if create == 0. 776 770 * 777 - * The BKL may not be held on entry here. Be sure to take it early. 778 771 * return > 0, # of blocks mapped or allocated. 779 772 * return = 0, if plain lookup failed. 780 773 * return < 0, error case. ··· 908 903 */ 909 904 #define DIO_CREDITS 25 910 905 906 + 907 + /* 908 + * 909 + * 910 + * ext4_ext4 get_block() wrapper function 911 + * It will do a look up first, and returns if the blocks already mapped. 912 + * Otherwise it takes the write lock of the i_data_sem and allocate blocks 913 + * and store the allocated blocks in the result buffer head and mark it 914 + * mapped. 915 + * 916 + * If file type is extents based, it will call ext4_ext_get_blocks(), 917 + * Otherwise, call with ext4_get_blocks_handle() to handle indirect mapping 918 + * based files 919 + * 920 + * On success, it returns the number of blocks being mapped or allocate. 921 + * if create==0 and the blocks are pre-allocated and uninitialized block, 922 + * the result buffer head is unmapped. If the create ==1, it will make sure 923 + * the buffer head is mapped. 
924 + * 925 + * It returns 0 if plain look up failed (blocks have not been allocated), in 926 + * that casem, buffer head is unmapped 927 + * 928 + * It returns the error in case of allocation failure. 929 + */ 911 930 int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block, 912 931 unsigned long max_blocks, struct buffer_head *bh, 913 932 int create, int extend_disksize) 914 933 { 915 934 int retval; 935 + 936 + clear_buffer_mapped(bh); 937 + 916 938 /* 917 939 * Try to see if we can get the block without requesting 918 940 * for new file system block. ··· 953 921 inode, block, max_blocks, bh, 0, 0); 954 922 } 955 923 up_read((&EXT4_I(inode)->i_data_sem)); 956 - if (!create || (retval > 0)) 924 + 925 + /* If it is only a block(s) look up */ 926 + if (!create) 957 927 return retval; 958 928 959 929 /* 960 - * We need to allocate new blocks which will result 961 - * in i_data update 930 + * Returns if the blocks have already allocated 931 + * 932 + * Note that if blocks have been preallocated 933 + * ext4_ext_get_block() returns th create = 0 934 + * with buffer head unmapped. 935 + */ 936 + if (retval > 0 && buffer_mapped(bh)) 937 + return retval; 938 + 939 + /* 940 + * New blocks allocate and/or writing to uninitialized extent 941 + * will possibly result in updating i_data, so we take 942 + * the write lock of i_data_sem, and call get_blocks() 943 + * with create == 1 flag. 962 944 */ 963 945 down_write((&EXT4_I(inode)->i_data_sem)); 964 946 /*
+56 -24
fs/ext4/mballoc.c
··· 627 627 return block; 628 628 } 629 629 630 + static inline void *mb_correct_addr_and_bit(int *bit, void *addr) 631 + { 630 632 #if BITS_PER_LONG == 64 631 - #define mb_correct_addr_and_bit(bit, addr) \ 632 - { \ 633 - bit += ((unsigned long) addr & 7UL) << 3; \ 634 - addr = (void *) ((unsigned long) addr & ~7UL); \ 635 - } 633 + *bit += ((unsigned long) addr & 7UL) << 3; 634 + addr = (void *) ((unsigned long) addr & ~7UL); 636 635 #elif BITS_PER_LONG == 32 637 - #define mb_correct_addr_and_bit(bit, addr) \ 638 - { \ 639 - bit += ((unsigned long) addr & 3UL) << 3; \ 640 - addr = (void *) ((unsigned long) addr & ~3UL); \ 641 - } 636 + *bit += ((unsigned long) addr & 3UL) << 3; 637 + addr = (void *) ((unsigned long) addr & ~3UL); 642 638 #else 643 639 #error "how many bits you are?!" 644 640 #endif 641 + return addr; 642 + } 645 643 646 644 static inline int mb_test_bit(int bit, void *addr) 647 645 { ··· 647 649 * ext4_test_bit on architecture like powerpc 648 650 * needs unsigned long aligned address 649 651 */ 650 - mb_correct_addr_and_bit(bit, addr); 652 + addr = mb_correct_addr_and_bit(&bit, addr); 651 653 return ext4_test_bit(bit, addr); 652 654 } 653 655 654 656 static inline void mb_set_bit(int bit, void *addr) 655 657 { 656 - mb_correct_addr_and_bit(bit, addr); 658 + addr = mb_correct_addr_and_bit(&bit, addr); 657 659 ext4_set_bit(bit, addr); 658 660 } 659 661 660 662 static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr) 661 663 { 662 - mb_correct_addr_and_bit(bit, addr); 664 + addr = mb_correct_addr_and_bit(&bit, addr); 663 665 ext4_set_bit_atomic(lock, bit, addr); 664 666 } 665 667 666 668 static inline void mb_clear_bit(int bit, void *addr) 667 669 { 668 - mb_correct_addr_and_bit(bit, addr); 670 + addr = mb_correct_addr_and_bit(&bit, addr); 669 671 ext4_clear_bit(bit, addr); 670 672 } 671 673 672 674 static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr) 673 675 { 674 - mb_correct_addr_and_bit(bit, addr); 
676 + addr = mb_correct_addr_and_bit(&bit, addr); 675 677 ext4_clear_bit_atomic(lock, bit, addr); 678 + } 679 + 680 + static inline int mb_find_next_zero_bit(void *addr, int max, int start) 681 + { 682 + int fix = 0; 683 + addr = mb_correct_addr_and_bit(&fix, addr); 684 + max += fix; 685 + start += fix; 686 + 687 + return ext4_find_next_zero_bit(addr, max, start) - fix; 688 + } 689 + 690 + static inline int mb_find_next_bit(void *addr, int max, int start) 691 + { 692 + int fix = 0; 693 + addr = mb_correct_addr_and_bit(&fix, addr); 694 + max += fix; 695 + start += fix; 696 + 697 + return ext4_find_next_bit(addr, max, start) - fix; 676 698 } 677 699 678 700 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max) ··· 924 906 unsigned short chunk; 925 907 unsigned short border; 926 908 927 - BUG_ON(len >= EXT4_BLOCKS_PER_GROUP(sb)); 909 + BUG_ON(len > EXT4_BLOCKS_PER_GROUP(sb)); 928 910 929 911 border = 2 << sb->s_blocksize_bits; 930 912 ··· 964 946 965 947 /* initialize buddy from bitmap which is aggregation 966 948 * of on-disk bitmap and preallocations */ 967 - i = ext4_find_next_zero_bit(bitmap, max, 0); 949 + i = mb_find_next_zero_bit(bitmap, max, 0); 968 950 grp->bb_first_free = i; 969 951 while (i < max) { 970 952 fragments++; 971 953 first = i; 972 - i = ext4_find_next_bit(bitmap, max, i); 954 + i = mb_find_next_bit(bitmap, max, i); 973 955 len = i - first; 974 956 free += len; 975 957 if (len > 1) ··· 977 959 else 978 960 grp->bb_counters[0]++; 979 961 if (i < max) 980 - i = ext4_find_next_zero_bit(bitmap, max, i); 962 + i = mb_find_next_zero_bit(bitmap, max, i); 981 963 } 982 964 grp->bb_fragments = fragments; 983 965 ··· 985 967 ext4_error(sb, __FUNCTION__, 986 968 "EXT4-fs: group %lu: %u blocks in bitmap, %u in gd\n", 987 969 group, free, grp->bb_free); 970 + /* 971 + * If we intent to continue, we consider group descritor 972 + * corrupt and update bb_free using bitmap value 973 + */ 988 974 grp->bb_free = free; 989 975 } 990 976 ··· 1800 
1778 buddy = mb_find_buddy(e4b, i, &max); 1801 1779 BUG_ON(buddy == NULL); 1802 1780 1803 - k = ext4_find_next_zero_bit(buddy, max, 0); 1781 + k = mb_find_next_zero_bit(buddy, max, 0); 1804 1782 BUG_ON(k >= max); 1805 1783 1806 1784 ac->ac_found++; ··· 1840 1818 i = e4b->bd_info->bb_first_free; 1841 1819 1842 1820 while (free && ac->ac_status == AC_STATUS_CONTINUE) { 1843 - i = ext4_find_next_zero_bit(bitmap, 1821 + i = mb_find_next_zero_bit(bitmap, 1844 1822 EXT4_BLOCKS_PER_GROUP(sb), i); 1845 1823 if (i >= EXT4_BLOCKS_PER_GROUP(sb)) { 1846 1824 /* 1847 - * IF we corrupt the bitmap we won't find any 1825 + * IF we have corrupt bitmap, we won't find any 1848 1826 * free blocks even though group info says we 1849 1827 * we have free blocks 1850 1828 */ ··· 1860 1838 ext4_error(sb, __FUNCTION__, "%d free blocks as per " 1861 1839 "group info. But got %d blocks\n", 1862 1840 free, ex.fe_len); 1841 + /* 1842 + * The number of free blocks differs. This mostly 1843 + * indicate that the bitmap is corrupt. So exit 1844 + * without claiming the space. 1845 + */ 1846 + break; 1863 1847 } 1864 1848 1865 1849 ext4_mb_measure_extent(ac, &ex, e4b); ··· 3768 3740 } 3769 3741 3770 3742 while (bit < end) { 3771 - bit = ext4_find_next_zero_bit(bitmap_bh->b_data, end, bit); 3743 + bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); 3772 3744 if (bit >= end) 3773 3745 break; 3774 - next = ext4_find_next_bit(bitmap_bh->b_data, end, bit); 3746 + next = mb_find_next_bit(bitmap_bh->b_data, end, bit); 3775 3747 if (next > end) 3776 3748 next = end; 3777 3749 start = group * EXT4_BLOCKS_PER_GROUP(sb) + bit + ··· 3799 3771 (unsigned long) pa->pa_len); 3800 3772 ext4_error(sb, __FUNCTION__, "free %u, pa_free %u\n", 3801 3773 free, pa->pa_free); 3774 + /* 3775 + * pa is already deleted so we use the value obtained 3776 + * from the bitmap and continue. 3777 + */ 3802 3778 } 3803 3779 atomic_add(free, &sbi->s_mb_discarded); 3804 3780 if (ac)
+5
fs/ext4/migrate.c
··· 43 43 44 44 if (IS_ERR(path)) { 45 45 retval = PTR_ERR(path); 46 + path = NULL; 46 47 goto err_out; 47 48 } 48 49 ··· 75 74 } 76 75 retval = ext4_ext_insert_extent(handle, inode, path, &newext); 77 76 err_out: 77 + if (path) { 78 + ext4_ext_drop_refs(path); 79 + kfree(path); 80 + } 78 81 lb->first_pblock = 0; 79 82 return retval; 80 83 }
+7 -11
fs/ext4/namei.c
··· 1804 1804 inode->i_fop = &ext4_dir_operations; 1805 1805 inode->i_size = EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize; 1806 1806 dir_block = ext4_bread (handle, inode, 0, 1, &err); 1807 - if (!dir_block) { 1808 - ext4_dec_count(handle, inode); /* is this nlink == 0? */ 1809 - ext4_mark_inode_dirty(handle, inode); 1810 - iput (inode); 1811 - goto out_stop; 1812 - } 1807 + if (!dir_block) 1808 + goto out_clear_inode; 1813 1809 BUFFER_TRACE(dir_block, "get_write_access"); 1814 1810 ext4_journal_get_write_access(handle, dir_block); 1815 1811 de = (struct ext4_dir_entry_2 *) dir_block->b_data; ··· 1828 1832 ext4_mark_inode_dirty(handle, inode); 1829 1833 err = ext4_add_entry (handle, dentry, inode); 1830 1834 if (err) { 1831 - inode->i_nlink = 0; 1835 + out_clear_inode: 1836 + clear_nlink(inode); 1832 1837 ext4_mark_inode_dirty(handle, inode); 1833 1838 iput (inode); 1834 1839 goto out_stop; ··· 2161 2164 dir->i_ctime = dir->i_mtime = ext4_current_time(dir); 2162 2165 ext4_update_dx_flag(dir); 2163 2166 ext4_mark_inode_dirty(handle, dir); 2164 - ext4_dec_count(handle, inode); 2167 + drop_nlink(inode); 2165 2168 if (!inode->i_nlink) 2166 2169 ext4_orphan_add(handle, inode); 2167 2170 inode->i_ctime = ext4_current_time(inode); ··· 2211 2214 err = __page_symlink(inode, symname, l, 2212 2215 mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); 2213 2216 if (err) { 2214 - ext4_dec_count(handle, inode); 2217 + clear_nlink(inode); 2215 2218 ext4_mark_inode_dirty(handle, inode); 2216 2219 iput (inode); 2217 2220 goto out_stop; ··· 2220 2223 inode->i_op = &ext4_fast_symlink_inode_operations; 2221 2224 memcpy((char*)&EXT4_I(inode)->i_data,symname,l); 2222 2225 inode->i_size = l-1; 2223 - EXT4_I(inode)->i_flags &= ~EXT4_EXTENTS_FL; 2224 2226 } 2225 2227 EXT4_I(inode)->i_disksize = inode->i_size; 2226 2228 err = ext4_add_nondir(handle, dentry, inode); ··· 2403 2407 ext4_dec_count(handle, old_dir); 2404 2408 if (new_inode) { 2405 2409 /* checked empty_dir above, can't have 
another parent, 2406 - * ext3_dec_count() won't work for many-linked dirs */ 2410 + * ext4_dec_count() won't work for many-linked dirs */ 2407 2411 new_inode->i_nlink = 0; 2408 2412 } else { 2409 2413 ext4_inc_count(handle, new_dir);
+1
fs/ext4/resize.c
··· 1037 1037 ext4_warning(sb, __FUNCTION__, 1038 1038 "multiple resizers run on filesystem!"); 1039 1039 unlock_super(sb); 1040 + ext4_journal_stop(handle); 1040 1041 err = -EBUSY; 1041 1042 goto exit_put; 1042 1043 }
+1
include/linux/ext4_fs_extents.h
··· 227 227 ext4_lblk_t *, ext4_fsblk_t *); 228 228 extern int ext4_ext_search_right(struct inode *, struct ext4_ext_path *, 229 229 ext4_lblk_t *, ext4_fsblk_t *); 230 + extern void ext4_ext_drop_refs(struct ext4_ext_path *); 230 231 #endif /* _LINUX_EXT4_EXTENTS */ 231 232