Merge tag 'ext4_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 fixes from Ted Ts'o:
"Fix a number of ext4 bugs in fast_commit, inline data, and delayed
allocation.

Also fix error handling code paths in ext4_dx_readdir() and
ext4_fill_super().

Finally, avoid grabbing a journal handle in the delayed allocation
write path in the common cases where we are overwriting a pre-existing
block or appending to an inode"

* tag 'ext4_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
ext4: recheck buffer uptodate bit under buffer lock
ext4: fix potential infinite loop in ext4_dx_readdir()
ext4: flush s_error_work before journal destroy in ext4_fill_super
ext4: fix loff_t overflow in ext4_max_bitmap_size()
ext4: fix reserved space counter leakage
ext4: limit the number of blocks in one ADD_RANGE TLV
ext4: enforce buffer head state assertion in ext4_da_map_blocks
ext4: remove extent cache entries when truncating inline data
ext4: drop unnecessary journal handle in delalloc write
ext4: factor out write end code of inline file
ext4: correct the error path of ext4_write_inline_data_end()
ext4: check and update i_disksize properly
ext4: add error checking to ext4_ext_replay_set_iblocks()

fs/ext4/dir.c (+3 -3)
···
 	struct dir_private_info *info = file->private_data;
 	struct inode *inode = file_inode(file);
 	struct fname *fname;
-	int ret;
+	int ret = 0;
 
 	if (!info) {
 		info = ext4_htree_create_dir_info(file, ctx->pos);
···
 						   info->curr_minor_hash,
 						   &info->next_hash);
 		if (ret < 0)
-			return ret;
+			goto finished;
 		if (ret == 0) {
 			ctx->pos = ext4_get_htree_eof(file);
 			break;
···
 	}
 finished:
 	info->last_pos = ctx->pos;
-	return 0;
+	return ret < 0 ? ret : 0;
 }
 
 static int ext4_release_dir(struct inode *inode, struct file *filp)
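The point of the ext4_dx_readdir() change is where the error surfaces: the failure must still pass through the "finished:" bookkeeping (saving info->last_pos) instead of returning early, otherwise every retry resumes from a stale position and a persistently failing directory never makes progress. A minimal userspace model of that shape (hypothetical names throughout; none of this is the actual ext4 API):

	#include <errno.h>
	#include <stdio.h>

	static long long last_pos;	/* models info->last_pos */

	/* Models one readdir pass over a corrupted htree where the fill
	 * step fails persistently. The fix routes the error through
	 * "finished:" so the resume position is saved and the error is
	 * still returned to the caller. */
	static int dx_readdir(long long *pos)
	{
		int ret;

		*pos += 16;		/* entries emitted before the failure */
		ret = -EIO;		/* models ext4_htree_fill_tree() failing */
		if (ret < 0)
			goto finished;	/* buggy version did "return ret;" here,
					 * skipping the bookkeeping below */
		/* on success, more entries would be emitted here */
	finished:
		last_pos = *pos;	/* must run on success AND failure */
		return ret < 0 ? ret : 0;
	}

	int main(void)
	{
		long long pos = 0;
		int ret = dx_readdir(&pos);

		printf("ret=%d, resume position saved: %lld\n", ret, last_pos);
		return 0;
	}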
fs/ext4/ext4.h (-3)
···
 					       unsigned flags,
 					       struct page **pagep,
 					       void **fsdata);
-extern int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
-					 unsigned len, unsigned copied,
-					 struct page *page);
 extern int ext4_try_add_inline_entry(handle_t *handle,
 				     struct ext4_filename *fname,
 				     struct inode *dir, struct inode *inode);
fs/ext4/extents.c (+14 -5)
···
 }
 
 /* Check if *cur is a hole and if it is, skip it */
-static void skip_hole(struct inode *inode, ext4_lblk_t *cur)
+static int skip_hole(struct inode *inode, ext4_lblk_t *cur)
 {
 	int ret;
 	struct ext4_map_blocks map;
···
 	map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;
 
 	ret = ext4_map_blocks(NULL, inode, &map, 0);
+	if (ret < 0)
+		return ret;
 	if (ret != 0)
-		return;
+		return 0;
 	*cur = *cur + map.m_len;
+	return 0;
 }
···
 	 * iblocks by total number of differences found.
 	 */
 	cur = 0;
-	skip_hole(inode, &cur);
+	ret = skip_hole(inode, &cur);
+	if (ret < 0)
+		goto out;
 	path = ext4_find_extent(inode, cur, NULL, 0);
 	if (IS_ERR(path))
 		goto out;
···
 		}
 		cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
 					ext4_ext_get_actual_len(ex));
-		skip_hole(inode, &cur);
-
+		ret = skip_hole(inode, &cur);
+		if (ret < 0) {
+			ext4_ext_drop_refs(path);
+			kfree(path);
+			break;
+		}
 		path2 = ext4_find_extent(inode, cur, NULL, 0);
 		if (IS_ERR(path2)) {
 			ext4_ext_drop_refs(path);
fs/ext4/fast_commit.c (+6)
···
 				    sizeof(lrange), (u8 *)&lrange, crc))
 			return -ENOSPC;
 	} else {
+		unsigned int max = (map.m_flags & EXT4_MAP_UNWRITTEN) ?
+			EXT_UNWRITTEN_MAX_LEN : EXT_INIT_MAX_LEN;
+
+		/* Limit the number of blocks in one extent */
+		map.m_len = min(max, map.m_len);
+
 		fc_ext.fc_ino = cpu_to_le32(inode->i_ino);
 		ex = (struct ext4_extent *)&fc_ext.fc_ex;
 		ex->ee_block = cpu_to_le32(map.m_lblk);
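The clamp exists because of how an on-disk ext4 extent encodes its length: ee_len is a 16-bit field whose high bit marks the extent unwritten, so a single extent (and hence one ADD_RANGE TLV) can describe at most 32768 initialized blocks or 32767 unwritten ones. A standalone sketch of the same clamp (the two constants match fs/ext4/ext4_extents.h; the helper and its arguments are illustrative stand-ins for map.m_len and EXT4_MAP_UNWRITTEN):

	#include <stdio.h>

	/* From fs/ext4/ext4_extents.h: ee_len is 16 bits, and the high bit
	 * flags an unwritten extent. */
	#define EXT_INIT_MAX_LEN	(1U << 15)
	#define EXT_UNWRITTEN_MAX_LEN	(EXT_INIT_MAX_LEN - 1)

	static unsigned int clamp_extent_len(unsigned int m_len, int unwritten)
	{
		unsigned int max = unwritten ? EXT_UNWRITTEN_MAX_LEN
					     : EXT_INIT_MAX_LEN;

		return m_len < max ? m_len : max;
	}

	int main(void)
	{
		/* A 100000-block mapping must be split across several TLVs. */
		printf("%u\n", clamp_extent_len(100000, 0));	/* 32768 */
		printf("%u\n", clamp_extent_len(100000, 1));	/* 32767 */
		return 0;
	}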
fs/ext4/inline.c (+85 -65)
···
 #include <linux/iomap.h>
 #include <linux/fiemap.h>
 #include <linux/iversion.h>
+#include <linux/backing-dev.h>
 
 #include "ext4_jbd2.h"
 #include "ext4.h"
···
 int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
 			       unsigned copied, struct page *page)
 {
-	int ret, no_expand;
+	handle_t *handle = ext4_journal_current_handle();
+	int no_expand;
 	void *kaddr;
 	struct ext4_iloc iloc;
+	int ret = 0, ret2;
 
-	if (unlikely(copied < len)) {
-		if (!PageUptodate(page)) {
-			copied = 0;
+	if (unlikely(copied < len) && !PageUptodate(page))
+		copied = 0;
+
+	if (likely(copied)) {
+		ret = ext4_get_inode_loc(inode, &iloc);
+		if (ret) {
+			unlock_page(page);
+			put_page(page);
+			ext4_std_error(inode->i_sb, ret);
 			goto out;
 		}
-	}
+		ext4_write_lock_xattr(inode, &no_expand);
+		BUG_ON(!ext4_has_inline_data(inode));
 
-	ret = ext4_get_inode_loc(inode, &iloc);
-	if (ret) {
-		ext4_std_error(inode->i_sb, ret);
-		copied = 0;
-		goto out;
-	}
+		/*
+		 * ei->i_inline_off may have changed since
+		 * ext4_write_begin() called
+		 * ext4_try_to_write_inline_data()
+		 */
+		(void) ext4_find_inline_data_nolock(inode);
 
-	ext4_write_lock_xattr(inode, &no_expand);
-	BUG_ON(!ext4_has_inline_data(inode));
+		kaddr = kmap_atomic(page);
+		ext4_write_inline_data(inode, &iloc, kaddr, pos, copied);
+		kunmap_atomic(kaddr);
+		SetPageUptodate(page);
+		/* clear page dirty so that writepages wouldn't work for us. */
+		ClearPageDirty(page);
+
+		ext4_write_unlock_xattr(inode, &no_expand);
+		brelse(iloc.bh);
+
+		/*
+		 * It's important to update i_size while still holding page
+		 * lock: page writeout could otherwise come in and zero
+		 * beyond i_size.
+		 */
+		ext4_update_inode_size(inode, pos + copied);
+	}
+	unlock_page(page);
+	put_page(page);
 
 	/*
-	 * ei->i_inline_off may have changed since ext4_write_begin()
-	 * called ext4_try_to_write_inline_data()
+	 * Don't mark the inode dirty under page lock. First, it unnecessarily
+	 * makes the holding time of page lock longer. Second, it forces lock
+	 * ordering of page lock and transaction start for journaling
+	 * filesystems.
 	 */
-	(void) ext4_find_inline_data_nolock(inode);
-
-	kaddr = kmap_atomic(page);
-	ext4_write_inline_data(inode, &iloc, kaddr, pos, len);
-	kunmap_atomic(kaddr);
-	SetPageUptodate(page);
-	/* clear page dirty so that writepages wouldn't work for us. */
-	ClearPageDirty(page);
-
-	ext4_write_unlock_xattr(inode, &no_expand);
-	brelse(iloc.bh);
-	mark_inode_dirty(inode);
+	if (likely(copied))
+		mark_inode_dirty(inode);
 out:
-	return copied;
+	/*
+	 * If we didn't copy as much data as expected, we need to trim back
+	 * size of xattr containing inline data.
+	 */
+	if (pos + len > inode->i_size && ext4_can_truncate(inode))
+		ext4_orphan_add(handle, inode);
+
+	ret2 = ext4_journal_stop(handle);
+	if (!ret)
+		ret = ret2;
+	if (pos + len > inode->i_size) {
+		ext4_truncate_failed_write(inode);
+		/*
+		 * If truncate failed early the inode might still be
+		 * on the orphan list; we need to make sure the inode
+		 * is removed from the orphan list in that case.
+		 */
+		if (inode->i_nlink)
+			ext4_orphan_del(NULL, inode);
+	}
+	return ret ? ret : copied;
 }
 
 struct buffer_head *
···
 out:
 	brelse(iloc.bh);
 	return ret;
-}
-
-int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
-				  unsigned len, unsigned copied,
-				  struct page *page)
-{
-	int ret;
-
-	ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
-	if (ret < 0) {
-		unlock_page(page);
-		put_page(page);
-		return ret;
-	}
-	copied = ret;
-
-	/*
-	 * No need to use i_size_read() here, the i_size
-	 * cannot change under us because we hold i_mutex.
-	 *
-	 * But it's important to update i_size while still holding page lock:
-	 * page writeout could otherwise come in and zero beyond i_size.
-	 */
-	if (pos+copied > inode->i_size)
-		i_size_write(inode, pos+copied);
-	unlock_page(page);
-	put_page(page);
-
-	/*
-	 * Don't mark the inode dirty under page lock. First, it unnecessarily
-	 * makes the holding time of page lock longer. Second, it forces lock
-	 * ordering of page lock and transaction start for journaling
-	 * filesystems.
-	 */
-	mark_inode_dirty(inode);
-
-	return copied;
 }
 
 #ifdef INLINE_DIR_DEBUG
···
 	EXT4_I(inode)->i_disksize = i_size;
 
 	if (i_size < inline_size) {
+		/*
+		 * if there's inline data to truncate and this file was
+		 * converted to extents after that inline data was written,
+		 * the extent status cache must be cleared to avoid leaving
+		 * behind stale delayed allocated extent entries
+		 */
+		if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
+retry:
+			err = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
+			if (err == -ENOMEM) {
+				cond_resched();
+				congestion_wait(BLK_RW_ASYNC, HZ/50);
+				goto retry;
+			}
+			if (err)
+				goto out_error;
+		}
+
 		/* Clear the content in the xattr space. */
 		if (inline_size > EXT4_MIN_INLINE_DATA_SIZE) {
 			if ((err = ext4_xattr_ibody_find(inode, &i, &is)) != 0)
fs/ext4/inode.c (+60 -118)
···
 	loff_t old_size = inode->i_size;
 	int ret = 0, ret2;
 	int i_size_changed = 0;
-	int inline_data = ext4_has_inline_data(inode);
 	bool verity = ext4_verity_in_progress(inode);
 
 	trace_ext4_write_end(inode, pos, len, copied);
-	if (inline_data) {
-		ret = ext4_write_inline_data_end(inode, pos, len,
-						 copied, page);
-		if (ret < 0) {
-			unlock_page(page);
-			put_page(page);
-			goto errout;
-		}
-		copied = ret;
-	} else
-		copied = block_write_end(file, mapping, pos,
-					 len, copied, page, fsdata);
+
+	if (ext4_has_inline_data(inode))
+		return ext4_write_inline_data_end(inode, pos, len, copied, page);
+
+	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
 	/*
 	 * it's important to update i_size while still holding page lock:
 	 * page writeout could otherwise come in and zero beyond i_size.
···
 	 * ordering of page lock and transaction start for journaling
 	 * filesystems.
 	 */
-	if (i_size_changed || inline_data)
+	if (i_size_changed)
 		ret = ext4_mark_inode_dirty(handle, inode);
 
 	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
···
 	 * inode->i_size. So truncate them
 	 */
 	ext4_orphan_add(handle, inode);
-errout:
+
 	ret2 = ext4_journal_stop(handle);
 	if (!ret)
 		ret = ret2;
···
 	int partial = 0;
 	unsigned from, to;
 	int size_changed = 0;
-	int inline_data = ext4_has_inline_data(inode);
 	bool verity = ext4_verity_in_progress(inode);
 
 	trace_ext4_journalled_write_end(inode, pos, len, copied);
···
 
 	BUG_ON(!ext4_handle_valid(handle));
 
-	if (inline_data) {
-		ret = ext4_write_inline_data_end(inode, pos, len,
-						 copied, page);
-		if (ret < 0) {
-			unlock_page(page);
-			put_page(page);
-			goto errout;
-		}
-		copied = ret;
-	} else if (unlikely(copied < len) && !PageUptodate(page)) {
+	if (ext4_has_inline_data(inode))
+		return ext4_write_inline_data_end(inode, pos, len, copied, page);
+
+	if (unlikely(copied < len) && !PageUptodate(page)) {
 		copied = 0;
 		ext4_journalled_zero_new_buffers(handle, inode, page, from, to);
 	} else {
···
 	if (old_size < pos && !verity)
 		pagecache_isize_extended(inode, old_size, pos);
 
-	if (size_changed || inline_data) {
+	if (size_changed) {
 		ret2 = ext4_mark_inode_dirty(handle, inode);
 		if (!ret)
 			ret = ret2;
···
 	 */
 	ext4_orphan_add(handle, inode);
 
-errout:
 	ret2 = ext4_journal_stop(handle);
 	if (!ret)
 		ret = ret2;
···
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	int ret;
 	bool allocated = false;
+	bool reserved = false;
 
 	/*
 	 * If the cluster containing lblk is shared with a delayed,
···
 		ret = ext4_da_reserve_space(inode);
 		if (ret != 0)   /* ENOSPC */
 			goto errout;
+		reserved = true;
 	} else {   /* bigalloc */
 		if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
 			if (!ext4_es_scan_clu(inode,
···
 				ret = ext4_da_reserve_space(inode);
 				if (ret != 0)   /* ENOSPC */
 					goto errout;
+				reserved = true;
 			} else {
 				allocated = true;
 			}
···
 	}
 
 	ret = ext4_es_insert_delayed_block(inode, lblk, allocated);
+	if (ret && reserved)
+		ext4_da_release_space(inode, 1);
 
 errout:
 	return ret;
···
 		}
 
 		/*
-		 * Delayed extent could be allocated by fallocate.
-		 * So we need to check it.
+		 * the buffer head associated with a delayed and not unwritten
+		 * block found in the extent status cache must contain an
+		 * invalid block number and have its BH_New and BH_Delay bits
+		 * set, reflecting the state assigned when the block was
+		 * initially delayed allocated
 		 */
-		if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
-			map_bh(bh, inode->i_sb, invalid_block);
-			set_buffer_new(bh);
-			set_buffer_delay(bh);
+		if (ext4_es_is_delonly(&es)) {
+			BUG_ON(bh->b_blocknr != invalid_block);
+			BUG_ON(!buffer_new(bh));
+			BUG_ON(!buffer_delay(bh));
 			return 0;
 		}
···
 	return 0;
 }
 
-/* We always reserve for an inode update; the superblock could be there too */
-static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
-{
-	if (likely(ext4_has_feature_large_file(inode->i_sb)))
-		return 1;
-
-	if (pos + len <= 0x7fffffffULL)
-		return 1;
-
-	/* We might need to update the superblock to set LARGE_FILE */
-	return 2;
-}
-
 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
 			       loff_t pos, unsigned len, unsigned flags,
 			       struct page **pagep, void **fsdata)
···
 	struct page *page;
 	pgoff_t index;
 	struct inode *inode = mapping->host;
-	handle_t *handle;
 
 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
 		return -EIO;
···
 		return 0;
 	}
 
-	/*
-	 * grab_cache_page_write_begin() can take a long time if the
-	 * system is thrashing due to memory pressure, or if the page
-	 * is being written back. So grab it first before we start
-	 * the transaction handle. This also allows us to allocate
-	 * the page (if needed) without using GFP_NOFS.
-	 */
-retry_grab:
+retry:
 	page = grab_cache_page_write_begin(mapping, index, flags);
 	if (!page)
 		return -ENOMEM;
-	unlock_page(page);
 
-	/*
-	 * With delayed allocation, we don't log the i_disksize update
-	 * if there is delayed block allocation. But we still need
-	 * to journalling the i_disksize update if writes to the end
-	 * of file which has an already mapped buffer.
-	 */
-retry_journal:
-	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
-				ext4_da_write_credits(inode, pos, len));
-	if (IS_ERR(handle)) {
-		put_page(page);
-		return PTR_ERR(handle);
-	}
-
-	lock_page(page);
-	if (page->mapping != mapping) {
-		/* The page got truncated from under us */
-		unlock_page(page);
-		put_page(page);
-		ext4_journal_stop(handle);
-		goto retry_grab;
-	}
 	/* In case writeback began while the page was unlocked */
 	wait_for_stable_page(page);
···
 #endif
 	if (ret < 0) {
 		unlock_page(page);
-		ext4_journal_stop(handle);
+		put_page(page);
 		/*
 		 * block_write_begin may have instantiated a few blocks
 		 * outside i_size. Trim these off again. Don't need
-		 * i_size_read because we hold i_mutex.
+		 * i_size_read because we hold inode lock.
 		 */
 		if (pos + len > inode->i_size)
 			ext4_truncate_failed_write(inode);
 
 		if (ret == -ENOSPC &&
 		    ext4_should_retry_alloc(inode->i_sb, &retries))
-			goto retry_journal;
-
-		put_page(page);
+			goto retry;
 		return ret;
 	}
···
 			      struct page *page, void *fsdata)
 {
 	struct inode *inode = mapping->host;
-	int ret = 0, ret2;
-	handle_t *handle = ext4_journal_current_handle();
 	loff_t new_i_size;
 	unsigned long start, end;
 	int write_mode = (int)(unsigned long)fsdata;
···
 				      len, copied, page, fsdata);
 
 	trace_ext4_da_write_end(inode, pos, len, copied);
-	start = pos & (PAGE_SIZE - 1);
-	end = start + copied - 1;
-
-	/*
-	 * generic_write_end() will run mark_inode_dirty() if i_size
-	 * changes. So let's piggyback the i_disksize mark_inode_dirty
-	 * into that.
-	 */
-	new_i_size = pos + copied;
-	if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
-		if (ext4_has_inline_data(inode) ||
-		    ext4_da_should_update_i_disksize(page, end)) {
-			ext4_update_i_disksize(inode, new_i_size);
-			/* We need to mark inode dirty even if
-			 * new_i_size is less that inode->i_size
-			 * bu greater than i_disksize.(hint delalloc)
-			 */
-			ret = ext4_mark_inode_dirty(handle, inode);
-		}
-	}
 
 	if (write_mode != CONVERT_INLINE_DATA &&
 	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
 	    ext4_has_inline_data(inode))
-		ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
-						     page);
-	else
-		ret2 = generic_write_end(file, mapping, pos, len, copied,
-					 page, fsdata);
+		return ext4_write_inline_data_end(inode, pos, len, copied, page);
 
-	copied = ret2;
-	if (ret2 < 0)
-		ret = ret2;
-	ret2 = ext4_journal_stop(handle);
-	if (unlikely(ret2 && !ret))
-		ret = ret2;
+	start = pos & (PAGE_SIZE - 1);
+	end = start + copied - 1;
 
-	return ret ? ret : copied;
+	/*
+	 * Since we are holding inode lock, we are sure i_disksize <=
+	 * i_size. We also know that if i_disksize < i_size, there are
+	 * delalloc writes pending in the range upto i_size. If the end of
+	 * the current write is <= i_size, there's no need to touch
+	 * i_disksize since writeback will push i_disksize upto i_size
+	 * eventually. If the end of the current write is > i_size and
+	 * inside an allocated block (ext4_da_should_update_i_disksize()
+	 * check), we need to update i_disksize here as neither
+	 * ext4_writepage() nor certain ext4_writepages() paths not
+	 * allocating blocks update i_disksize.
+	 *
+	 * Note that we defer inode dirtying to generic_write_end() /
+	 * ext4_da_write_inline_data_end().
+	 */
+	new_i_size = pos + copied;
+	if (copied && new_i_size > inode->i_size &&
+	    ext4_da_should_update_i_disksize(page, end))
+		ext4_update_i_disksize(inode, new_i_size);
+
+	return generic_write_end(file, mapping, pos, len, copied, page, fsdata);
 }
 
 /*
···
 		goto has_buffer;
 
 	lock_buffer(bh);
+	if (ext4_buffer_uptodate(bh)) {
+		/* Someone brought it uptodate while we waited */
+		unlock_buffer(bh);
+		goto has_buffer;
+	}
+
 	/*
 	 * If we have all information of the inode in memory and this
 	 * is the only valid inode in the block, we need not read the
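The final inode.c hunk is the classic check/lock/recheck idiom: the unlocked fast-path test can race with another task that completes the read while we sleep on the lock, so the buffer state must be retested once the lock is held, or we would reread (and possibly clobber) an already-valid buffer. The same idiom in portable userspace C (pthreads and C11 atomics stand in for buffer-head locking; none of these names are the kernel API):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static atomic_bool uptodate;	/* models buffer_uptodate(bh) */

	static void slow_fill(void)	/* models reading the block from disk */
	{
		atomic_store(&uptodate, 1);
	}

	static void get_buffer(void)
	{
		if (atomic_load(&uptodate))	/* unlocked fast path */
			return;

		pthread_mutex_lock(&lock);
		if (atomic_load(&uptodate)) {	/* recheck: another thread may
						 * have filled the buffer while
						 * we waited for the lock */
			pthread_mutex_unlock(&lock);
			return;
		}
		slow_fill();
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		get_buffer();
		printf("uptodate=%d\n", (int)atomic_load(&uptodate));
		return 0;
	}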
fs/ext4/super.c (+15 -6)
···
 	 * constraints, it may not be safe to do it right here so we
 	 * defer superblock flushing to a workqueue.
 	 */
-	if (continue_fs)
+	if (continue_fs && journal)
 		schedule_work(&EXT4_SB(sb)->s_error_work);
 	else
 		ext4_commit_super(sb);
···
 			 true);
 		dump_stack();
 	}
+
+	if (EXT4_I(inode)->i_reserved_data_blocks)
+		ext4_msg(inode->i_sb, KERN_ERR,
+			 "Inode %lu (%p): i_reserved_data_blocks (%u) not cleared!",
+			 inode->i_ino, EXT4_I(inode),
+			 EXT4_I(inode)->i_reserved_data_blocks);
 }
 
 static void init_once(void *foo)
···
  */
 static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
 {
-	loff_t res = EXT4_NDIR_BLOCKS;
+	unsigned long long upper_limit, res = EXT4_NDIR_BLOCKS;
 	int meta_blocks;
-	loff_t upper_limit;
-	/* This is calculated to be the largest file size for a dense, block
+
+	/*
+	 * This is calculated to be the largest file size for a dense, block
 	 * mapped file such that the file's total number of 512-byte sectors,
 	 * including data and all indirect blocks, does not exceed (2^48 - 1).
 	 *
 	 * __u32 i_blocks_lo and _u16 i_blocks_high represent the total
 	 * number of 512-byte sectors of the file.
 	 */
-
 	if (!has_huge_files) {
 		/*
 		 * !has_huge_files or implies that the inode i_block field
···
 	if (res > MAX_LFS_FILESIZE)
 		res = MAX_LFS_FILESIZE;
 
-	return res;
+	return (loff_t)res;
 }
 
 static ext4_fsblk_t descriptor_loc(struct super_block *sb,
···
 	sbi->s_ea_block_cache = NULL;
 
 	if (sbi->s_journal) {
+		/* flush s_error_work before journal destroy. */
+		flush_work(&sbi->s_error_work);
 		jbd2_journal_destroy(sbi->s_journal);
 		sbi->s_journal = NULL;
 	}
 failed_mount3a:
 	ext4_es_unregister_shrinker(sbi);
 failed_mount3:
+	/* flush s_error_work before sbi destroy */
 	flush_work(&sbi->s_error_work);
 	del_timer_sync(&sbi->s_err_report);
 	ext4_stop_mmpd(sbi);
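The ext4_max_bitmap_size() hunks above are an overflow fix: with 64KiB blocks the intermediate block-to-byte shift can exceed LLONG_MAX, which is undefined behavior on a signed loff_t but well defined on unsigned long long, so the arithmetic is done unsigned and clamped before the final cast. A standalone demonstration of the arithmetic (illustrative numbers, not ext4's exact formula):

	#include <stdio.h>

	#define MAX_LFS_FILESIZE 0x7fffffffffffffffULL	/* LLONG_MAX, as in the kernel */

	int main(void)
	{
		int bits = 16;				/* 64 KiB block size */
		unsigned long long res = (1ULL << 48) - 1;	/* block count near the 2^48 limit */

		/*
		 * On a signed 64-bit loff_t, res << bits would exceed LLONG_MAX
		 * and invoke undefined behavior. In unsigned arithmetic it is
		 * well defined (mod 2^64), and clamping afterwards yields a
		 * sane file size limit.
		 */
		res <<= bits;
		if (res > MAX_LFS_FILESIZE)
			res = MAX_LFS_FILESIZE;

		printf("max file size: %llu bytes\n", res);
		return 0;
	}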