Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'ext4_for_linus-6.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 updates from Ted Ts'o:
"New ext4 features:

- Add support so tune2fs can modify/update the superblock using an
ioctl, without needing write access to the block device

- Add support for 32-bit reserved uids and gids

Bug fixes:

- Fix potential warnings and other failures caused by corrupted /
fuzzed file systems

- Fail unaligned direct I/O write with EINVAL instead of silently
falling back to buffered I/O

- Correctly handle fsmap queries for metadata mappings

- Avoid journal stalls caused by writeback throttling

- Add some missing GFP_NOFAIL flags to avoid potential deadlocks
under extreme memory pressure

Cleanups:

- Remove obsolete EXT3 Kconfigs"

* tag 'ext4_for_linus-6.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
ext4: fix checks for orphan inodes
ext4: validate ea_ino and size in check_xattrs
ext4: guard against EA inode refcount underflow in xattr update
ext4: implement new ioctls to set and get superblock parameters
ext4: add support for 32-bit default reserved uid and gid values
ext4: avoid potential buffer over-read in parse_apply_sb_mount_options()
ext4: fix an off-by-one issue during moving extents
ext4: increase i_disksize to offset + len in ext4_update_disksize_before_punch()
ext4: verify orphan file size is not too big
ext4: fail unaligned direct IO write with EINVAL
ext4: correctly handle queries for metadata mappings
ext4: increase IO priority of fastcommit
ext4: remove obsolete EXT3 config options
jbd2: increase IO priority of checkpoint
ext4: fix potential null deref in ext4_mb_init()
ext4: add ext4_sb_bread_nofail() helper function for ext4_free_branches()
ext4: replace min/max nesting with clamp()
fs: ext4: change GFP_KERNEL to GFP_NOFS to avoid deadlock

+467 -118
-27
fs/ext4/Kconfig
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 - # Ext3 configs are here for backward compatibility with old configs which may 3 - # have EXT3_FS set but not EXT4_FS set and thus would result in non-bootable 4 - # kernels after the removal of ext3 driver. 5 - config EXT3_FS 6 - tristate "The Extended 3 (ext3) filesystem" 7 - select EXT4_FS 8 - help 9 - This config option is here only for backward compatibility. ext3 10 - filesystem is now handled by the ext4 driver. 11 - 12 - config EXT3_FS_POSIX_ACL 13 - bool "Ext3 POSIX Access Control Lists" 14 - depends on EXT3_FS 15 - select EXT4_FS_POSIX_ACL 16 - select FS_POSIX_ACL 17 - help 18 - This config option is here only for backward compatibility. ext3 19 - filesystem is now handled by the ext4 driver. 20 - 21 - config EXT3_FS_SECURITY 22 - bool "Ext3 Security Labels" 23 - depends on EXT3_FS 24 - select EXT4_FS_SECURITY 25 - help 26 - This config option is here only for backward compatibility. ext3 27 - filesystem is now handled by the ext4 driver. 28 - 29 2 config EXT4_FS 30 3 tristate "The Extended 4 (ext4) filesystem" 31 4 select BUFFER_HEAD
+27 -1
fs/ext4/ext4.h
··· 1450 1450 __le16 s_encoding; /* Filename charset encoding */ 1451 1451 __le16 s_encoding_flags; /* Filename charset encoding flags */ 1452 1452 __le32 s_orphan_file_inum; /* Inode for tracking orphan inodes */ 1453 - __le32 s_reserved[94]; /* Padding to the end of the block */ 1453 + __le16 s_def_resuid_hi; 1454 + __le16 s_def_resgid_hi; 1455 + __le32 s_reserved[93]; /* Padding to the end of the block */ 1454 1456 __le32 s_checksum; /* crc32c(superblock) */ 1455 1457 }; 1456 1458 ··· 1822 1820 ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)); 1823 1821 } 1824 1822 1823 + static inline int ext4_get_resuid(struct ext4_super_block *es) 1824 + { 1825 + return le16_to_cpu(es->s_def_resuid) | 1826 + le16_to_cpu(es->s_def_resuid_hi) << 16; 1827 + } 1828 + 1829 + static inline int ext4_get_resgid(struct ext4_super_block *es) 1830 + { 1831 + return le16_to_cpu(es->s_def_resgid) | 1832 + le16_to_cpu(es->s_def_resgid_hi) << 16; 1833 + } 1834 + 1825 1835 /* 1826 1836 * Returns: sbi->field[index] 1827 1837 * Used to access an array element from the following sbi fields which require ··· 2002 1988 } 2003 1989 2004 1990 #define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime 1991 + 1992 + /* 1993 + * Check whether the inode is tracked as orphan (either in orphan file or 1994 + * orphan list). 
1995 + */ 1996 + static inline bool ext4_inode_orphan_tracked(struct inode *inode) 1997 + { 1998 + return ext4_test_inode_state(inode, EXT4_STATE_ORPHAN_FILE) || 1999 + !list_empty(&EXT4_I(inode)->i_orphan); 2000 + } 2005 2001 2006 2002 /* 2007 2003 * Codes for operating systems ··· 3166 3142 sector_t block, blk_opf_t op_flags); 3167 3143 extern struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb, 3168 3144 sector_t block); 3145 + extern struct buffer_head *ext4_sb_bread_nofail(struct super_block *sb, 3146 + sector_t block); 3169 3147 extern void ext4_read_bh_nowait(struct buffer_head *bh, blk_opf_t op_flags, 3170 3148 bh_end_io_t *end_io, bool simu_fail); 3171 3149 extern int ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
+1 -1
fs/ext4/fast_commit.c
··· 663 663 664 664 static void ext4_fc_submit_bh(struct super_block *sb, bool is_tail) 665 665 { 666 - blk_opf_t write_flags = REQ_SYNC; 666 + blk_opf_t write_flags = JBD2_JOURNAL_REQ_FLAGS; 667 667 struct buffer_head *bh = EXT4_SB(sb)->s_fc_bh; 668 668 669 669 /* Add REQ_FUA | REQ_PREFLUSH only its tail */
+1 -1
fs/ext4/file.c
··· 354 354 * to cleanup the orphan list in ext4_handle_inode_extension(). Do it 355 355 * now. 356 356 */ 357 - if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) { 357 + if (ext4_inode_orphan_tracked(inode) && inode->i_nlink) { 358 358 handle_t *handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); 359 359 360 360 if (IS_ERR(handle)) {
+9 -5
fs/ext4/fsmap.c
··· 74 74 static bool ext4_getfsmap_rec_before_low_key(struct ext4_getfsmap_info *info, 75 75 struct ext4_fsmap *rec) 76 76 { 77 - return rec->fmr_physical < info->gfi_low.fmr_physical; 77 + return rec->fmr_physical + rec->fmr_length <= 78 + info->gfi_low.fmr_physical; 78 79 } 79 80 80 81 /* ··· 201 200 ext4_group_first_block_no(sb, agno)); 202 201 fs_end = fs_start + EXT4_C2B(sbi, len); 203 202 204 - /* Return relevant extents from the meta_list */ 203 + /* 204 + * Return relevant extents from the meta_list. We emit all extents that 205 + * partially/fully overlap with the query range 206 + */ 205 207 list_for_each_entry_safe(p, tmp, &info->gfi_meta_list, fmr_list) { 206 - if (p->fmr_physical < info->gfi_next_fsblk) { 208 + if (p->fmr_physical + p->fmr_length <= info->gfi_next_fsblk) { 207 209 list_del(&p->fmr_list); 208 210 kfree(p); 209 211 continue; 210 212 } 211 - if (p->fmr_physical <= fs_start || 212 - p->fmr_physical + p->fmr_length <= fs_end) { 213 + if (p->fmr_physical <= fs_end && 214 + p->fmr_physical + p->fmr_length > fs_start) { 213 215 /* Emit the retained free extent record if present */ 214 216 if (info->gfi_lastfree.fmr_owner) { 215 217 error = ext4_getfsmap_helper(sb, info,
+1 -1
fs/ext4/indirect.c
··· 1025 1025 } 1026 1026 1027 1027 /* Go read the buffer for the next level down */ 1028 - bh = ext4_sb_bread(inode->i_sb, nr, 0); 1028 + bh = ext4_sb_bread_nofail(inode->i_sb, nr); 1029 1029 1030 1030 /* 1031 1031 * A read failure? Report error and clear slot
+9 -38
fs/ext4/inode.c
··· 3872 3872 return ret; 3873 3873 } 3874 3874 3875 - static inline bool ext4_want_directio_fallback(unsigned flags, ssize_t written) 3876 - { 3877 - /* must be a directio to fall back to buffered */ 3878 - if ((flags & (IOMAP_WRITE | IOMAP_DIRECT)) != 3879 - (IOMAP_WRITE | IOMAP_DIRECT)) 3880 - return false; 3881 - 3882 - /* atomic writes are all-or-nothing */ 3883 - if (flags & IOMAP_ATOMIC) 3884 - return false; 3885 - 3886 - /* can only try again if we wrote nothing */ 3887 - return written == 0; 3888 - } 3889 - 3890 - static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length, 3891 - ssize_t written, unsigned flags, struct iomap *iomap) 3892 - { 3893 - /* 3894 - * Check to see whether an error occurred while writing out the data to 3895 - * the allocated blocks. If so, return the magic error code for 3896 - * non-atomic write so that we fallback to buffered I/O and attempt to 3897 - * complete the remainder of the I/O. 3898 - * For non-atomic writes, any blocks that may have been 3899 - * allocated in preparation for the direct I/O will be reused during 3900 - * buffered I/O. For atomic write, we never fallback to buffered-io. 3901 - */ 3902 - if (ext4_want_directio_fallback(flags, written)) 3903 - return -ENOTBLK; 3904 - 3905 - return 0; 3906 - } 3907 - 3908 3875 const struct iomap_ops ext4_iomap_ops = { 3909 3876 .iomap_begin = ext4_iomap_begin, 3910 - .iomap_end = ext4_iomap_end, 3911 3877 }; 3912 3878 3913 3879 const struct iomap_ops ext4_iomap_overwrite_ops = { 3914 3880 .iomap_begin = ext4_iomap_overwrite_begin, 3915 - .iomap_end = ext4_iomap_end, 3916 3881 }; 3917 3882 3918 3883 static int ext4_iomap_begin_report(struct inode *inode, loff_t offset, ··· 4252 4287 * We have to make sure i_disksize gets properly updated before we truncate 4253 4288 * page cache due to hole punching or zero range. 
Otherwise i_disksize update 4254 4289 * can get lost as it may have been postponed to submission of writeback but 4255 - * that will never happen after we truncate page cache. 4290 + * that will never happen if we remove the folio containing i_size from the 4291 + * page cache. Also if we punch hole within i_size but above i_disksize, 4292 + * following ext4_page_mkwrite() may mistakenly allocate written blocks over 4293 + * the hole and thus introduce allocated blocks beyond i_disksize which is 4294 + * not allowed (e2fsck would complain in case of crash). 4256 4295 */ 4257 4296 int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset, 4258 4297 loff_t len) ··· 4267 4298 loff_t size = i_size_read(inode); 4268 4299 4269 4300 WARN_ON(!inode_is_locked(inode)); 4270 - if (offset > size || offset + len < size) 4301 + if (offset > size) 4271 4302 return 0; 4272 4303 4304 + if (offset + len < size) 4305 + size = offset + len; 4273 4306 if (EXT4_I(inode)->i_disksize >= size) 4274 4307 return 0; 4275 4308 ··· 4719 4748 * old inodes get re-used with the upper 16 bits of the 4720 4749 * uid/gid intact. 4721 4750 */ 4722 - if (ei->i_dtime && list_empty(&ei->i_orphan)) { 4751 + if (ei->i_dtime && !ext4_inode_orphan_tracked(inode)) { 4723 4752 raw_inode->i_uid_high = 0; 4724 4753 raw_inode->i_gid_high = 0; 4725 4754 } else {
+305 -7
fs/ext4/ioctl.c
··· 27 27 #include "fsmap.h" 28 28 #include <trace/events/ext4.h> 29 29 30 - typedef void ext4_update_sb_callback(struct ext4_super_block *es, 31 - const void *arg); 30 + typedef void ext4_update_sb_callback(struct ext4_sb_info *sbi, 31 + struct ext4_super_block *es, 32 + const void *arg); 32 33 33 34 /* 34 35 * Superblock modification callback function for changing file system 35 36 * label 36 37 */ 37 - static void ext4_sb_setlabel(struct ext4_super_block *es, const void *arg) 38 + static void ext4_sb_setlabel(struct ext4_sb_info *sbi, 39 + struct ext4_super_block *es, const void *arg) 38 40 { 39 41 /* Sanity check, this should never happen */ 40 42 BUILD_BUG_ON(sizeof(es->s_volume_name) < EXT4_LABEL_MAX); ··· 48 46 * Superblock modification callback function for changing file system 49 47 * UUID. 50 48 */ 51 - static void ext4_sb_setuuid(struct ext4_super_block *es, const void *arg) 49 + static void ext4_sb_setuuid(struct ext4_sb_info *sbi, 50 + struct ext4_super_block *es, const void *arg) 52 51 { 53 52 memcpy(es->s_uuid, (__u8 *)arg, UUID_SIZE); 54 53 } ··· 74 71 goto out_err; 75 72 76 73 lock_buffer(bh); 77 - func(es, arg); 74 + func(sbi, es, arg); 78 75 ext4_superblock_csum_set(sb); 79 76 unlock_buffer(bh); 80 77 ··· 152 149 unlock_buffer(bh); 153 150 goto out_bh; 154 151 } 155 - func(es, arg); 152 + func(EXT4_SB(sb), es, arg); 156 153 if (ext4_has_feature_metadata_csum(sb)) 157 154 es->s_checksum = ext4_superblock_csum(es); 158 155 set_buffer_uptodate(bh); ··· 1233 1230 return ret; 1234 1231 } 1235 1232 1233 + 1234 + #define TUNE_OPS_SUPPORTED (EXT4_TUNE_FL_ERRORS_BEHAVIOR | \ 1235 + EXT4_TUNE_FL_MNT_COUNT | EXT4_TUNE_FL_MAX_MNT_COUNT | \ 1236 + EXT4_TUNE_FL_CHECKINTRVAL | EXT4_TUNE_FL_LAST_CHECK_TIME | \ 1237 + EXT4_TUNE_FL_RESERVED_BLOCKS | EXT4_TUNE_FL_RESERVED_UID | \ 1238 + EXT4_TUNE_FL_RESERVED_GID | EXT4_TUNE_FL_DEFAULT_MNT_OPTS | \ 1239 + EXT4_TUNE_FL_DEF_HASH_ALG | EXT4_TUNE_FL_RAID_STRIDE | \ 1240 + EXT4_TUNE_FL_RAID_STRIPE_WIDTH | 
EXT4_TUNE_FL_MOUNT_OPTS | \ 1241 + EXT4_TUNE_FL_FEATURES | EXT4_TUNE_FL_EDIT_FEATURES | \ 1242 + EXT4_TUNE_FL_FORCE_FSCK | EXT4_TUNE_FL_ENCODING | \ 1243 + EXT4_TUNE_FL_ENCODING_FLAGS) 1244 + 1245 + #define EXT4_TUNE_SET_COMPAT_SUPP \ 1246 + (EXT4_FEATURE_COMPAT_DIR_INDEX | \ 1247 + EXT4_FEATURE_COMPAT_STABLE_INODES) 1248 + #define EXT4_TUNE_SET_INCOMPAT_SUPP \ 1249 + (EXT4_FEATURE_INCOMPAT_EXTENTS | \ 1250 + EXT4_FEATURE_INCOMPAT_EA_INODE | \ 1251 + EXT4_FEATURE_INCOMPAT_ENCRYPT | \ 1252 + EXT4_FEATURE_INCOMPAT_CSUM_SEED | \ 1253 + EXT4_FEATURE_INCOMPAT_LARGEDIR | \ 1254 + EXT4_FEATURE_INCOMPAT_CASEFOLD) 1255 + #define EXT4_TUNE_SET_RO_COMPAT_SUPP \ 1256 + (EXT4_FEATURE_RO_COMPAT_LARGE_FILE | \ 1257 + EXT4_FEATURE_RO_COMPAT_DIR_NLINK | \ 1258 + EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE | \ 1259 + EXT4_FEATURE_RO_COMPAT_PROJECT | \ 1260 + EXT4_FEATURE_RO_COMPAT_VERITY) 1261 + 1262 + #define EXT4_TUNE_CLEAR_COMPAT_SUPP (0) 1263 + #define EXT4_TUNE_CLEAR_INCOMPAT_SUPP (0) 1264 + #define EXT4_TUNE_CLEAR_RO_COMPAT_SUPP (0) 1265 + 1266 + #define SB_ENC_SUPP_MASK (SB_ENC_STRICT_MODE_FL | \ 1267 + SB_ENC_NO_COMPAT_FALLBACK_FL) 1268 + 1269 + static int ext4_ioctl_get_tune_sb(struct ext4_sb_info *sbi, 1270 + struct ext4_tune_sb_params __user *params) 1271 + { 1272 + struct ext4_tune_sb_params ret; 1273 + struct ext4_super_block *es = sbi->s_es; 1274 + 1275 + memset(&ret, 0, sizeof(ret)); 1276 + ret.set_flags = TUNE_OPS_SUPPORTED; 1277 + ret.errors_behavior = le16_to_cpu(es->s_errors); 1278 + ret.mnt_count = le16_to_cpu(es->s_mnt_count); 1279 + ret.max_mnt_count = le16_to_cpu(es->s_max_mnt_count); 1280 + ret.checkinterval = le32_to_cpu(es->s_checkinterval); 1281 + ret.last_check_time = le32_to_cpu(es->s_lastcheck); 1282 + ret.reserved_blocks = ext4_r_blocks_count(es); 1283 + ret.blocks_count = ext4_blocks_count(es); 1284 + ret.reserved_uid = ext4_get_resuid(es); 1285 + ret.reserved_gid = ext4_get_resgid(es); 1286 + ret.default_mnt_opts = le32_to_cpu(es->s_default_mount_opts); 1287 
+ ret.def_hash_alg = es->s_def_hash_version; 1288 + ret.raid_stride = le16_to_cpu(es->s_raid_stride); 1289 + ret.raid_stripe_width = le32_to_cpu(es->s_raid_stripe_width); 1290 + ret.encoding = le16_to_cpu(es->s_encoding); 1291 + ret.encoding_flags = le16_to_cpu(es->s_encoding_flags); 1292 + strscpy_pad(ret.mount_opts, es->s_mount_opts); 1293 + ret.feature_compat = le32_to_cpu(es->s_feature_compat); 1294 + ret.feature_incompat = le32_to_cpu(es->s_feature_incompat); 1295 + ret.feature_ro_compat = le32_to_cpu(es->s_feature_ro_compat); 1296 + ret.set_feature_compat_mask = EXT4_TUNE_SET_COMPAT_SUPP; 1297 + ret.set_feature_incompat_mask = EXT4_TUNE_SET_INCOMPAT_SUPP; 1298 + ret.set_feature_ro_compat_mask = EXT4_TUNE_SET_RO_COMPAT_SUPP; 1299 + ret.clear_feature_compat_mask = EXT4_TUNE_CLEAR_COMPAT_SUPP; 1300 + ret.clear_feature_incompat_mask = EXT4_TUNE_CLEAR_INCOMPAT_SUPP; 1301 + ret.clear_feature_ro_compat_mask = EXT4_TUNE_CLEAR_RO_COMPAT_SUPP; 1302 + if (copy_to_user(params, &ret, sizeof(ret))) 1303 + return -EFAULT; 1304 + return 0; 1305 + } 1306 + 1307 + static void ext4_sb_setparams(struct ext4_sb_info *sbi, 1308 + struct ext4_super_block *es, const void *arg) 1309 + { 1310 + const struct ext4_tune_sb_params *params = arg; 1311 + 1312 + if (params->set_flags & EXT4_TUNE_FL_ERRORS_BEHAVIOR) 1313 + es->s_errors = cpu_to_le16(params->errors_behavior); 1314 + if (params->set_flags & EXT4_TUNE_FL_MNT_COUNT) 1315 + es->s_mnt_count = cpu_to_le16(params->mnt_count); 1316 + if (params->set_flags & EXT4_TUNE_FL_MAX_MNT_COUNT) 1317 + es->s_max_mnt_count = cpu_to_le16(params->max_mnt_count); 1318 + if (params->set_flags & EXT4_TUNE_FL_CHECKINTRVAL) 1319 + es->s_checkinterval = cpu_to_le32(params->checkinterval); 1320 + if (params->set_flags & EXT4_TUNE_FL_LAST_CHECK_TIME) 1321 + es->s_lastcheck = cpu_to_le32(params->last_check_time); 1322 + if (params->set_flags & EXT4_TUNE_FL_RESERVED_BLOCKS) { 1323 + ext4_fsblk_t blk = params->reserved_blocks; 1324 + 1325 + 
es->s_r_blocks_count_lo = cpu_to_le32((u32)blk); 1326 + es->s_r_blocks_count_hi = cpu_to_le32(blk >> 32); 1327 + } 1328 + if (params->set_flags & EXT4_TUNE_FL_RESERVED_UID) { 1329 + int uid = params->reserved_uid; 1330 + 1331 + es->s_def_resuid = cpu_to_le16(uid & 0xFFFF); 1332 + es->s_def_resuid_hi = cpu_to_le16(uid >> 16); 1333 + } 1334 + if (params->set_flags & EXT4_TUNE_FL_RESERVED_GID) { 1335 + int gid = params->reserved_gid; 1336 + 1337 + es->s_def_resgid = cpu_to_le16(gid & 0xFFFF); 1338 + es->s_def_resgid_hi = cpu_to_le16(gid >> 16); 1339 + } 1340 + if (params->set_flags & EXT4_TUNE_FL_DEFAULT_MNT_OPTS) 1341 + es->s_default_mount_opts = cpu_to_le32(params->default_mnt_opts); 1342 + if (params->set_flags & EXT4_TUNE_FL_DEF_HASH_ALG) 1343 + es->s_def_hash_version = params->def_hash_alg; 1344 + if (params->set_flags & EXT4_TUNE_FL_RAID_STRIDE) 1345 + es->s_raid_stride = cpu_to_le16(params->raid_stride); 1346 + if (params->set_flags & EXT4_TUNE_FL_RAID_STRIPE_WIDTH) 1347 + es->s_raid_stripe_width = 1348 + cpu_to_le32(params->raid_stripe_width); 1349 + if (params->set_flags & EXT4_TUNE_FL_ENCODING) 1350 + es->s_encoding = cpu_to_le16(params->encoding); 1351 + if (params->set_flags & EXT4_TUNE_FL_ENCODING_FLAGS) 1352 + es->s_encoding_flags = cpu_to_le16(params->encoding_flags); 1353 + strscpy_pad(es->s_mount_opts, params->mount_opts); 1354 + if (params->set_flags & EXT4_TUNE_FL_EDIT_FEATURES) { 1355 + es->s_feature_compat |= 1356 + cpu_to_le32(params->set_feature_compat_mask); 1357 + es->s_feature_incompat |= 1358 + cpu_to_le32(params->set_feature_incompat_mask); 1359 + es->s_feature_ro_compat |= 1360 + cpu_to_le32(params->set_feature_ro_compat_mask); 1361 + es->s_feature_compat &= 1362 + ~cpu_to_le32(params->clear_feature_compat_mask); 1363 + es->s_feature_incompat &= 1364 + ~cpu_to_le32(params->clear_feature_incompat_mask); 1365 + es->s_feature_ro_compat &= 1366 + ~cpu_to_le32(params->clear_feature_ro_compat_mask); 1367 + if (params->set_feature_compat_mask & 
1368 + EXT4_FEATURE_COMPAT_DIR_INDEX) 1369 + es->s_def_hash_version = sbi->s_def_hash_version; 1370 + if (params->set_feature_incompat_mask & 1371 + EXT4_FEATURE_INCOMPAT_CSUM_SEED) 1372 + es->s_checksum_seed = cpu_to_le32(sbi->s_csum_seed); 1373 + } 1374 + if (params->set_flags & EXT4_TUNE_FL_FORCE_FSCK) 1375 + es->s_state |= cpu_to_le16(EXT4_ERROR_FS); 1376 + } 1377 + 1378 + static int ext4_ioctl_set_tune_sb(struct file *filp, 1379 + struct ext4_tune_sb_params __user *in) 1380 + { 1381 + struct ext4_tune_sb_params params; 1382 + struct super_block *sb = file_inode(filp)->i_sb; 1383 + struct ext4_sb_info *sbi = EXT4_SB(sb); 1384 + struct ext4_super_block *es = sbi->s_es; 1385 + int enabling_casefold = 0; 1386 + int ret; 1387 + 1388 + if (!capable(CAP_SYS_ADMIN)) 1389 + return -EPERM; 1390 + 1391 + if (copy_from_user(&params, in, sizeof(params))) 1392 + return -EFAULT; 1393 + 1394 + if ((params.set_flags & ~TUNE_OPS_SUPPORTED) != 0) 1395 + return -EOPNOTSUPP; 1396 + 1397 + if ((params.set_flags & EXT4_TUNE_FL_ERRORS_BEHAVIOR) && 1398 + (params.errors_behavior > EXT4_ERRORS_PANIC)) 1399 + return -EINVAL; 1400 + 1401 + if ((params.set_flags & EXT4_TUNE_FL_RESERVED_BLOCKS) && 1402 + (params.reserved_blocks > ext4_blocks_count(sbi->s_es) / 2)) 1403 + return -EINVAL; 1404 + if ((params.set_flags & EXT4_TUNE_FL_DEF_HASH_ALG) && 1405 + ((params.def_hash_alg > DX_HASH_LAST) || 1406 + (params.def_hash_alg == DX_HASH_SIPHASH))) 1407 + return -EINVAL; 1408 + if ((params.set_flags & EXT4_TUNE_FL_FEATURES) && 1409 + (params.set_flags & EXT4_TUNE_FL_EDIT_FEATURES)) 1410 + return -EINVAL; 1411 + 1412 + if (params.set_flags & EXT4_TUNE_FL_FEATURES) { 1413 + params.set_feature_compat_mask = 1414 + params.feature_compat & 1415 + ~le32_to_cpu(es->s_feature_compat); 1416 + params.set_feature_incompat_mask = 1417 + params.feature_incompat & 1418 + ~le32_to_cpu(es->s_feature_incompat); 1419 + params.set_feature_ro_compat_mask = 1420 + params.feature_ro_compat & 1421 + 
~le32_to_cpu(es->s_feature_ro_compat); 1422 + params.clear_feature_compat_mask = 1423 + ~params.feature_compat & 1424 + le32_to_cpu(es->s_feature_compat); 1425 + params.clear_feature_incompat_mask = 1426 + ~params.feature_incompat & 1427 + le32_to_cpu(es->s_feature_incompat); 1428 + params.clear_feature_ro_compat_mask = 1429 + ~params.feature_ro_compat & 1430 + le32_to_cpu(es->s_feature_ro_compat); 1431 + params.set_flags |= EXT4_TUNE_FL_EDIT_FEATURES; 1432 + } 1433 + if (params.set_flags & EXT4_TUNE_FL_EDIT_FEATURES) { 1434 + if ((params.set_feature_compat_mask & 1435 + ~EXT4_TUNE_SET_COMPAT_SUPP) || 1436 + (params.set_feature_incompat_mask & 1437 + ~EXT4_TUNE_SET_INCOMPAT_SUPP) || 1438 + (params.set_feature_ro_compat_mask & 1439 + ~EXT4_TUNE_SET_RO_COMPAT_SUPP) || 1440 + (params.clear_feature_compat_mask & 1441 + ~EXT4_TUNE_CLEAR_COMPAT_SUPP) || 1442 + (params.clear_feature_incompat_mask & 1443 + ~EXT4_TUNE_CLEAR_INCOMPAT_SUPP) || 1444 + (params.clear_feature_ro_compat_mask & 1445 + ~EXT4_TUNE_CLEAR_RO_COMPAT_SUPP)) 1446 + return -EOPNOTSUPP; 1447 + 1448 + /* 1449 + * Filter out the features that are already set from 1450 + * the set_mask. 
1451 + */ 1452 + params.set_feature_compat_mask &= 1453 + ~le32_to_cpu(es->s_feature_compat); 1454 + params.set_feature_incompat_mask &= 1455 + ~le32_to_cpu(es->s_feature_incompat); 1456 + params.set_feature_ro_compat_mask &= 1457 + ~le32_to_cpu(es->s_feature_ro_compat); 1458 + if ((params.set_feature_incompat_mask & 1459 + EXT4_FEATURE_INCOMPAT_CASEFOLD)) { 1460 + enabling_casefold = 1; 1461 + if (!(params.set_flags & EXT4_TUNE_FL_ENCODING)) { 1462 + params.encoding = EXT4_ENC_UTF8_12_1; 1463 + params.set_flags |= EXT4_TUNE_FL_ENCODING; 1464 + } 1465 + if (!(params.set_flags & EXT4_TUNE_FL_ENCODING_FLAGS)) { 1466 + params.encoding_flags = 0; 1467 + params.set_flags |= EXT4_TUNE_FL_ENCODING_FLAGS; 1468 + } 1469 + } 1470 + if ((params.set_feature_compat_mask & 1471 + EXT4_FEATURE_COMPAT_DIR_INDEX)) { 1472 + uuid_t uu; 1473 + 1474 + memcpy(&uu, sbi->s_hash_seed, UUID_SIZE); 1475 + if (uuid_is_null(&uu)) 1476 + generate_random_uuid((char *) 1477 + &sbi->s_hash_seed); 1478 + if (params.set_flags & EXT4_TUNE_FL_DEF_HASH_ALG) 1479 + sbi->s_def_hash_version = params.def_hash_alg; 1480 + else if (sbi->s_def_hash_version == 0) 1481 + sbi->s_def_hash_version = DX_HASH_HALF_MD4; 1482 + if (!(es->s_flags & 1483 + cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH)) && 1484 + !(es->s_flags & 1485 + cpu_to_le32(EXT2_FLAGS_SIGNED_HASH))) { 1486 + #ifdef __CHAR_UNSIGNED__ 1487 + sbi->s_hash_unsigned = 3; 1488 + #else 1489 + sbi->s_hash_unsigned = 0; 1490 + #endif 1491 + } 1492 + } 1493 + } 1494 + if (params.set_flags & EXT4_TUNE_FL_ENCODING) { 1495 + if (!enabling_casefold) 1496 + return -EINVAL; 1497 + if (params.encoding == 0) 1498 + params.encoding = EXT4_ENC_UTF8_12_1; 1499 + else if (params.encoding != EXT4_ENC_UTF8_12_1) 1500 + return -EINVAL; 1501 + } 1502 + if (params.set_flags & EXT4_TUNE_FL_ENCODING_FLAGS) { 1503 + if (!enabling_casefold) 1504 + return -EINVAL; 1505 + if (params.encoding_flags & ~SB_ENC_SUPP_MASK) 1506 + return -EINVAL; 1507 + } 1508 + 1509 + ret = 
mnt_want_write_file(filp); 1510 + if (ret) 1511 + return ret; 1512 + 1513 + ret = ext4_update_superblocks_fn(sb, ext4_sb_setparams, &params); 1514 + mnt_drop_write_file(filp); 1515 + 1516 + if (params.set_flags & EXT4_TUNE_FL_DEF_HASH_ALG) 1517 + sbi->s_def_hash_version = params.def_hash_alg; 1518 + 1519 + return ret; 1520 + } 1521 + 1236 1522 static long __ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 1237 1523 { 1238 1524 struct inode *inode = file_inode(filp); ··· 1908 1616 return ext4_ioctl_getuuid(EXT4_SB(sb), (void __user *)arg); 1909 1617 case EXT4_IOC_SETFSUUID: 1910 1618 return ext4_ioctl_setuuid(filp, (const void __user *)arg); 1619 + case EXT4_IOC_GET_TUNE_SB_PARAM: 1620 + return ext4_ioctl_get_tune_sb(EXT4_SB(sb), 1621 + (void __user *)arg); 1622 + case EXT4_IOC_SET_TUNE_SB_PARAM: 1623 + return ext4_ioctl_set_tune_sb(filp, (void __user *)arg); 1911 1624 default: 1912 1625 return -ENOTTY; 1913 1626 } ··· 2000 1703 } 2001 1704 #endif 2002 1705 2003 - static void set_overhead(struct ext4_super_block *es, const void *arg) 1706 + static void set_overhead(struct ext4_sb_info *sbi, 1707 + struct ext4_super_block *es, const void *arg) 2004 1708 { 2005 1709 es->s_overhead_clusters = cpu_to_le32(*((unsigned long *) arg)); 2006 1710 }
+10
fs/ext4/mballoc.c
··· 3655 3655 3656 3656 static inline void ext4_mb_avg_fragment_size_destroy(struct ext4_sb_info *sbi) 3657 3657 { 3658 + if (!sbi->s_mb_avg_fragment_size) 3659 + return; 3660 + 3658 3661 for (int i = 0; i < MB_NUM_ORDERS(sbi->s_sb); i++) 3659 3662 xa_destroy(&sbi->s_mb_avg_fragment_size[i]); 3663 + 3660 3664 kfree(sbi->s_mb_avg_fragment_size); 3665 + sbi->s_mb_avg_fragment_size = NULL; 3661 3666 } 3662 3667 3663 3668 static inline void ext4_mb_largest_free_orders_destroy(struct ext4_sb_info *sbi) 3664 3669 { 3670 + if (!sbi->s_mb_largest_free_orders) 3671 + return; 3672 + 3665 3673 for (int i = 0; i < MB_NUM_ORDERS(sbi->s_sb); i++) 3666 3674 xa_destroy(&sbi->s_mb_largest_free_orders[i]); 3675 + 3667 3676 kfree(sbi->s_mb_largest_free_orders); 3677 + sbi->s_mb_largest_free_orders = NULL; 3668 3678 } 3669 3679 3670 3680 int ext4_mb_init(struct super_block *sb)
+3 -3
fs/ext4/mmp.c
··· 231 231 * Adjust the mmp_check_interval depending on how much time 232 232 * it took for the MMP block to be written. 233 233 */ 234 - mmp_check_interval = max(min(EXT4_MMP_CHECK_MULT * diff / HZ, 235 - EXT4_MMP_MAX_CHECK_INTERVAL), 236 - EXT4_MMP_MIN_CHECK_INTERVAL); 234 + mmp_check_interval = clamp(EXT4_MMP_CHECK_MULT * diff / HZ, 235 + EXT4_MMP_MIN_CHECK_INTERVAL, 236 + EXT4_MMP_MAX_CHECK_INTERVAL); 237 237 mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval); 238 238 } 239 239
+1 -1
fs/ext4/move_extent.c
··· 225 225 do { 226 226 if (bh_offset(bh) + blocksize <= from) 227 227 continue; 228 - if (bh_offset(bh) > to) 228 + if (bh_offset(bh) >= to) 229 229 break; 230 230 wait_on_buffer(bh); 231 231 if (buffer_uptodate(bh))
+13 -6
fs/ext4/orphan.c
··· 109 109 110 110 WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) && 111 111 !inode_is_locked(inode)); 112 - /* 113 - * Inode orphaned in orphan file or in orphan list? 114 - */ 115 - if (ext4_test_inode_state(inode, EXT4_STATE_ORPHAN_FILE) || 116 - !list_empty(&EXT4_I(inode)->i_orphan)) 112 + if (ext4_inode_orphan_tracked(inode)) 117 113 return 0; 118 114 119 115 /* ··· 583 587 ext4_msg(sb, KERN_ERR, "get orphan inode failed"); 584 588 return PTR_ERR(inode); 585 589 } 590 + /* 591 + * This is just an artificial limit to prevent corrupted fs from 592 + * consuming absurd amounts of memory when pinning blocks of orphan 593 + * file in memory. 594 + */ 595 + if (inode->i_size > 8 << 20) { 596 + ext4_msg(sb, KERN_ERR, "orphan file too big: %llu", 597 + (unsigned long long)inode->i_size); 598 + ret = -EFSCORRUPTED; 599 + goto out_put; 600 + } 586 601 oi->of_blocks = inode->i_size >> sb->s_blocksize_bits; 587 602 oi->of_csum_seed = EXT4_I(inode)->i_csum_seed; 588 - oi->of_binfo = kmalloc_array(oi->of_blocks, 603 + oi->of_binfo = kvmalloc_array(oi->of_blocks, 589 604 sizeof(struct ext4_orphan_block), 590 605 GFP_KERNEL); 591 606 if (!oi->of_binfo) {
+20 -18
fs/ext4/super.c
··· 265 265 return __ext4_sb_bread_gfp(sb, block, 0, gfp); 266 266 } 267 267 268 + struct buffer_head *ext4_sb_bread_nofail(struct super_block *sb, 269 + sector_t block) 270 + { 271 + gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping, 272 + ~__GFP_FS) | __GFP_MOVABLE | __GFP_NOFAIL; 273 + 274 + return __ext4_sb_bread_gfp(sb, block, 0, gfp); 275 + } 276 + 268 277 void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block) 269 278 { 270 279 struct buffer_head *bh = bdev_getblk(sb->s_bdev, block, ··· 1447 1438 1448 1439 static void ext4_destroy_inode(struct inode *inode) 1449 1440 { 1450 - if (!list_empty(&(EXT4_I(inode)->i_orphan))) { 1441 + if (ext4_inode_orphan_tracked(inode)) { 1451 1442 ext4_msg(inode->i_sb, KERN_ERR, 1452 - "Inode %lu (%p): orphan list check failed!", 1443 + "Inode %lu (%p): inode tracked as orphan!", 1453 1444 inode->i_ino, EXT4_I(inode)); 1454 1445 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4, 1455 1446 EXT4_I(inode), sizeof(struct ext4_inode_info), ··· 2475 2466 struct ext4_fs_context *m_ctx) 2476 2467 { 2477 2468 struct ext4_sb_info *sbi = EXT4_SB(sb); 2478 - char *s_mount_opts = NULL; 2469 + char s_mount_opts[65]; 2479 2470 struct ext4_fs_context *s_ctx = NULL; 2480 2471 struct fs_context *fc = NULL; 2481 2472 int ret = -ENOMEM; ··· 2483 2474 if (!sbi->s_es->s_mount_opts[0]) 2484 2475 return 0; 2485 2476 2486 - s_mount_opts = kstrndup(sbi->s_es->s_mount_opts, 2487 - sizeof(sbi->s_es->s_mount_opts), 2488 - GFP_KERNEL); 2489 - if (!s_mount_opts) 2490 - return ret; 2477 + strscpy_pad(s_mount_opts, sbi->s_es->s_mount_opts); 2491 2478 2492 2479 fc = kzalloc(sizeof(struct fs_context), GFP_KERNEL); 2493 2480 if (!fc) 2494 - goto out_free; 2481 + return -ENOMEM; 2495 2482 2496 2483 s_ctx = kzalloc(sizeof(struct ext4_fs_context), GFP_KERNEL); 2497 2484 if (!s_ctx) ··· 2519 2514 ret = 0; 2520 2515 2521 2516 out_free: 2522 - if (fc) { 2523 - ext4_fc_free(fc); 2524 - kfree(fc); 2525 - } 2526 - kfree(s_mount_opts); 2517 
+ ext4_fc_free(fc); 2518 + kfree(fc); 2527 2519 return ret; 2528 2520 } 2529 2521 ··· 2966 2964 } 2967 2965 2968 2966 if (nodefs || !uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT4_DEF_RESUID)) || 2969 - le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID) 2967 + ext4_get_resuid(es) != EXT4_DEF_RESUID) 2970 2968 SEQ_OPTS_PRINT("resuid=%u", 2971 2969 from_kuid_munged(&init_user_ns, sbi->s_resuid)); 2972 2970 if (nodefs || !gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT4_DEF_RESGID)) || 2973 - le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID) 2971 + ext4_get_resgid(es) != EXT4_DEF_RESGID) 2974 2972 SEQ_OPTS_PRINT("resgid=%u", 2975 2973 from_kgid_munged(&init_user_ns, sbi->s_resgid)); 2976 2974 def_errors = nodefs ? -1 : le16_to_cpu(es->s_errors); ··· 5285 5283 5286 5284 ext4_set_def_opts(sb, es); 5287 5285 5288 - sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid)); 5289 - sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid)); 5286 + sbi->s_resuid = make_kuid(&init_user_ns, ext4_get_resuid(es)); 5287 + sbi->s_resgid = make_kgid(&init_user_ns, ext4_get_resuid(es)); 5290 5288 sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ; 5291 5289 sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME; 5292 5290 sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
+13 -8
fs/ext4/xattr.c
··· 251 251 err_str = "invalid ea_ino"; 252 252 goto errout; 253 253 } 254 + if (ea_ino && !size) { 255 + err_str = "invalid size in ea xattr"; 256 + goto errout; 257 + } 254 258 if (size > EXT4_XATTR_SIZE_MAX) { 255 259 err_str = "e_value size too large"; 256 260 goto errout; ··· 1023 1019 int ref_change) 1024 1020 { 1025 1021 struct ext4_iloc iloc; 1026 - s64 ref_count; 1022 + u64 ref_count; 1027 1023 int ret; 1028 1024 1029 1025 inode_lock_nested(ea_inode, I_MUTEX_XATTR); ··· 1033 1029 goto out; 1034 1030 1035 1031 ref_count = ext4_xattr_inode_get_ref(ea_inode); 1032 + if ((ref_count == 0 && ref_change < 0) || (ref_count == U64_MAX && ref_change > 0)) { 1033 + ext4_error_inode(ea_inode, __func__, __LINE__, 0, 1034 + "EA inode %lu ref wraparound: ref_count=%lld ref_change=%d", 1035 + ea_inode->i_ino, ref_count, ref_change); 1036 + ret = -EFSCORRUPTED; 1037 + goto out; 1038 + } 1036 1039 ref_count += ref_change; 1037 1040 ext4_xattr_inode_set_ref(ea_inode, ref_count); 1038 1041 1039 1042 if (ref_change > 0) { 1040 - WARN_ONCE(ref_count <= 0, "EA inode %lu ref_count=%lld", 1041 - ea_inode->i_ino, ref_count); 1042 - 1043 1043 if (ref_count == 1) { 1044 1044 WARN_ONCE(ea_inode->i_nlink, "EA inode %lu i_nlink=%u", 1045 1045 ea_inode->i_ino, ea_inode->i_nlink); ··· 1052 1044 ext4_orphan_del(handle, ea_inode); 1053 1045 } 1054 1046 } else { 1055 - WARN_ONCE(ref_count < 0, "EA inode %lu ref_count=%lld", 1056 - ea_inode->i_ino, ref_count); 1057 - 1058 1047 if (ref_count == 0) { 1059 1048 WARN_ONCE(ea_inode->i_nlink != 1, 1060 1049 "EA inode %lu i_nlink=%u", ··· 1535 1530 WARN_ON_ONCE(ext4_handle_valid(journal_current_handle()) && 1536 1531 !(current->flags & PF_MEMALLOC_NOFS)); 1537 1532 1538 - ea_data = kvmalloc(value_len, GFP_KERNEL); 1533 + ea_data = kvmalloc(value_len, GFP_NOFS); 1539 1534 if (!ea_data) { 1540 1535 mb_cache_entry_put(ea_inode_cache, ce); 1541 1536 return NULL;
+1 -1
fs/jbd2/checkpoint.c
··· 131 131 132 132 blk_start_plug(&plug); 133 133 for (i = 0; i < *batch_count; i++) 134 - write_dirty_buffer(journal->j_chkpt_bhs[i], REQ_SYNC); 134 + write_dirty_buffer(journal->j_chkpt_bhs[i], JBD2_JOURNAL_REQ_FLAGS); 135 135 blk_finish_plug(&plug); 136 136 137 137 for (i = 0; i < *batch_count; i++) {
+53
include/uapi/linux/ext4.h
··· 33 33 #define EXT4_IOC_CHECKPOINT _IOW('f', 43, __u32) 34 34 #define EXT4_IOC_GETFSUUID _IOR('f', 44, struct fsuuid) 35 35 #define EXT4_IOC_SETFSUUID _IOW('f', 44, struct fsuuid) 36 + #define EXT4_IOC_GET_TUNE_SB_PARAM _IOR('f', 45, struct ext4_tune_sb_params) 37 + #define EXT4_IOC_SET_TUNE_SB_PARAM _IOW('f', 46, struct ext4_tune_sb_params) 36 38 37 39 #define EXT4_IOC_SHUTDOWN _IOR('X', 125, __u32) 38 40 ··· 109 107 __u16 reserved_blocks; /* Number of reserved blocks in this group */ 110 108 __u16 unused; 111 109 }; 110 + 111 + struct ext4_tune_sb_params { 112 + __u32 set_flags; 113 + __u32 checkinterval; 114 + __u16 errors_behavior; 115 + __u16 mnt_count; 116 + __u16 max_mnt_count; 117 + __u16 raid_stride; 118 + __u64 last_check_time; 119 + __u64 reserved_blocks; 120 + __u64 blocks_count; 121 + __u32 default_mnt_opts; 122 + __u32 reserved_uid; 123 + __u32 reserved_gid; 124 + __u32 raid_stripe_width; 125 + __u16 encoding; 126 + __u16 encoding_flags; 127 + __u8 def_hash_alg; 128 + __u8 pad_1; 129 + __u16 pad_2; 130 + __u32 feature_compat; 131 + __u32 feature_incompat; 132 + __u32 feature_ro_compat; 133 + __u32 set_feature_compat_mask; 134 + __u32 set_feature_incompat_mask; 135 + __u32 set_feature_ro_compat_mask; 136 + __u32 clear_feature_compat_mask; 137 + __u32 clear_feature_incompat_mask; 138 + __u32 clear_feature_ro_compat_mask; 139 + __u8 mount_opts[64]; 140 + __u8 pad[64]; 141 + }; 142 + 143 + #define EXT4_TUNE_FL_ERRORS_BEHAVIOR 0x00000001 144 + #define EXT4_TUNE_FL_MNT_COUNT 0x00000002 145 + #define EXT4_TUNE_FL_MAX_MNT_COUNT 0x00000004 146 + #define EXT4_TUNE_FL_CHECKINTRVAL 0x00000008 147 + #define EXT4_TUNE_FL_LAST_CHECK_TIME 0x00000010 148 + #define EXT4_TUNE_FL_RESERVED_BLOCKS 0x00000020 149 + #define EXT4_TUNE_FL_RESERVED_UID 0x00000040 150 + #define EXT4_TUNE_FL_RESERVED_GID 0x00000080 151 + #define EXT4_TUNE_FL_DEFAULT_MNT_OPTS 0x00000100 152 + #define EXT4_TUNE_FL_DEF_HASH_ALG 0x00000200 153 + #define EXT4_TUNE_FL_RAID_STRIDE 0x00000400 154 + 
#define EXT4_TUNE_FL_RAID_STRIPE_WIDTH 0x00000800 155 + #define EXT4_TUNE_FL_MOUNT_OPTS 0x00001000 156 + #define EXT4_TUNE_FL_FEATURES 0x00002000 157 + #define EXT4_TUNE_FL_EDIT_FEATURES 0x00004000 158 + #define EXT4_TUNE_FL_FORCE_FSCK 0x00008000 159 + #define EXT4_TUNE_FL_ENCODING 0x00010000 160 + #define EXT4_TUNE_FL_ENCODING_FLAGS 0x00020000 112 161 113 162 /* 114 163 * Returned by EXT4_IOC_GET_ES_CACHE as an additional possible flag.