f2fs: clean up F2FS_I()

Use a temporary variable instead of repeated F2FS_I() calls for cleanup.

Signed-off-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>

Authored by Chao Yu and committed by Jaegeuk Kim · 7309871c · f18d0076
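For readers new to the pattern, the sketch below (illustrative only, not part of the patch) shows its shape: F2FS_I(inode) is evaluated once and the shorter local pointer is reused afterwards. The helper name example_mark_compressed() is hypothetical; the struct, macros and fields (struct f2fs_inode_info, F2FS_I(), BIT(), F2FS_COMPR_FL, FI_COMPRESSED_FILE) are the ones the patch touches.

static void example_mark_compressed(struct inode *inode)
{
	/* look up the f2fs-private part of the inode once ... */
	struct f2fs_inode_info *fi = F2FS_I(inode);

	/* ... and reuse the local instead of repeating F2FS_I(inode) */
	fi->i_flags |= F2FS_COMPR_FL;
	fi->i_cluster_size = BIT(fi->i_log_cluster_size);

	/* helpers that take the VFS inode are unchanged */
	set_inode_flag(inode, FI_COMPRESSED_FILE);
}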

+88 -93
+14 -18
fs/f2fs/f2fs.h
···
 {
 #ifdef CONFIG_F2FS_FS_COMPRESSION
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct f2fs_inode_info *fi = F2FS_I(inode);
 
-	F2FS_I(inode)->i_compress_algorithm =
-			F2FS_OPTION(sbi).compress_algorithm;
-	F2FS_I(inode)->i_log_cluster_size =
-			F2FS_OPTION(sbi).compress_log_size;
-	F2FS_I(inode)->i_compress_flag =
-			F2FS_OPTION(sbi).compress_chksum ?
-				BIT(COMPRESS_CHKSUM) : 0;
-	F2FS_I(inode)->i_cluster_size =
-			BIT(F2FS_I(inode)->i_log_cluster_size);
-	if ((F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 ||
-		F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) &&
+	fi->i_compress_algorithm = F2FS_OPTION(sbi).compress_algorithm;
+	fi->i_log_cluster_size = F2FS_OPTION(sbi).compress_log_size;
+	fi->i_compress_flag = F2FS_OPTION(sbi).compress_chksum ?
+					BIT(COMPRESS_CHKSUM) : 0;
+	fi->i_cluster_size = BIT(fi->i_log_cluster_size);
+	if ((fi->i_compress_algorithm == COMPRESS_LZ4 ||
+		fi->i_compress_algorithm == COMPRESS_ZSTD) &&
 			F2FS_OPTION(sbi).compress_level)
-		F2FS_I(inode)->i_compress_level =
-				F2FS_OPTION(sbi).compress_level;
-	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
+		fi->i_compress_level = F2FS_OPTION(sbi).compress_level;
+	fi->i_flags |= F2FS_COMPR_FL;
 	set_inode_flag(inode, FI_COMPRESSED_FILE);
 	stat_inc_compr_inode(inode);
 	inc_compr_inode_stat(inode);
···
 {
 	struct f2fs_inode_info *fi = F2FS_I(inode);
 
-	f2fs_down_write(&F2FS_I(inode)->i_sem);
+	f2fs_down_write(&fi->i_sem);
 
 	if (!f2fs_compressed_file(inode)) {
-		f2fs_up_write(&F2FS_I(inode)->i_sem);
+		f2fs_up_write(&fi->i_sem);
 		return true;
 	}
 	if (f2fs_is_mmap_file(inode) ||
 		(S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))) {
-		f2fs_up_write(&F2FS_I(inode)->i_sem);
+		f2fs_up_write(&fi->i_sem);
 		return false;
 	}
 
···
 	clear_inode_flag(inode, FI_COMPRESSED_FILE);
 	f2fs_mark_inode_dirty_sync(inode, true);
 
-	f2fs_up_write(&F2FS_I(inode)->i_sem);
+	f2fs_up_write(&fi->i_sem);
 	return true;
 }
+34 -30
fs/f2fs/file.c
···
 		struct iattr *attr)
 {
 	struct inode *inode = d_inode(dentry);
+	struct f2fs_inode_info *fi = F2FS_I(inode);
 	int err;
 
 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
···
 			return -EOPNOTSUPP;
 		if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) &&
 			!IS_ALIGNED(attr->ia_size,
-			F2FS_BLK_TO_BYTES(F2FS_I(inode)->i_cluster_size)))
+			F2FS_BLK_TO_BYTES(fi->i_cluster_size)))
 			return -EINVAL;
 	}
 
···
 				return err;
 		}
 
-		f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+		f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
 		filemap_invalidate_lock(inode->i_mapping);
 
 		truncate_setsize(inode, attr->ia_size);
···
 		 * larger than i_size.
 		 */
 		filemap_invalidate_unlock(inode->i_mapping);
-		f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+		f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
 		if (err)
 			return err;
 
-		spin_lock(&F2FS_I(inode)->i_size_lock);
+		spin_lock(&fi->i_size_lock);
 		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
-		F2FS_I(inode)->last_disk_size = i_size_read(inode);
-		spin_unlock(&F2FS_I(inode)->i_size_lock);
+		fi->last_disk_size = i_size_read(inode);
+		spin_unlock(&fi->i_size_lock);
 	}
 
 	__setattr_copy(idmap, inode, attr);
···
 
 	if (is_inode_flag_set(inode, FI_ACL_MODE)) {
 		if (!err)
-			inode->i_mode = F2FS_I(inode)->i_acl_mode;
+			inode->i_mode = fi->i_acl_mode;
 		clear_inode_flag(inode, FI_ACL_MODE);
 	}
 }
···
 			if (err)
 				return err;
 
-			f2fs_down_write(&F2FS_I(inode)->i_sem);
+			f2fs_down_write(&fi->i_sem);
 			if (!f2fs_may_compress(inode) ||
 					(S_ISREG(inode->i_mode) &&
 					F2FS_HAS_BLOCKS(inode))) {
-				f2fs_up_write(&F2FS_I(inode)->i_sem);
+				f2fs_up_write(&fi->i_sem);
 				return -EINVAL;
 			}
 			err = set_compress_context(inode);
-			f2fs_up_write(&F2FS_I(inode)->i_sem);
+			f2fs_up_write(&fi->i_sem);
 
 			if (err)
 				return err;
···
 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
 {
 	struct inode *inode = file_inode(filp);
+	struct f2fs_inode_info *fi = F2FS_I(inode);
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	pgoff_t page_idx = 0, last_idx;
 	unsigned int released_blocks = 0;
···
 	if (ret)
 		goto out;
 
-	if (!atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
+	if (!atomic_read(&fi->i_compr_blocks)) {
 		ret = -EPERM;
 		goto out;
 	}
···
 	inode_set_ctime_current(inode);
 	f2fs_mark_inode_dirty_sync(inode, true);
 
-	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
 	filemap_invalidate_lock(inode->i_mapping);
 
 	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
···
 
 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
 		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
-		count = round_up(count, F2FS_I(inode)->i_cluster_size);
+		count = round_up(count, fi->i_cluster_size);
 
 		ret = release_compress_blocks(&dn, count);
 
···
 	}
 
 	filemap_invalidate_unlock(inode->i_mapping);
-	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
 out:
 	if (released_blocks)
 		f2fs_update_time(sbi, REQ_TIME);
···
 	if (ret >= 0) {
 		ret = put_user(released_blocks, (u64 __user *)arg);
 	} else if (released_blocks &&
-			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
+			atomic_read(&fi->i_compr_blocks)) {
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
 		f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
 			"iblocks=%llu, released=%u, compr_blocks=%u, "
 			"run fsck to fix.",
 			__func__, inode->i_ino, inode->i_blocks,
 			released_blocks,
-			atomic_read(&F2FS_I(inode)->i_compr_blocks));
+			atomic_read(&fi->i_compr_blocks));
 	}
 
 	return ret;
···
 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
 {
 	struct inode *inode = file_inode(filp);
+	struct f2fs_inode_info *fi = F2FS_I(inode);
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	pgoff_t page_idx = 0, last_idx;
 	unsigned int reserved_blocks = 0;
···
 		goto unlock_inode;
 	}
 
-	if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
+	if (atomic_read(&fi->i_compr_blocks))
 		goto unlock_inode;
 
-	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
 	filemap_invalidate_lock(inode->i_mapping);
 
 	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
···
 
 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
 		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
-		count = round_up(count, F2FS_I(inode)->i_cluster_size);
+		count = round_up(count, fi->i_cluster_size);
 
 		ret = reserve_compress_blocks(&dn, count, &reserved_blocks);
 
···
 	}
 
 	filemap_invalidate_unlock(inode->i_mapping);
-	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
 
 	if (!ret) {
 		clear_inode_flag(inode, FI_COMPRESS_RELEASED);
···
 	if (!ret) {
 		ret = put_user(reserved_blocks, (u64 __user *)arg);
 	} else if (reserved_blocks &&
-			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
+			atomic_read(&fi->i_compr_blocks)) {
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
 		f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
 			"iblocks=%llu, reserved=%u, compr_blocks=%u, "
 			"run fsck to fix.",
 			__func__, inode->i_ino, inode->i_blocks,
 			reserved_blocks,
-			atomic_read(&F2FS_I(inode)->i_compr_blocks));
+			atomic_read(&fi->i_compr_blocks));
 	}
 
 	return ret;
···
 static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
 {
 	struct inode *inode = file_inode(filp);
+	struct f2fs_inode_info *fi = F2FS_I(inode);
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct f2fs_comp_option option;
 	int ret = 0;
···
 		goto out;
 	}
 
-	F2FS_I(inode)->i_compress_algorithm = option.algorithm;
-	F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
-	F2FS_I(inode)->i_cluster_size = BIT(option.log_cluster_size);
+	fi->i_compress_algorithm = option.algorithm;
+	fi->i_log_cluster_size = option.log_cluster_size;
+	fi->i_cluster_size = BIT(option.log_cluster_size);
 	/* Set default level */
-	if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD)
-		F2FS_I(inode)->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
+	if (fi->i_compress_algorithm == COMPRESS_ZSTD)
+		fi->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
 	else
-		F2FS_I(inode)->i_compress_level = 0;
+		fi->i_compress_level = 0;
 	/* Adjust mount option level */
 	if (option.algorithm == F2FS_OPTION(sbi).compress_algorithm &&
 			F2FS_OPTION(sbi).compress_level)
-		F2FS_I(inode)->i_compress_level = F2FS_OPTION(sbi).compress_level;
+		fi->i_compress_level = F2FS_OPTION(sbi).compress_level;
 	f2fs_mark_inode_dirty_sync(inode, true);
 
 	if (!f2fs_is_compress_backend_ready(inode))
 		f2fs_warn(sbi, "compression algorithm is successfully set, "
 			"but current kernel doesn't support this algorithm.");
 out:
-	f2fs_up_write(&F2FS_I(inode)->i_sem);
+	f2fs_up_write(&fi->i_sem);
 	inode_unlock(inode);
 	mnt_drop_write_file(filp);
+23 -31
fs/f2fs/inode.c
···
 
 void f2fs_update_inode(struct inode *inode, struct page *node_page)
 {
+	struct f2fs_inode_info *fi = F2FS_I(inode);
 	struct f2fs_inode *ri;
-	struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
+	struct extent_tree *et = fi->extent_tree[EX_READ];
 
 	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
 	set_page_dirty(node_page);
···
 	ri = F2FS_INODE(node_page);
 
 	ri->i_mode = cpu_to_le16(inode->i_mode);
-	ri->i_advise = F2FS_I(inode)->i_advise;
+	ri->i_advise = fi->i_advise;
 	ri->i_uid = cpu_to_le32(i_uid_read(inode));
 	ri->i_gid = cpu_to_le32(i_gid_read(inode));
 	ri->i_links = cpu_to_le32(inode->i_nlink);
···
 	ri->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
 	ri->i_mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
 	if (S_ISDIR(inode->i_mode))
-		ri->i_current_depth =
-			cpu_to_le32(F2FS_I(inode)->i_current_depth);
+		ri->i_current_depth = cpu_to_le32(fi->i_current_depth);
 	else if (S_ISREG(inode->i_mode))
-		ri->i_gc_failures = cpu_to_le16(F2FS_I(inode)->i_gc_failures);
-	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
-	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
-	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
+		ri->i_gc_failures = cpu_to_le16(fi->i_gc_failures);
+	ri->i_xattr_nid = cpu_to_le32(fi->i_xattr_nid);
+	ri->i_flags = cpu_to_le32(fi->i_flags);
+	ri->i_pino = cpu_to_le32(fi->i_pino);
 	ri->i_generation = cpu_to_le32(inode->i_generation);
-	ri->i_dir_level = F2FS_I(inode)->i_dir_level;
+	ri->i_dir_level = fi->i_dir_level;
 
 	if (f2fs_has_extra_attr(inode)) {
-		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);
+		ri->i_extra_isize = cpu_to_le16(fi->i_extra_isize);
 
 		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
 			ri->i_inline_xattr_size =
-				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);
+				cpu_to_le16(fi->i_inline_xattr_size);
 
 		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
-			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
-								i_projid)) {
+			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid)) {
 			projid_t i_projid;
 
-			i_projid = from_kprojid(&init_user_ns,
-						F2FS_I(inode)->i_projid);
+			i_projid = from_kprojid(&init_user_ns, fi->i_projid);
 			ri->i_projid = cpu_to_le32(i_projid);
 		}
 
 		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
-			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
-								i_crtime)) {
-			ri->i_crtime =
-				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
-			ri->i_crtime_nsec =
-				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
+			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
+			ri->i_crtime = cpu_to_le64(fi->i_crtime.tv_sec);
+			ri->i_crtime_nsec = cpu_to_le32(fi->i_crtime.tv_nsec);
 		}
 
 		if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
-			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
+			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
 							i_compress_flag)) {
 			unsigned short compress_flag;
 
-			ri->i_compr_blocks =
-				cpu_to_le64(atomic_read(
-					&F2FS_I(inode)->i_compr_blocks));
-			ri->i_compress_algorithm =
-				F2FS_I(inode)->i_compress_algorithm;
-			compress_flag = F2FS_I(inode)->i_compress_flag |
-				F2FS_I(inode)->i_compress_level <<
+			ri->i_compr_blocks = cpu_to_le64(
+					atomic_read(&fi->i_compr_blocks));
+			ri->i_compress_algorithm = fi->i_compress_algorithm;
+			compress_flag = fi->i_compress_flag |
+						fi->i_compress_level <<
 						COMPRESS_LEVEL_OFFSET;
 			ri->i_compress_flag = cpu_to_le16(compress_flag);
-			ri->i_log_cluster_size =
-				F2FS_I(inode)->i_log_cluster_size;
+			ri->i_log_cluster_size = fi->i_log_cluster_size;
 		}
 	}
+11 -9
fs/f2fs/namei.c
···
 						const char *name)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+	struct f2fs_inode_info *fi;
 	nid_t ino;
 	struct inode *inode;
 	bool nid_free = false;
···
 
 	inode_init_owner(idmap, inode, dir, mode);
 
+	fi = F2FS_I(inode);
 	inode->i_ino = ino;
 	inode->i_blocks = 0;
 	simple_inode_init_ts(inode);
-	F2FS_I(inode)->i_crtime = inode_get_mtime(inode);
+	fi->i_crtime = inode_get_mtime(inode);
 	inode->i_generation = get_random_u32();
 
 	if (S_ISDIR(inode->i_mode))
-		F2FS_I(inode)->i_current_depth = 1;
+		fi->i_current_depth = 1;
 
 	err = insert_inode_locked(inode);
 	if (err) {
···
 
 	if (f2fs_sb_has_project_quota(sbi) &&
 		(F2FS_I(dir)->i_flags & F2FS_PROJINHERIT_FL))
-		F2FS_I(inode)->i_projid = F2FS_I(dir)->i_projid;
+		fi->i_projid = F2FS_I(dir)->i_projid;
 	else
-		F2FS_I(inode)->i_projid = make_kprojid(&init_user_ns,
+		fi->i_projid = make_kprojid(&init_user_ns,
 							F2FS_DEF_PROJID);
 
 	err = fscrypt_prepare_new_inode(dir, inode, &encrypt);
···
 
 	if (f2fs_sb_has_extra_attr(sbi)) {
 		set_inode_flag(inode, FI_EXTRA_ATTR);
-		F2FS_I(inode)->i_extra_isize = F2FS_TOTAL_EXTRA_ATTR_SIZE;
+		fi->i_extra_isize = F2FS_TOTAL_EXTRA_ATTR_SIZE;
 	}
 
 	if (test_opt(sbi, INLINE_XATTR))
···
 			f2fs_has_inline_dentry(inode)) {
 		xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
 	}
-	F2FS_I(inode)->i_inline_xattr_size = xattr_size;
+	fi->i_inline_xattr_size = xattr_size;
 
-	F2FS_I(inode)->i_flags =
+	fi->i_flags =
 		f2fs_mask_flags(mode, F2FS_I(dir)->i_flags & F2FS_FL_INHERITED);
 
 	if (S_ISDIR(inode->i_mode))
-		F2FS_I(inode)->i_flags |= F2FS_INDEX_FL;
+		fi->i_flags |= F2FS_INDEX_FL;
 
-	if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL)
+	if (fi->i_flags & F2FS_PROJINHERIT_FL)
 		set_inode_flag(inode, FI_PROJ_INHERIT);
 
 	/* Check compression first. */
+6 -5
fs/f2fs/recovery.c
···
 static int recover_inode(struct inode *inode, struct page *page)
 {
 	struct f2fs_inode *raw = F2FS_INODE(page);
+	struct f2fs_inode_info *fi = F2FS_I(inode);
 	char *name;
 	int err;
 
···
 			i_projid = (projid_t)le32_to_cpu(raw->i_projid);
 			kprojid = make_kprojid(&init_user_ns, i_projid);
 
-			if (!projid_eq(kprojid, F2FS_I(inode)->i_projid)) {
+			if (!projid_eq(kprojid, fi->i_projid)) {
 				err = f2fs_transfer_project_quota(inode,
 								kprojid);
 				if (err)
 					return err;
-				F2FS_I(inode)->i_projid = kprojid;
+				fi->i_projid = kprojid;
 			}
 		}
 	}
···
 	inode_set_mtime(inode, le64_to_cpu(raw->i_mtime),
 			le32_to_cpu(raw->i_mtime_nsec));
 
-	F2FS_I(inode)->i_advise = raw->i_advise;
-	F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
+	fi->i_advise = raw->i_advise;
+	fi->i_flags = le32_to_cpu(raw->i_flags);
 	f2fs_set_inode_flags(inode);
-	F2FS_I(inode)->i_gc_failures = le16_to_cpu(raw->i_gc_failures);
+	fi->i_gc_failures = le16_to_cpu(raw->i_gc_failures);
 
 	recover_inline_flags(inode, raw);