f2fs: clean up F2FS_I()

Use a temporary variable to hold F2FS_I(inode) instead of calling it
repeatedly, as a cleanup. No functional change.

Signed-off-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>

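The pattern is mechanical: F2FS_I() is a container_of()-style accessor
that maps a VFS inode to the f2fs_inode_info embedding it, so every call
site recomputes the same pointer. Hoisting one call into a local "fi"
shortens each field access and lets previously wrapped statements fit on
one line. Below is a minimal standalone sketch of the idea, using
hypothetical inode_info/INODE_I stand-ins rather than the real f2fs
types:

/*
 * Standalone sketch of the cleanup pattern (hypothetical
 * inode_info/INODE_I stand-ins, not the real f2fs code).
 * Build: cc -Wall sketch.c
 */
#include <stddef.h>
#include <stdio.h>

struct inode { unsigned long i_ino; };

struct inode_info {
	struct inode vfs_inode;		/* embedded VFS inode */
	unsigned int i_flags;
	unsigned int i_cluster_size;
};

/* container_of()-style accessor, as F2FS_I() is in fs/f2fs/f2fs.h */
static struct inode_info *INODE_I(struct inode *inode)
{
	return (struct inode_info *)((char *)inode -
			offsetof(struct inode_info, vfs_inode));
}

static void update_info(struct inode *inode)
{
	struct inode_info *fi = INODE_I(inode);	/* resolve once */

	/* before the cleanup, each line spelled INODE_I(inode)->... */
	fi->i_flags |= 0x1;
	fi->i_cluster_size = 16;
}

int main(void)
{
	struct inode_info info = { .vfs_inode = { .i_ino = 3 } };

	update_info(&info.vfs_inode);
	printf("flags=%#x cluster_size=%u\n", info.i_flags,
	       info.i_cluster_size);
	return 0;
}

The compiler typically folds the repeated accessor calls anyway, so the
payoff is readability rather than generated code; the diffstat below is
net negative mostly because multi-line statements collapse to one line.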

5 files changed: +88 -93

fs/f2fs/f2fs.h: +14 -18
···
 {
 #ifdef CONFIG_F2FS_FS_COMPRESSION
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct f2fs_inode_info *fi = F2FS_I(inode);
 
-	F2FS_I(inode)->i_compress_algorithm =
-			F2FS_OPTION(sbi).compress_algorithm;
-	F2FS_I(inode)->i_log_cluster_size =
-			F2FS_OPTION(sbi).compress_log_size;
-	F2FS_I(inode)->i_compress_flag =
-			F2FS_OPTION(sbi).compress_chksum ?
-				BIT(COMPRESS_CHKSUM) : 0;
-	F2FS_I(inode)->i_cluster_size =
-			BIT(F2FS_I(inode)->i_log_cluster_size);
-	if ((F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 ||
-		F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) &&
+	fi->i_compress_algorithm = F2FS_OPTION(sbi).compress_algorithm;
+	fi->i_log_cluster_size = F2FS_OPTION(sbi).compress_log_size;
+	fi->i_compress_flag = F2FS_OPTION(sbi).compress_chksum ?
+				BIT(COMPRESS_CHKSUM) : 0;
+	fi->i_cluster_size = BIT(fi->i_log_cluster_size);
+	if ((fi->i_compress_algorithm == COMPRESS_LZ4 ||
+		fi->i_compress_algorithm == COMPRESS_ZSTD) &&
 			F2FS_OPTION(sbi).compress_level)
-		F2FS_I(inode)->i_compress_level =
-			F2FS_OPTION(sbi).compress_level;
-	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
+		fi->i_compress_level = F2FS_OPTION(sbi).compress_level;
+	fi->i_flags |= F2FS_COMPR_FL;
 	set_inode_flag(inode, FI_COMPRESSED_FILE);
 	stat_inc_compr_inode(inode);
 	inc_compr_inode_stat(inode);
···
 {
 	struct f2fs_inode_info *fi = F2FS_I(inode);
 
-	f2fs_down_write(&F2FS_I(inode)->i_sem);
+	f2fs_down_write(&fi->i_sem);
 
 	if (!f2fs_compressed_file(inode)) {
-		f2fs_up_write(&F2FS_I(inode)->i_sem);
+		f2fs_up_write(&fi->i_sem);
 		return true;
 	}
 	if (f2fs_is_mmap_file(inode) ||
 		(S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))) {
-		f2fs_up_write(&F2FS_I(inode)->i_sem);
+		f2fs_up_write(&fi->i_sem);
 		return false;
 	}
 
···
 	clear_inode_flag(inode, FI_COMPRESSED_FILE);
 	f2fs_mark_inode_dirty_sync(inode, true);
 
-	f2fs_up_write(&F2FS_I(inode)->i_sem);
+	f2fs_up_write(&fi->i_sem);
 	return true;
 }

fs/f2fs/file.c: +34 -30
···
 						struct iattr *attr)
 {
 	struct inode *inode = d_inode(dentry);
+	struct f2fs_inode_info *fi = F2FS_I(inode);
 	int err;
 
 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
···
 			return -EOPNOTSUPP;
 		if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) &&
 			!IS_ALIGNED(attr->ia_size,
-			F2FS_BLK_TO_BYTES(F2FS_I(inode)->i_cluster_size)))
+			F2FS_BLK_TO_BYTES(fi->i_cluster_size)))
 			return -EINVAL;
 	}
 
···
 			return err;
 		}
 
-		f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+		f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
 		filemap_invalidate_lock(inode->i_mapping);
 
 		truncate_setsize(inode, attr->ia_size);
···
 		 * larger than i_size.
 		 */
 		filemap_invalidate_unlock(inode->i_mapping);
-		f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+		f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
 		if (err)
 			return err;
 
-		spin_lock(&F2FS_I(inode)->i_size_lock);
+		spin_lock(&fi->i_size_lock);
 		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
-		F2FS_I(inode)->last_disk_size = i_size_read(inode);
-		spin_unlock(&F2FS_I(inode)->i_size_lock);
+		fi->last_disk_size = i_size_read(inode);
+		spin_unlock(&fi->i_size_lock);
 	}
 
 	__setattr_copy(idmap, inode, attr);
···
 
 	if (is_inode_flag_set(inode, FI_ACL_MODE)) {
 		if (!err)
-			inode->i_mode = F2FS_I(inode)->i_acl_mode;
+			inode->i_mode = fi->i_acl_mode;
 		clear_inode_flag(inode, FI_ACL_MODE);
 	}
 }
···
 	if (err)
 		return err;
 
-	f2fs_down_write(&F2FS_I(inode)->i_sem);
+	f2fs_down_write(&fi->i_sem);
 	if (!f2fs_may_compress(inode) ||
 		(S_ISREG(inode->i_mode) &&
 		F2FS_HAS_BLOCKS(inode))) {
-		f2fs_up_write(&F2FS_I(inode)->i_sem);
+		f2fs_up_write(&fi->i_sem);
 		return -EINVAL;
 	}
 	err = set_compress_context(inode);
-	f2fs_up_write(&F2FS_I(inode)->i_sem);
+	f2fs_up_write(&fi->i_sem);
 
 	if (err)
 		return err;
···
 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
 {
 	struct inode *inode = file_inode(filp);
+	struct f2fs_inode_info *fi = F2FS_I(inode);
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	pgoff_t page_idx = 0, last_idx;
 	unsigned int released_blocks = 0;
···
 	if (ret)
 		goto out;
 
-	if (!atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
+	if (!atomic_read(&fi->i_compr_blocks)) {
 		ret = -EPERM;
 		goto out;
 	}
···
 	inode_set_ctime_current(inode);
 	f2fs_mark_inode_dirty_sync(inode, true);
 
-	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
 	filemap_invalidate_lock(inode->i_mapping);
 
 	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
···
 
 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
 		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
-		count = round_up(count, F2FS_I(inode)->i_cluster_size);
+		count = round_up(count, fi->i_cluster_size);
 
 		ret = release_compress_blocks(&dn, count);
···
 	}
 
 	filemap_invalidate_unlock(inode->i_mapping);
-	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
 out:
 	if (released_blocks)
 		f2fs_update_time(sbi, REQ_TIME);
···
 	if (ret >= 0) {
 		ret = put_user(released_blocks, (u64 __user *)arg);
 	} else if (released_blocks &&
-			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
+			atomic_read(&fi->i_compr_blocks)) {
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
 		f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
 			"iblocks=%llu, released=%u, compr_blocks=%u, "
 			"run fsck to fix.",
 			__func__, inode->i_ino, inode->i_blocks,
 			released_blocks,
-			atomic_read(&F2FS_I(inode)->i_compr_blocks));
+			atomic_read(&fi->i_compr_blocks));
 	}
 
 	return ret;
···
 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
 {
 	struct inode *inode = file_inode(filp);
+	struct f2fs_inode_info *fi = F2FS_I(inode);
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	pgoff_t page_idx = 0, last_idx;
 	unsigned int reserved_blocks = 0;
···
 		goto unlock_inode;
 	}
 
-	if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
+	if (atomic_read(&fi->i_compr_blocks))
 		goto unlock_inode;
 
-	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
 	filemap_invalidate_lock(inode->i_mapping);
 
 	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
···
 
 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
 		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
-		count = round_up(count, F2FS_I(inode)->i_cluster_size);
+		count = round_up(count, fi->i_cluster_size);
 
 		ret = reserve_compress_blocks(&dn, count, &reserved_blocks);
···
 	}
 
 	filemap_invalidate_unlock(inode->i_mapping);
-	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
 
 	if (!ret) {
 		clear_inode_flag(inode, FI_COMPRESS_RELEASED);
···
 	if (!ret) {
 		ret = put_user(reserved_blocks, (u64 __user *)arg);
 	} else if (reserved_blocks &&
-			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
+			atomic_read(&fi->i_compr_blocks)) {
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
 		f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
 			"iblocks=%llu, reserved=%u, compr_blocks=%u, "
 			"run fsck to fix.",
 			__func__, inode->i_ino, inode->i_blocks,
 			reserved_blocks,
-			atomic_read(&F2FS_I(inode)->i_compr_blocks));
+			atomic_read(&fi->i_compr_blocks));
 	}
 
 	return ret;
···
 static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
 {
 	struct inode *inode = file_inode(filp);
+	struct f2fs_inode_info *fi = F2FS_I(inode);
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct f2fs_comp_option option;
 	int ret = 0;
···
 		goto out;
 	}
 
-	F2FS_I(inode)->i_compress_algorithm = option.algorithm;
-	F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
-	F2FS_I(inode)->i_cluster_size = BIT(option.log_cluster_size);
+	fi->i_compress_algorithm = option.algorithm;
+	fi->i_log_cluster_size = option.log_cluster_size;
+	fi->i_cluster_size = BIT(option.log_cluster_size);
 	/* Set default level */
-	if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD)
-		F2FS_I(inode)->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
+	if (fi->i_compress_algorithm == COMPRESS_ZSTD)
+		fi->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
 	else
-		F2FS_I(inode)->i_compress_level = 0;
+		fi->i_compress_level = 0;
 	/* Adjust mount option level */
 	if (option.algorithm == F2FS_OPTION(sbi).compress_algorithm &&
 		F2FS_OPTION(sbi).compress_level)
-		F2FS_I(inode)->i_compress_level = F2FS_OPTION(sbi).compress_level;
+		fi->i_compress_level = F2FS_OPTION(sbi).compress_level;
 	f2fs_mark_inode_dirty_sync(inode, true);
 
 	if (!f2fs_is_compress_backend_ready(inode))
 		f2fs_warn(sbi, "compression algorithm is successfully set, "
 			"but current kernel doesn't support this algorithm.");
 out:
-	f2fs_up_write(&F2FS_I(inode)->i_sem);
+	f2fs_up_write(&fi->i_sem);
 	inode_unlock(inode);
 	mnt_drop_write_file(filp);

fs/f2fs/inode.c: +23 -31
···
 
 void f2fs_update_inode(struct inode *inode, struct page *node_page)
 {
+	struct f2fs_inode_info *fi = F2FS_I(inode);
 	struct f2fs_inode *ri;
-	struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
+	struct extent_tree *et = fi->extent_tree[EX_READ];
 
 	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
 	set_page_dirty(node_page);
···
 	ri = F2FS_INODE(node_page);
 
 	ri->i_mode = cpu_to_le16(inode->i_mode);
-	ri->i_advise = F2FS_I(inode)->i_advise;
+	ri->i_advise = fi->i_advise;
 	ri->i_uid = cpu_to_le32(i_uid_read(inode));
 	ri->i_gid = cpu_to_le32(i_gid_read(inode));
 	ri->i_links = cpu_to_le32(inode->i_nlink);
···
 	ri->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
 	ri->i_mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
 	if (S_ISDIR(inode->i_mode))
-		ri->i_current_depth =
-			cpu_to_le32(F2FS_I(inode)->i_current_depth);
+		ri->i_current_depth = cpu_to_le32(fi->i_current_depth);
 	else if (S_ISREG(inode->i_mode))
-		ri->i_gc_failures = cpu_to_le16(F2FS_I(inode)->i_gc_failures);
-	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
-	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
-	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
+		ri->i_gc_failures = cpu_to_le16(fi->i_gc_failures);
+	ri->i_xattr_nid = cpu_to_le32(fi->i_xattr_nid);
+	ri->i_flags = cpu_to_le32(fi->i_flags);
+	ri->i_pino = cpu_to_le32(fi->i_pino);
 	ri->i_generation = cpu_to_le32(inode->i_generation);
-	ri->i_dir_level = F2FS_I(inode)->i_dir_level;
+	ri->i_dir_level = fi->i_dir_level;
 
 	if (f2fs_has_extra_attr(inode)) {
-		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);
+		ri->i_extra_isize = cpu_to_le16(fi->i_extra_isize);
 
 		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
 			ri->i_inline_xattr_size =
-				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);
+				cpu_to_le16(fi->i_inline_xattr_size);
 
 		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
-			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
-								i_projid)) {
+			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid)) {
 			projid_t i_projid;
 
-			i_projid = from_kprojid(&init_user_ns,
-						F2FS_I(inode)->i_projid);
+			i_projid = from_kprojid(&init_user_ns, fi->i_projid);
 			ri->i_projid = cpu_to_le32(i_projid);
 		}
 
 		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
-			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
-								i_crtime)) {
-			ri->i_crtime =
-				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
-			ri->i_crtime_nsec =
-				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
+			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
+			ri->i_crtime = cpu_to_le64(fi->i_crtime.tv_sec);
+			ri->i_crtime_nsec = cpu_to_le32(fi->i_crtime.tv_nsec);
 		}
 
 		if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
-			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
+			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
 					i_compress_flag)) {
 			unsigned short compress_flag;
 
-			ri->i_compr_blocks =
-				cpu_to_le64(atomic_read(
-					&F2FS_I(inode)->i_compr_blocks));
-			ri->i_compress_algorithm =
-				F2FS_I(inode)->i_compress_algorithm;
-			compress_flag = F2FS_I(inode)->i_compress_flag |
-				F2FS_I(inode)->i_compress_level <<
+			ri->i_compr_blocks = cpu_to_le64(
+					atomic_read(&fi->i_compr_blocks));
+			ri->i_compress_algorithm = fi->i_compress_algorithm;
+			compress_flag = fi->i_compress_flag |
+					fi->i_compress_level <<
 						COMPRESS_LEVEL_OFFSET;
 			ri->i_compress_flag = cpu_to_le16(compress_flag);
-			ri->i_log_cluster_size =
-				F2FS_I(inode)->i_log_cluster_size;
+			ri->i_log_cluster_size = fi->i_log_cluster_size;
 		}
 	}

fs/f2fs/namei.c: +11 -9
···
 						const char *name)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+	struct f2fs_inode_info *fi;
 	nid_t ino;
 	struct inode *inode;
 	bool nid_free = false;
···
 
 	inode_init_owner(idmap, inode, dir, mode);
 
+	fi = F2FS_I(inode);
 	inode->i_ino = ino;
 	inode->i_blocks = 0;
 	simple_inode_init_ts(inode);
-	F2FS_I(inode)->i_crtime = inode_get_mtime(inode);
+	fi->i_crtime = inode_get_mtime(inode);
 	inode->i_generation = get_random_u32();
 
 	if (S_ISDIR(inode->i_mode))
-		F2FS_I(inode)->i_current_depth = 1;
+		fi->i_current_depth = 1;
 
 	err = insert_inode_locked(inode);
 	if (err) {
···
 
 	if (f2fs_sb_has_project_quota(sbi) &&
 		(F2FS_I(dir)->i_flags & F2FS_PROJINHERIT_FL))
-		F2FS_I(inode)->i_projid = F2FS_I(dir)->i_projid;
+		fi->i_projid = F2FS_I(dir)->i_projid;
 	else
-		F2FS_I(inode)->i_projid = make_kprojid(&init_user_ns,
+		fi->i_projid = make_kprojid(&init_user_ns,
 							F2FS_DEF_PROJID);
 
 	err = fscrypt_prepare_new_inode(dir, inode, &encrypt);
···
 
 	if (f2fs_sb_has_extra_attr(sbi)) {
 		set_inode_flag(inode, FI_EXTRA_ATTR);
-		F2FS_I(inode)->i_extra_isize = F2FS_TOTAL_EXTRA_ATTR_SIZE;
+		fi->i_extra_isize = F2FS_TOTAL_EXTRA_ATTR_SIZE;
 	}
 
 	if (test_opt(sbi, INLINE_XATTR))
···
 		f2fs_has_inline_dentry(inode)) {
 		xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
 	}
-	F2FS_I(inode)->i_inline_xattr_size = xattr_size;
+	fi->i_inline_xattr_size = xattr_size;
 
-	F2FS_I(inode)->i_flags =
+	fi->i_flags =
 		f2fs_mask_flags(mode, F2FS_I(dir)->i_flags & F2FS_FL_INHERITED);
 
 	if (S_ISDIR(inode->i_mode))
-		F2FS_I(inode)->i_flags |= F2FS_INDEX_FL;
+		fi->i_flags |= F2FS_INDEX_FL;
 
-	if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL)
+	if (fi->i_flags & F2FS_PROJINHERIT_FL)
 		set_inode_flag(inode, FI_PROJ_INHERIT);
 
 	/* Check compression first. */

fs/f2fs/recovery.c: +6 -5
···
 static int recover_inode(struct inode *inode, struct page *page)
 {
 	struct f2fs_inode *raw = F2FS_INODE(page);
+	struct f2fs_inode_info *fi = F2FS_I(inode);
 	char *name;
 	int err;
 
···
 			i_projid = (projid_t)le32_to_cpu(raw->i_projid);
 			kprojid = make_kprojid(&init_user_ns, i_projid);
 
-			if (!projid_eq(kprojid, F2FS_I(inode)->i_projid)) {
+			if (!projid_eq(kprojid, fi->i_projid)) {
 				err = f2fs_transfer_project_quota(inode,
 								kprojid);
 				if (err)
 					return err;
-				F2FS_I(inode)->i_projid = kprojid;
+				fi->i_projid = kprojid;
 			}
 		}
 	}
···
 	inode_set_mtime(inode, le64_to_cpu(raw->i_mtime),
 			le32_to_cpu(raw->i_mtime_nsec));
 
-	F2FS_I(inode)->i_advise = raw->i_advise;
-	F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
+	fi->i_advise = raw->i_advise;
+	fi->i_flags = le32_to_cpu(raw->i_flags);
 	f2fs_set_inode_flags(inode);
-	F2FS_I(inode)->i_gc_failures = le16_to_cpu(raw->i_gc_failures);
+	fi->i_gc_failures = le16_to_cpu(raw->i_gc_failures);
 
 	recover_inline_flags(inode, raw);