Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

f2fs: introduce f2fs_<level> macros to wrap f2fs_printk()

- Add and use f2fs_<level> macros
- Convert f2fs_msg to f2fs_printk
- Remove level from f2fs_printk and embed the level in the format
- Coalesce formats and align multi-line arguments
- Remove unnecessary duplicate extern f2fs_msg in f2fs.h

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>

authored by

Joe Perches and committed by
Jaegeuk Kim
dcbb4c10 8740edc3

+350 -506
+14 -20
fs/f2fs/checkpoint.c
··· 146 146 147 147 exist = f2fs_test_bit(offset, se->cur_valid_map); 148 148 if (!exist && type == DATA_GENERIC_ENHANCE) { 149 - f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error " 150 - "blkaddr:%u, sit bitmap:%d", blkaddr, exist); 149 + f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d", 150 + blkaddr, exist); 151 151 set_sbi_flag(sbi, SBI_NEED_FSCK); 152 152 WARN_ON(1); 153 153 } ··· 184 184 case DATA_GENERIC_ENHANCE_READ: 185 185 if (unlikely(blkaddr >= MAX_BLKADDR(sbi) || 186 186 blkaddr < MAIN_BLKADDR(sbi))) { 187 - f2fs_msg(sbi->sb, KERN_WARNING, 188 - "access invalid blkaddr:%u", blkaddr); 187 + f2fs_warn(sbi, "access invalid blkaddr:%u", 188 + blkaddr); 189 189 set_sbi_flag(sbi, SBI_NEED_FSCK); 190 190 WARN_ON(1); 191 191 return false; ··· 657 657 658 658 err_out: 659 659 set_sbi_flag(sbi, SBI_NEED_FSCK); 660 - f2fs_msg(sbi->sb, KERN_WARNING, 661 - "%s: orphan failed (ino=%x), run fsck to fix.", 662 - __func__, ino); 660 + f2fs_warn(sbi, "%s: orphan failed (ino=%x), run fsck to fix.", 661 + __func__, ino); 663 662 return err; 664 663 } 665 664 ··· 675 676 return 0; 676 677 677 678 if (bdev_read_only(sbi->sb->s_bdev)) { 678 - f2fs_msg(sbi->sb, KERN_INFO, "write access " 679 - "unavailable, skipping orphan cleanup"); 679 + f2fs_info(sbi, "write access unavailable, skipping orphan cleanup"); 680 680 return 0; 681 681 } 682 682 683 683 if (s_flags & SB_RDONLY) { 684 - f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs"); 684 + f2fs_info(sbi, "orphan cleanup on readonly fs"); 685 685 sbi->sb->s_flags &= ~SB_RDONLY; 686 686 } 687 687 ··· 825 827 if (crc_offset < CP_MIN_CHKSUM_OFFSET || 826 828 crc_offset > CP_CHKSUM_OFFSET) { 827 829 f2fs_put_page(*cp_page, 1); 828 - f2fs_msg(sbi->sb, KERN_WARNING, 829 - "invalid crc_offset: %zu", crc_offset); 830 + f2fs_warn(sbi, "invalid crc_offset: %zu", crc_offset); 830 831 return -EINVAL; 831 832 } 832 833 833 834 crc = f2fs_checkpoint_chksum(sbi, *cp_block); 834 835 if (crc != cur_cp_crc(*cp_block)) { 835 
836 f2fs_put_page(*cp_page, 1); 836 - f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value"); 837 + f2fs_warn(sbi, "invalid crc value"); 837 838 return -EINVAL; 838 839 } 839 840 ··· 855 858 856 859 if (le32_to_cpu(cp_block->cp_pack_total_block_count) > 857 860 sbi->blocks_per_seg) { 858 - f2fs_msg(sbi->sb, KERN_WARNING, 859 - "invalid cp_pack_total_block_count:%u", 860 - le32_to_cpu(cp_block->cp_pack_total_block_count)); 861 + f2fs_warn(sbi, "invalid cp_pack_total_block_count:%u", 862 + le32_to_cpu(cp_block->cp_pack_total_block_count)); 861 863 goto invalid_cp; 862 864 } 863 865 pre_version = *version; ··· 1555 1559 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 1556 1560 if (cpc->reason != CP_PAUSE) 1557 1561 return 0; 1558 - f2fs_msg(sbi->sb, KERN_WARNING, 1559 - "Start checkpoint disabled!"); 1562 + f2fs_warn(sbi, "Start checkpoint disabled!"); 1560 1563 } 1561 1564 mutex_lock(&sbi->cp_mutex); 1562 1565 ··· 1621 1626 stat_inc_cp_count(sbi->stat_info); 1622 1627 1623 1628 if (cpc->reason & CP_RECOVERY) 1624 - f2fs_msg(sbi->sb, KERN_NOTICE, 1625 - "checkpoint: version = %llx", ckpt_ver); 1629 + f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver); 1626 1630 1627 1631 /* do checkpoint periodically */ 1628 1632 f2fs_update_time(sbi, CP_TIME);
+4 -6
fs/f2fs/dir.c
··· 218 218 219 219 max_depth = F2FS_I(dir)->i_current_depth; 220 220 if (unlikely(max_depth > MAX_DIR_HASH_DEPTH)) { 221 - f2fs_msg(F2FS_I_SB(dir)->sb, KERN_WARNING, 222 - "Corrupted max_depth of %lu: %u", 223 - dir->i_ino, max_depth); 221 + f2fs_warn(F2FS_I_SB(dir), "Corrupted max_depth of %lu: %u", 222 + dir->i_ino, max_depth); 224 223 max_depth = MAX_DIR_HASH_DEPTH; 225 224 f2fs_i_depth_write(dir, max_depth); 226 225 } ··· 815 816 bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len)); 816 817 if (unlikely(bit_pos > d->max || 817 818 le16_to_cpu(de->name_len) > F2FS_NAME_LEN)) { 818 - f2fs_msg(sbi->sb, KERN_WARNING, 819 - "%s: corrupted namelen=%d, run fsck to fix.", 820 - __func__, le16_to_cpu(de->name_len)); 819 + f2fs_warn(sbi, "%s: corrupted namelen=%d, run fsck to fix.", 820 + __func__, le16_to_cpu(de->name_len)); 821 821 set_sbi_flag(sbi, SBI_NEED_FSCK); 822 822 err = -EINVAL; 823 823 goto out;
+3 -4
fs/f2fs/extent_cache.c
··· 184 184 next_re = rb_entry(next, struct rb_entry, rb_node); 185 185 186 186 if (cur_re->ofs + cur_re->len > next_re->ofs) { 187 - f2fs_msg(sbi->sb, KERN_INFO, "inconsistent rbtree, " 188 - "cur(%u, %u) next(%u, %u)", 189 - cur_re->ofs, cur_re->len, 190 - next_re->ofs, next_re->len); 187 + f2fs_info(sbi, "inconsistent rbtree, cur(%u, %u) next(%u, %u)", 188 + cur_re->ofs, cur_re->len, 189 + next_re->ofs, next_re->len); 191 190 return false; 192 191 } 193 192
+23 -15
fs/f2fs/f2fs.h
··· 1808 1808 return -ENOSPC; 1809 1809 } 1810 1810 1811 - void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...); 1811 + __printf(2, 3) 1812 + void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...); 1813 + 1814 + #define f2fs_err(sbi, fmt, ...) \ 1815 + f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__) 1816 + #define f2fs_warn(sbi, fmt, ...) \ 1817 + f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__) 1818 + #define f2fs_notice(sbi, fmt, ...) \ 1819 + f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__) 1820 + #define f2fs_info(sbi, fmt, ...) \ 1821 + f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__) 1822 + #define f2fs_debug(sbi, fmt, ...) \ 1823 + f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__) 1824 + 1812 1825 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, 1813 1826 struct inode *inode, 1814 1827 block_t count) ··· 1837 1824 sbi->current_reserved_blocks + count); 1838 1825 spin_unlock(&sbi->stat_lock); 1839 1826 if (unlikely(inode->i_blocks < sectors)) { 1840 - f2fs_msg(sbi->sb, KERN_WARNING, 1841 - "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu", 1842 - inode->i_ino, 1843 - (unsigned long long)inode->i_blocks, 1844 - (unsigned long long)sectors); 1827 + f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu", 1828 + inode->i_ino, 1829 + (unsigned long long)inode->i_blocks, 1830 + (unsigned long long)sectors); 1845 1831 set_sbi_flag(sbi, SBI_NEED_FSCK); 1846 1832 return; 1847 1833 } ··· 2078 2066 dquot_free_inode(inode); 2079 2067 } else { 2080 2068 if (unlikely(inode->i_blocks == 0)) { 2081 - f2fs_msg(sbi->sb, KERN_WARNING, 2082 - "Inconsistent i_blocks, ino:%lu, iblocks:%llu", 2083 - inode->i_ino, 2084 - (unsigned long long)inode->i_blocks); 2069 + f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu", 2070 + inode->i_ino, 2071 + (unsigned long long)inode->i_blocks); 2085 2072 set_sbi_flag(sbi, SBI_NEED_FSCK); 2086 2073 return; 2087 2074 } ··· 2850 2839 block_t blkaddr, int 
type) 2851 2840 { 2852 2841 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) { 2853 - f2fs_msg(sbi->sb, KERN_ERR, 2854 - "invalid blkaddr: %u, type: %d, run fsck to fix.", 2855 - blkaddr, type); 2842 + f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.", 2843 + blkaddr, type); 2856 2844 f2fs_bug_on(sbi, 1); 2857 2845 } 2858 2846 } ··· 2982 2972 void f2fs_quota_off_umount(struct super_block *sb); 2983 2973 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover); 2984 2974 int f2fs_sync_fs(struct super_block *sb, int sync); 2985 - extern __printf(3, 4) 2986 - void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...); 2987 2975 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi); 2988 2976 2989 2977 /*
+8 -13
fs/f2fs/file.c
··· 1837 1837 * f2fs_is_atomic_file. 1838 1838 */ 1839 1839 if (get_dirty_pages(inode)) 1840 - f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING, 1841 - "Unexpected flush for atomic writes: ino=%lu, npages=%u", 1842 - inode->i_ino, get_dirty_pages(inode)); 1840 + f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u", 1841 + inode->i_ino, get_dirty_pages(inode)); 1843 1842 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); 1844 1843 if (ret) { 1845 1844 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); ··· 2273 2274 return -EROFS; 2274 2275 2275 2276 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 2276 - f2fs_msg(sbi->sb, KERN_INFO, 2277 - "Skipping Checkpoint. Checkpoints currently disabled."); 2277 + f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled."); 2278 2278 return -EINVAL; 2279 2279 } 2280 2280 ··· 2658 2660 2659 2661 if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num || 2660 2662 __is_large_section(sbi)) { 2661 - f2fs_msg(sbi->sb, KERN_WARNING, 2662 - "Can't flush %u in %d for segs_per_sec %u != 1", 2663 - range.dev_num, sbi->s_ndevs, 2664 - sbi->segs_per_sec); 2663 + f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1", 2664 + range.dev_num, sbi->s_ndevs, sbi->segs_per_sec); 2665 2665 return -EINVAL; 2666 2666 } 2667 2667 ··· 2944 2948 fi->i_gc_failures[GC_FAILURE_PIN] + 1); 2945 2949 2946 2950 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) { 2947 - f2fs_msg(sbi->sb, KERN_WARNING, 2948 - "%s: Enable GC = ino %lx after %x GC trials", 2949 - __func__, inode->i_ino, 2950 - fi->i_gc_failures[GC_FAILURE_PIN]); 2951 + f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials", 2952 + __func__, inode->i_ino, 2953 + fi->i_gc_failures[GC_FAILURE_PIN]); 2951 2954 clear_inode_flag(inode, FI_PIN_FILE); 2952 2955 return -EAGAIN; 2953 2956 }
+9 -14
fs/f2fs/gc.c
··· 618 618 } 619 619 620 620 if (sum->version != dni->version) { 621 - f2fs_msg(sbi->sb, KERN_WARNING, 622 - "%s: valid data with mismatched node version.", 623 - __func__); 621 + f2fs_warn(sbi, "%s: valid data with mismatched node version.", 622 + __func__); 624 623 set_sbi_flag(sbi, SBI_NEED_FSCK); 625 624 } 626 625 ··· 1182 1183 1183 1184 sum = page_address(sum_page); 1184 1185 if (type != GET_SUM_TYPE((&sum->footer))) { 1185 - f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) " 1186 - "type [%d, %d] in SSA and SIT", 1187 - segno, type, GET_SUM_TYPE((&sum->footer))); 1186 + f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT", 1187 + segno, type, GET_SUM_TYPE((&sum->footer))); 1188 1188 set_sbi_flag(sbi, SBI_NEED_FSCK); 1189 1189 f2fs_stop_checkpoint(sbi, false); 1190 1190 goto skip; ··· 1395 1397 1396 1398 next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start); 1397 1399 if (next_inuse <= end) { 1398 - f2fs_msg(sbi->sb, KERN_ERR, 1399 - "segno %u should be free but still inuse!", next_inuse); 1400 + f2fs_err(sbi, "segno %u should be free but still inuse!", 1401 + next_inuse); 1400 1402 f2fs_bug_on(sbi, 1); 1401 1403 } 1402 1404 return err; ··· 1453 1455 return 0; 1454 1456 1455 1457 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) { 1456 - f2fs_msg(sbi->sb, KERN_ERR, 1457 - "Should run fsck to repair first."); 1458 + f2fs_err(sbi, "Should run fsck to repair first."); 1458 1459 return -EINVAL; 1459 1460 } 1460 1461 1461 1462 if (test_opt(sbi, DISABLE_CHECKPOINT)) { 1462 - f2fs_msg(sbi->sb, KERN_ERR, 1463 - "Checkpoint should be enabled."); 1463 + f2fs_err(sbi, "Checkpoint should be enabled."); 1464 1464 return -EINVAL; 1465 1465 } 1466 1466 ··· 1522 1526 out: 1523 1527 if (err) { 1524 1528 set_sbi_flag(sbi, SBI_NEED_FSCK); 1525 - f2fs_msg(sbi->sb, KERN_ERR, 1526 - "resize_fs failed, should run fsck to repair!"); 1529 + f2fs_err(sbi, "resize_fs failed, should run fsck to repair!"); 1527 1530 1528 1531 MAIN_SECS(sbi) += secs; 1529 1532 
spin_lock(&sbi->stat_lock);
+4 -8
fs/f2fs/inline.c
··· 140 140 if (unlikely(dn->data_blkaddr != NEW_ADDR)) { 141 141 f2fs_put_dnode(dn); 142 142 set_sbi_flag(fio.sbi, SBI_NEED_FSCK); 143 - f2fs_msg(fio.sbi->sb, KERN_WARNING, 144 - "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, " 145 - "run fsck to fix.", 146 - __func__, dn->inode->i_ino, dn->data_blkaddr); 143 + f2fs_warn(fio.sbi, "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.", 144 + __func__, dn->inode->i_ino, dn->data_blkaddr); 147 145 return -EINVAL; 148 146 } 149 147 ··· 381 383 if (unlikely(dn.data_blkaddr != NEW_ADDR)) { 382 384 f2fs_put_dnode(&dn); 383 385 set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK); 384 - f2fs_msg(F2FS_P_SB(page)->sb, KERN_WARNING, 385 - "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, " 386 - "run fsck to fix.", 387 - __func__, dir->i_ino, dn.data_blkaddr); 386 + f2fs_warn(F2FS_P_SB(page), "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.", 387 + __func__, dir->i_ino, dn.data_blkaddr); 388 388 err = -EINVAL; 389 389 goto out; 390 390 }
+26 -46
fs/f2fs/inode.c
··· 176 176 calculated = f2fs_inode_chksum(sbi, page); 177 177 178 178 if (provided != calculated) 179 - f2fs_msg(sbi->sb, KERN_WARNING, 180 - "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x", 181 - page->index, ino_of_node(page), provided, calculated); 179 + f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x", 180 + page->index, ino_of_node(page), provided, calculated); 182 181 183 182 return provided == calculated; 184 183 } ··· 201 202 iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks); 202 203 if (!iblocks) { 203 204 set_sbi_flag(sbi, SBI_NEED_FSCK); 204 - f2fs_msg(sbi->sb, KERN_WARNING, 205 - "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, " 206 - "run fsck to fix.", 207 - __func__, inode->i_ino, iblocks); 205 + f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.", 206 + __func__, inode->i_ino, iblocks); 208 207 return false; 209 208 } 210 209 211 210 if (ino_of_node(node_page) != nid_of_node(node_page)) { 212 211 set_sbi_flag(sbi, SBI_NEED_FSCK); 213 - f2fs_msg(sbi->sb, KERN_WARNING, 214 - "%s: corrupted inode footer i_ino=%lx, ino,nid: " 215 - "[%u, %u] run fsck to fix.", 216 - __func__, inode->i_ino, 217 - ino_of_node(node_page), nid_of_node(node_page)); 212 + f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.", 213 + __func__, inode->i_ino, 214 + ino_of_node(node_page), nid_of_node(node_page)); 218 215 return false; 219 216 } 220 217 221 218 if (f2fs_sb_has_flexible_inline_xattr(sbi) 222 219 && !f2fs_has_extra_attr(inode)) { 223 220 set_sbi_flag(sbi, SBI_NEED_FSCK); 224 - f2fs_msg(sbi->sb, KERN_WARNING, 225 - "%s: corrupted inode ino=%lx, run fsck to fix.", 226 - __func__, inode->i_ino); 221 + f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.", 222 + __func__, inode->i_ino); 227 223 return false; 228 224 } 229 225 230 226 if (f2fs_has_extra_attr(inode) && 231 227 !f2fs_sb_has_extra_attr(sbi)) { 232 228 set_sbi_flag(sbi, 
SBI_NEED_FSCK); 233 - f2fs_msg(sbi->sb, KERN_WARNING, 234 - "%s: inode (ino=%lx) is with extra_attr, " 235 - "but extra_attr feature is off", 236 - __func__, inode->i_ino); 229 + f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off", 230 + __func__, inode->i_ino); 237 231 return false; 238 232 } 239 233 240 234 if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE || 241 235 fi->i_extra_isize % sizeof(__le32)) { 242 236 set_sbi_flag(sbi, SBI_NEED_FSCK); 243 - f2fs_msg(sbi->sb, KERN_WARNING, 244 - "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, " 245 - "max: %zu", 246 - __func__, inode->i_ino, fi->i_extra_isize, 247 - F2FS_TOTAL_EXTRA_ATTR_SIZE); 237 + f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu", 238 + __func__, inode->i_ino, fi->i_extra_isize, 239 + F2FS_TOTAL_EXTRA_ATTR_SIZE); 248 240 return false; 249 241 } 250 242 ··· 245 255 (!fi->i_inline_xattr_size || 246 256 fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) { 247 257 set_sbi_flag(sbi, SBI_NEED_FSCK); 248 - f2fs_msg(sbi->sb, KERN_WARNING, 249 - "%s: inode (ino=%lx) has corrupted " 250 - "i_inline_xattr_size: %d, max: %zu", 251 - __func__, inode->i_ino, fi->i_inline_xattr_size, 252 - MAX_INLINE_XATTR_SIZE); 258 + f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu", 259 + __func__, inode->i_ino, fi->i_inline_xattr_size, 260 + MAX_INLINE_XATTR_SIZE); 253 261 return false; 254 262 } 255 263 ··· 260 272 !f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1, 261 273 DATA_GENERIC_ENHANCE))) { 262 274 set_sbi_flag(sbi, SBI_NEED_FSCK); 263 - f2fs_msg(sbi->sb, KERN_WARNING, 264 - "%s: inode (ino=%lx) extent info [%u, %u, %u] " 265 - "is incorrect, run fsck to fix", 266 - __func__, inode->i_ino, 267 - ei->blk, ei->fofs, ei->len); 275 + f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix", 276 + __func__, inode->i_ino, 277 + ei->blk, ei->fofs, ei->len); 268 278 return false; 269 279 } 
270 280 } ··· 270 284 if (f2fs_has_inline_data(inode) && 271 285 (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) { 272 286 set_sbi_flag(sbi, SBI_NEED_FSCK); 273 - f2fs_msg(sbi->sb, KERN_WARNING, 274 - "%s: inode (ino=%lx, mode=%u) should not have " 275 - "inline_data, run fsck to fix", 276 - __func__, inode->i_ino, inode->i_mode); 287 + f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix", 288 + __func__, inode->i_ino, inode->i_mode); 277 289 return false; 278 290 } 279 291 280 292 if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) { 281 293 set_sbi_flag(sbi, SBI_NEED_FSCK); 282 - f2fs_msg(sbi->sb, KERN_WARNING, 283 - "%s: inode (ino=%lx, mode=%u) should not have " 284 - "inline_dentry, run fsck to fix", 285 - __func__, inode->i_ino, inode->i_mode); 294 + f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix", 295 + __func__, inode->i_ino, inode->i_mode); 286 296 return false; 287 297 } 288 298 ··· 767 785 err = f2fs_get_node_info(sbi, inode->i_ino, &ni); 768 786 if (err) { 769 787 set_sbi_flag(sbi, SBI_NEED_FSCK); 770 - f2fs_msg(sbi->sb, KERN_WARNING, 771 - "May loss orphan inode, run fsck to fix."); 788 + f2fs_warn(sbi, "May loss orphan inode, run fsck to fix."); 772 789 goto out; 773 790 } 774 791 ··· 775 794 err = f2fs_acquire_orphan_inode(sbi); 776 795 if (err) { 777 796 set_sbi_flag(sbi, SBI_NEED_FSCK); 778 - f2fs_msg(sbi->sb, KERN_WARNING, 779 - "Too many orphan inodes, run fsck to fix."); 797 + f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix."); 780 798 } else { 781 799 f2fs_add_orphan_inode(inode); 782 800 }
+4 -6
fs/f2fs/namei.c
··· 385 385 int err = 0; 386 386 387 387 if (f2fs_readonly(sbi->sb)) { 388 - f2fs_msg(sbi->sb, KERN_INFO, 389 - "skip recovering inline_dots inode (ino:%lu, pino:%u) " 390 - "in readonly mountpoint", dir->i_ino, pino); 388 + f2fs_info(sbi, "skip recovering inline_dots inode (ino:%lu, pino:%u) in readonly mountpoint", 389 + dir->i_ino, pino); 391 390 return 0; 392 391 } 393 392 ··· 483 484 if (IS_ENCRYPTED(dir) && 484 485 (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) && 485 486 !fscrypt_has_permitted_context(dir, inode)) { 486 - f2fs_msg(inode->i_sb, KERN_WARNING, 487 - "Inconsistent encryption contexts: %lu/%lu", 488 - dir->i_ino, inode->i_ino); 487 + f2fs_warn(F2FS_I_SB(inode), "Inconsistent encryption contexts: %lu/%lu", 488 + dir->i_ino, inode->i_ino); 489 489 err = -EPERM; 490 490 goto out_iput; 491 491 }
+12 -18
fs/f2fs/node.c
··· 34 34 { 35 35 if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) { 36 36 set_sbi_flag(sbi, SBI_NEED_FSCK); 37 - f2fs_msg(sbi->sb, KERN_WARNING, 38 - "%s: out-of-range nid=%x, run fsck to fix.", 39 - __func__, nid); 37 + f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.", 38 + __func__, nid); 40 39 return -EINVAL; 41 40 } 42 41 return 0; ··· 1188 1189 } 1189 1190 1190 1191 if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) { 1191 - f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING, 1192 - "Inconsistent i_blocks, ino:%lu, iblocks:%llu", 1193 - inode->i_ino, 1194 - (unsigned long long)inode->i_blocks); 1192 + f2fs_warn(F2FS_I_SB(inode), "Inconsistent i_blocks, ino:%lu, iblocks:%llu", 1193 + inode->i_ino, (unsigned long long)inode->i_blocks); 1195 1194 set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK); 1196 1195 } 1197 1196 ··· 1377 1380 } 1378 1381 page_hit: 1379 1382 if(unlikely(nid != nid_of_node(page))) { 1380 - f2fs_msg(sbi->sb, KERN_WARNING, "inconsistent node block, " 1381 - "nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]", 1382 - nid, nid_of_node(page), ino_of_node(page), 1383 - ofs_of_node(page), cpver_of_node(page), 1384 - next_blkaddr_of_node(page)); 1383 + f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]", 1384 + nid, nid_of_node(page), ino_of_node(page), 1385 + ofs_of_node(page), cpver_of_node(page), 1386 + next_blkaddr_of_node(page)); 1385 1387 err = -EINVAL; 1386 1388 out_err: 1387 1389 ClearPageUptodate(page); ··· 1748 1752 break; 1749 1753 } 1750 1754 if (!ret && atomic && !marked) { 1751 - f2fs_msg(sbi->sb, KERN_DEBUG, 1752 - "Retry to write fsync mark: ino=%u, idx=%lx", 1753 - ino, last_page->index); 1755 + f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx", 1756 + ino, last_page->index); 1754 1757 lock_page(last_page); 1755 1758 f2fs_wait_on_page_writeback(last_page, NODE, true, true); 1756 1759 set_page_dirty(last_page); ··· 2299 2304 
if (ret) { 2300 2305 up_read(&nm_i->nat_tree_lock); 2301 2306 f2fs_bug_on(sbi, !mount); 2302 - f2fs_msg(sbi->sb, KERN_ERR, 2303 - "NAT is corrupt, run fsck to fix it"); 2307 + f2fs_err(sbi, "NAT is corrupt, run fsck to fix it"); 2304 2308 return ret; 2305 2309 } 2306 2310 } ··· 2909 2915 nm_i->full_nat_bits = nm_i->nat_bits + 8; 2910 2916 nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes; 2911 2917 2912 - f2fs_msg(sbi->sb, KERN_NOTICE, "Found nat_bits in checkpoint"); 2918 + f2fs_notice(sbi, "Found nat_bits in checkpoint"); 2913 2919 return 0; 2914 2920 } 2915 2921
+15 -22
fs/f2fs/recovery.c
··· 188 188 name = "<encrypted>"; 189 189 else 190 190 name = raw_inode->i_name; 191 - f2fs_msg(inode->i_sb, KERN_NOTICE, 192 - "%s: ino = %x, name = %s, dir = %lx, err = %d", 193 - __func__, ino_of_node(ipage), name, 194 - IS_ERR(dir) ? 0 : dir->i_ino, err); 191 + f2fs_notice(F2FS_I_SB(inode), "%s: ino = %x, name = %s, dir = %lx, err = %d", 192 + __func__, ino_of_node(ipage), name, 193 + IS_ERR(dir) ? 0 : dir->i_ino, err); 195 194 return err; 196 195 } 197 196 ··· 291 292 else 292 293 name = F2FS_INODE(page)->i_name; 293 294 294 - f2fs_msg(inode->i_sb, KERN_NOTICE, 295 - "recover_inode: ino = %x, name = %s, inline = %x", 296 - ino_of_node(page), name, raw->i_inline); 295 + f2fs_notice(F2FS_I_SB(inode), "recover_inode: ino = %x, name = %s, inline = %x", 296 + ino_of_node(page), name, raw->i_inline); 297 297 return 0; 298 298 } 299 299 ··· 369 371 /* sanity check in order to detect looped node chain */ 370 372 if (++loop_cnt >= free_blocks || 371 373 blkaddr == next_blkaddr_of_node(page)) { 372 - f2fs_msg(sbi->sb, KERN_NOTICE, 373 - "%s: detect looped node chain, " 374 - "blkaddr:%u, next:%u", 375 - __func__, blkaddr, next_blkaddr_of_node(page)); 374 + f2fs_notice(sbi, "%s: detect looped node chain, blkaddr:%u, next:%u", 375 + __func__, blkaddr, 376 + next_blkaddr_of_node(page)); 376 377 f2fs_put_page(page, 1); 377 378 err = -EINVAL; 378 379 break; ··· 550 553 f2fs_bug_on(sbi, ni.ino != ino_of_node(page)); 551 554 552 555 if (ofs_of_node(dn.node_page) != ofs_of_node(page)) { 553 - f2fs_msg(sbi->sb, KERN_WARNING, 554 - "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u", 555 - inode->i_ino, ofs_of_node(dn.node_page), 556 - ofs_of_node(page)); 556 + f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u", 557 + inode->i_ino, ofs_of_node(dn.node_page), 558 + ofs_of_node(page)); 557 559 err = -EFAULT; 558 560 goto err; 559 561 } ··· 638 642 err: 639 643 f2fs_put_dnode(&dn); 640 644 out: 641 - f2fs_msg(sbi->sb, KERN_NOTICE, 642 - "recover_data: ino = %lx (i_size: %s) 
recovered = %d, err = %d", 643 - inode->i_ino, 644 - file_keep_isize(inode) ? "keep" : "recover", 645 - recovered, err); 645 + f2fs_notice(sbi, "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d", 646 + inode->i_ino, file_keep_isize(inode) ? "keep" : "recover", 647 + recovered, err); 646 648 return err; 647 649 } 648 650 ··· 728 734 #endif 729 735 730 736 if (s_flags & SB_RDONLY) { 731 - f2fs_msg(sbi->sb, KERN_INFO, 732 - "recover fsync data on readonly fs"); 737 + f2fs_info(sbi, "recover fsync data on readonly fs"); 733 738 sbi->sb->s_flags &= ~SB_RDONLY; 734 739 } 735 740
+25 -36
fs/f2fs/segment.c
··· 1757 1757 devi = f2fs_target_device_index(sbi, blkstart); 1758 1758 if (blkstart < FDEV(devi).start_blk || 1759 1759 blkstart > FDEV(devi).end_blk) { 1760 - f2fs_msg(sbi->sb, KERN_ERR, "Invalid block %x", 1761 - blkstart); 1760 + f2fs_err(sbi, "Invalid block %x", blkstart); 1762 1761 return -EIO; 1763 1762 } 1764 1763 blkstart -= FDEV(devi).start_blk; ··· 1770 1771 1771 1772 if (sector & (bdev_zone_sectors(bdev) - 1) || 1772 1773 nr_sects != bdev_zone_sectors(bdev)) { 1773 - f2fs_msg(sbi->sb, KERN_ERR, 1774 - "(%d) %s: Unaligned zone reset attempted (block %x + %x)", 1775 - devi, sbi->s_ndevs ? FDEV(devi).path: "", 1776 - blkstart, blklen); 1774 + f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)", 1775 + devi, sbi->s_ndevs ? FDEV(devi).path : "", 1776 + blkstart, blklen); 1777 1777 return -EIO; 1778 1778 } 1779 1779 trace_f2fs_issue_reset_zone(bdev, blkstart); ··· 2136 2138 mir_exist = f2fs_test_and_set_bit(offset, 2137 2139 se->cur_valid_map_mir); 2138 2140 if (unlikely(exist != mir_exist)) { 2139 - f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error " 2140 - "when setting bitmap, blk:%u, old bit:%d", 2141 - blkaddr, exist); 2141 + f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d", 2142 + blkaddr, exist); 2142 2143 f2fs_bug_on(sbi, 1); 2143 2144 } 2144 2145 #endif 2145 2146 if (unlikely(exist)) { 2146 - f2fs_msg(sbi->sb, KERN_ERR, 2147 - "Bitmap was wrongly set, blk:%u", blkaddr); 2147 + f2fs_err(sbi, "Bitmap was wrongly set, blk:%u", 2148 + blkaddr); 2148 2149 f2fs_bug_on(sbi, 1); 2149 2150 se->valid_blocks--; 2150 2151 del = 0; ··· 2164 2167 mir_exist = f2fs_test_and_clear_bit(offset, 2165 2168 se->cur_valid_map_mir); 2166 2169 if (unlikely(exist != mir_exist)) { 2167 - f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error " 2168 - "when clearing bitmap, blk:%u, old bit:%d", 2169 - blkaddr, exist); 2170 + f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d", 2171 + blkaddr, exist); 2170 2172 
f2fs_bug_on(sbi, 1); 2171 2173 } 2172 2174 #endif 2173 2175 if (unlikely(!exist)) { 2174 - f2fs_msg(sbi->sb, KERN_ERR, 2175 - "Bitmap was wrongly cleared, blk:%u", blkaddr); 2176 + f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u", 2177 + blkaddr); 2176 2178 f2fs_bug_on(sbi, 1); 2177 2179 se->valid_blocks++; 2178 2180 del = 0; ··· 2679 2683 up_write(&SIT_I(sbi)->sentry_lock); 2680 2684 2681 2685 if (segno != curseg->segno) 2682 - f2fs_msg(sbi->sb, KERN_NOTICE, 2683 - "For resize: curseg of type %d: %u ==> %u", 2684 - type, segno, curseg->segno); 2686 + f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u", 2687 + type, segno, curseg->segno); 2685 2688 2686 2689 mutex_unlock(&curseg->curseg_mutex); 2687 2690 up_read(&SM_I(sbi)->curseg_lock); ··· 2818 2823 goto out; 2819 2824 2820 2825 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) { 2821 - f2fs_msg(sbi->sb, KERN_WARNING, 2822 - "Found FS corruption, run fsck to fix."); 2826 + f2fs_warn(sbi, "Found FS corruption, run fsck to fix."); 2823 2827 return -EIO; 2824 2828 } 2825 2829 ··· 3579 3585 /* sanity check for summary blocks */ 3580 3586 if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES || 3581 3587 sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) { 3582 - f2fs_msg(sbi->sb, KERN_ERR, 3583 - "invalid journal entries nats %u sits %u\n", 3584 - nats_in_cursum(nat_j), sits_in_cursum(sit_j)); 3588 + f2fs_err(sbi, "invalid journal entries nats %u sits %u\n", 3589 + nats_in_cursum(nat_j), sits_in_cursum(sit_j)); 3585 3590 return -EINVAL; 3586 3591 } 3587 3592 ··· 4148 4155 4149 4156 start = le32_to_cpu(segno_in_journal(journal, i)); 4150 4157 if (start >= MAIN_SEGS(sbi)) { 4151 - f2fs_msg(sbi->sb, KERN_ERR, 4152 - "Wrong journal entry on segno %u", 4153 - start); 4158 + f2fs_err(sbi, "Wrong journal entry on segno %u", 4159 + start); 4154 4160 set_sbi_flag(sbi, SBI_NEED_FSCK); 4155 4161 err = -EINVAL; 4156 4162 break; ··· 4188 4196 up_read(&curseg->journal_rwsem); 4189 4197 4190 4198 if (!err && total_node_blocks != 
valid_node_count(sbi)) { 4191 - f2fs_msg(sbi->sb, KERN_ERR, 4192 - "SIT is corrupted node# %u vs %u", 4193 - total_node_blocks, valid_node_count(sbi)); 4199 + f2fs_err(sbi, "SIT is corrupted node# %u vs %u", 4200 + total_node_blocks, valid_node_count(sbi)); 4194 4201 set_sbi_flag(sbi, SBI_NEED_FSCK); 4195 4202 err = -EINVAL; 4196 4203 } ··· 4305 4314 if (!f2fs_test_bit(blkofs, se->cur_valid_map)) 4306 4315 continue; 4307 4316 out: 4308 - f2fs_msg(sbi->sb, KERN_ERR, 4309 - "Current segment's next free block offset is " 4310 - "inconsistent with bitmap, logtype:%u, " 4311 - "segno:%u, type:%u, next_blkoff:%u, blkofs:%u", 4312 - i, curseg->segno, curseg->alloc_type, 4313 - curseg->next_blkoff, blkofs); 4317 + f2fs_err(sbi, 4318 + "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u", 4319 + i, curseg->segno, curseg->alloc_type, 4320 + curseg->next_blkoff, blkofs); 4314 4321 return -EINVAL; 4315 4322 } 4316 4323 }
+4 -6
fs/f2fs/segment.h
··· 693 693 } while (cur_pos < sbi->blocks_per_seg); 694 694 695 695 if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) { 696 - f2fs_msg(sbi->sb, KERN_ERR, 697 - "Mismatch valid blocks %d vs. %d", 698 - GET_SIT_VBLOCKS(raw_sit), valid_blocks); 696 + f2fs_err(sbi, "Mismatch valid blocks %d vs. %d", 697 + GET_SIT_VBLOCKS(raw_sit), valid_blocks); 699 698 set_sbi_flag(sbi, SBI_NEED_FSCK); 700 699 return -EINVAL; 701 700 } ··· 702 703 /* check segment usage, and check boundary of a given segment number */ 703 704 if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg 704 705 || segno > TOTAL_SEGS(sbi) - 1)) { 705 - f2fs_msg(sbi->sb, KERN_ERR, 706 - "Wrong valid blocks %d or segno %u", 707 - GET_SIT_VBLOCKS(raw_sit), segno); 706 + f2fs_err(sbi, "Wrong valid blocks %d or segno %u", 707 + GET_SIT_VBLOCKS(raw_sit), segno); 708 708 set_sbi_flag(sbi, SBI_NEED_FSCK); 709 709 return -EINVAL; 710 710 }
+199 -292
fs/f2fs/super.c
··· 205 205 {Opt_err, NULL}, 206 206 }; 207 207 208 - void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...) 208 + void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...) 209 209 { 210 210 struct va_format vaf; 211 211 va_list args; 212 + int level; 212 213 213 214 va_start(args, fmt); 214 - vaf.fmt = fmt; 215 + 216 + level = printk_get_level(fmt); 217 + vaf.fmt = printk_skip_level(fmt); 215 218 vaf.va = &args; 216 - printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf); 219 + printk("%c%cF2FS-fs (%s): %pV\n", 220 + KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf); 221 + 217 222 va_end(args); 218 223 } 219 224 ··· 231 226 if (test_opt(sbi, RESERVE_ROOT) && 232 227 F2FS_OPTION(sbi).root_reserved_blocks > limit) { 233 228 F2FS_OPTION(sbi).root_reserved_blocks = limit; 234 - f2fs_msg(sbi->sb, KERN_INFO, 235 - "Reduce reserved blocks for root = %u", 236 - F2FS_OPTION(sbi).root_reserved_blocks); 229 + f2fs_info(sbi, "Reduce reserved blocks for root = %u", 230 + F2FS_OPTION(sbi).root_reserved_blocks); 237 231 } 238 232 if (!test_opt(sbi, RESERVE_ROOT) && 239 233 (!uid_eq(F2FS_OPTION(sbi).s_resuid, 240 234 make_kuid(&init_user_ns, F2FS_DEF_RESUID)) || 241 235 !gid_eq(F2FS_OPTION(sbi).s_resgid, 242 236 make_kgid(&init_user_ns, F2FS_DEF_RESGID)))) 243 - f2fs_msg(sbi->sb, KERN_INFO, 244 - "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root", 245 - from_kuid_munged(&init_user_ns, 246 - F2FS_OPTION(sbi).s_resuid), 247 - from_kgid_munged(&init_user_ns, 248 - F2FS_OPTION(sbi).s_resgid)); 237 + f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root", 238 + from_kuid_munged(&init_user_ns, 239 + F2FS_OPTION(sbi).s_resuid), 240 + from_kgid_munged(&init_user_ns, 241 + F2FS_OPTION(sbi).s_resgid)); 249 242 } 250 243 251 244 static void init_once(void *foo) ··· 264 261 int ret = -EINVAL; 265 262 266 263 if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) { 267 - f2fs_msg(sb, KERN_ERR, 268 - "Cannot change journaled " 269 - "quota 
options when quota turned on"); 264 + f2fs_err(sbi, "Cannot change journaled quota options when quota turned on"); 270 265 return -EINVAL; 271 266 } 272 267 if (f2fs_sb_has_quota_ino(sbi)) { 273 - f2fs_msg(sb, KERN_INFO, 274 - "QUOTA feature is enabled, so ignore qf_name"); 268 + f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name"); 275 269 return 0; 276 270 } 277 271 278 272 qname = match_strdup(args); 279 273 if (!qname) { 280 - f2fs_msg(sb, KERN_ERR, 281 - "Not enough memory for storing quotafile name"); 274 + f2fs_err(sbi, "Not enough memory for storing quotafile name"); 282 275 return -ENOMEM; 283 276 } 284 277 if (F2FS_OPTION(sbi).s_qf_names[qtype]) { 285 278 if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0) 286 279 ret = 0; 287 280 else 288 - f2fs_msg(sb, KERN_ERR, 289 - "%s quota file already specified", 281 + f2fs_err(sbi, "%s quota file already specified", 290 282 QTYPE2NAME(qtype)); 291 283 goto errout; 292 284 } 293 285 if (strchr(qname, '/')) { 294 - f2fs_msg(sb, KERN_ERR, 295 - "quotafile must be on filesystem root"); 286 + f2fs_err(sbi, "quotafile must be on filesystem root"); 296 287 goto errout; 297 288 } 298 289 F2FS_OPTION(sbi).s_qf_names[qtype] = qname; ··· 302 305 struct f2fs_sb_info *sbi = F2FS_SB(sb); 303 306 304 307 if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) { 305 - f2fs_msg(sb, KERN_ERR, "Cannot change journaled quota options" 306 - " when quota turned on"); 308 + f2fs_err(sbi, "Cannot change journaled quota options when quota turned on"); 307 309 return -EINVAL; 308 310 } 309 311 kvfree(F2FS_OPTION(sbi).s_qf_names[qtype]); ··· 318 322 * to support legacy quotas in quota files. 319 323 */ 320 324 if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) { 321 - f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. " 322 - "Cannot enable project quota enforcement."); 325 + f2fs_err(sbi, "Project quota feature not enabled. 
Cannot enable project quota enforcement."); 323 326 return -1; 324 327 } 325 328 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] || ··· 338 343 339 344 if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) || 340 345 test_opt(sbi, PRJQUOTA)) { 341 - f2fs_msg(sbi->sb, KERN_ERR, "old and new quota " 342 - "format mixing"); 346 + f2fs_err(sbi, "old and new quota format mixing"); 343 347 return -1; 344 348 } 345 349 346 350 if (!F2FS_OPTION(sbi).s_jquota_fmt) { 347 - f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format " 348 - "not specified"); 351 + f2fs_err(sbi, "journaled quota format not specified"); 349 352 return -1; 350 353 } 351 354 } 352 355 353 356 if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) { 354 - f2fs_msg(sbi->sb, KERN_INFO, 355 - "QUOTA feature is enabled, so ignore jquota_fmt"); 357 + f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt"); 356 358 F2FS_OPTION(sbi).s_jquota_fmt = 0; 357 359 } 358 360 return 0; ··· 417 425 break; 418 426 case Opt_nodiscard: 419 427 if (f2fs_sb_has_blkzoned(sbi)) { 420 - f2fs_msg(sb, KERN_WARNING, 421 - "discard is required for zoned block devices"); 428 + f2fs_warn(sbi, "discard is required for zoned block devices"); 422 429 return -EINVAL; 423 430 } 424 431 clear_opt(sbi, DISCARD); ··· 449 458 break; 450 459 #else 451 460 case Opt_user_xattr: 452 - f2fs_msg(sb, KERN_INFO, 453 - "user_xattr options not supported"); 461 + f2fs_info(sbi, "user_xattr options not supported"); 454 462 break; 455 463 case Opt_nouser_xattr: 456 - f2fs_msg(sb, KERN_INFO, 457 - "nouser_xattr options not supported"); 464 + f2fs_info(sbi, "nouser_xattr options not supported"); 458 465 break; 459 466 case Opt_inline_xattr: 460 - f2fs_msg(sb, KERN_INFO, 461 - "inline_xattr options not supported"); 467 + f2fs_info(sbi, "inline_xattr options not supported"); 462 468 break; 463 469 case Opt_noinline_xattr: 464 - f2fs_msg(sb, KERN_INFO, 465 - "noinline_xattr options not supported"); 470 + f2fs_info(sbi, "noinline_xattr options not 
supported"); 466 471 break; 467 472 #endif 468 473 #ifdef CONFIG_F2FS_FS_POSIX_ACL ··· 470 483 break; 471 484 #else 472 485 case Opt_acl: 473 - f2fs_msg(sb, KERN_INFO, "acl options not supported"); 486 + f2fs_info(sbi, "acl options not supported"); 474 487 break; 475 488 case Opt_noacl: 476 - f2fs_msg(sb, KERN_INFO, "noacl options not supported"); 489 + f2fs_info(sbi, "noacl options not supported"); 477 490 break; 478 491 #endif 479 492 case Opt_active_logs: ··· 523 536 if (args->from && match_int(args, &arg)) 524 537 return -EINVAL; 525 538 if (test_opt(sbi, RESERVE_ROOT)) { 526 - f2fs_msg(sb, KERN_INFO, 527 - "Preserve previous reserve_root=%u", 528 - F2FS_OPTION(sbi).root_reserved_blocks); 539 + f2fs_info(sbi, "Preserve previous reserve_root=%u", 540 + F2FS_OPTION(sbi).root_reserved_blocks); 529 541 } else { 530 542 F2FS_OPTION(sbi).root_reserved_blocks = arg; 531 543 set_opt(sbi, RESERVE_ROOT); ··· 535 549 return -EINVAL; 536 550 uid = make_kuid(current_user_ns(), arg); 537 551 if (!uid_valid(uid)) { 538 - f2fs_msg(sb, KERN_ERR, 539 - "Invalid uid value %d", arg); 552 + f2fs_err(sbi, "Invalid uid value %d", arg); 540 553 return -EINVAL; 541 554 } 542 555 F2FS_OPTION(sbi).s_resuid = uid; ··· 545 560 return -EINVAL; 546 561 gid = make_kgid(current_user_ns(), arg); 547 562 if (!gid_valid(gid)) { 548 - f2fs_msg(sb, KERN_ERR, 549 - "Invalid gid value %d", arg); 563 + f2fs_err(sbi, "Invalid gid value %d", arg); 550 564 return -EINVAL; 551 565 } 552 566 F2FS_OPTION(sbi).s_resgid = gid; ··· 558 574 if (strlen(name) == 8 && 559 575 !strncmp(name, "adaptive", 8)) { 560 576 if (f2fs_sb_has_blkzoned(sbi)) { 561 - f2fs_msg(sb, KERN_WARNING, 562 - "adaptive mode is not allowed with " 563 - "zoned block device feature"); 577 + f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature"); 564 578 kvfree(name); 565 579 return -EINVAL; 566 580 } ··· 576 594 if (args->from && match_int(args, &arg)) 577 595 return -EINVAL; 578 596 if (arg <= 0 || arg > 
__ilog2_u32(BIO_MAX_PAGES)) { 579 - f2fs_msg(sb, KERN_WARNING, 580 - "Not support %d, larger than %d", 581 - 1 << arg, BIO_MAX_PAGES); 597 + f2fs_warn(sbi, "Not support %d, larger than %d", 598 + 1 << arg, BIO_MAX_PAGES); 582 599 return -EINVAL; 583 600 } 584 601 F2FS_OPTION(sbi).write_io_size_bits = arg; ··· 598 617 break; 599 618 #else 600 619 case Opt_fault_injection: 601 - f2fs_msg(sb, KERN_INFO, 602 - "fault_injection options not supported"); 620 + f2fs_info(sbi, "fault_injection options not supported"); 603 621 break; 604 622 605 623 case Opt_fault_type: 606 - f2fs_msg(sb, KERN_INFO, 607 - "fault_type options not supported"); 624 + f2fs_info(sbi, "fault_type options not supported"); 608 625 break; 609 626 #endif 610 627 case Opt_lazytime: ··· 682 703 case Opt_jqfmt_vfsv0: 683 704 case Opt_jqfmt_vfsv1: 684 705 case Opt_noquota: 685 - f2fs_msg(sb, KERN_INFO, 686 - "quota operations not supported"); 706 + f2fs_info(sbi, "quota operations not supported"); 687 707 break; 688 708 #endif 689 709 case Opt_whint: ··· 744 766 case Opt_test_dummy_encryption: 745 767 #ifdef CONFIG_FS_ENCRYPTION 746 768 if (!f2fs_sb_has_encrypt(sbi)) { 747 - f2fs_msg(sb, KERN_ERR, "Encrypt feature is off"); 769 + f2fs_err(sbi, "Encrypt feature is off"); 748 770 return -EINVAL; 749 771 } 750 772 751 773 F2FS_OPTION(sbi).test_dummy_encryption = true; 752 - f2fs_msg(sb, KERN_INFO, 753 - "Test dummy encryption mode enabled"); 774 + f2fs_info(sbi, "Test dummy encryption mode enabled"); 754 775 #else 755 - f2fs_msg(sb, KERN_INFO, 756 - "Test dummy encryption mount option ignored"); 776 + f2fs_info(sbi, "Test dummy encryption mount option ignored"); 757 777 #endif 758 778 break; 759 779 case Opt_checkpoint_disable_cap_perc: ··· 780 804 clear_opt(sbi, DISABLE_CHECKPOINT); 781 805 break; 782 806 default: 783 - f2fs_msg(sb, KERN_ERR, 784 - "Unrecognized mount option \"%s\" or missing value", 785 - p); 807 + f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value", 808 + p); 786 809 return 
-EINVAL; 787 810 } 788 811 } ··· 790 815 return -EINVAL; 791 816 #else 792 817 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) { 793 - f2fs_msg(sbi->sb, KERN_INFO, 794 - "Filesystem with quota feature cannot be mounted RDWR " 795 - "without CONFIG_QUOTA"); 818 + f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA"); 796 819 return -EINVAL; 797 820 } 798 821 if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) { 799 - f2fs_msg(sb, KERN_ERR, 800 - "Filesystem with project quota feature cannot be " 801 - "mounted RDWR without CONFIG_QUOTA"); 822 + f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA"); 802 823 return -EINVAL; 803 824 } 804 825 #endif 805 826 806 827 if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) { 807 - f2fs_msg(sb, KERN_ERR, 808 - "Should set mode=lfs with %uKB-sized IO", 809 - F2FS_IO_SIZE_KB(sbi)); 828 + f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO", 829 + F2FS_IO_SIZE_KB(sbi)); 810 830 return -EINVAL; 811 831 } 812 832 ··· 810 840 811 841 if (!f2fs_sb_has_extra_attr(sbi) || 812 842 !f2fs_sb_has_flexible_inline_xattr(sbi)) { 813 - f2fs_msg(sb, KERN_ERR, 814 - "extra_attr or flexible_inline_xattr " 815 - "feature is off"); 843 + f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off"); 816 844 return -EINVAL; 817 845 } 818 846 if (!test_opt(sbi, INLINE_XATTR)) { 819 - f2fs_msg(sb, KERN_ERR, 820 - "inline_xattr_size option should be " 821 - "set with inline_xattr option"); 847 + f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option"); 822 848 return -EINVAL; 823 849 } 824 850 ··· 823 857 824 858 if (F2FS_OPTION(sbi).inline_xattr_size < min_size || 825 859 F2FS_OPTION(sbi).inline_xattr_size > max_size) { 826 - f2fs_msg(sb, KERN_ERR, 827 - "inline xattr size is out of range: %d ~ %d", 828 - min_size, max_size); 860 + f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d", 861 + min_size, max_size); 
829 862 return -EINVAL; 830 863 } 831 864 } 832 865 833 866 if (test_opt(sbi, DISABLE_CHECKPOINT) && test_opt(sbi, LFS)) { 834 - f2fs_msg(sb, KERN_ERR, 835 - "LFS not compatible with checkpoint=disable\n"); 867 + f2fs_err(sbi, "LFS not compatible with checkpoint=disable\n"); 836 868 return -EINVAL; 837 869 } 838 870 ··· 1452 1488 block_t unusable; 1453 1489 1454 1490 if (s_flags & SB_RDONLY) { 1455 - f2fs_msg(sbi->sb, KERN_ERR, 1456 - "checkpoint=disable on readonly fs"); 1491 + f2fs_err(sbi, "checkpoint=disable on readonly fs"); 1457 1492 return -EINVAL; 1458 1493 } 1459 1494 sbi->sb->s_flags |= SB_ACTIVE; ··· 1555 1592 /* recover superblocks we couldn't write due to previous RO mount */ 1556 1593 if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) { 1557 1594 err = f2fs_commit_super(sbi, false); 1558 - f2fs_msg(sb, KERN_INFO, 1559 - "Try to recover all the superblocks, ret: %d", err); 1595 + f2fs_info(sbi, "Try to recover all the superblocks, ret: %d", 1596 + err); 1560 1597 if (!err) 1561 1598 clear_sbi_flag(sbi, SBI_NEED_SB_WRITE); 1562 1599 } ··· 1597 1634 /* disallow enable/disable extent_cache dynamically */ 1598 1635 if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) { 1599 1636 err = -EINVAL; 1600 - f2fs_msg(sbi->sb, KERN_WARNING, 1601 - "switch extent_cache option is not allowed"); 1637 + f2fs_warn(sbi, "switch extent_cache option is not allowed"); 1602 1638 goto restore_opts; 1603 1639 } 1604 1640 1605 1641 if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) { 1606 1642 err = -EINVAL; 1607 - f2fs_msg(sbi->sb, KERN_WARNING, 1608 - "disabling checkpoint not compatible with read-only"); 1643 + f2fs_warn(sbi, "disabling checkpoint not compatible with read-only"); 1609 1644 goto restore_opts; 1610 1645 } 1611 1646 ··· 1673 1712 restore_gc: 1674 1713 if (need_restart_gc) { 1675 1714 if (f2fs_start_gc_thread(sbi)) 1676 - f2fs_msg(sbi->sb, KERN_WARNING, 1677 - "background gc thread has stopped"); 1715 + f2fs_warn(sbi, "background 
gc thread has stopped"); 1678 1716 } else if (need_stop_gc) { 1679 1717 f2fs_stop_gc_thread(sbi); 1680 1718 } ··· 1812 1852 static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type) 1813 1853 { 1814 1854 if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) { 1815 - f2fs_msg(sbi->sb, KERN_ERR, 1816 - "quota sysfile may be corrupted, skip loading it"); 1855 + f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it"); 1817 1856 return 0; 1818 1857 } 1819 1858 ··· 1828 1869 if (f2fs_sb_has_quota_ino(sbi) && rdonly) { 1829 1870 err = f2fs_enable_quotas(sbi->sb); 1830 1871 if (err) { 1831 - f2fs_msg(sbi->sb, KERN_ERR, 1832 - "Cannot turn on quota_ino: %d", err); 1872 + f2fs_err(sbi, "Cannot turn on quota_ino: %d", err); 1833 1873 return 0; 1834 1874 } 1835 1875 return 1; ··· 1841 1883 enabled = 1; 1842 1884 continue; 1843 1885 } 1844 - f2fs_msg(sbi->sb, KERN_ERR, 1845 - "Cannot turn on quotas: %d on %d", err, i); 1886 + f2fs_err(sbi, "Cannot turn on quotas: %d on %d", 1887 + err, i); 1846 1888 } 1847 1889 } 1848 1890 return enabled; ··· 1863 1905 1864 1906 qf_inode = f2fs_iget(sb, qf_inum); 1865 1907 if (IS_ERR(qf_inode)) { 1866 - f2fs_msg(sb, KERN_ERR, 1867 - "Bad quota inode %u:%lu", type, qf_inum); 1908 + f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum); 1868 1909 return PTR_ERR(qf_inode); 1869 1910 } 1870 1911 ··· 1876 1919 1877 1920 static int f2fs_enable_quotas(struct super_block *sb) 1878 1921 { 1922 + struct f2fs_sb_info *sbi = F2FS_SB(sb); 1879 1923 int type, err = 0; 1880 1924 unsigned long qf_inum; 1881 1925 bool quota_mopt[MAXQUOTAS] = { 1882 - test_opt(F2FS_SB(sb), USRQUOTA), 1883 - test_opt(F2FS_SB(sb), GRPQUOTA), 1884 - test_opt(F2FS_SB(sb), PRJQUOTA), 1926 + test_opt(sbi, USRQUOTA), 1927 + test_opt(sbi, GRPQUOTA), 1928 + test_opt(sbi, PRJQUOTA), 1885 1929 }; 1886 1930 1887 1931 if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) { 1888 - f2fs_msg(sb, KERN_ERR, 1889 - "quota file may be corrupted, skip loading it"); 
1932 + f2fs_err(sbi, "quota file may be corrupted, skip loading it"); 1890 1933 return 0; 1891 1934 } 1892 1935 ··· 1899 1942 DQUOT_USAGE_ENABLED | 1900 1943 (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0)); 1901 1944 if (err) { 1902 - f2fs_msg(sb, KERN_ERR, 1903 - "Failed to enable quota tracking " 1904 - "(type=%d, err=%d). Please run " 1905 - "fsck to fix.", type, err); 1945 + f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.", 1946 + type, err); 1906 1947 for (type--; type >= 0; type--) 1907 1948 dquot_quota_off(sb, type); 1908 1949 set_sbi_flag(F2FS_SB(sb), ··· 2020 2065 if (err) { 2021 2066 int ret = dquot_quota_off(sb, type); 2022 2067 2023 - f2fs_msg(sb, KERN_ERR, 2024 - "Fail to turn off disk quota " 2025 - "(type: %d, err: %d, ret:%d), Please " 2026 - "run fsck to fix it.", type, err, ret); 2068 + f2fs_err(F2FS_SB(sb), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.", 2069 + type, err, ret); 2027 2070 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR); 2028 2071 } 2029 2072 } ··· 2314 2361 (segment_count << log_blocks_per_seg); 2315 2362 2316 2363 if (segment0_blkaddr != cp_blkaddr) { 2317 - f2fs_msg(sb, KERN_INFO, 2318 - "Mismatch start address, segment0(%u) cp_blkaddr(%u)", 2319 - segment0_blkaddr, cp_blkaddr); 2364 + f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)", 2365 + segment0_blkaddr, cp_blkaddr); 2320 2366 return true; 2321 2367 } 2322 2368 2323 2369 if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) != 2324 2370 sit_blkaddr) { 2325 - f2fs_msg(sb, KERN_INFO, 2326 - "Wrong CP boundary, start(%u) end(%u) blocks(%u)", 2327 - cp_blkaddr, sit_blkaddr, 2328 - segment_count_ckpt << log_blocks_per_seg); 2371 + f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)", 2372 + cp_blkaddr, sit_blkaddr, 2373 + segment_count_ckpt << log_blocks_per_seg); 2329 2374 return true; 2330 2375 } 2331 2376 2332 2377 if (sit_blkaddr + (segment_count_sit << 
log_blocks_per_seg) != 2333 2378 nat_blkaddr) { 2334 - f2fs_msg(sb, KERN_INFO, 2335 - "Wrong SIT boundary, start(%u) end(%u) blocks(%u)", 2336 - sit_blkaddr, nat_blkaddr, 2337 - segment_count_sit << log_blocks_per_seg); 2379 + f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)", 2380 + sit_blkaddr, nat_blkaddr, 2381 + segment_count_sit << log_blocks_per_seg); 2338 2382 return true; 2339 2383 } 2340 2384 2341 2385 if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) != 2342 2386 ssa_blkaddr) { 2343 - f2fs_msg(sb, KERN_INFO, 2344 - "Wrong NAT boundary, start(%u) end(%u) blocks(%u)", 2345 - nat_blkaddr, ssa_blkaddr, 2346 - segment_count_nat << log_blocks_per_seg); 2387 + f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)", 2388 + nat_blkaddr, ssa_blkaddr, 2389 + segment_count_nat << log_blocks_per_seg); 2347 2390 return true; 2348 2391 } 2349 2392 2350 2393 if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) != 2351 2394 main_blkaddr) { 2352 - f2fs_msg(sb, KERN_INFO, 2353 - "Wrong SSA boundary, start(%u) end(%u) blocks(%u)", 2354 - ssa_blkaddr, main_blkaddr, 2355 - segment_count_ssa << log_blocks_per_seg); 2395 + f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)", 2396 + ssa_blkaddr, main_blkaddr, 2397 + segment_count_ssa << log_blocks_per_seg); 2356 2398 return true; 2357 2399 } 2358 2400 2359 2401 if (main_end_blkaddr > seg_end_blkaddr) { 2360 - f2fs_msg(sb, KERN_INFO, 2361 - "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)", 2362 - main_blkaddr, 2363 - segment0_blkaddr + 2364 - (segment_count << log_blocks_per_seg), 2365 - segment_count_main << log_blocks_per_seg); 2402 + f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)", 2403 + main_blkaddr, 2404 + segment0_blkaddr + 2405 + (segment_count << log_blocks_per_seg), 2406 + segment_count_main << log_blocks_per_seg); 2366 2407 return true; 2367 2408 } else if (main_end_blkaddr < seg_end_blkaddr) { 2368 2409 int err = 0; ··· 2373 2426 
err = __f2fs_commit_super(bh, NULL); 2374 2427 res = err ? "failed" : "done"; 2375 2428 } 2376 - f2fs_msg(sb, KERN_INFO, 2377 - "Fix alignment : %s, start(%u) end(%u) block(%u)", 2378 - res, main_blkaddr, 2379 - segment0_blkaddr + 2380 - (segment_count << log_blocks_per_seg), 2381 - segment_count_main << log_blocks_per_seg); 2429 + f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%u) block(%u)", 2430 + res, main_blkaddr, 2431 + segment0_blkaddr + 2432 + (segment_count << log_blocks_per_seg), 2433 + segment_count_main << log_blocks_per_seg); 2382 2434 if (err) 2383 2435 return true; 2384 2436 } ··· 2391 2445 block_t total_sections, blocks_per_seg; 2392 2446 struct f2fs_super_block *raw_super = (struct f2fs_super_block *) 2393 2447 (bh->b_data + F2FS_SUPER_OFFSET); 2394 - struct super_block *sb = sbi->sb; 2395 2448 unsigned int blocksize; 2396 2449 size_t crc_offset = 0; 2397 2450 __u32 crc = 0; ··· 2400 2455 crc_offset = le32_to_cpu(raw_super->checksum_offset); 2401 2456 if (crc_offset != 2402 2457 offsetof(struct f2fs_super_block, crc)) { 2403 - f2fs_msg(sb, KERN_INFO, 2404 - "Invalid SB checksum offset: %zu", 2405 - crc_offset); 2458 + f2fs_info(sbi, "Invalid SB checksum offset: %zu", 2459 + crc_offset); 2406 2460 return 1; 2407 2461 } 2408 2462 crc = le32_to_cpu(raw_super->crc); 2409 2463 if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) { 2410 - f2fs_msg(sb, KERN_INFO, 2411 - "Invalid SB checksum value: %u", crc); 2464 + f2fs_info(sbi, "Invalid SB checksum value: %u", crc); 2412 2465 return 1; 2413 2466 } 2414 2467 } 2415 2468 2416 2469 if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) { 2417 - f2fs_msg(sb, KERN_INFO, 2418 - "Magic Mismatch, valid(0x%x) - read(0x%x)", 2419 - F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic)); 2470 + f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)", 2471 + F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic)); 2420 2472 return 1; 2421 2473 } 2422 2474 2423 2475 /* Currently, support only 4KB page cache size */ 2424 
2476 if (F2FS_BLKSIZE != PAGE_SIZE) { 2425 - f2fs_msg(sb, KERN_INFO, 2426 - "Invalid page_cache_size (%lu), supports only 4KB", 2427 - PAGE_SIZE); 2477 + f2fs_info(sbi, "Invalid page_cache_size (%lu), supports only 4KB", 2478 + PAGE_SIZE); 2428 2479 return 1; 2429 2480 } 2430 2481 2431 2482 /* Currently, support only 4KB block size */ 2432 2483 blocksize = 1 << le32_to_cpu(raw_super->log_blocksize); 2433 2484 if (blocksize != F2FS_BLKSIZE) { 2434 - f2fs_msg(sb, KERN_INFO, 2435 - "Invalid blocksize (%u), supports only 4KB", 2436 - blocksize); 2485 + f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB", 2486 + blocksize); 2437 2487 return 1; 2438 2488 } 2439 2489 2440 2490 /* check log blocks per segment */ 2441 2491 if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) { 2442 - f2fs_msg(sb, KERN_INFO, 2443 - "Invalid log blocks per segment (%u)", 2444 - le32_to_cpu(raw_super->log_blocks_per_seg)); 2492 + f2fs_info(sbi, "Invalid log blocks per segment (%u)", 2493 + le32_to_cpu(raw_super->log_blocks_per_seg)); 2445 2494 return 1; 2446 2495 } 2447 2496 ··· 2444 2505 F2FS_MAX_LOG_SECTOR_SIZE || 2445 2506 le32_to_cpu(raw_super->log_sectorsize) < 2446 2507 F2FS_MIN_LOG_SECTOR_SIZE) { 2447 - f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)", 2448 - le32_to_cpu(raw_super->log_sectorsize)); 2508 + f2fs_info(sbi, "Invalid log sectorsize (%u)", 2509 + le32_to_cpu(raw_super->log_sectorsize)); 2449 2510 return 1; 2450 2511 } 2451 2512 if (le32_to_cpu(raw_super->log_sectors_per_block) + 2452 2513 le32_to_cpu(raw_super->log_sectorsize) != 2453 2514 F2FS_MAX_LOG_SECTOR_SIZE) { 2454 - f2fs_msg(sb, KERN_INFO, 2455 - "Invalid log sectors per block(%u) log sectorsize(%u)", 2456 - le32_to_cpu(raw_super->log_sectors_per_block), 2457 - le32_to_cpu(raw_super->log_sectorsize)); 2515 + f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)", 2516 + le32_to_cpu(raw_super->log_sectors_per_block), 2517 + le32_to_cpu(raw_super->log_sectorsize)); 2458 2518 return 1; 2459 
2519 } 2460 2520 ··· 2467 2529 2468 2530 if (segment_count > F2FS_MAX_SEGMENT || 2469 2531 segment_count < F2FS_MIN_SEGMENTS) { 2470 - f2fs_msg(sb, KERN_INFO, 2471 - "Invalid segment count (%u)", 2472 - segment_count); 2532 + f2fs_info(sbi, "Invalid segment count (%u)", segment_count); 2473 2533 return 1; 2474 2534 } 2475 2535 2476 2536 if (total_sections > segment_count || 2477 2537 total_sections < F2FS_MIN_SEGMENTS || 2478 2538 segs_per_sec > segment_count || !segs_per_sec) { 2479 - f2fs_msg(sb, KERN_INFO, 2480 - "Invalid segment/section count (%u, %u x %u)", 2481 - segment_count, total_sections, segs_per_sec); 2539 + f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)", 2540 + segment_count, total_sections, segs_per_sec); 2482 2541 return 1; 2483 2542 } 2484 2543 2485 2544 if ((segment_count / segs_per_sec) < total_sections) { 2486 - f2fs_msg(sb, KERN_INFO, 2487 - "Small segment_count (%u < %u * %u)", 2488 - segment_count, segs_per_sec, total_sections); 2545 + f2fs_info(sbi, "Small segment_count (%u < %u * %u)", 2546 + segment_count, segs_per_sec, total_sections); 2489 2547 return 1; 2490 2548 } 2491 2549 2492 2550 if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) { 2493 - f2fs_msg(sb, KERN_INFO, 2494 - "Wrong segment_count / block_count (%u > %llu)", 2495 - segment_count, le64_to_cpu(raw_super->block_count)); 2551 + f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)", 2552 + segment_count, le64_to_cpu(raw_super->block_count)); 2496 2553 return 1; 2497 2554 } 2498 2555 2499 2556 if (secs_per_zone > total_sections || !secs_per_zone) { 2500 - f2fs_msg(sb, KERN_INFO, 2501 - "Wrong secs_per_zone / total_sections (%u, %u)", 2502 - secs_per_zone, total_sections); 2557 + f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)", 2558 + secs_per_zone, total_sections); 2503 2559 return 1; 2504 2560 } 2505 2561 if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION || 2506 2562 raw_super->hot_ext_count > F2FS_MAX_EXTENSION 
|| 2507 2563 (le32_to_cpu(raw_super->extension_count) + 2508 2564 raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) { 2509 - f2fs_msg(sb, KERN_INFO, 2510 - "Corrupted extension count (%u + %u > %u)", 2511 - le32_to_cpu(raw_super->extension_count), 2512 - raw_super->hot_ext_count, 2513 - F2FS_MAX_EXTENSION); 2565 + f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)", 2566 + le32_to_cpu(raw_super->extension_count), 2567 + raw_super->hot_ext_count, 2568 + F2FS_MAX_EXTENSION); 2514 2569 return 1; 2515 2570 } 2516 2571 2517 2572 if (le32_to_cpu(raw_super->cp_payload) > 2518 2573 (blocks_per_seg - F2FS_CP_PACKS)) { 2519 - f2fs_msg(sb, KERN_INFO, 2520 - "Insane cp_payload (%u > %u)", 2521 - le32_to_cpu(raw_super->cp_payload), 2522 - blocks_per_seg - F2FS_CP_PACKS); 2574 + f2fs_info(sbi, "Insane cp_payload (%u > %u)", 2575 + le32_to_cpu(raw_super->cp_payload), 2576 + blocks_per_seg - F2FS_CP_PACKS); 2523 2577 return 1; 2524 2578 } 2525 2579 ··· 2519 2589 if (le32_to_cpu(raw_super->node_ino) != 1 || 2520 2590 le32_to_cpu(raw_super->meta_ino) != 2 || 2521 2591 le32_to_cpu(raw_super->root_ino) != 3) { 2522 - f2fs_msg(sb, KERN_INFO, 2523 - "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)", 2524 - le32_to_cpu(raw_super->node_ino), 2525 - le32_to_cpu(raw_super->meta_ino), 2526 - le32_to_cpu(raw_super->root_ino)); 2592 + f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)", 2593 + le32_to_cpu(raw_super->node_ino), 2594 + le32_to_cpu(raw_super->meta_ino), 2595 + le32_to_cpu(raw_super->root_ino)); 2527 2596 return 1; 2528 2597 } 2529 2598 ··· 2566 2637 2567 2638 if (unlikely(fsmeta < F2FS_MIN_SEGMENTS || 2568 2639 ovp_segments == 0 || reserved_segments == 0)) { 2569 - f2fs_msg(sbi->sb, KERN_ERR, 2570 - "Wrong layout: check mkfs.f2fs version"); 2640 + f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version"); 2571 2641 return 1; 2572 2642 } 2573 2643 ··· 2575 2647 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg); 2576 2648 if (!user_block_count || 
user_block_count >= 2577 2649 segment_count_main << log_blocks_per_seg) { 2578 - f2fs_msg(sbi->sb, KERN_ERR, 2579 - "Wrong user_block_count: %u", user_block_count); 2650 + f2fs_err(sbi, "Wrong user_block_count: %u", 2651 + user_block_count); 2580 2652 return 1; 2581 2653 } 2582 2654 2583 2655 valid_user_blocks = le64_to_cpu(ckpt->valid_block_count); 2584 2656 if (valid_user_blocks > user_block_count) { 2585 - f2fs_msg(sbi->sb, KERN_ERR, 2586 - "Wrong valid_user_blocks: %u, user_block_count: %u", 2587 - valid_user_blocks, user_block_count); 2657 + f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u", 2658 + valid_user_blocks, user_block_count); 2588 2659 return 1; 2589 2660 } 2590 2661 ··· 2591 2664 avail_node_count = sbi->total_node_count - sbi->nquota_files - 2592 2665 F2FS_RESERVED_NODE_NUM; 2593 2666 if (valid_node_count > avail_node_count) { 2594 - f2fs_msg(sbi->sb, KERN_ERR, 2595 - "Wrong valid_node_count: %u, avail_node_count: %u", 2596 - valid_node_count, avail_node_count); 2667 + f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u", 2668 + valid_node_count, avail_node_count); 2597 2669 return 1; 2598 2670 } 2599 2671 ··· 2606 2680 for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) { 2607 2681 if (le32_to_cpu(ckpt->cur_node_segno[i]) == 2608 2682 le32_to_cpu(ckpt->cur_node_segno[j])) { 2609 - f2fs_msg(sbi->sb, KERN_ERR, 2610 - "Node segment (%u, %u) has the same " 2611 - "segno: %u", i, j, 2612 - le32_to_cpu(ckpt->cur_node_segno[i])); 2683 + f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u", 2684 + i, j, 2685 + le32_to_cpu(ckpt->cur_node_segno[i])); 2613 2686 return 1; 2614 2687 } 2615 2688 } ··· 2620 2695 for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) { 2621 2696 if (le32_to_cpu(ckpt->cur_data_segno[i]) == 2622 2697 le32_to_cpu(ckpt->cur_data_segno[j])) { 2623 - f2fs_msg(sbi->sb, KERN_ERR, 2624 - "Data segment (%u, %u) has the same " 2625 - "segno: %u", i, j, 2626 - le32_to_cpu(ckpt->cur_data_segno[i])); 2698 + f2fs_err(sbi, 
"Data segment (%u, %u) has the same segno: %u", 2699 + i, j, 2700 + le32_to_cpu(ckpt->cur_data_segno[i])); 2627 2701 return 1; 2628 2702 } 2629 2703 } ··· 2631 2707 for (j = i; j < NR_CURSEG_DATA_TYPE; j++) { 2632 2708 if (le32_to_cpu(ckpt->cur_node_segno[i]) == 2633 2709 le32_to_cpu(ckpt->cur_data_segno[j])) { 2634 - f2fs_msg(sbi->sb, KERN_ERR, 2635 - "Data segment (%u) and Data segment (%u)" 2636 - " has the same segno: %u", i, j, 2637 - le32_to_cpu(ckpt->cur_node_segno[i])); 2710 + f2fs_err(sbi, "Data segment (%u) and Data segment (%u) has the same segno: %u", 2711 + i, j, 2712 + le32_to_cpu(ckpt->cur_node_segno[i])); 2638 2713 return 1; 2639 2714 } 2640 2715 } ··· 2644 2721 2645 2722 if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 || 2646 2723 nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) { 2647 - f2fs_msg(sbi->sb, KERN_ERR, 2648 - "Wrong bitmap size: sit: %u, nat:%u", 2649 - sit_bitmap_size, nat_bitmap_size); 2724 + f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u", 2725 + sit_bitmap_size, nat_bitmap_size); 2650 2726 return 1; 2651 2727 } 2652 2728 ··· 2654 2732 if (cp_pack_start_sum < cp_payload + 1 || 2655 2733 cp_pack_start_sum > blocks_per_seg - 1 - 2656 2734 NR_CURSEG_TYPE) { 2657 - f2fs_msg(sbi->sb, KERN_ERR, 2658 - "Wrong cp_pack_start_sum: %u", 2659 - cp_pack_start_sum); 2735 + f2fs_err(sbi, "Wrong cp_pack_start_sum: %u", 2736 + cp_pack_start_sum); 2660 2737 return 1; 2661 2738 } 2662 2739 2663 2740 if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) && 2664 2741 le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) { 2665 - f2fs_msg(sbi->sb, KERN_WARNING, 2666 - "layout of large_nat_bitmap is deprecated, " 2667 - "run fsck to repair, chksum_offset: %u", 2668 - le32_to_cpu(ckpt->checksum_offset)); 2742 + f2fs_warn(sbi, "layout of large_nat_bitmap is deprecated, run fsck to repair, chksum_offset: %u", 2743 + le32_to_cpu(ckpt->checksum_offset)); 2669 2744 return 1; 2670 2745 } 2671 2746 2672 2747 if 
(unlikely(f2fs_cp_error(sbi))) { 2673 - f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck"); 2748 + f2fs_err(sbi, "A bug case: need to run fsck"); 2674 2749 return 1; 2675 2750 } 2676 2751 return 0; ··· 2836 2917 for (block = 0; block < 2; block++) { 2837 2918 bh = sb_bread(sb, block); 2838 2919 if (!bh) { 2839 - f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock", 2840 - block + 1); 2920 + f2fs_err(sbi, "Unable to read %dth superblock", 2921 + block + 1); 2841 2922 err = -EIO; 2842 2923 continue; 2843 2924 } 2844 2925 2845 2926 /* sanity checking of raw super */ 2846 2927 if (sanity_check_raw_super(sbi, bh)) { 2847 - f2fs_msg(sb, KERN_ERR, 2848 - "Can't find valid F2FS filesystem in %dth superblock", 2849 - block + 1); 2928 + f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock", 2929 + block + 1); 2850 2930 err = -EINVAL; 2851 2931 brelse(bh); 2852 2932 continue; ··· 2975 3057 #ifdef CONFIG_BLK_DEV_ZONED 2976 3058 if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM && 2977 3059 !f2fs_sb_has_blkzoned(sbi)) { 2978 - f2fs_msg(sbi->sb, KERN_ERR, 2979 - "Zoned block device feature not enabled\n"); 3060 + f2fs_err(sbi, "Zoned block device feature not enabled\n"); 2980 3061 return -EINVAL; 2981 3062 } 2982 3063 if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) { 2983 3064 if (init_blkz_info(sbi, i)) { 2984 - f2fs_msg(sbi->sb, KERN_ERR, 2985 - "Failed to initialize F2FS blkzone information"); 3065 + f2fs_err(sbi, "Failed to initialize F2FS blkzone information"); 2986 3066 return -EINVAL; 2987 3067 } 2988 3068 if (max_devices == 1) 2989 3069 break; 2990 - f2fs_msg(sbi->sb, KERN_INFO, 2991 - "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)", 2992 - i, FDEV(i).path, 2993 - FDEV(i).total_segments, 2994 - FDEV(i).start_blk, FDEV(i).end_blk, 2995 - bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ? 
2996 - "Host-aware" : "Host-managed"); 3070 + f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)", 3071 + i, FDEV(i).path, 3072 + FDEV(i).total_segments, 3073 + FDEV(i).start_blk, FDEV(i).end_blk, 3074 + bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ? 3075 + "Host-aware" : "Host-managed"); 2997 3076 continue; 2998 3077 } 2999 3078 #endif 3000 - f2fs_msg(sbi->sb, KERN_INFO, 3001 - "Mount Device [%2d]: %20s, %8u, %8x - %8x", 3002 - i, FDEV(i).path, 3003 - FDEV(i).total_segments, 3004 - FDEV(i).start_blk, FDEV(i).end_blk); 3079 + f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x", 3080 + i, FDEV(i).path, 3081 + FDEV(i).total_segments, 3082 + FDEV(i).start_blk, FDEV(i).end_blk); 3005 3083 } 3006 - f2fs_msg(sbi->sb, KERN_INFO, 3007 - "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi)); 3084 + f2fs_info(sbi, 3085 + "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi)); 3008 3086 return 0; 3009 3087 } 3010 3088 ··· 3046 3132 /* Load the checksum driver */ 3047 3133 sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0); 3048 3134 if (IS_ERR(sbi->s_chksum_driver)) { 3049 - f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver."); 3135 + f2fs_err(sbi, "Cannot load crc32 driver."); 3050 3136 err = PTR_ERR(sbi->s_chksum_driver); 3051 3137 sbi->s_chksum_driver = NULL; 3052 3138 goto free_sbi; ··· 3054 3140 3055 3141 /* set a block size */ 3056 3142 if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) { 3057 - f2fs_msg(sb, KERN_ERR, "unable to set blocksize"); 3143 + f2fs_err(sbi, "unable to set blocksize"); 3058 3144 goto free_sbi; 3059 3145 } 3060 3146 ··· 3078 3164 */ 3079 3165 #ifndef CONFIG_BLK_DEV_ZONED 3080 3166 if (f2fs_sb_has_blkzoned(sbi)) { 3081 - f2fs_msg(sb, KERN_ERR, 3082 - "Zoned block device support is not enabled"); 3167 + f2fs_err(sbi, "Zoned block device support is not enabled"); 3083 3168 err = -EOPNOTSUPP; 3084 3169 goto free_sb_buf; 3085 3170 } ··· 3186 3273 /* get an inode for meta space */ 3187 3274 sbi->meta_inode = f2fs_iget(sb, 
F2FS_META_INO(sbi)); 3188 3275 if (IS_ERR(sbi->meta_inode)) { 3189 - f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode"); 3276 + f2fs_err(sbi, "Failed to read F2FS meta data inode"); 3190 3277 err = PTR_ERR(sbi->meta_inode); 3191 3278 goto free_io_dummy; 3192 3279 } 3193 3280 3194 3281 err = f2fs_get_valid_checkpoint(sbi); 3195 3282 if (err) { 3196 - f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint"); 3283 + f2fs_err(sbi, "Failed to get valid F2FS checkpoint"); 3197 3284 goto free_meta_inode; 3198 3285 } 3199 3286 ··· 3210 3297 /* Initialize device list */ 3211 3298 err = f2fs_scan_devices(sbi); 3212 3299 if (err) { 3213 - f2fs_msg(sb, KERN_ERR, "Failed to find devices"); 3300 + f2fs_err(sbi, "Failed to find devices"); 3214 3301 goto free_devices; 3215 3302 } 3216 3303 ··· 3241 3328 /* setup f2fs internal modules */ 3242 3329 err = f2fs_build_segment_manager(sbi); 3243 3330 if (err) { 3244 - f2fs_msg(sb, KERN_ERR, 3245 - "Failed to initialize F2FS segment manager (%d)", err); 3331 + f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)", 3332 + err); 3246 3333 goto free_sm; 3247 3334 } 3248 3335 err = f2fs_build_node_manager(sbi); 3249 3336 if (err) { 3250 - f2fs_msg(sb, KERN_ERR, 3251 - "Failed to initialize F2FS node manager (%d)", err); 3337 + f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)", 3338 + err); 3252 3339 goto free_nm; 3253 3340 } 3254 3341 ··· 3273 3360 /* get an inode for node space */ 3274 3361 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi)); 3275 3362 if (IS_ERR(sbi->node_inode)) { 3276 - f2fs_msg(sb, KERN_ERR, "Failed to read node inode"); 3363 + f2fs_err(sbi, "Failed to read node inode"); 3277 3364 err = PTR_ERR(sbi->node_inode); 3278 3365 goto free_stats; 3279 3366 } ··· 3281 3368 /* read root inode and dentry */ 3282 3369 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi)); 3283 3370 if (IS_ERR(root)) { 3284 - f2fs_msg(sb, KERN_ERR, "Failed to read root inode"); 3371 + f2fs_err(sbi, "Failed to read root inode"); 
3285 3372 err = PTR_ERR(root); 3286 3373 goto free_node_inode; 3287 3374 } ··· 3307 3394 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) { 3308 3395 err = f2fs_enable_quotas(sb); 3309 3396 if (err) 3310 - f2fs_msg(sb, KERN_ERR, 3311 - "Cannot turn on quotas: error %d", err); 3397 + f2fs_err(sbi, "Cannot turn on quotas: error %d", err); 3312 3398 } 3313 3399 #endif 3314 3400 /* if there are nt orphan nodes free them */ ··· 3327 3415 if (f2fs_hw_is_readonly(sbi)) { 3328 3416 if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) { 3329 3417 err = -EROFS; 3330 - f2fs_msg(sb, KERN_ERR, 3331 - "Need to recover fsync data, but " 3332 - "write access unavailable"); 3418 + f2fs_err(sbi, "Need to recover fsync data, but write access unavailable"); 3333 3419 goto free_meta; 3334 3420 } 3335 - f2fs_msg(sbi->sb, KERN_INFO, "write access " 3336 - "unavailable, skipping recovery"); 3421 + f2fs_info(sbi, "write access unavailable, skipping recovery"); 3337 3422 goto reset_checkpoint; 3338 3423 } 3339 3424 ··· 3345 3436 if (err != -ENOMEM) 3346 3437 skip_recovery = true; 3347 3438 need_fsck = true; 3348 - f2fs_msg(sb, KERN_ERR, 3349 - "Cannot recover all fsync data errno=%d", err); 3439 + f2fs_err(sbi, "Cannot recover all fsync data errno=%d", 3440 + err); 3350 3441 goto free_meta; 3351 3442 } 3352 3443 } else { ··· 3354 3445 3355 3446 if (!f2fs_readonly(sb) && err > 0) { 3356 3447 err = -EINVAL; 3357 - f2fs_msg(sb, KERN_ERR, 3358 - "Need to recover fsync data"); 3448 + f2fs_err(sbi, "Need to recover fsync data"); 3359 3449 goto free_meta; 3360 3450 } 3361 3451 } ··· 3385 3477 /* recover broken superblock */ 3386 3478 if (recovery) { 3387 3479 err = f2fs_commit_super(sbi, true); 3388 - f2fs_msg(sb, KERN_INFO, 3389 - "Try to recover %dth superblock, ret: %d", 3390 - sbi->valid_super_block ? 1 : 2, err); 3480 + f2fs_info(sbi, "Try to recover %dth superblock, ret: %d", 3481 + sbi->valid_super_block ? 
1 : 2, err); 3391 3482 } 3392 3483 3393 3484 f2fs_join_shrinker(sbi); 3394 3485 3395 3486 f2fs_tuning_parameters(sbi); 3396 3487 3397 - f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx", 3398 - cur_cp_version(F2FS_CKPT(sbi))); 3488 + f2fs_notice(sbi, "Mounted with checkpoint version = %llx", 3489 + cur_cp_version(F2FS_CKPT(sbi))); 3399 3490 f2fs_update_time(sbi, CP_TIME); 3400 3491 f2fs_update_time(sbi, REQ_TIME); 3401 3492 clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);