Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

erofs: clean up erofs_map_blocks tracepoints

Since the new chunk-based file type has been introduced, there is no
need to keep the flatmode-specific tracepoints.

Rename them to erofs_map_blocks tracepoints instead.

Link: https://lore.kernel.org/r/20211209012918.30337-1-hsiangkao@linux.alibaba.com
Reviewed-by: Yue Hu <huyue2@yulong.com>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>

+19 -24
+17 -22
fs/erofs/data.c
··· 26 26 struct erofs_map_blocks *map, 27 27 int flags) 28 28 { 29 - int err = 0; 30 29 erofs_blk_t nblocks, lastblk; 31 30 u64 offset = map->m_la; 32 31 struct erofs_inode *vi = EROFS_I(inode); 33 32 bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE); 34 - 35 - trace_erofs_map_blocks_flatmode_enter(inode, map, flags); 36 33 37 34 nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE); 38 35 lastblk = nblocks - tailendpacking; 39 36 40 37 /* there is no hole in flatmode */ 41 38 map->m_flags = EROFS_MAP_MAPPED; 42 - 43 39 if (offset < blknr_to_addr(lastblk)) { 44 40 map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la; 45 41 map->m_plen = blknr_to_addr(lastblk) - offset; ··· 47 51 vi->xattr_isize + erofs_blkoff(map->m_la); 48 52 map->m_plen = inode->i_size - offset; 49 53 50 - /* inline data should be located in one meta block */ 51 - if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) { 54 + /* inline data should be located in the same meta block */ 55 + if (erofs_blkoff(map->m_pa) + map->m_plen > EROFS_BLKSIZ) { 52 56 erofs_err(inode->i_sb, 53 57 "inline data cross block boundary @ nid %llu", 54 58 vi->nid); 55 59 DBG_BUGON(1); 56 - err = -EFSCORRUPTED; 57 - goto err_out; 60 + return -EFSCORRUPTED; 58 61 } 59 - 60 62 map->m_flags |= EROFS_MAP_META; 61 63 } else { 62 64 erofs_err(inode->i_sb, 63 65 "internal error @ nid: %llu (size %llu), m_la 0x%llx", 64 66 vi->nid, inode->i_size, map->m_la); 65 67 DBG_BUGON(1); 66 - err = -EIO; 67 - goto err_out; 68 + return -EIO; 68 69 } 69 - 70 - map->m_llen = map->m_plen; 71 - err_out: 72 - trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0); 73 - return err; 70 + return 0; 74 71 } 75 72 76 73 static int erofs_map_blocks(struct inode *inode, ··· 78 89 erofs_off_t pos; 79 90 int err = 0; 80 91 92 + trace_erofs_map_blocks_enter(inode, map, flags); 81 93 map->m_deviceid = 0; 82 94 if (map->m_la >= inode->i_size) { 83 95 /* leave out-of-bound access unmapped */ ··· 87 97 goto out; 88 98 } 89 99 90 - if 
(vi->datalayout != EROFS_INODE_CHUNK_BASED) 91 - return erofs_map_blocks_flatmode(inode, map, flags); 100 + if (vi->datalayout != EROFS_INODE_CHUNK_BASED) { 101 + err = erofs_map_blocks_flatmode(inode, map, flags); 102 + goto out; 103 + } 92 104 93 105 if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES) 94 106 unit = sizeof(*idx); /* chunk index */ ··· 102 110 vi->xattr_isize, unit) + unit * chunknr; 103 111 104 112 page = erofs_get_meta_page(inode->i_sb, erofs_blknr(pos)); 105 - if (IS_ERR(page)) 106 - return PTR_ERR(page); 107 - 113 + if (IS_ERR(page)) { 114 + err = PTR_ERR(page); 115 + goto out; 116 + } 108 117 map->m_la = chunknr << vi->chunkbits; 109 118 map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits, 110 119 roundup(inode->i_size - map->m_la, EROFS_BLKSIZ)); ··· 139 146 unlock_page(page); 140 147 put_page(page); 141 148 out: 142 - map->m_llen = map->m_plen; 149 + if (!err) 150 + map->m_llen = map->m_plen; 151 + trace_erofs_map_blocks_exit(inode, map, flags, 0); 143 152 return err; 144 153 } 145 154
+2 -2
include/trace/events/erofs.h
··· 169 169 __entry->flags ? show_map_flags(__entry->flags) : "NULL") 170 170 ); 171 171 172 - DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_flatmode_enter, 172 + DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_enter, 173 173 TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, 174 174 unsigned flags), 175 175 ··· 221 221 show_mflags(__entry->mflags), __entry->ret) 222 222 ); 223 223 224 - DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_flatmode_exit, 224 + DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_exit, 225 225 TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, 226 226 unsigned flags, int ret), 227 227