Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

iomap: lift common tracing code from xfs to iomap

Lift the xfs code for tracing address space operations to the iomap
layer.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>

Authored by Christoph Hellwig and committed by Darrick J. Wong.
9e91c572 009d8d84

+120 -57
+9 -7
fs/iomap/Makefile
··· 3 3 # Copyright (c) 2019 Oracle. 4 4 # All Rights Reserved. 5 5 # 6 + 7 + ccflags-y += -I $(srctree)/$(src) # needed for trace events 8 + 6 9 obj-$(CONFIG_FS_IOMAP) += iomap.o 7 10 8 - iomap-y += \ 9 - apply.o \ 10 - buffered-io.o \ 11 - direct-io.o \ 12 - fiemap.o \ 13 - seek.o 14 - 11 + iomap-y += trace.o \ 12 + apply.o \ 13 + buffered-io.o \ 14 + direct-io.o \ 15 + fiemap.o \ 16 + seek.o 15 17 iomap-$(CONFIG_SWAP) += swapfile.o
+9
fs/iomap/buffered-io.c
··· 16 16 #include <linux/bio.h> 17 17 #include <linux/sched/signal.h> 18 18 #include <linux/migrate.h> 19 + #include "trace.h" 19 20 20 21 #include "../internal.h" 21 22 ··· 302 301 unsigned poff; 303 302 loff_t ret; 304 303 304 + trace_iomap_readpage(page->mapping->host, 1); 305 + 305 306 for (poff = 0; poff < PAGE_SIZE; poff += ret) { 306 307 ret = iomap_apply(inode, page_offset(page) + poff, 307 308 PAGE_SIZE - poff, 0, ops, &ctx, ··· 400 397 loff_t last = page_offset(list_entry(pages->next, struct page, lru)); 401 398 loff_t length = last - pos + PAGE_SIZE, ret = 0; 402 399 400 + trace_iomap_readpages(mapping->host, nr_pages); 401 + 403 402 while (length > 0) { 404 403 ret = iomap_apply(mapping->host, pos, length, 0, ops, 405 404 &ctx, iomap_readpages_actor); ··· 468 463 int 469 464 iomap_releasepage(struct page *page, gfp_t gfp_mask) 470 465 { 466 + trace_iomap_releasepage(page->mapping->host, page, 0, 0); 467 + 471 468 /* 472 469 * mm accommodates an old ext3 case where clean pages might not have had 473 470 * the dirty bit cleared. Thus, it can send actual dirty pages to ··· 485 478 void 486 479 iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len) 487 480 { 481 + trace_iomap_invalidatepage(page->mapping->host, page, offset, len); 482 + 488 483 /* 489 484 * If we are invalidating the entire page, clear the dirty state from it 490 485 * and release it to avoid unnecessary buildup of the LRU.
+12
fs/iomap/trace.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (c) 2019 Christoph Hellwig 4 + */ 5 + #include <linux/iomap.h> 6 + 7 + /* 8 + * We include this last to have the helpers above available for the trace 9 + * event implementations. 10 + */ 11 + #define CREATE_TRACE_POINTS 12 + #include "trace.h"
+87
fs/iomap/trace.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2009-2019 Christoph Hellwig 4 + * 5 + * NOTE: none of these tracepoints shall be consider a stable kernel ABI 6 + * as they can change at any time. 7 + */ 8 + #undef TRACE_SYSTEM 9 + #define TRACE_SYSTEM iomap 10 + 11 + #if !defined(_IOMAP_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) 12 + #define _IOMAP_TRACE_H 13 + 14 + #include <linux/tracepoint.h> 15 + 16 + struct inode; 17 + 18 + DECLARE_EVENT_CLASS(iomap_readpage_class, 19 + TP_PROTO(struct inode *inode, int nr_pages), 20 + TP_ARGS(inode, nr_pages), 21 + TP_STRUCT__entry( 22 + __field(dev_t, dev) 23 + __field(u64, ino) 24 + __field(int, nr_pages) 25 + ), 26 + TP_fast_assign( 27 + __entry->dev = inode->i_sb->s_dev; 28 + __entry->ino = inode->i_ino; 29 + __entry->nr_pages = nr_pages; 30 + ), 31 + TP_printk("dev %d:%d ino 0x%llx nr_pages %d", 32 + MAJOR(__entry->dev), MINOR(__entry->dev), 33 + __entry->ino, 34 + __entry->nr_pages) 35 + ) 36 + 37 + #define DEFINE_READPAGE_EVENT(name) \ 38 + DEFINE_EVENT(iomap_readpage_class, name, \ 39 + TP_PROTO(struct inode *inode, int nr_pages), \ 40 + TP_ARGS(inode, nr_pages)) 41 + DEFINE_READPAGE_EVENT(iomap_readpage); 42 + DEFINE_READPAGE_EVENT(iomap_readpages); 43 + 44 + DECLARE_EVENT_CLASS(iomap_page_class, 45 + TP_PROTO(struct inode *inode, struct page *page, unsigned long off, 46 + unsigned int len), 47 + TP_ARGS(inode, page, off, len), 48 + TP_STRUCT__entry( 49 + __field(dev_t, dev) 50 + __field(u64, ino) 51 + __field(pgoff_t, pgoff) 52 + __field(loff_t, size) 53 + __field(unsigned long, offset) 54 + __field(unsigned int, length) 55 + ), 56 + TP_fast_assign( 57 + __entry->dev = inode->i_sb->s_dev; 58 + __entry->ino = inode->i_ino; 59 + __entry->pgoff = page_offset(page); 60 + __entry->size = i_size_read(inode); 61 + __entry->offset = off; 62 + __entry->length = len; 63 + ), 64 + TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx " 65 + "length %x", 66 + MAJOR(__entry->dev), 
MINOR(__entry->dev), 67 + __entry->ino, 68 + __entry->pgoff, 69 + __entry->size, 70 + __entry->offset, 71 + __entry->length) 72 + ) 73 + 74 + #define DEFINE_PAGE_EVENT(name) \ 75 + DEFINE_EVENT(iomap_page_class, name, \ 76 + TP_PROTO(struct inode *inode, struct page *page, unsigned long off, \ 77 + unsigned int len), \ 78 + TP_ARGS(inode, page, off, len)) 79 + DEFINE_PAGE_EVENT(iomap_releasepage); 80 + DEFINE_PAGE_EVENT(iomap_invalidatepage); 81 + 82 + #endif /* _IOMAP_TRACE_H */ 83 + 84 + #undef TRACE_INCLUDE_PATH 85 + #define TRACE_INCLUDE_PATH . 86 + #define TRACE_INCLUDE_FILE trace 87 + #include <trace/define_trace.h>
+3 -24
fs/xfs/xfs_aops.c
··· 823 823 wbc_account_cgroup_owner(wbc, page, len); 824 824 } 825 825 826 - STATIC void 827 - xfs_vm_invalidatepage( 828 - struct page *page, 829 - unsigned int offset, 830 - unsigned int length) 831 - { 832 - trace_xfs_invalidatepage(page->mapping->host, page, offset, length); 833 - iomap_invalidatepage(page, offset, length); 834 - } 835 - 836 826 /* 837 827 * If the page has delalloc blocks on it, we need to punch them out before we 838 828 * invalidate the page. If we don't, we leave a stale delalloc mapping on the ··· 857 867 if (error && !XFS_FORCED_SHUTDOWN(mp)) 858 868 xfs_alert(mp, "page discard unable to remove delalloc mapping."); 859 869 out_invalidate: 860 - xfs_vm_invalidatepage(page, 0, PAGE_SIZE); 870 + iomap_invalidatepage(page, 0, PAGE_SIZE); 861 871 } 862 872 863 873 /* ··· 1137 1147 xfs_find_bdev_for_inode(mapping->host), wbc); 1138 1148 } 1139 1149 1140 - STATIC int 1141 - xfs_vm_releasepage( 1142 - struct page *page, 1143 - gfp_t gfp_mask) 1144 - { 1145 - trace_xfs_releasepage(page->mapping->host, page, 0, 0); 1146 - return iomap_releasepage(page, gfp_mask); 1147 - } 1148 - 1149 1150 STATIC sector_t 1150 1151 xfs_vm_bmap( 1151 1152 struct address_space *mapping, ··· 1165 1184 struct file *unused, 1166 1185 struct page *page) 1167 1186 { 1168 - trace_xfs_vm_readpage(page->mapping->host, 1); 1169 1187 return iomap_readpage(page, &xfs_iomap_ops); 1170 1188 } 1171 1189 ··· 1175 1195 struct list_head *pages, 1176 1196 unsigned nr_pages) 1177 1197 { 1178 - trace_xfs_vm_readpages(mapping->host, nr_pages); 1179 1198 return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops); 1180 1199 } 1181 1200 ··· 1194 1215 .writepage = xfs_vm_writepage, 1195 1216 .writepages = xfs_vm_writepages, 1196 1217 .set_page_dirty = iomap_set_page_dirty, 1197 - .releasepage = xfs_vm_releasepage, 1198 - .invalidatepage = xfs_vm_invalidatepage, 1218 + .releasepage = iomap_releasepage, 1219 + .invalidatepage = iomap_invalidatepage, 1199 1220 .bmap = xfs_vm_bmap, 1200 
1221 .direct_IO = noop_direct_IO, 1201 1222 .migratepage = iomap_migrate_page,
-26
fs/xfs/xfs_trace.h
··· 1197 1197 DEFINE_PAGE_EVENT(xfs_releasepage); 1198 1198 DEFINE_PAGE_EVENT(xfs_invalidatepage); 1199 1199 1200 - DECLARE_EVENT_CLASS(xfs_readpage_class, 1201 - TP_PROTO(struct inode *inode, int nr_pages), 1202 - TP_ARGS(inode, nr_pages), 1203 - TP_STRUCT__entry( 1204 - __field(dev_t, dev) 1205 - __field(xfs_ino_t, ino) 1206 - __field(int, nr_pages) 1207 - ), 1208 - TP_fast_assign( 1209 - __entry->dev = inode->i_sb->s_dev; 1210 - __entry->ino = inode->i_ino; 1211 - __entry->nr_pages = nr_pages; 1212 - ), 1213 - TP_printk("dev %d:%d ino 0x%llx nr_pages %d", 1214 - MAJOR(__entry->dev), MINOR(__entry->dev), 1215 - __entry->ino, 1216 - __entry->nr_pages) 1217 - ) 1218 - 1219 - #define DEFINE_READPAGE_EVENT(name) \ 1220 - DEFINE_EVENT(xfs_readpage_class, name, \ 1221 - TP_PROTO(struct inode *inode, int nr_pages), \ 1222 - TP_ARGS(inode, nr_pages)) 1223 - DEFINE_READPAGE_EVENT(xfs_vm_readpage); 1224 - DEFINE_READPAGE_EVENT(xfs_vm_readpages); 1225 - 1226 1200 DECLARE_EVENT_CLASS(xfs_imap_class, 1227 1201 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, 1228 1202 int whichfork, struct xfs_bmbt_irec *irec),