Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netfs: Renumber the NETFS_RREQ_* flags to make traces easier to read

Renumber the NETFS_RREQ_* flags to put the most useful status bits in the
bottom nibble - and therefore the last hex digit in the trace output -
making it easier to grasp the state at a glance.

In particular, put the IN_PROGRESS flag in bit 0 and ALL_QUEUED at bit 1.

Also make the flags field in /proc/fs/netfs/requests larger to accommodate
all the flags.

Also make the flags field in the netfs_sreq tracepoint larger to
accommodate all the NETFS_SREQ_* flags.

Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/20250701163852.2171681-13-dhowells@redhat.com
Reviewed-by: Paulo Alcantara <pc@manguebit.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>

Authored by David Howells and committed by Christian Brauner.
4e325410 5e1e6ec2

+14 -14
+3 -3
fs/netfs/main.c
@@ -58,15 +58,15 @@
 	if (v == &netfs_io_requests) {
 		seq_puts(m,
-			 "REQUEST  OR REF FL ERR  OPS COVERAGE\n"
-			 "======== == === == ==== === =========\n"
+			 "REQUEST  OR REF FLAG ERR  OPS COVERAGE\n"
+			 "======== == === ==== ==== === =========\n"
			 );
		return 0;
	}

	rreq = list_entry(v, struct netfs_io_request, proc_link);
	seq_printf(m,
-		   "%08x %s %3d %2lx %4ld %3d @%04llx %llx/%llx",
+		   "%08x %s %3d %4lx %4ld %3d @%04llx %llx/%llx",
		   rreq->debug_id,
		   netfs_origins[rreq->origin],
		   refcount_read(&rreq->ref),
+10 -10
include/linux/netfs.h
@@ -265,17 +265,17 @@
 	bool			direct_bv_unpin; /* T if direct_bv[] must be unpinned */
 	refcount_t		ref;
 	unsigned long		flags;
-#define NETFS_RREQ_OFFLOAD_COLLECTION	0	/* Offload collection to workqueue */
-#define NETFS_RREQ_NO_UNLOCK_FOLIO	2	/* Don't unlock no_unlock_folio on completion */
-#define NETFS_RREQ_FAILED		4	/* The request failed */
-#define NETFS_RREQ_IN_PROGRESS		5	/* Unlocked when the request completes (has ref) */
-#define NETFS_RREQ_FOLIO_COPY_TO_CACHE	6	/* Copy current folio to cache from read */
-#define NETFS_RREQ_UPLOAD_TO_SERVER	8	/* Need to write to the server */
-#define NETFS_RREQ_PAUSE		11	/* Pause subrequest generation */
+#define NETFS_RREQ_IN_PROGRESS		0	/* Unlocked when the request completes (has ref) */
+#define NETFS_RREQ_ALL_QUEUED		1	/* All subreqs are now queued */
+#define NETFS_RREQ_PAUSE		2	/* Pause subrequest generation */
+#define NETFS_RREQ_FAILED		3	/* The request failed */
+#define NETFS_RREQ_RETRYING		4	/* Set if we're in the retry path */
+#define NETFS_RREQ_SHORT_TRANSFER	5	/* Set if we have a short transfer */
+#define NETFS_RREQ_OFFLOAD_COLLECTION	8	/* Offload collection to workqueue */
+#define NETFS_RREQ_NO_UNLOCK_FOLIO	9	/* Don't unlock no_unlock_folio on completion */
+#define NETFS_RREQ_FOLIO_COPY_TO_CACHE	10	/* Copy current folio to cache from read */
+#define NETFS_RREQ_UPLOAD_TO_SERVER	11	/* Need to write to the server */
 #define NETFS_RREQ_USE_IO_ITER		12	/* Use ->io_iter rather than ->i_pages */
-#define NETFS_RREQ_ALL_QUEUED		13	/* All subreqs are now queued */
-#define NETFS_RREQ_RETRYING		14	/* Set if we're in the retry path */
-#define NETFS_RREQ_SHORT_TRANSFER	15	/* Set if we have a short transfer */
 #define NETFS_RREQ_USE_PGPRIV2		31	/* [DEPRECATED] Use PG_private_2 to mark
					  * write to cache on read */
 	const struct netfs_request_ops *netfs_ops;
+1 -1
include/trace/events/netfs.h
@@ -367,7 +367,7 @@
 		__entry->slot = sreq->io_iter.folioq_slot;
 	),

-	TP_printk("R=%08x[%x] %s %s f=%02x s=%llx %zx/%zx s=%u e=%d",
+	TP_printk("R=%08x[%x] %s %s f=%03x s=%llx %zx/%zx s=%u e=%d",
		  __entry->rreq, __entry->index,
		  __print_symbolic(__entry->source, netfs_sreq_sources),
		  __print_symbolic(__entry->what, netfs_sreq_traces),