Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: remove the request_queue to argument request based tracepoints

The request_queue can trivially be derived from the request.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Christoph Hellwig; committed by Jens Axboe.
Commits: a54895fa (this patch), 1c02fca6 (parent)

+39 -57
+1 -1
block/blk-merge.c
··· 799 799 */ 800 800 blk_account_io_merge_request(next); 801 801 802 - trace_block_rq_merge(q, next); 802 + trace_block_rq_merge(next); 803 803 804 804 /* 805 805 * ownership of bio passed from next to req, return 'next' for
+1 -1
block/blk-mq-sched.c
··· 386 386 387 387 void blk_mq_sched_request_inserted(struct request *rq) 388 388 { 389 - trace_block_rq_insert(rq->q, rq); 389 + trace_block_rq_insert(rq); 390 390 } 391 391 EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted); 392 392
+4 -4
block/blk-mq.c
··· 733 733 { 734 734 struct request_queue *q = rq->q; 735 735 736 - trace_block_rq_issue(q, rq); 736 + trace_block_rq_issue(rq); 737 737 738 738 if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) { 739 739 rq->io_start_time_ns = ktime_get_ns(); ··· 760 760 761 761 blk_mq_put_driver_tag(rq); 762 762 763 - trace_block_rq_requeue(q, rq); 763 + trace_block_rq_requeue(rq); 764 764 rq_qos_requeue(q, rq); 765 765 766 766 if (blk_mq_request_started(rq)) { ··· 1821 1821 1822 1822 lockdep_assert_held(&ctx->lock); 1823 1823 1824 - trace_block_rq_insert(hctx->queue, rq); 1824 + trace_block_rq_insert(rq); 1825 1825 1826 1826 if (at_head) 1827 1827 list_add(&rq->queuelist, &ctx->rq_lists[type]); ··· 1878 1878 */ 1879 1879 list_for_each_entry(rq, list, queuelist) { 1880 1880 BUG_ON(rq->mq_ctx != ctx); 1881 - trace_block_rq_insert(hctx->queue, rq); 1881 + trace_block_rq_insert(rq); 1882 1882 } 1883 1883 1884 1884 spin_lock(&ctx->lock);
+1 -1
drivers/md/dm-rq.c
··· 397 397 } 398 398 399 399 /* The target has remapped the I/O so dispatch it */ 400 - trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), 400 + trace_block_rq_remap(clone, disk_devt(dm_disk(md)), 401 401 blk_rq_pos(rq)); 402 402 ret = dm_dispatch_clone_request(clone, rq); 403 403 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
+1 -2
drivers/s390/scsi/zfcp_fsf.c
··· 2359 2359 } 2360 2360 } 2361 2361 2362 - blk_add_driver_data(scsi->request->q, scsi->request, &blktrc, 2363 - sizeof(blktrc)); 2362 + blk_add_driver_data(scsi->request, &blktrc, sizeof(blktrc)); 2364 2363 } 2365 2364 2366 2365 /**
+2 -3
include/linux/blktrace_api.h
··· 75 75 return ret; 76 76 } 77 77 78 - extern void blk_add_driver_data(struct request_queue *q, struct request *rq, 79 - void *data, size_t len); 78 + extern void blk_add_driver_data(struct request *rq, void *data, size_t len); 80 79 extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, 81 80 struct block_device *bdev, 82 81 char __user *arg); ··· 89 90 #else /* !CONFIG_BLK_DEV_IO_TRACE */ 90 91 # define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) 91 92 # define blk_trace_shutdown(q) do { } while (0) 92 - # define blk_add_driver_data(q, rq, data, len) do {} while (0) 93 + # define blk_add_driver_data(rq, data, len) do {} while (0) 93 94 # define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY) 94 95 # define blk_trace_startstop(q, start) (-ENOTTY) 95 96 # define blk_trace_remove(q) (-ENOTTY)
+12 -18
include/trace/events/block.h
··· 64 64 65 65 /** 66 66 * block_rq_requeue - place block IO request back on a queue 67 - * @q: queue holding operation 68 67 * @rq: block IO operation request 69 68 * 70 69 * The block operation request @rq is being placed back into queue ··· 72 73 */ 73 74 TRACE_EVENT(block_rq_requeue, 74 75 75 - TP_PROTO(struct request_queue *q, struct request *rq), 76 + TP_PROTO(struct request *rq), 76 77 77 - TP_ARGS(q, rq), 78 + TP_ARGS(rq), 78 79 79 80 TP_STRUCT__entry( 80 81 __field( dev_t, dev ) ··· 146 147 147 148 DECLARE_EVENT_CLASS(block_rq, 148 149 149 - TP_PROTO(struct request_queue *q, struct request *rq), 150 + TP_PROTO(struct request *rq), 150 151 151 - TP_ARGS(q, rq), 152 + TP_ARGS(rq), 152 153 153 154 TP_STRUCT__entry( 154 155 __field( dev_t, dev ) ··· 180 181 181 182 /** 182 183 * block_rq_insert - insert block operation request into queue 183 - * @q: target queue 184 184 * @rq: block IO operation request 185 185 * 186 186 * Called immediately before block operation request @rq is inserted ··· 189 191 */ 190 192 DEFINE_EVENT(block_rq, block_rq_insert, 191 193 192 - TP_PROTO(struct request_queue *q, struct request *rq), 194 + TP_PROTO(struct request *rq), 193 195 194 - TP_ARGS(q, rq) 196 + TP_ARGS(rq) 195 197 ); 196 198 197 199 /** 198 200 * block_rq_issue - issue pending block IO request operation to device driver 199 - * @q: queue holding operation 200 201 * @rq: block IO operation operation request 201 202 * 202 203 * Called when block operation request @rq from queue @q is sent to a ··· 203 206 */ 204 207 DEFINE_EVENT(block_rq, block_rq_issue, 205 208 206 - TP_PROTO(struct request_queue *q, struct request *rq), 209 + TP_PROTO(struct request *rq), 207 210 208 - TP_ARGS(q, rq) 211 + TP_ARGS(rq) 209 212 ); 210 213 211 214 /** 212 215 * block_rq_merge - merge request with another one in the elevator 213 - * @q: queue holding operation 214 216 * @rq: block IO operation operation request 215 217 * 216 218 * Called when block operation request @rq from queue @q is 
merged to another ··· 217 221 */ 218 222 DEFINE_EVENT(block_rq, block_rq_merge, 219 223 220 - TP_PROTO(struct request_queue *q, struct request *rq), 224 + TP_PROTO(struct request *rq), 221 225 222 - TP_ARGS(q, rq) 226 + TP_ARGS(rq) 223 227 ); 224 228 225 229 /** ··· 487 491 488 492 /** 489 493 * block_rq_remap - map request for a block operation request 490 - * @q: queue holding the operation 491 494 * @rq: block IO operation request 492 495 * @dev: device for the operation 493 496 * @from: original sector for the operation ··· 497 502 */ 498 503 TRACE_EVENT(block_rq_remap, 499 504 500 - TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev, 501 - sector_t from), 505 + TP_PROTO(struct request *rq, dev_t dev, sector_t from), 502 506 503 - TP_ARGS(q, rq, dev, from), 507 + TP_ARGS(rq, dev, from), 504 508 505 509 TP_STRUCT__entry( 506 510 __field( dev_t, dev )
+17 -27
kernel/trace/blktrace.c
··· 795 795 #endif 796 796 797 797 static u64 798 - blk_trace_request_get_cgid(struct request_queue *q, struct request *rq) 798 + blk_trace_request_get_cgid(struct request *rq) 799 799 { 800 800 if (!rq->bio) 801 801 return 0; 802 802 /* Use the first bio */ 803 - return blk_trace_bio_get_cgid(q, rq->bio); 803 + return blk_trace_bio_get_cgid(rq->q, rq->bio); 804 804 } 805 805 806 806 /* ··· 841 841 rcu_read_unlock(); 842 842 } 843 843 844 - static void blk_add_trace_rq_insert(void *ignore, 845 - struct request_queue *q, struct request *rq) 844 + static void blk_add_trace_rq_insert(void *ignore, struct request *rq) 846 845 { 847 846 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT, 848 - blk_trace_request_get_cgid(q, rq)); 847 + blk_trace_request_get_cgid(rq)); 849 848 } 850 849 851 - static void blk_add_trace_rq_issue(void *ignore, 852 - struct request_queue *q, struct request *rq) 850 + static void blk_add_trace_rq_issue(void *ignore, struct request *rq) 853 851 { 854 852 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE, 855 - blk_trace_request_get_cgid(q, rq)); 853 + blk_trace_request_get_cgid(rq)); 856 854 } 857 855 858 - static void blk_add_trace_rq_merge(void *ignore, 859 - struct request_queue *q, struct request *rq) 856 + static void blk_add_trace_rq_merge(void *ignore, struct request *rq) 860 857 { 861 858 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE, 862 - blk_trace_request_get_cgid(q, rq)); 859 + blk_trace_request_get_cgid(rq)); 863 860 } 864 861 865 - static void blk_add_trace_rq_requeue(void *ignore, 866 - struct request_queue *q, 867 - struct request *rq) 862 + static void blk_add_trace_rq_requeue(void *ignore, struct request *rq) 868 863 { 869 864 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE, 870 - blk_trace_request_get_cgid(q, rq)); 865 + blk_trace_request_get_cgid(rq)); 871 866 } 872 867 873 868 static void blk_add_trace_rq_complete(void *ignore, struct request *rq, 874 869 int error, unsigned int nr_bytes) 875 
870 { 876 871 blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE, 877 - blk_trace_request_get_cgid(rq->q, rq)); 872 + blk_trace_request_get_cgid(rq)); 878 873 } 879 874 880 875 /** ··· 1032 1037 * Add a trace for that action. 1033 1038 * 1034 1039 **/ 1035 - static void blk_add_trace_rq_remap(void *ignore, 1036 - struct request_queue *q, 1037 - struct request *rq, dev_t dev, 1040 + static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev, 1038 1041 sector_t from) 1039 1042 { 1040 1043 struct blk_trace *bt; 1041 1044 struct blk_io_trace_remap r; 1042 1045 1043 1046 rcu_read_lock(); 1044 - bt = rcu_dereference(q->blk_trace); 1047 + bt = rcu_dereference(rq->q->blk_trace); 1045 1048 if (likely(!bt)) { 1046 1049 rcu_read_unlock(); 1047 1050 return; ··· 1051 1058 1052 1059 __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 1053 1060 rq_data_dir(rq), 0, BLK_TA_REMAP, 0, 1054 - sizeof(r), &r, blk_trace_request_get_cgid(q, rq)); 1061 + sizeof(r), &r, blk_trace_request_get_cgid(rq)); 1055 1062 rcu_read_unlock(); 1056 1063 } 1057 1064 1058 1065 /** 1059 1066 * blk_add_driver_data - Add binary message with driver-specific data 1060 - * @q: queue the io is for 1061 1067 * @rq: io request 1062 1068 * @data: driver-specific data 1063 1069 * @len: length of driver-specific data ··· 1065 1073 * Some drivers might want to write driver-specific data per request. 
1066 1074 * 1067 1075 **/ 1068 - void blk_add_driver_data(struct request_queue *q, 1069 - struct request *rq, 1070 - void *data, size_t len) 1076 + void blk_add_driver_data(struct request *rq, void *data, size_t len) 1071 1077 { 1072 1078 struct blk_trace *bt; 1073 1079 1074 1080 rcu_read_lock(); 1075 - bt = rcu_dereference(q->blk_trace); 1081 + bt = rcu_dereference(rq->q->blk_trace); 1076 1082 if (likely(!bt)) { 1077 1083 rcu_read_unlock(); 1078 1084 return; ··· 1078 1088 1079 1089 __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0, 1080 1090 BLK_TA_DRV_DATA, 0, len, data, 1081 - blk_trace_request_get_cgid(q, rq)); 1091 + blk_trace_request_get_cgid(rq)); 1082 1092 rcu_read_unlock(); 1083 1093 } 1084 1094 EXPORT_SYMBOL_GPL(blk_add_driver_data);