Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
block: make unplug timer trace event correspond to the schedule() unplug
block: let io_schedule() flush the plug inline
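
In short: the first commit makes blktrace distinguish an unplug driven by schedule() from an explicit one, and the second keeps io_schedule() flushing the plug inline while schedule() itself defers the dispatch to kblockd. The sketch below is illustration only, not code from this merge; it assumes the 2.6.39-era plugging API (blk_start_plug()/blk_finish_plug(), submit_bio()), and plugged_read_example() is a hypothetical caller.

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/fs.h>

    /*
     * Illustration only (not part of this merge): submit a bio under a plug.
     *
     *  - blk_finish_plug() ends up in blk_flush_plug_list(plug, false): the
     *    queues are run inline and blktrace logs an explicit unplug ("U").
     *  - If the task blocks first and schedule() flushes the plug, it now
     *    goes through blk_schedule_flush_plug(), i.e.
     *    blk_flush_plug_list(plug, true): dispatch is punted to kblockd to
     *    keep the scheduler's stack shallow, and blktrace logs it as
     *    BLK_TA_UNPLUG_TIMER ("UT").
     */
    static void plugged_read_example(struct bio *bio)
    {
    	struct blk_plug plug;

    	blk_start_plug(&plug);	/* requests queue up on current->plug */
    	submit_bio(READ, bio);	/* held back until the plug is flushed */
    	blk_finish_plug(&plug);	/* explicit, inline flush */
    }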

5 files changed, +45 -19
+12 -6
block/blk-core.c
···
 	return !(rqa->q <= rqb->q);
 }
 
+/*
+ * If 'from_schedule' is true, then postpone the dispatch of requests
+ * until a safe kblockd context. We do this to avoid accidental big
+ * additional stack usage in driver dispatch, in places where the original
+ * plugger did not intend it.
+ */
 static void queue_unplugged(struct request_queue *q, unsigned int depth,
-			    bool force_kblockd)
+			    bool from_schedule)
 {
-	trace_block_unplug_io(q, depth);
-	__blk_run_queue(q, force_kblockd);
+	trace_block_unplug(q, depth, !from_schedule);
+	__blk_run_queue(q, from_schedule);
 
 	if (q->unplugged_fn)
 		q->unplugged_fn(q);
 }
 
-void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
+void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
 	struct request_queue *q;
 	unsigned long flags;
···
 		BUG_ON(!rq->q);
 		if (rq->q != q) {
 			if (q) {
-				queue_unplugged(q, depth, force_kblockd);
+				queue_unplugged(q, depth, from_schedule);
 				spin_unlock(q->queue_lock);
 			}
 			q = rq->q;
···
 	}
 
 	if (q) {
-		queue_unplugged(q, depth, force_kblockd);
+		queue_unplugged(q, depth, from_schedule);
 		spin_unlock(q->queue_lock);
 	}
 
+13
include/linux/blkdev.h
···
 	struct blk_plug *plug = tsk->plug;
 
 	if (plug)
+		blk_flush_plug_list(plug, false);
+}
+
+static inline void blk_schedule_flush_plug(struct task_struct *tsk)
+{
+	struct blk_plug *plug = tsk->plug;
+
+	if (plug)
 		blk_flush_plug_list(plug, true);
 }
 
···
 static inline void blk_flush_plug(struct task_struct *task)
 {
 }
+
+static inline void blk_schedule_flush_plug(struct task_struct *task)
+{
+}
+
 
 static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 {
+7 -6
include/trace/events/block.h
···
 
 DECLARE_EVENT_CLASS(block_unplug,
 
-	TP_PROTO(struct request_queue *q, unsigned int depth),
+	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
 
-	TP_ARGS(q, depth),
+	TP_ARGS(q, depth, explicit),
 
 	TP_STRUCT__entry(
 		__field( int,	nr_rq	)
···
 );
 
 /**
- * block_unplug_io - release of operations requests in request queue
+ * block_unplug - release of operations requests in request queue
  * @q: request queue to unplug
  * @depth: number of requests just added to the queue
+ * @explicit: whether this was an explicit unplug, or one from schedule()
  *
  * Unplug request queue @q because device driver is scheduled to work
  * on elements in the request queue.
  */
-DEFINE_EVENT(block_unplug, block_unplug_io,
+DEFINE_EVENT(block_unplug, block_unplug,
 
-	TP_PROTO(struct request_queue *q, unsigned int depth),
+	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
 
-	TP_ARGS(q, depth)
+	TP_ARGS(q, depth, explicit)
 );
 
 /**
+1 -1
kernel/sched.c
···
 		 */
 		if (blk_needs_flush_plug(prev)) {
 			raw_spin_unlock(&rq->lock);
-			blk_flush_plug(prev);
+			blk_schedule_flush_plug(prev);
 			raw_spin_lock(&rq->lock);
 		}
 	}
+12 -6
kernel/trace/blktrace.c
···
 	__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
 }
 
-static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q,
-				    unsigned int depth)
+static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
+				 unsigned int depth, bool explicit)
 {
 	struct blk_trace *bt = q->blk_trace;
 
 	if (bt) {
 		__be64 rpdu = cpu_to_be64(depth);
+		u32 what;
 
-		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
-				sizeof(rpdu), &rpdu);
+		if (explicit)
+			what = BLK_TA_UNPLUG_IO;
+		else
+			what = BLK_TA_UNPLUG_TIMER;
+
+		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
 	}
 }
···
 	WARN_ON(ret);
 	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
 	WARN_ON(ret);
-	ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
+	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
 	WARN_ON(ret);
 	ret = register_trace_block_split(blk_add_trace_split, NULL);
 	WARN_ON(ret);
···
 	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
 	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
 	unregister_trace_block_split(blk_add_trace_split, NULL);
-	unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
+	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
 	unregister_trace_block_plug(blk_add_trace_plug, NULL);
 	unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
 	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
···
 	[__BLK_TA_COMPLETE]	= {{ "C", "complete" },	blk_log_with_error },
 	[__BLK_TA_PLUG]		= {{ "P", "plug" },	blk_log_plug },
 	[__BLK_TA_UNPLUG_IO]	= {{ "U", "unplug_io" },	blk_log_unplug },
+	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" },	blk_log_unplug },
 	[__BLK_TA_INSERT]	= {{ "I", "insert" },	blk_log_generic },
 	[__BLK_TA_SPLIT]	= {{ "X", "split" },	blk_log_split },
 	[__BLK_TA_BOUNCE]	= {{ "B", "bounce" },	blk_log_generic },