Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched/rt: Add a tuning knob to allow changing SCHED_RR timeslice

Add a /proc/sys/kernel scheduler knob named
sched_rr_timeslice_ms that allows global changing of the
SCHED_RR timeslice value. User visible value is in milliseconds
but is stored as jiffies. Setting to 0 (zero) resets to the
default (currently 100ms).

Signed-off-by: Clark Williams <williams@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20130207094704.13751796@riff.lan
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

Clark Williams and committed by
Ingo Molnar
ce0dbbbb cf4aebc2

+44 -3
+14 -1
include/linux/sched/sysctl.h
··· 73 73 return 1; 74 74 } 75 75 #endif 76 + 77 + /* 78 + * control realtime throttling: 79 + * 80 + * /proc/sys/kernel/sched_rt_period_us 81 + * /proc/sys/kernel/sched_rt_runtime_us 82 + */ 76 83 extern unsigned int sysctl_sched_rt_period; 77 84 extern int sysctl_sched_rt_runtime; 78 85 ··· 97 90 */ 98 91 #define RR_TIMESLICE (100 * HZ / 1000) 99 92 100 - int sched_rt_handler(struct ctl_table *table, int write, 93 + extern int sched_rr_timeslice; 94 + 95 + extern int sched_rr_handler(struct ctl_table *table, int write, 96 + void __user *buffer, size_t *lenp, 97 + loff_t *ppos); 98 + 99 + extern int sched_rt_handler(struct ctl_table *table, int write, 101 100 void __user *buffer, size_t *lenp, 102 101 loff_t *ppos); 103 102
+19
kernel/sched/core.c
··· 7509 7509 } 7510 7510 #endif /* CONFIG_RT_GROUP_SCHED */ 7511 7511 7512 + int sched_rr_handler(struct ctl_table *table, int write, 7513 + void __user *buffer, size_t *lenp, 7514 + loff_t *ppos) 7515 + { 7516 + int ret; 7517 + static DEFINE_MUTEX(mutex); 7518 + 7519 + mutex_lock(&mutex); 7520 + ret = proc_dointvec(table, write, buffer, lenp, ppos); 7521 + /* make sure that internally we keep jiffies */ 7522 + /* also, writing zero resets timeslice to default */ 7523 + if (!ret && write) { 7524 + sched_rr_timeslice = sched_rr_timeslice <= 0 ? 7525 + RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice); 7526 + } 7527 + mutex_unlock(&mutex); 7528 + return ret; 7529 + } 7530 + 7512 7531 int sched_rt_handler(struct ctl_table *table, int write, 7513 7532 void __user *buffer, size_t *lenp, 7514 7533 loff_t *ppos)
+4 -2
kernel/sched/rt.c
··· 7 7 8 8 #include <linux/slab.h> 9 9 10 + int sched_rr_timeslice = RR_TIMESLICE; 11 + 10 12 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun); 11 13 12 14 struct rt_bandwidth def_rt_bandwidth; ··· 2018 2016 if (--p->rt.time_slice) 2019 2017 return; 2020 2018 2021 - p->rt.time_slice = RR_TIMESLICE; 2019 + p->rt.time_slice = sched_rr_timeslice; 2022 2020 2023 2021 /* 2024 2022 * Requeue to the end of queue if we (and all of our ancestors) are the ··· 2049 2047 * Time slice is 0 for SCHED_FIFO tasks 2050 2048 */ 2051 2049 if (task->policy == SCHED_RR) 2052 - return RR_TIMESLICE; 2050 + return sched_rr_timeslice; 2053 2051 else 2054 2052 return 0; 2055 2053 }
+7
kernel/sysctl.c
··· 404 404 .mode = 0644, 405 405 .proc_handler = sched_rt_handler, 406 406 }, 407 + { 408 + .procname = "sched_rr_timeslice_ms", 409 + .data = &sched_rr_timeslice, 410 + .maxlen = sizeof(int), 411 + .mode = 0644, 412 + .proc_handler = sched_rr_handler, 413 + }, 407 414 #ifdef CONFIG_SCHED_AUTOGROUP 408 415 { 409 416 .procname = "sched_autogroup_enabled",