sched/core: Simplify helpers for rq clock update skip requests

By renaming the functions we can get rid of the skip parameter
and have better code readability. It makes zero sense to have
things such as:

rq_clock_skip_update(rq, false)

when the skip request is in fact not going to happen. Ever. Rename
things such that we end up with:

rq_clock_skip_update(rq)
rq_clock_cancel_skipupdate(rq)
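
For context (not part of this patch): a skip request is consumed one
scheduling round later. __schedule() shifts rq->clock_update_flags left,
promoting a pending RQCF_REQ_SKIP into RQCF_ACT_SKIP, and update_rq_clock()
then returns early. A minimal sketch of that mechanism, paraphrasing the
surrounding kernel code of this era:

	/*
	 * Sketch only, paraphrasing kernel/sched/core.c; the RQCF_*
	 * values are as defined in kernel/sched/sched.h.
	 */
	#define RQCF_REQ_SKIP	0x01	/* skip requested via rq_clock_skip_update() */
	#define RQCF_ACT_SKIP	0x02	/* skip active during this __schedule() */

	void update_rq_clock(struct rq *rq)
	{
		s64 delta;

		lockdep_assert_held(&rq->lock);

		if (rq->clock_update_flags & RQCF_ACT_SKIP)
			return;			/* honor the skip request */

		delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
		if (delta < 0)
			return;
		rq->clock += delta;
		update_rq_clock_task(rq, delta);
	}

	static void __sched notrace __schedule(bool preempt)
	{
		...
		rq_lock(rq, &rf);
		/* Promote REQ to ACT */
		rq->clock_update_flags <<= 1;
		update_rq_clock(rq);	/* no-op if a skip was requested */
		...
	}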

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Cc: matt@codeblueprint.co.uk
Cc: rostedt@goodmis.org
Link: http://lkml.kernel.org/r/20180404161539.nhadkff2aats74jh@linux-n805
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Davidlohr Bueso and committed by Ingo Molnar (adcc8da8, d29a2064)

+16 -9
+1 -1
kernel/sched/core.c
@@ -874,7 +874,7 @@
 	 * this case, we can save a useless back to back clock update.
 	 */
 	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
-		rq_clock_skip_update(rq, true);
+		rq_clock_skip_update(rq);
 }
 
 #ifdef CONFIG_SMP
+1 -1
kernel/sched/deadline.c
@@ -1560,7 +1560,7 @@
 	 * so we don't do microscopic update in schedule()
 	 * and double the fastpath cost.
 	 */
-	rq_clock_skip_update(rq, true);
+	rq_clock_skip_update(rq);
 }
 
 #ifdef CONFIG_SMP
+1 -1
kernel/sched/fair.c
@@ -7089,7 +7089,7 @@
 		 * so we don't do microscopic update in schedule()
 		 * and double the fastpath cost.
 		 */
-		rq_clock_skip_update(rq, true);
+		rq_clock_skip_update(rq);
 	}
 
 	set_skip_buddy(se);
+1 -1
kernel/sched/rt.c
@@ -861,7 +861,7 @@
 			 * 'runtime'.
 			 */
 			if (rt_rq->rt_nr_running && rq->curr == rq->idle)
-				rq_clock_skip_update(rq, false);
+				rq_clock_cancel_skipupdate(rq);
 		}
 		if (rt_rq->rt_time || rt_rq->rt_nr_running)
 			idle = 0;
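
The cancel site above is the RT unthrottle path in do_sched_rt_period_timer().
There, the skip request set by check_preempt_curr() on an idle CPU must be
withdrawn; otherwise the wakeup-to-unthrottle gap would be accounted to the
task as 'runtime'. A sketch of the surrounding context in kernel/sched/rt.c,
paraphrased for illustration (not part of this patch):

	if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
		rt_rq->rt_throttled = 0;
		enqueue = 1;

		/*
		 * When we're idle and a woken (rt) task is throttled,
		 * check_preempt_curr() requested a clock-update skip;
		 * withdraw it so the next update really happens.
		 */
		if (rt_rq->rt_nr_running && rq->curr == rq->idle)
			rq_clock_cancel_skipupdate(rq);
	}
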
+12 -5
kernel/sched/sched.h
@@ -976,13 +976,20 @@
 	return rq->clock_task;
 }
 
-static inline void rq_clock_skip_update(struct rq *rq, bool skip)
+static inline void rq_clock_skip_update(struct rq *rq)
 {
 	lockdep_assert_held(&rq->lock);
-	if (skip)
-		rq->clock_update_flags |= RQCF_REQ_SKIP;
-	else
-		rq->clock_update_flags &= ~RQCF_REQ_SKIP;
+	rq->clock_update_flags |= RQCF_REQ_SKIP;
+}
+
+/*
+ * See rt task throttling, which is the only time a skip
+ * request is cancelled.
+ */
+static inline void rq_clock_cancel_skipupdate(struct rq *rq)
+{
+	lockdep_assert_held(&rq->lock);
+	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
 }
 
 struct rq_flags {