sched/core: Simplify helpers for rq clock update skip requests

By renaming the functions we can get rid of the skip parameter
and have better code readability. It makes zero sense to have
things such as:

rq_clock_skip_update(rq, false)

When the skip request is in fact not going to happen. Ever. Rename
things such that we end up with:

rq_clock_skip_update(rq)
rq_clock_cancel_skipupdate(rq)

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Cc: matt@codeblueprint.co.uk
Cc: rostedt@goodmis.org
Link: http://lkml.kernel.org/r/20180404161539.nhadkff2aats74jh@linux-n805
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by Davidlohr Bueso and committed by Ingo Molnar adcc8da8 d29a2064

+16 -9
+1 -1
kernel/sched/core.c
··· 874 874 * this case, we can save a useless back to back clock update. 875 875 */ 876 876 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) 877 - rq_clock_skip_update(rq, true); 877 + rq_clock_skip_update(rq); 878 878 } 879 879 880 880 #ifdef CONFIG_SMP
+1 -1
kernel/sched/deadline.c
··· 1560 1560 * so we don't do microscopic update in schedule() 1561 1561 * and double the fastpath cost. 1562 1562 */ 1563 - rq_clock_skip_update(rq, true); 1563 + rq_clock_skip_update(rq); 1564 1564 } 1565 1565 1566 1566 #ifdef CONFIG_SMP
+1 -1
kernel/sched/fair.c
··· 7089 7089 * so we don't do microscopic update in schedule() 7090 7090 * and double the fastpath cost. 7091 7091 */ 7092 - rq_clock_skip_update(rq, true); 7092 + rq_clock_skip_update(rq); 7093 7093 } 7094 7094 7095 7095 set_skip_buddy(se);
+1 -1
kernel/sched/rt.c
··· 861 861 * 'runtime'. 862 862 */ 863 863 if (rt_rq->rt_nr_running && rq->curr == rq->idle) 864 - rq_clock_skip_update(rq, false); 864 + rq_clock_cancel_skipupdate(rq); 865 865 } 866 866 if (rt_rq->rt_time || rt_rq->rt_nr_running) 867 867 idle = 0;
+12 -5
kernel/sched/sched.h
··· 976 976 return rq->clock_task; 977 977 } 978 978 979 - static inline void rq_clock_skip_update(struct rq *rq, bool skip) 979 + static inline void rq_clock_skip_update(struct rq *rq) 980 980 { 981 981 lockdep_assert_held(&rq->lock); 982 - if (skip) 983 - rq->clock_update_flags |= RQCF_REQ_SKIP; 984 - else 985 - rq->clock_update_flags &= ~RQCF_REQ_SKIP; 982 + rq->clock_update_flags |= RQCF_REQ_SKIP; 983 + } 984 + 985 + /* 986 + * See rt task throttoling, which is the only time a skip 987 + * request is cancelled. 988 + */ 989 + static inline void rq_clock_cancel_skipupdate(struct rq *rq) 990 + { 991 + lockdep_assert_held(&rq->lock); 992 + rq->clock_update_flags &= ~RQCF_REQ_SKIP; 986 993 } 987 994 988 995 struct rq_flags {