Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Thomas Gleixner:
"A few scheduler fixes:

- Prevent a bogus warning vs. runqueue clock update flags in
do_sched_rt_period_timer()

- Simplify the helper functions which handle requests for skipping
the runqueue clock update.

- Do not unlock the tunables mutex in the error path of the cpufreq
schedutil governor. It's not held at that point.

- Enforce proper alignment of 'struct util_est' in sched_avg to
prevent a misalignment fault on IA64"
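
Illustrating the last point above, here is a minimal standalone sketch of the
alignment technique the include/linux/sched.h change uses. 'struct example_est'
and the static assert are hypothetical stand-ins for illustration, not kernel
code:

#include <stdint.h>

/* Hypothetical stand-in for 'struct util_est': two 32-bit members, so the
 * compiler would normally only require 4-byte alignment for the struct. */
struct example_est {
	uint32_t enqueued;
	uint32_t ewma;
} __attribute__((__aligned__(sizeof(uint64_t))));

/* The attribute bumps the alignment to 8 bytes, so accessing the pair as one
 * naturally aligned 64-bit word cannot fault on strict architectures such as
 * IA64. */
_Static_assert(_Alignof(struct example_est) == sizeof(uint64_t),
	       "util_est-style struct must be u64-aligned");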

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/core: Force proper alignment of 'struct util_est'
sched/core: Simplify helpers for rq clock update skip requests
sched/rt: Fix rq->clock_update_flags < RQCF_ACT_SKIP warning
sched/cpufreq/schedutil: Fix error path mutex unlock

+22 -14
+3 -3
include/linux/sched.h
@@ -300,7 +300,7 @@
 	unsigned int		enqueued;
 	unsigned int		ewma;
 #define UTIL_EST_WEIGHT_SHIFT	2
-};
+} __attribute__((__aligned__(sizeof(u64))));
 
 /*
  * The load_avg/util_avg accumulates an infinite geometric series
@@ -364,7 +364,7 @@
 	unsigned long		runnable_load_avg;
 	unsigned long		util_avg;
 	struct util_est		util_est;
-};
+} ____cacheline_aligned;
 
 struct sched_statistics {
 #ifdef CONFIG_SCHEDSTATS
@@ -435,7 +435,7 @@
 	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
-	struct sched_avg	avg ____cacheline_aligned_in_smp;
+	struct sched_avg	avg;
 #endif
 };
 
+1 -1
kernel/sched/core.c
@@ -874,7 +874,7 @@
 	 * this case, we can save a useless back to back clock update.
	 */
	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
-		rq_clock_skip_update(rq, true);
+		rq_clock_skip_update(rq);
 }
 
 #ifdef CONFIG_SMP
+1 -2
kernel/sched/cpufreq_schedutil.c
@@ -631,10 +631,9 @@
 
 stop_kthread:
 	sugov_kthread_stop(sg_policy);
-
-free_sg_policy:
 	mutex_unlock(&global_tunables_lock);
 
+free_sg_policy:
 	sugov_policy_free(sg_policy);
 
 disable_fast_switch:
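
For context on the schedutil fix above, a self-contained sketch of the
goto-unwind pattern it restores: each error label undoes only what has actually
been taken at the point of the jump. The names here (policy_init, global_lock,
fail_early, fail_late) are hypothetical, and pthreads stands in for the
tunables mutex:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;

struct policy { int registered; };

/* Returns 0 on success, -1 on failure, never unlocking a lock it does
 * not hold. */
static int policy_init(struct policy **out, int fail_early, int fail_late)
{
	struct policy *p = malloc(sizeof(*p));

	if (!p)
		return -1;

	if (fail_early)
		goto free_policy;	/* lock not taken yet: just free */

	pthread_mutex_lock(&global_lock);

	if (fail_late)
		goto unlock;		/* lock held: drop it, then free */

	p->registered = 1;
	*out = p;
	pthread_mutex_unlock(&global_lock);
	return 0;

unlock:
	pthread_mutex_unlock(&global_lock);
free_policy:
	free(p);
	return -1;
}

In the patched code above, jumps to free_sg_policy happen before the tunables
mutex is taken, which is why the unlock now lives only on the stop_kthread
path.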
+1 -1
kernel/sched/deadline.c
@@ -1560,7 +1560,7 @@
 	 * so we don't do microscopic update in schedule()
	 * and double the fastpath cost.
	 */
-	rq_clock_skip_update(rq, true);
+	rq_clock_skip_update(rq);
 }
 
 #ifdef CONFIG_SMP
+1 -1
kernel/sched/fair.c
@@ -7089,7 +7089,7 @@
 		 * so we don't do microscopic update in schedule()
		 * and double the fastpath cost.
		 */
-		rq_clock_skip_update(rq, true);
+		rq_clock_skip_update(rq);
 	}
 
 	set_skip_buddy(se);
+3 -1
kernel/sched/rt.c
@@ -839,6 +839,8 @@
 			continue;
 
 		raw_spin_lock(&rq->lock);
+		update_rq_clock(rq);
+
 		if (rt_rq->rt_time) {
 			u64 runtime;
 
@@ -859,7 +861,7 @@
 				 * 'runtime'.
				 */
				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
-					rq_clock_skip_update(rq, false);
+					rq_clock_cancel_skipupdate(rq);
 			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
+12 -5
kernel/sched/sched.h
@@ -976,13 +976,20 @@
 	return rq->clock_task;
 }
 
-static inline void rq_clock_skip_update(struct rq *rq, bool skip)
+static inline void rq_clock_skip_update(struct rq *rq)
 {
 	lockdep_assert_held(&rq->lock);
-	if (skip)
-		rq->clock_update_flags |= RQCF_REQ_SKIP;
-	else
-		rq->clock_update_flags &= ~RQCF_REQ_SKIP;
+	rq->clock_update_flags |= RQCF_REQ_SKIP;
+}
+
+/*
+ * See rt task throttling, which is the only time a skip
+ * request is cancelled.
+ */
+static inline void rq_clock_cancel_skipupdate(struct rq *rq)
+{
+	lockdep_assert_held(&rq->lock);
+	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
 }
 
 struct rq_flags {
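
As a rough illustration of the reworked sched.h helpers (a toy model under
assumed names, not the kernel's struct rq or RQCF_* flag values), the old bool
parameter is replaced by two single-purpose functions: the common case requests
a skip, and only the RT throttling path ever withdraws one:

#include <assert.h>

#define TOY_REQ_SKIP	0x01	/* stand-in for RQCF_REQ_SKIP */

struct toy_rq {
	unsigned int clock_update_flags;
};

/* Request that the next clock update be skipped (old "skip == true"). */
static void toy_clock_skip_update(struct toy_rq *rq)
{
	rq->clock_update_flags |= TOY_REQ_SKIP;
}

/* Withdraw a previously requested skip (old "skip == false"). */
static void toy_clock_cancel_skipupdate(struct toy_rq *rq)
{
	rq->clock_update_flags &= ~TOY_REQ_SKIP;
}

int main(void)
{
	struct toy_rq rq = { 0 };

	toy_clock_skip_update(&rq);		/* e.g. a yield path */
	assert(rq.clock_update_flags & TOY_REQ_SKIP);

	toy_clock_cancel_skipupdate(&rq);	/* e.g. the RT unthrottle path */
	assert(!(rq.clock_update_flags & TOY_REQ_SKIP));
	return 0;
}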