Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
"Two load-balancing fixes for cgroups-intense workloads"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/fair: Fix calc_cfs_shares() fixed point arithmetics width confusion
sched/fair: Fix effective_load() to consistently use smoothed load

+22 -24
kernel/sched/fair.c
···
 735  735  	}
 736  736  }
 737  737  
 738    -  static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
 739    -  static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
 740  738  #else
 741  739  void init_entity_runnable_average(struct sched_entity *se)
 742  740  {
···
2497 2499  
2498 2500  #ifdef CONFIG_FAIR_GROUP_SCHED
2499 2501  # ifdef CONFIG_SMP
2500    -  static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
2501    -  {
2502    -  	long tg_weight;
2503    -  
2504    -  	/*
2505    -  	 * Use this CPU's real-time load instead of the last load contribution
2506    -  	 * as the updating of the contribution is delayed, and we will use the
2507    -  	 * the real-time load to calc the share. See update_tg_load_avg().
2508    -  	 */
2509    -  	tg_weight = atomic_long_read(&tg->load_avg);
2510    -  	tg_weight -= cfs_rq->tg_load_avg_contrib;
2511    -  	tg_weight += cfs_rq->load.weight;
2512    -  
2513    -  	return tg_weight;
2514    -  }
2515    -  
2516 2502  static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2517 2503  {
2518 2504  	long tg_weight, load, shares;
2519 2505  
2520    -  	tg_weight = calc_tg_weight(tg, cfs_rq);
2521    -  	load = cfs_rq->load.weight;
     2506  +	/*
     2507  +	 * This really should be: cfs_rq->avg.load_avg, but instead we use
     2508  +	 * cfs_rq->load.weight, which is its upper bound. This helps ramp up
     2509  +	 * the shares for small weight interactive tasks.
     2510  +	 */
     2511  +	load = scale_load_down(cfs_rq->load.weight);
     2512  +
     2513  +	tg_weight = atomic_long_read(&tg->load_avg);
     2514  +
     2515  +	/* Ensure tg_weight >= load */
     2516  +	tg_weight -= cfs_rq->tg_load_avg_contrib;
     2517  +	tg_weight += load;
2522 2518  
2523 2519  	shares = (tg->shares * load);
2524 2520  	if (tg_weight)
···
2531 2539  	return tg->shares;
2532 2540  }
2533 2541  # endif /* CONFIG_SMP */
     2542  +
2534 2543  static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
2535 2544  			    unsigned long weight)
2536 2545  {
···
4939 4946  		return wl;
4940 4947  
4941 4948  	for_each_sched_entity(se) {
4942    -  		long w, W;
     4949  +		struct cfs_rq *cfs_rq = se->my_q;
     4950  +		long W, w = cfs_rq_load_avg(cfs_rq);
4943 4951  
4944    -  		tg = se->my_q->tg;
     4952  +		tg = cfs_rq->tg;
4945 4953  
4946 4954  		/*
4947 4955  		 * W = @wg + \Sum rw_j
4948 4956  		 */
4949    -  		W = wg + calc_tg_weight(tg, se->my_q);
     4957  +		W = wg + atomic_long_read(&tg->load_avg);
     4958  +
     4959  +		/* Ensure \Sum rw_j >= rw_i */
     4960  +		W -= cfs_rq->tg_load_avg_contrib;
     4961  +		W += w;
4950 4962  
4951 4963  		/*
4952 4964  		 * w = rw_i + @wl
4953 4965  		 */
4954    -  		w = cfs_rq_load_avg(se->my_q) + wl;
     4966  +		w += wl;
4955 4967  
4956 4968  		/*
4957 4969  		 * wl = S * s'_i; see (2)