Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

memcg: simplify the way memory limits are checked

Since transparent huge pages were introduced, checking whether memory
cgroups are below their limits is no longer enough; what matters is the
actual amount of chargeable space.

To not have more than one limit-checking interface, replace
mem_cgroup_check_under_limit() and mem_cgroup_check_margin() with a
single mem_cgroup_margin() that returns the chargeable space and leaves
the comparison to the callsite.

Soft limits are now checked the other way round, by using the already
existing function that returns the amount by which soft limits are
exceeded: res_counter_soft_limit_excess().

Also remove all the corresponding functions on the res_counter side that
are now no longer used.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Johannes Weiner and committed by
Linus Torvalds
9d11ea9f b7c61678

+31 -90
+14 -58
include/linux/res_counter.h
··· 129 129 void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val); 130 130 void res_counter_uncharge(struct res_counter *counter, unsigned long val); 131 131 132 - static inline bool res_counter_limit_check_locked(struct res_counter *cnt) 132 + /** 133 + * res_counter_margin - calculate chargeable space of a counter 134 + * @cnt: the counter 135 + * 136 + * Returns the difference between the hard limit and the current usage 137 + * of resource counter @cnt. 138 + */ 139 + static inline unsigned long long res_counter_margin(struct res_counter *cnt) 133 140 { 134 - if (cnt->usage < cnt->limit) 135 - return true; 141 + unsigned long long margin; 142 + unsigned long flags; 136 143 137 - return false; 138 - } 139 - 140 - static inline bool res_counter_soft_limit_check_locked(struct res_counter *cnt) 141 - { 142 - if (cnt->usage <= cnt->soft_limit) 143 - return true; 144 - 145 - return false; 144 + spin_lock_irqsave(&cnt->lock, flags); 145 + margin = cnt->limit - cnt->usage; 146 + spin_unlock_irqrestore(&cnt->lock, flags); 147 + return margin; 146 148 } 147 149 148 150 /** ··· 167 165 excess = cnt->usage - cnt->soft_limit; 168 166 spin_unlock_irqrestore(&cnt->lock, flags); 169 167 return excess; 170 - } 171 - 172 - /* 173 - * Helper function to detect if the cgroup is within it's limit or 174 - * not. 
It's currently called from cgroup_rss_prepare() 175 - */ 176 - static inline bool res_counter_check_under_limit(struct res_counter *cnt) 177 - { 178 - bool ret; 179 - unsigned long flags; 180 - 181 - spin_lock_irqsave(&cnt->lock, flags); 182 - ret = res_counter_limit_check_locked(cnt); 183 - spin_unlock_irqrestore(&cnt->lock, flags); 184 - return ret; 185 - } 186 - 187 - /** 188 - * res_counter_check_margin - check if the counter allows charging 189 - * @cnt: the resource counter to check 190 - * @bytes: the number of bytes to check the remaining space against 191 - * 192 - * Returns a boolean value on whether the counter can be charged 193 - * @bytes or whether this would exceed the limit. 194 - */ 195 - static inline bool res_counter_check_margin(struct res_counter *cnt, 196 - unsigned long bytes) 197 - { 198 - bool ret; 199 - unsigned long flags; 200 - 201 - spin_lock_irqsave(&cnt->lock, flags); 202 - ret = cnt->limit - cnt->usage >= bytes; 203 - spin_unlock_irqrestore(&cnt->lock, flags); 204 - return ret; 205 - } 206 - 207 - static inline bool res_counter_check_within_soft_limit(struct res_counter *cnt) 208 - { 209 - bool ret; 210 - unsigned long flags; 211 - 212 - spin_lock_irqsave(&cnt->lock, flags); 213 - ret = res_counter_soft_limit_check_locked(cnt); 214 - spin_unlock_irqrestore(&cnt->lock, flags); 215 - return ret; 216 168 } 217 169 218 170 static inline void res_counter_reset_max(struct res_counter *cnt)
+17 -32
mm/memcontrol.c
··· 504 504 } 505 505 } 506 506 507 - static inline unsigned long mem_cgroup_get_excess(struct mem_cgroup *mem) 508 - { 509 - return res_counter_soft_limit_excess(&mem->res) >> PAGE_SHIFT; 510 - } 511 - 512 507 static struct mem_cgroup_per_zone * 513 508 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) 514 509 { ··· 1122 1127 #define mem_cgroup_from_res_counter(counter, member) \ 1123 1128 container_of(counter, struct mem_cgroup, member) 1124 1129 1125 - static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem) 1126 - { 1127 - if (do_swap_account) { 1128 - if (res_counter_check_under_limit(&mem->res) && 1129 - res_counter_check_under_limit(&mem->memsw)) 1130 - return true; 1131 - } else 1132 - if (res_counter_check_under_limit(&mem->res)) 1133 - return true; 1134 - return false; 1135 - } 1136 - 1137 1130 /** 1138 - * mem_cgroup_check_margin - check if the memory cgroup allows charging 1139 - * @mem: memory cgroup to check 1140 - * @bytes: the number of bytes the caller intends to charge 1131 + * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1132 + * @mem: the memory cgroup 1141 1133 * 1142 - * Returns a boolean value on whether @mem can be charged @bytes or 1143 - * whether this would exceed the limit. 1134 + * Returns the maximum amount of memory @mem can be charged with, in 1135 + * bytes. 
1144 1136 */ 1145 - static bool mem_cgroup_check_margin(struct mem_cgroup *mem, unsigned long bytes) 1137 + static unsigned long long mem_cgroup_margin(struct mem_cgroup *mem) 1146 1138 { 1147 - if (!res_counter_check_margin(&mem->res, bytes)) 1148 - return false; 1149 - if (do_swap_account && !res_counter_check_margin(&mem->memsw, bytes)) 1150 - return false; 1151 - return true; 1139 + unsigned long long margin; 1140 + 1141 + margin = res_counter_margin(&mem->res); 1142 + if (do_swap_account) 1143 + margin = min(margin, res_counter_margin(&mem->memsw)); 1144 + return margin; 1152 1145 } 1153 1146 1154 1147 static unsigned int get_swappiness(struct mem_cgroup *memcg) ··· 1403 1420 bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP; 1404 1421 bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK; 1405 1422 bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT; 1406 - unsigned long excess = mem_cgroup_get_excess(root_mem); 1423 + unsigned long excess; 1424 + 1425 + excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT; 1407 1426 1408 1427 /* If memsw_is_minimum==1, swap-out is of-no-use. */ 1409 1428 if (root_mem->memsw_is_minimum) ··· 1462 1477 return ret; 1463 1478 total += ret; 1464 1479 if (check_soft) { 1465 - if (res_counter_check_within_soft_limit(&root_mem->res)) 1480 + if (!res_counter_soft_limit_excess(&root_mem->res)) 1466 1481 return total; 1467 - } else if (mem_cgroup_check_under_limit(root_mem)) 1482 + } else if (mem_cgroup_margin(root_mem)) 1468 1483 return 1 + total; 1469 1484 } 1470 1485 return total; ··· 1883 1898 1884 1899 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL, 1885 1900 gfp_mask, flags); 1886 - if (mem_cgroup_check_margin(mem_over_limit, csize)) 1901 + if (mem_cgroup_margin(mem_over_limit) >= csize) 1887 1902 return CHARGE_RETRY; 1888 1903 /* 1889 1904 * Even though the limit is exceeded at this point, reclaim