Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/memcg: move cgroup high memory limit setting into struct page_counter

The high memory limit is currently recorded directly in struct mem_cgroup.
We are about to add a high limit for swap, so move the field to struct
page_counter and add some helpers.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Chris Down <chris@chrisdown.name>
Cc: Hugh Dickins <hughd@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/20200527195846.102707-4-kuba@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Jakub Kicinski and committed by Linus Torvalds
d1663a90 ff144e69

+19 -11
-3
include/linux/memcontrol.h
··· 215 215 struct page_counter kmem; 216 216 struct page_counter tcpmem; 217 217 218 - /* Upper bound of normal memory consumption range */ 219 - unsigned long high; 220 - 221 218 /* Range enforcement for interrupt charges */ 222 219 struct work_struct high_work; 223 220
+8
include/linux/page_counter.h
··· 10 10 atomic_long_t usage; 11 11 unsigned long min; 12 12 unsigned long low; 13 + unsigned long high; 13 14 unsigned long max; 14 15 struct page_counter *parent; 15 16 ··· 56 55 void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); 57 56 void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages); 58 57 void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages); 58 + 59 + static inline void page_counter_set_high(struct page_counter *counter, 60 + unsigned long nr_pages) 61 + { 62 + WRITE_ONCE(counter->high, nr_pages); 63 + } 64 + 59 65 int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages); 60 66 int page_counter_memparse(const char *buf, const char *max, 61 67 unsigned long *nr_pages);
+11 -8
mm/memcontrol.c
··· 2252 2252 gfp_t gfp_mask) 2253 2253 { 2254 2254 do { 2255 - if (page_counter_read(&memcg->memory) <= READ_ONCE(memcg->high)) 2255 + if (page_counter_read(&memcg->memory) <= 2256 READ_ONCE(memcg->memory.high)) 2256 2257 continue; 2257 2258 memcg_memory_event(memcg, MEMCG_HIGH); 2258 2259 try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true); ··· 2346 2345 2347 2346 do { 2348 2347 overage = calculate_overage(page_counter_read(&memcg->memory), 2349 - READ_ONCE(memcg->high)); 2348 + READ_ONCE(memcg->memory.high)); 2350 2349 max_overage = max(overage, max_overage); 2351 2350 } while ((memcg = parent_mem_cgroup(memcg)) && 2352 2351 !mem_cgroup_is_root(memcg)); ··· 2605 2604 * reclaim, the cost of mismatch is negligible. 2606 2605 */ 2607 2606 do { 2608 - if (page_counter_read(&memcg->memory) > READ_ONCE(memcg->high)) { 2607 + if (page_counter_read(&memcg->memory) > 2608 READ_ONCE(memcg->memory.high)) { 2609 2609 /* Don't bother a random interrupted task */ 2610 2610 if (in_interrupt()) { 2611 2611 schedule_work(&memcg->high_work); ··· 4349 4347 4350 4348 while ((parent = parent_mem_cgroup(memcg))) { 4351 4349 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), 4352 - READ_ONCE(memcg->high)); 4350 + READ_ONCE(memcg->memory.high)); 4353 4351 unsigned long used = page_counter_read(&memcg->memory); 4354 4352 4355 4353 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); ··· 5074 5072 if (IS_ERR(memcg)) 5075 5073 return ERR_CAST(memcg); 5076 5074 5077 - WRITE_ONCE(memcg->high, PAGE_COUNTER_MAX); 5075 + page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5078 5076 memcg->soft_limit = PAGE_COUNTER_MAX; 5079 5077 if (parent) { 5080 5078 memcg->swappiness = mem_cgroup_swappiness(parent); ··· 5227 5225 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); 5228 5226 page_counter_set_min(&memcg->memory, 0); 5229 5227 page_counter_set_low(&memcg->memory, 0); 5230 - WRITE_ONCE(memcg->high, PAGE_COUNTER_MAX); 5228 + page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); 5231 5229 memcg->soft_limit = PAGE_COUNTER_MAX; 5232 5230 memcg_wb_domain_size_changed(memcg); 5233 5231 } ··· 6026 6024 6027 6025 static int memory_high_show(struct seq_file *m, void *v) 6028 6026 { 6029 - return seq_puts_memcg_tunable(m, READ_ONCE(mem_cgroup_from_seq(m)->high)); 6027 + return seq_puts_memcg_tunable(m, 6028 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); 6030 6029 } 6031 6030 6032 6031 static ssize_t memory_high_write(struct kernfs_open_file *of, ··· 6044 6041 if (err) 6045 6042 return err; 6046 6043 6047 - WRITE_ONCE(memcg->high, high); 6044 + page_counter_set_high(&memcg->memory, high); 6048 6045 6049 6046 for (;;) { 6050 6047 unsigned long nr_pages = page_counter_read(&memcg->memory);