Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: memcontrol: consolidate memory controller initialization

The initialization code for the per-cpu charge stock and the soft
limit tree is compact enough to inline it into mem_cgroup_init().

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Johannes Weiner, committed by Linus Torvalds.
95a045f6 9c608dbe

+25 -35
+25 -35
mm/memcontrol.c
··· 2138 2138 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 2139 2139 } 2140 2140 2141 - static void __init memcg_stock_init(void) 2142 - { 2143 - int cpu; 2144 - 2145 - for_each_possible_cpu(cpu) { 2146 - struct memcg_stock_pcp *stock = 2147 - &per_cpu(memcg_stock, cpu); 2148 - INIT_WORK(&stock->work, drain_local_stock); 2149 - } 2150 - } 2151 - 2152 2141 /* 2153 2142 * Cache charges(val) to local per_cpu area. 2154 2143 * This will be consumed by consume_stock() function, later. ··· 4496 4507 } 4497 4508 EXPORT_SYMBOL(parent_mem_cgroup); 4498 4509 4499 - static void __init mem_cgroup_soft_limit_tree_init(void) 4500 - { 4501 - int node; 4502 - 4503 - for_each_node(node) { 4504 - struct mem_cgroup_tree_per_node *rtpn; 4505 - int zone; 4506 - 4507 - rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 4508 - node_online(node) ? node : NUMA_NO_NODE); 4509 - 4510 - for (zone = 0; zone < MAX_NR_ZONES; zone++) { 4511 - struct mem_cgroup_tree_per_zone *rtpz; 4512 - 4513 - rtpz = &rtpn->rb_tree_per_zone[zone]; 4514 - rtpz->rb_root = RB_ROOT; 4515 - spin_lock_init(&rtpz->lock); 4516 - } 4517 - soft_limit_tree.rb_tree_per_node[node] = rtpn; 4518 - } 4519 - } 4520 - 4521 4510 static struct cgroup_subsys_state * __ref 4522 4511 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) 4523 4512 { ··· 5872 5905 */ 5873 5906 static int __init mem_cgroup_init(void) 5874 5907 { 5908 + int cpu, node; 5909 + 5875 5910 hotcpu_notifier(memcg_cpu_hotplug_callback, 0); 5911 + 5912 + for_each_possible_cpu(cpu) 5913 + INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, 5914 + drain_local_stock); 5915 + 5916 + for_each_node(node) { 5917 + struct mem_cgroup_tree_per_node *rtpn; 5918 + int zone; 5919 + 5920 + rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, 5921 + node_online(node) ? node : NUMA_NO_NODE); 5922 + 5923 + for (zone = 0; zone < MAX_NR_ZONES; zone++) { 5924 + struct mem_cgroup_tree_per_zone *rtpz; 5925 + 5926 + rtpz = &rtpn->rb_tree_per_zone[zone]; 5927 + rtpz->rb_root = RB_ROOT; 5928 + spin_lock_init(&rtpz->lock); 5929 + } 5930 + soft_limit_tree.rb_tree_per_node[node] = rtpn; 5931 + } 5932 + 5876 5933 enable_swap_cgroup(); 5877 - mem_cgroup_soft_limit_tree_init(); 5878 - memcg_stock_init(); 5934 + 5879 5935 return 0; 5880 5936 } 5881 5937 subsys_initcall(mem_cgroup_init);