Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

memcg: rename config variables

Sanity:

CONFIG_CGROUP_MEM_RES_CTLR -> CONFIG_MEMCG
CONFIG_CGROUP_MEM_RES_CTLR_SWAP -> CONFIG_MEMCG_SWAP
CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED -> CONFIG_MEMCG_SWAP_ENABLED
CONFIG_CGROUP_MEM_RES_CTLR_KMEM -> CONFIG_MEMCG_KMEM

[mhocko@suse.cz: fix missed bits]
Cc: Glauber Costa <glommer@parallels.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: David Rientjes <rientjes@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Andrew Morton and committed by
Linus Torvalds
c255a458 80934513

+78 -78
+5 -5
Documentation/cgroups/memory.txt
··· 187 187 But see section 8.2: when moving a task to another cgroup, its pages may 188 188 be recharged to the new cgroup, if move_charge_at_immigrate has been chosen. 189 189 190 - Exception: If CONFIG_CGROUP_CGROUP_MEM_RES_CTLR_SWAP is not used. 190 + Exception: If CONFIG_CGROUP_CGROUP_MEMCG_SWAP is not used. 191 191 When you do swapoff and make swapped-out pages of shmem(tmpfs) to 192 192 be backed into memory in force, charges for pages are accounted against the 193 193 caller of swapoff rather than the users of shmem. 194 194 195 - 2.4 Swap Extension (CONFIG_CGROUP_MEM_RES_CTLR_SWAP) 195 + 2.4 Swap Extension (CONFIG_MEMCG_SWAP) 196 196 197 197 Swap Extension allows you to record charge for swap. A swapped-in page is 198 198 charged back to original page allocator if possible. ··· 259 259 per-zone-per-cgroup LRU (cgroup's private LRU) is just guarded by 260 260 zone->lru_lock, it has no lock of its own. 261 261 262 - 2.7 Kernel Memory Extension (CONFIG_CGROUP_MEM_RES_CTLR_KMEM) 262 + 2.7 Kernel Memory Extension (CONFIG_MEMCG_KMEM) 263 263 264 264 With the Kernel memory extension, the Memory Controller is able to limit 265 265 the amount of kernel memory used by the system. Kernel memory is fundamentally ··· 286 286 287 287 a. Enable CONFIG_CGROUPS 288 288 b. Enable CONFIG_RESOURCE_COUNTERS 289 - c. Enable CONFIG_CGROUP_MEM_RES_CTLR 290 - d. Enable CONFIG_CGROUP_MEM_RES_CTLR_SWAP (to use swap extension) 289 + c. Enable CONFIG_MEMCG 290 + d. Enable CONFIG_MEMCG_SWAP (to use swap extension) 291 291 292 292 1. Prepare the cgroups (see cgroups.txt, Why are cgroups needed?) 293 293 # mount -t tmpfs none /sys/fs/cgroup
+2 -2
arch/powerpc/configs/chroma_defconfig
··· 21 21 CONFIG_CPUSETS=y 22 22 CONFIG_CGROUP_CPUACCT=y 23 23 CONFIG_RESOURCE_COUNTERS=y 24 - CONFIG_CGROUP_MEM_RES_CTLR=y 25 - CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y 24 + CONFIG_MEMCG=y 25 + CONFIG_MEMCG_SWAP=y 26 26 CONFIG_NAMESPACES=y 27 27 CONFIG_RELAY=y 28 28 CONFIG_BLK_DEV_INITRD=y
+1 -1
arch/s390/defconfig
··· 13 13 CONFIG_CPUSETS=y 14 14 CONFIG_CGROUP_CPUACCT=y 15 15 CONFIG_RESOURCE_COUNTERS=y 16 - CONFIG_CGROUP_MEM_RES_CTLR=y 16 + CONFIG_MEMCG=y 17 17 CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y 18 18 CONFIG_CGROUP_SCHED=y 19 19 CONFIG_RT_GROUP_SCHED=y
+1 -1
arch/sh/configs/apsh4ad0a_defconfig
··· 11 11 CONFIG_CGROUP_DEVICE=y 12 12 CONFIG_CGROUP_CPUACCT=y 13 13 CONFIG_RESOURCE_COUNTERS=y 14 - CONFIG_CGROUP_MEM_RES_CTLR=y 14 + CONFIG_MEMCG=y 15 15 CONFIG_BLK_CGROUP=y 16 16 CONFIG_NAMESPACES=y 17 17 CONFIG_BLK_DEV_INITRD=y
+2 -2
arch/sh/configs/sdk7786_defconfig
··· 18 18 # CONFIG_PROC_PID_CPUSET is not set 19 19 CONFIG_CGROUP_CPUACCT=y 20 20 CONFIG_RESOURCE_COUNTERS=y 21 - CONFIG_CGROUP_MEM_RES_CTLR=y 22 - CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y 21 + CONFIG_MEMCG=y 22 + CONFIG_MEMCG_SWAP=y 23 23 CONFIG_CGROUP_SCHED=y 24 24 CONFIG_RT_GROUP_SCHED=y 25 25 CONFIG_BLK_CGROUP=y
+1 -1
arch/sh/configs/se7206_defconfig
··· 11 11 CONFIG_CGROUP_DEVICE=y 12 12 CONFIG_CGROUP_CPUACCT=y 13 13 CONFIG_RESOURCE_COUNTERS=y 14 - CONFIG_CGROUP_MEM_RES_CTLR=y 14 + CONFIG_MEMCG=y 15 15 CONFIG_RELAY=y 16 16 CONFIG_NAMESPACES=y 17 17 CONFIG_UTS_NS=y
+1 -1
arch/sh/configs/shx3_defconfig
··· 13 13 CONFIG_CGROUP_DEVICE=y 14 14 CONFIG_CGROUP_CPUACCT=y 15 15 CONFIG_RESOURCE_COUNTERS=y 16 - CONFIG_CGROUP_MEM_RES_CTLR=y 16 + CONFIG_MEMCG=y 17 17 CONFIG_RELAY=y 18 18 CONFIG_NAMESPACES=y 19 19 CONFIG_UTS_NS=y
+2 -2
arch/sh/configs/urquell_defconfig
··· 15 15 # CONFIG_PROC_PID_CPUSET is not set 16 16 CONFIG_CGROUP_CPUACCT=y 17 17 CONFIG_RESOURCE_COUNTERS=y 18 - CONFIG_CGROUP_MEM_RES_CTLR=y 19 - CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y 18 + CONFIG_MEMCG=y 19 + CONFIG_MEMCG_SWAP=y 20 20 CONFIG_CGROUP_SCHED=y 21 21 CONFIG_RT_GROUP_SCHED=y 22 22 CONFIG_BLK_DEV_INITRD=y
+2 -2
arch/tile/configs/tilegx_defconfig
··· 18 18 CONFIG_CPUSETS=y 19 19 CONFIG_CGROUP_CPUACCT=y 20 20 CONFIG_RESOURCE_COUNTERS=y 21 - CONFIG_CGROUP_MEM_RES_CTLR=y 22 - CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y 21 + CONFIG_MEMCG=y 22 + CONFIG_MEMCG_SWAP=y 23 23 CONFIG_CGROUP_SCHED=y 24 24 CONFIG_RT_GROUP_SCHED=y 25 25 CONFIG_BLK_CGROUP=y
+2 -2
arch/tile/configs/tilepro_defconfig
··· 17 17 CONFIG_CPUSETS=y 18 18 CONFIG_CGROUP_CPUACCT=y 19 19 CONFIG_RESOURCE_COUNTERS=y 20 - CONFIG_CGROUP_MEM_RES_CTLR=y 21 - CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y 20 + CONFIG_MEMCG=y 21 + CONFIG_MEMCG_SWAP=y 22 22 CONFIG_CGROUP_SCHED=y 23 23 CONFIG_RT_GROUP_SCHED=y 24 24 CONFIG_BLK_CGROUP=y
+4 -4
arch/um/defconfig
··· 155 155 CONFIG_PROC_PID_CPUSET=y 156 156 CONFIG_CGROUP_CPUACCT=y 157 157 CONFIG_RESOURCE_COUNTERS=y 158 - CONFIG_CGROUP_MEM_RES_CTLR=y 159 - CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y 160 - # CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED is not set 161 - # CONFIG_CGROUP_MEM_RES_CTLR_KMEM is not set 158 + CONFIG_MEMCG=y 159 + CONFIG_MEMCG_SWAP=y 160 + # CONFIG_MEMCG_SWAP_ENABLED is not set 161 + # CONFIG_MEMCG_KMEM is not set 162 162 CONFIG_CGROUP_SCHED=y 163 163 CONFIG_FAIR_GROUP_SCHED=y 164 164 # CONFIG_CFS_BANDWIDTH is not set
+1 -1
include/linux/cgroup_subsys.h
··· 31 31 32 32 /* */ 33 33 34 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR 34 + #ifdef CONFIG_MEMCG 35 35 SUBSYS(mem_cgroup) 36 36 #endif 37 37
+7 -7
include/linux/memcontrol.h
··· 38 38 unsigned int generation; 39 39 }; 40 40 41 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR 41 + #ifdef CONFIG_MEMCG 42 42 /* 43 43 * All "charge" functions with gfp_mask should use GFP_KERNEL or 44 44 * (gfp_mask & GFP_RECLAIM_MASK). In current implementatin, memcg doesn't ··· 124 124 extern void mem_cgroup_replace_page_cache(struct page *oldpage, 125 125 struct page *newpage); 126 126 127 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 127 + #ifdef CONFIG_MEMCG_SWAP 128 128 extern int do_swap_account; 129 129 #endif 130 130 ··· 193 193 bool mem_cgroup_bad_page_check(struct page *page); 194 194 void mem_cgroup_print_bad_page(struct page *page); 195 195 #endif 196 - #else /* CONFIG_CGROUP_MEM_RES_CTLR */ 196 + #else /* CONFIG_MEMCG */ 197 197 struct mem_cgroup; 198 198 199 199 static inline int mem_cgroup_newpage_charge(struct page *page, ··· 384 384 struct page *newpage) 385 385 { 386 386 } 387 - #endif /* CONFIG_CGROUP_MEM_RES_CTLR */ 387 + #endif /* CONFIG_MEMCG */ 388 388 389 - #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM) 389 + #if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM) 390 390 static inline bool 391 391 mem_cgroup_bad_page_check(struct page *page) 392 392 { ··· 406 406 }; 407 407 408 408 struct sock; 409 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM 409 + #ifdef CONFIG_MEMCG_KMEM 410 410 void sock_update_memcg(struct sock *sk); 411 411 void sock_release_memcg(struct sock *sk); 412 412 #else ··· 416 416 static inline void sock_release_memcg(struct sock *sk) 417 417 { 418 418 } 419 - #endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */ 419 + #endif /* CONFIG_MEMCG_KMEM */ 420 420 #endif /* _LINUX_MEMCONTROL_H */ 421 421
+4 -4
include/linux/mmzone.h
··· 201 201 struct lruvec { 202 202 struct list_head lists[NR_LRU_LISTS]; 203 203 struct zone_reclaim_stat reclaim_stat; 204 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR 204 + #ifdef CONFIG_MEMCG 205 205 struct zone *zone; 206 206 #endif 207 207 }; ··· 671 671 int nr_zones; 672 672 #ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ 673 673 struct page *node_mem_map; 674 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR 674 + #ifdef CONFIG_MEMCG 675 675 struct page_cgroup *node_page_cgroup; 676 676 #endif 677 677 #endif ··· 736 736 737 737 static inline struct zone *lruvec_zone(struct lruvec *lruvec) 738 738 { 739 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR 739 + #ifdef CONFIG_MEMCG 740 740 return lruvec->zone; 741 741 #else 742 742 return container_of(lruvec, struct zone, lruvec); ··· 1052 1052 1053 1053 /* See declaration of similar field in struct zone */ 1054 1054 unsigned long *pageblock_flags; 1055 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR 1055 + #ifdef CONFIG_MEMCG 1056 1056 /* 1057 1057 * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use 1058 1058 * section. (see memcontrol.h/page_cgroup.h about this.)
+5 -5
include/linux/page_cgroup.h
··· 12 12 #ifndef __GENERATING_BOUNDS_H 13 13 #include <generated/bounds.h> 14 14 15 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR 15 + #ifdef CONFIG_MEMCG 16 16 #include <linux/bit_spinlock.h> 17 17 18 18 /* ··· 82 82 bit_spin_unlock(PCG_LOCK, &pc->flags); 83 83 } 84 84 85 - #else /* CONFIG_CGROUP_MEM_RES_CTLR */ 85 + #else /* CONFIG_MEMCG */ 86 86 struct page_cgroup; 87 87 88 88 static inline void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat) ··· 102 102 { 103 103 } 104 104 105 - #endif /* CONFIG_CGROUP_MEM_RES_CTLR */ 105 + #endif /* CONFIG_MEMCG */ 106 106 107 107 #include <linux/swap.h> 108 108 109 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 109 + #ifdef CONFIG_MEMCG_SWAP 110 110 extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, 111 111 unsigned short old, unsigned short new); 112 112 extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); ··· 138 138 return; 139 139 } 140 140 141 - #endif /* CONFIG_CGROUP_MEM_RES_CTLR_SWAP */ 141 + #endif /* CONFIG_MEMCG_SWAP */ 142 142 143 143 #endif /* !__GENERATING_BOUNDS_H */ 144 144
+1 -1
include/linux/sched.h
··· 1584 1584 /* bitmask and counter of trace recursion */ 1585 1585 unsigned long trace_recursion; 1586 1586 #endif /* CONFIG_TRACING */ 1587 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */ 1587 + #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */ 1588 1588 struct memcg_batch_info { 1589 1589 int do_batch; /* incremented when batch uncharge started */ 1590 1590 struct mem_cgroup *memcg; /* target memcg of uncharge */
+3 -3
include/linux/swap.h
··· 301 301 302 302 extern int kswapd_run(int nid); 303 303 extern void kswapd_stop(int nid); 304 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR 304 + #ifdef CONFIG_MEMCG 305 305 extern int mem_cgroup_swappiness(struct mem_cgroup *mem); 306 306 #else 307 307 static inline int mem_cgroup_swappiness(struct mem_cgroup *mem) ··· 309 309 return vm_swappiness; 310 310 } 311 311 #endif 312 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 312 + #ifdef CONFIG_MEMCG_SWAP 313 313 extern void mem_cgroup_uncharge_swap(swp_entry_t ent); 314 314 #else 315 315 static inline void mem_cgroup_uncharge_swap(swp_entry_t ent) ··· 360 360 extern int try_to_free_swap(struct page *); 361 361 struct backing_dev_info; 362 362 363 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR 363 + #ifdef CONFIG_MEMCG 364 364 extern void 365 365 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout); 366 366 #else
+2 -2
include/net/sock.h
··· 913 913 #ifdef SOCK_REFCNT_DEBUG 914 914 atomic_t socks; 915 915 #endif 916 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM 916 + #ifdef CONFIG_MEMCG_KMEM 917 917 /* 918 918 * cgroup specific init/deinit functions. Called once for all 919 919 * protocols that implement it, from cgroups populate function. ··· 994 994 #define sk_refcnt_debug_release(sk) do { } while (0) 995 995 #endif /* SOCK_REFCNT_DEBUG */ 996 996 997 - #if defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) && defined(CONFIG_NET) 997 + #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_NET) 998 998 extern struct static_key memcg_socket_limit_enabled; 999 999 static inline struct cg_proto *parent_cg_proto(struct proto *proto, 1000 1000 struct cg_proto *cg_proto)
+7 -7
init/Kconfig
··· 686 686 This option enables controller independent resource accounting 687 687 infrastructure that works with cgroups. 688 688 689 - config CGROUP_MEM_RES_CTLR 689 + config MEMCG 690 690 bool "Memory Resource Controller for Control Groups" 691 691 depends on RESOURCE_COUNTERS 692 692 select MM_OWNER ··· 709 709 This config option also selects MM_OWNER config option, which 710 710 could in turn add some fork/exit overhead. 711 711 712 - config CGROUP_MEM_RES_CTLR_SWAP 712 + config MEMCG_SWAP 713 713 bool "Memory Resource Controller Swap Extension" 714 - depends on CGROUP_MEM_RES_CTLR && SWAP 714 + depends on MEMCG && SWAP 715 715 help 716 716 Add swap management feature to memory resource controller. When you 717 717 enable this, you can limit mem+swap usage per cgroup. In other words, ··· 726 726 if boot option "swapaccount=0" is set, swap will not be accounted. 727 727 Now, memory usage of swap_cgroup is 2 bytes per entry. If swap page 728 728 size is 4096bytes, 512k per 1Gbytes of swap. 729 - config CGROUP_MEM_RES_CTLR_SWAP_ENABLED 729 + config MEMCG_SWAP_ENABLED 730 730 bool "Memory Resource Controller Swap Extension enabled by default" 731 - depends on CGROUP_MEM_RES_CTLR_SWAP 731 + depends on MEMCG_SWAP 732 732 default y 733 733 help 734 734 Memory Resource Controller Swap Extension comes with its price in ··· 739 739 For those who want to have the feature enabled by default should 740 740 select this option (if, for some reason, they need to disable it 741 741 then swapaccount=0 does the trick). 742 - config CGROUP_MEM_RES_CTLR_KMEM 742 + config MEMCG_KMEM 743 743 bool "Memory Resource Controller Kernel Memory accounting (EXPERIMENTAL)" 744 - depends on CGROUP_MEM_RES_CTLR && EXPERIMENTAL 744 + depends on MEMCG && EXPERIMENTAL 745 745 default n 746 746 help 747 747 The Kernel Memory extension for Memory Resource Controller can limit
+1 -1
kernel/fork.c
··· 1306 1306 #ifdef CONFIG_DEBUG_MUTEXES 1307 1307 p->blocked_on = NULL; /* not blocked yet */ 1308 1308 #endif 1309 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR 1309 + #ifdef CONFIG_MEMCG 1310 1310 p->memcg_batch.do_batch = 0; 1311 1311 p->memcg_batch.memcg = NULL; 1312 1312 #endif
+1 -1
mm/Makefile
··· 49 49 obj-$(CONFIG_MIGRATION) += migrate.o 50 50 obj-$(CONFIG_QUICKLIST) += quicklist.o 51 51 obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o 52 - obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o 52 + obj-$(CONFIG_MEMCG) += memcontrol.o page_cgroup.o 53 53 obj-$(CONFIG_CGROUP_HUGETLB) += hugetlb_cgroup.o 54 54 obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o 55 55 obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o
+1 -1
mm/hwpoison-inject.c
··· 123 123 if (!dentry) 124 124 goto fail; 125 125 126 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 126 + #ifdef CONFIG_MEMCG_SWAP 127 127 dentry = debugfs_create_u64("corrupt-filter-memcg", 0600, 128 128 hwpoison_dir, &hwpoison_filter_memcg); 129 129 if (!dentry)
+10 -10
mm/memcontrol.c
··· 61 61 #define MEM_CGROUP_RECLAIM_RETRIES 5 62 62 static struct mem_cgroup *root_mem_cgroup __read_mostly; 63 63 64 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 64 + #ifdef CONFIG_MEMCG_SWAP 65 65 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */ 66 66 int do_swap_account __read_mostly; 67 67 68 68 /* for remember boot option*/ 69 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED 69 + #ifdef CONFIG_MEMCG_SWAP_ENABLED 70 70 static int really_do_swap_account __initdata = 1; 71 71 #else 72 72 static int really_do_swap_account __initdata = 0; ··· 407 407 static void mem_cgroup_put(struct mem_cgroup *memcg); 408 408 409 409 /* Writing them here to avoid exposing memcg's inner layout */ 410 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM 410 + #ifdef CONFIG_MEMCG_KMEM 411 411 #include <net/sock.h> 412 412 #include <net/ip.h> 413 413 ··· 466 466 } 467 467 EXPORT_SYMBOL(tcp_proto_cgroup); 468 468 #endif /* CONFIG_INET */ 469 - #endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */ 469 + #endif /* CONFIG_MEMCG_KMEM */ 470 470 471 - #if defined(CONFIG_INET) && defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) 471 + #if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM) 472 472 static void disarm_sock_keys(struct mem_cgroup *memcg) 473 473 { 474 474 if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto)) ··· 3085 3085 } 3086 3086 #endif 3087 3087 3088 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 3088 + #ifdef CONFIG_MEMCG_SWAP 3089 3089 /* 3090 3090 * called from swap_entry_free(). remove record in swap_cgroup and 3091 3091 * uncharge "memsw" account. 
··· 4518 4518 return 0; 4519 4519 } 4520 4520 4521 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM 4521 + #ifdef CONFIG_MEMCG_KMEM 4522 4522 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) 4523 4523 { 4524 4524 return mem_cgroup_sockets_init(memcg, ss); ··· 4608 4608 .read_seq_string = mem_control_numa_stat_show, 4609 4609 }, 4610 4610 #endif 4611 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 4611 + #ifdef CONFIG_MEMCG_SWAP 4612 4612 { 4613 4613 .name = "memsw.usage_in_bytes", 4614 4614 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), ··· 4795 4795 } 4796 4796 EXPORT_SYMBOL(parent_mem_cgroup); 4797 4797 4798 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 4798 + #ifdef CONFIG_MEMCG_SWAP 4799 4799 static void __init enable_swap_cgroup(void) 4800 4800 { 4801 4801 if (!mem_cgroup_disabled() && really_do_swap_account) ··· 5526 5526 .__DEPRECATED_clear_css_refs = true, 5527 5527 }; 5528 5528 5529 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 5529 + #ifdef CONFIG_MEMCG_SWAP 5530 5530 static int __init enable_swap_account(char *s) 5531 5531 { 5532 5532 /* consider enabled if no parameter or 1 is given */
+1 -1
mm/memory-failure.c
··· 128 128 * can only guarantee that the page either belongs to the memcg tasks, or is 129 129 * a freed page. 130 130 */ 131 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 131 + #ifdef CONFIG_MEMCG_SWAP 132 132 u64 hwpoison_filter_memcg; 133 133 EXPORT_SYMBOL_GPL(hwpoison_filter_memcg); 134 134 static int hwpoison_filter_task(struct page *p)
+1 -1
mm/mmzone.c
··· 96 96 for_each_lru(lru) 97 97 INIT_LIST_HEAD(&lruvec->lists[lru]); 98 98 99 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR 99 + #ifdef CONFIG_MEMCG 100 100 lruvec->zone = zone; 101 101 #endif 102 102 }
+1 -1
mm/oom_kill.c
··· 541 541 sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide"); 542 542 } 543 543 544 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR 544 + #ifdef CONFIG_MEMCG 545 545 void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, 546 546 int order) 547 547 {
+1 -1
mm/page_cgroup.c
··· 317 317 #endif 318 318 319 319 320 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 320 + #ifdef CONFIG_MEMCG_SWAP 321 321 322 322 static DEFINE_MUTEX(swap_cgroup_mutex); 323 323 struct swap_cgroup_ctrl {
+2 -2
mm/vmscan.c
··· 133 133 static LIST_HEAD(shrinker_list); 134 134 static DECLARE_RWSEM(shrinker_rwsem); 135 135 136 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR 136 + #ifdef CONFIG_MEMCG 137 137 static bool global_reclaim(struct scan_control *sc) 138 138 { 139 139 return !sc->target_mem_cgroup; ··· 2142 2142 return nr_reclaimed; 2143 2143 } 2144 2144 2145 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR 2145 + #ifdef CONFIG_MEMCG 2146 2146 2147 2147 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg, 2148 2148 gfp_t gfp_mask, bool noswap,
+1 -1
net/core/sock.c
··· 142 142 static DEFINE_MUTEX(proto_list_mutex); 143 143 static LIST_HEAD(proto_list); 144 144 145 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM 145 + #ifdef CONFIG_MEMCG_KMEM 146 146 int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss) 147 147 { 148 148 struct proto *proto;
+1 -1
net/ipv4/Makefile
··· 49 49 obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o 50 50 obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o 51 51 obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o 52 - obj-$(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) += tcp_memcontrol.o 52 + obj-$(CONFIG_MEMCG_KMEM) += tcp_memcontrol.o 53 53 obj-$(CONFIG_NETLABEL) += cipso_ipv4.o 54 54 55 55 obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
+2 -2
net/ipv4/sysctl_net_ipv4.c
··· 184 184 int ret; 185 185 unsigned long vec[3]; 186 186 struct net *net = current->nsproxy->net_ns; 187 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM 187 + #ifdef CONFIG_MEMCG_KMEM 188 188 struct mem_cgroup *memcg; 189 189 #endif 190 190 ··· 203 203 if (ret) 204 204 return ret; 205 205 206 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM 206 + #ifdef CONFIG_MEMCG_KMEM 207 207 rcu_read_lock(); 208 208 memcg = mem_cgroup_from_task(current); 209 209
+1 -1
net/ipv4/tcp_ipv4.c
··· 2633 2633 .compat_setsockopt = compat_tcp_setsockopt, 2634 2634 .compat_getsockopt = compat_tcp_getsockopt, 2635 2635 #endif 2636 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM 2636 + #ifdef CONFIG_MEMCG_KMEM 2637 2637 .init_cgroup = tcp_init_cgroup, 2638 2638 .destroy_cgroup = tcp_destroy_cgroup, 2639 2639 .proto_cgroup = tcp_proto_cgroup,
+1 -1
net/ipv6/tcp_ipv6.c
··· 2015 2015 .compat_setsockopt = compat_tcp_setsockopt, 2016 2016 .compat_getsockopt = compat_tcp_getsockopt, 2017 2017 #endif 2018 - #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM 2018 + #ifdef CONFIG_MEMCG_KMEM 2019 2019 .proto_cgroup = tcp_proto_cgroup, 2020 2020 #endif 2021 2021 };