Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

memcg: remove MEMCG_NR_FILE_MAPPED

While accounting memcg page stat, it's not worth using
MEMCG_NR_FILE_MAPPED as an extra layer of indirection because of the
complexity and presumed performance overhead. We can use
MEM_CGROUP_STAT_FILE_MAPPED directly.

Signed-off-by: Sha Zhengju <handai.szj@taobao.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Fengguang Wu <fengguang.wu@intel.com>
Reviewed-by: Greg Thelen <gthelen@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Sha Zhengju and committed by
Linus Torvalds
68b4876d 1a36e59d

+22 -34
+19 -8
include/linux/memcontrol.h
··· 30 30 struct mm_struct; 31 31 struct kmem_cache; 32 32 33 - /* Stats that can be updated by kernel. */ 34 - enum mem_cgroup_page_stat_item { 35 - MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */ 33 + /* 34 + * The corresponding mem_cgroup_stat_names is defined in mm/memcontrol.c, 35 + * These two lists should keep in accord with each other. 36 + */ 37 + enum mem_cgroup_stat_index { 38 + /* 39 + * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss. 40 + */ 41 + MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */ 42 + MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */ 43 + MEM_CGROUP_STAT_RSS_HUGE, /* # of pages charged as anon huge */ 44 + MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */ 45 + MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */ 46 + MEM_CGROUP_STAT_NSTATS, 36 47 }; 37 48 38 49 struct mem_cgroup_reclaim_cookie { ··· 244 233 } 245 234 246 235 void mem_cgroup_update_page_stat(struct page *page, 247 - enum mem_cgroup_page_stat_item idx, 236 + enum mem_cgroup_stat_index idx, 248 237 int val); 249 238 250 239 static inline void mem_cgroup_inc_page_stat(struct page *page, 251 - enum mem_cgroup_page_stat_item idx) 240 + enum mem_cgroup_stat_index idx) 252 241 { 253 242 mem_cgroup_update_page_stat(page, idx, 1); 254 243 } 255 244 256 245 static inline void mem_cgroup_dec_page_stat(struct page *page, 257 - enum mem_cgroup_page_stat_item idx) 246 + enum mem_cgroup_stat_index idx) 258 247 { 259 248 mem_cgroup_update_page_stat(page, idx, -1); 260 249 } ··· 460 449 } 461 450 462 451 static inline void mem_cgroup_inc_page_stat(struct page *page, 463 - enum mem_cgroup_page_stat_item idx) 452 + enum mem_cgroup_stat_index idx) 464 453 { 465 454 } 466 455 467 456 static inline void mem_cgroup_dec_page_stat(struct page *page, 468 - enum mem_cgroup_page_stat_item idx) 457 + enum mem_cgroup_stat_index idx) 469 458 { 470 459 } 471 460
+1 -24
mm/memcontrol.c
··· 84 84 #endif 85 85 86 86 87 - /* 88 - * Statistics for memory cgroup. 89 - */ 90 - enum mem_cgroup_stat_index { 91 - /* 92 - * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss. 93 - */ 94 - MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */ 95 - MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */ 96 - MEM_CGROUP_STAT_RSS_HUGE, /* # of pages charged as anon huge */ 97 - MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */ 98 - MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */ 99 - MEM_CGROUP_STAT_NSTATS, 100 - }; 101 - 102 87 static const char * const mem_cgroup_stat_names[] = { 103 88 "cache", 104 89 "rss", ··· 2216 2231 } 2217 2232 2218 2233 void mem_cgroup_update_page_stat(struct page *page, 2219 - enum mem_cgroup_page_stat_item idx, int val) 2234 + enum mem_cgroup_stat_index idx, int val) 2220 2235 { 2221 2236 struct mem_cgroup *memcg; 2222 2237 struct page_cgroup *pc = lookup_page_cgroup(page); ··· 2228 2243 memcg = pc->mem_cgroup; 2229 2244 if (unlikely(!memcg || !PageCgroupUsed(pc))) 2230 2245 return; 2231 - 2232 - switch (idx) { 2233 - case MEMCG_NR_FILE_MAPPED: 2234 - idx = MEM_CGROUP_STAT_FILE_MAPPED; 2235 - break; 2236 - default: 2237 - BUG(); 2238 - } 2239 2246 2240 2247 this_cpu_add(memcg->stat->count[idx], val); 2241 2248 }
+2 -2
mm/rmap.c
··· 1111 1111 mem_cgroup_begin_update_page_stat(page, &locked, &flags); 1112 1112 if (atomic_inc_and_test(&page->_mapcount)) { 1113 1113 __inc_zone_page_state(page, NR_FILE_MAPPED); 1114 - mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED); 1114 + mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); 1115 1115 } 1116 1116 mem_cgroup_end_update_page_stat(page, &locked, &flags); 1117 1117 } ··· 1155 1155 NR_ANON_TRANSPARENT_HUGEPAGES); 1156 1156 } else { 1157 1157 __dec_zone_page_state(page, NR_FILE_MAPPED); 1158 - mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED); 1158 + mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); 1159 1159 mem_cgroup_end_update_page_stat(page, &locked, &flags); 1160 1160 } 1161 1161 if (unlikely(PageMlocked(page)))