memcg: create extensible page stat update routines

Replace usage of the mem_cgroup_update_file_mapped() memcg
statistic update routine with two new routines:
* mem_cgroup_inc_page_stat()
* mem_cgroup_dec_page_stat()

As before, only the file_mapped statistic is managed. However, these more
general interfaces make it easier to add new statistics; the follow-up memcg
dirty page accounting patches add them.
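
To make the extensibility concrete, a follow-on statistic only needs a new
enum entry and callers that use the inc/dec helpers. A minimal sketch,
assuming a hypothetical dirty-page item (MEMCG_NR_FILE_DIRTY is illustrative
and not defined by this patch):

    /* include/linux/memcontrol.h -- hypothetical new item next to the existing one */
    enum mem_cgroup_page_stat_item {
    	MEMCG_NR_FILE_MAPPED,	/* # of pages charged as file rss */
    	MEMCG_NR_FILE_DIRTY,	/* illustrative only: # of dirty file pages */
    };

    /* A caller charges/uncharges the per-memcg counter where the page state flips: */
    mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_DIRTY);	/* page became dirty */
    mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_DIRTY);	/* page was cleaned */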

Signed-off-by: Greg Thelen <gthelen@google.com>
Signed-off-by: Andrea Righi <arighi@develer.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Greg Thelen and committed by Linus Torvalds · 2a7106f2 · ece72400

3 files changed, +37 -14

include/linux/memcontrol.h (+28 -3)
···
 struct page;
 struct mm_struct;
 
+/* Stats that can be updated by kernel. */
+enum mem_cgroup_page_stat_item {
+	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
+};
+
 extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct list_head *dst,
 					unsigned long *scanned, int order,
···
 	return false;
 }
 
-void mem_cgroup_update_file_mapped(struct page *page, int val);
+void mem_cgroup_update_page_stat(struct page *page,
+				 enum mem_cgroup_page_stat_item idx,
+				 int val);
+
+static inline void mem_cgroup_inc_page_stat(struct page *page,
+					    enum mem_cgroup_page_stat_item idx)
+{
+	mem_cgroup_update_page_stat(page, idx, 1);
+}
+
+static inline void mem_cgroup_dec_page_stat(struct page *page,
+					    enum mem_cgroup_page_stat_item idx)
+{
+	mem_cgroup_update_page_stat(page, idx, -1);
+}
+
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 					    gfp_t gfp_mask);
 u64 mem_cgroup_get_limit(struct mem_cgroup *mem);
···
 {
 }
 
-static inline void mem_cgroup_update_file_mapped(struct page *page,
-					int val)
+static inline void mem_cgroup_inc_page_stat(struct page *page,
+					    enum mem_cgroup_page_stat_item idx)
+{
+}
+
+static inline void mem_cgroup_dec_page_stat(struct page *page,
+					    enum mem_cgroup_page_stat_item idx)
 {
 }
 
mm/memcontrol.c (+7 -9)
···
  * possibility of race condition. If there is, we take a lock.
  */
 
-static void mem_cgroup_update_file_stat(struct page *page, int idx, int val)
+void mem_cgroup_update_page_stat(struct page *page,
+				 enum mem_cgroup_page_stat_item idx, int val)
 {
 	struct mem_cgroup *mem;
 	struct page_cgroup *pc = lookup_page_cgroup(page);
···
 		goto out;
 	}
 
-	this_cpu_add(mem->stat->count[idx], val);
-
 	switch (idx) {
-	case MEM_CGROUP_STAT_FILE_MAPPED:
+	case MEMCG_NR_FILE_MAPPED:
 		if (val > 0)
 			SetPageCgroupFileMapped(pc);
 		else if (!page_mapped(page))
 			ClearPageCgroupFileMapped(pc);
+		idx = MEM_CGROUP_STAT_FILE_MAPPED;
 		break;
 	default:
 		BUG();
 	}
+
+	this_cpu_add(mem->stat->count[idx], val);
 
 out:
 	if (unlikely(need_unlock))
···
 	rcu_read_unlock();
 	return;
 }
-
-void mem_cgroup_update_file_mapped(struct page *page, int val)
-{
-	mem_cgroup_update_file_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, val);
-}
+EXPORT_SYMBOL(mem_cgroup_update_page_stat);
 
 /*
  * size of first charge trial. "32" comes from vmscan.c's magic value.
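
Under the same assumption as above (a hypothetical MEMCG_NR_FILE_DIRTY item),
the only core change a new statistic would need here is one more case in the
switch, translating the kernel-facing index to an internal counter before the
final this_cpu_add(). The flag helpers and MEM_CGROUP_STAT_FILE_DIRTY below
are illustrative names, not part of this patch:

    	case MEMCG_NR_FILE_DIRTY:
    		/* illustrative: track a per-page_cgroup dirty flag, as the FILE_MAPPED case does */
    		if (val > 0)
    			SetPageCgroupFileDirty(pc);
    		else
    			ClearPageCgroupFileDirty(pc);
    		idx = MEM_CGROUP_STAT_FILE_DIRTY;
    		break;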
mm/rmap.c (+2 -2)
···
 {
 	if (atomic_inc_and_test(&page->_mapcount)) {
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_update_file_mapped(page, 1);
+		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 	}
 }
···
 			NR_ANON_TRANSPARENT_HUGEPAGES);
 	} else {
 		__dec_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_update_file_mapped(page, -1);
+		mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
 	}
 	/*
 	 * It would be tidy to reset the PageAnon mapping here,