Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

memcg: rename high level charging functions

mem_cgroup_newpage_charge is used only for charging anonymous memory, so
it is better to rename it to mem_cgroup_charge_anon.

mem_cgroup_cache_charge is used for file-backed memory, so rename it to
mem_cgroup_charge_file.

Signed-off-by: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Michal Hocko and committed by
Linus Torvalds
d715ae08 6d1fdc48

+19 -19
+2 -2
Documentation/cgroups/memcg_test.txt
··· 24 24 25 25 a page/swp_entry may be charged (usage += PAGE_SIZE) at 26 26 27 - mem_cgroup_newpage_charge() 27 + mem_cgroup_charge_anon() 28 28 Called at new page fault and Copy-On-Write. 29 29 30 30 mem_cgroup_try_charge_swapin() ··· 32 32 Followed by charge-commit-cancel protocol. (With swap accounting) 33 33 At commit, a charge recorded in swap_cgroup is removed. 34 34 35 - mem_cgroup_cache_charge() 35 + mem_cgroup_charge_file() 36 36 Called at add_to_page_cache() 37 37 38 38 mem_cgroup_cache_charge_swapin()
+4 -4
include/linux/memcontrol.h
··· 65 65 * (Of course, if memcg does memory allocation in future, GFP_KERNEL is sane.) 66 66 */ 67 67 68 - extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm, 68 + extern int mem_cgroup_charge_anon(struct page *page, struct mm_struct *mm, 69 69 gfp_t gfp_mask); 70 70 /* for swap handling */ 71 71 extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm, ··· 74 74 struct mem_cgroup *memcg); 75 75 extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg); 76 76 77 - extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, 77 + extern int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm, 78 78 gfp_t gfp_mask); 79 79 80 80 struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); ··· 233 233 #else /* CONFIG_MEMCG */ 234 234 struct mem_cgroup; 235 235 236 - static inline int mem_cgroup_newpage_charge(struct page *page, 236 + static inline int mem_cgroup_charge_anon(struct page *page, 237 237 struct mm_struct *mm, gfp_t gfp_mask) 238 238 { 239 239 return 0; 240 240 } 241 241 242 - static inline int mem_cgroup_cache_charge(struct page *page, 242 + static inline int mem_cgroup_charge_file(struct page *page, 243 243 struct mm_struct *mm, gfp_t gfp_mask) 244 244 { 245 245 return 0;
+1 -1
mm/filemap.c
··· 563 563 VM_BUG_ON_PAGE(!PageLocked(page), page); 564 564 VM_BUG_ON_PAGE(PageSwapBacked(page), page); 565 565 566 - error = mem_cgroup_cache_charge(page, current->mm, 566 + error = mem_cgroup_charge_file(page, current->mm, 567 567 gfp_mask & GFP_RECLAIM_MASK); 568 568 if (error) 569 569 return error;
+4 -4
mm/huge_memory.c
··· 827 827 count_vm_event(THP_FAULT_FALLBACK); 828 828 return VM_FAULT_FALLBACK; 829 829 } 830 - if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { 830 + if (unlikely(mem_cgroup_charge_anon(page, mm, GFP_KERNEL))) { 831 831 put_page(page); 832 832 count_vm_event(THP_FAULT_FALLBACK); 833 833 return VM_FAULT_FALLBACK; ··· 968 968 __GFP_OTHER_NODE, 969 969 vma, address, page_to_nid(page)); 970 970 if (unlikely(!pages[i] || 971 - mem_cgroup_newpage_charge(pages[i], mm, 971 + mem_cgroup_charge_anon(pages[i], mm, 972 972 GFP_KERNEL))) { 973 973 if (pages[i]) 974 974 put_page(pages[i]); ··· 1101 1101 goto out; 1102 1102 } 1103 1103 1104 - if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { 1104 + if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL))) { 1105 1105 put_page(new_page); 1106 1106 if (page) { 1107 1107 split_huge_page(page); ··· 2359 2359 if (!new_page) 2360 2360 return; 2361 2361 2362 - if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) 2362 + if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL))) 2363 2363 return; 2364 2364 2365 2365 /*
+2 -2
mm/memcontrol.c
··· 3818 3818 return ret; 3819 3819 } 3820 3820 3821 - int mem_cgroup_newpage_charge(struct page *page, 3821 + int mem_cgroup_charge_anon(struct page *page, 3822 3822 struct mm_struct *mm, gfp_t gfp_mask) 3823 3823 { 3824 3824 unsigned int nr_pages = 1; ··· 3954 3954 MEM_CGROUP_CHARGE_TYPE_ANON); 3955 3955 } 3956 3956 3957 - int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, 3957 + int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm, 3958 3958 gfp_t gfp_mask) 3959 3959 { 3960 3960 enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
+3 -3
mm/memory.c
··· 2828 2828 } 2829 2829 __SetPageUptodate(new_page); 2830 2830 2831 - if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)) 2831 + if (mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL)) 2832 2832 goto oom_free_new; 2833 2833 2834 2834 mmun_start = address & PAGE_MASK; ··· 3281 3281 */ 3282 3282 __SetPageUptodate(page); 3283 3283 3284 - if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) 3284 + if (mem_cgroup_charge_anon(page, mm, GFP_KERNEL)) 3285 3285 goto oom_free_page; 3286 3286 3287 3287 entry = mk_pte(page, vma->vm_page_prot); ··· 3537 3537 if (!new_page) 3538 3538 return VM_FAULT_OOM; 3539 3539 3540 - if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)) { 3540 + if (mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL)) { 3541 3541 page_cache_release(new_page); 3542 3542 return VM_FAULT_OOM; 3543 3543 }
+3 -3
mm/shmem.c
··· 683 683 * the shmem_swaplist_mutex which might hold up shmem_writepage(). 684 684 * Charged back to the user (not to caller) when swap account is used. 685 685 */ 686 - error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); 686 + error = mem_cgroup_charge_file(page, current->mm, GFP_KERNEL); 687 687 if (error) 688 688 goto out; 689 689 /* No radix_tree_preload: swap entry keeps a place for page in tree */ ··· 1080 1080 goto failed; 1081 1081 } 1082 1082 1083 - error = mem_cgroup_cache_charge(page, current->mm, 1083 + error = mem_cgroup_charge_file(page, current->mm, 1084 1084 gfp & GFP_RECLAIM_MASK); 1085 1085 if (!error) { 1086 1086 error = shmem_add_to_page_cache(page, mapping, index, ··· 1134 1134 1135 1135 SetPageSwapBacked(page); 1136 1136 __set_page_locked(page); 1137 - error = mem_cgroup_cache_charge(page, current->mm, 1137 + error = mem_cgroup_charge_file(page, current->mm, 1138 1138 gfp & GFP_RECLAIM_MASK); 1139 1139 if (error) 1140 1140 goto decused;