Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: memcontrol: avoid charge statistics churn during page migration

Charge migration currently disables IRQs twice to update the charge
statistics for the old page and then again for the new page.

But migration is a seamless transition of a charge from one physical
page to another one of the same size, so this should be a non-event from
an accounting point of view. Leave the statistics alone.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Johannes Weiner; committed by Linus Torvalds.
6abb5a86 3cbb0187

+10 -25
+10 -25
mm/memcontrol.c
··· 2728 2728 } 2729 2729 2730 2730 static void commit_charge(struct page *page, struct mem_cgroup *memcg, 2731 - unsigned int nr_pages, bool lrucare) 2731 + bool lrucare) 2732 2732 { 2733 2733 struct page_cgroup *pc = lookup_page_cgroup(page); 2734 2734 int isolated; ··· 2765 2765 2766 2766 if (lrucare) 2767 2767 unlock_page_lru(page, isolated); 2768 - 2769 - local_irq_disable(); 2770 - mem_cgroup_charge_statistics(memcg, page, nr_pages); 2771 - /* 2772 - * "charge_statistics" updated event counter. Then, check it. 2773 - * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree. 2774 - * if they exceeds softlimit. 2775 - */ 2776 - memcg_check_events(memcg, page); 2777 - local_irq_enable(); 2778 2768 } 2779 2769 2780 2770 static DEFINE_MUTEX(set_limit_mutex); ··· 6450 6460 if (!memcg) 6451 6461 return; 6452 6462 6463 + commit_charge(page, memcg, lrucare); 6464 + 6453 6465 if (PageTransHuge(page)) { 6454 6466 nr_pages <<= compound_order(page); 6455 6467 VM_BUG_ON_PAGE(!PageTransHuge(page), page); 6456 6468 } 6457 6469 6458 - commit_charge(page, memcg, nr_pages, lrucare); 6470 + local_irq_disable(); 6471 + mem_cgroup_charge_statistics(memcg, page, nr_pages); 6472 + memcg_check_events(memcg, page); 6473 + local_irq_enable(); 6459 6474 6460 6475 if (do_swap_account && PageSwapCache(page)) { 6461 6476 swp_entry_t entry = { .val = page_private(page) }; ··· 6646 6651 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, 6647 6652 bool lrucare) 6648 6653 { 6649 - unsigned int nr_pages = 1; 6650 6654 struct page_cgroup *pc; 6651 6655 int isolated; 6652 6656 ··· 6654 6660 VM_BUG_ON_PAGE(!lrucare && PageLRU(oldpage), oldpage); 6655 6661 VM_BUG_ON_PAGE(!lrucare && PageLRU(newpage), newpage); 6656 6662 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage); 6663 + VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage), 6664 + newpage); 6657 6665 6658 6666 if (mem_cgroup_disabled()) 6659 6667 return; ··· 6673 6677 
VM_BUG_ON_PAGE(!(pc->flags & PCG_MEM), oldpage); 6674 6678 VM_BUG_ON_PAGE(do_swap_account && !(pc->flags & PCG_MEMSW), oldpage); 6675 6679 6676 - if (PageTransHuge(oldpage)) { 6677 - nr_pages <<= compound_order(oldpage); 6678 - VM_BUG_ON_PAGE(!PageTransHuge(oldpage), oldpage); 6679 - VM_BUG_ON_PAGE(!PageTransHuge(newpage), newpage); 6680 - } 6681 - 6682 6680 if (lrucare) 6683 6681 lock_page_lru(oldpage, &isolated); 6684 6682 ··· 6681 6691 if (lrucare) 6682 6692 unlock_page_lru(oldpage, isolated); 6683 6693 6684 - local_irq_disable(); 6685 - mem_cgroup_charge_statistics(pc->mem_cgroup, oldpage, -nr_pages); 6686 - memcg_check_events(pc->mem_cgroup, oldpage); 6687 - local_irq_enable(); 6688 - 6689 - commit_charge(newpage, pc->mem_cgroup, nr_pages, lrucare); 6694 + commit_charge(newpage, pc->mem_cgroup, lrucare); 6690 6695 } 6691 6696 6692 6697 /*