Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

thp: fix anon memory statistics with transparent hugepages

Count each transparent hugepage as HPAGE_PMD_NR pages in the LRU
statistics, so the Active(anon) and Inactive(anon) statistics in
/proc/meminfo are correct.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
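
For context (not part of the patch): the counters this change corrects are exported through /proc/meminfo. A minimal userspace sketch to read the relevant fields, assuming a THP-enabled kernel that already reports AnonHugePages:

/* meminfo_anon.c - print the anon LRU and THP counters from /proc/meminfo.
 * Illustrative sketch only; field names assume a THP-enabled kernel.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* Fields whose accounting this patch corrects or relates to. */
		if (!strncmp(line, "Active(anon):", 13) ||
		    !strncmp(line, "Inactive(anon):", 15) ||
		    !strncmp(line, "AnonPages:", 10) ||
		    !strncmp(line, "AnonHugePages:", 14))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}

Without the fix, a workload backed largely by transparent hugepages could show Active(anon) + Inactive(anon) well below AnonPages, since each hugepage contributed only a single page to the LRU counters.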

Authored by Rik van Riel, committed by Linus Torvalds
2c888cfb 97562cd2

5 files changed, 30 insertions(+), 9 deletions(-)

include/linux/huge_mm.h  +8
···
 		return;
 	__vma_adjust_trans_huge(vma, start, end, adjust_next);
 }
+static inline int hpage_nr_pages(struct page *page)
+{
+	if (unlikely(PageTransHuge(page)))
+		return HPAGE_PMD_NR;
+	return 1;
+}
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUG(); 0; })
 #define HPAGE_PMD_MASK ({ BUG(); 0; })
 #define HPAGE_PMD_SIZE ({ BUG(); 0; })
+
+#define hpage_nr_pages(x) 1
 
 #define transparent_hugepage_enabled(__vma) 0
 
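Not part of the patch, but as an illustration of the helper's contract: callers that update per-zone LRU counters are expected to move them by hpage_nr_pages(page) rather than by 1, so a transparent hugepage is accounted as HPAGE_PMD_NR base pages. A userspace model with the kernel types mocked and HPAGE_PMD_NR assumed to be 512 (2MB hugepages over 4kB base pages):

/* Illustrative model only: kernel types are mocked. */
#include <assert.h>
#include <stdbool.h>

#define HPAGE_PMD_NR 512

struct page { bool trans_huge; };

static int hpage_nr_pages(struct page *page)
{
	return page->trans_huge ? HPAGE_PMD_NR : 1;
}

int main(void)
{
	struct page base = { .trans_huge = false };
	struct page thp  = { .trans_huge = true };
	long nr_inactive_anon = 0;	/* stands in for a zone LRU counter */

	/* Adding pages to an LRU list: count base pages, not list entries. */
	nr_inactive_anon += hpage_nr_pages(&base);	/* +1   */
	nr_inactive_anon += hpage_nr_pages(&thp);	/* +512 */
	assert(nr_inactive_anon == 513);
	return 0;
}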
include/linux/mm_inline.h  +5 -3
···
 #ifndef LINUX_MM_INLINE_H
 #define LINUX_MM_INLINE_H
 
+#include <linux/huge_mm.h>
+
 /**
  * page_is_file_cache - should the page be on a file LRU or anon LRU?
  * @page: the page to test
···
 		       struct list_head *head)
 {
 	list_add(&page->lru, head);
-	__inc_zone_state(zone, NR_LRU_BASE + l);
+	__mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page));
 	mem_cgroup_add_lru_list(page, l);
 }
 
···
 del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
 {
 	list_del(&page->lru);
-	__dec_zone_state(zone, NR_LRU_BASE + l);
+	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
 	mem_cgroup_del_lru_list(page, l);
 }
 
···
 			l += LRU_ACTIVE;
 		}
 	}
-	__dec_zone_state(zone, NR_LRU_BASE + l);
+	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
 	mem_cgroup_del_lru_list(page, l);
 }
 
mm/huge_memory.c  +10
···
 	int i;
 	unsigned long head_index = page->index;
 	struct zone *zone = page_zone(page);
+	int zonestat;
 
 	/* prevent PageLRU to go away from under us, and freeze lru stats */
 	spin_lock_irq(&zone->lru_lock);
···
 
 	__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
 	__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
+
+	/*
+	 * A hugepage counts for HPAGE_PMD_NR pages on the LRU statistics,
+	 * so adjust those appropriately if this page is on the LRU.
+	 */
+	if (PageLRU(page)) {
+		zonestat = NR_LRU_BASE + page_lru(page);
+		__mod_zone_page_state(zone, zonestat, -(HPAGE_PMD_NR-1));
+	}
 
 	ClearPageCompound(page);
 	compound_unlock(page);
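The -(HPAGE_PMD_NR - 1) adjustment keeps the LRU total constant across a split: while compound, the head page is accounted as HPAGE_PMD_NR; afterwards it stands for a single page, and the tail pages are accounted individually as they rejoin the LRU. A small arithmetic sketch (userspace model, not part of the patch, with HPAGE_PMD_NR assumed to be 512):

/* Illustrative arithmetic only; models the adjustment made during a split. */
#include <assert.h>

#define HPAGE_PMD_NR 512

int main(void)
{
	long lru_before = HPAGE_PMD_NR;		/* compound head on the LRU, counted as a whole THP */
	long lru_after  = lru_before
			  - (HPAGE_PMD_NR - 1)	/* head now stands for one base page, not 512 */
			  + (HPAGE_PMD_NR - 1);	/* 511 tails accounted as they rejoin the LRU */

	assert(lru_after == lru_before);	/* the split leaves the LRU total unchanged */
	return 0;
}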
mm/memcontrol.c  +1 -1
···
 		case 0:
 			list_move(&page->lru, dst);
 			mem_cgroup_del_lru(page);
-			nr_taken++;
+			nr_taken += hpage_nr_pages(page);
 			break;
 		case -EBUSY:
 			/* we don't affect global LRU but rotate in our LRU */
mm/vmscan.c  +6 -5
···
 		case 0:
 			list_move(&page->lru, dst);
 			mem_cgroup_del_lru(page);
-			nr_taken++;
+			nr_taken += hpage_nr_pages(page);
 			break;
 
 		case -EBUSY:
···
 			if (__isolate_lru_page(cursor_page, mode, file) == 0) {
 				list_move(&cursor_page->lru, dst);
 				mem_cgroup_del_lru(cursor_page);
-				nr_taken++;
+				nr_taken += hpage_nr_pages(page);
 				nr_lumpy_taken++;
 				if (PageDirty(cursor_page))
 					nr_lumpy_dirty++;
···
 	struct page *page;
 
 	list_for_each_entry(page, page_list, lru) {
+		int numpages = hpage_nr_pages(page);
 		lru = page_lru_base_type(page);
 		if (PageActive(page)) {
 			lru += LRU_ACTIVE;
 			ClearPageActive(page);
-			nr_active++;
+			nr_active += numpages;
 		}
 		if (count)
-			count[lru]++;
+			count[lru] += numpages;
 	}
 
 	return nr_active;
···
 
 		list_move(&page->lru, &zone->lru[lru].list);
 		mem_cgroup_add_lru_list(page, lru);
-		pgmoved++;
+		pgmoved += hpage_nr_pages(page);
 
 		if (!pagevec_add(&pvec, page) || list_empty(list)) {
 			spin_unlock_irq(&zone->lru_lock);