revert "mm: fix-up zone present pages"

Revert commit 7f1290f2f2a4 ("mm: fix-up zone present pages")

That patch tried to fix an issue when calculating zone->present_pages,
but it caused a regression on 32bit systems with HIGHMEM. With that
change, reset_zone_present_pages() resets all zone->present_pages to
zero, and fixup_zone_present_pages() is called to recalculate
zone->present_pages when the boot allocator frees core memory pages into
buddy allocator. Because highmem pages are not freed by the bootmem
allocator, all highmem zones' present_pages become zero.

Various options for improving the situation are being discussed but for
now, let's return to the 3.6 code.

Cc: Jianguo Wu <wujianguo@huawei.com>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: Petr Tesarik <ptesarik@suse.cz>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: David Rientjes <rientjes@google.com>
Tested-by: Chris Clayton <chris2553@googlemail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by Andrew Morton and committed by Linus Torvalds 5576646f 0f3c42f5

Changed files
+1 -58
arch
ia64
mm
include
linux
mm
-1
arch/ia64/mm/init.c
··· 637 637 638 638 high_memory = __va(max_low_pfn * PAGE_SIZE); 639 639 640 - reset_zone_present_pages(); 641 640 for_each_online_pgdat(pgdat) 642 641 if (pgdat->bdata->node_bootmem_map) 643 642 totalram_pages += free_all_bootmem_node(pgdat);
-4
include/linux/mm.h
··· 1684 1684 static inline bool page_is_guard(struct page *page) { return false; } 1685 1685 #endif /* CONFIG_DEBUG_PAGEALLOC */ 1686 1686 1687 - extern void reset_zone_present_pages(void); 1688 - extern void fixup_zone_present_pages(int nid, unsigned long start_pfn, 1689 - unsigned long end_pfn); 1690 - 1691 1687 #endif /* __KERNEL__ */ 1692 1688 #endif /* _LINUX_MM_H */
+1 -9
mm/bootmem.c
··· 198 198 int order = ilog2(BITS_PER_LONG); 199 199 200 200 __free_pages_bootmem(pfn_to_page(start), order); 201 - fixup_zone_present_pages(page_to_nid(pfn_to_page(start)), 202 - start, start + BITS_PER_LONG); 203 201 count += BITS_PER_LONG; 204 202 start += BITS_PER_LONG; 205 203 } else { ··· 208 210 if (vec & 1) { 209 211 page = pfn_to_page(start + off); 210 212 __free_pages_bootmem(page, 0); 211 - fixup_zone_present_pages( 212 - page_to_nid(page), 213 - start + off, start + off + 1); 214 213 count++; 215 214 } 216 215 vec >>= 1; ··· 221 226 pages = bdata->node_low_pfn - bdata->node_min_pfn; 222 227 pages = bootmem_bootmap_pages(pages); 223 228 count += pages; 224 - while (pages--) { 225 - fixup_zone_present_pages(page_to_nid(page), 226 - page_to_pfn(page), page_to_pfn(page) + 1); 229 + while (pages--) 227 230 __free_pages_bootmem(page++, 0); 228 - } 229 231 230 232 bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count); 231 233
-7
mm/memory_hotplug.c
··· 106 106 void __ref put_page_bootmem(struct page *page) 107 107 { 108 108 unsigned long type; 109 - struct zone *zone; 110 109 111 110 type = (unsigned long) page->lru.next; 112 111 BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE || ··· 116 117 set_page_private(page, 0); 117 118 INIT_LIST_HEAD(&page->lru); 118 119 __free_pages_bootmem(page, 0); 119 - 120 - zone = page_zone(page); 121 - zone_span_writelock(zone); 122 - zone->present_pages++; 123 - zone_span_writeunlock(zone); 124 - totalram_pages++; 125 120 } 126 121 127 122 }
-3
mm/nobootmem.c
··· 116 116 return 0; 117 117 118 118 __free_pages_memory(start_pfn, end_pfn); 119 - fixup_zone_present_pages(pfn_to_nid(start >> PAGE_SHIFT), 120 - start_pfn, end_pfn); 121 119 122 120 return end_pfn - start_pfn; 123 121 } ··· 126 128 phys_addr_t start, end, size; 127 129 u64 i; 128 130 129 - reset_zone_present_pages(); 130 131 for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) 131 132 count += __free_memory_core(start, end); 132 133
-34
mm/page_alloc.c
··· 6098 6098 dump_page_flags(page->flags); 6099 6099 mem_cgroup_print_bad_page(page); 6100 6100 } 6101 - 6102 - /* reset zone->present_pages */ 6103 - void reset_zone_present_pages(void) 6104 - { 6105 - struct zone *z; 6106 - int i, nid; 6107 - 6108 - for_each_node_state(nid, N_HIGH_MEMORY) { 6109 - for (i = 0; i < MAX_NR_ZONES; i++) { 6110 - z = NODE_DATA(nid)->node_zones + i; 6111 - z->present_pages = 0; 6112 - } 6113 - } 6114 - } 6115 - 6116 - /* calculate zone's present pages in buddy system */ 6117 - void fixup_zone_present_pages(int nid, unsigned long start_pfn, 6118 - unsigned long end_pfn) 6119 - { 6120 - struct zone *z; 6121 - unsigned long zone_start_pfn, zone_end_pfn; 6122 - int i; 6123 - 6124 - for (i = 0; i < MAX_NR_ZONES; i++) { 6125 - z = NODE_DATA(nid)->node_zones + i; 6126 - zone_start_pfn = z->zone_start_pfn; 6127 - zone_end_pfn = zone_start_pfn + z->spanned_pages; 6128 - 6129 - /* if the two regions intersect */ 6130 - if (!(zone_start_pfn >= end_pfn || zone_end_pfn <= start_pfn)) 6131 - z->present_pages += min(end_pfn, zone_end_pfn) - 6132 - max(start_pfn, zone_start_pfn); 6133 - } 6134 - }