Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm, show_mem: remove SHOW_MEM_FILTER_PAGE_COUNT

Commit 4b59e6c47309 ("mm, show_mem: suppress page counts in
non-blockable contexts") introduced SHOW_MEM_FILTER_PAGE_COUNT to
suppress PFN walks on large memory machines. Commit c78e93630d15 ("mm:
do not walk all of system memory during show_mem") avoided a PFN walk in
the generic show_mem helper which removes the requirement for
SHOW_MEM_FILTER_PAGE_COUNT in that case.

This patch removes PFN walkers from the arch-specific implementations
that report on a per-node or per-zone granularity. ARM and unicore32
still do a PFN walk as they report memory usage on each bank which is a
much finer granularity where the debugging information may still be of
use. As the remaining arches doing PFN walks have relatively small
amounts of memory, this patch simply removes SHOW_MEM_FILTER_PAGE_COUNT.

[akpm@linux-foundation.org: fix parisc]
Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: James Bottomley <jejb@parisc-linux.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Mel Gorman and committed by Linus Torvalds
aec6a888 ece86e22

+65 -190
-3
arch/arm/mm/init.c
··· 92 92 printk("Mem-info:\n"); 93 93 show_free_areas(filter); 94 94 95 - if (filter & SHOW_MEM_FILTER_PAGE_COUNT) 96 - return; 97 - 98 95 for_each_bank (i, mi) { 99 96 struct membank *bank = &mi->bank[i]; 100 97 unsigned int pfn1, pfn2;
-68
arch/ia64/mm/contig.c
··· 31 31 static unsigned long max_gap; 32 32 #endif 33 33 34 - /** 35 - * show_mem - give short summary of memory stats 36 - * 37 - * Shows a simple page count of reserved and used pages in the system. 38 - * For discontig machines, it does this on a per-pgdat basis. 39 - */ 40 - void show_mem(unsigned int filter) 41 - { 42 - int i, total_reserved = 0; 43 - int total_shared = 0, total_cached = 0; 44 - unsigned long total_present = 0; 45 - pg_data_t *pgdat; 46 - 47 - printk(KERN_INFO "Mem-info:\n"); 48 - show_free_areas(filter); 49 - printk(KERN_INFO "Node memory in pages:\n"); 50 - if (filter & SHOW_MEM_FILTER_PAGE_COUNT) 51 - return; 52 - for_each_online_pgdat(pgdat) { 53 - unsigned long present; 54 - unsigned long flags; 55 - int shared = 0, cached = 0, reserved = 0; 56 - int nid = pgdat->node_id; 57 - 58 - if (skip_free_areas_node(filter, nid)) 59 - continue; 60 - pgdat_resize_lock(pgdat, &flags); 61 - present = pgdat->node_present_pages; 62 - for(i = 0; i < pgdat->node_spanned_pages; i++) { 63 - struct page *page; 64 - if (unlikely(i % MAX_ORDER_NR_PAGES == 0)) 65 - touch_nmi_watchdog(); 66 - if (pfn_valid(pgdat->node_start_pfn + i)) 67 - page = pfn_to_page(pgdat->node_start_pfn + i); 68 - else { 69 - #ifdef CONFIG_VIRTUAL_MEM_MAP 70 - if (max_gap < LARGE_GAP) 71 - continue; 72 - #endif 73 - i = vmemmap_find_next_valid_pfn(nid, i) - 1; 74 - continue; 75 - } 76 - if (PageReserved(page)) 77 - reserved++; 78 - else if (PageSwapCache(page)) 79 - cached++; 80 - else if (page_count(page)) 81 - shared += page_count(page)-1; 82 - } 83 - pgdat_resize_unlock(pgdat, &flags); 84 - total_present += present; 85 - total_reserved += reserved; 86 - total_cached += cached; 87 - total_shared += shared; 88 - printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, " 89 - "shrd: %10d, swpd: %10d\n", nid, 90 - present, reserved, shared, cached); 91 - } 92 - printk(KERN_INFO "%ld pages of RAM\n", total_present); 93 - printk(KERN_INFO "%d reserved pages\n", total_reserved); 94 - 
printk(KERN_INFO "%d pages shared\n", total_shared); 95 - printk(KERN_INFO "%d pages swap cached\n", total_cached); 96 - printk(KERN_INFO "Total of %ld pages in page table cache\n", 97 - quicklist_total_size()); 98 - printk(KERN_INFO "%ld free buffer pages\n", nr_free_buffer_pages()); 99 - } 100 - 101 - 102 34 /* physical address where the bootmem map is located */ 103 35 unsigned long bootmap_start; 104 36
-63
arch/ia64/mm/discontig.c
··· 608 608 #endif /* CONFIG_SMP */ 609 609 610 610 /** 611 - * show_mem - give short summary of memory stats 612 - * 613 - * Shows a simple page count of reserved and used pages in the system. 614 - * For discontig machines, it does this on a per-pgdat basis. 615 - */ 616 - void show_mem(unsigned int filter) 617 - { 618 - int i, total_reserved = 0; 619 - int total_shared = 0, total_cached = 0; 620 - unsigned long total_present = 0; 621 - pg_data_t *pgdat; 622 - 623 - printk(KERN_INFO "Mem-info:\n"); 624 - show_free_areas(filter); 625 - if (filter & SHOW_MEM_FILTER_PAGE_COUNT) 626 - return; 627 - printk(KERN_INFO "Node memory in pages:\n"); 628 - for_each_online_pgdat(pgdat) { 629 - unsigned long present; 630 - unsigned long flags; 631 - int shared = 0, cached = 0, reserved = 0; 632 - int nid = pgdat->node_id; 633 - 634 - if (skip_free_areas_node(filter, nid)) 635 - continue; 636 - pgdat_resize_lock(pgdat, &flags); 637 - present = pgdat->node_present_pages; 638 - for(i = 0; i < pgdat->node_spanned_pages; i++) { 639 - struct page *page; 640 - if (unlikely(i % MAX_ORDER_NR_PAGES == 0)) 641 - touch_nmi_watchdog(); 642 - if (pfn_valid(pgdat->node_start_pfn + i)) 643 - page = pfn_to_page(pgdat->node_start_pfn + i); 644 - else { 645 - i = vmemmap_find_next_valid_pfn(nid, i) - 1; 646 - continue; 647 - } 648 - if (PageReserved(page)) 649 - reserved++; 650 - else if (PageSwapCache(page)) 651 - cached++; 652 - else if (page_count(page)) 653 - shared += page_count(page)-1; 654 - } 655 - pgdat_resize_unlock(pgdat, &flags); 656 - total_present += present; 657 - total_reserved += reserved; 658 - total_cached += cached; 659 - total_shared += shared; 660 - printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, " 661 - "shrd: %10d, swpd: %10d\n", nid, 662 - present, reserved, shared, cached); 663 - } 664 - printk(KERN_INFO "%ld pages of RAM\n", total_present); 665 - printk(KERN_INFO "%d reserved pages\n", total_reserved); 666 - printk(KERN_INFO "%d pages shared\n", total_shared); 667 - 
printk(KERN_INFO "%d pages swap cached\n", total_cached); 668 - printk(KERN_INFO "Total of %ld pages in page table cache\n", 669 - quicklist_total_size()); 670 - printk(KERN_INFO "%ld free buffer pages\n", nr_free_buffer_pages()); 671 - } 672 - 673 - /** 674 611 * call_pernode_memory - use SRAT to call callback functions with node info 675 612 * @start: physical start of range 676 613 * @len: length of range
+48
arch/ia64/mm/init.c
··· 684 684 } 685 685 686 686 __initcall(per_linux32_init); 687 + 688 + /** 689 + * show_mem - give short summary of memory stats 690 + * 691 + * Shows a simple page count of reserved and used pages in the system. 692 + * For discontig machines, it does this on a per-pgdat basis. 693 + */ 694 + void show_mem(unsigned int filter) 695 + { 696 + int total_reserved = 0; 697 + unsigned long total_present = 0; 698 + pg_data_t *pgdat; 699 + 700 + printk(KERN_INFO "Mem-info:\n"); 701 + show_free_areas(filter); 702 + printk(KERN_INFO "Node memory in pages:\n"); 703 + for_each_online_pgdat(pgdat) { 704 + unsigned long present; 705 + unsigned long flags; 706 + int reserved = 0; 707 + int nid = pgdat->node_id; 708 + int zoneid; 709 + 710 + if (skip_free_areas_node(filter, nid)) 711 + continue; 712 + pgdat_resize_lock(pgdat, &flags); 713 + 714 + for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 715 + struct zone *zone = &pgdat->node_zones[zoneid]; 716 + if (!populated_zone(zone)) 717 + continue; 718 + 719 + reserved += zone->present_pages - zone->managed_pages; 720 + } 721 + present = pgdat->node_present_pages; 722 + 723 + pgdat_resize_unlock(pgdat, &flags); 724 + total_present += present; 725 + total_reserved += reserved; 726 + printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, ", 727 + nid, present, reserved); 728 + } 729 + printk(KERN_INFO "%ld pages of RAM\n", total_present); 730 + printk(KERN_INFO "%d reserved pages\n", total_reserved); 731 + printk(KERN_INFO "Total of %ld pages in page table cache\n", 732 + quicklist_total_size()); 733 + printk(KERN_INFO "%ld free buffer pages\n", nr_free_buffer_pages()); 734 + }
+17 -42
arch/parisc/mm/init.c
··· 645 645 646 646 void show_mem(unsigned int filter) 647 647 { 648 - int i,free = 0,total = 0,reserved = 0; 649 - int shared = 0, cached = 0; 648 + int total = 0,reserved = 0; 649 + pg_data_t *pgdat; 650 650 651 651 printk(KERN_INFO "Mem-info:\n"); 652 652 show_free_areas(filter); 653 - if (filter & SHOW_MEM_FILTER_PAGE_COUNT) 654 - return; 655 - #ifndef CONFIG_DISCONTIGMEM 656 - i = max_mapnr; 657 - while (i-- > 0) { 658 - total++; 659 - if (PageReserved(mem_map+i)) 660 - reserved++; 661 - else if (PageSwapCache(mem_map+i)) 662 - cached++; 663 - else if (!page_count(&mem_map[i])) 664 - free++; 665 - else 666 - shared += page_count(&mem_map[i]) - 1; 653 + 654 + for_each_online_pgdat(pgdat) { 655 + unsigned long flags; 656 + int zoneid; 657 + 658 + pgdat_resize_lock(pgdat, &flags); 659 + for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 660 + struct zone *zone = &pgdat->node_zones[zoneid]; 661 + if (!populated_zone(zone)) 662 + continue; 663 + 664 + total += zone->present_pages; 665 + reserved = zone->present_pages - zone->managed_pages; 666 + } 667 + pgdat_resize_unlock(pgdat, &flags); 667 668 } 668 - #else 669 - for (i = 0; i < npmem_ranges; i++) { 670 - int j; 671 669 672 - for (j = node_start_pfn(i); j < node_end_pfn(i); j++) { 673 - struct page *p; 674 - unsigned long flags; 675 - 676 - pgdat_resize_lock(NODE_DATA(i), &flags); 677 - p = nid_page_nr(i, j) - node_start_pfn(i); 678 - 679 - total++; 680 - if (PageReserved(p)) 681 - reserved++; 682 - else if (PageSwapCache(p)) 683 - cached++; 684 - else if (!page_count(p)) 685 - free++; 686 - else 687 - shared += page_count(p) - 1; 688 - pgdat_resize_unlock(NODE_DATA(i), &flags); 689 - } 690 - } 691 - #endif 692 670 printk(KERN_INFO "%d pages of RAM\n", total); 693 671 printk(KERN_INFO "%d reserved pages\n", reserved); 694 - printk(KERN_INFO "%d pages shared\n", shared); 695 - printk(KERN_INFO "%d pages swap cached\n", cached); 696 - 697 672 698 673 #ifdef CONFIG_DISCONTIGMEM 699 674 {
-3
arch/unicore32/mm/init.c
··· 66 66 printk(KERN_DEFAULT "Mem-info:\n"); 67 67 show_free_areas(filter); 68 68 69 - if (filter & SHOW_MEM_FILTER_PAGE_COUNT) 70 - return; 71 - 72 69 for_each_bank(i, mi) { 73 70 struct membank *bank = &mi->bank[i]; 74 71 unsigned int pfn1, pfn2;
-1
include/linux/mm.h
··· 1016 1016 * various contexts. 1017 1017 */ 1018 1018 #define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */ 1019 - #define SHOW_MEM_FILTER_PAGE_COUNT (0x0002u) /* page type count */ 1020 1019 1021 1020 extern void show_free_areas(unsigned int flags); 1022 1021 extern bool skip_free_areas_node(unsigned int flags, int nid);
-3
lib/show_mem.c
··· 17 17 printk("Mem-Info:\n"); 18 18 show_free_areas(filter); 19 19 20 - if (filter & SHOW_MEM_FILTER_PAGE_COUNT) 21 - return; 22 - 23 20 for_each_online_pgdat(pgdat) { 24 21 unsigned long flags; 25 22 int zoneid;
-7
mm/page_alloc.c
··· 2072 2072 return; 2073 2073 2074 2074 /* 2075 - * Walking all memory to count page types is very expensive and should 2076 - * be inhibited in non-blockable contexts. 2077 - */ 2078 - if (!(gfp_mask & __GFP_WAIT)) 2079 - filter |= SHOW_MEM_FILTER_PAGE_COUNT; 2080 - 2081 - /* 2082 2075 * This documents exceptions given to allocations in certain 2083 2076 * contexts that are allowed to allocate outside current's set 2084 2077 * of allowed nodes.