Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

mm: reuse pageblock_start/end_pfn() macro

Move pageblock_start_pfn()/pageblock_end_pfn() into pageblock-flags.h so
they can be used elsewhere, not only in compaction.  Also use ALIGN_DOWN()
instead of round_down() to pair with ALIGN(); the two are equivalent here,
since pageblock_nr_pages is a power of two.

Link: https://lkml.kernel.org/r/20220907060844.126891-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
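
The claim that ALIGN_DOWN() and round_down() agree for pageblock usage holds
because pageblock_nr_pages is always a power of two. Below is a minimal
userspace sketch of the new helpers' semantics; the macro bodies are
simplified power-of-two forms of the kernel's align.h definitions, and
pageblock_order = 9 is an assumed example value (the real value is
arch/config dependent):

	#include <assert.h>
	#include <stdio.h>

	/* Assumed example value for illustration only. */
	#define pageblock_order			9
	#define pageblock_nr_pages		(1UL << pageblock_order)

	/* Simplified power-of-two forms of the kernel's macros. */
	#define ALIGN(x, a)			(((x) + ((a) - 1)) & ~((a) - 1))
	#define ALIGN_DOWN(x, a)		((x) & ~((a) - 1))
	#define round_down(x, y)		((x) & ~((y) - 1))

	/* The two helpers this patch moves into pageblock-flags.h. */
	#define pageblock_start_pfn(pfn)	ALIGN_DOWN((pfn), pageblock_nr_pages)
	#define pageblock_end_pfn(pfn)		ALIGN((pfn) + 1, pageblock_nr_pages)

	int main(void)
	{
		unsigned long pfn = 1234567;

		/* ALIGN_DOWN() and round_down() agree for power-of-two sizes. */
		assert(ALIGN_DOWN(pfn, pageblock_nr_pages) ==
		       round_down(pfn, pageblock_nr_pages));

		/* Start is inclusive, end is exclusive (first pfn past the block). */
		printf("pageblock of pfn %lu: [%lu, %lu)\n",
		       pfn, pageblock_start_pfn(pfn), pageblock_end_pfn(pfn));
		return 0;
	}

Note that pageblock_start_pfn() is inclusive while pageblock_end_pfn() is
exclusive, which is why several hunks below subtract 1 to get the last pfn
of a block.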

Authored by Kefeng Wang, committed by Andrew Morton (4f9bc69a 0bba9af0)

6 files changed, 16 insertions(+), 18 deletions(-)

include/linux/pageblock-flags.h (+2)
@@ -53,6 +53,8 @@
 #endif /* CONFIG_HUGETLB_PAGE */
 
 #define pageblock_nr_pages	(1UL << pageblock_order)
+#define pageblock_start_pfn(pfn)	ALIGN_DOWN((pfn), pageblock_nr_pages)
+#define pageblock_end_pfn(pfn)	ALIGN((pfn) + 1, pageblock_nr_pages)
 
 /* Forward declaration */
 struct page;
mm/compaction.c (-2)
@@ -52,8 +52,6 @@
 
 #define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
 #define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
-#define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
-#define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)
 
 /*
  * Page order with-respect-to which proactive compaction
mm/memblock.c (+1 -1)
@@ -2000,7 +2000,7 @@
 	 * presume that there are no holes in the memory map inside
 	 * a pageblock
 	 */
-	start = round_down(start, pageblock_nr_pages);
+	start = pageblock_start_pfn(start);
 
 	/*
 	 * If we had a previous bank, and there is a space
mm/page_alloc.c (+6 -7)
@@ -544,7 +544,7 @@
 #ifdef CONFIG_SPARSEMEM
 	pfn &= (PAGES_PER_SECTION-1);
 #else
-	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
+	pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
 #endif /* CONFIG_SPARSEMEM */
 	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 }
@@ -1857,7 +1857,7 @@
 	unsigned long block_start_pfn = zone->zone_start_pfn;
 	unsigned long block_end_pfn;
 
-	block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
+	block_end_pfn = pageblock_end_pfn(block_start_pfn);
 	for (; block_start_pfn < zone_end_pfn(zone);
 			block_start_pfn = block_end_pfn,
 			block_end_pfn += pageblock_nr_pages) {
@@ -2653,8 +2653,8 @@
 		*num_movable = 0;
 
 	pfn = page_to_pfn(page);
-	start_pfn = pfn & ~(pageblock_nr_pages - 1);
-	end_pfn = start_pfn + pageblock_nr_pages - 1;
+	start_pfn = pageblock_start_pfn(pfn);
+	end_pfn = pageblock_end_pfn(pfn) - 1;
 
 	/* Do not cross zone boundaries */
 	if (!zone_spans_pfn(zone, start_pfn))
@@ -6934,9 +6934,8 @@
 	u64 pgcnt = 0;
 
 	for (pfn = spfn; pfn < epfn; pfn++) {
-		if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
-			pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
-			      + pageblock_nr_pages - 1;
+		if (!pfn_valid(pageblock_start_pfn(pfn))) {
+			pfn = pageblock_end_pfn(pfn) - 1;
 			continue;
 		}
 		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
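The hunk around mm/page_alloc.c line 2653 also replaces an open-coded mask,
pfn & ~(pageblock_nr_pages - 1), with pageblock_start_pfn(). A standalone
check that the old and new forms agree for every pfn, again under the
simplified power-of-two macro definitions and the assumed pageblock_order
from the sketch above:

	#include <assert.h>

	#define pageblock_order			9	/* assumed example value */
	#define pageblock_nr_pages		(1UL << pageblock_order)
	#define ALIGN(x, a)			(((x) + ((a) - 1)) & ~((a) - 1))
	#define ALIGN_DOWN(x, a)		((x) & ~((a) - 1))
	#define pageblock_start_pfn(pfn)	ALIGN_DOWN((pfn), pageblock_nr_pages)
	#define pageblock_end_pfn(pfn)		ALIGN((pfn) + 1, pageblock_nr_pages)

	int main(void)
	{
		for (unsigned long pfn = 0; pfn < 4 * pageblock_nr_pages; pfn++) {
			/* old open-coded mask == new helper */
			assert((pfn & ~(pageblock_nr_pages - 1)) ==
			       pageblock_start_pfn(pfn));
			/* old "start_pfn + pageblock_nr_pages - 1" ==
			 * new "pageblock_end_pfn(pfn) - 1" (last pfn of block) */
			assert(pageblock_start_pfn(pfn) + pageblock_nr_pages - 1 ==
			       pageblock_end_pfn(pfn) - 1);
		}
		return 0;
	}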
mm/page_isolation.c (+5 -6)
@@ -37,8 +37,8 @@
 	struct zone *zone = page_zone(page);
 	unsigned long pfn;
 
-	VM_BUG_ON(ALIGN_DOWN(start_pfn, pageblock_nr_pages) !=
-		  ALIGN_DOWN(end_pfn - 1, pageblock_nr_pages));
+	VM_BUG_ON(pageblock_start_pfn(start_pfn) !=
+		  pageblock_start_pfn(end_pfn - 1));
 
 	if (is_migrate_cma_page(page)) {
 		/*
@@ -172,7 +172,7 @@
 	 * to avoid redundant checks.
 	 */
 	check_unmovable_start = max(page_to_pfn(page), start_pfn);
-	check_unmovable_end = min(ALIGN(page_to_pfn(page) + 1, pageblock_nr_pages),
+	check_unmovable_end = min(pageblock_end_pfn(page_to_pfn(page)),
 				  end_pfn);
 
 	unmovable = has_unmovable_pages(check_unmovable_start, check_unmovable_end,
@@ -532,7 +532,7 @@
 	unsigned long pfn;
 	struct page *page;
 	/* isolation is done at page block granularity */
-	unsigned long isolate_start = ALIGN_DOWN(start_pfn, pageblock_nr_pages);
+	unsigned long isolate_start = pageblock_start_pfn(start_pfn);
 	unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages);
 	int ret;
 	bool skip_isolation = false;
@@ -579,9 +579,8 @@
 {
 	unsigned long pfn;
 	struct page *page;
-	unsigned long isolate_start = ALIGN_DOWN(start_pfn, pageblock_nr_pages);
+	unsigned long isolate_start = pageblock_start_pfn(start_pfn);
 	unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages);
-
 
 	for (pfn = isolate_start;
 	     pfn < isolate_end;
mm/page_owner.c (+2 -2)
@@ -297,7 +297,7 @@
 			continue;
 		}
 
-		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+		block_end_pfn = pageblock_end_pfn(pfn);
 		block_end_pfn = min(block_end_pfn, end_pfn);
 
 		pageblock_mt = get_pageblock_migratetype(page);
@@ -635,7 +635,7 @@
 			continue;
 		}
 
-		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+		block_end_pfn = pageblock_end_pfn(pfn);
 		block_end_pfn = min(block_end_pfn, end_pfn);
 
 		for (; pfn < block_end_pfn; pfn++) {