Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/sparsemem: use PAGES_PER_SECTION to remove redundant nr_pages parameter

For the functions below,

- sparse_add_one_section()
- kmalloc_section_memmap()
- __kmalloc_section_memmap()
- __kfree_section_memmap()

they are always invoked to operate on one memory section, so it is
redundant to always pass an nr_pages parameter, which is the number of
pages in one section. So we can directly use the predefined macro
PAGES_PER_SECTION instead of passing the parameter.

Signed-off-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Cc: Tang Chen <tangchen@cn.fujitsu.com>
Cc: Toshi Kani <toshi.kani@hp.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Zhang Yanfei and committed by Linus Torvalds.
Commit hashes: 85b35fea 071aee13

+17 -22
+1 -2
include/linux/memory_hotplug.h
··· 268 268 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); 269 269 extern bool is_memblock_offlined(struct memory_block *mem); 270 270 extern void remove_memory(int nid, u64 start, u64 size); 271 - extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn, 272 - int nr_pages); 271 + extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn); 273 272 extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms); 274 273 extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, 275 274 unsigned long pnum);
+1 -2
mm/memory_hotplug.c
··· 401 401 static int __meminit __add_section(int nid, struct zone *zone, 402 402 unsigned long phys_start_pfn) 403 403 { 404 - int nr_pages = PAGES_PER_SECTION; 405 404 int ret; 406 405 407 406 if (pfn_valid(phys_start_pfn)) 408 407 return -EEXIST; 409 408 410 - ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages); 409 + ret = sparse_add_one_section(zone, phys_start_pfn); 411 410 412 411 if (ret < 0) 413 412 return ret;
+15 -18
mm/sparse.c
··· 590 590 591 591 #ifdef CONFIG_MEMORY_HOTPLUG 592 592 #ifdef CONFIG_SPARSEMEM_VMEMMAP 593 - static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid, 594 - unsigned long nr_pages) 593 + static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid) 595 594 { 596 595 /* This will make the necessary allocations eventually. */ 597 596 return sparse_mem_map_populate(pnum, nid); 598 597 } 599 - static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages) 598 + static void __kfree_section_memmap(struct page *memmap) 600 599 { 601 600 unsigned long start = (unsigned long)memmap; 602 - unsigned long end = (unsigned long)(memmap + nr_pages); 601 + unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION); 603 602 604 603 vmemmap_free(start, end); 605 604 } ··· 612 613 } 613 614 #endif /* CONFIG_MEMORY_HOTREMOVE */ 614 615 #else 615 - static struct page *__kmalloc_section_memmap(unsigned long nr_pages) 616 + static struct page *__kmalloc_section_memmap(void) 616 617 { 617 618 struct page *page, *ret; 618 - unsigned long memmap_size = sizeof(struct page) * nr_pages; 619 + unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION; 619 620 620 621 page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size)); 621 622 if (page) ··· 633 634 return ret; 634 635 } 635 636 636 - static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid, 637 - unsigned long nr_pages) 637 + static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid) 638 638 { 639 - return __kmalloc_section_memmap(nr_pages); 639 + return __kmalloc_section_memmap(); 640 640 } 641 641 642 - static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages) 642 + static void __kfree_section_memmap(struct page *memmap) 643 643 { 644 644 if (is_vmalloc_addr(memmap)) 645 645 vfree(memmap); 646 646 else 647 647 free_pages((unsigned long)memmap, 648 - get_order(sizeof(struct page) * nr_pages)); 
648 + get_order(sizeof(struct page) * PAGES_PER_SECTION)); 649 649 } 650 650 651 651 #ifdef CONFIG_MEMORY_HOTREMOVE ··· 682 684 * set. If this is <=0, then that means that the passed-in 683 685 * map was not consumed and must be freed. 684 686 */ 685 - int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn, 686 - int nr_pages) 687 + int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn) 687 688 { 688 689 unsigned long section_nr = pfn_to_section_nr(start_pfn); 689 690 struct pglist_data *pgdat = zone->zone_pgdat; ··· 699 702 ret = sparse_index_init(section_nr, pgdat->node_id); 700 703 if (ret < 0 && ret != -EEXIST) 701 704 return ret; 702 - memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages); 705 + memmap = kmalloc_section_memmap(section_nr, pgdat->node_id); 703 706 if (!memmap) 704 707 return -ENOMEM; 705 708 usemap = __kmalloc_section_usemap(); 706 709 if (!usemap) { 707 - __kfree_section_memmap(memmap, nr_pages); 710 + __kfree_section_memmap(memmap); 708 711 return -ENOMEM; 709 712 } 710 713 ··· 716 719 goto out; 717 720 } 718 721 719 - memset(memmap, 0, sizeof(struct page) * nr_pages); 722 + memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION); 720 723 721 724 ms->section_mem_map |= SECTION_MARKED_PRESENT; 722 725 ··· 726 729 pgdat_resize_unlock(pgdat, &flags); 727 730 if (ret <= 0) { 728 731 kfree(usemap); 729 - __kfree_section_memmap(memmap, nr_pages); 732 + __kfree_section_memmap(memmap); 730 733 } 731 734 return ret; 732 735 } ··· 768 771 if (PageSlab(usemap_page) || PageCompound(usemap_page)) { 769 772 kfree(usemap); 770 773 if (memmap) 771 - __kfree_section_memmap(memmap, PAGES_PER_SECTION); 774 + __kfree_section_memmap(memmap); 772 775 return; 773 776 } 774 777