Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

x86/mm: thread pgprot_t through init_memory_mapping()

In preparation for supporting a pgprot_t argument in arch_add_memory(), thread a pgprot_t parameter through init_memory_mapping() and down into kernel_physical_mapping_init(). All existing callers pass PAGE_KERNEL, so behavior is unchanged.

The prototype of init_memory_mapping() also has to move, from page_types.h to pgtable.h, since its original location came before the definition of pgprot_t.

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: Eric Badger <ebadger@gigaio.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Will Deacon <will@kernel.org>
Link: http://lkml.kernel.org/r/20200306170846.9333-4-logang@deltatee.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Logan Gunthorpe, committed by Linus Torvalds
commit c164fbb4 (parent f5637d3b)

8 files changed: +34 -25
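For existing callers the conversion below is mechanical: each call to init_memory_mapping() gains a trailing PAGE_KERNEL argument, so behavior is unchanged until a caller passes something else. A minimal before/after sketch of a call site (illustrative, not lifted verbatim from the patch):

	/* before: protections were fixed inside the mapping code */
	init_memory_mapping(start, end);

	/* after: the caller chooses; PAGE_KERNEL keeps the old behavior */
	init_memory_mapping(start, end, PAGE_KERNEL);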
arch/x86/include/asm/page_types.h (-3)

@@ -71,9 +71,6 @@
 
 bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);
 
-extern unsigned long init_memory_mapping(unsigned long start,
-					 unsigned long end);
-
 extern void initmem_init(void);
 
 #endif /* !__ASSEMBLY__ */
arch/x86/include/asm/pgtable.h (+3)

@@ -1081,6 +1081,9 @@
 
 void __init poking_init(void);
 
+unsigned long init_memory_mapping(unsigned long start,
+				  unsigned long end, pgprot_t prot);
+
 #ifdef CONFIG_RANDOMIZE_MEMORY
 void __meminit init_trampoline(void);
 #else
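The new home for the prototype matters: pgprot_t is visible by the time pgtable.h declares it, whereas the old spot in page_types.h is processed before pgprot_t has been defined. A reduced sketch of the failure the move avoids (the header names are real, the reduction itself is hypothetical):

	/* in page_types.h, before pgprot_t exists: */
	extern unsigned long init_memory_mapping(unsigned long start,
						 unsigned long end, pgprot_t prot);
	/* -> error: unknown type name 'pgprot_t' */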
arch/x86/kernel/amd_gart_64.c (+2 -1)

@@ -744,7 +744,8 @@
 
 	start_pfn = PFN_DOWN(aper_base);
 	if (!pfn_range_is_mapped(start_pfn, end_pfn))
-		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
+		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT,
+				    PAGE_KERNEL);
 
 	pr_info("PCI-DMA: using GART IOMMU.\n");
 	iommu_size = check_iommu_size(info.aper_base, aper_size);
arch/x86/mm/init.c (+5 -4)

@@ -467,7 +467,7 @@
  * the physical memory. To access them they are temporarily mapped.
  */
 unsigned long __ref init_memory_mapping(unsigned long start,
-					unsigned long end)
+					unsigned long end, pgprot_t prot)
 {
 	struct map_range mr[NR_RANGE_MR];
 	unsigned long ret = 0;
@@ -481,7 +481,8 @@
 
 	for (i = 0; i < nr_range; i++)
 		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
-						   mr[i].page_size_mask);
+						   mr[i].page_size_mask,
+						   prot);
 
 	add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);
 
@@ -522,7 +521,7 @@
 	 */
 	can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
 				    min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
-	init_memory_mapping(start, end);
+	init_memory_mapping(start, end, PAGE_KERNEL);
 	mapped_ram_size += end - start;
 	can_use_brk_pgt = true;
 }
@@ -662,7 +661,7 @@
 #endif
 
 	/* the ISA range is always mapped regardless of memory holes */
-	init_memory_mapping(0, ISA_END_ADDRESS);
+	init_memory_mapping(0, ISA_END_ADDRESS, PAGE_KERNEL);
 
 	/* Init the trampoline, possibly with KASLR memory offset */
 	init_trampoline();
arch/x86/mm/init_32.c (+2 -1)

@@ -257,7 +257,8 @@
 unsigned long __init
 kernel_physical_mapping_init(unsigned long start,
 			     unsigned long end,
-			     unsigned long page_size_mask)
+			     unsigned long page_size_mask,
+			     pgprot_t prot)
 {
 	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
 	unsigned long last_map_addr = end;
arch/x86/mm/init_64.c (+18 -14)

@@ -585,7 +585,7 @@
  */
 static unsigned long __meminit
 phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
-	      unsigned long page_size_mask, bool init)
+	      unsigned long page_size_mask, pgprot_t _prot, bool init)
 {
 	unsigned long pages = 0, paddr_next;
 	unsigned long paddr_last = paddr_end;
@@ -595,7 +595,7 @@
 	for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
 		pud_t *pud;
 		pmd_t *pmd;
-		pgprot_t prot = PAGE_KERNEL;
+		pgprot_t prot = _prot;
 
 		vaddr = (unsigned long)__va(paddr);
 		pud = pud_page + pud_index(vaddr);
@@ -644,9 +644,12 @@
 		if (page_size_mask & (1<<PG_LEVEL_1G)) {
 			pages++;
 			spin_lock(&init_mm.page_table_lock);
+
+			prot = __pgprot(pgprot_val(prot) | __PAGE_KERNEL_LARGE);
+
 			set_pte_init((pte_t *)pud,
 				     pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
-					     PAGE_KERNEL_LARGE),
+					     prot),
 				     init);
 			spin_unlock(&init_mm.page_table_lock);
 			paddr_last = paddr_next;
@@ -672,7 +669,7 @@
 
 static unsigned long __meminit
 phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
-	      unsigned long page_size_mask, bool init)
+	      unsigned long page_size_mask, pgprot_t prot, bool init)
 {
 	unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last;
 
@@ -682,7 +679,7 @@
 
 	if (!pgtable_l5_enabled())
 		return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end,
-				     page_size_mask, init);
+				     page_size_mask, prot, init);
 
 	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
 		p4d_t *p4d = p4d_page + p4d_index(vaddr);
@@ -705,13 +702,13 @@
 		if (!p4d_none(*p4d)) {
 			pud = pud_offset(p4d, 0);
 			paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
-						   page_size_mask, init);
+						   page_size_mask, prot, init);
 			continue;
 		}
 
 		pud = alloc_low_page();
 		paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
-					   page_size_mask, init);
+					   page_size_mask, prot, init);
 
 		spin_lock(&init_mm.page_table_lock);
 		p4d_populate_init(&init_mm, p4d, pud, init);
@@ -725,7 +722,7 @@
 __kernel_physical_mapping_init(unsigned long paddr_start,
 			       unsigned long paddr_end,
 			       unsigned long page_size_mask,
-			       bool init)
+			       pgprot_t prot, bool init)
 {
 	bool pgd_changed = false;
 	unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;
@@ -746,13 +743,13 @@
 			paddr_last = phys_p4d_init(p4d, __pa(vaddr),
 						   __pa(vaddr_end),
 						   page_size_mask,
-						   init);
+						   prot, init);
 			continue;
 		}
 
 		p4d = alloc_low_page();
 		paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
-					   page_size_mask, init);
+					   page_size_mask, prot, init);
 
 		spin_lock(&init_mm.page_table_lock);
 		if (pgtable_l5_enabled())
@@ -781,10 +778,10 @@
 unsigned long __meminit
 kernel_physical_mapping_init(unsigned long paddr_start,
 			     unsigned long paddr_end,
-			     unsigned long page_size_mask)
+			     unsigned long page_size_mask, pgprot_t prot)
 {
 	return __kernel_physical_mapping_init(paddr_start, paddr_end,
-					      page_size_mask, true);
+					      page_size_mask, prot, true);
 }
 
 /*
@@ -799,7 +796,8 @@
 			unsigned long page_size_mask)
 {
 	return __kernel_physical_mapping_init(paddr_start, paddr_end,
-					      page_size_mask, false);
+					      page_size_mask, PAGE_KERNEL,
+					      false);
 }
 
 #ifndef CONFIG_NUMA
@@ -867,7 +863,7 @@
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 
-	init_memory_mapping(start, start + size);
+	init_memory_mapping(start, start + size, PAGE_KERNEL);
 
 	return add_pages(nid, start_pfn, nr_pages, params);
 }
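One hunk in init_64.c is more than mechanical threading: in phys_pud_init(), 1GiB mappings used to be created with a fixed PAGE_KERNEL_LARGE, but now the caller's prot is OR'ed with __PAGE_KERNEL_LARGE, so caller-supplied protection bits survive into large pages. A standalone model of that composition step, runnable outside the kernel; the pgprot_t wrapper mirrors the kernel's, but the bit values are invented for illustration:

	#include <stdio.h>

	/* minimal model of the kernel's pgprot_t wrapper */
	typedef struct { unsigned long pgprot; } pgprot_t;
	#define __pgprot(x)	((pgprot_t) { (x) })
	#define pgprot_val(x)	((x).pgprot)

	/* invented bit values, NOT the real x86 definitions */
	#define _PAGE_CALLER		(1UL << 51)	/* hypothetical caller-supplied bit */
	#define __PAGE_KERNEL_LARGE	0x1e3UL		/* hypothetical large-page bits */

	int main(void)
	{
		pgprot_t prot = __pgprot(_PAGE_CALLER);	/* prot as passed in */

		/* the composition step added to phys_pud_init() */
		prot = __pgprot(pgprot_val(prot) | __PAGE_KERNEL_LARGE);

		printf("final prot %#lx, caller bit kept: %s\n",
		       pgprot_val(prot),
		       (pgprot_val(prot) & _PAGE_CALLER) ? "yes" : "no");
		return 0;
	}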
arch/x86/mm/mm_internal.h (+2 -1)

@@ -12,7 +12,8 @@
 
 unsigned long kernel_physical_mapping_init(unsigned long start,
 					   unsigned long end,
-					   unsigned long page_size_mask);
+					   unsigned long page_size_mask,
+					   pgprot_t prot);
 unsigned long kernel_physical_mapping_change(unsigned long start,
 					     unsigned long end,
 					     unsigned long page_size_mask);
arch/x86/platform/uv/bios_uv.c (+2 -1)

@@ -352,7 +352,8 @@
 	if (type == EFI_MEMORY_MAPPED_IO)
 		return ioremap(phys_addr, size);
 
-	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
+	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size,
+					   PAGE_KERNEL);
 	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
 		unsigned long top = last_map_pfn << PAGE_SHIFT;
 		efi_ioremap(top, size - (top - phys_addr), type, attribute);
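The endpoint this series is driving toward is an arch_add_memory() that maps hot-added memory with a caller-chosen protection rather than a hard-coded PAGE_KERNEL. A hedged sketch of what the x86 arch_add_memory() shown above presumably becomes once a follow-up threads through the pgprot carried in struct mhp_params (field name per that series; not part of this patch):

	int arch_add_memory(int nid, u64 start, u64 size,
			    struct mhp_params *params)
	{
		unsigned long start_pfn = start >> PAGE_SHIFT;
		unsigned long nr_pages = size >> PAGE_SHIFT;

		/* caller-chosen protections instead of a fixed PAGE_KERNEL */
		init_memory_mapping(start, start + size, params->pgprot);

		return add_pages(nid, start_pfn, nr_pages, params);
	}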