Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/ioremap: pass pgprot_t to ioremap_prot() instead of unsigned long

ioremap_prot() currently accepts the pgprot_val parameter as an unsigned
long, thus implicitly assuming that pgprot_val and pgprot_t can never be
bigger than unsigned long. But this assumption will soon no longer hold on
arm64 when using D128 pgtables. In the 128-bit page table configuration,
unsigned long is 64 bits, but pgprot_t is 128 bits.

Passing the platform-abstracted pgprot_t argument is better than using
size-based data types. Let's change the parameter to pass pgprot_t
directly, like the similar helper generic_ioremap_prot().

Without this change in place, the D128 configuration does not work on
arm64, as the top 64 bits get silently stripped when the protection value
is passed to this function.

Link: https://lkml.kernel.org/r/20250218101954.415331-1-anshuman.khandual@arm.com
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Co-developed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com> [arm64]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Ryan Roberts and committed by Andrew Morton
86758b50 af3b45aa

+55 -59
+2 -4
arch/arc/mm/ioremap.c
··· 32 32 return (void __iomem *)(u32)paddr; 33 33 34 34 return ioremap_prot(paddr, size, 35 - pgprot_val(pgprot_noncached(PAGE_KERNEL))); 35 + pgprot_noncached(PAGE_KERNEL)); 36 36 } 37 37 EXPORT_SYMBOL(ioremap); 38 38 ··· 44 44 * might need finer access control (R/W/X) 45 45 */ 46 46 void __iomem *ioremap_prot(phys_addr_t paddr, size_t size, 47 - unsigned long flags) 47 + pgprot_t prot) 48 48 { 49 - pgprot_t prot = __pgprot(flags); 50 - 51 49 /* force uncached */ 52 50 return generic_ioremap_prot(paddr, size, pgprot_noncached(prot)); 53 51 }
+3 -3
arch/arm64/include/asm/io.h
··· 270 270 #define _PAGE_IOREMAP PROT_DEVICE_nGnRE 271 271 272 272 #define ioremap_wc(addr, size) \ 273 - ioremap_prot((addr), (size), PROT_NORMAL_NC) 273 + ioremap_prot((addr), (size), __pgprot(PROT_NORMAL_NC)) 274 274 #define ioremap_np(addr, size) \ 275 - ioremap_prot((addr), (size), PROT_DEVICE_nGnRnE) 275 + ioremap_prot((addr), (size), __pgprot(PROT_DEVICE_nGnRnE)) 276 276 277 277 /* 278 278 * io{read,write}{16,32,64}be() macros ··· 293 293 if (pfn_is_map_memory(__phys_to_pfn(addr))) 294 294 return (void __iomem *)__phys_to_virt(addr); 295 295 296 - return ioremap_prot(addr, size, PROT_NORMAL); 296 + return ioremap_prot(addr, size, __pgprot(PROT_NORMAL)); 297 297 } 298 298 299 299 /*
+1 -1
arch/arm64/kernel/acpi.c
··· 379 379 prot = __acpi_get_writethrough_mem_attribute(); 380 380 } 381 381 } 382 - return ioremap_prot(phys, size, pgprot_val(prot)); 382 + return ioremap_prot(phys, size, prot); 383 383 } 384 384 385 385 /*
+1 -2
arch/arm64/mm/ioremap.c
··· 15 15 } 16 16 17 17 void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, 18 - unsigned long prot) 18 + pgprot_t pgprot) 19 19 { 20 20 unsigned long last_addr = phys_addr + size - 1; 21 - pgprot_t pgprot = __pgprot(prot); 22 21 23 22 /* Don't allow outside PHYS_MASK */ 24 23 if (last_addr & ~PHYS_MASK)
+1 -1
arch/csky/include/asm/io.h
··· 36 36 */ 37 37 #define ioremap_wc(addr, size) \ 38 38 ioremap_prot((addr), (size), \ 39 - (_PAGE_IOREMAP & ~_CACHE_MASK) | _CACHE_UNCACHED) 39 + __pgprot((_PAGE_IOREMAP & ~_CACHE_MASK) | _CACHE_UNCACHED)) 40 40 41 41 #include <asm-generic/io.h> 42 42
+5 -5
arch/loongarch/include/asm/io.h
··· 23 23 #ifdef CONFIG_ARCH_IOREMAP 24 24 25 25 static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, 26 - unsigned long prot_val) 26 + pgprot_t prot) 27 27 { 28 - switch (prot_val & _CACHE_MASK) { 28 + switch (pgprot_val(prot) & _CACHE_MASK) { 29 29 case _CACHE_CC: 30 30 return (void __iomem *)(unsigned long)(CACHE_BASE + offset); 31 31 case _CACHE_SUC: ··· 38 38 } 39 39 40 40 #define ioremap(offset, size) \ 41 - ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_SUC)) 41 + ioremap_prot((offset), (size), PAGE_KERNEL_SUC) 42 42 43 43 #define iounmap(addr) ((void)(addr)) 44 44 ··· 55 55 */ 56 56 #define ioremap_wc(offset, size) \ 57 57 ioremap_prot((offset), (size), \ 58 - pgprot_val(wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC)) 58 + wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC) 59 59 60 60 #define ioremap_cache(offset, size) \ 61 - ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL)) 61 + ioremap_prot((offset), (size), PAGE_KERNEL) 62 62 63 63 #define mmiowb() wmb() 64 64
+4 -4
arch/mips/include/asm/io.h
··· 126 126 } 127 127 128 128 void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, 129 - unsigned long prot_val); 129 + pgprot_t prot); 130 130 void iounmap(const volatile void __iomem *addr); 131 131 132 132 /* ··· 141 141 * address. 142 142 */ 143 143 #define ioremap(offset, size) \ 144 - ioremap_prot((offset), (size), _CACHE_UNCACHED) 144 + ioremap_prot((offset), (size), __pgprot(_CACHE_UNCACHED)) 145 145 146 146 /* 147 147 * ioremap_cache - map bus memory into CPU space ··· 159 159 * memory-like regions on I/O busses. 160 160 */ 161 161 #define ioremap_cache(offset, size) \ 162 - ioremap_prot((offset), (size), _page_cachable_default) 162 + ioremap_prot((offset), (size), __pgprot(_page_cachable_default)) 163 163 164 164 /* 165 165 * ioremap_wc - map bus memory into CPU space ··· 180 180 * _CACHE_UNCACHED option (see cpu_probe() method). 181 181 */ 182 182 #define ioremap_wc(offset, size) \ 183 - ioremap_prot((offset), (size), boot_cpu_data.writecombine) 183 + ioremap_prot((offset), (size), __pgprot(boot_cpu_data.writecombine)) 184 184 185 185 #if defined(CONFIG_CPU_CAVIUM_OCTEON) 186 186 #define war_io_reorder_wmb() wmb()
+2 -2
arch/mips/mm/ioremap.c
··· 44 44 * ioremap_prot gives the caller control over cache coherency attributes (CCA) 45 45 */ 46 46 void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size, 47 - unsigned long prot_val) 47 + pgprot_t prot) 48 48 { 49 - unsigned long flags = prot_val & _CACHE_MASK; 49 + unsigned long flags = pgprot_val(prot) & _CACHE_MASK; 50 50 unsigned long offset, pfn, last_pfn; 51 51 struct vm_struct *area; 52 52 phys_addr_t last_addr;
+2 -2
arch/mips/mm/ioremap64.c
··· 3 3 #include <ioremap.h> 4 4 5 5 void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, 6 - unsigned long prot_val) 6 + pgprot_t prot) 7 7 { 8 - unsigned long flags = prot_val & _CACHE_MASK; 8 + unsigned long flags = pgprot_val(prot) & _CACHE_MASK; 9 9 u64 base = (flags == _CACHE_UNCACHED ? IO_BASE : UNCAC_BASE); 10 10 void __iomem *addr; 11 11
+1 -1
arch/parisc/include/asm/io.h
··· 131 131 _PAGE_ACCESSED | _PAGE_NO_CACHE) 132 132 133 133 #define ioremap_wc(addr, size) \ 134 - ioremap_prot((addr), (size), _PAGE_IOREMAP) 134 + ioremap_prot((addr), (size), __pgprot(_PAGE_IOREMAP)) 135 135 136 136 #define pci_iounmap pci_iounmap 137 137
+2 -2
arch/parisc/mm/ioremap.c
··· 14 14 #include <linux/mm.h> 15 15 16 16 void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, 17 - unsigned long prot) 17 + pgprot_t prot) 18 18 { 19 19 #ifdef CONFIG_EISA 20 20 unsigned long end = phys_addr + size - 1; ··· 41 41 } 42 42 } 43 43 44 - return generic_ioremap_prot(phys_addr, size, __pgprot(prot)); 44 + return generic_ioremap_prot(phys_addr, size, prot); 45 45 } 46 46 EXPORT_SYMBOL(ioremap_prot);
+1 -1
arch/powerpc/include/asm/io.h
··· 895 895 896 896 void __iomem *ioremap_coherent(phys_addr_t address, unsigned long size); 897 897 #define ioremap_cache(addr, size) \ 898 - ioremap_prot((addr), (size), pgprot_val(PAGE_KERNEL)) 898 + ioremap_prot((addr), (size), PAGE_KERNEL) 899 899 900 900 #define iounmap iounmap 901 901
+2 -2
arch/powerpc/mm/ioremap.c
··· 41 41 return __ioremap_caller(addr, size, prot, caller); 42 42 } 43 43 44 - void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long flags) 44 + void __iomem *ioremap_prot(phys_addr_t addr, size_t size, pgprot_t prot) 45 45 { 46 - pte_t pte = __pte(flags); 46 + pte_t pte = __pte(pgprot_val(prot)); 47 47 void *caller = __builtin_return_address(0); 48 48 49 49 /* writeable implies dirty for kernel addresses */
+2 -2
arch/powerpc/platforms/ps3/spu.c
··· 190 190 static int __init setup_areas(struct spu *spu) 191 191 { 192 192 struct table {char* name; unsigned long addr; unsigned long size;}; 193 - unsigned long shadow_flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL_RO)); 194 193 195 194 spu_pdata(spu)->shadow = ioremap_prot(spu_pdata(spu)->shadow_addr, 196 - sizeof(struct spe_shadow), shadow_flags); 195 + sizeof(struct spe_shadow), 196 + pgprot_noncached_wc(PAGE_KERNEL_RO)); 197 197 if (!spu_pdata(spu)->shadow) { 198 198 pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__); 199 199 goto fail_ioremap;
+1 -1
arch/riscv/include/asm/io.h
··· 137 137 138 138 #ifdef CONFIG_MMU 139 139 #define arch_memremap_wb(addr, size) \ 140 - ((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL)) 140 + ((__force void *)ioremap_prot((addr), (size), __pgprot(_PAGE_KERNEL))) 141 141 #endif 142 142 143 143 #endif /* _ASM_RISCV_IO_H */
+1 -1
arch/riscv/kernel/acpi.c
··· 305 305 } 306 306 } 307 307 308 - return ioremap_prot(phys, size, pgprot_val(prot)); 308 + return ioremap_prot(phys, size, prot); 309 309 } 310 310 311 311 #ifdef CONFIG_PCI
+2 -2
arch/s390/include/asm/io.h
··· 33 33 #define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL) 34 34 35 35 #define ioremap_wc(addr, size) \ 36 - ioremap_prot((addr), (size), pgprot_val(pgprot_writecombine(PAGE_KERNEL))) 36 + ioremap_prot((addr), (size), pgprot_writecombine(PAGE_KERNEL)) 37 37 #define ioremap_wt(addr, size) \ 38 - ioremap_prot((addr), (size), pgprot_val(pgprot_writethrough(PAGE_KERNEL))) 38 + ioremap_prot((addr), (size), pgprot_writethrough(PAGE_KERNEL)) 39 39 40 40 static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) 41 41 {
+2 -2
arch/s390/pci/pci.c
··· 255 255 } 256 256 257 257 void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, 258 - unsigned long prot) 258 + pgprot_t prot) 259 259 { 260 260 /* 261 261 * When PCI MIO instructions are unavailable the "physical" address ··· 265 265 if (!static_branch_unlikely(&have_mio)) 266 266 return (void __iomem *)phys_addr; 267 267 268 - return generic_ioremap_prot(phys_addr, size, __pgprot(prot)); 268 + return generic_ioremap_prot(phys_addr, size, prot); 269 269 } 270 270 EXPORT_SYMBOL(ioremap_prot); 271 271
+1 -1
arch/sh/boards/mach-landisk/setup.c
··· 58 58 /* open I/O area window */ 59 59 paddrbase = virt_to_phys((void *)PA_AREA5_IO); 60 60 prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16); 61 - cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, pgprot_val(prot)); 61 + cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, prot); 62 62 if (!cf_ide_base) { 63 63 printk("allocate_cf_area : can't open CF I/O window!\n"); 64 64 return -ENOMEM;
+1 -1
arch/sh/boards/mach-lboxre2/setup.c
··· 53 53 paddrbase = virt_to_phys((void*)PA_AREA5_IO); 54 54 psize = PAGE_SIZE; 55 55 prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16); 56 - cf0_io_base = (u32)ioremap_prot(paddrbase, psize, pgprot_val(prot)); 56 + cf0_io_base = (u32)ioremap_prot(paddrbase, psize, prot); 57 57 if (!cf0_io_base) { 58 58 printk(KERN_ERR "%s : can't open CF I/O window!\n" , __func__ ); 59 59 return -ENOMEM;
+1 -1
arch/sh/boards/mach-sh03/setup.c
··· 75 75 /* open I/O area window */ 76 76 paddrbase = virt_to_phys((void *)PA_AREA5_IO); 77 77 prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16); 78 - cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, pgprot_val(prot)); 78 + cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, prot); 79 79 if (!cf_ide_base) { 80 80 printk("allocate_cf_area : can't open CF I/O window!\n"); 81 81 return -ENOMEM;
+1 -1
arch/sh/include/asm/io.h
··· 299 299 #define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL_NOCACHE) 300 300 301 301 #define ioremap_cache(addr, size) \ 302 - ioremap_prot((addr), (size), pgprot_val(PAGE_KERNEL)) 302 + ioremap_prot((addr), (size), PAGE_KERNEL) 303 303 #endif /* CONFIG_MMU */ 304 304 305 305 #include <asm-generic/io.h>
+1 -2
arch/sh/mm/ioremap.c
··· 73 73 #endif /* CONFIG_29BIT */ 74 74 75 75 void __iomem __ref *ioremap_prot(phys_addr_t phys_addr, size_t size, 76 - unsigned long prot) 76 + pgprot_t pgprot) 77 77 { 78 78 void __iomem *mapped; 79 - pgprot_t pgprot = __pgprot(prot); 80 79 81 80 mapped = __ioremap_trapped(phys_addr, size); 82 81 if (mapped)
+1 -1
arch/x86/include/asm/io.h
··· 170 170 #define ioremap_uc ioremap_uc 171 171 extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); 172 172 #define ioremap_cache ioremap_cache 173 - extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, unsigned long prot_val); 173 + extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, pgprot_t prot); 174 174 #define ioremap_prot ioremap_prot 175 175 extern void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size); 176 176 #define ioremap_encrypted ioremap_encrypted
+2 -2
arch/x86/mm/ioremap.c
··· 440 440 EXPORT_SYMBOL(ioremap_cache); 441 441 442 442 void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size, 443 - unsigned long prot_val) 443 + pgprot_t prot) 444 444 { 445 445 return __ioremap_caller(phys_addr, size, 446 - pgprot2cachemode(__pgprot(prot_val)), 446 + pgprot2cachemode(prot), 447 447 __builtin_return_address(0), false); 448 448 } 449 449 EXPORT_SYMBOL(ioremap_prot);
+3 -3
arch/xtensa/include/asm/io.h
··· 29 29 * I/O memory mapping functions. 30 30 */ 31 31 void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, 32 - unsigned long prot); 32 + pgprot_t prot); 33 33 #define ioremap_prot ioremap_prot 34 34 #define iounmap iounmap 35 35 ··· 40 40 return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR); 41 41 else 42 42 return ioremap_prot(offset, size, 43 - pgprot_val(pgprot_noncached(PAGE_KERNEL))); 43 + pgprot_noncached(PAGE_KERNEL)); 44 44 } 45 45 #define ioremap ioremap 46 46 ··· 51 51 && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE) 52 52 return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR); 53 53 else 54 - return ioremap_prot(offset, size, pgprot_val(PAGE_KERNEL)); 54 + return ioremap_prot(offset, size, PAGE_KERNEL); 55 55 56 56 } 57 57 #define ioremap_cache ioremap_cache
+2 -2
arch/xtensa/mm/ioremap.c
··· 11 11 #include <asm/io.h> 12 12 13 13 void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, 14 - unsigned long prot) 14 + pgprot_t prot) 15 15 { 16 16 unsigned long pfn = __phys_to_pfn((phys_addr)); 17 17 WARN_ON(pfn_valid(pfn)); 18 18 19 - return generic_ioremap_prot(phys_addr, size, __pgprot(prot)); 19 + return generic_ioremap_prot(phys_addr, size, prot); 20 20 } 21 21 EXPORT_SYMBOL(ioremap_prot); 22 22
+2 -2
include/asm-generic/io.h
··· 1111 1111 pgprot_t prot); 1112 1112 1113 1113 void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, 1114 - unsigned long prot); 1114 + pgprot_t prot); 1115 1115 void iounmap(volatile void __iomem *addr); 1116 1116 void generic_iounmap(volatile void __iomem *addr); 1117 1117 ··· 1120 1120 static inline void __iomem *ioremap(phys_addr_t addr, size_t size) 1121 1121 { 1122 1122 /* _PAGE_IOREMAP needs to be supplied by the architecture */ 1123 - return ioremap_prot(addr, size, _PAGE_IOREMAP); 1123 + return ioremap_prot(addr, size, __pgprot(_PAGE_IOREMAP)); 1124 1124 } 1125 1125 #endif 1126 1126 #endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */
+2 -2
mm/ioremap.c
··· 50 50 51 51 #ifndef ioremap_prot 52 52 void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, 53 - unsigned long prot) 53 + pgprot_t prot) 54 54 { 55 - return generic_ioremap_prot(phys_addr, size, __pgprot(prot)); 55 + return generic_ioremap_prot(phys_addr, size, prot); 56 56 } 57 57 EXPORT_SYMBOL(ioremap_prot); 58 58 #endif
+3 -3
mm/memory.c
··· 6727 6727 void *buf, int len, int write) 6728 6728 { 6729 6729 resource_size_t phys_addr; 6730 - unsigned long prot = 0; 6730 + pgprot_t prot = __pgprot(0); 6731 6731 void __iomem *maddr; 6732 6732 int offset = offset_in_page(addr); 6733 6733 int ret = -EINVAL; ··· 6737 6737 retry: 6738 6738 if (follow_pfnmap_start(&args)) 6739 6739 return -EINVAL; 6740 - prot = pgprot_val(args.pgprot); 6740 + prot = args.pgprot; 6741 6741 phys_addr = (resource_size_t)args.pfn << PAGE_SHIFT; 6742 6742 writable = args.writable; 6743 6743 follow_pfnmap_end(&args); ··· 6752 6752 if (follow_pfnmap_start(&args)) 6753 6753 goto out_unmap; 6754 6754 6755 - if ((prot != pgprot_val(args.pgprot)) || 6755 + if ((pgprot_val(prot) != pgprot_val(args.pgprot)) || 6756 6756 (phys_addr != (args.pfn << PAGE_SHIFT)) || 6757 6757 (writable != args.writable)) { 6758 6758 follow_pfnmap_end(&args);