Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[ARM] mm 10: allow memory type to be specified with ioremap

__ioremap() took a set of page table flags (specifically the cacheable
and bufferable bits) to control the mapping type. However, with
the advent of ARMv6, this is far too limited.

Replace the page table flags with a memory type index, so that the
desired attributes can be selected from the mem_type table.

Finally, to prevent silent miscompilation due to the differing
arguments, rename the __ioremap() and __ioremap_pfn() functions.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Authored and committed by Russell King
3603ab2b 0af92bef

+54 -48
+5 -5
arch/arm/mach-iop13xx/io.c
··· 41 41 EXPORT_SYMBOL(__iop13xx_io); 42 42 43 43 void * __iomem __iop13xx_ioremap(unsigned long cookie, size_t size, 44 - unsigned long flags) 44 + unsigned int mtype) 45 45 { 46 46 void __iomem * retval; 47 47 ··· 61 61 (cookie - IOP13XX_PCIE_LOWER_MEM_RA)); 62 62 break; 63 63 case IOP13XX_PBI_LOWER_MEM_RA ... IOP13XX_PBI_UPPER_MEM_RA: 64 - retval = __ioremap(IOP13XX_PBI_LOWER_MEM_PA + 65 - (cookie - IOP13XX_PBI_LOWER_MEM_RA), 66 - size, flags); 64 + retval = __arm_ioremap(IOP13XX_PBI_LOWER_MEM_PA + 65 + (cookie - IOP13XX_PBI_LOWER_MEM_RA), 66 + size, mtype); 67 67 break; 68 68 case IOP13XX_PCIE_LOWER_IO_PA ... IOP13XX_PCIE_UPPER_IO_PA: 69 69 retval = (void *) IOP13XX_PCIE_IO_PHYS_TO_VIRT(cookie); ··· 75 75 retval = (void *) IOP13XX_PMMR_PHYS_TO_VIRT(cookie); 76 76 break; 77 77 default: 78 - retval = __ioremap(cookie, size, flags); 78 + retval = __arm_ioremap(cookie, size, mtype); 79 79 } 80 80 81 81 return retval;
+4 -4
arch/arm/mach-iop13xx/pci.c
··· 88 88 89 89 if (end) { 90 90 iop13xx_atux_mem_base = 91 - (u32) __ioremap_pfn( 91 + (u32) __arm_ioremap_pfn( 92 92 __phys_to_pfn(IOP13XX_PCIX_LOWER_MEM_PA) 93 - , 0, iop13xx_atux_mem_size, 0); 93 + , 0, iop13xx_atux_mem_size, MT_DEVICE); 94 94 if (!iop13xx_atux_mem_base) { 95 95 printk("%s: atux allocation " 96 96 "failed\n", __FUNCTION__); ··· 114 114 115 115 if (end) { 116 116 iop13xx_atue_mem_base = 117 - (u32) __ioremap_pfn( 117 + (u32) __arm_ioremap_pfn( 118 118 __phys_to_pfn(IOP13XX_PCIE_LOWER_MEM_PA) 119 - , 0, iop13xx_atue_mem_size, 0); 119 + , 0, iop13xx_atue_mem_size, MT_DEVICE); 120 120 if (!iop13xx_atue_mem_base) { 121 121 printk("%s: atue allocation " 122 122 "failed\n", __FUNCTION__);
+9 -11
arch/arm/mm/ioremap.c
··· 262 262 * mapping. See include/asm-arm/proc-armv/pgtable.h for more information. 263 263 */ 264 264 void __iomem * 265 - __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, 266 - unsigned long flags) 265 + __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, 266 + unsigned int mtype) 267 267 { 268 268 const struct mem_type *type; 269 - struct mem_type t; 270 269 int err; 271 270 unsigned long addr; 272 271 struct vm_struct * area; ··· 276 277 if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK)) 277 278 return NULL; 278 279 279 - t = *get_mem_type(MT_DEVICE); 280 - t.prot_sect |= flags; 281 - t.prot_pte |= flags; 282 - type = &t; 280 + type = get_mem_type(mtype); 281 + if (!type) 282 + return NULL; 283 283 284 284 size = PAGE_ALIGN(size); 285 285 ··· 309 311 flush_cache_vmap(addr, addr + size); 310 312 return (void __iomem *) (offset + addr); 311 313 } 312 - EXPORT_SYMBOL(__ioremap_pfn); 314 + EXPORT_SYMBOL(__arm_ioremap_pfn); 313 315 314 316 void __iomem * 315 - __ioremap(unsigned long phys_addr, size_t size, unsigned long flags) 317 + __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) 316 318 { 317 319 unsigned long last_addr; 318 320 unsigned long offset = phys_addr & ~PAGE_MASK; ··· 330 332 */ 331 333 size = PAGE_ALIGN(last_addr + 1) - phys_addr; 332 334 333 - return __ioremap_pfn(pfn, offset, size, flags); 335 + return __arm_ioremap_pfn(pfn, offset, size, mtype); 334 336 } 335 - EXPORT_SYMBOL(__ioremap); 337 + EXPORT_SYMBOL(__arm_ioremap); 336 338 337 339 void __iounmap(volatile void __iomem *addr) 338 340 {
+6 -6
arch/arm/mm/nommu.c
··· 62 62 } 63 63 EXPORT_SYMBOL(flush_dcache_page); 64 64 65 - void __iomem *__ioremap_pfn(unsigned long pfn, unsigned long offset, 66 - size_t size, unsigned long flags) 65 + void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, 66 + size_t size, unsigned int mtype) 67 67 { 68 68 if (pfn >= (0x100000000ULL >> PAGE_SHIFT)) 69 69 return NULL; 70 70 return (void __iomem *) (offset + (pfn << PAGE_SHIFT)); 71 71 } 72 - EXPORT_SYMBOL(__ioremap_pfn); 72 + EXPORT_SYMBOL(__arm_ioremap_pfn); 73 73 74 - void __iomem *__ioremap(unsigned long phys_addr, size_t size, 75 - unsigned long flags) 74 + void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, 75 + unsigned int mtype) 76 76 { 77 77 return (void __iomem *)phys_addr; 78 78 } 79 - EXPORT_SYMBOL(__ioremap); 79 + EXPORT_SYMBOL(__arm_ioremap); 80 80 81 81 void __iounmap(volatile void __iomem *addr) 82 82 {
+2 -2
arch/arm/plat-iop/io.c
··· 22 22 #include <asm/io.h> 23 23 24 24 void * __iomem __iop3xx_ioremap(unsigned long cookie, size_t size, 25 - unsigned long flags) 25 + unsigned int mtype) 26 26 { 27 27 void __iomem * retval; 28 28 ··· 34 34 retval = (void *) IOP3XX_PMMR_PHYS_TO_VIRT(cookie); 35 35 break; 36 36 default: 37 - retval = __ioremap(cookie, size, flags); 37 + retval = __arm_ioremap(cookie, size, mtype); 38 38 } 39 39 40 40 return retval;
+2 -2
include/asm-arm/arch-ixp23xx/io.h
··· 23 23 #include <linux/kernel.h> /* For BUG */ 24 24 25 25 static inline void __iomem * 26 - ixp23xx_ioremap(unsigned long addr, unsigned long size, unsigned long flags) 26 + ixp23xx_ioremap(unsigned long addr, unsigned long size, unsigned int mtype) 27 27 { 28 28 if (addr >= IXP23XX_PCI_MEM_START && 29 29 addr <= IXP23XX_PCI_MEM_START + IXP23XX_PCI_MEM_SIZE) { ··· 34 34 ((addr - IXP23XX_PCI_MEM_START) + IXP23XX_PCI_MEM_VIRT); 35 35 } 36 36 37 - return __ioremap(addr, size, flags); 37 + return __arm_ioremap(addr, size, mtype); 38 38 } 39 39 40 40 static inline void
+2 -2
include/asm-arm/arch-ixp4xx/io.h
··· 59 59 * fallback to the default. 60 60 */ 61 61 static inline void __iomem * 62 - __ixp4xx_ioremap(unsigned long addr, size_t size, unsigned long flags) 62 + __ixp4xx_ioremap(unsigned long addr, size_t size, unsigned int mtype) 63 63 { 64 64 if((addr < 0x48000000) || (addr > 0x4fffffff)) 65 - return __ioremap(addr, size, flags); 65 + return __arm_ioremap(addr, size, mtype); 66 66 67 67 return (void *)addr; 68 68 }
+21 -12
include/asm-arm/io.h
··· 56 56 57 57 /* 58 58 * Architecture ioremap implementation. 59 - * 60 - * __ioremap takes CPU physical address. 61 - * 62 - * __ioremap_pfn takes a Page Frame Number and an offset into that page 63 59 */ 64 - extern void __iomem * __ioremap_pfn(unsigned long, unsigned long, size_t, unsigned long); 65 - extern void __iomem * __ioremap(unsigned long, size_t, unsigned long); 60 + #define MT_DEVICE 0 61 + #define MT_DEVICE_NONSHARED 1 62 + #define MT_DEVICE_CACHED 2 63 + #define MT_DEVICE_IXP2000 3 64 + /* 65 + * types 4 onwards can be found in asm/mach/map.h and are undefined 66 + * for ioremap 67 + */ 68 + 69 + /* 70 + * __arm_ioremap takes CPU physical address. 71 + * __arm_ioremap_pfn takes a Page Frame Number and an offset into that page 72 + */ 73 + extern void __iomem * __arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int); 74 + extern void __iomem * __arm_ioremap(unsigned long, size_t, unsigned int); 66 75 extern void __iounmap(volatile void __iomem *addr); 67 76 68 77 /* ··· 212 203 * 213 204 */ 214 205 #ifndef __arch_ioremap 215 - #define ioremap(cookie,size) __ioremap(cookie,size,0) 216 - #define ioremap_nocache(cookie,size) __ioremap(cookie,size,0) 217 - #define ioremap_cached(cookie,size) __ioremap(cookie,size,L_PTE_CACHEABLE) 206 + #define ioremap(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE) 207 + #define ioremap_nocache(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE) 208 + #define ioremap_cached(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE_CACHED) 218 209 #define iounmap(cookie) __iounmap(cookie) 219 210 #else 220 - #define ioremap(cookie,size) __arch_ioremap((cookie),(size),0) 221 - #define ioremap_nocache(cookie,size) __arch_ioremap((cookie),(size),0) 222 - #define ioremap_cached(cookie,size) __arch_ioremap((cookie),(size),L_PTE_CACHEABLE) 211 + #define ioremap(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE) 212 + #define ioremap_nocache(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE) 213 + #define ioremap_cached(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_CACHED) 223 214 #define iounmap(cookie) __arch_iounmap(cookie) 224 215 #endif 225 216
+3 -4
include/asm-arm/mach/map.h
··· 9 9 * 10 10 * Page table mapping constructs and function prototypes 11 11 */ 12 + #include <asm/io.h> 13 + 12 14 struct map_desc { 13 15 unsigned long virtual; 14 16 unsigned long pfn; ··· 18 16 unsigned int type; 19 17 }; 20 18 21 - #define MT_DEVICE 0 22 - #define MT_DEVICE_NONSHARED 1 23 - #define MT_DEVICE_CACHED 2 24 - #define MT_DEVICE_IXP2000 3 19 + /* types 0-3 are defined in asm/io.h */ 25 20 #define MT_CACHECLEAN 4 26 21 #define MT_MINICLEAN 5 27 22 #define MT_LOW_VECTORS 6