Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'arm-memremap-for-v4.7' of git://git.linaro.org/people/ard.biesheuvel/linux-arm into devel-stable

This series wires up the generic memremap() function for ARM in a way
that allows it to be used as intended, i.e., without regard for whether
the region being mapped is covered by a struct page and/or the linear
mapping (lowmem).

+47 -7
+12
arch/arm/include/asm/io.h
··· 392 392 #define ioremap ioremap 393 393 #define ioremap_nocache ioremap 394 394 395 + /* 396 + * Do not use ioremap_cache for mapping memory. Use memremap instead. 397 + */ 395 398 void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size); 396 399 #define ioremap_cache ioremap_cache 400 + 401 + /* 402 + * Do not use ioremap_cached in new code. Provided for the benefit of 403 + * the pxa2xx-flash MTD driver only. 404 + */ 405 + void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size); 397 406 398 407 void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size); 399 408 #define ioremap_wc ioremap_wc ··· 410 401 411 402 void iounmap(volatile void __iomem *iomem_cookie); 412 403 #define iounmap iounmap 404 + 405 + void *arch_memremap_wb(phys_addr_t phys_addr, size_t size); 406 + #define arch_memremap_wb arch_memremap_wb 413 407 414 408 /* 415 409 * io{read,write}{16,32}be() macros
+14 -2
arch/arm/mm/ioremap.c
··· 297 297 } 298 298 299 299 /* 300 - * Don't allow RAM to be mapped - this causes problems with ARMv6+ 300 + * Don't allow RAM to be mapped with mismatched attributes - this 301 + * causes problems with ARMv6+ 301 302 */ 302 - if (WARN_ON(pfn_valid(pfn))) 303 + if (WARN_ON(pfn_valid(pfn) && mtype != MT_MEMORY_RW)) 303 304 return NULL; 304 305 305 306 area = get_vm_area_caller(size, VM_IOREMAP, caller); ··· 381 380 EXPORT_SYMBOL(ioremap); 382 381 383 382 void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size) 383 + __alias(ioremap_cached); 384 + 385 + void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size) 384 386 { 385 387 return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED, 386 388 __builtin_return_address(0)); 387 389 } 388 390 EXPORT_SYMBOL(ioremap_cache); 391 + EXPORT_SYMBOL(ioremap_cached); 389 392 390 393 void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size) 391 394 { ··· 417 412 418 413 return __arm_ioremap_caller(phys_addr, size, mtype, 419 414 __builtin_return_address(0)); 415 + } 416 + 417 + void *arch_memremap_wb(phys_addr_t phys_addr, size_t size) 418 + { 419 + return (__force void *)arch_ioremap_caller(phys_addr, size, 420 + MT_MEMORY_RW, 421 + __builtin_return_address(0)); 420 422 } 421 423 422 424 void __iounmap(volatile void __iomem *io_addr)
+9
arch/arm/mm/nommu.c
··· 367 367 EXPORT_SYMBOL(ioremap); 368 368 369 369 void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size) 370 + __alias(ioremap_cached); 371 + 372 + void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size) 370 373 { 371 374 return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED, 372 375 __builtin_return_address(0)); 373 376 } 374 377 EXPORT_SYMBOL(ioremap_cache); 378 + EXPORT_SYMBOL(ioremap_cached); 375 379 376 380 void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size) 377 381 { ··· 383 379 __builtin_return_address(0)); 384 380 } 385 381 EXPORT_SYMBOL(ioremap_wc); 382 + 383 + void *arch_memremap_wb(phys_addr_t phys_addr, size_t size) 384 + { 385 + return (void *)phys_addr; 386 + } 386 387 387 388 void __iounmap(volatile void __iomem *addr) 388 389 {
+3 -3
drivers/mtd/maps/pxa2xx-flash.c
··· 71 71 info->map.name); 72 72 return -ENOMEM; 73 73 } 74 - info->map.cached = memremap(info->map.phys, info->map.size, 75 - MEMREMAP_WB); 74 + info->map.cached = 75 + ioremap_cached(info->map.phys, info->map.size); 76 76 if (!info->map.cached) 77 77 printk(KERN_WARNING "Failed to ioremap cached %s\n", 78 78 info->map.name); ··· 111 111 map_destroy(info->mtd); 112 112 iounmap(info->map.virt); 113 113 if (info->map.cached) 114 - memunmap(info->map.cached); 114 + iounmap(info->map.cached); 115 115 kfree(info); 116 116 return 0; 117 117 }
+9 -2
kernel/memremap.c
··· 27 27 } 28 28 #endif 29 29 30 + #ifndef arch_memremap_wb 31 + static void *arch_memremap_wb(resource_size_t offset, unsigned long size) 32 + { 33 + return (__force void *)ioremap_cache(offset, size); 34 + } 35 + #endif 36 + 30 37 static void *try_ram_remap(resource_size_t offset, size_t size) 31 38 { 32 39 unsigned long pfn = PHYS_PFN(offset); ··· 41 34 /* In the simple case just return the existing linear address */ 42 35 if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn))) 43 36 return __va(offset); 44 - return NULL; /* fallback to ioremap_cache */ 37 + return NULL; /* fallback to arch_memremap_wb */ 45 38 } 46 39 47 40 /** ··· 97 90 if (is_ram == REGION_INTERSECTS) 98 91 addr = try_ram_remap(offset, size); 99 92 if (!addr) 100 - addr = ioremap_cache(offset, size); 93 + addr = arch_memremap_wb(offset, size); 101 94 } 102 95 103 96 /*