Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: mm: convert to GENERIC_IOREMAP

By taking GENERIC_IOREMAP method, the generic generic_ioremap_prot(),
generic_iounmap(), and their generic wrapper ioremap_prot(), ioremap()
and iounmap() are all visible and available to arch. Arch needs to
provide wrapper functions to override the generic versions if there's
arch specific handling in its ioremap_prot(), ioremap() or iounmap().
This change will simplify implementation by removing duplicated code
with generic_ioremap_prot() and generic_iounmap(), and has the equivalent
functionality as before.

Here, add wrapper functions ioremap_prot() and iounmap() for powerpc's
special operations during ioremap() and iounmap().

Link: https://lkml.kernel.org/r/20230706154520.11257-18-bhe@redhat.com
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Baoquan He <bhe@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brian Cain <bcain@quicinc.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: David Laight <David.Laight@ACULAB.COM>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Nathan Chancellor <nathan@kernel.org>
Cc: Niklas Schnelle <schnelle@linux.ibm.com>
Cc: Rich Felker <dalias@libc.org>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Christophe Leroy and committed by
Andrew Morton
8d05554d 016fec91

+16 -50
+1
arch/powerpc/Kconfig
··· 193 193 select GENERIC_CPU_VULNERABILITIES if PPC_BARRIER_NOSPEC 194 194 select GENERIC_EARLY_IOREMAP 195 195 select GENERIC_GETTIMEOFDAY 196 + select GENERIC_IOREMAP 196 197 select GENERIC_IRQ_SHOW 197 198 select GENERIC_IRQ_SHOW_LEVEL 198 199 select GENERIC_PCI_IOMAP if PCI
+3 -5
arch/powerpc/include/asm/io.h
··· 889 889 * 890 890 */ 891 891 extern void __iomem *ioremap(phys_addr_t address, unsigned long size); 892 - extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size, 893 - unsigned long flags); 892 + #define ioremap ioremap 893 + #define ioremap_prot ioremap_prot 894 894 extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size); 895 895 #define ioremap_wc ioremap_wc 896 896 ··· 904 904 #define ioremap_cache(addr, size) \ 905 905 ioremap_prot((addr), (size), pgprot_val(PAGE_KERNEL)) 906 906 907 - extern void iounmap(volatile void __iomem *addr); 907 + #define iounmap iounmap 908 908 909 909 void __iomem *ioremap_phb(phys_addr_t paddr, unsigned long size); 910 910 911 911 int early_ioremap_range(unsigned long ea, phys_addr_t pa, 912 912 unsigned long size, pgprot_t prot); 913 - void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size, 914 - pgprot_t prot, void *caller); 915 913 916 914 extern void __iomem *__ioremap_caller(phys_addr_t, unsigned long size, 917 915 pgprot_t prot, void *caller);
+1 -25
arch/powerpc/mm/ioremap.c
··· 41 41 return __ioremap_caller(addr, size, prot, caller); 42 42 } 43 43 44 - void __iomem *ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags) 44 + void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long flags) 45 45 { 46 46 pte_t pte = __pte(flags); 47 47 void *caller = __builtin_return_address(0); ··· 73 73 } 74 74 75 75 return 0; 76 - } 77 - 78 - void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size, 79 - pgprot_t prot, void *caller) 80 - { 81 - struct vm_struct *area; 82 - int ret; 83 - unsigned long va; 84 - 85 - area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_START, IOREMAP_END, caller); 86 - if (area == NULL) 87 - return NULL; 88 - 89 - area->phys_addr = pa; 90 - va = (unsigned long)area->addr; 91 - 92 - ret = ioremap_page_range(va, va + size, pa, prot); 93 - if (!ret) 94 - return (void __iomem *)area->addr + offset; 95 - 96 - vunmap_range(va, va + size); 97 - free_vm_area(area); 98 - 99 - return NULL; 100 76 }
+9 -10
arch/powerpc/mm/ioremap_32.c
··· 22 22 int err; 23 23 24 24 /* 25 + * If the address lies within the first 16 MB, assume it's in ISA 26 + * memory space 27 + */ 28 + if (addr < SZ_16M) 29 + addr += _ISA_MEM_BASE; 30 + 31 + /* 25 32 * Choose an address to map it to. 26 33 * Once the vmalloc system is running, we use it. 27 34 * Before then, we use space going down from IOREMAP_TOP ··· 37 30 p = addr & PAGE_MASK; 38 31 offset = addr & ~PAGE_MASK; 39 32 size = PAGE_ALIGN(addr + size) - p; 40 - 41 - /* 42 - * If the address lies within the first 16 MB, assume it's in ISA 43 - * memory space 44 - */ 45 - if (p < 16 * 1024 * 1024) 46 - p += _ISA_MEM_BASE; 47 33 48 34 #ifndef CONFIG_CRASH_DUMP 49 35 /* ··· 63 63 return (void __iomem *)v + offset; 64 64 65 65 if (slab_is_available()) 66 - return do_ioremap(p, offset, size, prot, caller); 66 + return generic_ioremap_prot(addr, size, prot); 67 67 68 68 /* 69 69 * Should check if it is a candidate for a BAT mapping ··· 87 87 if (v_block_mapped((unsigned long)addr)) 88 88 return; 89 89 90 - if (addr > high_memory && (unsigned long)addr < ioremap_bot) 91 - vunmap((void *)(PAGE_MASK & (unsigned long)addr)); 90 + generic_iounmap(addr); 92 91 } 93 92 EXPORT_SYMBOL(iounmap);
+2 -10
arch/powerpc/mm/ioremap_64.c
··· 29 29 return NULL; 30 30 31 31 if (slab_is_available()) 32 - return do_ioremap(paligned, offset, size, prot, caller); 32 + return generic_ioremap_prot(addr, size, prot); 33 33 34 34 pr_warn("ioremap() called early from %pS. Use early_ioremap() instead\n", caller); 35 35 ··· 49 49 */ 50 50 void iounmap(volatile void __iomem *token) 51 51 { 52 - void *addr; 53 - 54 52 if (!slab_is_available()) 55 53 return; 56 54 57 - addr = (void *)((unsigned long __force)PCI_FIX_ADDR(token) & PAGE_MASK); 58 - 59 - if ((unsigned long)addr < ioremap_bot) { 60 - pr_warn("Attempt to iounmap early bolted mapping at 0x%p\n", addr); 61 - return; 62 - } 63 - vunmap(addr); 55 + generic_iounmap(PCI_FIX_ADDR(token)); 64 56 } 65 57 EXPORT_SYMBOL(iounmap);