Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: mm: Convert to GENERIC_IOREMAP

Add a hook for arm64's special operation when ioremap(), then
ioremap_wc/np/cache are converted to use ioremap_prot() from
GENERIC_IOREMAP, update the Copyright and kill the unused
inclusions.

Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Link: https://lore.kernel.org/r/20220607125027.44946-6-wangkefeng.wang@huawei.com
Signed-off-by: Will Deacon <will@kernel.org>

authored by

Kefeng Wang and committed by
Will Deacon
f23eab0b 18e780b4

+28 -89
+1
arch/arm64/Kconfig
··· 126 126 select GENERIC_CPU_VULNERABILITIES 127 127 select GENERIC_EARLY_IOREMAP 128 128 select GENERIC_IDLE_POLL_SETUP 129 + select GENERIC_IOREMAP 129 130 select GENERIC_IRQ_IPI 130 131 select GENERIC_IRQ_PROBE 131 132 select GENERIC_IRQ_SHOW
+18 -6
arch/arm64/include/asm/io.h
··· 163 163 /* 164 164 * I/O memory mapping functions. 165 165 */ 166 - extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot); 167 - extern void iounmap(volatile void __iomem *addr); 168 - extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); 169 166 170 - #define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) 171 - #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) 172 - #define ioremap_np(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRnE)) 167 + bool ioremap_allowed(phys_addr_t phys_addr, size_t size, unsigned long prot); 168 + #define ioremap_allowed ioremap_allowed 169 + 170 + #define _PAGE_IOREMAP PROT_DEVICE_nGnRE 171 + 172 + #define ioremap_wc(addr, size) \ 173 + ioremap_prot((addr), (size), PROT_NORMAL_NC) 174 + #define ioremap_np(addr, size) \ 175 + ioremap_prot((addr), (size), PROT_DEVICE_nGnRnE) 173 176 174 177 /* 175 178 * io{read,write}{16,32,64}be() macros ··· 186 183 #define iowrite64be(v,p) ({ __iowmb(); __raw_writeq((__force __u64)cpu_to_be64(v), p); }) 187 184 188 185 #include <asm-generic/io.h> 186 + 187 + #define ioremap_cache ioremap_cache 188 + static inline void __iomem *ioremap_cache(phys_addr_t addr, size_t size) 189 + { 190 + if (pfn_is_map_memory(__phys_to_pfn(addr))) 191 + return (void __iomem *)__phys_to_virt(addr); 192 + 193 + return ioremap_prot(addr, size, PROT_NORMAL); 194 + } 189 195 190 196 /* 191 197 * More restrictive address range checking than the default implementation
+1 -1
arch/arm64/kernel/acpi.c
··· 351 351 prot = __acpi_get_writethrough_mem_attribute(); 352 352 } 353 353 } 354 - return __ioremap(phys, size, prot); 354 + return ioremap_prot(phys, size, pgprot_val(prot)); 355 355 } 356 356 357 357 /*
+8 -82
arch/arm64/mm/ioremap.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * Based on arch/arm/mm/ioremap.c 4 - * 5 - * (C) Copyright 1995 1996 Linus Torvalds 6 - * Hacked for ARM by Phil Blundell <philb@gnu.org> 7 - * Hacked to allow all architectures to build, and various cleanups 8 - * by Russell King 9 - * Copyright (C) 2012 ARM Ltd. 10 - */ 11 2 12 - #include <linux/export.h> 13 3 #include <linux/mm.h> 14 - #include <linux/vmalloc.h> 15 4 #include <linux/io.h> 16 5 17 - #include <asm/fixmap.h> 18 - #include <asm/tlbflush.h> 19 - 20 - static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size, 21 - pgprot_t prot, void *caller) 6 + bool ioremap_allowed(phys_addr_t phys_addr, size_t size, unsigned long prot) 22 7 { 23 - unsigned long last_addr; 24 - unsigned long offset = phys_addr & ~PAGE_MASK; 25 - int err; 26 - unsigned long addr; 27 - struct vm_struct *area; 8 + unsigned long last_addr = phys_addr + size - 1; 28 9 29 - /* 30 - * Page align the mapping address and size, taking account of any 31 - * offset. 32 - */ 33 - phys_addr &= PAGE_MASK; 34 - size = PAGE_ALIGN(size + offset); 10 + /* Don't allow outside PHYS_MASK */ 11 + if (last_addr & ~PHYS_MASK) 12 + return false; 35 13 36 - /* 37 - * Don't allow wraparound, zero size or outside PHYS_MASK. 38 - */ 39 - last_addr = phys_addr + size - 1; 40 - if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK)) 41 - return NULL; 42 - 43 - /* 44 - * Don't allow RAM to be mapped. 45 - */ 14 + /* Don't allow RAM to be mapped. */ 46 15 if (WARN_ON(pfn_is_map_memory(__phys_to_pfn(phys_addr)))) 47 - return NULL; 16 + return false; 48 17 49 - area = get_vm_area_caller(size, VM_IOREMAP, caller); 50 - if (!area) 51 - return NULL; 52 - addr = (unsigned long)area->addr; 53 - area->phys_addr = phys_addr; 54 - 55 - err = ioremap_page_range(addr, addr + size, phys_addr, prot); 56 - if (err) { 57 - vunmap((void *)addr); 58 - return NULL; 59 - } 60 - 61 - return (void __iomem *)(offset + addr); 18 + return true; 62 19 } 63 - 64 - void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot) 65 - { 66 - return __ioremap_caller(phys_addr, size, prot, 67 - __builtin_return_address(0)); 68 - } 69 - EXPORT_SYMBOL(__ioremap); 70 - 71 - void iounmap(volatile void __iomem *io_addr) 72 - { 73 - unsigned long addr = (unsigned long)io_addr & PAGE_MASK; 74 - 75 - /* 76 - * We could get an address outside vmalloc range in case 77 - * of ioremap_cache() reusing a RAM mapping. 78 - */ 79 - if (is_vmalloc_addr((void *)addr)) 80 - vunmap((void *)addr); 81 - } 82 - EXPORT_SYMBOL(iounmap); 83 - 84 - void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size) 85 - { 86 - /* For normal memory we already have a cacheable mapping. */ 87 - if (pfn_is_map_memory(__phys_to_pfn(phys_addr))) 88 - return (void __iomem *)__phys_to_virt(phys_addr); 89 - 90 - return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL), 91 - __builtin_return_address(0)); 92 - } 93 - EXPORT_SYMBOL(ioremap_cache); 94 20 95 21 /* 96 22 * Must be called after early_fixmap_init