Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: Wire up /proc/vmallocinfo to our ioremap()

This adds the necessary bits and pieces to the powerpc implementation of
ioremap so it benefits from caller tracking in /proc/vmallocinfo, at least
for ioremaps done after mem init, as the older ones aren't tracked.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

+38 -15
+6
arch/powerpc/include/asm/io.h
··· 632 632 * ioremap_flags and cannot be hooked (but can be used by a hook on one 633 633 * of the previous ones) 634 634 * 635 + * * __ioremap_caller is the same as above but takes an explicit caller 636 + * reference rather than using __builtin_return_address(0) 637 + * 635 638 * * __iounmap, is the low level implementation used by iounmap and cannot 636 639 * be hooked (but can be used by a hook on iounmap) 637 640 * ··· 649 646 650 647 extern void __iomem *__ioremap(phys_addr_t, unsigned long size, 651 648 unsigned long flags); 649 + extern void __iomem *__ioremap_caller(phys_addr_t, unsigned long size, 650 + unsigned long flags, void *caller); 651 + 652 652 extern void __iounmap(volatile void __iomem *addr); 653 653 654 654 extern void __iomem * __ioremap_at(phys_addr_t pa, void *ea,
+1 -1
arch/powerpc/include/asm/machdep.h
··· 90 90 void (*tce_flush)(struct iommu_table *tbl); 91 91 92 92 void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size, 93 - unsigned long flags); 93 + unsigned long flags, void *caller); 94 94 void (*iounmap)(volatile void __iomem *token); 95 95 96 96 #ifdef CONFIG_PM
+11 -3
arch/powerpc/mm/pgtable_32.c
··· 129 129 void __iomem * 130 130 ioremap(phys_addr_t addr, unsigned long size) 131 131 { 132 - return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED); 132 + return __ioremap_caller(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED, 133 + __builtin_return_address(0)); 133 134 } 134 135 EXPORT_SYMBOL(ioremap); 135 136 ··· 144 143 /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */ 145 144 flags &= ~(_PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC); 146 145 147 - return __ioremap(addr, size, flags); 146 + return __ioremap_caller(addr, size, flags, __builtin_return_address(0)); 148 147 } 149 148 EXPORT_SYMBOL(ioremap_flags); 150 149 151 150 void __iomem * 152 151 __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags) 152 + { 153 + return __ioremap_caller(addr, size, flags, __builtin_return_address(0)); 154 + } 155 + 156 + void __iomem * 157 + __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags, 158 + void *caller) 153 159 { 154 160 unsigned long v, i; 155 161 phys_addr_t p; ··· 220 212 221 213 if (mem_init_done) { 222 214 struct vm_struct *area; 223 - area = get_vm_area(size, VM_IOREMAP); 215 + area = get_vm_area_caller(size, VM_IOREMAP, caller); 224 216 if (area == 0) 225 217 return NULL; 226 218 v = (unsigned long) area->addr;
+17 -8
arch/powerpc/mm/pgtable_64.c
··· 144 144 unmap_kernel_range((unsigned long)ea, size); 145 145 } 146 146 147 - void __iomem * __ioremap(phys_addr_t addr, unsigned long size, 148 - unsigned long flags) 147 + void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size, 148 + unsigned long flags, void *caller) 149 149 { 150 150 phys_addr_t paligned; 151 151 void __iomem *ret; ··· 168 168 if (mem_init_done) { 169 169 struct vm_struct *area; 170 170 171 - area = __get_vm_area(size, VM_IOREMAP, 172 - ioremap_bot, IOREMAP_END); 171 + area = __get_vm_area_caller(size, VM_IOREMAP, 172 + ioremap_bot, IOREMAP_END, 173 + caller); 173 174 if (area == NULL) 174 175 return NULL; 175 176 ret = __ioremap_at(paligned, area->addr, size, flags); ··· 187 186 return ret; 188 187 } 189 188 189 + void __iomem * __ioremap(phys_addr_t addr, unsigned long size, 190 + unsigned long flags) 191 + { 192 + return __ioremap_caller(addr, size, flags, __builtin_return_address(0)); 193 + } 190 194 191 195 void __iomem * ioremap(phys_addr_t addr, unsigned long size) 192 196 { 193 197 unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED; 198 + void *caller = __builtin_return_address(0); 194 199 195 200 if (ppc_md.ioremap) 196 - return ppc_md.ioremap(addr, size, flags); 197 - return __ioremap(addr, size, flags); 201 + return ppc_md.ioremap(addr, size, flags, caller); 202 + return __ioremap_caller(addr, size, flags, caller); 198 203 } 199 204 200 205 void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size, 201 206 unsigned long flags) 202 207 { 208 + void *caller = __builtin_return_address(0); 209 + 203 210 /* writeable implies dirty for kernel addresses */ 204 211 if (flags & _PAGE_RW) 205 212 flags |= _PAGE_DIRTY; ··· 216 207 flags &= ~(_PAGE_USER | _PAGE_EXEC); 217 208 218 209 if (ppc_md.ioremap) 219 - return ppc_md.ioremap(addr, size, flags); 220 - return __ioremap(addr, size, flags); 210 + return ppc_md.ioremap(addr, size, flags, caller); 211 + return __ioremap_caller(addr, size, flags, caller); 221 212 } 222 
213 223 214
+2 -2
arch/powerpc/platforms/cell/io-workarounds.c
··· 131 131 }; 132 132 133 133 static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size, 134 - unsigned long flags) 134 + unsigned long flags, void *caller) 135 135 { 136 136 struct iowa_bus *bus; 137 - void __iomem *res = __ioremap(addr, size, flags); 137 + void __iomem *res = __ioremap_caller(addr, size, flags, caller); 138 138 int busno; 139 139 140 140 bus = iowa_pci_find(0, (unsigned long)addr);
+1 -1
arch/powerpc/platforms/iseries/setup.c
··· 617 617 } 618 618 619 619 static void __iomem *iseries_ioremap(phys_addr_t address, unsigned long size, 620 - unsigned long flags) 620 + unsigned long flags, void *caller) 621 621 { 622 622 return (void __iomem *)address; 623 623 }