Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'virt-to-pfn-for-arch-v6.5-2' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-integrator into asm-generic

This is an attempt to harden the typing on virt_to_pfn()
and pfn_to_virt().

Making virt_to_pfn() a static inline taking a strongly typed
(const void *) makes the contract of passing a pointer of that
type to the function explicit and exposes any misuse of the
macro virt_to_pfn() acting polymorphic and accepting many types
such as (void *), (uintptr_t) or (unsigned long) as arguments
without warnings.

For symmetry, we do the same with pfn_to_virt().

The problem with this inconsistent typing was pointed out by
Russell King:
https://lore.kernel.org/linux-arm-kernel/YoJDKJXc0MJ2QZTb@shell.armlinux.org.uk/

And confirmed by Andrew Morton:
https://lore.kernel.org/linux-mm/20220701160004.2ffff4e5ab59a55499f4c736@linux-foundation.org/

So the recognition of the problem is widespread.

These platforms have been chosen as initial conversion targets:

- ARM
- ARM64/AArch64
- asm-generic (including for example x86)
- m68k

The idea is that if this goes in, it will block further misuse
of the function signatures due to the large compile coverage,
and then I can go in and fix the remaining architectures on a
one-by-one basis.

Some of the patches have been circulated before but were not
picked up by subsystem maintainers, so now the arch tree is
target for this series.

It has passed zeroday builds after a lot of iterations in my
personal tree, but there could be some randconfig outliers.
Newly added or deeply hidden problems appear all the time so
some minor fallout can be expected.

* tag 'virt-to-pfn-for-arch-v6.5-2' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-integrator:
m68k/mm: Make pfn accessors static inlines
arm64: memory: Make virt_to_pfn() a static inline
ARM: mm: Make virt_to_pfn() a static inline
asm-generic/page.h: Make pfn accessors static inlines
xen/netback: Pass (void *) to virt_to_page()
netfs: Pass a pointer to virt_to_page()
cifs: Pass a pointer to virt_to_page() in cifsglob
cifs: Pass a pointer to virt_to_page()
riscv: mm: init: Pass a pointer to virt_to_page()
ARC: init: Pass a pointer to virt_to_pfn() in init
m68k: Pass a pointer to virt_to_pfn() virt_to_page()
fs/proc/kcore.c: Pass a pointer to virt_addr_valid()

+108 -80
+1 -1
arch/arc/mm/init.c
··· 87 87 setup_initial_init_mm(_text, _etext, _edata, _end); 88 88 89 89 /* first page of system - kernel .vector starts here */ 90 - min_low_pfn = virt_to_pfn(CONFIG_LINUX_RAM_BASE); 90 + min_low_pfn = virt_to_pfn((void *)CONFIG_LINUX_RAM_BASE); 91 91 92 92 /* Last usable page of low mem */ 93 93 max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);
+1 -1
arch/arm/common/sharpsl_param.c
··· 11 11 #include <linux/module.h> 12 12 #include <linux/string.h> 13 13 #include <asm/mach/sharpsl_param.h> 14 - #include <asm/memory.h> 14 + #include <asm/page.h> 15 15 16 16 /* 17 17 * Certain hardware parameters determined at the time of device manufacture,
+1 -1
arch/arm/include/asm/delay.h
··· 7 7 #ifndef __ASM_ARM_DELAY_H 8 8 #define __ASM_ARM_DELAY_H 9 9 10 - #include <asm/memory.h> 10 + #include <asm/page.h> 11 11 #include <asm/param.h> /* HZ */ 12 12 13 13 /*
+1 -1
arch/arm/include/asm/io.h
··· 23 23 #include <linux/string.h> 24 24 #include <linux/types.h> 25 25 #include <asm/byteorder.h> 26 - #include <asm/memory.h> 26 + #include <asm/page.h> 27 27 #include <asm-generic/pci_iomap.h> 28 28 29 29 /*
+12 -5
arch/arm/include/asm/memory.h
··· 5 5 * Copyright (C) 2000-2002 Russell King 6 6 * modification for nommu, Hyok S. Choi, 2004 7 7 * 8 - * Note: this file should not be included by non-asm/.h files 8 + * Note: this file should not be included explicitly, include <asm/page.h> 9 + * to get access to these definitions. 9 10 */ 10 11 #ifndef __ASM_ARM_MEMORY_H 11 12 #define __ASM_ARM_MEMORY_H 13 + 14 + #ifndef _ASMARM_PAGE_H 15 + #error "Do not include <asm/memory.h> directly" 16 + #endif 12 17 13 18 #include <linux/compiler.h> 14 19 #include <linux/const.h> ··· 293 288 294 289 #endif 295 290 296 - #define virt_to_pfn(kaddr) \ 297 - ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \ 298 - PHYS_PFN_OFFSET) 299 - 291 + static inline unsigned long virt_to_pfn(const void *p) 292 + { 293 + unsigned long kaddr = (unsigned long)p; 294 + return (((kaddr - PAGE_OFFSET) >> PAGE_SHIFT) + 295 + PHYS_PFN_OFFSET); 296 + } 300 297 #define __pa_symbol_nodebug(x) __virt_to_phys_nodebug((x)) 301 298 302 299 #ifdef CONFIG_DEBUG_VIRTUAL
+2 -2
arch/arm/include/asm/page.h
··· 161 161 #define pfn_valid pfn_valid 162 162 #endif 163 163 164 - #include <asm/memory.h> 165 - 166 164 #endif /* !__ASSEMBLY__ */ 165 + 166 + #include <asm/memory.h> 167 167 168 168 #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC 169 169
+1 -1
arch/arm/include/asm/pgtable.h
··· 27 27 #else 28 28 29 29 #include <asm-generic/pgtable-nopud.h> 30 - #include <asm/memory.h> 30 + #include <asm/page.h> 31 31 #include <asm/pgtable-hwdef.h> 32 32 33 33
-2
arch/arm/include/asm/proc-fns.h
··· 147 147 148 148 extern void cpu_resume(void); 149 149 150 - #include <asm/memory.h> 151 - 152 150 #ifdef CONFIG_MMU 153 151 154 152 #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
+1 -1
arch/arm/include/asm/sparsemem.h
··· 2 2 #ifndef ASMARM_SPARSEMEM_H 3 3 #define ASMARM_SPARSEMEM_H 4 4 5 - #include <asm/memory.h> 5 + #include <asm/page.h> 6 6 7 7 /* 8 8 * Two definitions are required for sparsemem:
+1 -1
arch/arm/include/asm/uaccess-asm.h
··· 5 5 6 6 #include <asm/asm-offsets.h> 7 7 #include <asm/domain.h> 8 - #include <asm/memory.h> 8 + #include <asm/page.h> 9 9 #include <asm/thread_info.h> 10 10 11 11 .macro csdb
+1 -1
arch/arm/include/asm/uaccess.h
··· 9 9 * User space memory access functions 10 10 */ 11 11 #include <linux/string.h> 12 - #include <asm/memory.h> 12 + #include <asm/page.h> 13 13 #include <asm/domain.h> 14 14 #include <asm/unaligned.h> 15 15 #include <asm/unified.h>
+1 -1
arch/arm/kernel/asm-offsets.c
··· 17 17 #include <asm/glue-pf.h> 18 18 #include <asm/mach/arch.h> 19 19 #include <asm/thread_info.h> 20 - #include <asm/memory.h> 20 + #include <asm/page.h> 21 21 #include <asm/mpu.h> 22 22 #include <asm/procinfo.h> 23 23 #include <asm/suspend.h>
+1 -1
arch/arm/kernel/entry-armv.S
··· 15 15 #include <linux/init.h> 16 16 17 17 #include <asm/assembler.h> 18 - #include <asm/memory.h> 18 + #include <asm/page.h> 19 19 #include <asm/glue-df.h> 20 20 #include <asm/glue-pf.h> 21 21 #include <asm/vfpmacros.h>
+1 -1
arch/arm/kernel/entry-common.S
··· 9 9 #include <asm/unistd.h> 10 10 #include <asm/ftrace.h> 11 11 #include <asm/unwind.h> 12 - #include <asm/memory.h> 12 + #include <asm/page.h> 13 13 #ifdef CONFIG_AEABI 14 14 #include <asm/unistd-oabi.h> 15 15 #endif
+1 -1
arch/arm/kernel/entry-v7m.S
··· 6 6 * 7 7 * Low-level vector interface routines for the ARMv7-M architecture 8 8 */ 9 - #include <asm/memory.h> 9 + #include <asm/page.h> 10 10 #include <asm/glue.h> 11 11 #include <asm/thread_notify.h> 12 12 #include <asm/v7m.h>
+1 -2
arch/arm/kernel/head-nommu.S
··· 14 14 #include <asm/assembler.h> 15 15 #include <asm/ptrace.h> 16 16 #include <asm/asm-offsets.h> 17 - #include <asm/memory.h> 17 + #include <asm/page.h> 18 18 #include <asm/cp15.h> 19 19 #include <asm/thread_info.h> 20 20 #include <asm/v7m.h> 21 21 #include <asm/mpu.h> 22 - #include <asm/page.h> 23 22 24 23 /* 25 24 * Kernel startup entry point.
+1 -1
arch/arm/kernel/head.S
··· 17 17 #include <asm/domain.h> 18 18 #include <asm/ptrace.h> 19 19 #include <asm/asm-offsets.h> 20 - #include <asm/memory.h> 20 + #include <asm/page.h> 21 21 #include <asm/thread_info.h> 22 22 23 23 #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
+1 -1
arch/arm/kernel/hibernate.c
··· 19 19 #include <asm/system_misc.h> 20 20 #include <asm/idmap.h> 21 21 #include <asm/suspend.h> 22 - #include <asm/memory.h> 22 + #include <asm/page.h> 23 23 #include <asm/sections.h> 24 24 #include "reboot.h" 25 25
+1 -1
arch/arm/kernel/suspend.c
··· 8 8 #include <asm/bugs.h> 9 9 #include <asm/cacheflush.h> 10 10 #include <asm/idmap.h> 11 - #include <asm/memory.h> 11 + #include <asm/page.h> 12 12 #include <asm/smp_plat.h> 13 13 #include <asm/suspend.h> 14 14 #include <asm/tlbflush.h>
+1 -1
arch/arm/kernel/tcm.c
··· 15 15 #include <linux/string.h> /* memcpy */ 16 16 #include <asm/cputype.h> 17 17 #include <asm/mach/map.h> 18 - #include <asm/memory.h> 18 + #include <asm/page.h> 19 19 #include <asm/system_info.h> 20 20 #include <asm/traps.h> 21 21 #include <asm/tcm.h>
+1 -2
arch/arm/kernel/vmlinux-xip.lds.S
··· 12 12 #include <asm/vmlinux.lds.h> 13 13 #include <asm/cache.h> 14 14 #include <asm/thread_info.h> 15 - #include <asm/memory.h> 16 - #include <asm/mpu.h> 17 15 #include <asm/page.h> 16 + #include <asm/mpu.h> 18 17 19 18 OUTPUT_ARCH(arm) 20 19 ENTRY(stext)
+1 -2
arch/arm/kernel/vmlinux.lds.S
··· 12 12 #include <asm/vmlinux.lds.h> 13 13 #include <asm/cache.h> 14 14 #include <asm/thread_info.h> 15 - #include <asm/memory.h> 16 - #include <asm/mpu.h> 17 15 #include <asm/page.h> 16 + #include <asm/mpu.h> 18 17 19 18 OUTPUT_ARCH(arm) 20 19 ENTRY(stext)
+1 -1
arch/arm/mach-berlin/platsmp.c
··· 12 12 13 13 #include <asm/cacheflush.h> 14 14 #include <asm/cp15.h> 15 - #include <asm/memory.h> 15 + #include <asm/page.h> 16 16 #include <asm/smp_plat.h> 17 17 #include <asm/smp_scu.h> 18 18
+1 -1
arch/arm/mach-keystone/keystone.c
··· 18 18 #include <asm/mach/map.h> 19 19 #include <asm/mach/arch.h> 20 20 #include <asm/mach/time.h> 21 - #include <asm/memory.h> 21 + #include <asm/page.h> 22 22 23 23 #include "memory.h" 24 24
+1 -1
arch/arm/mach-omap2/sleep33xx.S
··· 10 10 #include <linux/platform_data/pm33xx.h> 11 11 #include <linux/ti-emif-sram.h> 12 12 #include <asm/assembler.h> 13 - #include <asm/memory.h> 13 + #include <asm/page.h> 14 14 15 15 #include "iomap.h" 16 16 #include "cm33xx.h"
+1 -1
arch/arm/mach-omap2/sleep43xx.S
··· 11 11 #include <linux/platform_data/pm33xx.h> 12 12 #include <asm/assembler.h> 13 13 #include <asm/hardware/cache-l2x0.h> 14 - #include <asm/memory.h> 14 + #include <asm/page.h> 15 15 16 16 #include "cm33xx.h" 17 17 #include "common.h"
+1 -1
arch/arm/mach-omap2/sleep44xx.S
··· 9 9 #include <linux/linkage.h> 10 10 #include <asm/assembler.h> 11 11 #include <asm/smp_scu.h> 12 - #include <asm/memory.h> 12 + #include <asm/page.h> 13 13 #include <asm/hardware/cache-l2x0.h> 14 14 15 15 #include "omap-secure.h"
+1 -1
arch/arm/mach-pxa/gumstix.c
··· 26 26 #include <linux/clk.h> 27 27 28 28 #include <asm/setup.h> 29 - #include <asm/memory.h> 29 + #include <asm/page.h> 30 30 #include <asm/mach-types.h> 31 31 #include <asm/irq.h> 32 32 #include <linux/sizes.h>
+1 -1
arch/arm/mach-rockchip/sleep.S
··· 6 6 7 7 #include <linux/linkage.h> 8 8 #include <asm/assembler.h> 9 - #include <asm/memory.h> 9 + #include <asm/page.h> 10 10 11 11 .data 12 12 /*
+1 -1
arch/arm/mach-sa1100/pm.c
··· 29 29 #include <linux/time.h> 30 30 31 31 #include <mach/hardware.h> 32 - #include <asm/memory.h> 32 + #include <asm/page.h> 33 33 #include <asm/suspend.h> 34 34 #include <asm/mach/time.h> 35 35
+1 -1
arch/arm/mach-shmobile/headsmp-scu.S
··· 7 7 8 8 #include <linux/linkage.h> 9 9 #include <linux/init.h> 10 - #include <asm/memory.h> 10 + #include <asm/page.h> 11 11 12 12 /* 13 13 * Boot code for secondary CPUs.
+1 -1
arch/arm/mach-shmobile/headsmp.S
··· 11 11 #include <linux/linkage.h> 12 12 #include <linux/threads.h> 13 13 #include <asm/assembler.h> 14 - #include <asm/memory.h> 14 + #include <asm/page.h> 15 15 16 16 #define SCTLR_MMU 0x01 17 17 #define BOOTROM_ADDRESS 0xE6340000
+1 -1
arch/arm/mach-socfpga/headsmp.S
··· 6 6 */ 7 7 #include <linux/linkage.h> 8 8 #include <linux/init.h> 9 - #include <asm/memory.h> 9 + #include <asm/page.h> 10 10 #include <asm/assembler.h> 11 11 12 12 .arch armv7-a
+1 -1
arch/arm/mach-spear/spear.h
··· 10 10 #ifndef __MACH_SPEAR_H 11 11 #define __MACH_SPEAR_H 12 12 13 - #include <asm/memory.h> 13 + #include <asm/page.h> 14 14 15 15 #if defined(CONFIG_ARCH_SPEAR3XX) || defined (CONFIG_ARCH_SPEAR6XX) 16 16
-1
arch/arm/mm/cache-fa.S
··· 13 13 #include <linux/linkage.h> 14 14 #include <linux/init.h> 15 15 #include <asm/assembler.h> 16 - #include <asm/memory.h> 17 16 #include <asm/page.h> 18 17 19 18 #include "proc-macros.S"
-1
arch/arm/mm/cache-v4wb.S
··· 7 7 #include <linux/linkage.h> 8 8 #include <linux/init.h> 9 9 #include <asm/assembler.h> 10 - #include <asm/memory.h> 11 10 #include <asm/page.h> 12 11 #include "proc-macros.S" 13 12
+1 -1
arch/arm/mm/dma-mapping.c
··· 25 25 #include <linux/sizes.h> 26 26 #include <linux/cma.h> 27 27 28 - #include <asm/memory.h> 28 + #include <asm/page.h> 29 29 #include <asm/highmem.h> 30 30 #include <asm/cacheflush.h> 31 31 #include <asm/tlbflush.h>
+1 -1
arch/arm/mm/dump.c
··· 15 15 16 16 #include <asm/domain.h> 17 17 #include <asm/fixmap.h> 18 - #include <asm/memory.h> 18 + #include <asm/page.h> 19 19 #include <asm/ptdump.h> 20 20 21 21 static struct addr_marker address_markers[] = {
+1 -1
arch/arm/mm/init.c
··· 26 26 #include <asm/cp15.h> 27 27 #include <asm/mach-types.h> 28 28 #include <asm/memblock.h> 29 - #include <asm/memory.h> 29 + #include <asm/page.h> 30 30 #include <asm/prom.h> 31 31 #include <asm/sections.h> 32 32 #include <asm/setup.h>
-1
arch/arm/mm/kasan_init.c
··· 17 17 #include <asm/cputype.h> 18 18 #include <asm/highmem.h> 19 19 #include <asm/mach/map.h> 20 - #include <asm/memory.h> 21 20 #include <asm/page.h> 22 21 #include <asm/pgalloc.h> 23 22 #include <asm/procinfo.h>
+1 -1
arch/arm/mm/mmu.c
··· 26 26 #include <asm/system_info.h> 27 27 #include <asm/traps.h> 28 28 #include <asm/procinfo.h> 29 - #include <asm/memory.h> 29 + #include <asm/page.h> 30 30 #include <asm/pgalloc.h> 31 31 #include <asm/kasan_def.h> 32 32
+1 -1
arch/arm/mm/physaddr.c
··· 6 6 #include <linux/mm.h> 7 7 8 8 #include <asm/sections.h> 9 - #include <asm/memory.h> 9 + #include <asm/page.h> 10 10 #include <asm/fixmap.h> 11 11 #include <asm/dma.h> 12 12
+1 -1
arch/arm/mm/pmsa-v8.c
··· 11 11 #include <asm/cputype.h> 12 12 #include <asm/mpu.h> 13 13 14 - #include <asm/memory.h> 14 + #include <asm/page.h> 15 15 #include <asm/sections.h> 16 16 17 17 #include "mm.h"
+1 -1
arch/arm/mm/proc-v7.S
··· 14 14 #include <asm/asm-offsets.h> 15 15 #include <asm/hwcap.h> 16 16 #include <asm/pgtable-hwdef.h> 17 - #include <asm/memory.h> 17 + #include <asm/page.h> 18 18 19 19 #include "proc-macros.S" 20 20
+1 -1
arch/arm/mm/proc-v7m.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <asm/assembler.h> 12 - #include <asm/memory.h> 12 + #include <asm/page.h> 13 13 #include <asm/v7m.h> 14 14 #include "proc-macros.S" 15 15
+1 -1
arch/arm/mm/pv-fixup-asm.S
··· 9 9 #include <linux/pgtable.h> 10 10 #include <asm/asm-offsets.h> 11 11 #include <asm/cp15.h> 12 - #include <asm/memory.h> 12 + #include <asm/page.h> 13 13 14 14 .section ".idmap.text", "ax" 15 15
+8 -1
arch/arm64/include/asm/memory.h
··· 331 331 return (void *)(__phys_to_virt(x)); 332 332 } 333 333 334 + /* Needed already here for resolving __phys_to_pfn() in virt_to_pfn() */ 335 + #include <asm-generic/memory_model.h> 336 + 337 + static inline unsigned long virt_to_pfn(const void *kaddr) 338 + { 339 + return __phys_to_pfn(virt_to_phys(kaddr)); 340 + } 341 + 334 342 /* 335 343 * Drivers should NOT use these either. 336 344 */ ··· 347 339 #define __pa_nodebug(x) __virt_to_phys_nodebug((unsigned long)(x)) 348 340 #define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) 349 341 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) 350 - #define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys((unsigned long)(x))) 351 342 #define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x)) 352 343 353 344 /*
+1 -2
arch/m68k/include/asm/mcf_pgtable.h
··· 115 115 pgd_val(*pgdp) = virt_to_phys(pmdp); 116 116 } 117 117 118 - #define __pte_page(pte) ((unsigned long) (pte_val(pte) & PAGE_MASK)) 118 + #define __pte_page(pte) ((void *) (pte_val(pte) & PAGE_MASK)) 119 119 #define pmd_page_vaddr(pmd) ((unsigned long) (pmd_val(pmd))) 120 120 121 121 static inline int pte_none(pte_t pte) ··· 134 134 pte_val(*ptep) = 0; 135 135 } 136 136 137 - #define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT) 138 137 #define pte_page(pte) virt_to_page(__pte_page(pte)) 139 138 140 139 static inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); }
+9 -2
arch/m68k/include/asm/page_mm.h
··· 121 121 * TODO: implement (fast) pfn<->pgdat_idx conversion functions, this makes lots 122 122 * of the shifts unnecessary. 123 123 */ 124 - #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) 125 - #define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) 124 + static inline unsigned long virt_to_pfn(const void *kaddr) 125 + { 126 + return __pa(kaddr) >> PAGE_SHIFT; 127 + } 128 + 129 + static inline void *pfn_to_virt(unsigned long pfn) 130 + { 131 + return __va(pfn << PAGE_SHIFT); 132 + } 126 133 127 134 extern int m68k_virt_to_node_shift; 128 135
+9 -2
arch/m68k/include/asm/page_no.h
··· 19 19 #define __pa(vaddr) ((unsigned long)(vaddr)) 20 20 #define __va(paddr) ((void *)((unsigned long)(paddr))) 21 21 22 - #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) 23 - #define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) 22 + static inline unsigned long virt_to_pfn(const void *kaddr) 23 + { 24 + return __pa(kaddr) >> PAGE_SHIFT; 25 + } 26 + 27 + static inline void *pfn_to_virt(unsigned long pfn) 28 + { 29 + return __va(pfn << PAGE_SHIFT); 30 + } 24 31 25 32 #define virt_to_page(addr) (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)) 26 33 #define page_to_virt(page) __va(((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET))
+2 -2
arch/m68k/include/asm/sun3_pgtable.h
··· 91 91 #define pmd_set(pmdp,ptep) do {} while (0) 92 92 93 93 #define __pte_page(pte) \ 94 - ((unsigned long) __va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT)) 94 + (__va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT)) 95 95 96 96 static inline unsigned long pmd_page_vaddr(pmd_t pmd) 97 97 { ··· 111 111 112 112 #define pte_page(pte) virt_to_page(__pte_page(pte)) 113 113 #define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT) 114 - #define pmd_page(pmd) virt_to_page(pmd_page_vaddr(pmd)) 114 + #define pmd_page(pmd) virt_to_page((void *)pmd_page_vaddr(pmd)) 115 115 116 116 117 117 static inline int pmd_none2 (pmd_t *pmd) { return !pmd_val (*pmd); }
+2 -1
arch/m68k/mm/mcfmmu.c
··· 69 69 70 70 /* now change pg_table to kernel virtual addresses */ 71 71 for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) { 72 - pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT); 72 + pte_t pte = pfn_pte(virt_to_pfn((void *)address), 73 + PAGE_INIT); 73 74 if (address >= (unsigned long) high_memory) 74 75 pte_val(pte) = 0; 75 76
+2 -2
arch/m68k/mm/motorola.c
··· 102 102 LIST_HEAD_INIT(ptable_list[1]), 103 103 }; 104 104 105 - #define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru)) 105 + #define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page((void *)(page))->lru)) 106 106 #define PD_PAGE(ptable) (list_entry(ptable, struct page, lru)) 107 107 #define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index) 108 108 ··· 201 201 list_del(dp); 202 202 mmu_page_dtor((void *)page); 203 203 if (type == TABLE_PTE) 204 - pgtable_pte_page_dtor(virt_to_page(page)); 204 + pgtable_pte_page_dtor(virt_to_page((void *)page)); 205 205 free_page (page); 206 206 return 1; 207 207 } else if (ptable_list[type].next != dp) {
+1 -1
arch/m68k/mm/sun3mmu.c
··· 75 75 /* now change pg_table to kernel virtual addresses */ 76 76 pg_table = (pte_t *) __va ((unsigned long) pg_table); 77 77 for (i=0; i<PTRS_PER_PTE; ++i, ++pg_table) { 78 - pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT); 78 + pte_t pte = pfn_pte(virt_to_pfn((void *)address), PAGE_INIT); 79 79 if (address >= (unsigned long)high_memory) 80 80 pte_val (pte) = 0; 81 81 set_pte (pg_table, pte);
+1 -1
arch/m68k/sun3/dvma.c
··· 29 29 j = *(volatile unsigned long *)kaddr; 30 30 *(volatile unsigned long *)kaddr = j; 31 31 32 - ptep = pfn_pte(virt_to_pfn(kaddr), PAGE_KERNEL); 32 + ptep = pfn_pte(virt_to_pfn((void *)kaddr), PAGE_KERNEL); 33 33 pte = pte_val(ptep); 34 34 // pr_info("dvma_remap: addr %lx -> %lx pte %08lx\n", kaddr, vaddr, pte); 35 35 if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) {
+1 -1
arch/m68k/sun3x/dvma.c
··· 125 125 do { 126 126 pr_debug("mapping %08lx phys to %08lx\n", 127 127 __pa(kaddr), vaddr); 128 - set_pte(pte, pfn_pte(virt_to_pfn(kaddr), 128 + set_pte(pte, pfn_pte(virt_to_pfn((void *)kaddr), 129 129 PAGE_KERNEL)); 130 130 pte++; 131 131 kaddr += PAGE_SIZE;
+2 -2
arch/riscv/mm/init.c
··· 356 356 unsigned long vaddr; 357 357 358 358 vaddr = __get_free_page(GFP_KERNEL); 359 - BUG_ON(!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr))); 359 + BUG_ON(!vaddr || !pgtable_pte_page_ctor(virt_to_page((void *)vaddr))); 360 360 361 361 return __pa(vaddr); 362 362 } ··· 439 439 unsigned long vaddr; 440 440 441 441 vaddr = __get_free_page(GFP_KERNEL); 442 - BUG_ON(!vaddr || !pgtable_pmd_page_ctor(virt_to_page(vaddr))); 442 + BUG_ON(!vaddr || !pgtable_pmd_page_ctor(virt_to_page((void *)vaddr))); 443 443 444 444 return __pa(vaddr); 445 445 }
+1 -1
drivers/memory/ti-emif-sram-pm.S
··· 8 8 9 9 #include <linux/linkage.h> 10 10 #include <asm/assembler.h> 11 - #include <asm/memory.h> 11 + #include <asm/page.h> 12 12 13 13 #include "emif.h" 14 14 #include "ti-emif-asm-offsets.h"
+1 -1
drivers/net/xen-netback/netback.c
··· 689 689 prev_pending_idx = pending_idx; 690 690 691 691 txp = &queue->pending_tx_info[pending_idx].req; 692 - page = virt_to_page(idx_to_kaddr(queue, pending_idx)); 692 + page = virt_to_page((void *)idx_to_kaddr(queue, pending_idx)); 693 693 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size); 694 694 skb->len += txp->size; 695 695 skb->data_len += txp->size;
+1 -1
fs/netfs/iterator.c
··· 240 240 if (is_vmalloc_or_module_addr((void *)kaddr)) 241 241 page = vmalloc_to_page((void *)kaddr); 242 242 else 243 - page = virt_to_page(kaddr); 243 + page = virt_to_page((void *)kaddr); 244 244 245 245 sg_set_page(sg, page, len, off); 246 246 sgtable->nents++;
+1 -1
fs/proc/kcore.c
··· 199 199 ent->addr = (unsigned long)page_to_virt(p); 200 200 ent->size = nr_pages << PAGE_SHIFT; 201 201 202 - if (!virt_addr_valid(ent->addr)) 202 + if (!virt_addr_valid((void *)ent->addr)) 203 203 goto free_out; 204 204 205 205 /* cut not-mapped area. ....from ppc-32 code. */
+1 -1
fs/smb/client/cifsglob.h
··· 2218 2218 } while (buflen); 2219 2219 } else { 2220 2220 sg_set_page(&sgtable->sgl[sgtable->nents++], 2221 - virt_to_page(addr), buflen, off); 2221 + virt_to_page((void *)addr), buflen, off); 2222 2222 } 2223 2223 } 2224 2224
+1 -1
fs/smb/client/smbdirect.c
··· 2500 2500 if (is_vmalloc_or_module_addr((void *)kaddr)) 2501 2501 page = vmalloc_to_page((void *)kaddr); 2502 2502 else 2503 - page = virt_to_page(kaddr); 2503 + page = virt_to_page((void *)kaddr); 2504 2504 2505 2505 if (!smb_set_sge(rdma, page, off, seg)) 2506 2506 return -EIO;
+10 -2
include/asm-generic/page.h
··· 74 74 #define __va(x) ((void *)((unsigned long) (x))) 75 75 #define __pa(x) ((unsigned long) (x)) 76 76 77 - #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) 78 - #define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) 77 + static inline unsigned long virt_to_pfn(const void *kaddr) 78 + { 79 + return __pa(kaddr) >> PAGE_SHIFT; 80 + } 81 + #define virt_to_pfn virt_to_pfn 82 + static inline void *pfn_to_virt(unsigned long pfn) 83 + { 84 + return __va(pfn << PAGE_SHIFT); 85 + } 86 + #define pfn_to_virt pfn_to_virt 79 87 88 #define virt_to_page(addr) pfn_to_page(virt_to_pfn(addr)) 80 89 #define page_to_virt(page) pfn_to_virt(page_to_pfn(page))