@@ -394,9 +394,6 @@
  * remap a physical page `pfn' of size `size' with page protection `prot'
  * into virtual address `from'
  */
-#define io_remap_pfn_range(vma, from, pfn, size, prot) \
-	remap_pfn_range(vma, from, pfn, size, prot)
-
 #include <asm-generic/pgtable.h>
 
 /* to cope with aliasing VIPT cache */
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -79,8 +79,6 @@
  * No page table caches to initialise.
  */
 #define pgtable_cache_init() do { } while (0)
-#define io_remap_pfn_range remap_pfn_range
-
 
 /*
  * All 32bit addresses are effectively valid for vmalloc...
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -318,13 +318,6 @@
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
-/*
- * remap a physical page `pfn' of size `size' with page protection `prot'
- * into virtual address `from'
- */
-#define io_remap_pfn_range(vma,from,pfn,size,prot) \
-	remap_pfn_range(vma, from, pfn, size, prot)
-
 #define pgtable_cache_init() do { } while (0)
 
 #endif /* !__ASSEMBLY__ */
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -320,13 +320,6 @@
 
 #include <asm-generic/pgtable.h>
 
-/*
- * remap a physical page `pfn' of size `size' with page protection `prot'
- * into virtual address `from'
- */
-#define io_remap_pfn_range(vma,from,pfn,size,prot) \
-	remap_pfn_range(vma, from, pfn, size, prot)
-
 #define pgtable_cache_init() do { } while (0)
 
 #endif /* !__ASSEMBLY__ */
--- a/arch/avr32/include/asm/pgtable.h
+++ b/arch/avr32/include/asm/pgtable.h
@@ -362,9 +362,6 @@
 
 #define kern_addr_valid(addr) (1)
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
-	remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 /* No page table caches to initialize (?) */
 #define pgtable_cache_init() do { } while(0)
 
--- a/arch/blackfin/include/asm/pgtable.h
+++ b/arch/blackfin/include/asm/pgtable.h
@@ -88,7 +88,6 @@
  * No page table caches to initialise.
  */
 #define pgtable_cache_init() do { } while (0)
-#define io_remap_pfn_range remap_pfn_range
 
 /*
  * All 32bit addresses are effectively valid for vmalloc...
--- a/arch/c6x/include/asm/pgtable.h
+++ b/arch/c6x/include/asm/pgtable.h
@@ -71,7 +71,6 @@
  * No page table caches to initialise
  */
 #define pgtable_cache_init() do { } while (0)
-#define io_remap_pfn_range remap_pfn_range
 
 #include <asm-generic/pgtable.h>
 
--- a/arch/cris/include/asm/pgtable.h
+++ b/arch/cris/include/asm/pgtable.h
@@ -258,9 +258,6 @@
 #define pgd_ERROR(e) \
 	printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
-	remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */
 
@@ -52,9 +52,6 @@
  */
 #define pgtable_cache_init() do { } while (0)
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
-	remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 /*
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.
--- a/arch/hexagon/include/asm/pgtable.h
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -452,10 +452,6 @@
 
 #define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 
-/* Nothing special about IO remapping at this point */
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
-	remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 /* I think this is in case we have page table caches; needed by init/main.c */
 #define pgtable_cache_init() do { } while (0)
 
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -493,9 +493,6 @@
 #define pte_to_pgoff(pte) ((pte_val(pte) << 1) >> 3)
 #define pgoff_to_pte(off) ((pte_t) { ((off) << 2) | _PAGE_FILE })
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
-	remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
--- a/arch/m32r/include/asm/pgtable.h
+++ b/arch/m32r/include/asm/pgtable.h
@@ -347,9 +347,6 @@
 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
 #define kern_addr_valid(addr) (1)
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
-	remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
@@ -55,9 +55,6 @@
  */
 #define pgtable_cache_init() do { } while (0)
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
-	remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 /*
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.
@@ -113,9 +113,6 @@
 #define pte_clear(mm, addr, xp) \
 	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
-	remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 /*
  * The "pgd_xxx()" functions here are trivial for a folded two-level
  * setup: the pgd is never bad, and a pmd always exists (as it's folded
@@ -69,8 +69,6 @@
 #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
 #define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
 
-#define io_remap_pfn_range remap_pfn_range
-
 /*
  * The i386 can't do page protection for execute, and considers that the same
  * are read.
--- a/arch/unicore32/include/asm/pgtable.h
+++ b/arch/unicore32/include/asm/pgtable.h
@@ -303,13 +303,6 @@
 
 #include <asm-generic/pgtable.h>
 
-/*
- * remap a physical page `pfn' of size `size' with page protection `prot'
- * into virtual address `from'
- */
-#define io_remap_pfn_range(vma, from, pfn, size, prot) \
-	remap_pfn_range(vma, from, pfn, size, prot)
-
 #define pgtable_cache_init() do { } while (0)
 
 #endif /* !__ASSEMBLY__ */
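
Every io_remap_pfn_range() definition removed above expands to plain remap_pfn_range(), which is why the per-arch copies can be dropped in favour of one shared default. A minimal sketch of such a fallback follows; the location (assumed here to be include/asm-generic/pgtable.h) and the guard style are assumptions for illustration, not a quote from this patch:

	/* Fallback io_remap_pfn_range(): same expansion as the per-arch
	 * definitions removed above; an architecture that needs special
	 * handling defines its own macro before this point and skips it.
	 */
	#ifndef io_remap_pfn_range
	#define io_remap_pfn_range(vma, addr, pfn, size, prot) \
		remap_pfn_range(vma, addr, pfn, size, prot)
	#endif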