arch/powerpc/include/asm/pgtable.h at v4.11
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgtable.h>
#else
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */

#ifndef __ASSEMBLY__

#include <asm/tlbflush.h>

/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

void limit_zone_pfn(enum zone_type zone, unsigned long max_pfn);
int dma_pfn_limit_to_zone(u64 pfn_limit);
extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address. Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test. What should we do here?
 */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>


/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);

extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		       unsigned long end, int write,
		       struct page **pages, int *nr);
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)		0
#endif
pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				   bool *is_thp, unsigned *shift);
static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
					       bool *is_thp, unsigned *shift)
{
	VM_WARN(!arch_irqs_disabled(),
		"%s called with irq enabled\n", __func__);
	return __find_linux_pte_or_hugepte(pgdir, ea, is_thp, shift);
}

unsigned long vmalloc_to_phys(void *vmalloc_addr);

void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_H */
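
The find_linux_pte_or_hugepte() wrapper declared above exists to catch callers that walk the page tables with interrupts enabled, since the lockless walk is only safe while interrupts are off. Below is a minimal usage sketch, not taken from the kernel tree: the helper name example_ea_to_pfn is made up, and the surrounding includes are assumed to be whatever the real caller would already have.

#include <linux/mm.h>
#include <linux/irqflags.h>
#include <asm/pgtable.h>

/*
 * Hypothetical helper, not part of this header: translate an effective
 * address in @mm to a PFN using the walker declared above. Interrupts
 * are disabled across the walk so the page tables cannot be freed
 * underneath us, which is exactly the condition the VM_WARN() in
 * find_linux_pte_or_hugepte() checks for.
 */
static unsigned long example_ea_to_pfn(struct mm_struct *mm, unsigned long ea)
{
	unsigned long flags, pfn = 0;
	unsigned int shift;
	bool is_thp;
	pte_t *ptep;

	local_irq_save(flags);
	ptep = find_linux_pte_or_hugepte(mm->pgd, ea, &is_thp, &shift);
	if (ptep && pte_present(*ptep))
		pfn = pte_pfn(*ptep);
	local_irq_restore(flags);

	/* A real caller would also honour @shift for huge mappings. */
	return pfn;
}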