#ifndef _I386_PGALLOC_H
#define _I386_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>		/* for struct page */
#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/tlb.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define paravirt_alloc_pt(mm, pfn) do { } while (0)
#define paravirt_alloc_pd(mm, pfn) do { } while (0)
#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
#define paravirt_release_pt(pfn) do { } while (0)
#define paravirt_release_pd(pfn) do { } while (0)
#endif

static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
{
	unsigned long pfn = page_to_pfn(pte);

	paravirt_alloc_pt(mm, pfn);
	set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}
#define pmd_pgtable(pmd) pmd_page(pmd)

/*
 * Allocate and free page tables.
 */
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}

extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);

#ifdef CONFIG_X86_PAE
/*
 * In the PAE case we free the pmds as part of the pgd.
 */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	free_page((unsigned long)pmd);
}

extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);

static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	if (mm == current->active_mm)
		write_cr3(read_cr3());
}
#endif /* CONFIG_X86_PAE */

#endif /* _I386_PGALLOC_H */
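
A minimal sketch of how the allocation and populate helpers declared above are consumed by the generic memory-management code, loosely modeled on the __pte_alloc() path in mm/memory.c: allocate a pte page with pte_alloc_one(), then hook it into the pmd with pmd_populate() under mm->page_table_lock, or free it again if another thread raced ahead. The function name pte_alloc_sketch is invented for illustration, and the locking/barrier details of the real kernel path are simplified.

/*
 * Illustrative sketch only -- not the verbatim kernel implementation.
 */
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <asm/pgalloc.h>

static int pte_alloc_sketch(struct mm_struct *mm, pmd_t *pmd,
			    unsigned long address)
{
	pgtable_t new = pte_alloc_one(mm, address);	/* zeroed pte page */
	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (pmd_present(*pmd)) {
		/* Someone else populated this pmd first; drop our page. */
		pte_free(mm, new);
	} else {
		/* Install the new pte page into the pmd entry. */
		pmd_populate(mm, pmd, new);
	}
	spin_unlock(&mm->page_table_lock);
	return 0;
}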