/* x86_64 page-table allocation helpers, as of Linux v2.6.25 */
#ifndef _X86_64_PGALLOC_H
#define _X86_64_PGALLOC_H

#include <asm/pda.h>
#include <linux/threads.h>
#include <linux/mm.h>

/*
 * Install a freshly allocated lower-level table into an upper-level
 * entry: the table's physical address plus the _PAGE_TABLE access bits.
 */
#define pmd_populate_kernel(mm, pmd, pte) \
		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
#define pud_populate(mm, pud, pmd) \
		set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)))
#define pgd_populate(mm, pgd, pud) \
		set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)))

#define pmd_pgtable(pmd) pmd_page(pmd)

/* User pte pages are handed around as struct page, not kernel pointers. */
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
{
	set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
}
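
/*
 * Note (illustrative, not in the original header): the populate helpers
 * above all build an entry the same way.  A page-table page is 4 KiB
 * aligned, so the low 12 bits of its physical address are free and
 * carry _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
 * _PAGE_ACCESSED | _PAGE_DIRTY, i.e. 0x67 on this kernel).  For a pte
 * page with pfn 0x1234, for example, pmd_populate() stores
 * (0x1234UL << PAGE_SHIFT) | _PAGE_TABLE == 0x1234067.
 */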

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	free_page((unsigned long)pmd);
}

/* pmd and pud pages are plain zeroed pages; __GFP_REPEAT asks the
   allocator to retry harder before failing the allocation. */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
	free_page((unsigned long)pud);
}

/*
 * Every pgd in the system sits on pgd_list so that changes to the
 * kernel half of the address space can be synchronized across all
 * mms (e.g. by vmalloc_sync_all()); pgd_lock is taken irq-safe.
 */
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	unsigned long flags;

	spin_lock_irqsave(&pgd_lock, flags);
	list_add(&page->lru, &pgd_list);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	unsigned long flags;

	spin_lock_irqsave(&pgd_lock, flags);
	list_del(&page->lru);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned boundary;
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	if (!pgd)
		return NULL;
	pgd_list_add(pgd);
	/*
	 * Copy kernel pointers in from init.
	 * Could keep a freelist or slab cache of those because the kernel
	 * part never changes.
	 */
	boundary = pgd_index(__PAGE_OFFSET);
	memset(pgd, 0, boundary * sizeof(pgd_t));
	memcpy(pgd + boundary,
	       init_level4_pgt + boundary,
	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
	return pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
	pgd_list_del(pgd);
	free_page((unsigned long)pgd);
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}

/* pgtable_page_ctor() sets up the split page-table lock and the
   NR_PAGETABLE accounting for a user pte page. */
static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *page;
	void *p;

	p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
	if (!p)
		return NULL;
	page = virt_to_page(p);
	pgtable_page_ctor(page);
	return page;
}

/* Should really implement gc for free page table pages. This could be
   done with a reference count in struct page. */

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}

#define __pte_free_tlb(tlb,pte)				\
do {							\
	pgtable_page_dtor((pte));			\
	tlb_remove_page((tlb), (pte));			\
} while (0)

#define __pmd_free_tlb(tlb,x)	tlb_remove_page((tlb),virt_to_page(x))
#define __pud_free_tlb(tlb,x)	tlb_remove_page((tlb),virt_to_page(x))

#endif /* _X86_64_PGALLOC_H */
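
/*
 * Illustrative sketch (not part of the header): the boundary arithmetic
 * used by pgd_alloc() above, extracted into a standalone user-space
 * program.  The constants are assumptions matching 2.6.25-era x86_64
 * (PGDIR_SHIFT == 39, PTRS_PER_PGD == 512, and the kernel's
 * __PAGE_OFFSET == 0xffff810000000000UL, renamed here to avoid the
 * reserved identifier); pgd_index() is re-derived from its kernel
 * definition, not taken verbatim.
 */
#include <stdio.h>

#define PGDIR_SHIFT	39			/* each pgd entry covers 512 GiB */
#define PTRS_PER_PGD	512
#define PAGE_OFFSET	0xffff810000000000UL	/* start of the kernel half */

/* Which of the 512 top-level entries covers a given virtual address. */
#define pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

int main(void)
{
	unsigned long boundary = pgd_index(PAGE_OFFSET);

	/*
	 * pgd_alloc() zeroes entries [0, boundary) -- the user half is
	 * private to each mm -- and memcpy()s entries [boundary, 512)
	 * from init_level4_pgt, so every mm shares the kernel mappings.
	 */
	printf("boundary = %lu\n", boundary);
	printf("zeroed   : pgd[0..%lu]\n", boundary - 1);
	printf("copied   : pgd[%lu..%d]\n", boundary, PTRS_PER_PGD - 1);
	return 0;
}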