/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2001, 2003 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/sched.h>

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
	pte_t *pte)
{
	set_pmd(pmd, __pmd((unsigned long)pte));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
	pgtable_t pte)
{
	set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
#define pmd_pgtable(pmd)	pmd_page(pmd)

/*
 * Initialize a new pmd table with invalid pointers.
 */
extern void pmd_init(unsigned long page, unsigned long pagetable);

#ifndef __PAGETABLE_PMD_FOLDED

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud((unsigned long)pmd));
}
#endif

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret, *init;

	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
	if (ret) {
		init = pgd_offset(&init_mm, 0UL);
		pgd_init((unsigned long)ret);
		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return ret;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ORDER);
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
	unsigned long address)
{
	pte_t *pte;

	pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, PTE_ORDER);

	return pte;
}

static inline struct page *pte_alloc_one(struct mm_struct *mm,
	unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
	if (pte) {
		clear_highpage(pte);
		pgtable_page_ctor(pte);
	}
	return pte;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_pages((unsigned long)pte, PTE_ORDER);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_pages(pte, PTE_ORDER);
}

#define __pte_free_tlb(tlb, pte, address)	\
do {						\
	pgtable_page_dtor(pte);			\
	tlb_remove_page((tlb), pte);		\
} while (0)

#ifndef __PAGETABLE_PMD_FOLDED

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd;

	pmd = (pmd_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PMD_ORDER);
	if (pmd)
		pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
	return pmd;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	free_pages((unsigned long)pmd, PMD_ORDER);
}

#define __pmd_free_tlb(tlb, x, addr)	pmd_free((tlb)->mm, x)

#endif

#define check_pgt_cache()	do { } while (0)

extern void pagetable_init(void);

#endif /* _ASM_PGALLOC_H */