#ifndef _ASM_IA64_PGALLOC_H
#define _ASM_IA64_PGALLOC_H

/*
 * This file contains the functions and defines necessary to allocate
 * page tables.
 *
 * This hopefully works with any (fixed) ia-64 page-size, as defined
 * in <asm/page.h> (currently 8192).
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
 */

#include <linux/compiler.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/threads.h>

#include <asm/mmu_context.h>

/*
 * Per-CPU cache of free, already-zeroed page-table pages.  The list is
 * threaded through the first word of each free page, so it needs no
 * bookkeeping structure of its own.
 */
DECLARE_PER_CPU(unsigned long *, __pgtable_quicklist);
#define pgtable_quicklist __ia64_per_cpu_var(__pgtable_quicklist)
DECLARE_PER_CPU(long, __pgtable_quicklist_size);
#define pgtable_quicklist_size __ia64_per_cpu_var(__pgtable_quicklist_size)

static inline long pgtable_quicklist_total_size(void)
{
	long ql_size = 0;
	int cpuid;

	for_each_online_cpu(cpuid) {
		ql_size += per_cpu(__pgtable_quicklist_size, cpuid);
	}
	return ql_size;
}

static inline void *pgtable_quicklist_alloc(void)
{
	unsigned long *ret = NULL;

	preempt_disable();

	ret = pgtable_quicklist;
	if (likely(ret != NULL)) {
		/* Unlink the head page and clear its link word so the
		 * page handed out is again fully zeroed. */
		pgtable_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		--pgtable_quicklist_size;
		preempt_enable();
	} else {
		/* Quicklist empty: fall back to the page allocator. */
		preempt_enable();
		ret = (unsigned long *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	}

	return ret;
}

static inline void pgtable_quicklist_free(void *pgtable_entry)
{
#ifdef CONFIG_NUMA
	/* Keep the quicklist node-local: return off-node pages to the
	 * page allocator instead of caching them here. */
	int nid = page_to_nid(virt_to_page(pgtable_entry));

	if (unlikely(nid != numa_node_id())) {
		free_page((unsigned long)pgtable_entry);
		return;
	}
#endif

	preempt_disable();
	*(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist;
	pgtable_quicklist = (unsigned long *)pgtable_entry;
	++pgtable_quicklist_size;
	preempt_enable();
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return pgtable_quicklist_alloc();
}

static inline void pgd_free(pgd_t * pgd)
{
	pgtable_quicklist_free(pgd);
}

#ifdef CONFIG_PGTABLE_4
static inline void
pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
{
	pgd_val(*pgd_entry) = __pa(pud);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pgtable_quicklist_alloc();
}

static inline void pud_free(pud_t * pud)
{
	pgtable_quicklist_free(pud);
}
#define __pud_free_tlb(tlb, pud)	pud_free(pud)
#endif /* CONFIG_PGTABLE_4 */

static inline void
pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
{
	pud_val(*pud_entry) = __pa(pmd);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pgtable_quicklist_alloc();
}

static inline void pmd_free(pmd_t * pmd)
{
	pgtable_quicklist_free(pmd);
}

#define __pmd_free_tlb(tlb, pmd)	pmd_free(pmd)

static inline void
pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
{
	pmd_val(*pmd_entry) = page_to_phys(pte);
}

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
{
	pmd_val(*pmd_entry) = __pa(pte);
}

static inline struct page *pte_alloc_one(struct mm_struct *mm,
					 unsigned long addr)
{
	void *pg = pgtable_quicklist_alloc();
	return pg ? virt_to_page(pg) : NULL;
}
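
/*
 * Note the asymmetry: pte_alloc_one() above hands back user PTE pages
 * as struct page pointers, while the kernel-mapping variants below use
 * kernel virtual addresses directly.  Both draw from the same per-CPU
 * quicklist.
 */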

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long addr)
{
	return pgtable_quicklist_alloc();
}

static inline void pte_free(struct page *pte)
{
	pgtable_quicklist_free(page_address(pte));
}

static inline void pte_free_kernel(pte_t * pte)
{
	pgtable_quicklist_free(pte);
}

#define __pte_free_tlb(tlb, pte)	pte_free(pte)

extern void check_pgt_cache(void);

#endif /* _ASM_IA64_PGALLOC_H */
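
/*
 * A minimal user-space sketch of the technique implemented by
 * pgtable_quicklist_alloc()/pgtable_quicklist_free() above: a freelist
 * threaded through the first word of each free page.  ql_pop(), ql_push(),
 * PAGE_BYTES and main() are hypothetical illustration only (no per-CPU
 * state, preemption control or NUMA handling), not kernel code.
 */
#include <stdlib.h>

#define PAGE_BYTES 8192			/* the ia-64 page size noted above */

static unsigned long *quicklist;	/* freelist head; NULL when empty */

static void *ql_pop(void)
{
	unsigned long *ret = quicklist;

	if (ret) {
		quicklist = (unsigned long *)*ret;	/* follow embedded link */
		ret[0] = 0;		/* clear link: page is fully zeroed again */
		return ret;
	}
	return calloc(1, PAGE_BYTES);	/* list empty: zeroed fallback allocation */
}

static void ql_push(void *page)
{
	*(unsigned long *)page = (unsigned long)quicklist;	/* link to old head */
	quicklist = page;
}

int main(void)
{
	void *a = ql_pop();	/* falls back to calloc() */
	ql_push(a);		/* a now heads the freelist */
	void *b = ql_pop();	/* returns the same, re-zeroed page */

	return a == b ? 0 : 1;
}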