include/asm-ia64/pgalloc.h (Linux v2.6.17)
#ifndef _ASM_IA64_PGALLOC_H
#define _ASM_IA64_PGALLOC_H

/*
 * This file contains the functions and defines necessary to allocate
 * page tables.
 *
 * This hopefully works with any (fixed) ia-64 page-size, as defined
 * in <asm/page.h> (currently 8192).
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
 */

#include <linux/config.h>

#include <linux/compiler.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/threads.h>

#include <asm/mmu_context.h>

/*
 * Free page-table pages are kept on a per-CPU singly-linked list
 * (the "quicklist"); the first word of each free page points to the
 * next one.  pgtable_quicklist_size counts the pages on this CPU's
 * list.
 */
DECLARE_PER_CPU(unsigned long *, __pgtable_quicklist);
#define pgtable_quicklist __ia64_per_cpu_var(__pgtable_quicklist)
DECLARE_PER_CPU(long, __pgtable_quicklist_size);
#define pgtable_quicklist_size __ia64_per_cpu_var(__pgtable_quicklist_size)

static inline long pgtable_quicklist_total_size(void)
{
	long ql_size = 0;
	int cpuid;

	for_each_online_cpu(cpuid) {
		ql_size += per_cpu(__pgtable_quicklist_size, cpuid);
	}
	return ql_size;
}

static inline void *pgtable_quicklist_alloc(void)
{
	unsigned long *ret = NULL;

	preempt_disable();

	ret = pgtable_quicklist;
	if (likely(ret != NULL)) {
		/* pop the head of this CPU's list and re-zero the link word */
		pgtable_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		--pgtable_quicklist_size;
		preempt_enable();
	} else {
		/* list is empty; fall back to the page allocator */
		preempt_enable();
		ret = (unsigned long *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	}

	return ret;
}

static inline void pgtable_quicklist_free(void *pgtable_entry)
{
#ifdef CONFIG_NUMA
	/* only cache pages that belong to the local node */
	unsigned long nid = page_to_nid(virt_to_page(pgtable_entry));

	if (unlikely(nid != numa_node_id())) {
		free_page((unsigned long)pgtable_entry);
		return;
	}
#endif

	/* push the page onto the head of this CPU's list */
	preempt_disable();
	*(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist;
	pgtable_quicklist = (unsigned long *)pgtable_entry;
	++pgtable_quicklist_size;
	preempt_enable();
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return pgtable_quicklist_alloc();
}

static inline void pgd_free(pgd_t * pgd)
{
	pgtable_quicklist_free(pgd);
}

#ifdef CONFIG_PGTABLE_4
/* with 4-level page tables, the pgd entry points to a pud */
static inline void
pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
{
	pgd_val(*pgd_entry) = __pa(pud);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pgtable_quicklist_alloc();
}

static inline void pud_free(pud_t * pud)
{
	pgtable_quicklist_free(pud);
}
#define __pud_free_tlb(tlb, pud)	pud_free(pud)
#endif /* CONFIG_PGTABLE_4 */

static inline void
pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
{
	pud_val(*pud_entry) = __pa(pmd);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pgtable_quicklist_alloc();
}

static inline void pmd_free(pmd_t * pmd)
{
	pgtable_quicklist_free(pmd);
}

#define __pmd_free_tlb(tlb, pmd)	pmd_free(pmd)

static inline void
pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
{
	pmd_val(*pmd_entry) = page_to_phys(pte);
}

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
{
	pmd_val(*pmd_entry) = __pa(pte);
}

static inline struct page *pte_alloc_one(struct mm_struct *mm,
					 unsigned long addr)
{
	return virt_to_page(pgtable_quicklist_alloc());
}
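/*
 * pte_alloc_one() above hands back the struct page of a user PTE
 * page, while pte_alloc_one_kernel() below returns a kernel virtual
 * address for kernel page tables; both draw from, and free back to,
 * the same per-CPU quicklist.
 */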
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long addr)
{
	return pgtable_quicklist_alloc();
}

static inline void pte_free(struct page *pte)
{
	pgtable_quicklist_free(page_address(pte));
}

static inline void pte_free_kernel(pte_t * pte)
{
	pgtable_quicklist_free(pte);
}

#define __pte_free_tlb(tlb, pte)	pte_free(pte)

/* releases pages from per-CPU quicklists that have grown too large */
extern void check_pgt_cache(void);

#endif /* _ASM_IA64_PGALLOC_H */
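
A minimal sketch of how a caller drives these helpers, in the style of the
generic mm code; the function name example_map_kernel_pte, its signature, and
the error handling are illustrative assumptions, not part of this header:

#include <linux/errno.h>
#include <asm/pgalloc.h>

/* allocate a kernel PTE page and hook it into the given pmd entry */
static int example_map_kernel_pte(struct mm_struct *mm, pmd_t *pmd,
				  unsigned long addr)
{
	/* pops this CPU's quicklist, or falls back to __get_free_page() */
	pte_t *new = pte_alloc_one_kernel(mm, addr);

	if (!new)
		return -ENOMEM;

	/* point the pmd entry at the new, zeroed PTE page (stores __pa(new)) */
	pmd_populate_kernel(mm, pmd, new);
	return 0;
}

When such a page is torn down, pte_free_kernel() pushes it back onto the local
quicklist rather than returning it to the page allocator; check_pgt_cache()
later releases pages from lists that have grown too large.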