/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_32_H
#define _ASM_PGTABLE_32_H

#include <linux/config.h>
#include <asm/addrspace.h>
#include <asm/page.h>

#include <linux/linkage.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#include <asm-generic/pgtable-nopmd.h>

/*
 * - add_wired_entry() adds a fixed TLB entry and moves the wired register
 */
extern void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
			    unsigned long entryhi, unsigned long pagemask);

/*
 * - add_temporary_entry() adds a temporary TLB entry.  We use TLB entries
 *   starting at the top and working down.  This is for populating the
 *   TLB before trap_init() puts the TLB miss handler in place.  It
 *   should be used only for entries matching the actual page tables,
 *   to prevent inconsistencies.
 */
extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask);


/* Basically we have the same two-level (which is the logical three-level
 * Linux page table layout folded) page tables as the i386.  Some day
 * when we have proper page coloring support we can have a 1% quicker
 * tlb refill handling mechanism, but for now it is a bit slower but
 * works even with the cache aliasing problem the R4k and above have.
 */

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef CONFIG_64BIT_PHYS_ADDR
#define PGDIR_SHIFT	21
#else
#define PGDIR_SHIFT	22
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: we use two-level, so
 * we don't really have any PUD/PMD directory physically.
 */
#ifdef CONFIG_64BIT_PHYS_ADDR
#define PGD_ORDER	1
#define PUD_ORDER	aieeee_attempt_to_allocate_pud
#define PMD_ORDER	1
#define PTE_ORDER	0
#else
#define PGD_ORDER	0
#define PUD_ORDER	aieeee_attempt_to_allocate_pud
#define PMD_ORDER	1
#define PTE_ORDER	0
#endif

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define USER_PTRS_PER_PGD	(0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#define VMALLOC_START	MAP_BASE

#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif

#ifdef CONFIG_64BIT_PHYS_ADDR
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#else
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

extern void load_pgd(unsigned long pg_dir);

extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];
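/*
 * Worked example of the PGDIR_SHIFT/PTRS_PER_* definitions above (a
 * minimal sketch, assuming the usual 4 KiB pages, i.e. PAGE_SHIFT == 12,
 * and pgd entries that stay one 32-bit word):
 *
 *   CONFIG_64BIT_PHYS_ADDR off: pte_t is 32 bits, so one PTE page holds
 *   PTRS_PER_PTE = 4096/4 = 1024 entries and maps 1024 * 4 KiB = 4 MiB,
 *   matching PGDIR_SHIFT = 22.  The single-page pgd (PGD_ORDER = 0) then
 *   holds PTRS_PER_PGD = 1024 entries covering the full 4 GiB.
 *
 *   CONFIG_64BIT_PHYS_ADDR on: pte_t grows to 64 bits, so a PTE page maps
 *   only 512 * 4 KiB = 2 MiB, matching PGDIR_SHIFT = 21, and the pgd takes
 *   two pages (PGD_ORDER = 1) with 2048 entries to keep covering 4 GiB.
 */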
/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

#define pmd_bad(pmd)		(pmd_val(pmd) & ~PAGE_MASK)

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pte_pfn(x)		((unsigned long)((x).pte_high >> 6))
static inline pte_t
pfn_pte(unsigned long pfn, pgprot_t prot)
{
	pte_t pte;
	pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
	pte.pte_low = pgprot_val(prot);
	return pte;
}

#else

#define pte_page(x)		pfn_to_page(pte_pfn(x))

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)		((unsigned long)((x).pte >> PAGE_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#endif
#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1) */

#define __pgd_offset(address)	pgd_index(address)
#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* Find an entry in the third-level page table.. */
#define __pte_offset(address)						\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)					\
	((pte_t *) (pmd_page_kernel(*dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address)					\
	((pte_t *) pmd_page_kernel(*(dir)) + __pte_offset(address))

#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_offset_map_nested(dir, address)				\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte)		((void)(pte))
#define pte_unmap_nested(pte)	((void)(pte))
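/*
 * Typical lookup sequence using the macros above (a minimal sketch,
 * assuming the pmd is folded away by <asm-generic/pgtable-nopmd.h>, so
 * pud_offset()/pmd_offset() effectively pass the pgd pointer through):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte;
 *
 *	if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
 *		pte = pte_offset_map(pmd, addr);
 *		... examine or update *pte ...
 *		pte_unmap(pte);
 *	}
 */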
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

/* Swap entries must have VALID bit cleared. */
#define __swp_type(x)		(((x).val >> 10) & 0x1f)
#define __swp_offset(x)		((x).val >> 15)
#define __swp_entry(type,offset)	\
	((swp_entry_t) { ((type) << 10) | ((offset) << 15) })

/*
 * Bits 0, 1, 2, 9 and 10 are taken, split up the 27 bits of offset
 * into this range:
 */
#define PTE_FILE_MAX_BITS	27

#define pte_to_pgoff(_pte) \
	((((_pte).pte >> 3) & 0x3f ) + (((_pte).pte >> 11) << 8 ))

#define pgoff_to_pte(off) \
	((pte_t) { (((off) & 0x3f) << 3) + (((off) >> 8) << 11) + _PAGE_FILE })

#else

/* Swap entries must have VALID and GLOBAL bits cleared. */
#define __swp_type(x)		(((x).val >> 8) & 0x1f)
#define __swp_offset(x)		((x).val >> 13)
#define __swp_entry(type,offset)	\
	((swp_entry_t) { ((type) << 8) | ((offset) << 13) })

/*
 * Bits 0, 1, 2, 7 and 8 are taken, split up the 27 bits of offset
 * into this range:
 */
#define PTE_FILE_MAX_BITS	27

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
	/* fixme */
#define pte_to_pgoff(_pte) (((_pte).pte_high >> 6) + ((_pte).pte_high & 0x3f))
#define pgoff_to_pte(off) \
	((pte_t){(((off) & 0x3f) + ((off) << 6) + _PAGE_FILE)})

#else
#define pte_to_pgoff(_pte) \
	((((_pte).pte >> 3) & 0x1f ) + (((_pte).pte >> 9) << 6 ))

#define pgoff_to_pte(off) \
	((pte_t) { (((off) & 0x1f) << 3) + (((off) >> 6) << 9) + _PAGE_FILE })
#endif

#endif

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#endif /* _ASM_PGTABLE_32_H */
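/*
 * How the swap encoding above fits together (a minimal sketch, assuming
 * the non-R3000 layout: 5-bit swap type at bit 8, swap offset from bit 13,
 * with the VALID and GLOBAL bits left clear so the hardware never treats
 * a swap pte as a present mapping):
 *
 *	swp_entry_t ent    = __pte_to_swp_entry(pte);
 *	unsigned long type = __swp_type(ent);		at most 0x1f
 *	unsigned long off  = __swp_offset(ent);		page index in swap
 *	pte_t back         = __swp_entry_to_pte(__swp_entry(type, off));
 */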