/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2002 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2002 Maciej W. Rozycki
 */
#ifndef _ASM_PGTABLE_BITS_H
#define _ASM_PGTABLE_BITS_H

/*
 * Note that we shift the lower 32 bits of each EntryLo[01] entry
 * 6 bits to the left.  That way we can convert the PFN into the
 * physical address by a single 'and' operation and gain 6 additional
 * bits for storing information which isn't present in a normal
 * MIPS page table.
 *
 * Similar to the Alpha port, we need to keep track of the ref
 * and mod bits in software.  We have a software "yeah you can read
 * from this page" bit, and a hardware one which actually lets the
 * process read from the page.  By the same token we have a software
 * writable bit and the real hardware one which actually lets the
 * process write to the page; this keeps a mod bit via the hardware
 * dirty bit.
 *
 * Certain revisions of the R4000 and R5000 have a bug where if a
 * certain sequence occurs in the last 3 instructions of an executable
 * page, and the following page is not mapped, the CPU can do
 * unpredictable things.  The code (when it is written) to deal with
 * this problem will be in the update_mmu_cache() code for the r4k.
 */
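
/*
 * Illustrative sketch: in the EntryLo0/1 registers the PFN field starts
 * at bit 6, above G, V, D and the 3-bit cache attribute, so a pte that
 * stores EntryLo shifted left by 6 keeps the frame number above the flag
 * bits defined below.  The hypothetical helper assumes the generic
 * "PFN above _PFN_SHIFT, flags below" encoding of the non-XPA layouts;
 * the real accessors are pte_pfn()/pfn_pte() in the pgtable headers.
 * It is kept out of the build and is only a worked example.
 */
#if 0
static inline phys_addr_t example_pte_to_phys(pte_t pte)
{
        unsigned long pfn = pte_val(pte) >> _PFN_SHIFT; /* strip the flag bits */

        return (phys_addr_t)pfn << PAGE_SHIFT;          /* page-aligned physical address */
}
#endif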
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

/*
 * The following bits are directly used by the TLB hardware
 */
#define _PAGE_GLOBAL_SHIFT      0
#define _PAGE_GLOBAL            (1 << _PAGE_GLOBAL_SHIFT)
#define _PAGE_VALID_SHIFT       (_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID             (1 << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY_SHIFT       (_PAGE_VALID_SHIFT + 1)
#define _PAGE_DIRTY             (1 << _PAGE_DIRTY_SHIFT)
#define _CACHE_SHIFT            (_PAGE_DIRTY_SHIFT + 1)
#define _CACHE_MASK             (7 << _CACHE_SHIFT)

/*
 * The following bits are implemented in software
 *
 * _PAGE_FILE semantics: set:pagecache unset:swap
 */
#define _PAGE_PRESENT_SHIFT     (_CACHE_SHIFT + 3)
#define _PAGE_PRESENT           (1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_READ_SHIFT        (_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ              (1 << _PAGE_READ_SHIFT)
#define _PAGE_WRITE_SHIFT       (_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE             (1 << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED_SHIFT    (_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED          (1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT    (_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED          (1 << _PAGE_MODIFIED_SHIFT)

#define _PAGE_SILENT_READ       _PAGE_VALID
#define _PAGE_SILENT_WRITE      _PAGE_DIRTY
#define _PAGE_FILE              _PAGE_MODIFIED

#define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)

#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

/*
 * The following are implemented by software
 *
 * _PAGE_FILE semantics: set:pagecache unset:swap
 */
#define _PAGE_PRESENT_SHIFT     0
#define _PAGE_PRESENT           (1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_READ_SHIFT        1
#define _PAGE_READ              (1 << _PAGE_READ_SHIFT)
#define _PAGE_WRITE_SHIFT       2
#define _PAGE_WRITE             (1 << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED_SHIFT    3
#define _PAGE_ACCESSED          (1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT    4
#define _PAGE_MODIFIED          (1 << _PAGE_MODIFIED_SHIFT)
#define _PAGE_FILE_SHIFT        4
#define _PAGE_FILE              (1 << _PAGE_FILE_SHIFT)

/*
 * And these are the hardware TLB bits
 */
#define _PAGE_GLOBAL_SHIFT      8
#define _PAGE_GLOBAL            (1 << _PAGE_GLOBAL_SHIFT)
#define _PAGE_VALID_SHIFT       9
#define _PAGE_VALID             (1 << _PAGE_VALID_SHIFT)
#define _PAGE_SILENT_READ       (1 << _PAGE_VALID_SHIFT)       /* synonym */
#define _PAGE_DIRTY_SHIFT       10
#define _PAGE_DIRTY             (1 << _PAGE_DIRTY_SHIFT)
#define _PAGE_SILENT_WRITE      (1 << _PAGE_DIRTY_SHIFT)
#define _CACHE_UNCACHED_SHIFT   11
#define _CACHE_UNCACHED         (1 << _CACHE_UNCACHED_SHIFT)
#define _CACHE_MASK             (1 << _CACHE_UNCACHED_SHIFT)

#else /* 'Normal' r4K case */
/*
 * When using the RI/XI bit support, we have 13 bits of flags below
 * the physical address. The RI/XI bits are placed such that a SRL 5
 * can strip off the software bits, then a ROTR 2 can move the RI/XI
 * into bits [63:62]. This also limits physical address to 56 bits,
 * which is more than we need right now.
 */
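
/*
 * Worked example (illustrative only): with cpu_has_rixi and
 * CONFIG_MIPS_HUGE_TLB_SUPPORT=n the definitions below resolve to
 *
 *      bit  0          _PAGE_PRESENT   (_PAGE_READ is unused)
 *      bit  1          _PAGE_WRITE
 *      bit  2          _PAGE_ACCESSED
 *      bit  3          _PAGE_MODIFIED
 *      bit  4          _PAGE_NO_EXEC (XI)
 *      bit  5          _PAGE_NO_READ (RI)
 *      bit  6          _PAGE_GLOBAL
 *      bit  7          _PAGE_VALID
 *      bit  8          _PAGE_DIRTY
 *      bits 9-11       cache attribute
 *      bits 12..       PFN (_PFN_SHIFT == PAGE_SHIFT for 4K pages)
 *
 * Shifting right by _PAGE_NO_EXEC_SHIFT and then rotating right by 2
 * leaves G/V/D/C/PFN in their EntryLo positions and moves XI/RI into
 * the top two bits; pte_to_entrylo() further down computes the same
 * result in C.
 */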
/*
 * The following bits are implemented in software
 *
 * _PAGE_READ / _PAGE_READ_SHIFT should be unused if cpu_has_rixi.
 * _PAGE_FILE semantics: set:pagecache unset:swap
 */
#define _PAGE_PRESENT_SHIFT     (0)
#define _PAGE_PRESENT           (1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_READ_SHIFT        (cpu_has_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ              ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; })
#define _PAGE_WRITE_SHIFT       (_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE             (1 << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED_SHIFT    (_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED          (1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT    (_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED          (1 << _PAGE_MODIFIED_SHIFT)
#define _PAGE_FILE              (_PAGE_MODIFIED)

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/* huge tlb page */
#define _PAGE_HUGE_SHIFT        (_PAGE_MODIFIED_SHIFT + 1)
#define _PAGE_HUGE              (1 << _PAGE_HUGE_SHIFT)
#else
#define _PAGE_HUGE_SHIFT        (_PAGE_MODIFIED_SHIFT)
#define _PAGE_HUGE              ({BUG(); 1; })  /* Dummy value */
#endif

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/* huge tlb page */
#define _PAGE_SPLITTING_SHIFT   (_PAGE_HUGE_SHIFT + 1)
#define _PAGE_SPLITTING         (1 << _PAGE_SPLITTING_SHIFT)
#else
#define _PAGE_SPLITTING_SHIFT   (_PAGE_HUGE_SHIFT)
#define _PAGE_SPLITTING         ({BUG(); 1; })  /* Dummy value */
#endif

/* Page cannot be executed */
#define _PAGE_NO_EXEC_SHIFT     (cpu_has_rixi ? _PAGE_SPLITTING_SHIFT + 1 : _PAGE_SPLITTING_SHIFT)
#define _PAGE_NO_EXEC           ({BUG_ON(!cpu_has_rixi); 1 << _PAGE_NO_EXEC_SHIFT; })

/* Page cannot be read */
#define _PAGE_NO_READ_SHIFT     (cpu_has_rixi ? _PAGE_NO_EXEC_SHIFT + 1 : _PAGE_NO_EXEC_SHIFT)
#define _PAGE_NO_READ           ({BUG_ON(!cpu_has_rixi); 1 << _PAGE_NO_READ_SHIFT; })

#define _PAGE_GLOBAL_SHIFT      (_PAGE_NO_READ_SHIFT + 1)
#define _PAGE_GLOBAL            (1 << _PAGE_GLOBAL_SHIFT)

#define _PAGE_VALID_SHIFT       (_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID             (1 << _PAGE_VALID_SHIFT)
/* synonym */
#define _PAGE_SILENT_READ       (_PAGE_VALID)

/* The MIPS dirty bit */
#define _PAGE_DIRTY_SHIFT       (_PAGE_VALID_SHIFT + 1)
#define _PAGE_DIRTY             (1 << _PAGE_DIRTY_SHIFT)
#define _PAGE_SILENT_WRITE      (_PAGE_DIRTY)

#define _CACHE_SHIFT            (_PAGE_DIRTY_SHIFT + 1)
#define _CACHE_MASK             (7 << _CACHE_SHIFT)

#define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)

#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

#ifndef _PFN_SHIFT
#define _PFN_SHIFT PAGE_SHIFT
#endif
#define _PFN_MASK (~((1 << (_PFN_SHIFT)) - 1))

#ifndef _PAGE_NO_READ
#define _PAGE_NO_READ           ({BUG(); 0; })
#define _PAGE_NO_READ_SHIFT     ({BUG(); 0; })
#endif
#ifndef _PAGE_NO_EXEC
#define _PAGE_NO_EXEC           ({BUG(); 0; })
#endif
#ifndef _PAGE_GLOBAL_SHIFT
#define _PAGE_GLOBAL_SHIFT      ilog2(_PAGE_GLOBAL)
#endif

#ifndef __ASSEMBLY__
/*
 * pte_to_entrylo converts a page table entry (PTE) into a MIPS
 * entrylo0/1 value.
 */
static inline uint64_t pte_to_entrylo(unsigned long pte_val)
{
        if (cpu_has_rixi) {
                int sa;
#ifdef CONFIG_32BIT
                sa = 31 - _PAGE_NO_READ_SHIFT;
#else
                sa = 63 - _PAGE_NO_READ_SHIFT;
#endif
                /*
                 * C has no way to express that this is a DSRL
                 * _PAGE_NO_EXEC_SHIFT followed by a ROTR 2.  Luckily
                 * in the fast path this is done in assembly
                 */
                return (pte_val >> _PAGE_GLOBAL_SHIFT) |
                        ((pte_val & (_PAGE_NO_EXEC | _PAGE_NO_READ)) << sa);
        }

        return pte_val >> _PAGE_GLOBAL_SHIFT;
}
#endif
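
/*
 * Usage sketch (hypothetical helper, mirroring how the r4k TLB code
 * loads a pair of entries): with the RI/XI layout sketched earlier
 * (_PAGE_NO_READ at bit 5, _PAGE_GLOBAL at bit 6), sa is 63 - 5 = 58 on
 * a 64-bit kernel, so the masked XI/RI bits land in bits 62/63 while
 * pte_val >> _PAGE_GLOBAL_SHIFT lines G, V, D, the cache attribute and
 * the PFN up with EntryLo bits 0, 1, 2, [5:3] and 6 upwards.  Kept out
 * of the build; only meant as an illustration.
 */
#if 0
static inline void example_load_entrylo_pair(pte_t *ptep)
{
        write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));    /* even page */
        write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));      /* odd page */
}
#endif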
/*
 * Cache attributes
 */
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

#define _CACHE_CACHABLE_NONCOHERENT 0
#define _CACHE_UNCACHED_ACCELERATED _CACHE_UNCACHED

#elif defined(CONFIG_CPU_SB1)

/* No penalty for being coherent on the SB1, so just
   use it for "noncoherent" spaces, too.  Shouldn't hurt. */

#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)

#elif defined(CONFIG_CPU_LOONGSON3)

/* Using COHERENT flag for NONCOHERENT doesn't hurt. */

#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT)  /* LOONGSON   */
#define _CACHE_CACHABLE_COHERENT    (3<<_CACHE_SHIFT)  /* LOONGSON-3 */

#elif defined(CONFIG_MACH_JZ4740)

/* Ingenic uses the WA bit to achieve write-combine memory writes */
#define _CACHE_UNCACHED_ACCELERATED (1<<_CACHE_SHIFT)

#endif

#ifndef _CACHE_CACHABLE_NO_WA
#define _CACHE_CACHABLE_NO_WA       (0<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_WA
#define _CACHE_CACHABLE_WA          (1<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_UNCACHED
#define _CACHE_UNCACHED             (2<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_NONCOHERENT
#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_CE
#define _CACHE_CACHABLE_CE          (4<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_COW
#define _CACHE_CACHABLE_COW         (5<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_CUW
#define _CACHE_CACHABLE_CUW         (6<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_UNCACHED_ACCELERATED
#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)
#endif

#define __READABLE      (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 0 : _PAGE_READ))
#define __WRITEABLE     (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)

#define _PAGE_CHG_MASK  (_PFN_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK)

#endif /* _ASM_PGTABLE_BITS_H */
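
/*
 * Worked example (illustrative only): pte_modify()-style code is the
 * typical consumer of _PAGE_CHG_MASK, roughly
 *
 *      new = __pte((pte_val(old) & _PAGE_CHG_MASK) | pgprot_val(newprot));
 *
 * i.e. the PFN, the software accessed/modified state and the cache
 * attribute survive a protection change, while __READABLE/__WRITEABLE
 * simply group the bits involved in marking a page referenced or
 * written.
 */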