/*
 * From the Linux kernel mirror at
 * git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, tag v2.6.25-rc5.
 */
#ifndef _ASM_POWERPC_PGTABLE_PPC32_H
#define _ASM_POWERPC_PGTABLE_PPC32_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>			/* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
extern unsigned long ioremap_bot, ioremap_base;

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

/*
 * The PowerPC MMU uses a hash table containing PTEs, together with
 * a set of 16 segment registers (on 32-bit implementations), to define
 * the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings.  We maintain a two-level page table tree, much
 * like that used by the i386, for the sake of the Linux memory
 * management code.  Low-level assembler code in hashtable.S
 * (procedure hash_page) is responsible for extracting ptes from the
 * tree and putting them into the hash table when necessary, and
 * updating the accessed and modified bits in the page table tree.
 */

/*
 * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk.
 * We also use the two-level tables, but we can put the real bits in them
 * needed for the TLB and tablewalk.  These definitions require Mx_CTR.PPM = 0,
 * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1.  The level 2 descriptor has
 * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
 * based upon user/super access.  The TLB does not have accessed nor write
 * protect.  We assume that if the TLB gets loaded with an entry it is
 * accessed, and we overload the changed bit for write protect.  We use
 * two bits in the software pte that are supposed to be set to zero in
 * the TLB entry (24 and 25) for these indicators.  Although the level 1
 * descriptor contains the guarded and writethrough/copyback bits, we can
 * set these at the page level since they get copied from the Mx_TWC
 * register when the TLB entry is loaded.  We will use bit 27 for guard, since
 * that is where it exists in the MD_TWC, and bit 26 for writethrough.
 * These will get masked from the level 2 descriptor at TLB load time, and
 * copied to the MD_TWC before it gets loaded.
 * Large page sizes added.  We currently support two sizes, 4K and 8M.
 * This also allows a TLB handler optimization because we can directly
 * load the PMD into MD_TWC.  The 8M pages are only used for kernel
 * mapping of well-known areas.  The PMD (PGD) entries contain control
 * flags in addition to the address, so care must be taken that the
 * software no longer assumes these are only pointers.
 */

/*
 * At present, all PowerPC 400-class processors share a similar TLB
 * architecture.  The instruction and data sides share a unified,
 * 64-entry, fully-associative TLB which is maintained totally under
 * software control.  In addition, the instruction side has a
 * hardware-managed, 4-entry, fully-associative TLB which serves as a
 * first level to the shared TLB.  These two TLBs are known as the UTLB
 * and ITLB, respectively (see "mmu.h" for definitions).
 */

/*
 * The normal case is that PTEs are 32 bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two-level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */

/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_SHIFT)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << (32 - PGDIR_SHIFT))
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
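
/*
 * Worked example (illustrative, not part of the original header): in the
 * normal 32-bit PTE case, PAGE_SHIFT == 12 and PTE_SHIFT == 10, so
 * PGDIR_SHIFT == 22 and each pgd entry maps PGDIR_SIZE == 4MB.  A 32-bit
 * virtual address then splits as
 *
 *	va = | 10-bit pgd index | 10-bit pte index | 12-bit page offset |
 *
 * e.g. va 0xc0101234 has pgd index 0x300, pte index 0x101 and offset
 * 0x234, and both PTRS_PER_PGD and PTRS_PER_PTE come out to 1024.
 */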
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 16MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot
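
/*
 * Worked example (illustrative, not part of the original header): the
 * expression above rounds high_memory up to the next VMALLOC_OFFSET
 * boundary.  If high_memory were, say, 0xc7800000, then
 *
 *	(0xc7800000 + 0x1000000) & ~0x0ffffff == 0xc8000000
 *
 * i.e. the next 16MB boundary above high_memory, leaving an 8MB guard
 * hole in this case (and a full 16MB when high_memory is already
 * 16MB-aligned).
 */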
/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)

/* There are several potential gotchas here.  The 40x hardware TLBLO
   field looks like this:

   0  1  2  3  4  ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
   RPN.....................  0  0 EX WR ZSEL.......  W  I  M  G

   Where possible we make the Linux PTE bits match up with this

   - bits 20 and 21 must be cleared, because we use 4k pages (40x can
     support down to 1k pages), this is done in the TLBMiss exception
     handler.
   - We use only zones 0 (for kernel pages) and 1 (for user pages)
     of the 16 available.  Bits 24-26 of the TLB are cleared in the TLB
     miss handler.  Bit 27 is PAGE_USER, thus selecting the correct
     zone.
   - PRESENT *must* be in the bottom two bits because swap cache
     entries use the top 30 bits.  Because 40x doesn't support SMP
     anyway, M is irrelevant so we borrow it for PAGE_PRESENT.  Bit 30
     is cleared in the TLB miss handler before the TLB entry is loaded.
   - All other bits of the PTE are loaded into TLBLO without
     modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
     software PTE bits.  We actually use bits 21, 24, 25, and
     30 respectively for the software bits: ACCESSED, DIRTY, RW, and
     PRESENT.
*/

/* Definitions for 40x embedded chips. */
#define	_PAGE_GUARDED	0x001	/* G: page is guarded from prefetch */
#define _PAGE_FILE	0x001	/* when !present: nonlinear file mapping */
#define _PAGE_PRESENT	0x002	/* software: PTE contains a translation */
#define	_PAGE_NO_CACHE	0x004	/* I: caching is inhibited */
#define	_PAGE_WRITETHRU	0x008	/* W: caching is write-through */
#define	_PAGE_USER	0x010	/* matches one of the zone permission bits */
#define	_PAGE_RW	0x040	/* software: Writes permitted */
#define	_PAGE_DIRTY	0x080	/* software: dirty page */
#define _PAGE_HWWRITE	0x100	/* hardware: Dirty & RW, set in exception */
#define _PAGE_HWEXEC	0x200	/* hardware: EX permission */
#define _PAGE_ACCESSED	0x400	/* software: R: page referenced */

#define _PMD_PRESENT	0x400	/* PMD points to page of PTEs */
#define _PMD_BAD	0x802
#define _PMD_SIZE	0x0e0	/* size field, != 0 for large-page PMD entry */
#define _PMD_SIZE_4M	0x0c0
#define _PMD_SIZE_16M	0x0e0
#define PMD_PAGE_SIZE(pmdval)	(1024 << (((pmdval) & _PMD_SIZE) >> 4))
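
/*
 * Worked example (illustrative, not part of the original header): for a
 * large-page PMD carrying the 4M size code, (pmdval & _PMD_SIZE) is 0x0c0,
 * so PMD_PAGE_SIZE gives 1024 << (0x0c0 >> 4) == 1024 << 12 == 4MB; the
 * 16M code 0x0e0 likewise yields 1024 << 14 == 16MB.
 */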
#elif defined(CONFIG_44x)
/*
 * Definitions for PPC440
 *
 * Because of the 3-word TLB entries needed to support 36-bit addressing,
 * the attributes are difficult to map in such a fashion that they
 * are easily loaded during exception processing.  I decided to
 * organize the entry so the ERPN is the only portion in the
 * upper word of the PTE and the attribute bits below are packed
 * in as sensibly as they can be in the area below a 4KB page size
 * oriented RPN.  This at least makes it easy to load the RPN and
 * ERPN fields in the TLB. -Matt
 *
 * Note that these bits preclude future use of a page size
 * less than 4KB.
 *
 *
 * The PPC 440 core has the following TLB attribute fields:
 *
 *   TLB1:
 *   0  1  2  3  4  ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
 *   RPN.................................  -  -  -  -  -  - ERPN.......
 *
 *   TLB2:
 *   0  1  2  3  4  ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
 *   -  -  -  -  -  - U0 U1 U2 U3  W  I  M  G  E  - UX UW UR SX SW SR
 *
 * There are some constraints and options in deciding how to map the
 * software bits into the TLB entry.
 *
 * - PRESENT *must* be in the bottom three bits because swap cache
 *   entries use the top 29 bits for TLB2.
 *
 * - FILE *must* be in the bottom three bits because swap cache
 *   entries use the top 29 bits for TLB2.
 *
 * - The CACHE COHERENT bit (M) has no effect on the PPC440 core, because
 *   it doesn't support SMP.  So we can use it as a software bit, like
 *   DIRTY.
 *
 * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used
 * for memory protection related functions (see PTE structure in
 * include/asm-ppc/mmu.h).  The _PAGE_XXX definitions in this file map to the
 * above bits.  Note that the bit values are CPU specific, not architecture
 * specific.
 *
 * The kernel PTE entry holds an arch-dependent swp_entry structure under
 * certain situations.  In other words, in such situations some portion of
 * the PTE bits are used as a swp_entry.  In the PPC implementation, the
 * 3rd-24th LSBs are shared with swp_entry, while the bottom three LSBs still
 * hold protection values.  That means the three protection bits are
 * reserved for both the PTE and the SWAP entry at the three
 * least-significant bits.
 *
 * There are three protection bits available for a SWAP entry:
 *	_PAGE_PRESENT
 *	_PAGE_FILE
 *	_PAGE_HASHPTE (if HW has)
 *
 * So those three bits have to be inside of the 0-2nd LSBs of the PTE.
 *
 */

#define _PAGE_PRESENT	0x00000001		/* S: PTE valid */
#define	_PAGE_RW	0x00000002		/* S: Write permission */
#define _PAGE_FILE	0x00000004		/* S: nonlinear file mapping */
#define _PAGE_ACCESSED	0x00000008		/* S: Page referenced */
#define _PAGE_HWWRITE	0x00000010		/* H: Dirty & RW */
#define _PAGE_HWEXEC	0x00000020		/* H: Execute permission */
#define	_PAGE_USER	0x00000040		/* S: User page */
#define	_PAGE_ENDIAN	0x00000080		/* H: E bit */
#define	_PAGE_GUARDED	0x00000100		/* H: G bit */
#define	_PAGE_DIRTY	0x00000200		/* S: Page dirty */
#define	_PAGE_NO_CACHE	0x00000400		/* H: I bit */
#define	_PAGE_WRITETHRU	0x00000800		/* H: W bit */

/* TODO: Add large page lowmem mapping support */
#define _PMD_PRESENT	0
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD	(~PAGE_MASK)

/* ERPN in a PTE never gets cleared, ignore it */
#define _PTE_NONE_MASK	0xffffffff00000000ULL

#elif defined(CONFIG_FSL_BOOKE)
/*
   MMU Assist Register 3:

   32 33 34 35 36 ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63
   RPN......................  0  0 U0 U1 U2 U3 UX SX UW SW UR SR

   - PRESENT *must* be in the bottom three bits because swap cache
     entries use the top 29 bits.

   - FILE *must* be in the bottom three bits because swap cache
     entries use the top 29 bits.
*/

/* Definitions for FSL Book-E Cores */
#define _PAGE_PRESENT	0x00001	/* S: PTE contains a translation */
#define _PAGE_USER	0x00002	/* S: User page (maps to UR) */
#define _PAGE_FILE	0x00002	/* S: when !present: nonlinear file mapping */
#define _PAGE_ACCESSED	0x00004	/* S: Page referenced */
#define _PAGE_HWWRITE	0x00008	/* H: Dirty & RW, set in exception */
#define _PAGE_RW	0x00010	/* S: Write permission */
#define _PAGE_HWEXEC	0x00020	/* H: UX permission */

#define _PAGE_ENDIAN	0x00040	/* H: E bit */
#define _PAGE_GUARDED	0x00080	/* H: G bit */
#define _PAGE_COHERENT	0x00100	/* H: M bit */
#define _PAGE_NO_CACHE	0x00200	/* H: I bit */
#define _PAGE_WRITETHRU	0x00400	/* H: W bit */

#ifdef CONFIG_PTE_64BIT
#define _PAGE_DIRTY	0x08000	/* S: Page dirty */

/* ERPN in a PTE never gets cleared, ignore it */
#define _PTE_NONE_MASK	0xffffffffffff0000ULL
#else
#define _PAGE_DIRTY	0x00800	/* S: Page dirty */
#endif

#define _PMD_PRESENT	0
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD	(~PAGE_MASK)
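
/*
 * Worked example (illustrative, not part of the original header): on the
 * Book-E style variants above, a present pmd simply holds a page-aligned
 * pointer to the pte page, so with 4K pages (PAGE_MASK == 0xfffff000)
 * pmd_present() tests the bits in 0xfffff000 and pmd_bad() flags any
 * entry with stray bits in 0x00000fff.
 */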
#elif defined(CONFIG_8xx)
/* Definitions for 8xx embedded chips. */
#define _PAGE_PRESENT	0x0001	/* Page is valid */
#define _PAGE_FILE	0x0002	/* when !present: nonlinear file mapping */
#define _PAGE_NO_CACHE	0x0002	/* I: cache inhibit */
#define _PAGE_SHARED	0x0004	/* No ASID (context) compare */

/* These five software bits must be masked out when the entry is loaded
 * into the TLB.
 */
#define _PAGE_EXEC	0x0008	/* software: i-cache coherency required */
#define _PAGE_GUARDED	0x0010	/* software: guarded access */
#define _PAGE_DIRTY	0x0020	/* software: page changed */
#define _PAGE_RW	0x0040	/* software: user write access allowed */
#define _PAGE_ACCESSED	0x0080	/* software: page referenced */

/* Setting any bits in the nibble with the following two controls will
 * require a TLB exception handler change.  It is assumed unused bits
 * are always zero.
 */
#define _PAGE_HWWRITE	0x0100	/* h/w write enable: never set in Linux PTE */
#define _PAGE_USER	0x0800	/* One of the PP bits, the other is USER&~RW */

#define _PMD_PRESENT	0x0001
#define _PMD_BAD	0x0ff0
#define _PMD_PAGE_MASK	0x000c
#define _PMD_PAGE_8M	0x000c

/*
 * The 8xx TLB miss handler allegedly sets _PAGE_ACCESSED in the PTE
 * for an address even if _PAGE_PRESENT is not set, as a performance
 * optimization.  This is a bug if you ever want to use swap unless
 * _PAGE_ACCESSED is 2, which it isn't, or unless you have 8xx-specific
 * definitions for __swp_entry etc. below, which would be gross.
 *  -- paulus
 */
#define _PTE_NONE_MASK _PAGE_ACCESSED

#else /* CONFIG_6xx */
/* Definitions for 60x, 740/750, etc. */
#define _PAGE_PRESENT	0x001	/* software: pte contains a translation */
#define _PAGE_HASHPTE	0x002	/* hash_page has made an HPTE for this pte */
#define _PAGE_FILE	0x004	/* when !present: nonlinear file mapping */
#define _PAGE_USER	0x004	/* usermode access allowed */
#define _PAGE_GUARDED	0x008	/* G: prohibit speculative access */
#define _PAGE_COHERENT	0x010	/* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE	0x020	/* I: cache inhibit */
#define _PAGE_WRITETHRU	0x040	/* W: cache write-through */
#define _PAGE_DIRTY	0x080	/* C: page changed */
#define _PAGE_ACCESSED	0x100	/* R: page referenced */
#define _PAGE_EXEC	0x200	/* software: i-cache coherency required */
#define _PAGE_RW	0x400	/* software: user write access allowed */

#define _PTE_NONE_MASK	_PAGE_HASHPTE

#define _PMD_PRESENT	0
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD	(~PAGE_MASK)
#endif

/*
 * Some bits are only used on some cpu families...
 */
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE	0
#endif
#ifndef _PTE_NONE_MASK
#define _PTE_NONE_MASK 0
#endif
#ifndef _PAGE_SHARED
#define _PAGE_SHARED	0
#endif
#ifndef _PAGE_HWWRITE
#define _PAGE_HWWRITE	0
#endif
#ifndef _PAGE_HWEXEC
#define _PAGE_HWEXEC	0
#endif
#ifndef _PAGE_EXEC
#define _PAGE_EXEC	0
#endif
#ifndef _PMD_PRESENT_MASK
#define _PMD_PRESENT_MASK	_PMD_PRESENT
#endif
#ifndef _PMD_SIZE
#define _PMD_SIZE	0
#define PMD_PAGE_SIZE(pmd)	bad_call_to_PMD_PAGE_SIZE()
#endif

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

/*
 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
 * to have it in the Linux PTE, and in fact the bit could be reused for
 * another purpose.  -- paulus.
 */

#ifdef CONFIG_44x
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_GUARDED)
#else
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
#endif
#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
#define _PAGE_KERNEL	(_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE)

#ifdef CONFIG_PPC_STD_MMU
/* On standard PPC MMU, no user access implies kernel read/write access,
 * so to write-protect kernel memory we must turn on user access */
#define _PAGE_KERNEL_RO	(_PAGE_BASE | _PAGE_SHARED | _PAGE_USER)
#else
#define _PAGE_KERNEL_RO	(_PAGE_BASE | _PAGE_SHARED)
#endif

#define _PAGE_IO	(_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define _PAGE_RAM	(_PAGE_KERNEL | _PAGE_HWEXEC)

#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH)
/* We want the debuggers to be able to set breakpoints anywhere, so
 * don't write-protect the kernel text */
#define _PAGE_RAM_TEXT	_PAGE_RAM
#else
#define _PAGE_RAM_TEXT	(_PAGE_KERNEL_RO | _PAGE_HWEXEC)
#endif
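
/*
 * Worked example (illustrative, not part of the original header): on a
 * classic 6xx hash MMU, _PAGE_SHARED and _PAGE_HWWRITE are both 0, so
 * _PAGE_KERNEL expands to
 *
 *	_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_RW | _PAGE_DIRTY
 *	   == 0x001   |     0x100      |  0x400   |   0x080     == 0x581
 *
 * and _PAGE_IO just adds the cache-inhibit and guard bits on top of that.
 */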
#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL		__pgprot(_PAGE_RAM)
#define PAGE_KERNEL_NOCACHE	__pgprot(_PAGE_IO)

/*
 * The PowerPC can only do execute protection on a segment (256MB) basis,
 * not on a page basis.  So we consider execute permission the same as read.
 * Also, write permission implies read permission.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_X
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY_X
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_X
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED_X
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED_X

#ifndef __ASSEMBLY__
/* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a
 * kernel without large page PMD support */
extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);

/*
 * Conversions between PTE values and page frame numbers.
 */

/* In some cases we want to additionally adjust where the pfn is in the pte
 * to allow room for more flags */
#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#define PFN_SHIFT_OFFSET	(PAGE_SHIFT + 8)
#else
#define PFN_SHIFT_OFFSET	(PAGE_SHIFT)
#endif

#define pte_pfn(x)		(pte_val(x) >> PFN_SHIFT_OFFSET)
#define pte_page(x)		pfn_to_page(pte_pfn(x))

#define pfn_pte(pfn, prot)	__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) |\
					pgprot_val(prot))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
#endif /* __ASSEMBLY__ */
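
/*
 * Worked example (illustrative, not part of the original header): with
 * 64-bit FSL Book-E PTEs, PFN_SHIFT_OFFSET is PAGE_SHIFT + 8 == 20, so
 * pfn_pte(0x12345, prot) stores the pfn starting at bit 20 and pte_pfn()
 * recovers 0x12345 by shifting back down; the eight bits this frees up
 * between the hardware flags and the pfn are what makes room for
 * _PAGE_DIRTY at 0x08000 above.
 */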
#define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) \
	do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define	pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define	pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
#define	pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

#ifndef __ASSEMBLY__
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }

static inline void pte_uncache(pte_t pte)	{ pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)		{ pte_val(pte) &= ~_PAGE_NO_CACHE; }

static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
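
/*
 * Illustrative sketch (not part of the original header): pte_modify()
 * keeps only the page frame and the ACCESSED/DIRTY bits (_PAGE_CHG_MASK)
 * and substitutes a new protection, so it can downgrade a writable user
 * PTE to read-only while still pointing at the same page.
 */
#if 0	/* example only */
static inline pte_t example_downgrade_to_readonly(pte_t pte)
{
	/* _PAGE_RW is dropped because it is in neither _PAGE_CHG_MASK nor
	 * the new protection; the pfn and referenced/changed state survive */
	return pte_modify(pte, PAGE_READONLY);
}
#endif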
/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/*
 * Atomic PTE updates.
 *
 * pte_update clears and sets bits atomically, and returns
 * the old pte value.  In the 64-bit PTE case we lock around the
 * low PTE word since we expect ALL flag bits to be there.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
				       unsigned long set)
{
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else
static inline unsigned long long pte_update(pte_t *p, unsigned long clr,
					    unsigned long set)
{
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif

/*
 * set_pte stores a linux PTE into the linux page table.
 * On machines which use an MMU hash table we avoid changing the
 * _PAGE_HASHPTE bit.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
#if _PAGE_HASHPTE != 0
	pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE);
#else
	*ptep = pte;
#endif
}

/*
 * 2.6 calls this without flushing the TLB entry; that is wrong
 * for our hash-based implementation, so we fix it up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
#if _PAGE_HASHPTE != 0
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
#endif
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW);
	pte_update(ptep, 0, bits);
}

#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									   \
	int __changed = !pte_same(*(__ptep), __entry);			   \
	if (__changed) {						   \
		__ptep_set_access_flags(__ptep, __entry, __dirty);	   \
		flush_tlb_page_nohash(__vma, __address);		   \
	}								   \
	__changed;							   \
})

/*
 * Macro to mark a page protection value as "uncacheable".
 */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	(mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	(mem_map + (__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
#define pte_offset_map_nested(dir, addr)	\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))

#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
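
/*
 * Illustrative sketch (not part of the original header): walking the
 * two-level tree by hand for a kernel (lowmem) address using the
 * accessors defined above.  The pud and pmd levels are folded by
 * asm-generic/pgtable-nopmd.h, so the intermediate offsets are no-ops
 * that just pass the pgd entry through.
 */
#if 0	/* example only */
static pte_t *example_lookup_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);	/* index into init_mm's pgdir */
	pud_t *pud = pud_offset(pgd, addr);	/* folded, no-op level */
	pmd_t *pmd = pmd_offset(pud, addr);	/* folded, no-op level */

	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);	/* pointer into the pte page */
}
#endif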
/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, the _PAGE_FILE bit, or the
 * _PAGE_HASHPTE bit (if used).  -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })

/*
 * No page table caches to initialise.
 */
#define pgtable_cache_init()	do { } while (0)

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
		      pmd_t **pmdp);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_PPC32_H */