/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/io.h>
#include <asm/pgtable-bits.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_NO_EXEC | \
				 _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
				 __WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, and consider it to be the same as read. Also, write
 * permissions imply read permissions. This is the closest we can get
 * by reasonable means.
 */

/*
 * Dummy values to fill the table in mmap.c.
 * The real values will be generated at runtime.
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)

extern unsigned long _page_cachable_default;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

extern void paging_init(void);
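/*
 * Illustrative sketch, not part of the original header: ZERO_PAGE() takes
 * the faulting address because MIPS caches are virtually indexed, and
 * zero_page_mask selects the zero page whose cache colour matches the
 * user mapping (hence __HAVE_COLOR_ZERO_PAGE above). A typical consumer
 * is read-fault handling, which maps the shared zero page read-only
 * instead of allocating memory; the helper below is hypothetical:
 *
 *	static void map_zero_page(struct vm_area_struct *vma,
 *				  unsigned long address, pte_t *ptep)
 *	{
 *		pte_t entry = mk_pte(ZERO_PAGE(address), PAGE_READONLY);
 *
 *		// A later write fault replaces this mapping with a
 *		// private copy (copy-on-write).
 *		set_pte_at(vma->vm_mm, address, ptep, entry);
 *	}
 */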
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

/*
 * htw_stop()/htw_start() bracket page table updates: they disable and
 * re-enable the hardware page table walker. The calls nest via the
 * per-CPU htw_seq count, so the walker is switched off by the outermost
 * htw_stop() and only switched back on when the outermost htw_start()
 * brings the count back to zero.
 */
#define htw_stop()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!raw_current_cpu_data.htw_seq++) {			\
			write_c0_pwctl(read_c0_pwctl() &		\
				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while (0)

#define htw_start()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!--raw_current_cpu_data.htw_seq) {			\
			write_c0_pwctl(read_c0_pwctl() |		\
				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while (0)

/*
 * A MIPS TLB entry maps an even/odd pair of pages, and the hardware only
 * treats the entry as global when the G bit is set in both halves. The
 * _PAGE_GLOBAL bit must therefore agree across each PTE "buddy" pair:
 * set_pte() propagates it to a still-none buddy, pte_clear() preserves
 * it, and pte_none() masks it back out.
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

/*
 * On MIPS32 CPUs with 64-bit physical addresses, a PTE is split across
 * the pte_low/pte_high words, so these helpers operate on both halves.
 */
#define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

	if (pte.pte_high & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy))
			buddy->pte_high |= _PAGE_GLOBAL;
	}
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
		null.pte_high = _PAGE_GLOBAL;

	set_pte_at(mm, addr, ptep, null);
	htw_start();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
#ifdef CONFIG_SMP
		/*
		 * For SMP, multiple CPUs can race, so we need to do
		 * this atomically.
		 */
#ifdef CONFIG_64BIT
#define LL_INSN "lld"
#define SC_INSN "scd"
#else /* CONFIG_32BIT */
#define LL_INSN "ll"
#define SC_INSN "sc"
#endif
		unsigned long page_global = _PAGE_GLOBAL;
		unsigned long tmp;

		__asm__ __volatile__ (
			"	.set	push\n"
			"	.set	noreorder\n"
			"1:	" LL_INSN "	%[tmp], %[buddy]\n"
			"	bnez	%[tmp], 2f\n"
			"	 or	%[tmp], %[tmp], %[global]\n"
			"	" SC_INSN "	%[tmp], %[buddy]\n"
			"	beqz	%[tmp], 1b\n"
			"	nop\n"
			"2:\n"
			"	.set	pop"
			: [buddy] "+m" (buddy->pte),
			  [tmp] "=&r" (tmp)
			: [global] "r" (page_global));
#else /* !CONFIG_SMP */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
	}
#endif
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
	htw_start();
}
#endif

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with size, but gcc 3.3 and older are not
 * able to see that this expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_MODIFIED;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_ACCESSED;
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED)
		pte.pte_high |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE)
		pte.pte_high |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (pte.pte_low & _PAGE_READ)
		pte.pte_high |= _PAGE_SILENT_READ;
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
#ifdef CONFIG_CPU_MIPSR2
	if (!(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	else
#endif
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#endif

static inline int pte_special(pte_t pte)	{ return 0; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/*
 * Macro to mark a page protection value as "uncacheable". Note that
 * "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
	pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
#endif

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
			 pte_t pte);
extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
			   pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;

	__update_tlb(vma, address, pte);
	__update_cache(vma, address, pte);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}

#define kern_addr_valid(addr)	(1)

#ifdef CONFIG_PHYS_ADDR_T_64BIT
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
			   unsigned long pfn, unsigned long size, pgprot_t prot);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long vaddr,
				     unsigned long pfn,
				     unsigned long size,
				     pgprot_t prot)
{
	phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);

	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_SPLITTING);
}

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SPLITTING;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
/* Extern to avoid header file madness */
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long address,
				 pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

#ifdef CONFIG_CPU_MIPSR2
	if (!(pmd_val(pmd) & _PAGE_NO_READ))
		pmd_val(pmd) |= _PAGE_SILENT_READ;
	else
#endif
	if (pmd_val(pmd) & _PAGE_READ)
		pmd_val(pmd) |= _PAGE_SILENT_READ;

	return pmd;
}

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic version of pmdp_huge_get_and_clear uses a version of
 * pmd_clear() with a different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#include <asm-generic/pgtable.h>

/*
 * Uncached accelerated TLB map for video memory access.
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot);
#endif

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise.
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _ASM_PGTABLE_H */
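/*
 * Illustrative sketch, not part of the original header: the pmd_* helpers
 * above compose like their pte_* counterparts. Building a writable, dirty
 * huge-page entry (the locals below are hypothetical):
 *
 *	pmd_t entry = mk_pmd(page, vma->vm_page_prot);
 *
 *	entry = pmd_mkhuge(pmd_mkdirty(pmd_mkwrite(entry)));
 *	set_pmd_at(mm, haddr, pmdp, entry);
 *
 * pmd_mkwrite() and pmd_mkdirty() also set _PAGE_SILENT_WRITE once both
 * _PAGE_WRITE and _PAGE_MODIFIED are present, preloading the TLB dirty
 * bit so the first store does not take a spurious TLB-modified fault.
 */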