Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
pgtable.h (FR-V) at v2.6.17-rc5, 557 lines, 18 kB
/* pgtable.h: FR-V page table mangling
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Derived from:
 *	include/asm-m68knommu/pgtable.h
 *	include/asm-i386/pgtable.h
 */

#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/config.h>
#include <asm/mem-layout.h>
#include <asm/setup.h>
#include <asm/processor.h>

#ifndef __ASSEMBLY__
#include <linux/threads.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
struct mm_struct;
struct vm_area_struct;
#endif

#ifndef __ASSEMBLY__
#if defined(CONFIG_HIGHPTE)
typedef unsigned long pte_addr_t;
#else
typedef pte_t *pte_addr_t;
#endif
#endif

/*****************************************************************************/
/*
 * MMU-less operation case first
 */
#ifndef CONFIG_MMU

#define pgd_present(pgd)	(1)	/* pages are always present on NO_MM */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_clear(pgdp)
#define kern_addr_valid(addr)	(1)
#define pmd_offset(a, b)	((void *) 0)

#define PAGE_NONE	__pgprot(0)	/* these mean nothing to NO_MM */
#define PAGE_SHARED	__pgprot(0)	/* these mean nothing to NO_MM */
#define PAGE_COPY	__pgprot(0)	/* these mean nothing to NO_MM */
#define PAGE_READONLY	__pgprot(0)	/* these mean nothing to NO_MM */
#define PAGE_KERNEL	__pgprot(0)	/* these mean nothing to NO_MM */

#define __swp_type(x)		(0)
#define __swp_offset(x)		(0)
#define __swp_entry(typ,off)	((swp_entry_t) { ((typ) | ((off) << 7)) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#ifndef __ASSEMBLY__
static inline int pte_file(pte_t pte) { return 0; }
#endif

#define ZERO_PAGE(vaddr)	({ BUG(); NULL; })

#define swapper_pg_dir		((pgd_t *) NULL)

#define pgtable_cache_init()	do {} while (0)

#else /* !CONFIG_MMU */
/*****************************************************************************/
/*
 * then MMU operation
 */

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
#ifndef __ASSEMBLY__
extern unsigned long empty_zero_page;
#define ZERO_PAGE(vaddr)	virt_to_page(empty_zero_page)
#endif

/*
 * we use 2-level page tables, folding the PMD (mid-level table) into the PGE (top-level entry)
 * [see Documentation/fujitsu/frv/mmu-layout.txt]
 *
 * Page Directory:
 *  - Size: 16KB
 *  - 64 PGEs per PGD
 *  - Each PGE holds 1 PUD and covers 64MB
 *
 * Page Upper Directory:
 *  - Size: 256B
 *  - 1 PUE per PUD
 *  - Each PUE holds 1 PMD and covers 64MB
 *
 * Page Mid-Level Directory
 *  - Size: 256B
 *  - 1 PME per PMD
 *  - Each PME holds 64 STEs, all of which point to separate chunks of the same Page Table
 *  - All STEs are instantiated at the same time
 *
 * Page Table
 *  - Size: 16KB
 *  - 4096 PTEs per PT
 *  - Each Linux PT is subdivided into 64 FR451 PT's, each of which holds 64 entries
 *
 * Pages
 *  - Size: 16KB
 *
 * total PTEs
 *	= 1 PML4E * 64 PGEs * 1 PUEs * 1 PMEs * 4096 PTEs
 *	= 1 PML4E * 64 PGEs * 64 STEs * 64 PTEs/FR451-PT
 *	= 262144 (or 256 * 1024)
 */
#define PGDIR_SHIFT		26
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE - 1))
#define PTRS_PER_PGD		64

#define PUD_SHIFT		26
#define PTRS_PER_PUD		1
#define PUD_SIZE		(1UL << PUD_SHIFT)
#define PUD_MASK		(~(PUD_SIZE - 1))
#define PUE_SIZE		256

#define PMD_SHIFT		26
#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE - 1))
#define PTRS_PER_PMD		1
#define PME_SIZE		256

#define __frv_PT_SIZE		256

#define PTRS_PER_PTE		4096

#define USER_PGDS_IN_LAST_PML4	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#define USER_PGD_PTRS		(PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - USER_PGD_PTRS)

#define TWOLEVEL_PGDIR_SHIFT	26
#define BOOT_USER_PGD_PTRS	(__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS	(PTRS_PER_PGD - BOOT_USER_PGD_PTRS)
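
/*
 * Worked example (editorial sketch, not part of the original header):
 * taking PAGE_SHIFT as 14 (16KB pages, per this port's asm/page.h), the
 * geometry above is self-consistent:
 *
 *	PGDIR_SIZE     = 1UL << 26                     = 64MB per PGE
 *	whole space    = PTRS_PER_PGD * PGDIR_SIZE     = 64 * 64MB = 4GB
 *	PTRS_PER_PTE   = 1 << (PGDIR_SHIFT - PAGE_SHIFT)
 *	               = 1 << (26 - 14)                = 4096
 *	total PTEs     = PTRS_PER_PGD * PTRS_PER_PTE   = 64 * 4096 = 262144
 */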

#ifndef __ASSEMBLY__

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte)
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pmd_val(pud_val(e)))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pmd_val(pud_val(pgd_val(e))))

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval)				\
do {							\
	*(pteptr) = (pteval);				\
	asm volatile("dcf %M0" :: "U"(*pteptr));	\
} while (0)
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

#define set_pte_atomic(pteptr, pteval)	set_pte((pteptr), (pteval))
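
/*
 * Editorial sketch (hypothetical, not from the original header): the "dcf"
 * (data cache flush) in set_pte() pushes the PTE store out to memory, since
 * the FR451 TLB-miss handlers read the page tables through their own DAMPR
 * mappings and could otherwise see a stale entry.  A caller just writes
 * through the macro, e.g. using pfn_pte() from later in this file:
 *
 *	static inline void example_install(pte_t *ptep, unsigned long pfn)
 *	{
 *		set_pte(ptep, pfn_pte(pfn, PAGE_SHARED));  // store + dcf
 *	}
 */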

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's
 */
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pud is never bad, and a pud always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
static inline void pgd_clear(pgd_t *pgd)	{ }

#define pgd_populate(mm, pgd, pud)		do { } while (0)
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pgd(pgdptr, pgdval)				\
do {							\
	memcpy((pgdptr), &(pgdval), sizeof(pgd_t));	\
	asm volatile("dcf %M0" :: "U"(*(pgdptr)));	\
} while (0)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *) pgd;
}

#define pgd_page(pgd)		(pud_page((pud_t){ pgd }))
#define pgd_page_kernel(pgd)	(pud_page_kernel((pud_t){ pgd }))

/*
 * allocating and freeing a pud is trivial: the 1-entry pud is
 * inside the pgd, so has no extra memory associated with it.
 */
#define pud_alloc_one(mm, address)	NULL
#define pud_free(x)			do { } while (0)
#define __pud_free_tlb(tlb, x)		do { } while (0)

/*
 * The "pud_xxx()" functions here are trivial for a folded two-level
 * setup: the pmd is never bad, and a pmd always exists (as it's folded
 * into the pud entry)
 */
static inline int pud_none(pud_t pud)		{ return 0; }
static inline int pud_bad(pud_t pud)		{ return 0; }
static inline int pud_present(pud_t pud)	{ return 1; }
static inline void pud_clear(pud_t *pud)	{ }

#define pud_populate(mm, pmd, pte)		do { } while (0)

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval)	set_pmd((pmd_t *)(pudptr), (pmd_t) { pudval })

#define pud_page(pud)		(pmd_page((pmd_t){ pud }))
#define pud_page_kernel(pud)	(pmd_page_kernel((pmd_t){ pud }))

/*
 * (pmds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
extern void __set_pmd(pmd_t *pmdptr, unsigned long __pmd);

#define set_pmd(pmdptr, pmdval)			\
do {						\
	__set_pmd((pmdptr), (pmdval).ste[0]);	\
} while (0)

#define __pmd_index(address)	0

static inline pmd_t *pmd_offset(pud_t *dir, unsigned long address)
{
	return (pmd_t *) dir + __pmd_index(address);
}
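
/*
 * Editorial sketch (hypothetical, not from the original header): with the
 * PUD and PMD levels folded away, a software walk from an mm and a virtual
 * address down to a kernel-mapped PTE collapses to one real lookup at each
 * end.  A possible helper, using pte_offset_kernel() defined later in this
 * file:
 *
 *	static inline pte_t *example_lookup(struct mm_struct *mm,
 *					    unsigned long address)
 *	{
 *		pgd_t *pge = pgd_offset(mm, address);	// real index
 *		pud_t *pue = pud_offset(pge, address);	// just a cast
 *		pmd_t *pme = pmd_offset(pue, address);	// index is always 0
 *		return pte_offset_kernel(pme, address);	// real index
 *	}
 */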

#define pte_same(a, b)		((a).pte == (b).pte)
#define pte_page(x)		(mem_map + ((unsigned long)(((x).pte >> PAGE_SHIFT))))
#define pte_none(x)		(!(x).pte)
#define pte_pfn(x)		((unsigned long)(((x).pte >> PAGE_SHIFT)))
#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define VMALLOC_VMADDR(x)	((unsigned long) (x))

#endif /* !__ASSEMBLY__ */

/*
 * control flags in AMPR registers and TLB entries
 */
#define _PAGE_BIT_PRESENT	xAMPRx_V_BIT
#define _PAGE_BIT_WP		DAMPRx_WP_BIT
#define _PAGE_BIT_NOCACHE	xAMPRx_C_BIT
#define _PAGE_BIT_SUPER		xAMPRx_S_BIT
#define _PAGE_BIT_ACCESSED	xAMPRx_RESERVED8_BIT
#define _PAGE_BIT_DIRTY		xAMPRx_M_BIT
#define _PAGE_BIT_NOTGLOBAL	xAMPRx_NG_BIT

#define _PAGE_PRESENT		xAMPRx_V
#define _PAGE_WP		DAMPRx_WP
#define _PAGE_NOCACHE		xAMPRx_C
#define _PAGE_SUPER		xAMPRx_S
#define _PAGE_ACCESSED		xAMPRx_RESERVED8	/* accessed if set */
#define _PAGE_DIRTY		xAMPRx_M
#define _PAGE_NOTGLOBAL		xAMPRx_NG

#define _PAGE_RESERVED_MASK	(xAMPRx_RESERVED8 | xAMPRx_RESERVED13)

#define _PAGE_FILE		0x002	/* set:pagecache unset:swap */
#define _PAGE_PROTNONE		0x000	/* if not present */

#define _PAGE_CHG_MASK		(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define __PGPROT_BASE \
	(_PAGE_PRESENT | xAMPRx_SS_16Kb | xAMPRx_D | _PAGE_NOTGLOBAL | _PAGE_ACCESSED)

#define PAGE_NONE		__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED		__pgprot(__PGPROT_BASE)
#define PAGE_COPY		__pgprot(__PGPROT_BASE | _PAGE_WP)
#define PAGE_READONLY		__pgprot(__PGPROT_BASE | _PAGE_WP)

#define __PAGE_KERNEL		(__PGPROT_BASE | _PAGE_SUPER | _PAGE_DIRTY)
#define __PAGE_KERNEL_NOCACHE	(__PGPROT_BASE | _PAGE_SUPER | _PAGE_DIRTY | _PAGE_NOCACHE)
#define __PAGE_KERNEL_RO	(__PGPROT_BASE | _PAGE_SUPER | _PAGE_DIRTY | _PAGE_WP)

#define MAKE_GLOBAL(x)		__pgprot((x) & ~_PAGE_NOTGLOBAL)

#define PAGE_KERNEL		MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_RO		MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)

#define _PAGE_TABLE		(_PAGE_PRESENT | xAMPRx_SS_16Kb)

#ifndef __ASSEMBLY__

/*
 * The FR451 can do execute protection by virtue of having separate TLB miss handlers for
 * instruction access and for data access.  However, we don't have enough reserved bits to say
 * "execute only", so we don't bother.  If you can read it, you can execute it and vice versa.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
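
/*
 * Editorial note (not from the original header): generic mm code gathers
 * these into protection_map[], indexed by the low vm_flags bits (roughly
 * protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]), so the
 * __Pxxx entries serve private mappings and the __Sxxx entries shared ones.
 * Every writable private combination maps to PAGE_COPY (write-protected),
 * so the first write faults and triggers copy-on-write, and __P100/__S100
 * (execute-only) degrade to PAGE_READONLY because, as noted above, read
 * and execute permission cannot be separated here.
 */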

/*
 * Define this to warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE,..)'
 */
#undef TEST_ACCESS_OK

#define pte_present(x)		(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)

#define pmd_none(x)		(!pmd_val(x))
#define pmd_present(x)		(pmd_val(x) & _PAGE_PRESENT)
#define pmd_bad(x)		(pmd_val(x) & xAMPRx_SS)
#define pmd_clear(xp)		do { __set_pmd(xp, 0); } while (0)

#define pmd_page_kernel(pmd) \
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

#ifndef CONFIG_DISCONTIGMEM
#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
#endif

#define pages_to_mb(x)		((x) >> (20 - PAGE_SHIFT))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)		{ return !((pte).pte & _PAGE_SUPER); }
static inline int pte_exec(pte_t pte)		{ return !((pte).pte & _PAGE_SUPER); }
static inline int pte_dirty(pte_t pte)		{ return (pte).pte & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return (pte).pte & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)		{ return !((pte).pte & _PAGE_WP); }

static inline pte_t pte_rdprotect(pte_t pte)	{ (pte).pte |= _PAGE_SUPER; return pte; }
static inline pte_t pte_exprotect(pte_t pte)	{ (pte).pte |= _PAGE_SUPER; return pte; }
static inline pte_t pte_mkclean(pte_t pte)	{ (pte).pte &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ (pte).pte &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)	{ (pte).pte |= _PAGE_WP; return pte; }
static inline pte_t pte_mkread(pte_t pte)	{ (pte).pte &= ~_PAGE_SUPER; return pte; }
static inline pte_t pte_mkexec(pte_t pte)	{ (pte).pte &= ~_PAGE_SUPER; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ (pte).pte |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ (pte).pte |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ (pte).pte &= ~_PAGE_WP; return pte; }
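
/*
 * Editorial note (not from the original header): the user-visible
 * permission bits work in an inverted sense - _PAGE_SUPER set means
 * supervisor-only (hence not user-readable or user-executable), and
 * _PAGE_WP set means write-protected.  A hypothetical round trip:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_COPY);	// __PGPROT_BASE | _PAGE_WP
 *	pte_write(pte);				// 0: WP bit is set
 *	pte = pte_mkwrite(pte);			// clears _PAGE_WP
 *	pte_write(pte);				// 1: now writable
 */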

static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	int i = test_and_clear_bit(_PAGE_BIT_DIRTY, ptep);
	asm volatile("dcf %M0" :: "U"(*ptep));
	return i;
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	int i = test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
	asm volatile("dcf %M0" :: "U"(*ptep));
	return i;
}

static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long x = xchg(&ptep->pte, 0);
	asm volatile("dcf %M0" :: "U"(*ptep));
	return __pte(x);
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	set_bit(_PAGE_BIT_WP, ptep);
	asm volatile("dcf %M0" :: "U"(*ptep));
}

/*
 * Macro to mark a page protection value as "uncacheable"
 */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NOCACHE))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
#define mk_pte_huge(entry)	((entry).pte_low |= _PAGE_PRESENT | _PAGE_PSE)

/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot)	pfn_pte((physpage) >> PAGE_SHIFT, pgprot)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte &= _PAGE_CHG_MASK;
	pte.pte |= pgprot_val(newprot);
	return pte;
}

/* to find an entry in a page-table-directory. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_index_k(addr)	pgd_index(addr)

/* Find an entry in the bottom-level page table.. */
#define __pte_index(address)	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))

#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
	((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(address))
#define pte_offset_map_nested(dir, address) \
	((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(address))
#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte)	kunmap_atomic((pte), KM_PTE1)
#else
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address)	pte_offset_map((dir), (address))
#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)
#endif

/*
 * Handle swap and file entries
 * - the PTE is encoded in the following format:
 *	bit 0:		Must be 0 (!_PAGE_PRESENT)
 *	bit 1:		Type: 0 for swap, 1 for file (_PAGE_FILE)
 *	bits 2-7:	Swap type
 *	bits 8-31:	Swap offset
 *	bits 2-31:	File pgoff
 */
#define __swp_type(x)			(((x).val >> 2) & 0x1f)
#define __swp_offset(x)			((x).val >> 8)
#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 2) | ((offset) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

static inline int pte_file(pte_t pte)
{
	return pte.pte & _PAGE_FILE;
}

#define PTE_FILE_MAX_BITS	29

#define pte_to_pgoff(PTE)	((PTE).pte >> 2)
#define pgoff_to_pte(off)	__pte((off) << 2 | _PAGE_FILE)
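
/*
 * Worked example (editorial addition): __swp_entry(3, 0x1234) yields
 * val == (3 << 2) | (0x1234 << 8) == 0x12340c.  Bit 0 (_PAGE_PRESENT)
 * and bit 1 (_PAGE_FILE) are both clear, so the fault handler treats
 * the PTE as a swap entry and recovers the fields:
 *
 *	__swp_type(entry)   == (0x12340c >> 2) & 0x1f == 3
 *	__swp_offset(entry) ==  0x12340c >> 8         == 0x1234
 *
 * A file (nonlinear-mapping) PTE instead has bit 1 set and keeps the
 * page offset in bits 2-31, matching pte_to_pgoff()/pgoff_to_pte().
 */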

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)
#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#define MK_IOSPACE_PFN(space, pfn)	(pfn)
#define GET_IOSPACE(pfn)		0
#define GET_PFN(pfn)			(pfn)

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

/*
 * preload information about a newly instantiated PTE into the SCR0/SCR1 PGE cache
 */
static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long ampr;
	pgd_t *pge = pgd_offset(current->mm, address);
	pud_t *pue = pud_offset(pge, address);
	pmd_t *pme = pmd_offset(pue, address);

	ampr = pme->ste[0] & 0xffffff00;
	ampr |= xAMPRx_L | xAMPRx_SS_16Kb | xAMPRx_S | xAMPRx_C | xAMPRx_V;

	asm volatile("movgs %0,scr0\n"
		     "movgs %0,scr1\n"
		     "movgs %1,dampr4\n"
		     "movgs %1,dampr5\n"
		     :
		     : "r"(address), "r"(ampr)
		     );
}

#ifdef CONFIG_PROC_FS
extern char *proc_pid_status_frv_cxnr(struct mm_struct *mm, char *buffer);
#endif

extern void __init pgtable_cache_init(void);

#endif /* !__ASSEMBLY__ */
#endif /* !CONFIG_MMU */

#ifndef __ASSEMBLY__
extern void __init paging_init(void);
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PGTABLE_H */
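
/*
 * Worked example (editorial sketch; the value is illustrative, not from
 * the sources): in update_mmu_cache() above, if pme->ste[0] were
 * 0x02f04b01 then
 *
 *	ampr = (0x02f04b01 & 0xffffff00)	// base of the PT fragment
 *	     | xAMPRx_L | xAMPRx_SS_16Kb	// locked, 16Kb segment
 *	     | xAMPRx_S | xAMPRx_C | xAMPRx_V;	// supervisor, C-bit, valid
 *
 * SCR0/SCR1 receive the faulting virtual address and DAMPR4/DAMPR5 this
 * AMPR value, so the TLB-miss handlers can locate the page tables for
 * this 64MB chunk of the address space without a fresh top-down walk.
 */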