Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2003 Ralf Baechle
7 */
8#ifndef _ASM_PGTABLE_H
9#define _ASM_PGTABLE_H
10
11#include <linux/mm_types.h>
12#include <linux/mmzone.h>
13#ifdef CONFIG_32BIT
14#include <asm/pgtable-32.h>
15#endif
16#ifdef CONFIG_64BIT
17#include <asm/pgtable-64.h>
18#endif
19
20#include <asm/cmpxchg.h>
21#include <asm/io.h>
22#include <asm/pgtable-bits.h>
23
24struct mm_struct;
25struct vm_area_struct;
26
/*
 * Protection bit templates.  The cacheability of user mappings is not
 * fixed at compile time: it is taken from the boot-time determined
 * _page_cachable_default.  Kernel mappings additionally carry
 * _PAGE_GLOBAL.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
				 _page_cachable_default)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
				 _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
/* Kernel mapping forced to noncoherent cached mode. */
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
				 _page_cachable_default)
/* Kernel mapping with caching disabled (device/IO memory). */
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
43
/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, and consider it to be the same as read. Also, write
 * permissions imply read permissions. This is the closest we can get
 * by reasonable means..
 */

/*
 * Dummy values to fill the protection tables in mmap.c.
 * The real values are generated at runtime (they depend on the
 * boot-time cacheability default below), so these are placeholders.
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)

/* Cache attribute applied to the protection templates; set during boot. */
extern unsigned long _page_cachable_default;
74
/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;	/* kernel virtual base of the zero page area */
extern unsigned long zero_page_mask;	/* selects a page within that area by colour */

/*
 * Select the zero page whose position matches vaddr's low bits; together
 * with __HAVE_COLOR_ZERO_PAGE this gives per-colour zero pages.
 */
#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

extern void paging_init(void);
88
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
/* A pmd entry holds the kernel virtual address of its pte page. */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
/* With THP enabled, pmd_page() is an inline function defined below. */
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)
101
/*
 * Disable the hardware page table walker (HTW).  Calls nest via the
 * per-CPU htw_seq count: only the outermost htw_stop() clears the PWEn
 * bit; the matching outermost htw_start() sets it again.  Interrupts
 * are disabled around the count/control-register update.
 *
 * Fix: "if(" -> "if (" for kernel coding style, consistent with the
 * htw_start() macro below.  No functional change.
 */
#define htw_stop()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!raw_current_cpu_data.htw_seq++) {			\
			write_c0_pwctl(read_c0_pwctl() &		\
				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while(0)
116
/*
 * Re-enable the hardware page table walker once the last nested
 * htw_stop() has been undone (htw_seq drops back to zero).  Must be
 * called exactly once for each preceding htw_stop().
 */
#define htw_start()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!--raw_current_cpu_data.htw_seq) {			\
			write_c0_pwctl(read_c0_pwctl() |		\
				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while(0)
131
132static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
133 pte_t *ptep, pte_t pteval);
134
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

/*
 * 32-bit CPU with 64-bit physical addresses: a software pte is a
 * pte_low/pte_high pair.  A pte counts as "none" when nothing but a
 * possibly leftover _PAGE_GLOBAL bit is set — pte_clear() below may
 * keep that bit to preserve the even/odd TLB pair's global state.
 */
#ifdef CONFIG_XPA
# define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
#else
# define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#endif

#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
#define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)
145
/*
 * Install a pte (64-bit physical / 32-bit CPU variant).  The high word
 * is written first; the barrier orders it before the low word, which
 * carries _PAGE_PRESENT (see pte_present above).  When the new pte is
 * global, the buddy entry of the even/odd pair is upgraded to global
 * too, but only if it is currently none.
 */
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

	/* The global bit is tested in pte_high under XPA, pte_low otherwise. */
#ifdef CONFIG_XPA
	if (pte.pte_high & _PAGE_GLOBAL) {
#else
	if (pte.pte_low & _PAGE_GLOBAL) {
#endif
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			if (!IS_ENABLED(CONFIG_XPA))
				buddy->pte_low |= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}
169
/*
 * Clear a pte (64-bit physical / 32-bit CPU variant).  If the buddy of
 * the even/odd pair is global, the cleared entry keeps _PAGE_GLOBAL so
 * the pair stays consistent.  The hardware page table walker is paused
 * across the update.
 */
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (IS_ENABLED(CONFIG_XPA)) {
		if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
			null.pte_high = _PAGE_GLOBAL;
	} else {
		if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
			null.pte_low = null.pte_high = _PAGE_GLOBAL;
	}

	set_pte_at(mm, addr, ptep, null);
	htw_start();
}
187#else
188
/*
 * A pte is "none" if nothing but a possibly leftover _PAGE_GLOBAL bit
 * is set; pte_clear() below may keep that bit to preserve the even/odd
 * TLB pair's global state.
 */
#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)
192
/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		/*
		 * The cmpxchg only installs _PAGE_GLOBAL if the buddy is
		 * still 0 (i.e. none), so a populated buddy is untouched.
		 */
# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32)
		cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL);
# else
		cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
# endif
	}
#endif
}
216
/*
 * Clear a pte.  On CPUs with a global TLB bit, keep _PAGE_GLOBAL set in
 * the cleared entry when the even/odd buddy has it, so the pair stays
 * consistent.  The hardware page table walker is paused across the
 * update.
 */
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
	htw_start();
}
229#endif
230
231static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
232 pte_t *ptep, pte_t pteval)
233{
234 extern void __update_cache(unsigned long address, pte_t pte);
235
236 if (!pte_present(pteval))
237 goto cache_sync_done;
238
239 if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
240 goto cache_sync_done;
241
242 __update_cache(addr, pteval);
243cache_sync_done:
244 set_pte(ptep, pteval);
245}
246
/*
 * (pmds are folded into puds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
#endif

/* log2 of the size of each page table entry type. */
#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with size but gcc 3.3 and older are not able
 * to find that this expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];
270
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
/* The write/dirty/accessed software bits live in the pte_low word. */
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }
279
280static inline pte_t pte_wrprotect(pte_t pte)
281{
282 pte.pte_low &= ~_PAGE_WRITE;
283 if (!IS_ENABLED(CONFIG_XPA))
284 pte.pte_low &= ~_PAGE_SILENT_WRITE;
285 pte.pte_high &= ~_PAGE_SILENT_WRITE;
286 return pte;
287}
288
289static inline pte_t pte_mkclean(pte_t pte)
290{
291 pte.pte_low &= ~_PAGE_MODIFIED;
292 if (!IS_ENABLED(CONFIG_XPA))
293 pte.pte_low &= ~_PAGE_SILENT_WRITE;
294 pte.pte_high &= ~_PAGE_SILENT_WRITE;
295 return pte;
296}
297
298static inline pte_t pte_mkold(pte_t pte)
299{
300 pte.pte_low &= ~_PAGE_ACCESSED;
301 if (!IS_ENABLED(CONFIG_XPA))
302 pte.pte_low &= ~_PAGE_SILENT_READ;
303 pte.pte_high &= ~_PAGE_SILENT_READ;
304 return pte;
305}
306
307static inline pte_t pte_mkwrite(pte_t pte)
308{
309 pte.pte_low |= _PAGE_WRITE;
310 if (pte.pte_low & _PAGE_MODIFIED) {
311 if (!IS_ENABLED(CONFIG_XPA))
312 pte.pte_low |= _PAGE_SILENT_WRITE;
313 pte.pte_high |= _PAGE_SILENT_WRITE;
314 }
315 return pte;
316}
317
318static inline pte_t pte_mkdirty(pte_t pte)
319{
320 pte.pte_low |= _PAGE_MODIFIED;
321 if (pte.pte_low & _PAGE_WRITE) {
322 if (!IS_ENABLED(CONFIG_XPA))
323 pte.pte_low |= _PAGE_SILENT_WRITE;
324 pte.pte_high |= _PAGE_SILENT_WRITE;
325 }
326 return pte;
327}
328
329static inline pte_t pte_mkyoung(pte_t pte)
330{
331 pte.pte_low |= _PAGE_ACCESSED;
332 if (!(pte.pte_low & _PAGE_NO_READ)) {
333 if (!IS_ENABLED(CONFIG_XPA))
334 pte.pte_low |= _PAGE_SILENT_READ;
335 pte.pte_high |= _PAGE_SILENT_READ;
336 }
337 return pte;
338}
339#else
/* Software write/dirty/accessed bits for the single-word pte layout. */
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
343
344static inline pte_t pte_wrprotect(pte_t pte)
345{
346 pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
347 return pte;
348}
349
350static inline pte_t pte_mkclean(pte_t pte)
351{
352 pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
353 return pte;
354}
355
356static inline pte_t pte_mkold(pte_t pte)
357{
358 pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
359 return pte;
360}
361
362static inline pte_t pte_mkwrite(pte_t pte)
363{
364 pte_val(pte) |= _PAGE_WRITE;
365 if (pte_val(pte) & _PAGE_MODIFIED)
366 pte_val(pte) |= _PAGE_SILENT_WRITE;
367 return pte;
368}
369
370static inline pte_t pte_mkdirty(pte_t pte)
371{
372 pte_val(pte) |= _PAGE_MODIFIED;
373 if (pte_val(pte) & _PAGE_WRITE)
374 pte_val(pte) |= _PAGE_SILENT_WRITE;
375 return pte;
376}
377
378static inline pte_t pte_mkyoung(pte_t pte)
379{
380 pte_val(pte) |= _PAGE_ACCESSED;
381 if (!(pte_val(pte) & _PAGE_NO_READ))
382 pte_val(pte) |= _PAGE_SILENT_READ;
383 return pte;
384}
385
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/* Test / set the huge-page marker bit in a pte. */
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#endif
/* No "special" pte bit on MIPS; provide the mandatory no-op stubs. */
static inline int pte_special(pte_t pte)	{ return 0; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
398
399/*
400 * Macro to make mark a page protection value as "uncacheable". Note
401 * that "protection" is really a misnomer here as the protection value
402 * contains the memory attribute bits, dirty bits, and various other
403 * bits as well.
404 */
405#define pgprot_noncached pgprot_noncached
406
407static inline pgprot_t pgprot_noncached(pgprot_t _prot)
408{
409 unsigned long prot = pgprot_val(_prot);
410
411 prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
412
413 return __pgprot(prot);
414}
415
416#define pgprot_writecombine pgprot_writecombine
417
418static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
419{
420 unsigned long prot = pgprot_val(_prot);
421
422 /* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
423 prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;
424
425 return __pgprot(prot);
426}
427
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/*
 * pte_modify(): replace a pte's protection bits with newprot while
 * keeping the pfn and the bits covered by the change mask.  Three
 * layouts exist: XPA (protection split across pte_low/pte_high),
 * non-XPA two-word ptes, and the single-word default.
 */
#if defined(CONFIG_XPA)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}
#endif
459
460
461extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
462 pte_t pte);
463
464static inline void update_mmu_cache(struct vm_area_struct *vma,
465 unsigned long address, pte_t *ptep)
466{
467 pte_t pte = *ptep;
468 __update_tlb(vma, address, pte);
469}
470
471static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
472 unsigned long address, pmd_t *pmdp)
473{
474 pte_t pte = *(pte_t *)pmdp;
475
476 __update_tlb(vma, address, pte);
477}
478
/* All kernel virtual addresses are treated as valid. */
#define kern_addr_valid(addr)	(1)

#ifdef CONFIG_PHYS_ADDR_T_64BIT
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);

/*
 * Remap an I/O pfn range into userspace.  The physical address is run
 * through fixup_bigphys_addr() first — presumably so platforms can
 * rewrite >32-bit addresses; confirm against the platform
 * implementations of fixup_bigphys_addr().
 */
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
		unsigned long vaddr,
		unsigned long pfn,
		unsigned long size,
		pgprot_t prot)
{
	phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
#endif
495
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/
#define pmdp_establish generic_pmdp_establish

/* THP availability is probed at runtime; implemented out of line. */
#define has_transparent_hugepage has_transparent_hugepage
extern int has_transparent_hugepage(void);
503
504static inline int pmd_trans_huge(pmd_t pmd)
505{
506 return !!(pmd_val(pmd) & _PAGE_HUGE);
507}
508
509static inline pmd_t pmd_mkhuge(pmd_t pmd)
510{
511 pmd_val(pmd) |= _PAGE_HUGE;
512
513 return pmd;
514}
515
516extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
517 pmd_t *pmdp, pmd_t pmd);
518
519#define pmd_write pmd_write
520static inline int pmd_write(pmd_t pmd)
521{
522 return !!(pmd_val(pmd) & _PAGE_WRITE);
523}
524
525static inline pmd_t pmd_wrprotect(pmd_t pmd)
526{
527 pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
528 return pmd;
529}
530
531static inline pmd_t pmd_mkwrite(pmd_t pmd)
532{
533 pmd_val(pmd) |= _PAGE_WRITE;
534 if (pmd_val(pmd) & _PAGE_MODIFIED)
535 pmd_val(pmd) |= _PAGE_SILENT_WRITE;
536
537 return pmd;
538}
539
540static inline int pmd_dirty(pmd_t pmd)
541{
542 return !!(pmd_val(pmd) & _PAGE_MODIFIED);
543}
544
545static inline pmd_t pmd_mkclean(pmd_t pmd)
546{
547 pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
548 return pmd;
549}
550
551static inline pmd_t pmd_mkdirty(pmd_t pmd)
552{
553 pmd_val(pmd) |= _PAGE_MODIFIED;
554 if (pmd_val(pmd) & _PAGE_WRITE)
555 pmd_val(pmd) |= _PAGE_SILENT_WRITE;
556
557 return pmd;
558}
559
560static inline int pmd_young(pmd_t pmd)
561{
562 return !!(pmd_val(pmd) & _PAGE_ACCESSED);
563}
564
565static inline pmd_t pmd_mkold(pmd_t pmd)
566{
567 pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);
568
569 return pmd;
570}
571
572static inline pmd_t pmd_mkyoung(pmd_t pmd)
573{
574 pmd_val(pmd) |= _PAGE_ACCESSED;
575
576 if (!(pmd_val(pmd) & _PAGE_NO_READ))
577 pmd_val(pmd) |= _PAGE_SILENT_READ;
578
579 return pmd;
580}
581
582/* Extern to avoid header file madness */
583extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
584
585static inline unsigned long pmd_pfn(pmd_t pmd)
586{
587 return pmd_val(pmd) >> _PFN_SHIFT;
588}
589
590static inline struct page *pmd_page(pmd_t pmd)
591{
592 if (pmd_trans_huge(pmd))
593 return pfn_to_page(pmd_pfn(pmd));
594
595 return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
596}
597
/*
 * Replace the pmd's protection bits with newprot, keeping the bits in
 * _PAGE_CHG_MASK and the huge-page marker.
 */
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
		       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
	return pmd;
}

/* Clear present/valid/dirty so the pmd no longer maps anything. */
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}
611
612/*
613 * The generic version pmdp_huge_get_and_clear uses a version of pmd_clear() with a
614 * different prototype.
615 */
616#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
617static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
618 unsigned long address, pmd_t *pmdp)
619{
620 pmd_t old = *pmdp;
621
622 pmd_clear(pmdp);
623
624 return old;
625}
626
627#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
628
629#include <asm-generic/pgtable.h>
630
/*
 * uncached accelerated TLB map for video memory access
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t vma_prot);
#endif

/*
 * We provide our own get_unmapped area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)
653
654#endif /* _ASM_PGTABLE_H */