/*
 * arch/arm/include/asm/pgtable.h
 *
 * Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm-generic/4level-fixup.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include "pgtable-nommu.h"

#else

#include <asm/memory.h>
#include <mach/vmalloc.h>
#include <asm/pgtable-hwdef.h>

#include <asm/pgtable-2level.h>
/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * Note that platforms may override VMALLOC_START, but they must provide
 * VMALLOC_END. VMALLOC_END defines the (exclusive) limit of this space,
 * which must not overlap IO space.
 */
#ifndef VMALLOC_START
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#endif
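
/*
 * A worked example with a hypothetical value (for illustration only):
 * if high_memory were 0xc0800000, then
 *
 *	VMALLOC_START = (0xc0800000 + 0x00800000) & ~0x007fffff
 *	              = 0xc1000000
 *
 * i.e. the vmalloc area begins 8MB above the top of lowmem, rounded
 * down to an 8MB boundary.
 */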

#define LIBRARY_TEXT_START 0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)

/*
 * This is the lowest virtual address at which we permit any user space
 * mapping to be placed. This is particularly important for CPUs that
 * do not use high vectors.
 */
#define FIRST_USER_ADDRESS PAGE_SIZE

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits such as global/ASID and
 * SMP shared mapping bits.
 */
#define _L_PTE_DEFAULT (L_PTE_PRESENT | L_PTE_YOUNG)

extern pgprot_t pgprot_user;
extern pgprot_t pgprot_kernel;

#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))

#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC pgprot_kernel
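
/*
 * For illustration, PAGE_KERNEL expands to
 *
 *	__pgprot(pgprot_val(pgprot_kernel) | L_PTE_XN)
 *
 * i.e. the runtime kernel memory policy with execute permission
 * removed; PAGE_KERNEL_EXEC is the same policy with XN left clear.
 */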

#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#define pgprot_stronglyordered(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif
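
/*
 * A typical use of the helpers above, sketched with hypothetical names
 * (my_dev_mmap and pfn are illustrative, not part of this file):
 *
 *	static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *		return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *					  vma->vm_end - vma->vm_start,
 *					  vma->vm_page_prot);
 *	}
 */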

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version. These get translated into the best that the
 * architecture can perform. Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) Write implies read permission
 */
#define __P000 __PAGE_NONE
#define __P001 __PAGE_READONLY
#define __P010 __PAGE_COPY
#define __P011 __PAGE_COPY
#define __P100 __PAGE_READONLY_EXEC
#define __P101 __PAGE_READONLY_EXEC
#define __P110 __PAGE_COPY_EXEC
#define __P111 __PAGE_COPY_EXEC

#define __S000 __PAGE_NONE
#define __S001 __PAGE_READONLY
#define __S010 __PAGE_SHARED
#define __S011 __PAGE_SHARED
#define __S100 __PAGE_READONLY_EXEC
#define __S101 __PAGE_READONLY_EXEC
#define __S110 __PAGE_SHARED_EXEC
#define __S111 __PAGE_SHARED_EXEC
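
/*
 * The three digits name the (exec, write, read) bits of the mmap
 * request; __P* entries are for private mappings, __S* for shared ones.
 * Note for example that __P011 (private read/write) maps to __PAGE_COPY:
 * the page starts out read-only so that the first write faults and
 * triggers copy-on-write.
 */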

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr) (empty_zero_page)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* to find an entry in a page-table-directory */
#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd) (0)
#define pgd_bad(pgd) (0)
#define pgd_present(pgd) (1)
#define pgd_clear(pgdp) do { } while (0)
#define set_pgd(pgd,pgdp) do { } while (0)
#define set_pud(pud,pudp) do { } while (0)


/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, addr) ((pmd_t *)(dir))
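
/*
 * A page table walk therefore degenerates to (illustrative only):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);	// same entry, recast
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *
 * with no intermediate table allocation or validity checks at the
 * pgd level.
 */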

#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) & 2)

#define copy_pmd(pmdpd,pmdps) \
	do { \
		pmdpd[0] = pmdps[0]; \
		pmdpd[1] = pmdps[1]; \
		flush_pmd_entry(pmdpd); \
	} while (0)

#define pmd_clear(pmdp) \
	do { \
		pmdp[0] = __pmd(0); \
		pmdp[1] = __pmd(0); \
		clean_pmd_entry(pmdp); \
	} while (0)

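/*
 * Each Linux PGD entry spans 2MB and is backed by a pair of 1MB
 * hardware first-level entries, which is why copy_pmd() and pmd_clear()
 * above always update entries [0] and [1] together. In pmd_page_vaddr()
 * below, the (s32)PAGE_MASK cast sign-extends the mask so that the
 * expression remains correct should pmd_val()/PHYS_MASK ever be wider
 * than 32 bits.
 */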
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

/* we don't need complex calculations here as the pmd is folded into the pgd */
#define pmd_addr_end(addr,end) (end)


#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd) pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte) do { } while (0)
#else
#define __pte_map(pmd) (pte_t *)kmap_atomic(pmd_page(*(pmd)))
#define __pte_unmap(pte) kunmap_atomic(pte)
#endif

#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(pmd,addr) (pmd_page_vaddr(*(pmd)) + pte_index(addr))

#define pte_offset_map(pmd,addr) (__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte) __pte_unmap(pte)
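
/*
 * Worked example with a hypothetical address (for illustration only):
 * with 4kB pages and PTRS_PER_PTE == 512,
 *
 *	pte_index(0x00123456) = (0x00123456 >> 12) & 0x1ff = 0x123
 *
 * so pte_offset_kernel() returns a pointer to entry 0x123 (291) of the
 * page table addressed by the pmd.
 */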

#define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot)

#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

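/*
 * User mappings (addr < TASK_SIZE) are installed with PTE_EXT_NG so the
 * resulting TLB entries are non-global (tagged with the current ASID),
 * and the I/D caches are synchronised first in case the page will be
 * executed; global kernel mappings need neither step.
 */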
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	if (addr >= TASK_SIZE)
		set_pte_ext(ptep, pteval, 0);
	else {
		__sync_icache_dcache(pteval);
		set_pte_ext(ptep, pteval, PTE_EXT_NG);
	}
}

#define pte_none(pte) (!pte_val(pte))
#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
#define pte_special(pte) (0)

#define pte_present_user(pte) \
	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
	 (L_PTE_PRESENT | L_PTE_USER))

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY);
PTE_BIT_FUNC(mkwrite, &= ~L_PTE_RDONLY);
PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
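
/*
 * For illustration, PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY) expands to:
 *
 *	static inline pte_t pte_mkdirty(pte_t pte)
 *	{ pte_val(pte) |= L_PTE_DIRTY; return pte; }
 */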

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
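
/*
 * Only the protection bits (XN, RDONLY, USER) are taken from newprot;
 * state such as L_PTE_DIRTY, L_PTE_YOUNG and the L_PTE_MT_* memory type
 * is preserved from the old pte, so mprotect() cannot accidentally
 * clear the dirty or young bits.
 */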

/*
 * Encode and decode a swap entry. Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset --------------------> <- type --> 0 0 0
 *
 * This gives us up to 63 swap files and 32GB per swap file. Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT 3
#define __SWP_TYPE_BITS 6
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
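
/*
 * Worked example with hypothetical values (for illustration only):
 *
 *	__swp_entry(2, 0x100).val = (2 << 3) | (0x100 << 9) = 0x20010
 *
 * decodes back to __swp_type() == 2 and __swp_offset() == 0x100; the
 * three zero low bits keep L_PTE_PRESENT clear, so the entry is never
 * mistaken for a present pte.
 */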

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs. This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/*
 * Encode and decode a file entry. File entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <----------------------- offset ------------------------> 1 0 0
 */
#define pte_file(pte) (pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x) (pte_val(x) >> 3)
#define pgoff_to_pte(x) __pte(((x) << 3) | L_PTE_FILE)

#define PTE_FILE_MAX_BITS 29
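
/*
 * For illustration: pgoff_to_pte(0x1000) yields __pte(0x8000 | L_PTE_FILE)
 * and pte_to_pgoff() shifts the value back down to recover 0x1000. With
 * the offset shifted up by 3 bits, 32 - 3 = 29 bits remain for it,
 * hence PTE_FILE_MAX_BITS.
 */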

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr) (1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area() to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * remap `size' bytes of physical pages, starting at page frame `pfn',
 * into the virtual range beginning at `from', with page protection `prot'
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
	remap_pfn_range(vma, from, pfn, size, prot)

#define pgtable_cache_init() do { } while (0)

void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
void identity_mapping_del(pgd_t *, unsigned long, unsigned long);

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */