#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)					\
	((boot_cpu_data.x86 > 3)				\
	 ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))	\
	 : (prot))

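/*
 * Illustrative sketch, not part of this header: a driver's mmap handler
 * would typically apply pgprot_noncached() before remapping device
 * memory, so the user mapping is uncached. mydrv_mmap and pfn are
 * hypothetical names; pfn is assumed to hold the device region's page
 * frame number.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return remap_pfn_range(vma, vma->vm_start, pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */
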
#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#define pmd_update(mm, addr, ptep)		do { } while (0)
#define pmd_update_defer(mm, addr, ptep)	do { } while (0)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while (0)

#endif	/* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	/*
	 * See CONFIG_NUMA_BALANCING pte_numa in include/asm-generic/pgtable.h.
	 * On x86 we have _PAGE_BIT_NUMA == _PAGE_BIT_GLOBAL+1 ==
	 * _PAGE_BIT_SOFTW1 == _PAGE_BIT_SPECIAL.
	 */
	return (pte_flags(pte) & _PAGE_SPECIAL) &&
		(pte_flags(pte) & (_PAGE_PRESENT|_PAGE_PROTNONE));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

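/*
 * Sketch of how the pfn helpers above decompose a present pte; names
 * and context are illustrative, and the entry is assumed valid:
 *
 *	pte_t pte = *ptep;
 *	if (pte_present(pte)) {
 *		unsigned long pfn = pte_pfn(pte);
 *		phys_addr_t phys = (phys_addr_t)pfn << PAGE_SHIFT;
 *		struct page *page = pte_page(pte);
 *	}
 */
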
static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_SPLITTING;
}

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_PSE;
}

static inline int has_transparent_hugepage(void)
{
	return cpu_has_pse;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

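/*
 * Illustrative use of the THP predicates (the pattern GUP-style walkers
 * follow; assumes the pmd was read once under the appropriate locking):
 * a huge pmd must not be used while it is being split:
 *
 *	pmd_t pmd = *pmdp;
 *	if (pmd_trans_huge(pmd) && !pmd_trans_splitting(pmd)) {
 *		struct page *page = pmd_page(pmd);
 *		... operate on the 2 MiB page ...
 *	}
 */
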
static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

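/*
 * These helpers are pure: each returns a new pte_t and leaves the page
 * tables untouched until something like set_pte_at() installs the
 * result. They therefore compose freely, e.g. (illustrative):
 *
 *	pte = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)));
 */
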
static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_file_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline int pte_file_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot. Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}

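/*
 * Sketch of the mprotect-style use of pte_modify() (illustrative): the
 * pfn and the bits in _PAGE_CHG_MASK (accessed, dirty, PAT, ...) are
 * preserved, while the remaining protection bits come from newprot:
 *
 *	pte_t old = *ptep;
 *	pte_t new = pte_modify(old, vma->vm_page_prot);
 *	set_pte_at(mm, addr, ptep, new);
 */
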
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 unsigned long flags,
					 unsigned long new_flags)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 */
	if ((flags == _PAGE_CACHE_UC_MINUS &&
	     new_flags == _PAGE_CACHE_WB) ||
	    (flags == _PAGE_CACHE_WC &&
	     new_flags == _PAGE_CACHE_WB)) {
		return 0;
	}

	return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>

static inline int pte_none(pte_t pte)
{
	return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
			       _PAGE_NUMA);
}

#define pte_present_nonuma pte_present_nonuma
static inline int pte_present_nonuma(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
	    mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE |
				 _PAGE_NUMA);
}

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

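/*
 * The index/offset helpers in this header combine into a software
 * page-table walk. A minimal sketch for a mapped kernel address,
 * assuming no huge pages on the path and no concurrent teardown
 * (pud_offset, pmd_offset and pgd_offset_k are defined further down):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * A robust walk would check each level with the none()/bad() predicates
 * before descending.
 */
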
static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_NUMA_BALANCING
	/* pmd_numa check */
	if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA)
		return 0;
#endif
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

/* Find an entry in the second-level page table. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* PAGETABLE_LEVELS > 2 */

#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	return !native_pgd_val(pgd);
}
#endif	/* PAGETABLE_LEVELS > 3 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

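/*
 * Example (x86-64 with 4-level paging): PGDIR_SHIFT is 39 and
 * PTRS_PER_PGD is 512, so pgd_index() selects virtual-address bits
 * 39..47 and each pgd entry spans a 512 GiB slice of address space.
 */
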
/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's.
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))


#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

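/*
 * Sketch of the classic copy-on-write use of ptep_set_wrprotect(), as
 * in fork's pte copy loop (cf. mm/memory.c; condensed and illustrative):
 * the parent's entry is write-protected so either side faults on its
 * next write:
 *
 *	if (is_cow_mapping(vm_flags) && pte_write(pte)) {
 *		ptep_set_wrprotect(src_mm, addr, src_pte);
 *		pte = pte_wrprotect(pte);
 *	}
 */
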
#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);


#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmdp)
{
	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
	pmd_update(mm, addr, pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
	pmd_update(mm, addr, pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

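/*
 * Typical use of clone_pgd_range() (illustrative; cf. pgd allocation in
 * arch/x86/mm/pgtable.c): copy the kernel half of swapper_pg_dir into a
 * newly allocated pgd so every mm shares the kernel mappings:
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */
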
#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}

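/*
 * Worked example (x86-64, PTRS_PER_PTE == 512, so PTE_SHIFT == 9):
 * page_level_shift() yields 12 for PG_LEVEL_4K, 21 for PG_LEVEL_2M and
 * 30 for PG_LEVEL_1G, i.e. sizes of 4 KiB, 2 MiB and 1 GiB. ANDing an
 * address with page_level_mask(PG_LEVEL_2M), which is ~0x1fffffUL,
 * rounds it down to its 2 MiB frame.
 */
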
/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	VM_BUG_ON(pte_present_nonuma(pte));
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	VM_BUG_ON(pte_present_nonuma(pte));
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	VM_BUG_ON(pte_present_nonuma(pte));
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
#endif

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */