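/*
 * arm64 implementations of the arch hooks that the generic vmalloc code
 * uses to decide when PUD/PMD block and contiguous-PTE mappings may be
 * used, plus the hook that makes vmalloc memory MTE-taggable.
 */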
#ifndef _ASM_ARM64_VMALLOC_H
#define _ASM_ARM64_VMALLOC_H

#include <asm/page.h>
#include <asm/pgtable.h>

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP

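/*
 * Report whether vmalloc may use a PUD-level block mapping for the given
 * protection. On arm64 this is only possible when the page-size/VA
 * configuration provides section mappings at the PUD level
 * (pud_sect_supported()).
 */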
#define arch_vmap_pud_supported arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
	return pud_sect_supported();
}

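/*
 * PMD-level block mappings are always usable for vmalloc once
 * CONFIG_HAVE_ARCH_HUGE_VMAP is selected on arm64.
 */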
#define arch_vmap_pmd_supported arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	return true;
}

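/*
 * Tell the core PTE-level vmap loop how much of [addr, end) the next
 * mapping may cover: a whole contiguous-PTE block (CONT_PTE_SIZE, i.e.
 * 16 pages = 64K with 4K pages) when size and alignment allow PTE_CONT
 * to be used, otherwise a single page.
 */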
#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr,
						unsigned long end, u64 pfn,
						unsigned int max_page_shift)
{
	/*
	 * If the block is at least CONT_PTE_SIZE in size, and is naturally
	 * aligned in both virtual and physical space, then we can pte-map the
	 * block using the PTE_CONT bit for more efficient use of the TLB.
	 */
	if (max_page_shift < CONT_PTE_SHIFT)
		return PAGE_SIZE;

	if (end - addr < CONT_PTE_SIZE)
		return PAGE_SIZE;

	if (!IS_ALIGNED(addr, CONT_PTE_SIZE))
		return PAGE_SIZE;

	if (!IS_ALIGNED(PFN_PHYS(pfn), CONT_PTE_SIZE))
		return PAGE_SIZE;

	return CONT_PTE_SIZE;
}

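/*
 * Report how much virtual space the PTE at @ptep covers when a vmalloc
 * mapping is torn down: a full contiguous-PTE block if the entry is
 * valid and has PTE_CONT set, otherwise one page.
 */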
#define arch_vmap_pte_range_unmap_size arch_vmap_pte_range_unmap_size
static inline unsigned long arch_vmap_pte_range_unmap_size(unsigned long addr,
							    pte_t *ptep)
{
	/*
	 * The caller handles alignment so it's sufficient just to check
	 * PTE_CONT.
	 */
	return pte_valid_cont(__ptep_get(ptep)) ? CONT_PTE_SIZE : PAGE_SIZE;
}

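/*
 * Largest page shift usable for a PTE-level vmalloc mapping of @size
 * bytes: CONT_PTE_SHIFT when a full contiguous-PTE block fits, otherwise
 * the base PAGE_SHIFT.
 */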
#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	if (size >= CONT_PTE_SIZE)
		return CONT_PTE_SHIFT;

	return PAGE_SHIFT;
}

#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

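/*
 * Convert a vmalloc protection value to the arm64 Normal Tagged memory
 * type (via pgprot_tagged()) so that the mapped memory can carry MTE
 * allocation tags, e.g. for hardware tag-based KASAN.
 */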
#define arch_vmap_pgprot_tagged arch_vmap_pgprot_tagged
static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
	return pgprot_tagged(prot);
}

#endif /* _ASM_ARM64_VMALLOC_H */