/*
 * arch/arm64/include/asm/vmalloc.h
 * From the Linux kernel mirror (for testing):
 * git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 */
#ifndef _ASM_ARM64_VMALLOC_H
#define _ASM_ARM64_VMALLOC_H

#include <asm/page.h>
#include <asm/pgtable.h>

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP

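/*
 * With 4K pages a PUD-level block mapping covers 1GiB; pud_sect_supported()
 * is only true for that granule, so 16K and 64K page kernels never use
 * PUD-level huge vmap mappings.
 */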
#define arch_vmap_pud_supported arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
	/*
	 * SW table walks can't handle removal of intermediate entries.
	 */
	return pud_sect_supported() &&
	       !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
}

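/*
 * PMD-level block mappings are 2MiB with 4K pages, 32MiB with 16K pages and
 * 512MiB with 64K pages. The ptdump restriction above applies here as well.
 */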
#define arch_vmap_pmd_supported arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	/* See arch_vmap_pud_supported() */
	return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
}

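/*
 * Worked example, assuming a 4K page kernel (where CONT_PTE_SHIFT is 16 and
 * CONT_PTE_SIZE is 64KiB): mapping pfn 0x10 (phys 0x10000, 64KiB-aligned) at
 * a 64KiB-aligned address with at least 64KiB left before 'end' returns
 * CONT_PTE_SIZE, so the caller installs 16 contiguous PTEs with PTE_CONT
 * set; a request failing any of the size or alignment checks below falls
 * back to a single PAGE_SIZE PTE.
 */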
#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr,
						unsigned long end, u64 pfn,
						unsigned int max_page_shift)
{
	/*
	 * If the block is at least CONT_PTE_SIZE in size, and is naturally
	 * aligned in both virtual and physical space, then we can pte-map the
	 * block using the PTE_CONT bit for more efficient use of the TLB.
	 */
	if (max_page_shift < CONT_PTE_SHIFT)
		return PAGE_SIZE;

	if (end - addr < CONT_PTE_SIZE)
		return PAGE_SIZE;

	if (!IS_ALIGNED(addr, CONT_PTE_SIZE))
		return PAGE_SIZE;

	if (!IS_ALIGNED(PFN_PHYS(pfn), CONT_PTE_SIZE))
		return PAGE_SIZE;

	return CONT_PTE_SIZE;
}

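/*
 * On unmap, a single lookup of the head PTE is enough to tell whether the
 * range was mapped as a contiguous block: every PTE in a contiguous run
 * carries PTE_CONT, and the caller only calls this on suitably aligned
 * addresses.
 */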
#define arch_vmap_pte_range_unmap_size arch_vmap_pte_range_unmap_size
static inline unsigned long arch_vmap_pte_range_unmap_size(unsigned long addr,
							   pte_t *ptep)
{
	/*
	 * The caller handles alignment so it's sufficient just to check
	 * PTE_CONT.
	 */
	return pte_valid_cont(__ptep_get(ptep)) ? CONT_PTE_SIZE : PAGE_SIZE;
}

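/*
 * Reports the largest mapping shift usable at PTE level for an allocation of
 * the given size: CONT_PTE_SHIFT when the allocation can cover a full
 * contiguous run, PAGE_SHIFT otherwise. The generic vmalloc code consults
 * this when sizing and aligning huge-vmap allocations.
 */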
#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	if (size >= CONT_PTE_SIZE)
		return CONT_PTE_SHIFT;

	return PAGE_SHIFT;
}

#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

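/*
 * When memory tagging (MTE) is in use, vmalloc mappings may need the Normal
 * Tagged memory type; pgprot_tagged() rewrites the memory attribute index in
 * prot accordingly. Exposed here so the generic vmalloc code can request
 * tagged mappings, e.g. for hardware tag-based KASAN.
 */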
#define arch_vmap_pgprot_tagged arch_vmap_pgprot_tagged
static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
	return pgprot_tagged(prot);
}

#endif /* _ASM_ARM64_VMALLOC_H */
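
/*
 * For context: the PTE-level mapping loop in mm/vmalloc.c consumes the map
 * hook above roughly as follows (a simplified sketch, not the exact upstream
 * code):
 *
 *	do {
 *		size = arch_vmap_pte_range_map_size(addr, end, pfn,
 *						    max_page_shift);
 *		... install one PTE, or a PTE_CONT run covering 'size' ...
 *		pfn += size >> PAGE_SHIFT;
 *	} while (addr += size, addr != end);
 */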