// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */
#ifndef __ARM64_KVM_PKVM_H__
#define __ARM64_KVM_PKVM_H__

#include <linux/arm_ffa.h>
#include <linux/memblock.h>
#include <linux/scatterlist.h>
#include <asm/kvm_pgtable.h>

/* Maximum number of VMs that can co-exist under pKVM. */
#define KVM_MAX_PVMS 255

#define HYP_MEMBLOCK_REGIONS 128

int pkvm_init_host_vm(struct kvm *kvm);
int pkvm_create_hyp_vm(struct kvm *kvm);
void pkvm_destroy_hyp_vm(struct kvm *kvm);
int pkvm_create_hyp_vcpu(struct kvm_vcpu *vcpu);

/*
 * This functions as an allow-list of protected VM capabilities.
 * Features not explicitly allowed by this function are denied.
 */
static inline bool kvm_pvm_ext_allowed(long ext)
{
	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
	case KVM_CAP_MSI_DEVID:
	case KVM_CAP_ARM_VM_IPA_SIZE:
	case KVM_CAP_ARM_PMU_V3:
	case KVM_CAP_ARM_SVE:
	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
	case KVM_CAP_ARM_PTRAUTH_GENERIC:
		return true;
	default:
		return false;
	}
}
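
/*
 * Illustrative sketch, not part of the upstream header: the VM-level
 * KVM_CHECK_EXTENSION path is expected to consult the allow-list above
 * before advertising a capability to a protected guest. The helper below
 * is hypothetical and assumes kvm_vm_is_protected() is available.
 */
static inline int pkvm_check_extension_sketch(struct kvm *kvm, long ext,
					      int host_ret)
{
	/* Protected VMs may only see explicitly allowed capabilities. */
	if (kvm_vm_is_protected(kvm) && !kvm_pvm_ext_allowed(ext))
		return 0;

	return host_ret;
}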

extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);

static inline unsigned long
hyp_vmemmap_memblock_size(struct memblock_region *reg, size_t vmemmap_entry_size)
{
	unsigned long nr_pages = reg->size >> PAGE_SHIFT;
	unsigned long start, end;

	start = (reg->base >> PAGE_SHIFT) * vmemmap_entry_size;
	end = start + nr_pages * vmemmap_entry_size;
	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = ALIGN(end, PAGE_SIZE);

	return end - start;
}
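
/*
 * Worked example, with assumed figures: on a 4 KiB page kernel with a
 * 16-byte vmemmap entry, a 1 GiB memblock covers 262144 pages and so
 * needs 262144 * 16 bytes = 4 MiB of vmemmap. Since the slice's offset
 * is derived from the region's base PFN, the ALIGN_DOWN()/ALIGN() pair
 * can add up to one extra page at either end.
 */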

static inline unsigned long hyp_vmemmap_pages(size_t vmemmap_entry_size)
{
	unsigned long res = 0, i;

	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		res += hyp_vmemmap_memblock_size(&kvm_nvhe_sym(hyp_memory)[i],
						 vmemmap_entry_size);
	}

	return res >> PAGE_SHIFT;
}

static inline unsigned long hyp_vm_table_pages(void)
{
	return PAGE_ALIGN(KVM_MAX_PVMS * sizeof(void *)) >> PAGE_SHIFT;
}
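
/*
 * Worked example: with KVM_MAX_PVMS == 255 and 8-byte pointers, the VM
 * table occupies 255 * 8 = 2040 bytes, which PAGE_ALIGN() rounds up to
 * exactly one page on a 4 KiB page kernel.
 */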

static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0;
	int i;

	/* Provision the worst case scenario */
	for (i = KVM_PGTABLE_FIRST_LEVEL; i <= KVM_PGTABLE_LAST_LEVEL; i++) {
		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
		total += nr_pages;
	}

	return total;
}
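
/*
 * Worked example (assuming a 4 KiB granule, i.e. PTRS_PER_PTE == 512):
 * mapping 1 GiB means nr_pages == 262144, so the loop accumulates
 * DIV_ROUND_UP(262144, 512) == 512 leaf-level tables, then one table at
 * each higher level, i.e. 512 + 1 + 1 + ... for however many levels the
 * KVM_PGTABLE_FIRST_LEVEL..KVM_PGTABLE_LAST_LEVEL range spans.
 */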

static inline unsigned long __hyp_pgtable_total_pages(void)
{
	unsigned long res = 0, i;

	/* Cover all of memory with page-granularity */
	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
		res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
	}

	return res;
}

static inline unsigned long hyp_s1_pgtable_pages(void)
{
	unsigned long res;

	res = __hyp_pgtable_total_pages();

	/* Allow 1 GiB for private mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

static inline unsigned long host_s2_pgtable_pages(void)
{
	unsigned long res;

	/*
	 * Include an extra 16 pages to safely upper-bound the worst case of
	 * concatenated pgds.
	 */
	res = __hyp_pgtable_total_pages() + 16;

	/* Allow 1 GiB for MMIO mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}
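
/*
 * Note on the "+ 16" above: the architecture permits up to 16
 * initial-level stage-2 tables to be concatenated, so the worst-case
 * pgd costs at most 16 extra pages.
 */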

#define KVM_FFA_MBOX_NR_PAGES	1

static inline unsigned long hyp_ffa_proxy_pages(void)
{
	size_t desc_max;

	/*
	 * The hypervisor FFA proxy needs enough memory to buffer a fragmented
	 * descriptor returned from EL3 in response to a RETRIEVE_REQ call.
	 */
	desc_max = sizeof(struct ffa_mem_region) +
		   sizeof(struct ffa_mem_region_attributes) +
		   sizeof(struct ffa_composite_mem_region) +
		   SG_MAX_SEGMENTS * sizeof(struct ffa_mem_region_addr_range);

	/* Plus a page each for the hypervisor's RX and TX mailboxes. */
	return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
}
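
/*
 * Rough illustration (struct sizes are assumptions, not guaranteed by
 * this header): with 16-byte address-range entries the buffer is
 * dominated by SG_MAX_SEGMENTS * 16 bytes; DIV_ROUND_UP() converts that
 * to whole pages, and the two mailbox pages are added on top.
 */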

static inline size_t pkvm_host_sve_state_size(void)
{
	if (!system_supports_sve())
		return 0;

	return size_add(sizeof(struct cpu_sve_state),
			SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl)));
}
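
/*
 * size_add() comes from <linux/overflow.h> and saturates at SIZE_MAX
 * instead of wrapping, so an oversized vector length cannot silently
 * under-allocate the host SVE state.
 */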

struct pkvm_mapping {
	struct rb_node node;
	u64 gfn;
	u64 pfn;
};
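
/*
 * Illustrative sketch, not upstream code: pKVM tracks a protected
 * guest's stage-2 mappings in an rb-tree keyed by gfn, so a lookup
 * walks the tree in the usual way. The helper below is hypothetical.
 */
static inline struct pkvm_mapping *
pkvm_mapping_find_sketch(struct rb_root *root, u64 gfn)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct pkvm_mapping *m;

		m = rb_entry(node, struct pkvm_mapping, node);
		if (gfn < m->gfn)
			node = node->rb_left;
		else if (gfn > m->gfn)
			node = node->rb_right;
		else
			return m;
	}

	return NULL;
}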

int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			     struct kvm_pgtable_mm_ops *mm_ops);
void pkvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			    enum kvm_pgtable_prot prot, void *mc,
			    enum kvm_pgtable_walk_flags flags);
int pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
int pkvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
bool pkvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64 size, bool mkold);
int pkvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, enum kvm_pgtable_prot prot,
				    enum kvm_pgtable_walk_flags flags);
void pkvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
				 enum kvm_pgtable_walk_flags flags);
int pkvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
			      struct kvm_mmu_memory_cache *mc);
void pkvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level);
kvm_pte_t *pkvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt, u64 phys, s8 level,
					       enum kvm_pgtable_prot prot, void *mc,
					       bool force_pte);

#endif	/* __ARM64_KVM_PKVM_H__ */