// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */
#ifndef __ARM64_KVM_PKVM_H__
#define __ARM64_KVM_PKVM_H__

#include <linux/arm_ffa.h>
#include <linux/memblock.h>
#include <linux/scatterlist.h>
#include <asm/kvm_host.h>
#include <asm/kvm_pgtable.h>

/* Maximum number of VMs that can co-exist under pKVM. */
#define KVM_MAX_PVMS 255

#define HYP_MEMBLOCK_REGIONS 128

int pkvm_init_host_vm(struct kvm *kvm, unsigned long type);
int pkvm_create_hyp_vm(struct kvm *kvm);
bool pkvm_hyp_vm_is_created(struct kvm *kvm);
void pkvm_destroy_hyp_vm(struct kvm *kvm);
int pkvm_create_hyp_vcpu(struct kvm_vcpu *vcpu);

/*
 * Check whether the specified capability is allowed in pKVM.
 *
 * Certain features are allowed only for non-protected VMs in pKVM, which is
 * why this takes the VM (kvm) as a parameter.
 */
static inline bool kvm_pkvm_ext_allowed(struct kvm *kvm, long ext)
{
	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
	case KVM_CAP_MSI_DEVID:
	case KVM_CAP_ARM_VM_IPA_SIZE:
	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
	case KVM_CAP_ARM_PTRAUTH_GENERIC:
		return true;
	case KVM_CAP_ARM_MTE:
		return false;
	default:
		return !kvm || !kvm_vm_is_protected(kvm);
	}
}
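
/*
 * Illustrative call pattern (a sketch, not taken from this file): a generic
 * capability-check path could gate its answer on this helper along the
 * following lines, where the surrounding code is hypothetical:
 *
 *	if (is_protected_kvm_enabled() && !kvm_pkvm_ext_allowed(kvm, ext))
 *		return 0;
 */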

/*
 * Check whether the KVM VM ioctl is allowed in pKVM.
 *
 * Certain features are allowed only for non-protected VMs in pKVM, which is
 * why this takes the VM (kvm) as a parameter.
 */
static inline bool kvm_pkvm_ioctl_allowed(struct kvm *kvm, unsigned int ioctl)
{
	long ext;
	int r;

	r = kvm_get_cap_for_kvm_ioctl(ioctl, &ext);

	if (WARN_ON_ONCE(r < 0))
		return false;

	return kvm_pkvm_ext_allowed(kvm, ext);
}

extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);

static inline unsigned long
hyp_vmemmap_memblock_size(struct memblock_region *reg, size_t vmemmap_entry_size)
{
	unsigned long nr_pages = reg->size >> PAGE_SHIFT;
	unsigned long start, end;

	start = (reg->base >> PAGE_SHIFT) * vmemmap_entry_size;
	end = start + nr_pages * vmemmap_entry_size;
	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = ALIGN(end, PAGE_SIZE);

	return end - start;
}
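
/*
 * Worked example (illustrative numbers, not from this file): with 4 KiB
 * pages and a hypothetical vmemmap_entry_size of 4 bytes, a 2 MiB region at
 * base 0x80200000 covers 512 pages. start = 0x80200 * 4 = 0x200800 and
 * end = 0x200800 + 512 * 4 = 0x201000; after aligning start down and end up
 * to PAGE_SIZE, the region needs 0x201000 - 0x200000 = one 4 KiB page of
 * vmemmap.
 */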

static inline unsigned long hyp_vmemmap_pages(size_t vmemmap_entry_size)
{
	unsigned long res = 0, i;

	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		res += hyp_vmemmap_memblock_size(&kvm_nvhe_sym(hyp_memory)[i],
						 vmemmap_entry_size);
	}

	return res >> PAGE_SHIFT;
}

static inline unsigned long hyp_vm_table_pages(void)
{
	return PAGE_ALIGN(KVM_MAX_PVMS * sizeof(void *)) >> PAGE_SHIFT;
}
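
/*
 * Illustrative arithmetic: with 64-bit pointers the VM table needs
 * KVM_MAX_PVMS * 8 = 2040 bytes, which PAGE_ALIGN rounds up to a single
 * page on a 4 KiB page-size configuration.
 */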

static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0;
	int i;

	/* Provision the worst case scenario */
	for (i = KVM_PGTABLE_FIRST_LEVEL; i <= KVM_PGTABLE_LAST_LEVEL; i++) {
		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
		total += nr_pages;
	}

	return total;
}
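
/*
 * Worked example (hypothetical configuration): with a 4 KiB granule,
 * PTRS_PER_PTE is 512 and four levels are walked. Mapping 1 GiB
 * (262144 pages) then needs ceil(262144 / 512) = 512 last-level tables,
 * plus one table at each of the remaining three levels, i.e. 515 pages
 * in the worst case.
 */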

static inline unsigned long __hyp_pgtable_total_pages(void)
{
	unsigned long res = 0, i;

	/* Cover all of memory with page-granularity */
	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
		res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
	}

	return res;
}

static inline unsigned long hyp_s1_pgtable_pages(void)
{
	unsigned long res;

	res = __hyp_pgtable_total_pages();

	/* Allow 1 GiB for private mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

static inline unsigned long host_s2_pgtable_pages(void)
{
	unsigned long res;

	/*
	 * Include an extra 16 pages to safely upper-bound the worst case of
	 * concatenated pgds.
	 */
	res = __hyp_pgtable_total_pages() + 16;

	/* Allow 1 GiB for MMIO mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}
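
/*
 * Background on the "+ 16" above: the Arm architecture permits stage-2
 * translation to concatenate up to 16 tables at the initial lookup level,
 * so the host's stage-2 pgd may span up to 16 contiguous pages rather
 * than one.
 */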

#ifdef CONFIG_NVHE_EL2_DEBUG
static inline unsigned long pkvm_selftest_pages(void) { return 32; }
#else
static inline unsigned long pkvm_selftest_pages(void) { return 0; }
#endif

#define KVM_FFA_MBOX_NR_PAGES 1

static inline unsigned long hyp_ffa_proxy_pages(void)
{
	size_t desc_max;

	/*
	 * The hypervisor FFA proxy needs enough memory to buffer a fragmented
	 * descriptor returned from EL3 in response to a RETRIEVE_REQ call.
	 */
	desc_max = sizeof(struct ffa_mem_region) +
		   sizeof(struct ffa_mem_region_attributes) +
		   sizeof(struct ffa_composite_mem_region) +
		   SG_MAX_SEGMENTS * sizeof(struct ffa_mem_region_addr_range);

	/* Plus a page each for the hypervisor's RX and TX mailboxes. */
	return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
}
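
/*
 * Rough sizing (assumed values, configuration dependent): each
 * ffa_mem_region_addr_range is 16 bytes, so with a typical SG_MAX_SEGMENTS
 * of 2048 the address-range array alone is 32 KiB; with 4 KiB pages the
 * descriptor buffer then needs 9 pages, for 11 pages total once the two
 * mailbox pages are included.
 */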

static inline size_t pkvm_host_sve_state_size(void)
{
	if (!system_supports_sve())
		return 0;

	return size_add(sizeof(struct cpu_sve_state),
			SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl)));
}
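
/*
 * Note: size_add() from <linux/overflow.h> saturates at SIZE_MAX instead of
 * wrapping, so a (theoretical) overflow here surfaces as an allocation
 * failure rather than an undersized buffer.
 */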

struct pkvm_mapping {
	struct rb_node node;
	u64 gfn;
	u64 pfn;
	u64 nr_pages;
	u64 __subtree_last; /* Internal member for interval tree */
};
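
/*
 * A minimal sketch (not part of this header) of how such a node can be
 * hooked into a generic interval tree via the INTERVAL_TREE_DEFINE() helper
 * from <linux/interval_tree_generic.h>; the start/end helpers below are
 * hypothetical names chosen for illustration:
 *
 *	static u64 __pkvm_mapping_start(struct pkvm_mapping *m)
 *	{
 *		return m->gfn * PAGE_SIZE;
 *	}
 *
 *	static u64 __pkvm_mapping_end(struct pkvm_mapping *m)
 *	{
 *		return (m->gfn + m->nr_pages) * PAGE_SIZE - 1;
 *	}
 *
 *	INTERVAL_TREE_DEFINE(struct pkvm_mapping, node, u64, __subtree_last,
 *			     __pkvm_mapping_start, __pkvm_mapping_end,
 *			     static, pkvm_mapping);
 */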

int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			     struct kvm_pgtable_mm_ops *mm_ops);
void pkvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
				       u64 addr, u64 size);
void pkvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt);
int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			    enum kvm_pgtable_prot prot, void *mc,
			    enum kvm_pgtable_walk_flags flags);
int pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
int pkvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
bool pkvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64 size, bool mkold);
int pkvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, enum kvm_pgtable_prot prot,
				    enum kvm_pgtable_walk_flags flags);
void pkvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
				 enum kvm_pgtable_walk_flags flags);
int pkvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
			      struct kvm_mmu_memory_cache *mc);
void pkvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level);
kvm_pte_t *pkvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt, u64 phys, s8 level,
					       enum kvm_pgtable_prot prot, void *mc,
					       bool force_pte);
#endif /* __ARM64_KVM_PKVM_H__ */