// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/csr.h>

/*
 * Instruction encoding of hfence.gvma is:
 * HFENCE.GVMA rs1, rs2
 * HFENCE.GVMA zero, rs2
 * HFENCE.GVMA rs1
 * HFENCE.GVMA
 *
 * rs1!=zero and rs2!=zero ==> HFENCE.GVMA rs1, rs2
 * rs1==zero and rs2!=zero ==> HFENCE.GVMA zero, rs2
 * rs1!=zero and rs2==zero ==> HFENCE.GVMA rs1
 * rs1==zero and rs2==zero ==> HFENCE.GVMA
 *
 * Instruction encoding of HFENCE.GVMA is:
 * 0110001 rs2(5) rs1(5) 000 00000 1110011
 */

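/*
 * Flush G-stage TLB entries for the GPA range [gpa, gpa + gpsz) under the
 * given VMID on the local CPU. When the range spans more than PTRS_PER_PTE
 * pages of the given order, fall back to a full per-VMID flush instead of
 * iterating. The fences are emitted as raw .word values (likely so the file
 * assembles even without hypervisor-extension support in the toolchain).
 */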
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
					  gpa_t gpa, gpa_t gpsz,
					  unsigned long order)
{
	gpa_t pos;

	if (PTRS_PER_PTE < (gpsz >> order)) {
		kvm_riscv_local_hfence_gvma_vmid_all(vmid);
		return;
	}

	for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) {
		/*
		 * rs1 = a0 (GPA >> 2)
		 * rs2 = a1 (VMID)
		 * HFENCE.GVMA a0, a1
		 * 0110001 01011 01010 000 00000 1110011
		 */
		asm volatile ("srli a0, %0, 2\n"
			      "add a1, %1, zero\n"
			      ".word 0x62b50073\n"
			      :: "r" (pos), "r" (vmid)
			      : "a0", "a1", "memory");
	}
}

void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid)
{
	/*
	 * rs1 = zero
	 * rs2 = a0 (VMID)
	 * HFENCE.GVMA zero, a0
	 * 0110001 01010 00000 000 00000 1110011
	 */
	asm volatile ("add a0, %0, zero\n"
		      ".word 0x62a00073\n"
		      :: "r" (vmid) : "a0", "memory");
}

void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
				     unsigned long order)
{
	gpa_t pos;

	if (PTRS_PER_PTE < (gpsz >> order)) {
		kvm_riscv_local_hfence_gvma_all();
		return;
	}

	for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order)) {
		/*
		 * rs1 = a0 (GPA >> 2)
		 * rs2 = zero
		 * HFENCE.GVMA a0
		 * 0110001 00000 01010 000 00000 1110011
		 */
		asm volatile ("srli a0, %0, 2\n"
			      ".word 0x62050073\n"
			      :: "r" (pos) : "a0", "memory");
	}
}

void kvm_riscv_local_hfence_gvma_all(void)
{
	/*
	 * rs1 = zero
	 * rs2 = zero
	 * HFENCE.GVMA
	 * 0110001 00000 00000 000 00000 1110011
	 */
	asm volatile (".word 0x62000073" ::: "memory");
}

/*
 * Instruction encoding of hfence.vvma is:
 * HFENCE.VVMA rs1, rs2
 * HFENCE.VVMA zero, rs2
 * HFENCE.VVMA rs1
 * HFENCE.VVMA
 *
 * rs1!=zero and rs2!=zero ==> HFENCE.VVMA rs1, rs2
 * rs1==zero and rs2!=zero ==> HFENCE.VVMA zero, rs2
 * rs1!=zero and rs2==zero ==> HFENCE.VVMA rs1
 * rs1==zero and rs2==zero ==> HFENCE.VVMA
 *
 * Instruction encoding of HFENCE.VVMA is:
 * 0010001 rs2(5) rs1(5) 000 00000 1110011
 */

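/*
 * HFENCE.VVMA operates on guest virtual addresses for the VMID currently
 * programmed in the hgatp CSR. The VVMA helpers below therefore temporarily
 * swap the target VMID into CSR_HGATP, issue the fence(s), and then restore
 * the previous hgatp value.
 */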
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
					  unsigned long asid,
					  unsigned long gva,
					  unsigned long gvsz,
					  unsigned long order)
{
	unsigned long pos, hgatp;

	if (PTRS_PER_PTE < (gvsz >> order)) {
		kvm_riscv_local_hfence_vvma_asid_all(vmid, asid);
		return;
	}

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) {
		/*
		 * rs1 = a0 (GVA)
		 * rs2 = a1 (ASID)
		 * HFENCE.VVMA a0, a1
		 * 0010001 01011 01010 000 00000 1110011
		 */
		asm volatile ("add a0, %0, zero\n"
			      "add a1, %1, zero\n"
			      ".word 0x22b50073\n"
			      :: "r" (pos), "r" (asid)
			      : "a0", "a1", "memory");
	}

	csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
					  unsigned long asid)
{
	unsigned long hgatp;

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	/*
	 * rs1 = zero
	 * rs2 = a0 (ASID)
	 * HFENCE.VVMA zero, a0
	 * 0010001 01010 00000 000 00000 1110011
	 */
	asm volatile ("add a0, %0, zero\n"
		      ".word 0x22a00073\n"
		      :: "r" (asid) : "a0", "memory");

	csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
				     unsigned long gva, unsigned long gvsz,
				     unsigned long order)
{
	unsigned long pos, hgatp;

	if (PTRS_PER_PTE < (gvsz >> order)) {
		kvm_riscv_local_hfence_vvma_all(vmid);
		return;
	}

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) {
		/*
		 * rs1 = a0 (GVA)
		 * rs2 = zero
		 * HFENCE.VVMA a0
		 * 0010001 00000 01010 000 00000 1110011
		 */
		asm volatile ("add a0, %0, zero\n"
			      ".word 0x22050073\n"
			      :: "r" (pos) : "a0", "memory");
	}

	csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
{
	unsigned long hgatp;

	hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

	/*
	 * rs1 = zero
	 * rs2 = zero
	 * HFENCE.VVMA
	 * 0010001 00000 00000 000 00000 1110011
	 */
	asm volatile (".word 0x22000073" ::: "memory");

	csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu)
{
	unsigned long vmid;

	if (!kvm_riscv_gstage_vmid_bits() ||
	    vcpu->arch.last_exit_cpu == vcpu->cpu)
		return;

	/*
	 * On RISC-V platforms with hardware VMID support, we share the same
	 * VMID for all VCPUs of a particular Guest/VM. This means we might
	 * have stale G-stage TLB entries on the current Host CPU due to
	 * some other VCPU of the same Guest which ran previously on the
	 * current Host CPU.
	 *
	 * To clean up stale TLB entries, we simply flush all G-stage TLB
	 * entries by VMID whenever the underlying Host CPU changes for a VCPU.
	 */

	vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
	kvm_riscv_local_hfence_gvma_vmid_all(vmid);
}

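/*
 * The *_process() handlers below are invoked when the corresponding
 * KVM_REQ_FENCE_I / KVM_REQ_HFENCE_* request (raised by
 * make_xfence_request() further down) is serviced for a VCPU.
 */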
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
{
	local_flush_icache_all();
}

void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu)
{
	struct kvm_vmid *vmid;

	vmid = &vcpu->kvm->arch.vmid;
	kvm_riscv_local_hfence_gvma_vmid_all(READ_ONCE(vmid->vmid));
}

void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
{
	struct kvm_vmid *vmid;

	vmid = &vcpu->kvm->arch.vmid;
	kvm_riscv_local_hfence_vvma_all(READ_ONCE(vmid->vmid));
}

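/*
 * The per-VCPU hfence queue is a fixed-size ring buffer of
 * KVM_RISCV_VCPU_MAX_HFENCE entries protected by hfence_lock; an entry
 * whose type is 0 (KVM_RISCV_HFENCE_UNKNOWN) marks a free slot.
 */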
static bool vcpu_hfence_dequeue(struct kvm_vcpu *vcpu,
				struct kvm_riscv_hfence *out_data)
{
	bool ret = false;
	struct kvm_vcpu_arch *varch = &vcpu->arch;

	spin_lock(&varch->hfence_lock);

	if (varch->hfence_queue[varch->hfence_head].type) {
		memcpy(out_data, &varch->hfence_queue[varch->hfence_head],
		       sizeof(*out_data));
		varch->hfence_queue[varch->hfence_head].type = 0;

		varch->hfence_head++;
		if (varch->hfence_head == KVM_RISCV_VCPU_MAX_HFENCE)
			varch->hfence_head = 0;

		ret = true;
	}

	spin_unlock(&varch->hfence_lock);

	return ret;
}

static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
				const struct kvm_riscv_hfence *data)
{
	bool ret = false;
	struct kvm_vcpu_arch *varch = &vcpu->arch;

	spin_lock(&varch->hfence_lock);

	if (!varch->hfence_queue[varch->hfence_tail].type) {
		memcpy(&varch->hfence_queue[varch->hfence_tail],
		       data, sizeof(*data));

		varch->hfence_tail++;
		if (varch->hfence_tail == KVM_RISCV_VCPU_MAX_HFENCE)
			varch->hfence_tail = 0;

		ret = true;
	}

	spin_unlock(&varch->hfence_lock);

	return ret;
}

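/*
 * Drain this VCPU's hfence queue and issue the matching local fence for
 * each entry; called when a KVM_REQ_HFENCE request is pending for the VCPU.
 */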
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
{
	struct kvm_riscv_hfence d = { 0 };
	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;

	while (vcpu_hfence_dequeue(vcpu, &d)) {
		switch (d.type) {
		case KVM_RISCV_HFENCE_UNKNOWN:
			break;
		case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
			kvm_riscv_local_hfence_gvma_vmid_gpa(
						READ_ONCE(v->vmid),
						d.addr, d.size, d.order);
			break;
		case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
			kvm_riscv_local_hfence_vvma_asid_gva(
						READ_ONCE(v->vmid), d.asid,
						d.addr, d.size, d.order);
			break;
		case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
			kvm_riscv_local_hfence_vvma_asid_all(
						READ_ONCE(v->vmid), d.asid);
			break;
		case KVM_RISCV_HFENCE_VVMA_GVA:
			kvm_riscv_local_hfence_vvma_gva(
						READ_ONCE(v->vmid),
						d.addr, d.size, d.order);
			break;
		default:
			break;
		}
	}
}

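/*
 * Build a mask of the VCPUs selected by @hbase/@hmask (all VCPUs when
 * @hbase is -1UL), optionally enqueue @data to each selected VCPU's hfence
 * queue, and send @req to the masked VCPUs. If any selected VCPU's queue is
 * full, degrade to the coarser @fallback_req for the whole mask.
 */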
static void make_xfence_request(struct kvm *kvm,
				unsigned long hbase, unsigned long hmask,
				unsigned int req, unsigned int fallback_req,
				const struct kvm_riscv_hfence *data)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;
	unsigned int actual_req = req;
	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);

	bitmap_clear(vcpu_mask, 0, KVM_MAX_VCPUS);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (hbase != -1UL) {
			if (vcpu->vcpu_id < hbase)
				continue;
			if (!(hmask & (1UL << (vcpu->vcpu_id - hbase))))
				continue;
		}

		bitmap_set(vcpu_mask, i, 1);

		if (!data || !data->type)
			continue;

		/*
		 * Enqueue hfence data to VCPU hfence queue. If we don't
		 * have space in the VCPU hfence queue then fall back to
		 * a more conservative hfence request.
		 */
		if (!vcpu_hfence_enqueue(vcpu, data))
			actual_req = fallback_req;
	}

	kvm_make_vcpus_request_mask(kvm, actual_req, vcpu_mask);
}

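/*
 * The VM-scope helpers below fill in a kvm_riscv_hfence payload where needed
 * and delegate to make_xfence_request() with the matching request and a more
 * conservative fallback request.
 */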
void kvm_riscv_fence_i(struct kvm *kvm,
		       unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_FENCE_I,
			    KVM_REQ_FENCE_I, NULL);
}

void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    gpa_t gpa, gpa_t gpsz,
				    unsigned long order)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
	data.asid = 0;
	data.addr = gpa;
	data.size = gpsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_GVMA_VMID_ALL, &data);
}

void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_GVMA_VMID_ALL,
			    KVM_REQ_HFENCE_GVMA_VMID_ALL, NULL);
}

void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long gva, unsigned long gvsz,
				    unsigned long order, unsigned long asid)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
	data.asid = asid;
	data.addr = gva;
	data.size = gvsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
				    unsigned long hbase, unsigned long hmask,
				    unsigned long asid)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
	data.asid = asid;
	data.addr = data.size = data.order = 0;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask,
			       unsigned long gva, unsigned long gvsz,
			       unsigned long order)
{
	struct kvm_riscv_hfence data;

	data.type = KVM_RISCV_HFENCE_VVMA_GVA;
	data.asid = 0;
	data.addr = gva;
	data.size = gvsz;
	data.order = order;
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
			    KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
			       unsigned long hbase, unsigned long hmask)
{
	make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_VVMA_ALL,
			    KVM_REQ_HFENCE_VVMA_ALL, NULL);
}