Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_LOONGARCH_KVM_PARA_H
3#define _ASM_LOONGARCH_KVM_PARA_H
4
5#include <uapi/asm/kvm_para.h>
6
7/*
8 * Hypercall code field
9 */
10#define HYPERVISOR_KVM 1
11#define HYPERVISOR_VENDOR_SHIFT 8
12#define HYPERCALL_ENCODE(vendor, code) ((vendor << HYPERVISOR_VENDOR_SHIFT) + code)
13
14#define KVM_HCALL_CODE_SERVICE 0
15#define KVM_HCALL_CODE_SWDBG 1
16#define KVM_HCALL_CODE_USER_SERVICE 2
17
18#define KVM_HCALL_SERVICE HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SERVICE)
19#define KVM_HCALL_FUNC_IPI 1
20#define KVM_HCALL_FUNC_NOTIFY 2
21
22#define KVM_HCALL_SWDBG HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG)
23
24#define KVM_HCALL_USER_SERVICE HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_USER_SERVICE)
25
26/*
27 * LoongArch hypercall return code
28 */
29#define KVM_HCALL_SUCCESS 0
30#define KVM_HCALL_INVALID_CODE -1UL
31#define KVM_HCALL_INVALID_PARAMETER -2UL
32
33#define KVM_STEAL_PHYS_VALID BIT_ULL(0)
34#define KVM_STEAL_PHYS_MASK GENMASK_ULL(63, 6)
35
/*
 * Guest/host shared record for PV steal-time accounting.
 *
 * The layout is guest/host ABI: fields must not be reordered or resized.
 * Total size is 64 bytes (8 + 4 + 4 + 1 + 47 of padding).
 */
struct kvm_steal_time {
	__u64 steal;	/* accumulated stolen time (units defined by host side — verify) */
	__u32 version;	/* NOTE(review): presumably a sequence counter bumped around host updates — confirm */
	__u32 flags;
	__u8 preempted;	/* KVM_VCPU_PREEMPTED bit set while the vCPU is preempted */
	__u8 pad[47];	/* pad the structure to 64 bytes */
};
/* Bit in kvm_steal_time.preempted */
#define KVM_VCPU_PREEMPTED (1 << 0)
44
45/*
46 * Hypercall interface for KVM hypervisor
47 *
48 * a0: function identifier
49 * a1-a5: args
50 * Return value will be placed in a0.
51 * Up to 5 arguments are passed in a1, a2, a3, a4, a5.
52 */
/*
 * kvm_hypercall0() - issue hypercall KVM_HCALL_SERVICE with no arguments.
 * @fid: service function identifier, passed to the hypervisor in a0.
 *
 * Return: hypervisor result, read back from a0.
 */
static __always_inline long kvm_hypercall0(u64 fid)
{
	/* ret and fun are both pinned to a0: fid goes out and the result comes back in a0 */
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;

	/* "memory" clobber: the hypervisor may read or modify guest memory */
	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun)
		: "memory"
		);

	return ret;
}
67
/*
 * kvm_hypercall1() - issue hypercall KVM_HCALL_SERVICE with one argument.
 * @fid: service function identifier, passed in a0.
 * @arg0: first argument, passed in a1.
 *
 * Return: hypervisor result, read back from a0.
 */
static __always_inline long kvm_hypercall1(u64 fid, unsigned long arg0)
{
	/* register-pinned locals implement the a0-in/a0-out, args-in-a1.. convention */
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1 asm("a1") = arg0;

	/* "memory" clobber: the hypervisor may read or modify guest memory */
	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1)
		: "memory"
		);

	return ret;
}
83
/*
 * kvm_hypercall2() - issue hypercall KVM_HCALL_SERVICE with two arguments.
 * @fid: service function identifier, passed in a0.
 * @arg0: first argument, passed in a1.
 * @arg1: second argument, passed in a2.
 *
 * Return: hypervisor result, read back from a0.
 */
static __always_inline long kvm_hypercall2(u64 fid,
		unsigned long arg0, unsigned long arg1)
{
	/* register-pinned locals implement the a0-in/a0-out, args-in-a1.. convention */
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1 asm("a1") = arg0;
	register unsigned long a2 asm("a2") = arg1;

	/* "memory" clobber: the hypervisor may read or modify guest memory */
	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2)
		: "memory"
		);

	return ret;
}
101
/*
 * kvm_hypercall3() - issue hypercall KVM_HCALL_SERVICE with three arguments.
 * @fid: service function identifier, passed in a0.
 * @arg0: first argument, passed in a1.
 * @arg1: second argument, passed in a2.
 * @arg2: third argument, passed in a3.
 *
 * Return: hypervisor result, read back from a0.
 */
static __always_inline long kvm_hypercall3(u64 fid,
		unsigned long arg0, unsigned long arg1, unsigned long arg2)
{
	/* register-pinned locals implement the a0-in/a0-out, args-in-a1.. convention */
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1 asm("a1") = arg0;
	register unsigned long a2 asm("a2") = arg1;
	register unsigned long a3 asm("a3") = arg2;

	/* "memory" clobber: the hypervisor may read or modify guest memory */
	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r" (fun), "r" (a1), "r" (a2), "r" (a3)
		: "memory"
		);

	return ret;
}
120
/*
 * kvm_hypercall4() - issue hypercall KVM_HCALL_SERVICE with four arguments.
 * @fid: service function identifier, passed in a0.
 * @arg0: first argument, passed in a1.
 * @arg1: second argument, passed in a2.
 * @arg2: third argument, passed in a3.
 * @arg3: fourth argument, passed in a4.
 *
 * Return: hypervisor result, read back from a0.
 */
static __always_inline long kvm_hypercall4(u64 fid,
		unsigned long arg0, unsigned long arg1,
		unsigned long arg2, unsigned long arg3)
{
	/* register-pinned locals implement the a0-in/a0-out, args-in-a1.. convention */
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1 asm("a1") = arg0;
	register unsigned long a2 asm("a2") = arg1;
	register unsigned long a3 asm("a3") = arg2;
	register unsigned long a4 asm("a4") = arg3;

	/* "memory" clobber: the hypervisor may read or modify guest memory */
	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r"(fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4)
		: "memory"
		);

	return ret;
}
141
/*
 * kvm_hypercall5() - issue hypercall KVM_HCALL_SERVICE with five arguments.
 * @fid: service function identifier, passed in a0.
 * @arg0: first argument, passed in a1.
 * @arg1: second argument, passed in a2.
 * @arg2: third argument, passed in a3.
 * @arg3: fourth argument, passed in a4.
 * @arg4: fifth argument, passed in a5.
 *
 * Return: hypervisor result, read back from a0.
 */
static __always_inline long kvm_hypercall5(u64 fid,
		unsigned long arg0, unsigned long arg1,
		unsigned long arg2, unsigned long arg3, unsigned long arg4)
{
	/* register-pinned locals implement the a0-in/a0-out, args-in-a1.. convention */
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1 asm("a1") = arg0;
	register unsigned long a2 asm("a2") = arg1;
	register unsigned long a3 asm("a3") = arg2;
	register unsigned long a4 asm("a4") = arg3;
	register unsigned long a5 asm("a5") = arg4;

	/* "memory" clobber: the hypervisor may read or modify guest memory */
	__asm__ __volatile__(
		"hvcl "__stringify(KVM_HCALL_SERVICE)
		: "=r" (ret)
		: "r"(fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5)
		: "memory"
		);

	return ret;
}
163
#ifdef CONFIG_PARAVIRT
/* Out-of-line implementations are provided by the paravirt code when enabled. */
bool kvm_para_available(void);
unsigned int kvm_arch_para_features(void);
#else
/* Without CONFIG_PARAVIRT, report that no KVM para-features are available. */
static inline bool kvm_para_available(void)
{
	return false;
}

static inline unsigned int kvm_arch_para_features(void)
{
	return 0;
}
#endif
178
/*
 * kvm_arch_para_hints() - architecture-specific PV hint feature bits.
 *
 * No hint features are exposed here, so the bitmask is always empty.
 */
static inline unsigned int kvm_arch_para_hints(void)
{
	return 0;
}
183
/*
 * kvm_check_and_clear_guest_paused() - test-and-clear the "guest paused"
 * indication used by generic code.
 *
 * Not supported here; unconditionally reports "not paused".
 */
static inline bool kvm_check_and_clear_guest_paused(void)
{
	return false;
}
188
189#endif /* _ASM_LOONGARCH_KVM_PARA_H */