/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSHYPER_H
#define _ASM_X86_MSHYPER_H

#include <linux/types.h>
#include <linux/nmi.h>
#include <linux/msi.h>
#include <linux/io.h>
#include <asm/hyperv-tlfs.h>
#include <asm/nospec-branch.h>
#include <asm/paravirt.h>

/*
 * Hyper-V always provides a single IO-APIC at this MMIO address.
 * Ideally, the value should be looked up in ACPI tables, but it
 * is needed for mapping the IO-APIC early in boot on Confidential
 * VMs, before ACPI functions can be used.
 */
#define HV_IOAPIC_BASE_ADDRESS 0xfec00000

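/*
 * Virtual Trust Levels (VTLs): VTL0 is the normal guest execution
 * environment, while higher VTLs are progressively more privileged
 * contexts (e.g. the secure VTL used by Virtual Secure Mode). The
 * names below follow that convention.
 */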
#define HV_VTL_NORMAL	0x0
#define HV_VTL_SECURE	0x1
#define HV_VTL_MGMT	0x2

union hv_ghcb;

DECLARE_STATIC_KEY_FALSE(isolation_type_snp);

typedef int (*hyperv_fill_flush_list_func)(
		struct hv_guest_mapping_flush_list *flush,
		void *data);

void hyperv_vector_handler(struct pt_regs *regs);

static inline unsigned char hv_get_nmi_reason(void)
{
	return 0;
}

#if IS_ENABLED(CONFIG_HYPERV)
extern int hyperv_init_cpuhp;

extern void *hv_hypercall_pg;

extern u64 hv_current_partition_id;

extern union hv_ghcb * __percpu *hv_ghcb_pg;

int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages);
int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id);
int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags);

static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
{
	u64 input_address = input ? virt_to_phys(input) : 0;
	u64 output_address = output ? virt_to_phys(output) : 0;
	u64 hv_status;

#ifdef CONFIG_X86_64
	if (!hv_hypercall_pg)
		return U64_MAX;

	__asm__ __volatile__("mov %4, %%r8\n"
			     CALL_NOSPEC
			     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
			       "+c" (control), "+d" (input_address)
			     : "r" (output_address),
			       THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory", "r8", "r9", "r10", "r11");
#else
	u32 input_address_hi = upper_32_bits(input_address);
	u32 input_address_lo = lower_32_bits(input_address);
	u32 output_address_hi = upper_32_bits(output_address);
	u32 output_address_lo = lower_32_bits(output_address);

	if (!hv_hypercall_pg)
		return U64_MAX;

	__asm__ __volatile__(CALL_NOSPEC
			     : "=A" (hv_status),
			       "+c" (input_address_lo), ASM_CALL_CONSTRAINT
			     : "A" (control),
			       "b" (input_address_hi),
			       "D" (output_address_hi), "S" (output_address_lo),
			       THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory");
#endif /* !x86_64 */
	return hv_status;
}
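
/*
 * The sequence above follows the TLFS hypercall calling convention on
 * x86_64: the control word goes in RCX, the guest physical address of the
 * input page in RDX, the output page GPA in R8, and the 64-bit status is
 * returned in RAX (its low 16 bits carry the HV_STATUS_* code). Callers
 * normally pass the per-CPU hypercall input page rather than arbitrary
 * kernel memory; hyperv_pcpu_input_arg is declared in
 * asm-generic/mshyperv.h, which this header includes at the end. An
 * illustrative, simplified sketch (mirroring hyperv_flush_guest_mapping()):
 *
 *	struct hv_guest_mapping_flush *flush;
 *	unsigned long flags;
 *	u64 status;
 *
 *	local_irq_save(flags);
 *	flush = *this_cpu_ptr(hyperv_pcpu_input_arg);
 *	flush->address_space = as;
 *	flush->flags = 0;
 *	status = hv_do_hypercall(HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE,
 *				 flush, NULL);
 *	local_irq_restore(flags);
 *	if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
 *		... handle the failure ...
 */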

/* Hypercall to the L0 hypervisor */
static inline u64 hv_do_nested_hypercall(u64 control, void *input, void *output)
{
	return hv_do_hypercall(control | HV_HYPERCALL_NESTED, input, output);
}

/* Fast hypercall with 8 bytes of input and no output */
static inline u64 _hv_do_fast_hypercall8(u64 control, u64 input1)
{
	u64 hv_status;

#ifdef CONFIG_X86_64
	{
		__asm__ __volatile__(CALL_NOSPEC
				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
				       "+c" (control), "+d" (input1)
				     : THUNK_TARGET(hv_hypercall_pg)
				     : "cc", "r8", "r9", "r10", "r11");
	}
#else
	{
		u32 input1_hi = upper_32_bits(input1);
		u32 input1_lo = lower_32_bits(input1);

		__asm__ __volatile__ (CALL_NOSPEC
				      : "=A" (hv_status),
					"+c" (input1_lo),
					ASM_CALL_CONSTRAINT
				      : "A" (control),
					"b" (input1_hi),
					THUNK_TARGET(hv_hypercall_pg)
				      : "cc", "edi", "esi");
	}
#endif
	return hv_status;
}

static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
{
	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT;

	return _hv_do_fast_hypercall8(control, input1);
}

static inline u64 hv_do_fast_nested_hypercall8(u16 code, u64 input1)
{
	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT | HV_HYPERCALL_NESTED;

	return _hv_do_fast_hypercall8(control, input1);
}
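
/*
 * Illustrative sketch only (not part of the kernel's Hyper-V support): a
 * fast hypercall carries its 8 bytes of input entirely in registers, so
 * no input page is needed. HVCALL_NOTIFY_LONG_SPIN_WAIT and the status
 * masks come from hyperv-tlfs.h; passing 0 as the spin-wait argument is
 * an assumption made purely for the example.
 */
static inline bool hv_example_notify_long_spin_wait(void)
{
	u64 status = hv_do_fast_hypercall8(HVCALL_NOTIFY_LONG_SPIN_WAIT, 0);

	return (status & HV_HYPERCALL_RESULT_MASK) == HV_STATUS_SUCCESS;
}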

/* Fast hypercall with 16 bytes of input */
static inline u64 _hv_do_fast_hypercall16(u64 control, u64 input1, u64 input2)
{
	u64 hv_status;

#ifdef CONFIG_X86_64
	{
		__asm__ __volatile__("mov %4, %%r8\n"
				     CALL_NOSPEC
				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
				       "+c" (control), "+d" (input1)
				     : "r" (input2),
				       THUNK_TARGET(hv_hypercall_pg)
				     : "cc", "r8", "r9", "r10", "r11");
	}
#else
	{
		u32 input1_hi = upper_32_bits(input1);
		u32 input1_lo = lower_32_bits(input1);
		u32 input2_hi = upper_32_bits(input2);
		u32 input2_lo = lower_32_bits(input2);

		__asm__ __volatile__ (CALL_NOSPEC
				      : "=A" (hv_status),
					"+c" (input1_lo), ASM_CALL_CONSTRAINT
				      : "A" (control), "b" (input1_hi),
					"D" (input2_hi), "S" (input2_lo),
					THUNK_TARGET(hv_hypercall_pg)
				      : "cc");
	}
#endif
	return hv_status;
}

static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
{
	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT;

	return _hv_do_fast_hypercall16(control, input1, input2);
}

static inline u64 hv_do_fast_nested_hypercall16(u16 code, u64 input1, u64 input2)
{
	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT | HV_HYPERCALL_NESTED;

	return _hv_do_fast_hypercall16(control, input1, input2);
}
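
/*
 * Illustrative sketch only: with 16 bytes of register-passed input, a
 * struct hv_send_ipi (vector in the low 32 bits of the first quadword,
 * target VP mask in the second quadword) fits in a fast hypercall, which
 * is roughly how the Hyper-V APIC enlightenment sends an IPI to a single
 * vCPU. The helper name and its bare arguments are hypothetical.
 */
static inline bool hv_example_send_ipi(u32 vector, u64 vp_mask)
{
	u64 status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, vp_mask);

	return (status & HV_HYPERCALL_RESULT_MASK) == HV_STATUS_SUCCESS;
}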

extern struct hv_vp_assist_page **hv_vp_assist_page;

static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	if (!hv_vp_assist_page)
		return NULL;

	return hv_vp_assist_page[cpu];
}
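
/*
 * Illustrative sketch only: a typical caller checks both that the
 * per-CPU assist page array exists and that the page for the CPU of
 * interest has been set up before touching its fields. The helper name
 * and the enlighten_vmentry check are just an example.
 */
static inline bool hv_example_vp_assist_ready(unsigned int cpu)
{
	struct hv_vp_assist_page *page = hv_get_vp_assist_page(cpu);

	return page && page->enlighten_vmentry;
}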

void __init hyperv_init(void);
void hyperv_setup_mmu_ops(void);
void set_hv_tscchange_cb(void (*cb)(void));
void clear_hv_tscchange_cb(void);
void hyperv_stop_tsc_emulation(void);
int hyperv_flush_guest_mapping(u64 as);
int hyperv_flush_guest_mapping_range(u64 as,
		hyperv_fill_flush_list_func fill_func, void *data);
int hyperv_fill_flush_guest_mapping_list(
		struct hv_guest_mapping_flush_list *flush,
		u64 start_gfn, u64 end_gfn);
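
/*
 * Illustrative sketch only: hyperv_flush_guest_mapping_range() invokes the
 * supplied hyperv_fill_flush_list_func callback to populate the flush
 * list, so a minimal callback just forwards a GFN range to
 * hyperv_fill_flush_guest_mapping_list(). The callback name and the fixed
 * 0..1 range below are placeholders.
 */
static inline int hv_example_fill_flush_list(struct hv_guest_mapping_flush_list *flush,
					     void *data)
{
	return hyperv_fill_flush_guest_mapping_list(flush, 0, 1);
}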

#ifdef CONFIG_X86_64
void hv_apic_init(void);
void __init hv_init_spinlocks(void);
bool hv_vcpu_is_preempted(int vcpu);
#else
static inline void hv_apic_init(void) {}
#endif

struct irq_domain *hv_create_pci_msi_domain(void);

int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector,
		struct hv_interrupt_entry *entry);
int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);

#ifdef CONFIG_AMD_MEM_ENCRYPT
void hv_ghcb_msr_write(u64 msr, u64 value);
void hv_ghcb_msr_read(u64 msr, u64 *value);
bool hv_ghcb_negotiate_protocol(void);
void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason);
void hv_vtom_init(void);
#else
static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
static inline bool hv_ghcb_negotiate_protocol(void) { return false; }
static inline void hv_ghcb_terminate(unsigned int set, unsigned int reason) {}
static inline void hv_vtom_init(void) {}
#endif

extern bool hv_isolation_type_snp(void);

static inline bool hv_is_synic_reg(unsigned int reg)
{
	return (reg >= HV_REGISTER_SCONTROL) &&
	       (reg <= HV_REGISTER_SINT15);
}

static inline bool hv_is_sint_reg(unsigned int reg)
{
	return (reg >= HV_REGISTER_SINT0) &&
	       (reg <= HV_REGISTER_SINT15);
}

u64 hv_get_register(unsigned int reg);
void hv_set_register(unsigned int reg, u64 value);
u64 hv_get_non_nested_register(unsigned int reg);
void hv_set_non_nested_register(unsigned int reg, u64 value);

static __always_inline u64 hv_raw_get_register(unsigned int reg)
{
	return __rdmsr(reg);
}
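
/*
 * Illustrative sketch only: reading the partition reference counter
 * through the accessors declared above. HV_REGISTER_TIME_REF_COUNT comes
 * from hyperv-tlfs.h; whether the plain, non-nested, or raw accessor is
 * appropriate depends on the caller's context (nested vs. root partition,
 * isolated VM, etc.).
 */
static inline u64 hv_example_read_ref_counter(void)
{
	return hv_get_register(HV_REGISTER_TIME_REF_COUNT);
}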

#else /* CONFIG_HYPERV */
static inline void hyperv_init(void) {}
static inline void hyperv_setup_mmu_ops(void) {}
static inline void set_hv_tscchange_cb(void (*cb)(void)) {}
static inline void clear_hv_tscchange_cb(void) {}
static inline void hyperv_stop_tsc_emulation(void) {}
static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	return NULL;
}
static inline int hyperv_flush_guest_mapping(u64 as) { return -1; }
static inline int hyperv_flush_guest_mapping_range(u64 as,
		hyperv_fill_flush_list_func fill_func, void *data)
{
	return -1;
}
static inline void hv_set_register(unsigned int reg, u64 value) { }
static inline u64 hv_get_register(unsigned int reg) { return 0; }
static inline void hv_set_non_nested_register(unsigned int reg, u64 value) { }
static inline u64 hv_get_non_nested_register(unsigned int reg) { return 0; }
#endif /* CONFIG_HYPERV */

#ifdef CONFIG_HYPERV_VTL_MODE
void __init hv_vtl_init_platform(void);
#else
static inline void __init hv_vtl_init_platform(void) {}
#endif

#include <asm-generic/mshyperv.h>

#endif