/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "hyperv.h"
#include "hyperv_vmbus.h"

/* The one and only */
struct hv_context hv_context = {
        .synic_initialized      = false,
        .hypercall_page         = NULL,
        .signal_event_param     = NULL,
        .signal_event_buffer    = NULL,
};

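/*
 * Note on the presence check below: it relies on the CPUID "hypervisor
 * present" bit. Assuming the definitions in hyperv.h, HVCPUID_VERSION_FEATURES
 * is the standard CPUID leaf 1 and HV_PRESENT_BIT is bit 31 of ECX, which a
 * hypervisor sets to advertise itself to the guest.
 */
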
/*
 * query_hypervisor_presence
 * - Query CPUID for the presence of the Windows hypervisor
 */
static int query_hypervisor_presence(void)
{
        unsigned int eax;
        unsigned int ebx;
        unsigned int ecx;
        unsigned int edx;
        unsigned int op;

        eax = 0;
        ebx = 0;
        ecx = 0;
        edx = 0;
        op = HVCPUID_VERSION_FEATURES;
        cpuid(op, &eax, &ebx, &ecx, &edx);

        return ecx & HV_PRESENT_BIT;
}

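/*
 * query_hypervisor_info() below decodes the hypervisor version leaf.
 * Assuming the layout documented in the Hyper-V TLFS: EAX is the build
 * number, EBX holds the major version (upper 16 bits) and minor version
 * (lower 16 bits), ECX is the service pack, and EDX carries the service
 * branch (upper 8 bits) and service number (lower 24 bits); this is the
 * layout the pr_info() format string prints.
 */
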
/*
 * query_hypervisor_info - Get version info of the Windows hypervisor
 */
static int query_hypervisor_info(void)
{
        unsigned int eax;
        unsigned int ebx;
        unsigned int ecx;
        unsigned int edx;
        unsigned int max_leaf;
        unsigned int op;

        /*
         * It's assumed that this is called after confirming that Viridian
         * is present. Query id and revision.
         */
        eax = 0;
        ebx = 0;
        ecx = 0;
        edx = 0;
        op = HVCPUID_VENDOR_MAXFUNCTION;
        cpuid(op, &eax, &ebx, &ecx, &edx);

        max_leaf = eax;

        if (max_leaf >= HVCPUID_VERSION) {
                eax = 0;
                ebx = 0;
                ecx = 0;
                edx = 0;
                op = HVCPUID_VERSION;
                cpuid(op, &eax, &ebx, &ecx, &edx);
                pr_info("Hyper-V Host OS Build:%d-%d.%d-%d-%d.%d\n",
                        eax,
                        ebx >> 16,
                        ebx & 0xFFFF,
                        ecx,
                        edx >> 24,
                        edx & 0xFFFFFF);
        }
        return max_leaf;
}

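/*
 * Hypercall calling convention assumed here (per the Hyper-V TLFS): on
 * x86_64 the control code goes in RCX, the guest physical address of the
 * input parameter block in RDX and of the output block in R8, with the
 * result status returned in RAX. On 32-bit x86 the same 64-bit values are
 * split across register pairs: EDX:EAX for control, EBX:ECX for the input
 * address, EDI:ESI for the output address, and EDX:EAX for the returned
 * status. The inline assembly in do_hypercall() follows this layout and
 * calls through the hypercall page set up in hv_init().
 */
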
/*
 * do_hypercall - Invoke the specified hypercall
 */
static u64 do_hypercall(u64 control, void *input, void *output)
{
#ifdef CONFIG_X86_64
        u64 hv_status = 0;
        u64 input_address = (input) ? virt_to_phys(input) : 0;
        u64 output_address = (output) ? virt_to_phys(output) : 0;
        volatile void *hypercall_page = hv_context.hypercall_page;

        __asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
        __asm__ __volatile__("call *%3" : "=a" (hv_status) :
                             "c" (control), "d" (input_address),
                             "m" (hypercall_page));

        return hv_status;

#else

        u32 control_hi = control >> 32;
        u32 control_lo = control & 0xFFFFFFFF;
        u32 hv_status_hi = 1;
        u32 hv_status_lo = 1;
        u64 input_address = (input) ? virt_to_phys(input) : 0;
        u32 input_address_hi = input_address >> 32;
        u32 input_address_lo = input_address & 0xFFFFFFFF;
        u64 output_address = (output) ? virt_to_phys(output) : 0;
        u32 output_address_hi = output_address >> 32;
        u32 output_address_lo = output_address & 0xFFFFFFFF;
        volatile void *hypercall_page = hv_context.hypercall_page;

        __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
                              "=a"(hv_status_lo) : "d" (control_hi),
                              "a" (control_lo), "b" (input_address_hi),
                              "c" (input_address_lo), "D"(output_address_hi),
                              "S"(output_address_lo), "m" (hypercall_page));

        return hv_status_lo | ((u64)hv_status_hi << 32);
#endif /* !x86_64 */
}

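/*
 * Hypercall page setup sequence used by hv_init() below (assuming the usual
 * Hyper-V enlightenment flow): the guest first identifies itself by writing
 * HV_X64_MSR_GUEST_OS_ID, then programs the guest page frame number of a
 * guest-allocated, executable page into HV_X64_MSR_HYPERCALL with the enable
 * bit set. The hypervisor fills that page with the hypercall trampoline,
 * which is why the page is allocated with PAGE_KERNEL_EXEC and why every
 * hypercall jumps through it.
 */
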
/*
 * hv_init - Main initialization routine.
 *
 * This routine must be called before any other routines in here are called.
 */
int hv_init(void)
{
        int ret = 0;
        int max_leaf;
        union hv_x64_msr_hypercall_contents hypercall_msr;
        void *virtaddr = NULL;

        memset(hv_context.synic_event_page, 0, sizeof(void *) * MAX_NUM_CPUS);
        memset(hv_context.synic_message_page, 0,
               sizeof(void *) * MAX_NUM_CPUS);

        if (!query_hypervisor_presence())
                goto cleanup;

        max_leaf = query_hypervisor_info();
        /* HvQueryHypervisorFeatures(maxLeaf); */

        /*
         * We only support running on top of Hyper-V
         */
        rdmsrl(HV_X64_MSR_GUEST_OS_ID, hv_context.guestid);

        if (hv_context.guestid != 0)
                goto cleanup;

        /* Write our OS info */
        wrmsrl(HV_X64_MSR_GUEST_OS_ID, HV_LINUX_GUEST_ID);
        hv_context.guestid = HV_LINUX_GUEST_ID;

        /* See if the hypercall page is already set */
        rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

        /*
         * Allocate the hypercall page memory
         * virtaddr = osd_page_alloc(1);
         */
        virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);

        if (!virtaddr)
                goto cleanup;

        hypercall_msr.enable = 1;

        hypercall_msr.guest_physical_address = vmalloc_to_pfn(virtaddr);
        wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

        /* Confirm that the hypercall page did get set up. */
        hypercall_msr.as_uint64 = 0;
        rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

        if (!hypercall_msr.enable)
                goto cleanup;

        hv_context.hypercall_page = virtaddr;

        /* Set up the global signal event param for the signal event hypercall */
        hv_context.signal_event_buffer =
                kmalloc(sizeof(struct hv_input_signal_event_buffer),
                        GFP_KERNEL);
        if (!hv_context.signal_event_buffer)
                goto cleanup;

        hv_context.signal_event_param =
                (struct hv_input_signal_event *)
                        (ALIGN((unsigned long)
                                hv_context.signal_event_buffer,
                                HV_HYPERCALL_PARAM_ALIGN));
        hv_context.signal_event_param->connectionid.asu32 = 0;
        hv_context.signal_event_param->connectionid.u.id =
                VMBUS_EVENT_CONNECTION_ID;
        hv_context.signal_event_param->flag_number = 0;
        hv_context.signal_event_param->rsvdz = 0;

        return ret;

cleanup:
        if (virtaddr) {
                if (hypercall_msr.enable) {
                        hypercall_msr.as_uint64 = 0;
                        wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
                }

                vfree(virtaddr);
        }
        ret = -1;
        return ret;
}

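/*
 * hv_cleanup() below reverses hv_init(): it disables the hypercall page by
 * clearing HV_X64_MSR_HYPERCALL before freeing the backing memory, so the
 * hypervisor no longer references the page when it is returned with vfree().
 */
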
/*
 * hv_cleanup - Cleanup routine.
 *
 * This routine is normally called during driver unloading or exiting.
 */
void hv_cleanup(void)
{
        union hv_x64_msr_hypercall_contents hypercall_msr;

        kfree(hv_context.signal_event_buffer);
        hv_context.signal_event_buffer = NULL;
        hv_context.signal_event_param = NULL;

        if (hv_context.hypercall_page) {
                hypercall_msr.as_uint64 = 0;
                wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
                vfree(hv_context.hypercall_page);
                hv_context.hypercall_page = NULL;
        }
}

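/*
 * Hypercall parameter blocks must be aligned to HV_HYPERCALL_PARAM_ALIGN
 * (assumed to be sizeof(u64) per hyperv.h). hv_post_message() below
 * therefore over-allocates by one u64 (struct aligned_input) and uses
 * ALIGN() to place the real hv_input_post_message structure on a suitably
 * aligned boundary regardless of what alignment kmalloc() happened to
 * return.
 */
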
/*
 * hv_post_message - Post a message using the hypervisor message IPC.
 *
 * This involves a hypercall.
 */
u16 hv_post_message(union hv_connection_id connection_id,
                    enum hv_message_type message_type,
                    void *payload, size_t payload_size)
{
        struct aligned_input {
                u64 alignment8;
                struct hv_input_post_message msg;
        };

        struct hv_input_post_message *aligned_msg;
        u16 status;
        unsigned long addr;

        if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
                return -1;

        addr = (unsigned long)kmalloc(sizeof(struct aligned_input), GFP_ATOMIC);
        if (!addr)
                return -1;

        aligned_msg = (struct hv_input_post_message *)
                        (ALIGN(addr, HV_HYPERCALL_PARAM_ALIGN));

        aligned_msg->connectionid = connection_id;
        aligned_msg->message_type = message_type;
        aligned_msg->payload_size = payload_size;
        memcpy((void *)aligned_msg->payload, payload, payload_size);

        status = do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL)
                 & 0xFFFF;

        kfree((void *)addr);

        return status;
}

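/*
 * hv_signal_event() reuses the signal_event_param block that hv_init()
 * allocated and aligned up front, so no per-call allocation is needed; the
 * connection id was fixed to VMBUS_EVENT_CONNECTION_ID at init time.
 */
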
/*
 * hv_signal_event -
 * Signal an event on the pre-configured connection using the hypervisor
 * event IPC.
 *
 * This involves a hypercall.
 */
u16 hv_signal_event(void)
{
        u16 status;

        status = do_hypercall(HVCALL_SIGNAL_EVENT,
                              hv_context.signal_event_param,
                              NULL) & 0xFFFF;
        return status;
}

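/*
 * Synthetic Interrupt Controller (SynIC) setup, as assumed from the MSR
 * usage below: HV_X64_MSR_SIMP takes the guest page frame of the per-cpu
 * message page, HV_X64_MSR_SIEFP the per-cpu event flags page, the
 * HV_X64_MSR_SINTx registers configure each synthetic interrupt source
 * (vector, masked, auto-EOI), and HV_X64_MSR_SCONTROL enables the SynIC as
 * a whole. hv_synic_init() operates on the current CPU only (it indexes its
 * pages by smp_processor_id()), so it is expected to be run on every CPU,
 * typically via on_each_cpu() from the VMBus driver.
 */
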
/*
 * hv_synic_init - Initialize the Synthetic Interrupt Controller.
 *
 * If it is already initialized by another entity (i.e., the x2v shim), we
 * need to retrieve the initialized message and event pages. Otherwise, we
 * create and initialize the message and event pages.
 */
void hv_synic_init(void *irqarg)
{
        u64 version;
        union hv_synic_simp simp;
        union hv_synic_siefp siefp;
        union hv_synic_sint shared_sint;
        union hv_synic_scontrol sctrl;

        u32 irq_vector = *((u32 *)(irqarg));
        int cpu = smp_processor_id();

        if (!hv_context.hypercall_page)
                return;

        /* Check the version */
        rdmsrl(HV_X64_MSR_SVERSION, version);

        hv_context.synic_message_page[cpu] =
                (void *)get_zeroed_page(GFP_ATOMIC);

        if (hv_context.synic_message_page[cpu] == NULL) {
                pr_err("Unable to allocate SYNIC message page\n");
                goto cleanup;
        }

        hv_context.synic_event_page[cpu] =
                (void *)get_zeroed_page(GFP_ATOMIC);

        if (hv_context.synic_event_page[cpu] == NULL) {
                pr_err("Unable to allocate SYNIC event page\n");
                goto cleanup;
        }

        /* Setup the Synic's message page */
        rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
        simp.simp_enabled = 1;
        simp.base_simp_gpa = virt_to_phys(hv_context.synic_message_page[cpu])
                >> PAGE_SHIFT;

        wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);

        /* Setup the Synic's event page */
        rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
        siefp.siefp_enabled = 1;
        siefp.base_siefp_gpa = virt_to_phys(hv_context.synic_event_page[cpu])
                >> PAGE_SHIFT;

        wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);

        /* Setup the shared SINT. */
        rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

        shared_sint.as_uint64 = 0;
        shared_sint.vector = irq_vector; /* HV_SHARED_SINT_IDT_VECTOR + 0x20; */
        shared_sint.masked = false;
        shared_sint.auto_eoi = true;

        wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

        /* Enable the global synic bit */
        rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
        sctrl.enable = 1;

        wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);

        hv_context.synic_initialized = true;
        return;

cleanup:
        if (hv_context.synic_event_page[cpu])
                free_page((unsigned long)hv_context.synic_event_page[cpu]);

        if (hv_context.synic_message_page[cpu])
                free_page((unsigned long)hv_context.synic_message_page[cpu]);
        return;
}

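/*
 * hv_synic_cleanup() below mirrors hv_synic_init() and, like it, operates
 * only on the current CPU, so it is expected to be invoked on each CPU
 * (for example via on_each_cpu()) when the VMBus driver unloads.
 */
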
/*
 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
 */
void hv_synic_cleanup(void *arg)
{
        union hv_synic_sint shared_sint;
        union hv_synic_simp simp;
        union hv_synic_siefp siefp;
        int cpu = smp_processor_id();

        if (!hv_context.synic_initialized)
                return;

        rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

        shared_sint.masked = 1;

        /* Need to correctly cleanup in the case of SMP!!! */
        /* Disable the interrupt */
        wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

        rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
        simp.simp_enabled = 0;
        simp.base_simp_gpa = 0;

        wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);

        rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
        siefp.siefp_enabled = 0;
        siefp.base_siefp_gpa = 0;

        wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);

        free_page((unsigned long)hv_context.synic_message_page[cpu]);
        free_page((unsigned long)hv_context.synic_event_page[cpu]);
}