// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/version.h>
#include <linux/random.h>
#include <linux/clockchips.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"

/* The one and only */
struct hv_context hv_context;

/*
 * If false, we're using the old mechanism for stimer0 interrupts,
 * where the timer sends a VMbus message when it expires. The old
 * mechanism is used when running on older versions of Hyper-V
 * that don't support Direct Mode. While Hyper-V provides
 * four stimers per CPU, Linux uses only stimer0.
 */
static bool direct_mode_enabled;
static int stimer0_irq;
static int stimer0_vector;

#define HV_TIMER_FREQUENCY (10 * 1000 * 1000) /* 100ns period */
#define HV_MAX_MAX_DELTA_TICKS 0xffffffff
#define HV_MIN_DELTA_TICKS 1
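
/*
 * With a 10 MHz reference (one tick every 100 ns), the 32-bit maximum
 * delta of 0xffffffff ticks is roughly 429 seconds; the minimum delta
 * of one tick is 100 ns.
 */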

/*
 * hv_init - Main initialization routine.
 *
 * This routine must be called before any other routine in this file
 * is called.
 */
int hv_init(void)
{
	hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
	if (!hv_context.cpu_context)
		return -ENOMEM;

	direct_mode_enabled = ms_hyperv.misc_features &
			HV_STIMER_DIRECT_MODE_AVAILABLE;
	return 0;
}

/*
 * hv_post_message - Post a message using the hypervisor message IPC.
 *
 * This involves a hypercall.
 */
int hv_post_message(union hv_connection_id connection_id,
		    enum hv_message_type message_type,
		    void *payload, size_t payload_size)
{
	struct hv_input_post_message *aligned_msg;
	struct hv_per_cpu_context *hv_cpu;
	u64 status;

	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
		return -EMSGSIZE;

	hv_cpu = get_cpu_ptr(hv_context.cpu_context);
	aligned_msg = hv_cpu->post_msg_page;
	aligned_msg->connectionid = connection_id;
	aligned_msg->reserved = 0;
	aligned_msg->message_type = message_type;
	aligned_msg->payload_size = payload_size;
	memcpy((void *)aligned_msg->payload, payload, payload_size);

	status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);

	/*
	 * Preemption must remain disabled until after the hypercall
	 * so some other thread can't get scheduled onto this CPU and
	 * corrupt the per-cpu post_msg_page.
	 */
	put_cpu_ptr(hv_cpu);

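	/*
	 * The hypercall result value carries the HV_STATUS code in its
	 * low 16 bits; the remaining bits are output fields (such as the
	 * count of reps completed) and must be masked off.
	 */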
	return status & 0xFFFF;
}

/*
 * ISR for when stimer0 is operating in Direct Mode. Direct Mode does
 * not use VMbus or any VMbus messages, so the interrupt is handled
 * here rather than in the VMbus driver code.
 */
static void hv_stimer0_isr(void)
{
	struct hv_per_cpu_context *hv_cpu;

	hv_cpu = this_cpu_ptr(hv_context.cpu_context);
	hv_cpu->clk_evt->event_handler(hv_cpu->clk_evt);
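	/*
	 * Feed the interrupt timing into the entropy pool, as the generic
	 * IRQ path would do for interrupts that arrive through it.
	 */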
	add_interrupt_randomness(stimer0_vector, 0);
}

static int hv_ce_set_next_event(unsigned long delta,
				struct clock_event_device *evt)
{
	u64 current_tick;

	WARN_ON(!clockevent_state_oneshot(evt));

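	/*
	 * The synthetic timer is programmed with an absolute expiration
	 * time in 100 ns units, so add the requested delta to the current
	 * reference-counter reading.
	 */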
	current_tick = hyperv_cs->read(NULL);
	current_tick += delta;
	hv_init_timer(0, current_tick);
	return 0;
}

static int hv_ce_shutdown(struct clock_event_device *evt)
{
	hv_init_timer(0, 0);
	hv_init_timer_config(0, 0);
	if (direct_mode_enabled)
		hv_disable_stimer0_percpu_irq(stimer0_irq);

	return 0;
}

static int hv_ce_set_oneshot(struct clock_event_device *evt)
{
	union hv_stimer_config timer_cfg;

	timer_cfg.as_uint64 = 0;
	timer_cfg.enable = 1;
	timer_cfg.auto_enable = 1;
	if (direct_mode_enabled) {
		/*
		 * When it expires, the timer will directly interrupt
		 * on the specified hardware vector/IRQ.
		 */
		timer_cfg.direct_mode = 1;
		timer_cfg.apic_vector = stimer0_vector;
		hv_enable_stimer0_percpu_irq(stimer0_irq);
	} else {
		/*
		 * When it expires, the timer will generate a VMbus message,
		 * to be handled by the normal VMbus interrupt handler.
		 */
		timer_cfg.direct_mode = 0;
		timer_cfg.sintx = VMBUS_MESSAGE_SINT;
	}
	hv_init_timer_config(0, timer_cfg.as_uint64);
	return 0;
}

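/*
 * hv_init_clockevent_device - Set up a per-cpu clock_event_device that
 * is backed by the Hyper-V synthetic timer stimer0.
 */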
static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
{
	dev->name = "Hyper-V clockevent";
	dev->features = CLOCK_EVT_FEAT_ONESHOT;
	dev->cpumask = cpumask_of(cpu);
	/* Highest rating, so this device is preferred over other clockevents */
	dev->rating = 1000;
	/*
	 * Avoid setting dev->owner = THIS_MODULE deliberately, as doing so
	 * would result in clockevents_config_and_register() taking additional
	 * references to the hv_vmbus module, making it impossible to unload.
	 */

	dev->set_state_shutdown = hv_ce_shutdown;
	dev->set_state_oneshot = hv_ce_set_oneshot;
	dev->set_next_event = hv_ce_set_next_event;
}

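/*
 * hv_synic_alloc - Allocate the per-cpu state used by the SynIC code:
 * the message, event and post-message pages, the clockevent device and
 * the message-handling tasklet, plus the NUMA map used when assigning
 * channels to CPUs.
 */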
int hv_synic_alloc(void)
{
	int cpu;
	struct hv_per_cpu_context *hv_cpu;

	/*
	 * First, zero all per-cpu memory areas so hv_synic_free() can
	 * detect what memory has been allocated and clean up properly
	 * after any failures.
	 */
	for_each_present_cpu(cpu) {
		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
		memset(hv_cpu, 0, sizeof(*hv_cpu));
	}

	hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
					 GFP_KERNEL);
	if (hv_context.hv_numa_map == NULL) {
		pr_err("Unable to allocate NUMA map\n");
		goto err;
	}

	for_each_present_cpu(cpu) {
		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);

		tasklet_init(&hv_cpu->msg_dpc,
			     vmbus_on_msg_dpc, (unsigned long) hv_cpu);

		hv_cpu->clk_evt = kzalloc(sizeof(struct clock_event_device),
					  GFP_KERNEL);
		if (hv_cpu->clk_evt == NULL) {
			pr_err("Unable to allocate clock event device\n");
			goto err;
		}
		hv_init_clockevent_device(hv_cpu->clk_evt, cpu);

		hv_cpu->synic_message_page =
			(void *)get_zeroed_page(GFP_ATOMIC);
		if (hv_cpu->synic_message_page == NULL) {
			pr_err("Unable to allocate SYNIC message page\n");
			goto err;
		}

		hv_cpu->synic_event_page = (void *)get_zeroed_page(GFP_ATOMIC);
		if (hv_cpu->synic_event_page == NULL) {
			pr_err("Unable to allocate SYNIC event page\n");
			goto err;
		}

		hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
		if (hv_cpu->post_msg_page == NULL) {
			pr_err("Unable to allocate post msg page\n");
			goto err;
		}

		INIT_LIST_HEAD(&hv_cpu->chan_list);
	}

	if (direct_mode_enabled &&
	    hv_setup_stimer0_irq(&stimer0_irq, &stimer0_vector,
				 hv_stimer0_isr))
		goto err;

	return 0;
err:
	/*
	 * Any memory allocations that succeeded will be freed when
	 * the caller cleans up by calling hv_synic_free().
	 */
	return -ENOMEM;
}
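/*
 * hv_synic_free - Free the per-cpu memory allocated in hv_synic_alloc().
 * Safe to call after a partial allocation failure: kfree() and
 * free_page() both accept NULL/zero, and hv_synic_alloc() zeroes each
 * per-cpu area before allocating into it.
 */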
void hv_synic_free(void)
{
	int cpu;

	for_each_present_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		kfree(hv_cpu->clk_evt);
		free_page((unsigned long)hv_cpu->synic_event_page);
		free_page((unsigned long)hv_cpu->synic_message_page);
		free_page((unsigned long)hv_cpu->post_msg_page);
	}

	kfree(hv_context.hv_numa_map);
}

/*
 * hv_synic_init - Initialize the Synthetic Interrupt Controller.
 *
 * If it is already initialized by another entity (i.e., the x2v shim),
 * we need to retrieve the initialized message and event pages.
 * Otherwise, we create and initialize the message and event pages.
 */
int hv_synic_init(unsigned int cpu)
{
	struct hv_per_cpu_context *hv_cpu
		= per_cpu_ptr(hv_context.cpu_context, cpu);
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_sint shared_sint;
	union hv_synic_scontrol sctrl;

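	/*
	 * The SIMP and SIEFP synthetic MSRs hold a guest page frame number
	 * plus an enable bit, hence the virt_to_phys()/PAGE_SHIFT
	 * conversions below.
	 */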
	/* Set up the SynIC message page */
	hv_get_simp(simp.as_uint64);
	simp.simp_enabled = 1;
	simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
		>> PAGE_SHIFT;

	hv_set_simp(simp.as_uint64);

	/* Set up the SynIC event page */
	hv_get_siefp(siefp.as_uint64);
	siefp.siefp_enabled = 1;
	siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
		>> PAGE_SHIFT;

	hv_set_siefp(siefp.as_uint64);

	/* Set up the shared SINT. */
	hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
	shared_sint.masked = false;
	if (ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED)
		shared_sint.auto_eoi = false;
	else
		shared_sint.auto_eoi = true;

	hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	/* Enable the global SynIC bit */
	hv_get_synic_state(sctrl.as_uint64);
	sctrl.enable = 1;

	hv_set_synic_state(sctrl.as_uint64);

	/*
	 * Register the per-cpu clockevent source.
	 */
	if (ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE)
		clockevents_config_and_register(hv_cpu->clk_evt,
						HV_TIMER_FREQUENCY,
						HV_MIN_DELTA_TICKS,
						HV_MAX_MAX_DELTA_TICKS);
	return 0;
}

/*
 * hv_synic_clockevents_cleanup - Cleanup clockevent devices
 */
void hv_synic_clockevents_cleanup(void)
{
	int cpu;

	if (!(ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE))
		return;

	if (direct_mode_enabled)
		hv_remove_stimer0_irq(stimer0_irq);

	for_each_present_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		clockevents_unbind_device(hv_cpu->clk_evt, cpu);
	}
}

/*
 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
 */
int hv_synic_cleanup(unsigned int cpu)
{
	union hv_synic_sint shared_sint;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_scontrol sctrl;
	struct vmbus_channel *channel, *sc;
	bool channel_found = false;
	unsigned long flags;

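	/* Nothing to clean up if the SynIC was never enabled on this CPU. */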
	hv_get_synic_state(sctrl.as_uint64);
	if (sctrl.enable != 1)
		return -EFAULT;

	/*
	 * Search for channels which are bound to the CPU we're about to
	 * clean up. In case we find one and vmbus is still connected, we
	 * need to fail; this will effectively prevent CPU offlining.
	 * There is no way we can re-bind channels to different CPUs for now.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (channel->target_cpu == cpu) {
			channel_found = true;
			break;
		}
		spin_lock_irqsave(&channel->lock, flags);
		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			if (sc->target_cpu == cpu) {
				channel_found = true;
				break;
			}
		}
		spin_unlock_irqrestore(&channel->lock, flags);
		if (channel_found)
			break;
	}
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (channel_found && vmbus_connection.conn_state == CONNECTED)
		return -EBUSY;

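	/*
	 * CPU hotplug teardown callbacks run on the CPU that is going
	 * offline, so this_cpu_ptr() below refers to @cpu's own context.
	 */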
	/* Turn off clockevent device */
	if (ms_hyperv.features & HV_MSR_SYNTIMER_AVAILABLE) {
		struct hv_per_cpu_context *hv_cpu
			= this_cpu_ptr(hv_context.cpu_context);

		clockevents_unbind_device(hv_cpu->clk_evt, cpu);
		hv_ce_shutdown(hv_cpu->clk_evt);
	}


	hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.masked = 1;

	/* Need to clean up correctly in the SMP case!!! */
	/* Disable the interrupt */
	hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	hv_get_simp(simp.as_uint64);
	simp.simp_enabled = 0;
	simp.base_simp_gpa = 0;

	hv_set_simp(simp.as_uint64);

	hv_get_siefp(siefp.as_uint64);
	siefp.siefp_enabled = 0;
	siefp.base_siefp_gpa = 0;

	hv_set_siefp(siefp.as_uint64);

	/* Disable the global SynIC bit */
	sctrl.enable = 0;
	hv_set_synic_state(sctrl.as_uint64);

	return 0;
}