// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/sched/task_stack.h>

#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
#include <linux/efi.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h>
#include "hyperv_vmbus.h"

struct vmbus_dynid {
	struct list_head node;
	struct hv_vmbus_device_id id;
};

static struct acpi_device *hv_acpi_dev;

static struct completion probe_event;

static int hyperv_cpuhp_online;

static void *hv_panic_page;

static long __percpu *vmbus_evt;

/* Values parsed from ACPI DSDT */
int vmbus_irq;
int vmbus_interrupt;

/*
 * Boolean to control whether to report panic messages over Hyper-V.
 *
 * It can be set via /proc/sys/kernel/hyperv_record_panic_msg
 */
static int sysctl_record_panic_msg = 1;

static int hyperv_report_reg(void)
{
	return !sysctl_record_panic_msg || !hv_panic_page;
}

static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
			      void *args)
{
	struct pt_regs *regs;

	vmbus_initiate_unload(true);

	/*
	 * Hyper-V should be notified only once about a panic. If
	 * hyperv_report_panic_msg() will be called later with kmsg data,
	 * skip the notification here.
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
	    && hyperv_report_reg()) {
		regs = current_pt_regs();
		hyperv_report_panic(regs, val, false);
	}
	return NOTIFY_DONE;
}

static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
			    void *args)
{
	struct die_args *die = args;
	struct pt_regs *regs = die->regs;

	/* Don't notify Hyper-V if the die event is other than oops */
	if (val != DIE_OOPS)
		return NOTIFY_DONE;

	/*
	 * Hyper-V should be notified only once about a panic. If
	 * hyperv_report_panic_msg() will be called later with kmsg data,
	 * skip the notification here.
	 */
	if (hyperv_report_reg())
		hyperv_report_panic(regs, val, true);
	return NOTIFY_DONE;
}

static struct notifier_block hyperv_die_block = {
	.notifier_call = hyperv_die_event,
};
static struct notifier_block hyperv_panic_block = {
	.notifier_call = hyperv_panic_event,
};

static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_MUTEX(hyperv_mmio_lock);

static int vmbus_exists(void)
{
	if (hv_acpi_dev == NULL)
		return -ENODEV;

	return 0;
}

static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid % 32;
}
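
/*
 * Editor's note: the 8-bit monitor ID is split into a trigger-group index
 * (monitorid / 32) and a bit offset within that group (monitorid % 32).
 * Worked example: monitorid 70 lands in trigger group 2 at offset 6.
 */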

static u32 channel_pending(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);

	return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}

static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       &hv_dev->channel->offermsg.offer.if_type);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       &hv_dev->channel->offermsg.offer.if_instance);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
}
static DEVICE_ATTR_RO(modalias);

#ifdef CONFIG_NUMA
static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;

	return sprintf(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
}
static DEVICE_ATTR_RO(numa_node);
#endif

static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);

static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);

static ssize_t channel_vp_mapping_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
	int buf_size = PAGE_SIZE, n_written, tot_written;
	struct list_head *cur;

	if (!channel)
		return -ENODEV;

	mutex_lock(&vmbus_connection.channel_mutex);

	tot_written = snprintf(buf, buf_size, "%u:%u\n",
			       channel->offermsg.child_relid, channel->target_cpu);

	list_for_each(cur, &channel->sc_list) {
		if (tot_written >= buf_size - 1)
			break;

		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
		n_written = scnprintf(buf + tot_written,
				      buf_size - tot_written,
				      "%u:%u\n",
				      cur_sc->offermsg.child_relid,
				      cur_sc->target_cpu);
		tot_written += n_written;
	}

	mutex_unlock(&vmbus_connection.channel_mutex);

	return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);
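
/*
 * Editor's note: channel_vp_mapping above emits one "relid:cpu" pair per
 * line, primary channel first. Illustrative output for a device whose
 * primary channel (relid 14) runs on CPU 2 with one sub-channel
 * (relid 15) on CPU 3:
 *
 *	14:2
 *	15:3
 */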

static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = hv_dev->driver_override;
	if (strlen(driver_override)) {
		hv_dev->driver_override = driver_override;
	} else {
		kfree(driver_override);
		hv_dev->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
	device_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(driver_override);
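
/*
 * Editor's note: a typical driver_override workflow from user space, as
 * used when handing a device to uio_hv_generic ("<device>" stands for a
 * device instance GUID and is illustrative only):
 *
 *	# echo uio_hv_generic > /sys/bus/vmbus/devices/<device>/driver_override
 *	# echo <device> > /sys/bus/vmbus/drivers/hv_netvsc/unbind
 *	# echo <device> > /sys/bus/vmbus/drivers/uio_hv_generic/bind
 *
 * Writing an empty string (or a bare newline) clears the override.
 */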

/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_dev_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	&dev_attr_channel_vp_mapping.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

/*
 * Device-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj,
					 struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	const struct hv_device *hv_dev = device_to_hv_device(dev);

	/* Hide the monitor attributes if the monitor mechanism is not used. */
	if (!hv_dev->channel->offermsg.monitor_allocated &&
	    (attr == &dev_attr_monitor_id.attr ||
	     attr == &dev_attr_server_monitor_pending.attr ||
	     attr == &dev_attr_client_monitor_pending.attr ||
	     attr == &dev_attr_server_monitor_latency.attr ||
	     attr == &dev_attr_client_monitor_latency.attr ||
	     attr == &dev_attr_server_monitor_conn_id.attr ||
	     attr == &dev_attr_client_monitor_conn_id.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group vmbus_dev_group = {
	.attrs = vmbus_dev_attrs,
	.is_visible = vmbus_dev_attr_is_visible
};
__ATTRIBUTE_GROUPS(vmbus_dev);

/* Set up the attribute for /sys/bus/vmbus/hibernation */
static ssize_t hibernation_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "%d\n", !!hv_is_hibernation_supported());
}

static BUS_ATTR_RO(hibernation);

static struct attribute *vmbus_bus_attrs[] = {
	&bus_attr_hibernation.attr,
	NULL,
};
static const struct attribute_group vmbus_bus_group = {
	.attrs = vmbus_bus_attrs,
};
__ATTRIBUTE_GROUPS(vmbus_bus);

/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in the userspace. The udev will then look at its
 * rules and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
	struct hv_device *dev = device_to_hv_device(device);
	const char *format = "MODALIAS=vmbus:%*phN";

	return add_uevent_var(env, format, UUID_SIZE, &dev->dev_type);
}
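
/*
 * Editor's note: the resulting uevent variable has the form
 * MODALIAS=vmbus:<32 hex digits>, i.e. the 16 bytes of the device-type
 * GUID in memory order. modprobe matches this string against the
 * "vmbus:*" aliases generated from each driver's
 * MODULE_DEVICE_TABLE(vmbus, ...).
 */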

static const struct hv_vmbus_device_id *
hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const guid_t *guid)
{
	if (id == NULL)
		return NULL; /* empty device table */

	for (; !guid_is_null(&id->guid); id++)
		if (guid_equal(&id->guid, guid))
			return id;

	return NULL;
}

static const struct hv_vmbus_device_id *
hv_vmbus_dynid_match(struct hv_driver *drv, const guid_t *guid)
{
	const struct hv_vmbus_device_id *id = NULL;
	struct vmbus_dynid *dynid;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry(dynid, &drv->dynids.list, node) {
		if (guid_equal(&dynid->id.guid, guid)) {
			id = &dynid->id;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return id;
}

static const struct hv_vmbus_device_id vmbus_device_null;

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
							struct hv_device *dev)
{
	const guid_t *guid = &dev->dev_type;
	const struct hv_vmbus_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (dev->driver_override && strcmp(dev->driver_override, drv->name))
		return NULL;

	/* Look at the dynamic ids first, before the static ones */
	id = hv_vmbus_dynid_match(drv, guid);
	if (!id)
		id = hv_vmbus_dev_match(drv->id_table, guid);

	/* driver_override will always match, send a dummy id */
	if (!id && dev->driver_override)
		id = &vmbus_device_null;

	return id;
}

/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
{
	struct vmbus_dynid *dynid;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;

	dynid->id.guid = *guid;

	spin_lock(&drv->dynids.lock);
	list_add_tail(&dynid->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	return driver_attach(&drv->driver);
}

static void vmbus_free_dynids(struct hv_driver *drv)
{
	struct vmbus_dynid *dynid, *n;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		list_del(&dynid->node);
		kfree(dynid);
	}
	spin_unlock(&drv->dynids.lock);
}

/*
 * new_id_store - sysfs frontend to vmbus_add_dynid()
 *
 * Allow GUIDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
			    size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	guid_t guid;
	ssize_t retval;

	retval = guid_parse(buf, &guid);
	if (retval)
		return retval;

	if (hv_vmbus_dynid_match(drv, &guid))
		return -EEXIST;

	retval = vmbus_add_dynid(drv, &guid);
	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR_WO(new_id);
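
/*
 * Editor's note: example of adding a dynamic ID from user space (the
 * GUID below is illustrative); guid_parse() expects the standard
 * 8-4-4-4-12 textual format:
 *
 *	# echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" \
 *		> /sys/bus/vmbus/drivers/uio_hv_generic/new_id
 */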

/*
 * remove_id_store - remove a vmbus device ID from this driver
 *
 * Removes a dynamic vmbus device ID from this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
			       size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct vmbus_dynid *dynid, *n;
	guid_t guid;
	ssize_t retval;

	retval = guid_parse(buf, &guid);
	if (retval)
		return retval;

	retval = -ENODEV;
	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		struct hv_vmbus_device_id *id = &dynid->id;

		if (guid_equal(&id->guid, &guid)) {
			list_del(&dynid->node);
			kfree(dynid);
			retval = count;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return retval;
}
static DRIVER_ATTR_WO(remove_id);

static struct attribute *vmbus_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus_drv);


/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct hv_device *hv_dev = device_to_hv_device(device);

	/* The hv_sock driver handles all hv_sock offers. */
	if (is_hvsock_channel(hv_dev->channel))
		return drv->hvsock;

	if (hv_vmbus_get_id(drv, hv_dev))
		return 1;

	return 0;
}

/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
	int ret = 0;
	struct hv_driver *drv =
			drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;

	dev_id = hv_vmbus_get_id(drv, dev);
	if (drv->probe) {
		ret = drv->probe(dev, dev_id);
		if (ret != 0)
			pr_err("probe failed for device %s (%d)\n",
			       dev_name(child_device), ret);

	} else {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		ret = -ENODEV;
	}
	return ret;
}

/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	if (child_device->driver) {
		drv = drv_to_hv_drv(child_device->driver);
		if (drv->remove)
			drv->remove(dev);
	}

	return 0;
}


/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return;

	drv = drv_to_hv_drv(child_device->driver);

	if (drv->shutdown)
		drv->shutdown(dev);
}

#ifdef CONFIG_PM_SLEEP
/*
 * vmbus_suspend - Suspend a vmbus device
 */
static int vmbus_suspend(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return 0;

	drv = drv_to_hv_drv(child_device->driver);
	if (!drv->suspend)
		return -EOPNOTSUPP;

	return drv->suspend(dev);
}

/*
 * vmbus_resume - Resume a vmbus device
 */
static int vmbus_resume(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return 0;

	drv = drv_to_hv_drv(child_device->driver);
	if (!drv->resume)
		return -EOPNOTSUPP;

	return drv->resume(dev);
}
#else
#define vmbus_suspend NULL
#define vmbus_resume NULL
#endif /* CONFIG_PM_SLEEP */

/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);
	struct vmbus_channel *channel = hv_dev->channel;

	hv_debug_rm_dev_dir(hv_dev);

	mutex_lock(&vmbus_connection.channel_mutex);
	hv_process_channel_removal(channel);
	mutex_unlock(&vmbus_connection.channel_mutex);
	kfree(hv_dev);
}

/*
 * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
 *
 * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
 * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
 * is no way to wake up a Generation-2 VM.
 *
 * The other 4 ops are for hibernation.
 */

static const struct dev_pm_ops vmbus_pm = {
	.suspend_noirq	= NULL,
	.resume_noirq	= NULL,
	.freeze_noirq	= vmbus_suspend,
	.thaw_noirq	= vmbus_resume,
	.poweroff_noirq	= vmbus_suspend,
	.restore_noirq	= vmbus_resume,
};

/* The one and only one */
static struct bus_type hv_bus = {
	.name =		"vmbus",
	.match =	vmbus_match,
	.shutdown =	vmbus_shutdown,
	.remove =	vmbus_remove,
	.probe =	vmbus_probe,
	.uevent =	vmbus_uevent,
	.dev_groups =	vmbus_dev_groups,
	.drv_groups =	vmbus_drv_groups,
	.bus_groups =	vmbus_bus_groups,
	.pm =		&vmbus_pm,
};

struct onmessage_work_context {
	struct work_struct work;
	struct {
		struct hv_message_header header;
		u8 payload[];
	} msg;
};
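
/*
 * Editor's note: ctx->msg deliberately mirrors the layout of struct
 * hv_message (header followed by a flexible payload) so that a copy of a
 * variable-sized host message can be embedded in a deferred-work item.
 * A minimal allocation sketch, mirroring vmbus_on_msg_dpc() below:
 *
 *	ctx = kmalloc(sizeof(*ctx) + payload_size, GFP_ATOMIC);
 *	INIT_WORK(&ctx->work, vmbus_onmessage_work);
 *	memcpy(&ctx->msg, &msg_copy, sizeof(msg->header) + payload_size);
 */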

static void vmbus_onmessage_work(struct work_struct *work)
{
	struct onmessage_work_context *ctx;

	/* Do not process messages if we're in DISCONNECTED state */
	if (vmbus_connection.conn_state == DISCONNECTED)
		return;

	ctx = container_of(work, struct onmessage_work_context,
			   work);
	vmbus_onmessage((struct vmbus_channel_message_header *)
			&ctx->msg.payload);
	kfree(ctx);
}

void vmbus_on_msg_dpc(unsigned long data)
{
	struct hv_per_cpu_context *hv_cpu = (void *)data;
	void *page_addr = hv_cpu->synic_message_page;
	struct hv_message msg_copy, *msg = (struct hv_message *)page_addr +
				  VMBUS_MESSAGE_SINT;
	struct vmbus_channel_message_header *hdr;
	enum vmbus_channel_message_type msgtype;
	const struct vmbus_channel_message_table_entry *entry;
	struct onmessage_work_context *ctx;
	__u8 payload_size;
	u32 message_type;

	/*
	 * 'enum vmbus_channel_message_type' is supposed to always be 'u32' as
	 * it is being used in 'struct vmbus_channel_message_header' definition
	 * which is supposed to match hypervisor ABI.
	 */
	BUILD_BUG_ON(sizeof(enum vmbus_channel_message_type) != sizeof(u32));

	/*
	 * Since the message is in memory shared with the host, an erroneous or
	 * malicious Hyper-V could modify the message while vmbus_on_msg_dpc()
	 * or individual message handlers are executing; to prevent this, copy
	 * the message into private memory.
	 */
	memcpy(&msg_copy, msg, sizeof(struct hv_message));

	message_type = msg_copy.header.message_type;
	if (message_type == HVMSG_NONE)
		/* no msg */
		return;

	hdr = (struct vmbus_channel_message_header *)msg_copy.u.payload;
	msgtype = hdr->msgtype;

	trace_vmbus_on_msg_dpc(hdr);

	if (msgtype >= CHANNELMSG_COUNT) {
		WARN_ONCE(1, "unknown msgtype=%d\n", msgtype);
		goto msg_handled;
	}

	payload_size = msg_copy.header.payload_size;
	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
		WARN_ONCE(1, "payload size is too large (%d)\n", payload_size);
		goto msg_handled;
	}

	entry = &channel_message_table[msgtype];

	if (!entry->message_handler)
		goto msg_handled;

	if (payload_size < entry->min_payload_len) {
		WARN_ONCE(1, "message too short: msgtype=%d len=%d\n", msgtype, payload_size);
		goto msg_handled;
	}

	if (entry->handler_type == VMHT_BLOCKING) {
		ctx = kmalloc(sizeof(*ctx) + payload_size, GFP_ATOMIC);
		if (ctx == NULL)
			return;

		INIT_WORK(&ctx->work, vmbus_onmessage_work);
		memcpy(&ctx->msg, &msg_copy, sizeof(msg->header) + payload_size);

		/*
		 * The host can generate a rescind message while we
		 * may still be handling the original offer. We deal with
		 * this condition by relying on the synchronization provided
		 * by offer_in_progress and by channel_mutex. See also the
		 * inline comments in vmbus_onoffer_rescind().
		 */
		switch (msgtype) {
		case CHANNELMSG_RESCIND_CHANNELOFFER:
			/*
			 * If we are handling the rescind message,
			 * schedule the work on the global work queue.
			 *
			 * The OFFER message and the RESCIND message should
			 * not be handled by the same serialized work queue,
			 * because the OFFER handler may call vmbus_open(),
			 * which tries to open the channel by sending an
			 * OPEN_CHANNEL message to the host and waits for
			 * the host's response; however, if the host has
			 * rescinded the channel before it receives the
			 * OPEN_CHANNEL message, the host just silently
			 * ignores the OPEN_CHANNEL message; as a result,
			 * the guest's OFFER handler hangs forever, if we
			 * handle the RESCIND message in the same serialized
			 * work queue: the RESCIND handler cannot start to
			 * run before the OFFER handler finishes.
			 */
			schedule_work(&ctx->work);
			break;

		case CHANNELMSG_OFFERCHANNEL:
			/*
			 * The host sends the offer message of a given channel
			 * before sending the rescind message of the same
			 * channel. These messages are sent to the guest's
			 * connect CPU; the guest then starts processing them
			 * in the tasklet handler on this CPU:
			 *
			 * VMBUS_CONNECT_CPU
			 *
			 * [vmbus_on_msg_dpc()]
			 * atomic_inc()  // CHANNELMSG_OFFERCHANNEL
			 * queue_work()
			 * ...
			 * [vmbus_on_msg_dpc()]
			 * schedule_work()  // CHANNELMSG_RESCIND_CHANNELOFFER
			 *
			 * We rely on the memory-ordering properties of the
			 * queue_work() and schedule_work() primitives, which
			 * guarantee that the atomic increment will be visible
			 * to the CPUs which will execute the offer & rescind
			 * works by the time these works will start execution.
			 */
			atomic_inc(&vmbus_connection.offer_in_progress);
			fallthrough;

		default:
			queue_work(vmbus_connection.work_queue, &ctx->work);
		}
	} else
		entry->message_handler(hdr);

msg_handled:
	vmbus_signal_eom(msg, message_type);
}

#ifdef CONFIG_PM_SLEEP
/*
 * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
 * hibernation, because hv_sock connections cannot persist across hibernation.
 */
static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
{
	struct onmessage_work_context *ctx;
	struct vmbus_channel_rescind_offer *rescind;

	WARN_ON(!is_hvsock_channel(channel));

	/*
	 * Allocation size is small and the allocation should really not fail,
	 * otherwise the state of the hv_sock connections ends up in limbo.
	 */
	ctx = kzalloc(sizeof(*ctx) + sizeof(*rescind),
		      GFP_KERNEL | __GFP_NOFAIL);

	/*
	 * So far, these are not really used by Linux. Just set them to
	 * reasonable values conforming to the definitions of the fields.
	 */
	ctx->msg.header.message_type = 1;
	ctx->msg.header.payload_size = sizeof(*rescind);

	/* These values are actually used by Linux. */
	rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.payload;
	rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
	rescind->child_relid = channel->offermsg.child_relid;

	INIT_WORK(&ctx->work, vmbus_onmessage_work);

	queue_work(vmbus_connection.work_queue, &ctx->work);
}
#endif /* CONFIG_PM_SLEEP */

/*
 * Schedule all channels with events pending
 */
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
{
	unsigned long *recv_int_page;
	u32 maxbits, relid;

	if (vmbus_proto_version < VERSION_WIN8) {
		maxbits = MAX_NUM_CHANNELS_SUPPORTED;
		recv_int_page = vmbus_connection.recv_int_page;
	} else {
		/*
		 * When the host is win8 and beyond, the event page
		 * can be directly checked to get the id of the channel
		 * that has the interrupt pending.
		 */
		void *page_addr = hv_cpu->synic_event_page;
		union hv_synic_event_flags *event
			= (union hv_synic_event_flags *)page_addr +
						VMBUS_MESSAGE_SINT;

		maxbits = HV_EVENT_FLAGS_COUNT;
		recv_int_page = event->flags;
	}

	if (unlikely(!recv_int_page))
		return;

	for_each_set_bit(relid, recv_int_page, maxbits) {
		void (*callback_fn)(void *context);
		struct vmbus_channel *channel;

		if (!sync_test_and_clear_bit(relid, recv_int_page))
			continue;

		/* Special case - vmbus channel protocol msg */
		if (relid == 0)
			continue;

		/*
		 * Pairs with the kfree_rcu() in vmbus_chan_release().
		 * Guarantees that the channel data structure doesn't
		 * get freed while the channel pointer below is being
		 * dereferenced.
		 */
		rcu_read_lock();

		/* Find channel based on relid */
		channel = relid2channel(relid);
		if (channel == NULL)
			goto sched_unlock_rcu;

		if (channel->rescind)
			goto sched_unlock_rcu;

		/*
		 * Make sure that the ring buffer data structure doesn't get
		 * freed while we dereference the ring buffer pointer. Test
		 * for the channel's onchannel_callback being NULL within a
		 * sched_lock critical section. See also the inline comments
		 * in vmbus_reset_channel_cb().
		 */
		spin_lock(&channel->sched_lock);

		callback_fn = channel->onchannel_callback;
		if (unlikely(callback_fn == NULL))
			goto sched_unlock;

		trace_vmbus_chan_sched(channel);

		++channel->interrupts;

		switch (channel->callback_mode) {
		case HV_CALL_ISR:
			(*callback_fn)(channel->channel_callback_context);
			break;

		case HV_CALL_BATCHED:
			hv_begin_read(&channel->inbound);
			fallthrough;
		case HV_CALL_DIRECT:
			tasklet_schedule(&channel->callback_event);
		}

sched_unlock:
		spin_unlock(&channel->sched_lock);
sched_unlock_rcu:
		rcu_read_unlock();
	}
}

static void vmbus_isr(void)
{
	struct hv_per_cpu_context *hv_cpu
		= this_cpu_ptr(hv_context.cpu_context);
	void *page_addr = hv_cpu->synic_event_page;
	struct hv_message *msg;
	union hv_synic_event_flags *event;
	bool handled = false;

	if (unlikely(page_addr == NULL))
		return;

	event = (union hv_synic_event_flags *)page_addr +
					 VMBUS_MESSAGE_SINT;
	/*
	 * Check for events before checking for messages. This is the order
	 * in which events and messages are checked in Windows guests on
	 * Hyper-V, and the Windows team suggested we do the same.
	 */

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7)) {

		/* Since we are a child, we only need to check bit 0 */
		if (sync_test_and_clear_bit(0, event->flags))
			handled = true;
	} else {
		/*
		 * Our host is win8 or above. The signaling mechanism
		 * has changed and we can directly look at the event page.
		 * If bit n is set then we have an interrupt on the channel
		 * whose id is n.
		 */
		handled = true;
	}

	if (handled)
		vmbus_chan_sched(hv_cpu);

	page_addr = hv_cpu->synic_message_page;
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
			hv_stimer0_isr();
			vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
		} else
			tasklet_schedule(&hv_cpu->msg_dpc);
	}

	add_interrupt_randomness(vmbus_interrupt, 0);
}

static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
{
	vmbus_isr();
	return IRQ_HANDLED;
}

/*
 * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
 * buffer and call into Hyper-V to transfer the data.
 */
static void hv_kmsg_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason)
{
	struct kmsg_dump_iter iter;
	size_t bytes_written;

	/* We are only interested in panics. */
	if ((reason != KMSG_DUMP_PANIC) || (!sysctl_record_panic_msg))
		return;

	/*
	 * Write dump contents to the page. No need to synchronize; panic should
	 * be single-threaded.
	 */
	kmsg_dump_rewind(&iter);
	kmsg_dump_get_buffer(&iter, false, hv_panic_page, HV_HYP_PAGE_SIZE,
			     &bytes_written);
	if (!bytes_written)
		return;
	/*
	 * P3 to contain the physical address of the panic page & P4 to
	 * contain the size of the panic data in that page. Rest of the
	 * registers are no-op when the NOTIFY_MSG flag is set.
	 */
	hv_set_register(HV_REGISTER_CRASH_P0, 0);
	hv_set_register(HV_REGISTER_CRASH_P1, 0);
	hv_set_register(HV_REGISTER_CRASH_P2, 0);
	hv_set_register(HV_REGISTER_CRASH_P3, virt_to_phys(hv_panic_page));
	hv_set_register(HV_REGISTER_CRASH_P4, bytes_written);

	/*
	 * Let Hyper-V know there is crash data available along with
	 * the panic message.
	 */
	hv_set_register(HV_REGISTER_CRASH_CTL,
			(HV_CRASH_CTL_CRASH_NOTIFY | HV_CRASH_CTL_CRASH_NOTIFY_MSG));
}

static struct kmsg_dumper hv_kmsg_dumper = {
	.dump = hv_kmsg_dump,
};

static void hv_kmsg_dump_register(void)
{
	int ret;

	hv_panic_page = hv_alloc_hyperv_zeroed_page();
	if (!hv_panic_page) {
		pr_err("Hyper-V: panic message page memory allocation failed\n");
		return;
	}

	ret = kmsg_dump_register(&hv_kmsg_dumper);
	if (ret) {
		pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret);
		hv_free_hyperv_page((unsigned long)hv_panic_page);
		hv_panic_page = NULL;
	}
}

static struct ctl_table_header *hv_ctl_table_hdr;

/*
 * sysctl option to allow the user to control whether kmsg data should be
 * reported to Hyper-V on panic.
 */
static struct ctl_table hv_ctl_table[] = {
	{
		.procname	= "hyperv_record_panic_msg",
		.data		= &sysctl_record_panic_msg,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE
	},
	{}
};

static struct ctl_table hv_root_table[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= hv_ctl_table
	},
	{}
};
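
/*
 * Editor's note: once the tables above are registered, the knob appears
 * as /proc/sys/kernel/hyperv_record_panic_msg, and proc_dointvec_minmax
 * clamps writes to the [SYSCTL_ZERO, SYSCTL_ONE] range, e.g.:
 *
 *	# echo 0 > /proc/sys/kernel/hyperv_record_panic_msg
 */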

/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- retrieve the channel offers
 */
static int vmbus_bus_init(void)
{
	int ret;

	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	ret = bus_register(&hv_bus);
	if (ret)
		return ret;

	/*
	 * VMbus interrupts are best modeled as per-cpu interrupts. If
	 * on an architecture with support for per-cpu IRQs (e.g. ARM64),
	 * allocate a per-cpu IRQ using standard Linux kernel functionality.
	 * If not on such an architecture (e.g., x86/x64), then rely on
	 * code in the arch-specific portion of the code tree to connect
	 * the VMbus interrupt handler.
	 */

	if (vmbus_irq == -1) {
		hv_setup_vmbus_handler(vmbus_isr);
	} else {
		vmbus_evt = alloc_percpu(long);
		ret = request_percpu_irq(vmbus_irq, vmbus_percpu_isr,
					 "Hyper-V VMbus", vmbus_evt);
		if (ret) {
			pr_err("Can't request Hyper-V VMbus IRQ %d, Err %d\n",
			       vmbus_irq, ret);
			free_percpu(vmbus_evt);
			goto err_setup;
		}
	}

	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;

	/*
	 * Initialize the per-cpu interrupt state and stimer state.
	 * Then connect to the host.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
				hv_synic_init, hv_synic_cleanup);
	if (ret < 0)
		goto err_cpuhp;
	hyperv_cpuhp_online = ret;

	ret = vmbus_connect();
	if (ret)
		goto err_connect;

	/*
	 * Only register if the crash MSRs are available
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		u64 hyperv_crash_ctl;
		/*
		 * Sysctl registration is not fatal, since by default
		 * reporting is enabled.
		 */
		hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
		if (!hv_ctl_table_hdr)
			pr_err("Hyper-V: sysctl table register error\n");

		/*
		 * Register for panic kmsg callback only if the right
		 * capability is supported by the hypervisor.
		 */
		hyperv_crash_ctl = hv_get_register(HV_REGISTER_CRASH_CTL);
		if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG)
			hv_kmsg_dump_register();

		register_die_notifier(&hyperv_die_block);
	}

	/*
	 * Always register the panic notifier because we need to unload
	 * the VMbus channel connection to prevent any VMbus
	 * activity after the VM panics.
	 */
	atomic_notifier_chain_register(&panic_notifier_list,
				       &hyperv_panic_block);

	vmbus_request_offers();

	return 0;

err_connect:
	cpuhp_remove_state(hyperv_cpuhp_online);
err_cpuhp:
	hv_synic_free();
err_alloc:
	if (vmbus_irq == -1) {
		hv_remove_vmbus_handler();
	} else {
		free_percpu_irq(vmbus_irq, vmbus_evt);
		free_percpu(vmbus_evt);
	}
err_setup:
	bus_unregister(&hv_bus);
	unregister_sysctl_table(hv_ctl_table_hdr);
	hv_ctl_table_hdr = NULL;
	return ret;
}

/**
 * __vmbus_driver_register() - Register a vmbus driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the driver
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 *
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
	int ret;

	pr_info("registering driver %s\n", hv_driver->name);

	ret = vmbus_exists();
	if (ret < 0)
		return ret;

	hv_driver->driver.name = hv_driver->name;
	hv_driver->driver.owner = owner;
	hv_driver->driver.mod_name = mod_name;
	hv_driver->driver.bus = &hv_bus;

	spin_lock_init(&hv_driver->dynids.lock);
	INIT_LIST_HEAD(&hv_driver->dynids.list);

	ret = driver_register(&hv_driver->driver);

	return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
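
/*
 * Editor's note: minimal sketch of how a client driver registers with
 * this bus via the vmbus_driver_register() wrapper macro (which passes
 * THIS_MODULE and KBUILD_MODNAME to __vmbus_driver_register() above).
 * All names and the GUID below are hypothetical, for illustration only.
 */
#if 0
static const struct hv_vmbus_device_id example_id_table[] = {
	/* Hypothetical device-type GUID. */
	{ .guid = GUID_INIT(0x12345678, 0x1234, 0x1234, 0x12, 0x34,
			    0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc) },
	{ },
};
MODULE_DEVICE_TABLE(vmbus, example_id_table);

/* Called by vmbus_probe() when a matching device is offered. */
static int example_probe(struct hv_device *dev,
			 const struct hv_vmbus_device_id *dev_id)
{
	return 0;
}

static int example_remove(struct hv_device *dev)
{
	return 0;
}

static struct hv_driver example_drv = {
	.name = "example",
	.id_table = example_id_table,
	.probe = example_probe,
	.remove = example_remove,
};

static int __init example_init(void)
{
	return vmbus_driver_register(&example_drv);
}
module_init(example_init);

static void __exit example_exit(void)
{
	vmbus_driver_unregister(&example_drv);
}
module_exit(example_exit);
#endif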

/**
 * vmbus_driver_unregister() - Unregister a vmbus driver
 * @hv_driver: Pointer to driver structure you want to
 *		un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists()) {
		driver_unregister(&hv_driver->driver);
		vmbus_free_dynids(hv_driver);
	}
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);


/*
 * Called when last reference to channel is gone.
 */
static void vmbus_chan_release(struct kobject *kobj)
{
	struct vmbus_channel *channel
		= container_of(kobj, struct vmbus_channel, kobj);

	kfree_rcu(channel, rcu);
}

struct vmbus_chan_attribute {
	struct attribute attr;
	ssize_t (*show)(struct vmbus_channel *chan, char *buf);
	ssize_t (*store)(struct vmbus_channel *chan,
			 const char *buf, size_t count);
};
#define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
	struct vmbus_chan_attribute chan_attr_##_name \
		= __ATTR(_name, _mode, _show, _store)
#define VMBUS_CHAN_ATTR_RW(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
#define VMBUS_CHAN_ATTR_RO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
#define VMBUS_CHAN_ATTR_WO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)
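
/*
 * Editor's note: for instance, VMBUS_CHAN_ATTR_RO(out_mask) below expands to
 *
 *	struct vmbus_chan_attribute chan_attr_out_mask = __ATTR_RO(out_mask);
 *
 * which wires up out_mask_show() as the ->show callback that
 * vmbus_chan_attr_show() dispatches to.
 */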

static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	const struct vmbus_chan_attribute *attribute
		= container_of(attr, struct vmbus_chan_attribute, attr);
	struct vmbus_channel *chan
		= container_of(kobj, struct vmbus_channel, kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(chan, buf);
}

static ssize_t vmbus_chan_attr_store(struct kobject *kobj,
				     struct attribute *attr, const char *buf,
				     size_t count)
{
	const struct vmbus_chan_attribute *attribute
		= container_of(attr, struct vmbus_chan_attribute, attr);
	struct vmbus_channel *chan
		= container_of(kobj, struct vmbus_channel, kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(chan, buf, count);
}

static const struct sysfs_ops vmbus_chan_sysfs_ops = {
	.show = vmbus_chan_attr_show,
	.store = vmbus_chan_attr_store,
};

static ssize_t out_mask_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(out_mask);

static ssize_t in_mask_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(in_mask);

static ssize_t read_avail_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(read_avail);

static ssize_t write_avail_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(write_avail);

static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%u\n", channel->target_cpu);
}
static ssize_t target_cpu_store(struct vmbus_channel *channel,
				const char *buf, size_t count)
{
	u32 target_cpu, origin_cpu;
	ssize_t ret = count;

	if (vmbus_proto_version < VERSION_WIN10_V4_1)
		return -EIO;

	if (sscanf(buf, "%u", &target_cpu) != 1)
		return -EIO;

	/* Validate target_cpu for the cpumask_test_cpu() operation below. */
	if (target_cpu >= nr_cpumask_bits)
		return -EINVAL;

	/* No CPUs should come up or down during this. */
	cpus_read_lock();

	if (!cpu_online(target_cpu)) {
		cpus_read_unlock();
		return -EINVAL;
	}

	/*
	 * Synchronizes target_cpu_store() and channel closure:
	 *
	 * { Initially: state = CHANNEL_OPENED }
	 *
	 * CPU1				CPU2
	 *
	 * [target_cpu_store()]		[vmbus_disconnect_ring()]
	 *
	 * LOCK channel_mutex		LOCK channel_mutex
	 * LOAD r1 = state		LOAD r2 = state
	 * IF (r1 == CHANNEL_OPENED)	IF (r2 == CHANNEL_OPENED)
	 *   SEND MODIFYCHANNEL		  STORE state = CHANNEL_OPEN
	 *   [...]			  SEND CLOSECHANNEL
	 * UNLOCK channel_mutex		UNLOCK channel_mutex
	 *
	 * Forbids: r1 == r2 == CHANNEL_OPENED (i.e., CPU1's LOCK precedes
	 *		CPU2's LOCK) && CPU2's SEND precedes CPU1's SEND
	 *
	 * Note.  The host processes the channel messages "sequentially", in
	 * the order in which they are received on a per-partition basis.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);

	/*
	 * Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels;
	 * avoid sending the message and fail here for such channels.
	 */
	if (channel->state != CHANNEL_OPENED_STATE) {
		ret = -EIO;
		goto cpu_store_unlock;
	}

	origin_cpu = channel->target_cpu;
	if (target_cpu == origin_cpu)
		goto cpu_store_unlock;

	if (vmbus_send_modifychannel(channel,
				     hv_cpu_number_to_vp_number(target_cpu))) {
		ret = -EIO;
		goto cpu_store_unlock;
	}

	/*
	 * For version before VERSION_WIN10_V5_3, the following warning holds:
	 *
	 * Warning.  At this point, there is *no* guarantee that the host will
	 * have successfully processed the vmbus_send_modifychannel() request.
	 * See the header comment of vmbus_send_modifychannel() for more info.
	 *
	 * Lags in the processing of the above vmbus_send_modifychannel() can
	 * result in missed interrupts if the "old" target CPU is taken offline
	 * before Hyper-V starts sending interrupts to the "new" target CPU.
	 * But apart from this offlining scenario, the code tolerates such
	 * lags.  It will function correctly even if a channel interrupt comes
	 * in on a CPU that is different from the channel target_cpu value.
	 */

	channel->target_cpu = target_cpu;

	/* See init_vp_index(). */
	if (hv_is_perf_channel(channel))
		hv_update_alloced_cpus(origin_cpu, target_cpu);

	/* Currently set only for storvsc channels. */
	if (channel->change_target_cpu_callback) {
		(*channel->change_target_cpu_callback)(channel,
				origin_cpu, target_cpu);
	}

cpu_store_unlock:
	mutex_unlock(&vmbus_connection.channel_mutex);
	cpus_read_unlock();
	return ret;
}
static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
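
/*
 * Editor's note: example of retargeting a channel's interrupt from user
 * space (illustrative path; requires VMBus protocol >= Win10 v4.1, and
 * the target CPU must be online):
 *
 *	# echo 3 > /sys/bus/vmbus/devices/<device>/channels/<relid>/cpu
 */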

static ssize_t channel_pending_show(struct vmbus_channel *channel,
				    char *buf)
{
	return sprintf(buf, "%d\n",
		       channel_pending(channel,
				       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(pending, 0444, channel_pending_show, NULL);

static ssize_t channel_latency_show(struct vmbus_channel *channel,
				    char *buf)
{
	return sprintf(buf, "%d\n",
		       channel_latency(channel,
				       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(latency, 0444, channel_latency_show, NULL);

static ssize_t channel_interrupts_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%llu\n", channel->interrupts);
}
static VMBUS_CHAN_ATTR(interrupts, 0444, channel_interrupts_show, NULL);

static ssize_t channel_events_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%llu\n", channel->sig_events);
}
static VMBUS_CHAN_ATTR(events, 0444, channel_events_show, NULL);

static ssize_t channel_intr_in_full_show(struct vmbus_channel *channel,
					 char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->intr_in_full);
}
static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);

static ssize_t channel_intr_out_empty_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->intr_out_empty);
}
static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);

static ssize_t channel_out_full_first_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->out_full_first);
}
static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);

static ssize_t channel_out_full_total_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->out_full_total);
}
static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);

static ssize_t subchannel_monitor_id_show(struct vmbus_channel *channel,
					  char *buf)
{
	return sprintf(buf, "%u\n", channel->offermsg.monitorid);
}
static VMBUS_CHAN_ATTR(monitor_id, 0444, subchannel_monitor_id_show, NULL);

static ssize_t subchannel_id_show(struct vmbus_channel *channel,
				  char *buf)
{
	return sprintf(buf, "%u\n",
		       channel->offermsg.offer.sub_channel_index);
}
static VMBUS_CHAN_ATTR_RO(subchannel_id);

static struct attribute *vmbus_chan_attrs[] = {
	&chan_attr_out_mask.attr,
	&chan_attr_in_mask.attr,
	&chan_attr_read_avail.attr,
	&chan_attr_write_avail.attr,
	&chan_attr_cpu.attr,
	&chan_attr_pending.attr,
	&chan_attr_latency.attr,
	&chan_attr_interrupts.attr,
	&chan_attr_events.attr,
	&chan_attr_intr_in_full.attr,
	&chan_attr_intr_out_empty.attr,
	&chan_attr_out_full_first.attr,
	&chan_attr_out_full_total.attr,
	&chan_attr_monitor_id.attr,
	&chan_attr_subchannel_id.attr,
	NULL
};

/*
 * Channel-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
					  struct attribute *attr, int idx)
{
	const struct vmbus_channel *channel =
		container_of(kobj, struct vmbus_channel, kobj);

	/* Hide the monitor attributes if the monitor mechanism is not used. */
	if (!channel->offermsg.monitor_allocated &&
	    (attr == &chan_attr_pending.attr ||
	     attr == &chan_attr_latency.attr ||
	     attr == &chan_attr_monitor_id.attr))
		return 0;

	return attr->mode;
}

static struct attribute_group vmbus_chan_group = {
	.attrs = vmbus_chan_attrs,
	.is_visible = vmbus_chan_attr_is_visible
};

static struct kobj_type vmbus_chan_ktype = {
	.sysfs_ops = &vmbus_chan_sysfs_ops,
	.release = vmbus_chan_release,
};

/*
 * vmbus_add_channel_kobj - setup a sub-directory under device/channels
 */
int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
{
	const struct device *device = &dev->device;
	struct kobject *kobj = &channel->kobj;
	u32 relid = channel->offermsg.child_relid;
	int ret;

	kobj->kset = dev->channels_kset;
	ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
				   "%u", relid);
	if (ret)
		return ret;

	ret = sysfs_create_group(kobj, &vmbus_chan_group);

	if (ret) {
2039 /*
2040		 * The calling functions' error handling paths will clean up the
2041 * empty channel directory.
2042 */
2043 dev_err(device, "Unable to set up channel sysfs files\n");
2044 return ret;
2045 }
2046
2047 kobject_uevent(kobj, KOBJ_ADD);
2048
2049 return 0;
2050}
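/*
 * Resulting sysfs layout (illustrative, for a channel with relid 15):
 *
 *	/sys/bus/vmbus/devices/<device GUID>/channels/15/cpu
 *	/sys/bus/vmbus/devices/<device GUID>/channels/15/interrupts
 *	...
 *
 * One directory is created per relid, populated from vmbus_chan_attrs[]
 * subject to vmbus_chan_attr_is_visible().
 */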
2051
2052/*
2053 * vmbus_remove_channel_attr_group - remove the channel's attribute group
2054 */
2055void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
2056{
2057 sysfs_remove_group(&channel->kobj, &vmbus_chan_group);
2058}
2059
2060/*
2061 * vmbus_device_create - Creates and registers a new child device
2062 * on the vmbus.
2063 */
2064struct hv_device *vmbus_device_create(const guid_t *type,
2065 const guid_t *instance,
2066 struct vmbus_channel *channel)
2067{
2068 struct hv_device *child_device_obj;
2069
2070 child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
2071 if (!child_device_obj) {
2072 pr_err("Unable to allocate device object for child device\n");
2073 return NULL;
2074 }
2075
2076 child_device_obj->channel = channel;
2077 guid_copy(&child_device_obj->dev_type, type);
2078 guid_copy(&child_device_obj->dev_instance, instance);
2079 child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */
2080
2081 return child_device_obj;
2082}
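/*
 * Note: the object returned by vmbus_device_create() is not yet live;
 * it is a plain allocation until the caller passes it to
 * vmbus_device_register() below. After registration it must only be
 * freed through the release callback (vmbus_device_release) that
 * vmbus_device_register() installs.
 */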
2083
2084/*
2085 * vmbus_device_register - Register the child device
2086 */
2087int vmbus_device_register(struct hv_device *child_device_obj)
2088{
2089 struct kobject *kobj = &child_device_obj->device.kobj;
2090 int ret;
2091
2092 dev_set_name(&child_device_obj->device, "%pUl",
2093 &child_device_obj->channel->offermsg.offer.if_instance);
2094
2095 child_device_obj->device.bus = &hv_bus;
2096 child_device_obj->device.parent = &hv_acpi_dev->dev;
2097 child_device_obj->device.release = vmbus_device_release;
2098
2099 /*
2100 * Register with the LDM. This will kick off the driver/device
2101 * binding...which will eventually call vmbus_match() and vmbus_probe()
2102 */
2103 ret = device_register(&child_device_obj->device);
2104 if (ret) {
2105 pr_err("Unable to register child device\n");
2106 return ret;
2107 }
2108
2109 child_device_obj->channels_kset = kset_create_and_add("channels",
2110 NULL, kobj);
2111 if (!child_device_obj->channels_kset) {
2112 ret = -ENOMEM;
2113 goto err_dev_unregister;
2114 }
2115
2116 ret = vmbus_add_channel_kobj(child_device_obj,
2117 child_device_obj->channel);
2118 if (ret) {
2119		pr_err("Unable to register primary channel\n");
2120 goto err_kset_unregister;
2121 }
2122 hv_debug_add_dev_dir(child_device_obj);
2123
2124 return 0;
2125
2126err_kset_unregister:
2127 kset_unregister(child_device_obj->channels_kset);
2128
2129err_dev_unregister:
2130 device_unregister(&child_device_obj->device);
2131 return ret;
2132}
2133
2134/*
2135 * vmbus_device_unregister - Remove the specified child device
2136 * from the vmbus.
2137 */
2138void vmbus_device_unregister(struct hv_device *device_obj)
2139{
2140 pr_debug("child device %s unregistered\n",
2141 dev_name(&device_obj->device));
2142
2143 kset_unregister(device_obj->channels_kset);
2144
2145 /*
2146 * Kick off the process of unregistering the device.
2147 * This will call vmbus_remove() and eventually vmbus_device_release()
2148 */
2149 device_unregister(&device_obj->device);
2150}
2151
2152
2153/*
2154 * VMBUS is an acpi enumerated device. Get the information we
2155 * need from DSDT.
2156 */
2157#define VTPM_BASE_ADDRESS 0xfed40000
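/*
 * 0xfed40000 is the industry-standard TPM MMIO base (TIS/CRB). The
 * resource walk below truncates any bus window that crosses it, so the
 * virtual TPM's register range is never handed out as VMBus MMIO.
 */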
2158static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
2159{
2160 resource_size_t start = 0;
2161 resource_size_t end = 0;
2162 struct resource *new_res;
2163 struct resource **old_res = &hyperv_mmio;
2164 struct resource **prev_res = NULL;
2165 struct resource r;
2166
2167 switch (res->type) {
2168
2169 /*
2170 * "Address" descriptors are for bus windows. Ignore
2171 * "memory" descriptors, which are for registers on
2172 * devices.
2173 */
2174 case ACPI_RESOURCE_TYPE_ADDRESS32:
2175 start = res->data.address32.address.minimum;
2176 end = res->data.address32.address.maximum;
2177 break;
2178
2179 case ACPI_RESOURCE_TYPE_ADDRESS64:
2180 start = res->data.address64.address.minimum;
2181 end = res->data.address64.address.maximum;
2182 break;
2183
2184 /*
2185 * The IRQ information is needed only on ARM64, which Hyper-V
2186 * sets up in the extended format. IRQ information is present
2187 * on x86/x64 in the non-extended format but it is not used by
2188 * Linux. So don't bother checking for the non-extended format.
2189 */
2190 case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
2191 if (!acpi_dev_resource_interrupt(res, 0, &r)) {
2192 pr_err("Unable to parse Hyper-V ACPI interrupt\n");
2193 return AE_ERROR;
2194 }
2195 /* ARM64 INTID for VMbus */
2196 vmbus_interrupt = res->data.extended_irq.interrupts[0];
2197 /* Linux IRQ number */
2198 vmbus_irq = r.start;
2199 return AE_OK;
2200
2201 default:
2202 /* Unused resource type */
2203 return AE_OK;
2204
2205 }
2206 /*
2207 * Ignore ranges that are below 1MB, as they're not
2208 * necessary or useful here.
2209 */
2210 if (end < 0x100000)
2211 return AE_OK;
2212
2213 new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
2214 if (!new_res)
2215 return AE_NO_MEMORY;
2216
2217 /* If this range overlaps the virtual TPM, truncate it. */
2218 if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
2219 end = VTPM_BASE_ADDRESS;
2220
2221 new_res->name = "hyperv mmio";
2222 new_res->flags = IORESOURCE_MEM;
2223 new_res->start = start;
2224 new_res->end = end;
2225
2226 /*
2227 * If two ranges are adjacent, merge them.
2228 */
2229 do {
2230 if (!*old_res) {
2231 *old_res = new_res;
2232 break;
2233 }
2234
2235 if (((*old_res)->end + 1) == new_res->start) {
2236 (*old_res)->end = new_res->end;
2237 kfree(new_res);
2238 break;
2239 }
2240
2241 if ((*old_res)->start == new_res->end + 1) {
2242 (*old_res)->start = new_res->start;
2243 kfree(new_res);
2244 break;
2245 }
2246
2247 if ((*old_res)->start > new_res->end) {
2248 new_res->sibling = *old_res;
2249 if (prev_res)
2250 (*prev_res)->sibling = new_res;
2251 *old_res = new_res;
2252 break;
2253 }
2254
2255 prev_res = old_res;
2256 old_res = &(*old_res)->sibling;
2257
2258 } while (1);
2259
2260 return AE_OK;
2261}
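/*
 * Worked example for the merge logic above (made-up addresses): if the
 * DSDT reports windows {0xf8000000-0xfbffffff} and
 * {0xfc000000-0xffffffff}, the second walk hits the
 * "(*old_res)->end + 1 == new_res->start" case and the two coalesce
 * into one hyperv_mmio node {0xf8000000-0xffffffff}. Non-adjacent
 * windows are kept on the singly linked ->sibling list, sorted by
 * start address.
 */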
2262
2263static int vmbus_acpi_remove(struct acpi_device *device)
2264{
2265 struct resource *cur_res;
2266 struct resource *next_res;
2267
2268 if (hyperv_mmio) {
2269 if (fb_mmio) {
2270 __release_region(hyperv_mmio, fb_mmio->start,
2271 resource_size(fb_mmio));
2272 fb_mmio = NULL;
2273 }
2274
2275 for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
2276 next_res = cur_res->sibling;
2277 kfree(cur_res);
2278 }
2279 }
2280
2281 return 0;
2282}
2283
2284static void vmbus_reserve_fb(void)
2285{
2286 int size;
2287 /*
2288 * Make a claim for the frame buffer in the resource tree under the
2289 * first node, which will be the one below 4GB. The length seems to
2290 * be underreported, particularly in a Generation 1 VM. So start out
2291 * reserving a larger area and make it smaller until it succeeds.
2292 */
2293
2294 if (screen_info.lfb_base) {
2295 if (efi_enabled(EFI_BOOT))
2296 size = max_t(__u32, screen_info.lfb_size, 0x800000);
2297 else
2298 size = max_t(__u32, screen_info.lfb_size, 0x4000000);
2299
2300 for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
2301 fb_mmio = __request_region(hyperv_mmio,
2302 screen_info.lfb_base, size,
2303 fb_mmio_name, 0);
2304 }
2305 }
2306}
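/*
 * Example of the halving loop above (legacy boot, lfb_size below
 * 64 MiB): the first attempt reserves 0x4000000 bytes at lfb_base; on
 * failure it retries with 0x2000000, 0x1000000, ... down to 0x100000
 * before giving up and leaving fb_mmio NULL.
 */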
2307
2308/**
2309 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
2310 * @new:			If successful, supplies a pointer to the
2311 * allocated MMIO space.
2312 * @device_obj: Identifies the caller
2313 * @min: Minimum guest physical address of the
2314 * allocation
2315 * @max: Maximum guest physical address
2316 * @size: Size of the range to be allocated
2317 * @align: Alignment of the range to be allocated
2318 * @fb_overlap_ok: Whether this allocation can be allowed
2319 * to overlap the video frame buffer.
2320 *
2321 * This function walks the resources granted to VMBus by the
2322 * _CRS object in the ACPI namespace underneath the parent
2323 * "bridge" whether that's a root PCI bus in the Generation 1
2324 * case or a Module Device in the Generation 2 case. It then
2325 * attempts to allocate from the global MMIO pool in a way that
2326 * matches the constraints supplied in these parameters and by
2327 * that _CRS.
2328 *
2329 * Return: 0 on success, -errno on failure
2330 */
2331int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
2332 resource_size_t min, resource_size_t max,
2333 resource_size_t size, resource_size_t align,
2334 bool fb_overlap_ok)
2335{
2336 struct resource *iter, *shadow;
2337 resource_size_t range_min, range_max, start;
2338 const char *dev_n = dev_name(&device_obj->device);
2339 int retval;
2340
2341 retval = -ENXIO;
2342 mutex_lock(&hyperv_mmio_lock);
2343
2344 /*
2345 * If overlaps with frame buffers are allowed, then first attempt to
2346 * make the allocation from within the reserved region. Because it
2347 * is already reserved, no shadow allocation is necessary.
2348 */
2349 if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
2350 !(max < fb_mmio->start)) {
2351
2352 range_min = fb_mmio->start;
2353 range_max = fb_mmio->end;
2354 start = (range_min + align - 1) & ~(align - 1);
2355 for (; start + size - 1 <= range_max; start += align) {
2356 *new = request_mem_region_exclusive(start, size, dev_n);
2357 if (*new) {
2358 retval = 0;
2359 goto exit;
2360 }
2361 }
2362 }
2363
2364 for (iter = hyperv_mmio; iter; iter = iter->sibling) {
2365 if ((iter->start >= max) || (iter->end <= min))
2366 continue;
2367
2368 range_min = iter->start;
2369 range_max = iter->end;
2370 start = (range_min + align - 1) & ~(align - 1);
2371 for (; start + size - 1 <= range_max; start += align) {
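			/*
			 * Claim the window twice: a "shadow" claim in the
			 * VMBus-owned hyperv_mmio tree (so later callers skip
			 * this window), then the real, exclusive claim in the
			 * global iomem tree. The shadow's ->name is pointed at
			 * the real resource to associate the pair;
			 * vmbus_free_mmio() later releases both by the same
			 * (start, size).
			 */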
2372 shadow = __request_region(iter, start, size, NULL,
2373 IORESOURCE_BUSY);
2374 if (!shadow)
2375 continue;
2376
2377 *new = request_mem_region_exclusive(start, size, dev_n);
2378 if (*new) {
2379 shadow->name = (char *)*new;
2380 retval = 0;
2381 goto exit;
2382 }
2383
2384 __release_region(iter, start, size);
2385 }
2386 }
2387
2388exit:
2389 mutex_unlock(&hyperv_mmio_lock);
2390 return retval;
2391}
2392EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
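/*
 * Typical caller pattern (illustrative sizes and names; this mirrors
 * how the paravirtual framebuffer and PCI front-ends consume the API):
 *
 *	struct resource *res;
 *	int ret;
 *
 *	ret = vmbus_allocate_mmio(&res, hdev, 0, -1, 0x800000, 0x100000,
 *				  true);
 *	if (ret)
 *		return ret;
 *	...
 *	vmbus_free_mmio(res->start, resource_size(res));
 */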
2393
2394/**
2395 * vmbus_free_mmio() - Free a memory-mapped I/O range.
2396 * @start: Base address of region to release.
2397 * @size: Size of the range to be released
2398 *
2399 * This function releases anything requested by
2400 * vmbus_allocate_mmio().
2401 */
2402void vmbus_free_mmio(resource_size_t start, resource_size_t size)
2403{
2404 struct resource *iter;
2405
2406 mutex_lock(&hyperv_mmio_lock);
2407 for (iter = hyperv_mmio; iter; iter = iter->sibling) {
2408 if ((iter->start >= start + size) || (iter->end <= start))
2409 continue;
2410
2411 __release_region(iter, start, size);
2412 }
2413 release_mem_region(start, size);
2414 mutex_unlock(&hyperv_mmio_lock);
2415
2416}
2417EXPORT_SYMBOL_GPL(vmbus_free_mmio);
2418
2419static int vmbus_acpi_add(struct acpi_device *device)
2420{
2421 acpi_status result;
2422 int ret_val = -ENODEV;
2423 struct acpi_device *ancestor;
2424
2425 hv_acpi_dev = device;
2426
2427 result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
2428 vmbus_walk_resources, NULL);
2429
2430 if (ACPI_FAILURE(result))
2431 goto acpi_walk_err;
2432 /*
2433 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
2434 * firmware) is the VMOD that has the mmio ranges. Get that.
2435 */
2436 for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
2437 result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
2438 vmbus_walk_resources, NULL);
2439
2440 if (ACPI_FAILURE(result))
2441 continue;
2442 if (hyperv_mmio) {
2443 vmbus_reserve_fb();
2444 break;
2445 }
2446 }
2447 ret_val = 0;
2448
2449acpi_walk_err:
2450 complete(&probe_event);
2451 if (ret_val)
2452 vmbus_acpi_remove(device);
2453 return ret_val;
2454}
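/*
 * Note: complete(&probe_event) above is what unblocks the 5-second
 * wait_for_completion_timeout() in hv_acpi_init() below. It fires even
 * on failure so the init path cannot hang; the error itself is returned
 * to the ACPI core.
 */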
2455
2456#ifdef CONFIG_PM_SLEEP
2457static int vmbus_bus_suspend(struct device *dev)
2458{
2459 struct vmbus_channel *channel, *sc;
2460
2461 while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
2462 /*
2463 * We wait here until the completion of any channel
2464 * offers that are currently in progress.
2465 */
2466 usleep_range(1000, 2000);
2467 }
2468
2469 mutex_lock(&vmbus_connection.channel_mutex);
2470 list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
2471 if (!is_hvsock_channel(channel))
2472 continue;
2473
2474 vmbus_force_channel_rescinded(channel);
2475 }
2476 mutex_unlock(&vmbus_connection.channel_mutex);
2477
2478 /*
2479 * Wait until all the sub-channels and hv_sock channels have been
2480 * cleaned up. Sub-channels should be destroyed upon suspend, otherwise
2481 * they would conflict with the new sub-channels that will be created
2482 * in the resume path. hv_sock channels should also be destroyed, but
2483 * a hv_sock channel of an established hv_sock connection can not be
2484 * really destroyed since it may still be referenced by the userspace
2485 * application, so we just force the hv_sock channel to be rescinded
2486 * by vmbus_force_channel_rescinded(), and the userspace application
2487 * will thoroughly destroy the channel after hibernation.
2488 *
2489 * Note: the counter nr_chan_close_on_suspend may never go above 0 if
2490	 * the VM has no sub-channels and no hv_sock channels, e.g. a 1-vCPU VM.
2491 */
2492 if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
2493 wait_for_completion(&vmbus_connection.ready_for_suspend_event);
2494
2495 if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
2496		pr_err("Cannot suspend due to a previously failed resume\n");
2497 return -EBUSY;
2498 }
2499
2500 mutex_lock(&vmbus_connection.channel_mutex);
2501
2502 list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
2503 /*
2504 * Remove the channel from the array of channels and invalidate
2505 * the channel's relid. Upon resume, vmbus_onoffer() will fix
2506 * up the relid (and other fields, if necessary) and add the
2507 * channel back to the array.
2508 */
2509 vmbus_channel_unmap_relid(channel);
2510 channel->offermsg.child_relid = INVALID_RELID;
2511
2512 if (is_hvsock_channel(channel)) {
2513 if (!channel->rescind) {
2514 pr_err("hv_sock channel not rescinded!\n");
2515 WARN_ON_ONCE(1);
2516 }
2517 continue;
2518 }
2519
2520 list_for_each_entry(sc, &channel->sc_list, sc_list) {
2521 pr_err("Sub-channel not deleted!\n");
2522 WARN_ON_ONCE(1);
2523 }
2524
2525 atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
2526 }
2527
2528 mutex_unlock(&vmbus_connection.channel_mutex);
2529
2530 vmbus_initiate_unload(false);
2531
2532 /* Reset the event for the next resume. */
2533 reinit_completion(&vmbus_connection.ready_for_resume_event);
2534
2535 return 0;
2536}
2537
2538static int vmbus_bus_resume(struct device *dev)
2539{
2540 struct vmbus_channel_msginfo *msginfo;
2541 size_t msgsize;
2542 int ret;
2543
2544 /*
2545 * We only use the 'vmbus_proto_version', which was in use before
2546 * hibernation, to re-negotiate with the host.
2547 */
2548 if (!vmbus_proto_version) {
2549 pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version);
2550 return -EINVAL;
2551 }
2552
2553 msgsize = sizeof(*msginfo) +
2554 sizeof(struct vmbus_channel_initiate_contact);
2555
2556 msginfo = kzalloc(msgsize, GFP_KERNEL);
2557
2558 if (msginfo == NULL)
2559 return -ENOMEM;
2560
2561 ret = vmbus_negotiate_version(msginfo, vmbus_proto_version);
2562
2563 kfree(msginfo);
2564
2565 if (ret != 0)
2566 return ret;
2567
2568 WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0);
2569
2570 vmbus_request_offers();
2571
2572 if (wait_for_completion_timeout(
2573 &vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
2574		pr_err("A vmbus device present before suspend is missing after resume\n");
2575
2576 /* Reset the event for the next suspend. */
2577 reinit_completion(&vmbus_connection.ready_for_suspend_event);
2578
2579 return 0;
2580}
2581#else
2582#define vmbus_bus_suspend NULL
2583#define vmbus_bus_resume NULL
2584#endif /* CONFIG_PM_SLEEP */
2585
2586static const struct acpi_device_id vmbus_acpi_device_ids[] = {
2587 {"VMBUS", 0},
2588 {"VMBus", 0},
2589 {"", 0},
2590};
2591MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
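/*
 * Both spellings are listed because Hyper-V firmware has exposed the
 * ACPI device with either casing, depending on the VM generation and
 * firmware version.
 */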
2592
2593/*
2594 * Note: we must use the "noirq" ops, otherwise hibernation cannot work with
2595 * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in
2596 * the resume path, the pci "noirq" restore op runs before the "non-noirq" ops (see
2597 * resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() ->
2598 * dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's
2599 * resume callback must also run via the "noirq" ops.
2600 *
2601 * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment
2602 * earlier in this file before vmbus_pm.
2603 */
2604
2605static const struct dev_pm_ops vmbus_bus_pm = {
2606 .suspend_noirq = NULL,
2607 .resume_noirq = NULL,
2608 .freeze_noirq = vmbus_bus_suspend,
2609 .thaw_noirq = vmbus_bus_resume,
2610 .poweroff_noirq = vmbus_bus_suspend,
2611 .restore_noirq = vmbus_bus_resume
2612};
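/*
 * How these map onto hibernation: freeze_noirq runs before the snapshot
 * image is created, thaw_noirq after it has been created (so the image
 * can be written out), poweroff_noirq just before the machine powers
 * down, and restore_noirq in the restored kernel once the loaded image
 * resumes. Suspend-to-Idle intentionally uses none of these, hence the
 * NULL suspend_noirq/resume_noirq entries.
 */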
2613
2614static struct acpi_driver vmbus_acpi_driver = {
2615 .name = "vmbus",
2616 .ids = vmbus_acpi_device_ids,
2617 .ops = {
2618 .add = vmbus_acpi_add,
2619 .remove = vmbus_acpi_remove,
2620 },
2621 .drv.pm = &vmbus_bus_pm,
2622};
2623
2624static void hv_kexec_handler(void)
2625{
2626 hv_stimer_global_cleanup();
2627 vmbus_initiate_unload(false);
2628 /* Make sure conn_state is set as hv_synic_cleanup checks for it */
2629 mb();
2630 cpuhp_remove_state(hyperv_cpuhp_online);
2631}
2632
2633static void hv_crash_handler(struct pt_regs *regs)
2634{
2635 int cpu;
2636
2637 vmbus_initiate_unload(true);
2638 /*
2639 * In crash handler we can't schedule synic cleanup for all CPUs,
2640 * doing the cleanup for current CPU only. This should be sufficient
2641 * for kdump.
2642 */
2643 cpu = smp_processor_id();
2644 hv_stimer_cleanup(cpu);
2645 hv_synic_disable_regs(cpu);
2646}
2647
2648static int hv_synic_suspend(void)
2649{
2650 /*
2651 * When we reach here, all the non-boot CPUs have been offlined.
2652 * If we're in a legacy configuration where stimer Direct Mode is
2653 * not enabled, the stimers on the non-boot CPUs have been unbound
2654 * in hv_synic_cleanup() -> hv_stimer_legacy_cleanup() ->
2655 * hv_stimer_cleanup() -> clockevents_unbind_device().
2656 *
2657 * hv_synic_suspend() only runs on CPU0 with interrupts disabled.
2658 * Here we do not call hv_stimer_legacy_cleanup() on CPU0 because:
2659 * 1) it's unnecessary as interrupts remain disabled between
2660 * syscore_suspend() and syscore_resume(): see create_image() and
2661 * resume_target_kernel()
2662 * 2) the stimer on CPU0 is automatically disabled later by
2663 * syscore_suspend() -> timekeeping_suspend() -> tick_suspend() -> ...
2664 * -> clockevents_shutdown() -> ... -> hv_ce_shutdown()
2665 * 3) a warning would be triggered if we call
2666 * clockevents_unbind_device(), which may sleep, in an
2667 * interrupts-disabled context.
2668 */
2669
2670 hv_synic_disable_regs(0);
2671
2672 return 0;
2673}
2674
2675static void hv_synic_resume(void)
2676{
2677 hv_synic_enable_regs(0);
2678
2679 /*
2680 * Note: we don't need to call hv_stimer_init(0), because the timer
2681 * on CPU0 is not unbound in hv_synic_suspend(), and the timer is
2682 * automatically re-enabled in timekeeping_resume().
2683 */
2684}
2685
2686/* The callbacks run only on CPU0, with irqs_disabled. */
2687static struct syscore_ops hv_synic_syscore_ops = {
2688 .suspend = hv_synic_suspend,
2689 .resume = hv_synic_resume,
2690};
2691
2692static int __init hv_acpi_init(void)
2693{
2694 int ret, t;
2695
2696 if (!hv_is_hyperv_initialized())
2697 return -ENODEV;
2698
2699 if (hv_root_partition)
2700 return 0;
2701
2702 init_completion(&probe_event);
2703
2704 /*
2705 * Get ACPI resources first.
2706 */
2707 ret = acpi_bus_register_driver(&vmbus_acpi_driver);
2708
2709 if (ret)
2710 return ret;
2711
2712 t = wait_for_completion_timeout(&probe_event, 5*HZ);
2713 if (t == 0) {
2714 ret = -ETIMEDOUT;
2715 goto cleanup;
2716 }
2717
2718 /*
2719 * If we're on an architecture with a hardcoded hypervisor
2720 * vector (i.e. x86/x64), override the VMbus interrupt found
2721 * in the ACPI tables. Ensure vmbus_irq is not set since the
2722 * normal Linux IRQ mechanism is not used in this case.
2723 */
2724#ifdef HYPERVISOR_CALLBACK_VECTOR
2725 vmbus_interrupt = HYPERVISOR_CALLBACK_VECTOR;
2726 vmbus_irq = -1;
2727#endif
2728
2729 hv_debug_init();
2730
2731 ret = vmbus_bus_init();
2732 if (ret)
2733 goto cleanup;
2734
2735 hv_setup_kexec_handler(hv_kexec_handler);
2736 hv_setup_crash_handler(hv_crash_handler);
2737
2738 register_syscore_ops(&hv_synic_syscore_ops);
2739
2740 return 0;
2741
2742cleanup:
2743 acpi_bus_unregister_driver(&vmbus_acpi_driver);
2744 hv_acpi_dev = NULL;
2745 return ret;
2746}
2747
2748static void __exit vmbus_exit(void)
2749{
2750 int cpu;
2751
2752 unregister_syscore_ops(&hv_synic_syscore_ops);
2753
2754 hv_remove_kexec_handler();
2755 hv_remove_crash_handler();
2756 vmbus_connection.conn_state = DISCONNECTED;
2757 hv_stimer_global_cleanup();
2758 vmbus_disconnect();
2759 if (vmbus_irq == -1) {
2760 hv_remove_vmbus_handler();
2761 } else {
2762 free_percpu_irq(vmbus_irq, vmbus_evt);
2763 free_percpu(vmbus_evt);
2764 }
2765 for_each_online_cpu(cpu) {
2766 struct hv_per_cpu_context *hv_cpu
2767 = per_cpu_ptr(hv_context.cpu_context, cpu);
2768
2769 tasklet_kill(&hv_cpu->msg_dpc);
2770 }
2771 hv_debug_rm_all_dir();
2772
2773 vmbus_free_channels();
2774 kfree(vmbus_connection.channels);
2775
2776 if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
2777 kmsg_dump_unregister(&hv_kmsg_dumper);
2778 unregister_die_notifier(&hyperv_die_block);
2779 atomic_notifier_chain_unregister(&panic_notifier_list,
2780 &hyperv_panic_block);
2781 }
2782
2783 free_page((unsigned long)hv_panic_page);
2784 unregister_sysctl_table(hv_ctl_table_hdr);
2785 hv_ctl_table_hdr = NULL;
2786 bus_unregister(&hv_bus);
2787
2788 cpuhp_remove_state(hyperv_cpuhp_online);
2789 hv_synic_free();
2790 acpi_bus_unregister_driver(&vmbus_acpi_driver);
2791}
2792
2793
2794MODULE_LICENSE("GPL");
2795MODULE_DESCRIPTION("Microsoft Hyper-V VMBus Driver");
2796
2797subsys_initcall(hv_acpi_init);
2798module_exit(vmbus_exit);