/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/sched/task_stack.h>

#include <asm/mshyperv.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
#include <linux/efi.h>
#include <linux/random.h>
#include "hyperv_vmbus.h"

struct vmbus_dynid {
	struct list_head node;
	struct hv_vmbus_device_id id;
};

static struct acpi_device *hv_acpi_dev;

static struct completion probe_event;

static int hyperv_cpuhp_online;

static void *hv_panic_page;

static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
			      void *args)
{
	struct pt_regs *regs;

	regs = current_pt_regs();

	hyperv_report_panic(regs, val);
	return NOTIFY_DONE;
}

static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
			    void *args)
{
	struct die_args *die = (struct die_args *)args;
	struct pt_regs *regs = die->regs;

	hyperv_report_panic(regs, val);
	return NOTIFY_DONE;
}

static struct notifier_block hyperv_die_block = {
	.notifier_call = hyperv_die_event,
};
static struct notifier_block hyperv_panic_block = {
	.notifier_call = hyperv_panic_event,
};

static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_SEMAPHORE(hyperv_mmio_lock);

static int vmbus_exists(void)
{
	if (hv_acpi_dev == NULL)
		return -ENODEV;

	return 0;
}

#define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2)
static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
{
	int i;

	for (i = 0; i < VMBUS_ALIAS_LEN; i += 2)
		sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
}
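
/*
 * Example (illustrative, not from this file): the guid field is 16 bytes,
 * so VMBUS_ALIAS_LEN is 32 and the alias is 32 hex digits in memory order.
 * For the synthetic-NIC class GUID f8615163-df3e-46c5-913f-f2d2f965ed0e,
 * stored little-endian, b[0..3] are 63 51 61 f8 and the modalias reads
 * "vmbus:635161f83edfc546913ff2d2f965ed0e".
 */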

static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid % 32;
}

static u32 channel_pending(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);

	return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}
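
/*
 * The helpers above decode the monitor ID carried in the channel offer:
 * monitor bits are grouped 32 to a trigger group, so monitorid / 32
 * selects the group and monitorid % 32 the offset within it. For example,
 * monitor ID 70 maps to group 2, offset 6.
 */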

static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_type.b);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       hv_dev->channel->offermsg.offer.if_instance.b);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(hv_dev, alias_name);
	return sprintf(buf, "vmbus:%s\n", alias_name);
}
static DEVICE_ATTR_RO(modalias);

#ifdef CONFIG_NUMA
static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;

	return sprintf(buf, "%d\n", hv_dev->channel->numa_node);
}
static DEVICE_ATTR_RO(numa_node);
#endif

static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);

static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);

static ssize_t channel_vp_mapping_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
	unsigned long flags;
	int buf_size = PAGE_SIZE, n_written, tot_written;
	struct list_head *cur;

	if (!channel)
		return -ENODEV;

	tot_written = snprintf(buf, buf_size, "%u:%u\n",
			       channel->offermsg.child_relid,
			       channel->target_cpu);

	spin_lock_irqsave(&channel->lock, flags);

	list_for_each(cur, &channel->sc_list) {
		if (tot_written >= buf_size - 1)
			break;

		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
		n_written = scnprintf(buf + tot_written,
				      buf_size - tot_written,
				      "%u:%u\n",
				      cur_sc->offermsg.child_relid,
				      cur_sc->target_cpu);
		tot_written += n_written;
	}

	spin_unlock_irqrestore(&channel->lock, flags);

	return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);
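
/*
 * Illustrative channel_vp_mapping output (values hypothetical): one
 * "relid:cpu" pair per line, primary channel first, e.g.
 *
 *	14:0
 *	15:1
 *	16:2
 *
 * meaning the primary channel (relid 14) targets VP 0 and its two
 * sub-channels target VPs 1 and 2.
 */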

static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = hv_dev->driver_override;
	if (strlen(driver_override)) {
		hv_dev->driver_override = driver_override;
	} else {
		kfree(driver_override);
		hv_dev->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
	device_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(driver_override);
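
/*
 * Typical driver_override use from user space (sketch; the device GUID
 * and target driver are hypothetical):
 *
 *	echo uio_hv_generic > /sys/bus/vmbus/devices/<guid>/driver_override
 *	echo <guid> > /sys/bus/vmbus/drivers/hv_netvsc/unbind
 *	echo <guid> > /sys/bus/vmbus/drivers/uio_hv_generic/bind
 *
 * Writing an empty string (or a bare newline) clears the override.
 */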

/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_dev_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	&dev_attr_channel_vp_mapping.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_driver_override.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus_dev);

/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in user space. udev will then consult its rules
 * and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid, where guid is the string
 * representation of the device guid (each byte of the guid is represented
 * with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
	struct hv_device *dev = device_to_hv_device(device);
	int ret;
	char alias_name[VMBUS_ALIAS_LEN + 1];

	print_alias_name(dev, alias_name);
	ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name);
	return ret;
}

static const uuid_le null_guid;

static inline bool is_null_guid(const uuid_le *guid)
{
	if (uuid_le_cmp(*guid, null_guid))
		return false;
	return true;
}

static const struct hv_vmbus_device_id *
hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const uuid_le *guid)
{
	if (id == NULL)
		return NULL; /* empty device table */

	for (; !is_null_guid(&id->guid); id++)
		if (!uuid_le_cmp(id->guid, *guid))
			return id;

	return NULL;
}

static const struct hv_vmbus_device_id *
hv_vmbus_dynid_match(struct hv_driver *drv, const uuid_le *guid)
{
	const struct hv_vmbus_device_id *id = NULL;
	struct vmbus_dynid *dynid;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry(dynid, &drv->dynids.list, node) {
		if (!uuid_le_cmp(dynid->id.guid, *guid)) {
			id = &dynid->id;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return id;
}

static const struct hv_vmbus_device_id vmbus_device_null = {
	.guid = NULL_UUID_LE,
};

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
							struct hv_device *dev)
{
	const uuid_le *guid = &dev->dev_type;
	const struct hv_vmbus_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (dev->driver_override && strcmp(dev->driver_override, drv->name))
		return NULL;

	/* Look at the dynamic ids first, before the static ones */
	id = hv_vmbus_dynid_match(drv, guid);
	if (!id)
		id = hv_vmbus_dev_match(drv->id_table, guid);

	/* driver_override will always match, send a dummy id */
	if (!id && dev->driver_override)
		id = &vmbus_device_null;

	return id;
}

/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
static int vmbus_add_dynid(struct hv_driver *drv, uuid_le *guid)
{
	struct vmbus_dynid *dynid;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;

	dynid->id.guid = *guid;

	spin_lock(&drv->dynids.lock);
	list_add_tail(&dynid->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	return driver_attach(&drv->driver);
}

static void vmbus_free_dynids(struct hv_driver *drv)
{
	struct vmbus_dynid *dynid, *n;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		list_del(&dynid->node);
		kfree(dynid);
	}
	spin_unlock(&drv->dynids.lock);
}

/*
 * store_new_id - sysfs frontend to vmbus_add_dynid()
 *
 * Allow GUIDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
			    size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	uuid_le guid;
	ssize_t retval;

	retval = uuid_le_to_bin(buf, &guid);
	if (retval)
		return retval;

	if (hv_vmbus_dynid_match(drv, &guid))
		return -EEXIST;

	retval = vmbus_add_dynid(drv, &guid);
	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR_WO(new_id);
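
/*
 * Example (sketch): teach an already-loaded driver to claim one more
 * class GUID at runtime and re-probe unbound devices:
 *
 *	echo "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" > \
 *		/sys/bus/vmbus/drivers/<driver>/new_id
 *
 * uuid_le_to_bin() parses the standard textual GUID form; adding a GUID
 * that is already on the dynamic list returns -EEXIST.
 */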

/*
 * store_remove_id - remove a vmbus device ID from this driver
 *
 * Removes a dynamic vmbus device ID from this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
			       size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct vmbus_dynid *dynid, *n;
	uuid_le guid;
	ssize_t retval;

	retval = uuid_le_to_bin(buf, &guid);
	if (retval)
		return retval;

	retval = -ENODEV;
	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		struct hv_vmbus_device_id *id = &dynid->id;

		if (!uuid_le_cmp(id->guid, guid)) {
			list_del(&dynid->node);
			kfree(dynid);
			retval = count;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return retval;
}
static DRIVER_ATTR_WO(remove_id);

static struct attribute *vmbus_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus_drv);


/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct hv_device *hv_dev = device_to_hv_device(device);

	/* The hv_sock driver handles all hv_sock offers. */
	if (is_hvsock_channel(hv_dev->channel))
		return drv->hvsock;

	if (hv_vmbus_get_id(drv, hv_dev))
		return 1;

	return 0;
}

/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
	int ret = 0;
	struct hv_driver *drv =
			drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;

	dev_id = hv_vmbus_get_id(drv, dev);
	if (drv->probe) {
		ret = drv->probe(dev, dev_id);
		if (ret != 0)
			pr_err("probe failed for device %s (%d)\n",
			       dev_name(child_device), ret);

	} else {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		ret = -ENODEV;
	}
	return ret;
}

/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	if (child_device->driver) {
		drv = drv_to_hv_drv(child_device->driver);
		if (drv->remove)
			drv->remove(dev);
	}

	return 0;
}


/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return;

	drv = drv_to_hv_drv(child_device->driver);

	if (drv->shutdown)
		drv->shutdown(dev);
}


/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);
	struct vmbus_channel *channel = hv_dev->channel;

	mutex_lock(&vmbus_connection.channel_mutex);
	hv_process_channel_removal(channel);
	mutex_unlock(&vmbus_connection.channel_mutex);
	kfree(hv_dev);
}

/* The one and only one */
static struct bus_type hv_bus = {
	.name =			"vmbus",
	.match =		vmbus_match,
	.shutdown =		vmbus_shutdown,
	.remove =		vmbus_remove,
	.probe =		vmbus_probe,
	.uevent =		vmbus_uevent,
	.dev_groups =		vmbus_dev_groups,
	.drv_groups =		vmbus_drv_groups,
};
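
/*
 * Once registered in vmbus_bus_init(), this bus appears as /sys/bus/vmbus,
 * with the per-device attributes from vmbus_dev_groups above and the
 * new_id/remove_id driver attributes from vmbus_drv_groups.
 */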

struct onmessage_work_context {
	struct work_struct work;
	struct hv_message msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
	struct onmessage_work_context *ctx;

	/* Do not process messages if we're in DISCONNECTED state */
	if (vmbus_connection.conn_state == DISCONNECTED)
		return;

	ctx = container_of(work, struct onmessage_work_context,
			   work);
	vmbus_onmessage(&ctx->msg);
	kfree(ctx);
}

static void hv_process_timer_expiration(struct hv_message *msg,
					struct hv_per_cpu_context *hv_cpu)
{
	struct clock_event_device *dev = hv_cpu->clk_evt;

	if (dev->event_handler)
		dev->event_handler(dev);

	vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
}

void vmbus_on_msg_dpc(unsigned long data)
{
	struct hv_per_cpu_context *hv_cpu = (void *)data;
	void *page_addr = hv_cpu->synic_message_page;
	struct hv_message *msg = (struct hv_message *)page_addr +
				  VMBUS_MESSAGE_SINT;
	struct vmbus_channel_message_header *hdr;
	const struct vmbus_channel_message_table_entry *entry;
	struct onmessage_work_context *ctx;
	u32 message_type = msg->header.message_type;

	if (message_type == HVMSG_NONE)
		/* no msg */
		return;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;

	trace_vmbus_on_msg_dpc(hdr);

	if (hdr->msgtype >= CHANNELMSG_COUNT) {
		WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
		goto msg_handled;
	}

	entry = &channel_message_table[hdr->msgtype];
	if (entry->handler_type == VMHT_BLOCKING) {
		ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
		if (ctx == NULL)
			return;

		INIT_WORK(&ctx->work, vmbus_onmessage_work);
		memcpy(&ctx->msg, msg, sizeof(*msg));

		/*
		 * The host can generate a rescind message while we
		 * may still be handling the original offer. We deal with
		 * this condition by ensuring the processing is done on the
		 * same CPU.
		 */
		switch (hdr->msgtype) {
		case CHANNELMSG_RESCIND_CHANNELOFFER:
			/*
			 * If we are handling the rescind message;
			 * schedule the work on the global work queue.
			 */
			schedule_work_on(vmbus_connection.connect_cpu,
					 &ctx->work);
			break;

		case CHANNELMSG_OFFERCHANNEL:
			atomic_inc(&vmbus_connection.offer_in_progress);
			queue_work_on(vmbus_connection.connect_cpu,
				      vmbus_connection.work_queue,
				      &ctx->work);
			break;

		default:
			queue_work(vmbus_connection.work_queue, &ctx->work);
		}
	} else
		entry->message_handler(hdr);

msg_handled:
	vmbus_signal_eom(msg, message_type);
}


/*
 * Direct callback for channels using other deferred processing
 */
static void vmbus_channel_isr(struct vmbus_channel *channel)
{
	void (*callback_fn)(void *);

	callback_fn = READ_ONCE(channel->onchannel_callback);
	if (likely(callback_fn != NULL))
		(*callback_fn)(channel->channel_callback_context);
}

/*
 * Schedule all channels with events pending
 */
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
{
	unsigned long *recv_int_page;
	u32 maxbits, relid;

	if (vmbus_proto_version < VERSION_WIN8) {
		maxbits = MAX_NUM_CHANNELS_SUPPORTED;
		recv_int_page = vmbus_connection.recv_int_page;
	} else {
		/*
		 * When the host is win8 and beyond, the event page
		 * can be directly checked to get the id of the channel
		 * that has the interrupt pending.
		 */
		void *page_addr = hv_cpu->synic_event_page;
		union hv_synic_event_flags *event
			= (union hv_synic_event_flags *)page_addr +
						VMBUS_MESSAGE_SINT;

		maxbits = HV_EVENT_FLAGS_COUNT;
		recv_int_page = event->flags;
	}

	if (unlikely(!recv_int_page))
		return;

	for_each_set_bit(relid, recv_int_page, maxbits) {
		struct vmbus_channel *channel;

		if (!sync_test_and_clear_bit(relid, recv_int_page))
			continue;

		/* Special case - vmbus channel protocol msg */
		if (relid == 0)
			continue;

		rcu_read_lock();

		/* Find channel based on relid */
		list_for_each_entry_rcu(channel, &hv_cpu->chan_list, percpu_list) {
			if (channel->offermsg.child_relid != relid)
				continue;

			if (channel->rescind)
				continue;

			trace_vmbus_chan_sched(channel);

			++channel->interrupts;

			switch (channel->callback_mode) {
			case HV_CALL_ISR:
				vmbus_channel_isr(channel);
				break;

			case HV_CALL_BATCHED:
				hv_begin_read(&channel->inbound);
				/* fallthrough */
			case HV_CALL_DIRECT:
				tasklet_schedule(&channel->callback_event);
			}
		}

		rcu_read_unlock();
	}
}
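
/*
 * Summary of the dispatch above: HV_CALL_ISR channels run their callback
 * directly in interrupt context; HV_CALL_BATCHED channels first mask
 * further ring-buffer interrupts via hv_begin_read() and then run in the
 * channel tasklet; HV_CALL_DIRECT channels run in the tasklet without the
 * extra masking.
 */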

static void vmbus_isr(void)
{
	struct hv_per_cpu_context *hv_cpu
		= this_cpu_ptr(hv_context.cpu_context);
	void *page_addr = hv_cpu->synic_event_page;
	struct hv_message *msg;
	union hv_synic_event_flags *event;
	bool handled = false;

	if (unlikely(page_addr == NULL))
		return;

	event = (union hv_synic_event_flags *)page_addr +
					VMBUS_MESSAGE_SINT;
	/*
	 * Check for events before checking for messages. This is the order
	 * in which events and messages are checked in Windows guests on
	 * Hyper-V, and the Windows team suggested we do the same.
	 */

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7)) {

		/* Since we are a child, we only need to check bit 0 */
		if (sync_test_and_clear_bit(0, event->flags))
			handled = true;
	} else {
		/*
		 * Our host is win8 or above. The signaling mechanism
		 * has changed and we can directly look at the event page.
		 * If bit n is set then we have an interrupt on the channel
		 * whose id is n.
		 */
		handled = true;
	}

	if (handled)
		vmbus_chan_sched(hv_cpu);

	page_addr = hv_cpu->synic_message_page;
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
			hv_process_timer_expiration(msg, hv_cpu);
		else
			tasklet_schedule(&hv_cpu->msg_dpc);
	}

	add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
}

/*
 * Boolean to control whether to report panic messages over Hyper-V.
 *
 * It can be set via /proc/sys/kernel/hyperv_record_panic_msg
 */
static int sysctl_record_panic_msg = 1;

/*
 * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
 * buffer and call into Hyper-V to transfer the data.
 */
static void hv_kmsg_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason)
{
	size_t bytes_written;
	phys_addr_t panic_pa;

	/* We are only interested in panics. */
	if ((reason != KMSG_DUMP_PANIC) || (!sysctl_record_panic_msg))
		return;

	panic_pa = virt_to_phys(hv_panic_page);

	/*
	 * Write dump contents to the page. No need to synchronize; panic
	 * should be single-threaded.
	 */
	kmsg_dump_get_buffer(dumper, true, hv_panic_page, PAGE_SIZE,
			     &bytes_written);
	if (bytes_written)
		hyperv_report_panic_msg(panic_pa, bytes_written);
}

static struct kmsg_dumper hv_kmsg_dumper = {
	.dump = hv_kmsg_dump,
};

static struct ctl_table_header *hv_ctl_table_hdr;
static int zero;
static int one = 1;

/*
 * sysctl option to allow the user to control whether kmsg data should be
 * reported to Hyper-V on panic.
 */
static struct ctl_table hv_ctl_table[] = {
	{
		.procname	= "hyperv_record_panic_msg",
		.data		= &sysctl_record_panic_msg,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &one
	},
	{}
};

static struct ctl_table hv_root_table[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= hv_ctl_table
	},
	{}
};
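
/*
 * With the root table above, the knob is exposed as
 * /proc/sys/kernel/hyperv_record_panic_msg, clamped to 0..1; e.g.
 *
 *	echo 0 > /proc/sys/kernel/hyperv_record_panic_msg
 *
 * disables forwarding of the panic kmsg to the hypervisor.
 */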

/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- retrieve the channel offers
 */
static int vmbus_bus_init(void)
{
	int ret;

	/* Hypervisor initialization: set up the hypercall page, etc. */
	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	ret = bus_register(&hv_bus);
	if (ret)
		return ret;

	hv_setup_vmbus_irq(vmbus_isr);

	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;
	/*
	 * Initialize the per-cpu interrupt state and
	 * connect to the host.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
				hv_synic_init, hv_synic_cleanup);
	if (ret < 0)
		goto err_alloc;
	hyperv_cpuhp_online = ret;

	ret = vmbus_connect();
	if (ret)
		goto err_connect;

	/*
	 * Only register if the crash MSRs are available
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		u64 hyperv_crash_ctl;
		/*
		 * Sysctl registration is not fatal, since by default
		 * reporting is enabled.
		 */
		hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
		if (!hv_ctl_table_hdr)
			pr_err("Hyper-V: sysctl table register error\n");

		/*
		 * Register for panic kmsg callback only if the right
		 * capability is supported by the hypervisor.
		 */
		hv_get_crash_ctl(hyperv_crash_ctl);
		if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG) {
			hv_panic_page = (void *)get_zeroed_page(GFP_KERNEL);
			if (hv_panic_page) {
				ret = kmsg_dump_register(&hv_kmsg_dumper);
				if (ret)
					pr_err("Hyper-V: kmsg dump register error 0x%x\n",
					       ret);
			} else
				pr_err("Hyper-V: panic message page memory allocation failed\n");
		}

		register_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_register(&panic_notifier_list,
					       &hyperv_panic_block);
	}

	vmbus_request_offers();

	return 0;

err_connect:
	cpuhp_remove_state(hyperv_cpuhp_online);
err_alloc:
	hv_synic_free();
	hv_remove_vmbus_irq();

	bus_unregister(&hv_bus);
	free_page((unsigned long)hv_panic_page);
	unregister_sysctl_table(hv_ctl_table_hdr);
	hv_ctl_table_hdr = NULL;
	return ret;
}

/**
 * __vmbus_driver_register() - Register a vmbus driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the driver
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the Hyper-V vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 *
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
	int ret;

	pr_info("registering driver %s\n", hv_driver->name);

	ret = vmbus_exists();
	if (ret < 0)
		return ret;

	hv_driver->driver.name = hv_driver->name;
	hv_driver->driver.owner = owner;
	hv_driver->driver.mod_name = mod_name;
	hv_driver->driver.bus = &hv_bus;

	spin_lock_init(&hv_driver->dynids.lock);
	INIT_LIST_HEAD(&hv_driver->dynids.list);

	ret = driver_register(&hv_driver->driver);

	return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
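
/*
 * Minimal registration sketch (hypothetical driver, not part of this
 * file). vmbus_driver_register() is the wrapper macro in <linux/hyperv.h>
 * that passes THIS_MODULE and KBUILD_MODNAME to __vmbus_driver_register():
 *
 *	static struct hv_driver sample_drv = {
 *		.name	  = "hv_sample",
 *		.id_table = sample_id_table,
 *		.probe	  = sample_probe,
 *		.remove   = sample_remove,
 *	};
 *
 *	module_driver(sample_drv, vmbus_driver_register,
 *		      vmbus_driver_unregister);
 */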

/**
 * vmbus_driver_unregister() - Unregister a vmbus driver
 * @hv_driver: Pointer to driver structure you want to
 *		un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists()) {
		driver_unregister(&hv_driver->driver);
		vmbus_free_dynids(hv_driver);
	}
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);


/*
 * Called when last reference to channel is gone.
 */
static void vmbus_chan_release(struct kobject *kobj)
{
	struct vmbus_channel *channel
		= container_of(kobj, struct vmbus_channel, kobj);

	kfree_rcu(channel, rcu);
}

struct vmbus_chan_attribute {
	struct attribute attr;
	ssize_t (*show)(const struct vmbus_channel *chan, char *buf);
	ssize_t (*store)(struct vmbus_channel *chan,
			 const char *buf, size_t count);
};
#define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
	struct vmbus_chan_attribute chan_attr_##_name \
		= __ATTR(_name, _mode, _show, _store)
#define VMBUS_CHAN_ATTR_RW(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
#define VMBUS_CHAN_ATTR_RO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
#define VMBUS_CHAN_ATTR_WO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)

static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	const struct vmbus_chan_attribute *attribute
		= container_of(attr, struct vmbus_chan_attribute, attr);
	const struct vmbus_channel *chan
		= container_of(kobj, struct vmbus_channel, kobj);

	if (!attribute->show)
		return -EIO;

	if (chan->state != CHANNEL_OPENED_STATE)
		return -EINVAL;

	return attribute->show(chan, buf);
}

static const struct sysfs_ops vmbus_chan_sysfs_ops = {
	.show = vmbus_chan_attr_show,
};

static ssize_t out_mask_show(const struct vmbus_channel *channel, char *buf)
{
	const struct hv_ring_buffer_info *rbi = &channel->outbound;

	return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
}
static VMBUS_CHAN_ATTR_RO(out_mask);

static ssize_t in_mask_show(const struct vmbus_channel *channel, char *buf)
{
	const struct hv_ring_buffer_info *rbi = &channel->inbound;

	return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
}
static VMBUS_CHAN_ATTR_RO(in_mask);

static ssize_t read_avail_show(const struct vmbus_channel *channel, char *buf)
{
	const struct hv_ring_buffer_info *rbi = &channel->inbound;

	return sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
}
static VMBUS_CHAN_ATTR_RO(read_avail);

static ssize_t write_avail_show(const struct vmbus_channel *channel, char *buf)
{
	const struct hv_ring_buffer_info *rbi = &channel->outbound;

	return sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
}
static VMBUS_CHAN_ATTR_RO(write_avail);

static ssize_t show_target_cpu(const struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%u\n", channel->target_cpu);
}
static VMBUS_CHAN_ATTR(cpu, S_IRUGO, show_target_cpu, NULL);

static ssize_t channel_pending_show(const struct vmbus_channel *channel,
				    char *buf)
{
	return sprintf(buf, "%d\n",
		       channel_pending(channel,
				       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(pending, S_IRUGO, channel_pending_show, NULL);

static ssize_t channel_latency_show(const struct vmbus_channel *channel,
				    char *buf)
{
	return sprintf(buf, "%d\n",
		       channel_latency(channel,
				       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(latency, S_IRUGO, channel_latency_show, NULL);

static ssize_t channel_interrupts_show(const struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%llu\n", channel->interrupts);
}
static VMBUS_CHAN_ATTR(interrupts, S_IRUGO, channel_interrupts_show, NULL);

static ssize_t channel_events_show(const struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%llu\n", channel->sig_events);
}
static VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL);

static ssize_t subchannel_monitor_id_show(const struct vmbus_channel *channel,
					  char *buf)
{
	return sprintf(buf, "%u\n", channel->offermsg.monitorid);
}
static VMBUS_CHAN_ATTR(monitor_id, S_IRUGO, subchannel_monitor_id_show, NULL);

static ssize_t subchannel_id_show(const struct vmbus_channel *channel,
				  char *buf)
{
	return sprintf(buf, "%u\n",
		       channel->offermsg.offer.sub_channel_index);
}
static VMBUS_CHAN_ATTR_RO(subchannel_id);

static struct attribute *vmbus_chan_attrs[] = {
	&chan_attr_out_mask.attr,
	&chan_attr_in_mask.attr,
	&chan_attr_read_avail.attr,
	&chan_attr_write_avail.attr,
	&chan_attr_cpu.attr,
	&chan_attr_pending.attr,
	&chan_attr_latency.attr,
	&chan_attr_interrupts.attr,
	&chan_attr_events.attr,
	&chan_attr_monitor_id.attr,
	&chan_attr_subchannel_id.attr,
	NULL
};

static struct kobj_type vmbus_chan_ktype = {
	.sysfs_ops = &vmbus_chan_sysfs_ops,
	.release = vmbus_chan_release,
	.default_attrs = vmbus_chan_attrs,
};

/*
 * vmbus_add_channel_kobj - setup a sub-directory under device/channels
 */
int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
{
	struct kobject *kobj = &channel->kobj;
	u32 relid = channel->offermsg.child_relid;
	int ret;

	kobj->kset = dev->channels_kset;
	ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
				   "%u", relid);
	if (ret)
		return ret;

	kobject_uevent(kobj, KOBJ_ADD);

	return 0;
}
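
/*
 * The kobject added above shows up as
 * /sys/bus/vmbus/devices/<device>/channels/<relid>/, populated with the
 * read-only files declared in vmbus_chan_attrs (out_mask, in_mask,
 * read_avail, write_avail, cpu, ...).
 */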

/*
 * vmbus_device_create - Creates and registers a new child device
 * on the vmbus.
 */
struct hv_device *vmbus_device_create(const uuid_le *type,
				      const uuid_le *instance,
				      struct vmbus_channel *channel)
{
	struct hv_device *child_device_obj;

	child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
	if (!child_device_obj) {
		pr_err("Unable to allocate device object for child device\n");
		return NULL;
	}

	child_device_obj->channel = channel;
	memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
	memcpy(&child_device_obj->dev_instance, instance,
	       sizeof(uuid_le));
	child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */

	return child_device_obj;
}

/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
	struct kobject *kobj = &child_device_obj->device.kobj;
	int ret;

	dev_set_name(&child_device_obj->device, "%pUl",
		     child_device_obj->channel->offermsg.offer.if_instance.b);

	child_device_obj->device.bus = &hv_bus;
	child_device_obj->device.parent = &hv_acpi_dev->dev;
	child_device_obj->device.release = vmbus_device_release;

	/*
	 * Register with the LDM. This will kick off the driver/device
	 * binding...which will eventually call vmbus_match() and vmbus_probe()
	 */
	ret = device_register(&child_device_obj->device);
	if (ret) {
		pr_err("Unable to register child device\n");
		return ret;
	}

	child_device_obj->channels_kset = kset_create_and_add("channels",
							      NULL, kobj);
	if (!child_device_obj->channels_kset) {
		ret = -ENOMEM;
		goto err_dev_unregister;
	}

	ret = vmbus_add_channel_kobj(child_device_obj,
				     child_device_obj->channel);
	if (ret) {
		pr_err("Unable to register primary channel\n");
		goto err_kset_unregister;
	}

	return 0;

err_kset_unregister:
	kset_unregister(child_device_obj->channels_kset);

err_dev_unregister:
	device_unregister(&child_device_obj->device);
	return ret;
}

/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
	pr_debug("child device %s unregistered\n",
		 dev_name(&device_obj->device));

	kset_unregister(device_obj->channels_kset);

	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release()
	 */
	device_unregister(&device_obj->device);
}


/*
 * VMBUS is an acpi enumerated device. Get the information we
 * need from DSDT.
 */
#define VTPM_BASE_ADDRESS 0xfed40000
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
	resource_size_t start = 0;
	resource_size_t end = 0;
	struct resource *new_res;
	struct resource **old_res = &hyperv_mmio;
	struct resource **prev_res = NULL;

	switch (res->type) {

	/*
	 * "Address" descriptors are for bus windows. Ignore
	 * "memory" descriptors, which are for registers on
	 * devices.
	 */
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		start = res->data.address32.address.minimum;
		end = res->data.address32.address.maximum;
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		start = res->data.address64.address.minimum;
		end = res->data.address64.address.maximum;
		break;

	default:
		/* Unused resource type */
		return AE_OK;

	}
	/*
	 * Ignore ranges that are below 1MB, as they're not
	 * necessary or useful here.
	 */
	if (end < 0x100000)
		return AE_OK;

	new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
	if (!new_res)
		return AE_NO_MEMORY;

	/* If this range overlaps the virtual TPM, truncate it. */
	if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
		end = VTPM_BASE_ADDRESS;

	new_res->name = "hyperv mmio";
	new_res->flags = IORESOURCE_MEM;
	new_res->start = start;
	new_res->end = end;

	/*
	 * If two ranges are adjacent, merge them.
	 */
	do {
		if (!*old_res) {
			*old_res = new_res;
			break;
		}

		if (((*old_res)->end + 1) == new_res->start) {
			(*old_res)->end = new_res->end;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start == new_res->end + 1) {
			(*old_res)->start = new_res->start;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start > new_res->end) {
			new_res->sibling = *old_res;
			if (prev_res)
				(*prev_res)->sibling = new_res;
			*old_res = new_res;
			break;
		}

		prev_res = old_res;
		old_res = &(*old_res)->sibling;

	} while (1);

	return AE_OK;
}

static int vmbus_acpi_remove(struct acpi_device *device)
{
	struct resource *cur_res;
	struct resource *next_res;

	if (hyperv_mmio) {
		if (fb_mmio) {
			__release_region(hyperv_mmio, fb_mmio->start,
					 resource_size(fb_mmio));
			fb_mmio = NULL;
		}

		for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
			next_res = cur_res->sibling;
			kfree(cur_res);
		}
	}

	return 0;
}

static void vmbus_reserve_fb(void)
{
	int size;
	/*
	 * Make a claim for the frame buffer in the resource tree under the
	 * first node, which will be the one below 4GB. The length seems to
	 * be underreported, particularly in a Generation 1 VM. So start out
	 * reserving a larger area and make it smaller until it succeeds.
	 */

	if (screen_info.lfb_base) {
		if (efi_enabled(EFI_BOOT))
			size = max_t(__u32, screen_info.lfb_size, 0x800000);
		else
			size = max_t(__u32, screen_info.lfb_size, 0x4000000);

		for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
			fb_mmio = __request_region(hyperv_mmio,
						   screen_info.lfb_base, size,
						   fb_mmio_name, 0);
		}
	}
}

/**
 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
 * @new:		If successful, supplies a pointer to the
 *			allocated MMIO space.
 * @device_obj:		Identifies the caller
 * @min:		Minimum guest physical address of the
 *			allocation
 * @max:		Maximum guest physical address
 * @size:		Size of the range to be allocated
 * @align:		Alignment of the range to be allocated
 * @fb_overlap_ok:	Whether this allocation can be allowed
 *			to overlap the video frame buffer.
 *
 * This function walks the resources granted to VMBus by the
 * _CRS object in the ACPI namespace underneath the parent
 * "bridge" whether that's a root PCI bus in the Generation 1
 * case or a Module Device in the Generation 2 case.  It then
 * attempts to allocate from the global MMIO pool in a way that
 * matches the constraints supplied in these parameters and by
 * that _CRS.
 *
 * Return: 0 on success, -errno on failure
 */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok)
{
	struct resource *iter, *shadow;
	resource_size_t range_min, range_max, start;
	const char *dev_n = dev_name(&device_obj->device);
	int retval;

	retval = -ENXIO;
	down(&hyperv_mmio_lock);

	/*
	 * If overlaps with frame buffers are allowed, then first attempt to
	 * make the allocation from within the reserved region. Because it
	 * is already reserved, no shadow allocation is necessary.
	 */
	if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
	    !(max < fb_mmio->start)) {

		range_min = fb_mmio->start;
		range_max = fb_mmio->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				retval = 0;
				goto exit;
			}
		}
	}

	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= max) || (iter->end <= min))
			continue;

		range_min = iter->start;
		range_max = iter->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			shadow = __request_region(iter, start, size, NULL,
						  IORESOURCE_BUSY);
			if (!shadow)
				continue;

			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				shadow->name = (char *)*new;
				retval = 0;
				goto exit;
			}

			__release_region(iter, start, size);
		}
	}

exit:
	up(&hyperv_mmio_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
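
/*
 * Usage sketch (hypothetical caller): request a 1MB, 1MB-aligned window
 * anywhere in the VMBus MMIO space, allowing frame-buffer overlap:
 *
 *	struct resource *res;
 *	int ret = vmbus_allocate_mmio(&res, hv_dev, 0, -1,
 *				      0x100000, 0x100000, true);
 *	if (ret)
 *		return ret;
 *	...
 *	vmbus_free_mmio(res->start, resource_size(res));
 */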

/**
 * vmbus_free_mmio() - Free a memory-mapped I/O range.
 * @start:		Base address of region to release.
 * @size:		Size of the range to be released.
 *
 * This function releases anything requested by
 * vmbus_allocate_mmio().
 */
void vmbus_free_mmio(resource_size_t start, resource_size_t size)
{
	struct resource *iter;

	down(&hyperv_mmio_lock);
	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= start + size) || (iter->end <= start))
			continue;

		__release_region(iter, start, size);
	}
	release_mem_region(start, size);
	up(&hyperv_mmio_lock);

}
EXPORT_SYMBOL_GPL(vmbus_free_mmio);

static int vmbus_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	int ret_val = -ENODEV;
	struct acpi_device *ancestor;

	hv_acpi_dev = device;

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				     vmbus_walk_resources, NULL);

	if (ACPI_FAILURE(result))
		goto acpi_walk_err;
	/*
	 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
	 * firmware) is the VMOD that has the mmio ranges. Get that.
	 */
	for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
					     vmbus_walk_resources, NULL);

		if (ACPI_FAILURE(result))
			continue;
		if (hyperv_mmio) {
			vmbus_reserve_fb();
			break;
		}
	}
	ret_val = 0;

acpi_walk_err:
	complete(&probe_event);
	if (ret_val)
		vmbus_acpi_remove(device);
	return ret_val;
}

static const struct acpi_device_id vmbus_acpi_device_ids[] = {
	{"VMBUS", 0},
	{"VMBus", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

static struct acpi_driver vmbus_acpi_driver = {
	.name = "vmbus",
	.ids = vmbus_acpi_device_ids,
	.ops = {
		.add = vmbus_acpi_add,
		.remove = vmbus_acpi_remove,
	},
};

static void hv_kexec_handler(void)
{
	hv_synic_clockevents_cleanup();
	vmbus_initiate_unload(false);
	vmbus_connection.conn_state = DISCONNECTED;
	/* Make sure conn_state is set as hv_synic_cleanup checks for it */
	mb();
	cpuhp_remove_state(hyperv_cpuhp_online);
	hyperv_cleanup();
}

static void hv_crash_handler(struct pt_regs *regs)
{
	vmbus_initiate_unload(true);
	/*
	 * In crash handler we can't schedule synic cleanup for all CPUs,
	 * doing the cleanup for current CPU only. This should be sufficient
	 * for kdump.
	 */
	vmbus_connection.conn_state = DISCONNECTED;
	hv_synic_cleanup(smp_processor_id());
	hyperv_cleanup();
}

static int __init hv_acpi_init(void)
{
	int ret, t;

	if (!hv_is_hyperv_initialized())
		return -ENODEV;

	init_completion(&probe_event);

	/*
	 * Get ACPI resources first.
	 */
	ret = acpi_bus_register_driver(&vmbus_acpi_driver);

	if (ret)
		return ret;

	t = wait_for_completion_timeout(&probe_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	ret = vmbus_bus_init();
	if (ret)
		goto cleanup;

	hv_setup_kexec_handler(hv_kexec_handler);
	hv_setup_crash_handler(hv_crash_handler);

	return 0;

cleanup:
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	hv_acpi_dev = NULL;
	return ret;
}

static void __exit vmbus_exit(void)
{
	int cpu;

	hv_remove_kexec_handler();
	hv_remove_crash_handler();
	vmbus_connection.conn_state = DISCONNECTED;
	hv_synic_clockevents_cleanup();
	vmbus_disconnect();
	hv_remove_vmbus_irq();
	for_each_online_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		tasklet_kill(&hv_cpu->msg_dpc);
	}
	vmbus_free_channels();

	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		kmsg_dump_unregister(&hv_kmsg_dumper);
		unregister_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &hyperv_panic_block);
	}

	free_page((unsigned long)hv_panic_page);
	unregister_sysctl_table(hv_ctl_table_hdr);
	hv_ctl_table_hdr = NULL;
	bus_unregister(&hv_bus);

	cpuhp_remove_state(hyperv_cpuhp_online);
	hv_synic_free();
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
}


MODULE_LICENSE("GPL");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);