/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/hyperv.h>
#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"

static void init_vp_index(struct vmbus_channel *channel, u16 dev_type);

static const struct vmbus_device vmbus_devs[] = {
	/* IDE */
	{ .dev_type = HV_IDE,
	  HV_IDE_GUID,
	  .perf_device = true,
	},

	/* SCSI */
	{ .dev_type = HV_SCSI,
	  HV_SCSI_GUID,
	  .perf_device = true,
	},

	/* Fibre Channel */
	{ .dev_type = HV_FC,
	  HV_SYNTHFC_GUID,
	  .perf_device = true,
	},

	/* Synthetic NIC */
	{ .dev_type = HV_NIC,
	  HV_NIC_GUID,
	  .perf_device = true,
	},

	/* Network Direct */
	{ .dev_type = HV_ND,
	  HV_ND_GUID,
	  .perf_device = true,
	},

	/* PCIE */
	{ .dev_type = HV_PCIE,
	  HV_PCIE_GUID,
	  .perf_device = true,
	},

	/* Synthetic Frame Buffer */
	{ .dev_type = HV_FB,
	  HV_SYNTHVID_GUID,
	  .perf_device = false,
	},

	/* Synthetic Keyboard */
	{ .dev_type = HV_KBD,
	  HV_KBD_GUID,
	  .perf_device = false,
	},

	/* Synthetic MOUSE */
	{ .dev_type = HV_MOUSE,
	  HV_MOUSE_GUID,
	  .perf_device = false,
	},

	/* KVP */
	{ .dev_type = HV_KVP,
	  HV_KVP_GUID,
	  .perf_device = false,
	},

	/* Time Synch */
	{ .dev_type = HV_TS,
	  HV_TS_GUID,
	  .perf_device = false,
	},

	/* Heartbeat */
	{ .dev_type = HV_HB,
	  HV_HEART_BEAT_GUID,
	  .perf_device = false,
	},

	/* Shutdown */
	{ .dev_type = HV_SHUTDOWN,
	  HV_SHUTDOWN_GUID,
	  .perf_device = false,
	},

	/* File copy */
	{ .dev_type = HV_FCOPY,
	  HV_FCOPY_GUID,
	  .perf_device = false,
	},

	/* Backup */
	{ .dev_type = HV_BACKUP,
	  HV_VSS_GUID,
	  .perf_device = false,
	},

	/* Dynamic Memory */
	{ .dev_type = HV_DM,
	  HV_DM_GUID,
	  .perf_device = false,
	},

	/* Unknown GUID */
	{ .dev_type = HV_UNKNOWN,
	  .perf_device = false,
	},
};

static const struct {
	uuid_le guid;
} vmbus_unsupported_devs[] = {
	{ HV_AVMA1_GUID },
	{ HV_AVMA2_GUID },
	{ HV_RDV_GUID },
};

/*
 * The rescinded channel may be blocked waiting for a response from the host;
 * take care of that.
 */
static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
{
	struct vmbus_channel_msginfo *msginfo;
	unsigned long flags;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {

		if (msginfo->waiting_channel == channel) {
			complete(&msginfo->waitevent);
			break;
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

static bool is_unsupported_vmbus_devs(const uuid_le *guid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++)
		if (!uuid_le_cmp(*guid, vmbus_unsupported_devs[i].guid))
			return true;
	return false;
}

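/*
 * Map a channel's interface type GUID to an index into vmbus_devs[].
 * hv_sock channels and the device classes listed in
 * vmbus_unsupported_devs[] are reported as HV_UNKNOWN.
 */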
static u16 hv_get_dev_type(const struct vmbus_channel *channel)
{
	const uuid_le *guid = &channel->offermsg.offer.if_type;
	u16 i;

	if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
		return HV_UNKNOWN;

	for (i = HV_IDE; i < HV_UNKNOWN; i++) {
		if (!uuid_le_cmp(*guid, vmbus_devs[i].guid))
			return i;
	}
	pr_info("Unknown GUID: %pUl\n", guid);
	return i;
}

/**
 * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
 * @icmsghdrp: Pointer to msg header structure
 * @buf: Raw buffer channel data
 * @fw_version: The framework versions we can support, in decreasing order
 * @fw_vercnt: Number of entries in @fw_version
 * @srv_version: The service versions we can support, in decreasing order
 * @srv_vercnt: Number of entries in @srv_version
 * @nego_fw_version: The selected framework version
 * @nego_srv_version: The selected service version
 *
 * @icmsghdrp is of type &struct icmsg_hdr.
 * Set up and fill in default negotiate response message.
 *
 * Mainly used by Hyper-V drivers.
 */
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
				u8 *buf, const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version)
{
	int icframe_major, icframe_minor;
	int icmsg_major, icmsg_minor;
	int fw_major, fw_minor;
	int srv_major, srv_minor;
	int i, j;
	bool found_match = false;
	struct icmsg_negotiate *negop;

	icmsghdrp->icmsgsize = 0x10;
	negop = (struct icmsg_negotiate *)&buf[
		sizeof(struct vmbuspipe_hdr) +
		sizeof(struct icmsg_hdr)];

	icframe_major = negop->icframe_vercnt;
	icframe_minor = 0;

	icmsg_major = negop->icmsg_vercnt;
	icmsg_minor = 0;

	/*
	 * Select the framework version number we will
	 * support.
	 */

	for (i = 0; i < fw_vercnt; i++) {
		fw_major = (fw_version[i] >> 16);
		fw_minor = (fw_version[i] & 0xFFFF);

		for (j = 0; j < negop->icframe_vercnt; j++) {
			if ((negop->icversion_data[j].major == fw_major) &&
			    (negop->icversion_data[j].minor == fw_minor)) {
				icframe_major = negop->icversion_data[j].major;
				icframe_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}

		if (found_match)
			break;
	}

	if (!found_match)
		goto fw_error;

	found_match = false;

	for (i = 0; i < srv_vercnt; i++) {
		srv_major = (srv_version[i] >> 16);
		srv_minor = (srv_version[i] & 0xFFFF);

		for (j = negop->icframe_vercnt;
		     (j < negop->icframe_vercnt + negop->icmsg_vercnt);
		     j++) {

			if ((negop->icversion_data[j].major == srv_major) &&
			    (negop->icversion_data[j].minor == srv_minor)) {

				icmsg_major = negop->icversion_data[j].major;
				icmsg_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}

		if (found_match)
			break;
	}

	/*
	 * Respond with the framework and service
	 * version numbers we can support.
	 */

fw_error:
	if (!found_match) {
		negop->icframe_vercnt = 0;
		negop->icmsg_vercnt = 0;
	} else {
		negop->icframe_vercnt = 1;
		negop->icmsg_vercnt = 1;
	}

	if (nego_fw_version)
		*nego_fw_version = (icframe_major << 16) | icframe_minor;

	if (nego_srv_version)
		*nego_srv_version = (icmsg_major << 16) | icmsg_minor;

	negop->icversion_data[0].major = icframe_major;
	negop->icversion_data[0].minor = icframe_minor;
	negop->icversion_data[1].major = icmsg_major;
	negop->icversion_data[1].minor = icmsg_minor;
	return found_match;
}
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);

/*
 * alloc_channel - Allocate and initialize a vmbus channel object
 */
static struct vmbus_channel *alloc_channel(void)
{
	struct vmbus_channel *channel;

	channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
	if (!channel)
		return NULL;

	spin_lock_init(&channel->lock);

	INIT_LIST_HEAD(&channel->sc_list);
	INIT_LIST_HEAD(&channel->percpu_list);

	tasklet_init(&channel->callback_event,
		     vmbus_on_event, (unsigned long)channel);

	return channel;
}

/*
 * free_channel - Release the resources used by the vmbus channel object
 */
static void free_channel(struct vmbus_channel *channel)
{
	tasklet_kill(&channel->callback_event);

	kfree_rcu(channel, rcu);
}

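/*
 * percpu_channel_enq - Add the channel to this CPU's channel list.
 * Runs on the channel's target CPU, typically via smp_call_function_single().
 */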
static void percpu_channel_enq(void *arg)
{
	struct vmbus_channel *channel = arg;
	struct hv_per_cpu_context *hv_cpu
		= this_cpu_ptr(hv_context.cpu_context);

	list_add_tail_rcu(&channel->percpu_list, &hv_cpu->chan_list);
}

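/*
 * percpu_channel_deq - Remove the channel from its per-CPU channel list.
 * Also runs on the channel's target CPU.
 */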
static void percpu_channel_deq(void *arg)
{
	struct vmbus_channel *channel = arg;

	list_del_rcu(&channel->percpu_list);
}

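/*
 * vmbus_release_relid - Notify the host that the guest has released the
 * given channel relid.
 */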
static void vmbus_release_relid(u32 relid)
{
	struct vmbus_channel_relid_released msg;

	memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
	msg.child_relid = relid;
	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
	vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
		       true);
}

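/*
 * hv_process_channel_removal - Tear down a rescinded channel: remove it from
 * the per-CPU and global channel lists, release its relid and free the
 * channel object. Must be called with channel_mutex held, and only for a
 * channel that has been marked rescinded.
 */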
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
{
	unsigned long flags;
	struct vmbus_channel *primary_channel;

	BUG_ON(!channel->rescind);
	BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));

	if (channel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(channel->target_cpu,
					 percpu_channel_deq, channel, true);
	} else {
		percpu_channel_deq(channel);
		put_cpu();
	}

	if (channel->primary_channel == NULL) {
		list_del(&channel->listentry);

		primary_channel = channel;
	} else {
		primary_channel = channel->primary_channel;
		spin_lock_irqsave(&primary_channel->lock, flags);
		list_del(&channel->sc_list);
		primary_channel->num_sc--;
		spin_unlock_irqrestore(&primary_channel->lock, flags);
	}

	/*
	 * We need to free the bit for init_vp_index() to work in the case
	 * of sub-channel, when we reload drivers like hv_netvsc.
	 */
	if (channel->affinity_policy == HV_LOCALIZED)
		cpumask_clear_cpu(channel->target_cpu,
				  &primary_channel->alloced_cpus_in_node);

	vmbus_release_relid(relid);

	free_channel(channel);
}

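/*
 * vmbus_free_channels - Mark every channel as rescinded and unregister its
 * device object.
 */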
void vmbus_free_channels(void)
{
	struct vmbus_channel *channel, *tmp;

	list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
		listentry) {
		/* hv_process_channel_removal() needs this */
		channel->rescind = true;

		vmbus_device_unregister(channel->device_obj);
	}
}

/*
 * vmbus_process_offer - Process the offer by creating a channel/device
 * associated with this offer
 */
static void vmbus_process_offer(struct vmbus_channel *newchannel)
{
	struct vmbus_channel *channel;
	bool fnew = true;
	unsigned long flags;
	u16 dev_type;
	int ret;

	/* Make sure this is a new offer */
	mutex_lock(&vmbus_connection.channel_mutex);

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!uuid_le_cmp(channel->offermsg.offer.if_type,
				 newchannel->offermsg.offer.if_type) &&
		    !uuid_le_cmp(channel->offermsg.offer.if_instance,
				 newchannel->offermsg.offer.if_instance)) {
			fnew = false;
			break;
		}
	}

	if (fnew)
		list_add_tail(&newchannel->listentry,
			      &vmbus_connection.chn_list);

	mutex_unlock(&vmbus_connection.channel_mutex);

	if (!fnew) {
		/*
		 * Check to see if this is a sub-channel.
		 */
		if (newchannel->offermsg.offer.sub_channel_index != 0) {
			/*
			 * Process the sub-channel.
			 */
			newchannel->primary_channel = channel;
			spin_lock_irqsave(&channel->lock, flags);
			list_add_tail(&newchannel->sc_list, &channel->sc_list);
			channel->num_sc++;
			spin_unlock_irqrestore(&channel->lock, flags);
		} else {
			atomic_dec(&vmbus_connection.offer_in_progress);
			goto err_free_chan;
		}
	}

	dev_type = hv_get_dev_type(newchannel);

	init_vp_index(newchannel, dev_type);

	if (newchannel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(newchannel->target_cpu,
					 percpu_channel_enq,
					 newchannel, true);
	} else {
		percpu_channel_enq(newchannel);
		put_cpu();
	}

	/*
	 * This state is used to indicate a successful open
	 * so that when we do close the channel normally, we
	 * can cleanup properly
	 */
	newchannel->state = CHANNEL_OPEN_STATE;

	if (!fnew) {
		if (channel->sc_creation_callback != NULL)
			channel->sc_creation_callback(newchannel);
		atomic_dec(&vmbus_connection.offer_in_progress);
		return;
	}

	/*
	 * Start the process of binding this offer to the driver
	 * We need to set the DeviceObject field before calling
	 * vmbus_child_dev_add()
	 */
	newchannel->device_obj = vmbus_device_create(
		&newchannel->offermsg.offer.if_type,
		&newchannel->offermsg.offer.if_instance,
		newchannel);
	if (!newchannel->device_obj)
		goto err_deq_chan;

	newchannel->device_obj->device_id = dev_type;
	/*
	 * Add the new device to the bus. This will kick off device-driver
	 * binding which eventually invokes the device driver's AddDevice()
	 * method.
	 */
	ret = vmbus_device_register(newchannel->device_obj);

	if (ret != 0) {
		pr_err("unable to add child device object (relid %d)\n",
			newchannel->offermsg.child_relid);
		kfree(newchannel->device_obj);
		goto err_deq_chan;
	}

	atomic_dec(&vmbus_connection.offer_in_progress);
	return;

err_deq_chan:
	mutex_lock(&vmbus_connection.channel_mutex);
	list_del(&newchannel->listentry);
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (newchannel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(newchannel->target_cpu,
					 percpu_channel_deq, newchannel, true);
	} else {
		percpu_channel_deq(newchannel);
		put_cpu();
	}

	vmbus_release_relid(newchannel->offermsg.child_relid);

err_free_chan:
	free_channel(newchannel);
}

/*
 * We use this state to statically distribute the channel interrupt load.
 */
static int next_numa_node_id;

/*
 * Starting with Win8, we can statically distribute the incoming
 * channel interrupt load by binding a channel to VCPU.
 * We do this in a hierarchical fashion:
 * First distribute the primary channels across available NUMA nodes
 * and then distribute the subchannels amongst the CPUs in the NUMA
 * node assigned to the primary channel.
 *
 * For pre-win8 hosts or non-performance critical channels we assign the
 * first CPU in the first NUMA node.
 */
static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
{
	u32 cur_cpu;
	bool perf_chn = vmbus_devs[dev_type].perf_device;
	struct vmbus_channel *primary = channel->primary_channel;
	int next_node;
	struct cpumask available_mask;
	struct cpumask *alloced_mask;

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
		/*
		 * Prior to win8, all channel interrupts are
		 * delivered on cpu 0.
		 * Also if the channel is not a performance critical
		 * channel, bind it to cpu 0.
		 */
		channel->numa_node = 0;
		channel->target_cpu = 0;
		channel->target_vp = hv_context.vp_index[0];
		return;
	}

	/*
	 * Based on the channel affinity policy, we will assign the NUMA
	 * nodes.
	 */

	if ((channel->affinity_policy == HV_BALANCED) || (!primary)) {
		while (true) {
			next_node = next_numa_node_id++;
			if (next_node == nr_node_ids) {
				next_node = next_numa_node_id = 0;
				continue;
			}
			if (cpumask_empty(cpumask_of_node(next_node)))
				continue;
			break;
		}
		channel->numa_node = next_node;
		primary = channel;
	}
	alloced_mask = &hv_context.hv_numa_map[primary->numa_node];

	if (cpumask_weight(alloced_mask) ==
	    cpumask_weight(cpumask_of_node(primary->numa_node))) {
		/*
		 * We have cycled through all the CPUs in the node;
		 * reset the alloced map.
		 */
		cpumask_clear(alloced_mask);
	}

	cpumask_xor(&available_mask, alloced_mask,
		    cpumask_of_node(primary->numa_node));

	cur_cpu = -1;

	if (primary->affinity_policy == HV_LOCALIZED) {
		/*
		 * Normally Hyper-V host doesn't create more subchannels
		 * than there are VCPUs on the node but it is possible when not
		 * all present VCPUs on the node are initialized by guest.
		 * Clear the alloced_cpus_in_node to start over.
		 */
		if (cpumask_equal(&primary->alloced_cpus_in_node,
				  cpumask_of_node(primary->numa_node)))
			cpumask_clear(&primary->alloced_cpus_in_node);
	}

	while (true) {
		cur_cpu = cpumask_next(cur_cpu, &available_mask);
		if (cur_cpu >= nr_cpu_ids) {
			cur_cpu = -1;
			cpumask_copy(&available_mask,
				     cpumask_of_node(primary->numa_node));
			continue;
		}

		if (primary->affinity_policy == HV_LOCALIZED) {
			/*
			 * NOTE: in the case of sub-channel, we clear the
			 * sub-channel related bit(s) in
			 * primary->alloced_cpus_in_node in
			 * hv_process_channel_removal(), so when we
			 * reload drivers like hv_netvsc in SMP guest, here
			 * we're able to re-allocate
			 * bit from primary->alloced_cpus_in_node.
			 */
			if (!cpumask_test_cpu(cur_cpu,
					      &primary->alloced_cpus_in_node)) {
				cpumask_set_cpu(cur_cpu,
						&primary->alloced_cpus_in_node);
				cpumask_set_cpu(cur_cpu, alloced_mask);
				break;
			}
		} else {
			cpumask_set_cpu(cur_cpu, alloced_mask);
			break;
		}
	}

	channel->target_cpu = cur_cpu;
	channel->target_vp = hv_context.vp_index[cur_cpu];
}

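/*
 * vmbus_wait_for_unload - Wait for CHANNELMSG_UNLOAD_RESPONSE on the crash
 * path, where interrupts and scheduling may not be usable, by polling the
 * per-CPU SynIC message pages directly.
 */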
static void vmbus_wait_for_unload(void)
{
	int cpu;
	void *page_addr;
	struct hv_message *msg;
	struct vmbus_channel_message_header *hdr;
	u32 message_type;

	/*
	 * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
	 * used for initial contact or to CPU0 depending on host version. When
	 * we're crashing on a different CPU let's hope that IRQ handler on
	 * the cpu which receives CHANNELMSG_UNLOAD_RESPONSE is still
	 * functional and vmbus_unload_response() will complete
	 * vmbus_connection.unload_event. If not, the last thing we can do is
	 * read message pages for all CPUs directly.
	 */
	while (1) {
		if (completion_done(&vmbus_connection.unload_event))
			break;

		for_each_online_cpu(cpu) {
			struct hv_per_cpu_context *hv_cpu
				= per_cpu_ptr(hv_context.cpu_context, cpu);

			page_addr = hv_cpu->synic_message_page;
			msg = (struct hv_message *)page_addr
				+ VMBUS_MESSAGE_SINT;

			message_type = READ_ONCE(msg->header.message_type);
			if (message_type == HVMSG_NONE)
				continue;

			hdr = (struct vmbus_channel_message_header *)
				msg->u.payload;

			if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
				complete(&vmbus_connection.unload_event);

			vmbus_signal_eom(msg, message_type);
		}

		mdelay(10);
	}

	/*
	 * We're crashing and already got the UNLOAD_RESPONSE, cleanup all
	 * maybe-pending messages on all CPUs to be able to receive new
	 * messages after we reconnect.
	 */
	for_each_online_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		page_addr = hv_cpu->synic_message_page;
		msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
		msg->header.message_type = HVMSG_NONE;
	}
}

/*
 * vmbus_unload_response - Handler for the unload response.
 */
static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
{
	/*
	 * This is a global event; just wakeup the waiting thread.
	 * Once we successfully unload, we can cleanup the monitor state.
	 */
	complete(&vmbus_connection.unload_event);
}

void vmbus_initiate_unload(bool crash)
{
	struct vmbus_channel_message_header hdr;

	/* Pre-Win2012R2 hosts don't support reconnect */
	if (vmbus_proto_version < VERSION_WIN8_1)
		return;

	init_completion(&vmbus_connection.unload_event);
	memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
	hdr.msgtype = CHANNELMSG_UNLOAD;
	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
		       !crash);

	/*
	 * vmbus_initiate_unload() is also called on crash and the crash can
	 * happen in an interrupt context, where scheduling is impossible.
	 */
	if (!crash)
		wait_for_completion(&vmbus_connection.unload_event);
	else
		vmbus_wait_for_unload();
}

/*
 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
 */
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_offer_channel *offer;
	struct vmbus_channel *newchannel;

	offer = (struct vmbus_channel_offer_channel *)hdr;

	/* Allocate the channel object and save this offer. */
	newchannel = alloc_channel();
	if (!newchannel) {
		vmbus_release_relid(offer->child_relid);
		atomic_dec(&vmbus_connection.offer_in_progress);
		pr_err("Unable to allocate channel object\n");
		return;
	}

	/*
	 * Set up state for signalling the host.
	 */
	newchannel->sig_event = (struct hv_input_signal_event *)
		(ALIGN((unsigned long)
		       &newchannel->sig_buf,
		       HV_HYPERCALL_PARAM_ALIGN));

	newchannel->sig_event->connectionid.asu32 = 0;
	newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
	newchannel->sig_event->flag_number = 0;
	newchannel->sig_event->rsvdz = 0;

	if (vmbus_proto_version != VERSION_WS2008) {
		newchannel->is_dedicated_interrupt =
				(offer->is_dedicated_interrupt != 0);
		newchannel->sig_event->connectionid.u.id =
				offer->connection_id;
	}

	memcpy(&newchannel->offermsg, offer,
	       sizeof(struct vmbus_channel_offer_channel));
	newchannel->monitor_grp = (u8)offer->monitorid / 32;
	newchannel->monitor_bit = (u8)offer->monitorid % 32;

	vmbus_process_offer(newchannel);
}

/*
 * vmbus_onoffer_rescind - Rescind offer handler.
 *
 * We queue a work item to process this offer synchronously.
 */
static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_rescind_offer *rescind;
	struct vmbus_channel *channel;
	unsigned long flags;
	struct device *dev;

	rescind = (struct vmbus_channel_rescind_offer *)hdr;

	/*
	 * The offer msg and the corresponding rescind msg
	 * from the host are guaranteed to be ordered -
	 * offer comes in first and then the rescind.
	 * Since we process these events in work elements,
	 * and with preemption, we may end up processing
	 * the events out of order. Given that we handle these
	 * work elements on the same CPU, this is possible only
	 * in the case of preemption. In any case wait here
	 * until the offer processing has moved beyond the
	 * point where the channel is discoverable.
	 */

	while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
		/*
		 * Wait here until any channel offer currently being
		 * processed has completed.
		 */
		msleep(1);
	}

	mutex_lock(&vmbus_connection.channel_mutex);
	channel = relid2channel(rescind->child_relid);
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (channel == NULL) {
		/*
		 * We failed in processing the offer message;
		 * we would have cleaned up the relid in that
		 * failure path.
		 */
		return;
	}

	spin_lock_irqsave(&channel->lock, flags);
	channel->rescind = true;
	spin_unlock_irqrestore(&channel->lock, flags);

	vmbus_rescind_cleanup(channel);

	if (channel->device_obj) {
		if (channel->chn_rescind_callback) {
			channel->chn_rescind_callback(channel);
			return;
		}
		/*
		 * We will have to unregister this device from the
		 * driver core.
		 */
		dev = get_device(&channel->device_obj->device);
		if (dev) {
			vmbus_device_unregister(channel->device_obj);
			put_device(dev);
		}
	}
	if (channel->primary_channel != NULL) {
		/*
		 * Sub-channel is being rescinded. Following is the channel
		 * close sequence when initiated from the driver (refer to
		 * vmbus_close() for details):
		 * 1. Close all sub-channels first
		 * 2. Then close the primary channel.
		 */
		if (channel->state == CHANNEL_OPEN_STATE) {
			/*
			 * The channel is currently not open;
			 * it is safe for us to cleanup the channel.
			 */
			mutex_lock(&vmbus_connection.channel_mutex);
			hv_process_channel_removal(channel,
				channel->offermsg.child_relid);
			mutex_unlock(&vmbus_connection.channel_mutex);
		}
	}
}

void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
{
	mutex_lock(&vmbus_connection.channel_mutex);

	BUG_ON(!is_hvsock_channel(channel));

	channel->rescind = true;
	vmbus_device_unregister(channel->device_obj);

	mutex_unlock(&vmbus_connection.channel_mutex);
}
EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);

/*
 * vmbus_onoffers_delivered -
 * This is invoked when all offers have been delivered.
 *
 * Nothing to do here.
 */
static void vmbus_onoffers_delivered(
			struct vmbus_channel_message_header *hdr)
{
}

/*
 * vmbus_onopen_result - Open result handler.
 *
 * This is invoked when we receive a response to our channel open request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_open_result *result;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_open_channel *openmsg;
	unsigned long flags;

	result = (struct vmbus_channel_open_result *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
			openmsg =
			(struct vmbus_channel_open_channel *)msginfo->msg;
			if (openmsg->child_relid == result->child_relid &&
			    openmsg->openid == result->openid) {
				memcpy(&msginfo->response.open_result,
				       result,
				       sizeof(
					struct vmbus_channel_open_result));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_ongpadl_created - GPADL created handler.
 *
 * This is invoked when we receive a response to our gpadl create request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_created *gpadlcreated;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_header *gpadlheader;
	unsigned long flags;

	gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;

	/*
	 * Find the establish msg, copy the result and signal/unblock the wait
	 * event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
			gpadlheader =
			(struct vmbus_channel_gpadl_header *)requestheader;

			if ((gpadlcreated->child_relid ==
			     gpadlheader->child_relid) &&
			    (gpadlcreated->gpadl == gpadlheader->gpadl)) {
				memcpy(&msginfo->response.gpadl_created,
				       gpadlcreated,
				       sizeof(
					struct vmbus_channel_gpadl_created));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_ongpadl_torndown - GPADL torndown handler.
 *
 * This is invoked when we receive a response to our gpadl teardown request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_torndown(
			struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_torndown *gpadl_torndown;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_teardown *gpadl_teardown;
	unsigned long flags;

	gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;

	/*
	 * Find the teardown msg, copy the result and signal/unblock the wait
	 * event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
			gpadl_teardown =
			(struct vmbus_channel_gpadl_teardown *)requestheader;

			if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
				memcpy(&msginfo->response.gpadl_torndown,
				       gpadl_torndown,
				       sizeof(
					struct vmbus_channel_gpadl_torndown));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_onversion_response - Version response handler.
 *
 * This is invoked when we receive a response to our initiate contact request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onversion_response(
		struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_version_response *version_response;
	unsigned long flags;

	version_response = (struct vmbus_channel_version_response *)hdr;
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype ==
		    CHANNELMSG_INITIATE_CONTACT) {
			memcpy(&msginfo->response.version_response,
			       version_response,
			       sizeof(struct vmbus_channel_version_response));
			complete(&msginfo->waitevent);
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/* Channel message dispatch table */
const struct vmbus_channel_message_table_entry
channel_message_table[CHANNELMSG_COUNT] = {
	{ CHANNELMSG_INVALID,			0, NULL },
	{ CHANNELMSG_OFFERCHANNEL,		0, vmbus_onoffer },
	{ CHANNELMSG_RESCIND_CHANNELOFFER,	0, vmbus_onoffer_rescind },
	{ CHANNELMSG_REQUESTOFFERS,		0, NULL },
	{ CHANNELMSG_ALLOFFERS_DELIVERED,	1, vmbus_onoffers_delivered },
	{ CHANNELMSG_OPENCHANNEL,		0, NULL },
	{ CHANNELMSG_OPENCHANNEL_RESULT,	1, vmbus_onopen_result },
	{ CHANNELMSG_CLOSECHANNEL,		0, NULL },
	{ CHANNELMSG_GPADL_HEADER,		0, NULL },
	{ CHANNELMSG_GPADL_BODY,		0, NULL },
	{ CHANNELMSG_GPADL_CREATED,		1, vmbus_ongpadl_created },
	{ CHANNELMSG_GPADL_TEARDOWN,		0, NULL },
	{ CHANNELMSG_GPADL_TORNDOWN,		1, vmbus_ongpadl_torndown },
	{ CHANNELMSG_RELID_RELEASED,		0, NULL },
	{ CHANNELMSG_INITIATE_CONTACT,		0, NULL },
	{ CHANNELMSG_VERSION_RESPONSE,		1, vmbus_onversion_response },
	{ CHANNELMSG_UNLOAD,			0, NULL },
	{ CHANNELMSG_UNLOAD_RESPONSE,		1, vmbus_unload_response },
	{ CHANNELMSG_18,			0, NULL },
	{ CHANNELMSG_19,			0, NULL },
	{ CHANNELMSG_20,			0, NULL },
	{ CHANNELMSG_TL_CONNECT_REQUEST,	0, NULL },
};

/*
 * vmbus_onmessage - Handler for channel protocol messages.
 *
 * This is invoked in the vmbus worker thread context.
 */
void vmbus_onmessage(void *context)
{
	struct hv_message *msg = context;
	struct vmbus_channel_message_header *hdr;
	int size;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;
	size = msg->header.payload_size;

	if (hdr->msgtype >= CHANNELMSG_COUNT) {
		pr_err("Received invalid channel message type %d size %d\n",
			   hdr->msgtype, size);
		print_hex_dump_bytes("", DUMP_PREFIX_NONE,
				     (unsigned char *)msg->u.payload, size);
		return;
	}

	if (channel_message_table[hdr->msgtype].message_handler)
		channel_message_table[hdr->msgtype].message_handler(hdr);
	else
		pr_err("Unhandled channel message type %d\n", hdr->msgtype);
}

/*
 * vmbus_request_offers - Send a request to get all our pending offers.
 */
int vmbus_request_offers(void)
{
	struct vmbus_channel_message_header *msg;
	struct vmbus_channel_msginfo *msginfo;
	int ret;

	msginfo = kmalloc(sizeof(*msginfo) +
			  sizeof(struct vmbus_channel_message_header),
			  GFP_KERNEL);
	if (!msginfo)
		return -ENOMEM;

	msg = (struct vmbus_channel_message_header *)msginfo->msg;

	msg->msgtype = CHANNELMSG_REQUESTOFFERS;

	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
			     true);
	if (ret != 0) {
		pr_err("Unable to request offers - %d\n", ret);

		goto cleanup;
	}

cleanup:
	kfree(msginfo);

	return ret;
}

/*
 * Retrieve the (sub) channel on which to send an outgoing request.
 * When a primary channel has multiple sub-channels, we try to
 * distribute the load equally amongst all available channels.
 */
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
{
	struct list_head *cur, *tmp;
	int cur_cpu;
	struct vmbus_channel *cur_channel;
	struct vmbus_channel *outgoing_channel = primary;
	int next_channel;
	int i = 1;

	if (list_empty(&primary->sc_list))
		return outgoing_channel;

	next_channel = primary->next_oc++;

	if (next_channel > (primary->num_sc)) {
		primary->next_oc = 0;
		return outgoing_channel;
	}

	cur_cpu = hv_context.vp_index[get_cpu()];
	put_cpu();
	list_for_each_safe(cur, tmp, &primary->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
		if (cur_channel->state != CHANNEL_OPENED_STATE)
			continue;

		if (cur_channel->target_vp == cur_cpu)
			return cur_channel;

		if (i == next_channel)
			return cur_channel;

		i++;
	}

	return outgoing_channel;
}
EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel);

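/*
 * invoke_sc_cb - Invoke the registered sub-channel creation callback on
 * every sub-channel that already exists on the primary channel.
 */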
static void invoke_sc_cb(struct vmbus_channel *primary_channel)
{
	struct list_head *cur, *tmp;
	struct vmbus_channel *cur_channel;

	if (primary_channel->sc_creation_callback == NULL)
		return;

	list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);

		primary_channel->sc_creation_callback(cur_channel);
	}
}

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
				void (*sc_cr_cb)(struct vmbus_channel *new_sc))
{
	primary_channel->sc_creation_callback = sc_cr_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);

bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
{
	bool ret;

	ret = !list_empty(&primary->sc_list);

	if (ret) {
		/*
		 * Invoke the callback on sub-channel creation.
		 * This will present a uniform interface to the
		 * clients.
		 */
		invoke_sc_cb(primary);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);

void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
		void (*chn_rescind_cb)(struct vmbus_channel *))
{
	channel->chn_rescind_callback = chn_rescind_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);