/*
 *
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */

#ifndef _HYPERV_H
#define _HYPERV_H

#include <uapi/linux/hyperv.h>
#include <uapi/asm/hyperv.h>

#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>


#define MAX_PAGE_BUFFER_COUNT		32
#define MAX_MULTIPAGE_BUFFER_COUNT	32 /* 128K */

#pragma pack(push, 1)

/* Single-page buffer */
struct hv_page_buffer {
	u32 len;
	u32 offset;
	u64 pfn;
};

/* Multiple-page buffer */
struct hv_multipage_buffer {
	/* Length and Offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};

/*
 * Multiple-page buffer array; the pfn array is variable size:
 * The number of entries in the PFN array is determined by
 * "len" and "offset".
 */
struct hv_mpb_array {
	/* Length and Offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[];
};

/* 0x18 includes the proprietary packet header */
#define MAX_PAGE_BUFFER_PACKET		(0x18 +			\
					(sizeof(struct hv_page_buffer) * \
					 MAX_PAGE_BUFFER_COUNT))
#define MAX_MULTIPAGE_BUFFER_PACKET	(0x18 +			\
					 sizeof(struct hv_multipage_buffer))


#pragma pack(pop)
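
/*
 * Illustrative sketch: how a driver might describe a physically contiguous
 * kernel buffer with hv_page_buffer entries before handing it to
 * vmbus_sendpacket_pagebuffer() (declared later in this header).  The
 * helper name is hypothetical, and using virt_to_phys() to obtain the
 * starting PFN is an assumption about the caller.
 */
#if 0	/* example only, not compiled */
static u32 example_fill_page_buffers(void *buf, u32 len,
				     struct hv_page_buffer pb[],
				     u32 max_pages)
{
	u64 pfn = virt_to_phys(buf) >> PAGE_SHIFT;
	u32 offset = (unsigned long)buf & (PAGE_SIZE - 1);
	u32 i = 0;

	while (len && i < max_pages) {
		u32 bytes = PAGE_SIZE - offset;

		if (bytes > len)
			bytes = len;
		pb[i].pfn = pfn + i;
		pb[i].offset = offset;
		pb[i].len = bytes;
		len -= bytes;
		offset = 0;	/* only the first page has a non-zero offset */
		i++;
	}
	return i;		/* number of ranges filled */
}
#endif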

struct hv_ring_buffer {
	/* Offset in bytes from the start of ring data below */
	u32 write_index;

	/* Offset in bytes from the start of ring data below */
	u32 read_index;

	u32 interrupt_mask;

	/*
	 * Win8 uses some of the reserved bits to implement
	 * interrupt driven flow management. On the send side
	 * we can request that the receiver interrupt the sender
	 * when the ring transitions from being full to being able
	 * to handle a message of size "pending_send_sz".
	 *
	 * Add necessary state for this enhancement.
	 */
	u32 pending_send_sz;

	u32 reserved1[12];

	union {
		struct {
			u32 feat_pending_send_sz:1;
		};
		u32 value;
	} feature_bits;

	/* Pad it to PAGE_SIZE so that data starts on page boundary */
	u8 reserved2[4028];

	/*
	 * Ring data starts here + RingDataStartOffset
	 * !!! DO NOT place any fields below this !!!
	 */
	u8 buffer[0];
} __packed;

struct hv_ring_buffer_info {
	struct hv_ring_buffer *ring_buffer;
	u32 ring_size;			/* Include the shared header */
	spinlock_t ring_lock;

	u32 ring_datasize;		/* < ring_size */
	u32 ring_data_startoffset;
};

/*
 *
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read from and to write to
 * for the specified ring buffer.
 */
static inline void
hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	/* Capture the read/write indices before they change */
	read_loc = rbi->ring_buffer->read_index;
	write_loc = rbi->ring_buffer->write_index;
	dsize = rbi->ring_datasize;

	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	*read = dsize - *write;
}
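
/*
 * Illustrative sketch: a channel callback might use
 * hv_get_ringbuffer_availbytes() on the channel's inbound ring to decide
 * whether a complete packet is waiting.  The function name is hypothetical,
 * and struct vmbus_channel / struct vmpacket_descriptor are declared later
 * in this header.
 */
#if 0	/* example only, not compiled */
static void example_onchannel_callback(void *context)
{
	struct vmbus_channel *channel = context;
	u32 bytes_avail_to_read, bytes_avail_to_write;

	hv_get_ringbuffer_availbytes(&channel->inbound,
				     &bytes_avail_to_read,
				     &bytes_avail_to_write);

	/* A packet is at least one descriptor long. */
	if (bytes_avail_to_read < sizeof(struct vmpacket_descriptor))
		return;

	/* ... drain the ring with vmbus_recvpacket()/vmbus_recvpacket_raw() ... */
}
#endif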

/*
 * VMBUS version is a 32 bit entity broken up into
 * two 16 bit quantities: major_number. minor_number.
 *
 * 0 . 13 (Windows Server 2008)
 * 1 . 1  (Windows 7)
 * 2 . 4  (Windows 8)
 * 3 . 0  (Windows 8 R2)
 * 4 . 0  (Windows 10)
 */

#define VERSION_WS2008	((0 << 16) | (13))
#define VERSION_WIN7	((1 << 16) | (1))
#define VERSION_WIN8	((2 << 16) | (4))
#define VERSION_WIN8_1	((3 << 16) | (0))
#define VERSION_WIN10	((4 << 16) | (0))

#define VERSION_INVAL -1

#define VERSION_CURRENT VERSION_WIN10

/* The maximum size of the pipe payload is 16K */
#define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)

/* Define PipeMode values. */
#define VMBUS_PIPE_TYPE_BYTE		0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE		0x00000004

/* The size of the user defined data buffer for non-pipe offers. */
#define MAX_USER_DEFINED_BYTES		120

/* The size of the user defined data buffer for pipe offers. */
#define MAX_PIPE_USER_DEFINED_BYTES	116

/*
 * At the center of the Channel Management library is the Channel Offer. This
 * struct contains the fundamental information about an offer.
 */
struct vmbus_channel_offer {
	uuid_le if_type;
	uuid_le if_instance;

	/*
	 * These two fields are not currently used.
	 */
	u64 reserved1;
	u64 reserved2;

	u16 chn_flags;
	u16 mmio_megabytes;		/* in bytes * 1024 * 1024 */

	union {
		/* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
		struct {
			unsigned char user_def[MAX_USER_DEFINED_BYTES];
		} std;

		/*
		 * Pipes:
		 * The following structure is an integrated pipe protocol, which
		 * is implemented on top of standard user-defined data. Pipe
		 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
		 * use.
		 */
		struct {
			u32 pipe_mode;
			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
		} pipe;
	} u;
	/*
	 * The sub_channel_index is defined in win8.
	 */
	u16 sub_channel_index;
	u16 reserved3;
} __packed;

/* Server Flags */
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE	1
#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES	2
#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS		4
#define VMBUS_CHANNEL_NAMED_PIPE_MODE			0x10
#define VMBUS_CHANNEL_LOOPBACK_OFFER			0x100
#define VMBUS_CHANNEL_PARENT_OFFER			0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400

struct vmpacket_descriptor {
	u16 type;
	u16 offset8;
	u16 len8;
	u16 flags;
	u64 trans_id;
} __packed;

struct vmpacket_header {
	u32 prev_pkt_start_offset;
	struct vmpacket_descriptor descriptor;
} __packed;

struct vmtransfer_page_range {
	u32 byte_count;
	u32 byte_offset;
} __packed;

struct vmtransfer_page_packet_header {
	struct vmpacket_descriptor d;
	u16 xfer_pageset_id;
	u8 sender_owns_set;
	u8 reserved;
	u32 range_cnt;
	struct vmtransfer_page_range ranges[1];
} __packed;

struct vmgpadl_packet_header {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;
} __packed;

struct vmadd_remove_transfer_page_set {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u16 xfer_pageset_id;
	u16 reserved;
} __packed;

/*
 * This structure defines a range in guest physical space that can be made to
 * look virtually contiguous.
 */
struct gpa_range {
	u32 byte_count;
	u32 byte_offset;
	u64 pfn_array[0];
};

/*
 * This is the format for an Establish Gpadl packet, which contains a handle by
 * which this GPADL will be known and a set of GPA ranges associated with it.
 * This can be converted to an MDL by the guest OS. If there are multiple GPA
 * ranges, then the resulting MDL will be "chained," representing multiple VA
 * ranges.
 */
struct vmestablish_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/*
 * This is the format for a Teardown Gpadl packet, which indicates that the
 * GPADL handle in the Establish Gpadl packet will never be referenced again.
 */
struct vmteardown_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;	/* for alignment to an 8-byte boundary */
} __packed;

/*
 * This is the format for a GPA-Direct packet, which contains a set of GPA
 * ranges, in addition to commands and/or data.
 */
struct vmdata_gpa_direct {
	struct vmpacket_descriptor d;
	u32 reserved;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;
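
/*
 * Illustrative sketch: the number of entries in gpa_range.pfn_array is not
 * stored explicitly; it is implied by byte_offset and byte_count (the pages
 * spanned by the range).  The helper name is hypothetical and byte_offset is
 * assumed to be an offset within the first page.
 */
#if 0	/* example only, not compiled */
static u32 example_gpa_range_pfn_count(const struct gpa_range *range)
{
	return (range->byte_offset + range->byte_count + PAGE_SIZE - 1) >>
	       PAGE_SHIFT;
}
#endif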

/* This is the format for an Additional Data Packet. */
struct vmadditional_data {
	struct vmpacket_descriptor d;
	u64 total_bytes;
	u32 offset;
	u32 byte_cnt;
	unsigned char data[1];
} __packed;

union vmpacket_largest_possible_header {
	struct vmpacket_descriptor simple_hdr;
	struct vmtransfer_page_packet_header xfer_page_hdr;
	struct vmgpadl_packet_header gpadl_hdr;
	struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
	struct vmestablish_gpadl establish_gpadl_hdr;
	struct vmteardown_gpadl teardown_gpadl_hdr;
	struct vmdata_gpa_direct data_gpa_direct_hdr;
};

#define VMPACKET_DATA_START_ADDRESS(__packet)	\
	(void *)(((unsigned char *)__packet) +	\
	 ((struct vmpacket_descriptor *)__packet)->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)		\
	((((struct vmpacket_descriptor *)__packet)->len8 -	\
	  ((struct vmpacket_descriptor *)__packet)->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)	\
	(((struct vmpacket_descriptor *)__packet)->type)
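
/*
 * Illustrative sketch: offset8 and len8 in the descriptor are expressed in
 * units of 8 bytes, so the payload of a received packet starts offset8 * 8
 * bytes into the packet and is (len8 - offset8) * 8 bytes long - the same
 * arithmetic the macros above perform.  The helper name is hypothetical.
 */
#if 0	/* example only, not compiled */
static void *example_packet_payload(struct vmpacket_descriptor *desc,
				    u32 *payload_len)
{
	*payload_len = (desc->len8 - desc->offset8) * 8;
	return (u8 *)desc + desc->offset8 * 8;
}
#endif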

enum vmbus_packet_type {
	VM_PKT_INVALID				= 0x0,
	VM_PKT_SYNCH				= 0x1,
	VM_PKT_ADD_XFER_PAGESET			= 0x2,
	VM_PKT_RM_XFER_PAGESET			= 0x3,
	VM_PKT_ESTABLISH_GPADL			= 0x4,
	VM_PKT_TEARDOWN_GPADL			= 0x5,
	VM_PKT_DATA_INBAND			= 0x6,
	VM_PKT_DATA_USING_XFER_PAGES		= 0x7,
	VM_PKT_DATA_USING_GPADL			= 0x8,
	VM_PKT_DATA_USING_GPA_DIRECT		= 0x9,
	VM_PKT_CANCEL_REQUEST			= 0xa,
	VM_PKT_COMP				= 0xb,
	VM_PKT_DATA_USING_ADDITIONAL_PKT	= 0xc,
	VM_PKT_ADDITIONAL_DATA			= 0xd
};

#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED	1


/* Version 1 messages */
enum vmbus_channel_message_type {
	CHANNELMSG_INVALID		=  0,
	CHANNELMSG_OFFERCHANNEL		=  1,
	CHANNELMSG_RESCIND_CHANNELOFFER	=  2,
	CHANNELMSG_REQUESTOFFERS	=  3,
	CHANNELMSG_ALLOFFERS_DELIVERED	=  4,
	CHANNELMSG_OPENCHANNEL		=  5,
	CHANNELMSG_OPENCHANNEL_RESULT	=  6,
	CHANNELMSG_CLOSECHANNEL		=  7,
	CHANNELMSG_GPADL_HEADER		=  8,
	CHANNELMSG_GPADL_BODY		=  9,
	CHANNELMSG_GPADL_CREATED	= 10,
	CHANNELMSG_GPADL_TEARDOWN	= 11,
	CHANNELMSG_GPADL_TORNDOWN	= 12,
	CHANNELMSG_RELID_RELEASED	= 13,
	CHANNELMSG_INITIATE_CONTACT	= 14,
	CHANNELMSG_VERSION_RESPONSE	= 15,
	CHANNELMSG_UNLOAD		= 16,
	CHANNELMSG_UNLOAD_RESPONSE	= 17,
	CHANNELMSG_COUNT
};

struct vmbus_channel_message_header {
	enum vmbus_channel_message_type msgtype;
	u32 padding;
} __packed;

/* Query VMBus Version parameters */
struct vmbus_channel_query_vmbus_version {
	struct vmbus_channel_message_header header;
	u32 version;
} __packed;

/* VMBus Version Supported parameters */
struct vmbus_channel_version_supported {
	struct vmbus_channel_message_header header;
	u8 version_supported;
} __packed;

/* Offer Channel parameters */
struct vmbus_channel_offer_channel {
	struct vmbus_channel_message_header header;
	struct vmbus_channel_offer offer;
	u32 child_relid;
	u8 monitorid;
	/*
	 * win7 and beyond splits this field into a bit field.
	 */
	u8 monitor_allocated:1;
	u8 reserved:7;
	/*
	 * These are new fields added in win7 and later.
	 * Do not access these fields without checking the
	 * negotiated protocol.
	 *
	 * If "is_dedicated_interrupt" is set, we must not set the
	 * associated bit in the channel bitmap while sending the
	 * interrupt to the host.
	 *
	 * connection_id is to be used in signaling the host.
	 */
	u16 is_dedicated_interrupt:1;
	u16 reserved1:15;
	u32 connection_id;
} __packed;

/* Rescind Offer parameters */
struct vmbus_channel_rescind_offer {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

/*
 * Request Offer -- no parameters, SynIC message contains the partition ID
 * Set Snoop -- no parameters, SynIC message contains the partition ID
 * Clear Snoop -- no parameters, SynIC message contains the partition ID
 * All Offers Delivered -- no parameters, SynIC message contains the partition
 *			   ID
 * Flush Client -- no parameters, SynIC message contains the partition ID
 */

/* Open Channel parameters */
struct vmbus_channel_open_channel {
	struct vmbus_channel_message_header header;

	/* Identifies the specific VMBus channel that is being opened. */
	u32 child_relid;

	/* ID making a particular open request at a channel offer unique. */
	u32 openid;

	/* GPADL for the channel's ring buffer. */
	u32 ringbuffer_gpadlhandle;

	/*
	 * Starting with win8, this field will be used to specify
	 * the target virtual processor on which to deliver the interrupt for
	 * the host to guest communication.
	 * Prior to win8, incoming channel interrupts would only
	 * be delivered on cpu 0. Setting this value to 0 would
	 * preserve the earlier behavior.
	 */
	u32 target_vp;

	/*
	 * The upstream ring buffer begins at offset zero in the memory
	 * described by RingBufferGpadlHandle. The downstream ring buffer
	 * follows it at this offset (in pages).
	 */
	u32 downstream_ringbuffer_pageoffset;

	/* User-specific data to be passed along to the server endpoint. */
	unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;
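
/*
 * Illustrative sketch: both ring buffers of a channel live in the single
 * GPADL named by ringbuffer_gpadlhandle.  The upstream (send) ring starts at
 * page offset zero and the downstream (receive) ring follows it, so the page
 * offset reported to the host is the size of the send ring in pages.  The
 * helper name is hypothetical; this mirrors what vmbus_open() sets up when it
 * builds the open request.
 */
#if 0	/* example only, not compiled */
static void example_fill_open_msg(struct vmbus_channel_open_channel *msg,
				  u32 send_ringbuffer_size)
{
	msg->downstream_ringbuffer_pageoffset =
		send_ringbuffer_size >> PAGE_SHIFT;
}
#endif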

/* Open Channel Result parameters */
struct vmbus_channel_open_result {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 openid;
	u32 status;
} __packed;

/* Close channel parameters */
struct vmbus_channel_close_channel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

/* Channel Message GPADL */
#define GPADL_TYPE_RING_BUFFER		1
#define GPADL_TYPE_SERVER_SAVE_AREA	2
#define GPADL_TYPE_TRANSACTION		8

/*
 * The number of PFNs in a GPADL message is defined by the number of
 * pages that would be spanned by ByteCount and ByteOffset. If the
 * implied number of PFNs won't fit in this packet, there will be a
 * follow-up packet that contains more.
 */
struct vmbus_channel_gpadl_header {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u16 range_buflen;
	u16 rangecount;
	struct gpa_range range[0];
} __packed;

/* This is the follow-up packet that contains more PFNs. */
struct vmbus_channel_gpadl_body {
	struct vmbus_channel_message_header header;
	u32 msgnumber;
	u32 gpadl;
	u64 pfn[0];
} __packed;

struct vmbus_channel_gpadl_created {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u32 creation_status;
} __packed;

struct vmbus_channel_gpadl_teardown {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
} __packed;

struct vmbus_channel_gpadl_torndown {
	struct vmbus_channel_message_header header;
	u32 gpadl;
} __packed;

struct vmbus_channel_relid_released {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

struct vmbus_channel_initiate_contact {
	struct vmbus_channel_message_header header;
	u32 vmbus_version_requested;
	u32 target_vcpu; /* The VCPU the host should respond to */
	u64 interrupt_page;
	u64 monitor_page1;
	u64 monitor_page2;
} __packed;

struct vmbus_channel_version_response {
	struct vmbus_channel_message_header header;
	u8 version_supported;
} __packed;

enum vmbus_channel_state {
	CHANNEL_OFFER_STATE,
	CHANNEL_OPENING_STATE,
	CHANNEL_OPEN_STATE,
	CHANNEL_OPENED_STATE,
};

/*
 * Represents each channel msg on the vmbus connection. This is a
 * variable-size data structure depending on the msg type itself.
 */
struct vmbus_channel_msginfo {
	/* Bookkeeping stuff */
	struct list_head msglistentry;

	/* So far, this is only used to handle gpadl body message */
	struct list_head submsglist;

	/* Synchronize the request/response if needed */
	struct completion waitevent;
	union {
		struct vmbus_channel_version_supported version_supported;
		struct vmbus_channel_open_result open_result;
		struct vmbus_channel_gpadl_torndown gpadl_torndown;
		struct vmbus_channel_gpadl_created gpadl_created;
		struct vmbus_channel_version_response version_response;
	} response;

	u32 msgsize;
	/*
	 * The channel message that goes out on the "wire".
	 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
	 */
	unsigned char msg[0];
};

struct vmbus_close_msg {
	struct vmbus_channel_msginfo info;
	struct vmbus_channel_close_channel msg;
};

/* Define connection identifier type. */
union hv_connection_id {
	u32 asu32;
	struct {
		u32 id:24;
		u32 reserved:8;
	} u;
};

/* Definition of the hv_signal_event hypercall input structure. */
struct hv_input_signal_event {
	union hv_connection_id connectionid;
	u16 flag_number;
	u16 rsvdz;
};

struct hv_input_signal_event_buffer {
	u64 align8;
	struct hv_input_signal_event event;
};

enum hv_signal_policy {
	HV_SIGNAL_POLICY_DEFAULT = 0,
	HV_SIGNAL_POLICY_EXPLICIT,
};

struct vmbus_channel {
	/* Unique channel id */
	int id;

	struct list_head listentry;

	struct hv_device *device_obj;

	enum vmbus_channel_state state;

	struct vmbus_channel_offer_channel offermsg;
	/*
	 * These are based on the OfferMsg.MonitorId.
	 * Save it here for easy access.
	 */
	u8 monitor_grp;
	u8 monitor_bit;

	bool rescind; /* got rescind msg */

	u32 ringbuffer_gpadlhandle;

	/* Allocated memory for ring buffer */
	void *ringbuffer_pages;
	u32 ringbuffer_pagecount;
	struct hv_ring_buffer_info outbound;	/* send to parent */
	struct hv_ring_buffer_info inbound;	/* receive from parent */
	spinlock_t inbound_lock;

	struct vmbus_close_msg close_msg;

	/* Channel callbacks are invoked in this workqueue context */
	/* HANDLE dataWorkQueue; */

	void (*onchannel_callback)(void *context);
	void *channel_callback_context;

	/*
	 * A channel can be marked for efficient (batched)
	 * reading:
	 * If batched_reading is set to "true", we read until the
	 * channel is empty and hold off interrupts from the host
	 * during the entire read process.
	 * If batched_reading is set to "false", the client is not
	 * going to perform batched reading.
	 *
	 * By default we will enable batched reading; specific
	 * drivers that don't want this behavior can turn it off.
	 */

	bool batched_reading;

	bool is_dedicated_interrupt;
	struct hv_input_signal_event_buffer sig_buf;
	struct hv_input_signal_event *sig_event;

	/*
	 * Starting with win8, this field will be used to specify
	 * the target virtual processor on which to deliver the interrupt for
	 * the host to guest communication.
	 * Prior to win8, incoming channel interrupts would only
	 * be delivered on cpu 0. Setting this value to 0 would
	 * preserve the earlier behavior.
	 */
	u32 target_vp;
	/* The corresponding CPUID in the guest */
	u32 target_cpu;
	/*
	 * State to manage the CPU affiliation of channels.
	 */
	struct cpumask alloced_cpus_in_node;
	int numa_node;
	/*
	 * Support for sub-channels. For high performance devices,
	 * it will be useful to have multiple sub-channels to support
	 * a scalable communication infrastructure with the host.
	 * The support for sub-channels is implemented as an extension
	 * to the current infrastructure.
	 * The initial offer is considered the primary channel and this
	 * offer message will indicate if the host supports sub-channels.
	 * The guest is free to ask for sub-channels to be offered and can
	 * open these sub-channels as a normal "primary" channel. However,
	 * all sub-channels will have the same type and instance guids as the
	 * primary channel. Requests sent on a given channel will result in a
	 * response on the same channel.
	 */

	/*
	 * Sub-channel creation callback. This callback will be called in
	 * process context when a sub-channel offer is received from the host.
	 * The guest can open the sub-channel in the context of this callback.
	 */
	void (*sc_creation_callback)(struct vmbus_channel *new_sc);

	/*
	 * The spinlock to protect the structure. It is being used to protect
	 * test-and-set access to various attributes of the structure as well
	 * as all sc_list operations.
	 */
	spinlock_t lock;
	/*
	 * All Sub-channels of a primary channel are linked here.
	 */
	struct list_head sc_list;
	/*
	 * Current number of sub-channels.
	 */
	int num_sc;
	/*
	 * Number of a sub-channel (position within sc_list) which is supposed
	 * to be used as the next outgoing channel.
	 */
	int next_oc;
	/*
	 * The primary channel this sub-channel belongs to.
	 * This will be NULL for the primary channel.
	 */
	struct vmbus_channel *primary_channel;
	/*
	 * Support per-channel state for use by vmbus drivers.
	 */
	void *per_channel_state;
	/*
	 * To support per-cpu lookup mapping of relid to channel,
	 * link up channels based on their CPU affinity.
	 */
	struct list_head percpu_list;
	/*
	 * Host signaling policy: The default policy will be
	 * based on the ring buffer state. We will also support
	 * a policy where the client driver can have explicit
	 * signaling control.
	 */
	enum hv_signal_policy signal_policy;
};

static inline void set_channel_signal_state(struct vmbus_channel *c,
					    enum hv_signal_policy policy)
{
	c->signal_policy = policy;
}

static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
{
	c->batched_reading = state;
}

static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
	c->per_channel_state = s;
}

static inline void *get_per_channel_state(struct vmbus_channel *c)
{
	return c->per_channel_state;
}
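
/*
 * Illustrative sketch: how a driver might use the helpers above to opt out
 * of batched reading and attach its own state to a channel.  The function
 * name and the driver_private argument are hypothetical.
 */
#if 0	/* example only, not compiled */
static void example_configure_channel(struct vmbus_channel *channel,
				      void *driver_private)
{
	/* Take an interrupt per packet instead of draining the ring. */
	set_channel_read_state(channel, false);

	/* Make the private state reachable from the channel callback. */
	set_per_channel_state(channel, driver_private);
}
#endif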

void vmbus_onmessage(void *context);

int vmbus_request_offers(void);

/*
 * APIs for managing sub-channels.
 */

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
			void (*sc_cr_cb)(struct vmbus_channel *new_sc));

/*
 * Retrieve the (sub) channel on which to send an outgoing request.
 * When a primary channel has multiple sub-channels, we choose a
 * channel whose VCPU binding is closest to the VCPU on which
 * this call is being made.
 */
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary);

/*
 * Check if sub-channels have already been offered. This API will be useful
 * when the driver is unloaded after establishing sub-channels. In this case,
 * when the driver is re-loaded, the driver would have to check if the
 * sub-channels have already been established before attempting to request
 * the creation of sub-channels.
 * This function returns TRUE to indicate that sub-channels have already been
 * created.
 * This function should be invoked after setting the callback function for
 * sub-channel creation.
 */
bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
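
/*
 * Illustrative sketch of the pattern described above: register the
 * sub-channel creation callback first, then check whether sub-channels
 * already exist (e.g. across a driver reload), and pick an outgoing channel
 * per request.  The example_* names are hypothetical.
 */
#if 0	/* example only, not compiled */
static void example_sc_created(struct vmbus_channel *new_sc)
{
	/* ... vmbus_open(new_sc, ...) with the driver's channel callback ... */
}

static void example_setup_subchannels(struct vmbus_channel *primary)
{
	vmbus_set_sc_create_callback(primary, example_sc_created);

	if (vmbus_are_subchannels_present(primary)) {
		/* Sub-channels survived a reload; reuse them. */
	}

	/*
	 * Later, when sending a request:
	 * struct vmbus_channel *out = vmbus_get_outgoing_channel(primary);
	 */
}
#endif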

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;
	struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_multipage_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_multipage_buffer range;
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_packet_mpb_array {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_mpb_array range;
} __packed;


extern int vmbus_open(struct vmbus_channel *channel,
		      u32 send_ringbuffersize,
		      u32 recv_ringbuffersize,
		      void *userdata,
		      u32 userdatalen,
		      void (*onchannel_callback)(void *context),
		      void *context);

extern void vmbus_close(struct vmbus_channel *channel);

extern int vmbus_sendpacket(struct vmbus_channel *channel,
			    void *buffer,
			    u32 bufferLen,
			    u64 requestid,
			    enum vmbus_packet_type type,
			    u32 flags);

extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
				void *buffer,
				u32 bufferLen,
				u64 requestid,
				enum vmbus_packet_type type,
				u32 flags,
				bool kick_q);

extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
				       struct hv_page_buffer pagebuffers[],
				       u32 pagecount,
				       void *buffer,
				       u32 bufferlen,
				       u64 requestid);

extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
					   struct hv_page_buffer pagebuffers[],
					   u32 pagecount,
					   void *buffer,
					   u32 bufferlen,
					   u64 requestid,
					   u32 flags,
					   bool kick_q);

extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
					    struct hv_multipage_buffer *mpb,
					    void *buffer,
					    u32 bufferlen,
					    u64 requestid);

extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
				     struct vmbus_packet_mpb_array *mpb,
				     u32 desc_size,
				     void *buffer,
				     u32 bufferlen,
				     u64 requestid);

extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
				 void *kbuffer,
				 u32 size,
				 u32 *gpadl_handle);

extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
				u32 gpadl_handle);

extern int vmbus_recvpacket(struct vmbus_channel *channel,
			    void *buffer,
			    u32 bufferlen,
			    u32 *buffer_actual_len,
			    u64 *requestid);

extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
				void *buffer,
				u32 bufferlen,
				u32 *buffer_actual_len,
				u64 *requestid);


extern void vmbus_ontimer(unsigned long data);
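
/*
 * Illustrative sketch of the usual channel lifecycle built on the
 * declarations above: open the channel with a receive callback, send an
 * in-band request that asks for a completion, and drain completions in the
 * callback.  The example_* names, the ring size, and the request layout are
 * hypothetical; struct hv_device is declared just below.
 */
#if 0	/* example only, not compiled */
#define EXAMPLE_RING_SIZE	(16 * PAGE_SIZE)

static void example_channel_cb(void *context)
{
	struct hv_device *dev = context;
	u8 buf[256];
	u32 actual;
	u64 req_id;

	while (vmbus_recvpacket(dev->channel, buf, sizeof(buf),
				&actual, &req_id) == 0 && actual) {
		/* ... match req_id against the driver's outstanding requests ... */
	}
}

static int example_start(struct hv_device *dev, void *request, u32 len)
{
	int ret;

	ret = vmbus_open(dev->channel, EXAMPLE_RING_SIZE, EXAMPLE_RING_SIZE,
			 NULL, 0, example_channel_cb, dev);
	if (ret)
		return ret;

	/* Ask for a completion so the host echoes the request id back. */
	return vmbus_sendpacket(dev->channel, request, len,
				(unsigned long)request, VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}
#endif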

/* Base driver object */
struct hv_driver {
	const char *name;

	/* the device type supported by this driver */
	uuid_le dev_type;
	const struct hv_vmbus_device_id *id_table;

	struct device_driver driver;

	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
	int (*remove)(struct hv_device *);
	void (*shutdown)(struct hv_device *);

};

/* Base device object */
struct hv_device {
	/* the device type id of this device */
	uuid_le dev_type;

	/* the device instance id of this device */
	uuid_le dev_instance;

	struct device device;

	struct vmbus_channel *channel;
};


static inline struct hv_device *device_to_hv_device(struct device *d)
{
	return container_of(d, struct hv_device, device);
}

static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
	return container_of(d, struct hv_driver, driver);
}

static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
	dev_set_drvdata(&dev->device, data);
}

static inline void *hv_get_drvdata(struct hv_device *dev)
{
	return dev_get_drvdata(&dev->device);
}

/* Vmbus interface */
#define vmbus_driver_register(driver)	\
	__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
					 struct module *owner,
					 const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);

int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok);

int vmbus_cpu_number_to_vp_number(int cpu_number);
u64 hv_do_hypercall(u64 control, void *input, void *output);

/*
 * GUID definitions of various offer types - services offered to the guest.
 */

/*
 * Network GUID
 * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
 */
#define HV_NIC_GUID \
	.guid = UUID_LE(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
			0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)

/*
 * IDE GUID
 * {32412632-86cb-44a2-9b5c-50d1417354f5}
 */
#define HV_IDE_GUID \
	.guid = UUID_LE(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
			0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)

/*
 * SCSI GUID
 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
 */
#define HV_SCSI_GUID \
	.guid = UUID_LE(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
			0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)

/*
 * Shutdown GUID
 * {0e0b6031-5213-4934-818b-38d90ced39db}
 */
#define HV_SHUTDOWN_GUID \
	.guid = UUID_LE(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
			0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)

/*
 * Time Synch GUID
 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
 */
#define HV_TS_GUID \
	.guid = UUID_LE(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
			0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)

/*
 * Heartbeat GUID
 * {57164f39-9115-4e78-ab55-382f3bd5422d}
 */
#define HV_HEART_BEAT_GUID \
	.guid = UUID_LE(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
			0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)

/*
 * KVP GUID
 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
 */
#define HV_KVP_GUID \
	.guid = UUID_LE(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
			0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)

/*
 * Dynamic memory GUID
 * {525074dc-8985-46e2-8057-a307dc18a502}
 */
#define HV_DM_GUID \
	.guid = UUID_LE(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
			0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)

/*
 * Mouse GUID
 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
 */
#define HV_MOUSE_GUID \
	.guid = UUID_LE(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
			0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)

/*
 * Keyboard GUID
 * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
 */
#define HV_KBD_GUID \
	.guid = UUID_LE(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
			0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)

/*
 * VSS (Backup/Restore) GUID
 */
#define HV_VSS_GUID \
	.guid = UUID_LE(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
			0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
/*
 * Synthetic Video GUID
 * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
 */
#define HV_SYNTHVID_GUID \
	.guid = UUID_LE(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
			0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)

/*
 * Synthetic FC GUID
 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
 */
#define HV_SYNTHFC_GUID \
	.guid = UUID_LE(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
			0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)

/*
 * Guest File Copy Service
 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
 */

#define HV_FCOPY_GUID \
	.guid = UUID_LE(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
			0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)

/*
 * NetworkDirect. This is the guest RDMA service.
 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
 */
#define HV_ND_GUID \
	.guid = UUID_LE(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
			0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)

/*
 * PCI Express Pass Through
 * {44C4F61D-4444-4400-9D52-802E27EDE19F}
 */

#define HV_PCIE_GUID \
	.guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
			0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
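
/*
 * Illustrative sketch: the GUID macros above are designed to be dropped into
 * an hv_vmbus_device_id table, which is then wired up through struct
 * hv_driver and vmbus_driver_register().  The example_* names are
 * hypothetical; the mouse GUID is used purely as an example.
 */
#if 0	/* example only, not compiled */
static const struct hv_vmbus_device_id example_id_table[] = {
	{ HV_MOUSE_GUID, },
	{ },
};
MODULE_DEVICE_TABLE(vmbus, example_id_table);

static int example_probe(struct hv_device *dev,
			 const struct hv_vmbus_device_id *id)
{
	/* ... vmbus_open(dev->channel, ...) and hv_set_drvdata(dev, ...) ... */
	return 0;
}

static int example_remove(struct hv_device *dev)
{
	vmbus_close(dev->channel);
	return 0;
}

static struct hv_driver example_drv = {
	.name = "example_vmbus_drv",
	.id_table = example_id_table,
	.probe = example_probe,
	.remove = example_remove,
};

static int __init example_init(void)
{
	return vmbus_driver_register(&example_drv);
}
module_init(example_init);

static void __exit example_exit(void)
{
	vmbus_driver_unregister(&example_drv);
}
module_exit(example_exit);
#endif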

/*
 * Common header for Hyper-V ICs
 */

#define ICMSGTYPE_NEGOTIATE		0
#define ICMSGTYPE_HEARTBEAT		1
#define ICMSGTYPE_KVPEXCHANGE		2
#define ICMSGTYPE_SHUTDOWN		3
#define ICMSGTYPE_TIMESYNC		4
#define ICMSGTYPE_VSS			5

#define ICMSGHDRFLAG_TRANSACTION	1
#define ICMSGHDRFLAG_REQUEST		2
#define ICMSGHDRFLAG_RESPONSE		4


/*
 * While we want to handle util services as regular devices,
 * there is only one instance of each of these services; so
 * we statically allocate the service specific state.
 */

struct hv_util_service {
	u8 *recv_buffer;
	void (*util_cb)(void *);
	int (*util_init)(struct hv_util_service *);
	void (*util_deinit)(void);
};

struct vmbuspipe_hdr {
	u32 flags;
	u32 msgsize;
} __packed;

struct ic_version {
	u16 major;
	u16 minor;
} __packed;

struct icmsg_hdr {
	struct ic_version icverframe;
	u16 icmsgtype;
	struct ic_version icvermsg;
	u16 icmsgsize;
	u32 status;
	u8 ictransaction_id;
	u8 icflags;
	u8 reserved[2];
} __packed;

struct icmsg_negotiate {
	u16 icframe_vercnt;
	u16 icmsg_vercnt;
	u32 reserved;
	struct ic_version icversion_data[1]; /* any size array */
} __packed;

struct shutdown_msg_data {
	u32 reason_code;
	u32 timeout_seconds;
	u32 flags;
	u8 display_message[2048];
} __packed;

struct heartbeat_msg_data {
	u64 seq_num;
	u32 reserved[8];
} __packed;

/* Time Sync IC defs */
#define ICTIMESYNCFLAG_PROBE	0
#define ICTIMESYNCFLAG_SYNC	1
#define ICTIMESYNCFLAG_SAMPLE	2

#ifdef __x86_64__
#define WLTIMEDELTA	116444736000000000L	/* in 100ns unit */
#else
#define WLTIMEDELTA	116444736000000000LL
#endif

struct ictimesync_data {
	u64 parenttime;
	u64 childtime;
	u64 roundtriptime;
	u8 flags;
} __packed;

struct hyperv_service_callback {
	u8 msg_type;
	char *log_msg;
	uuid_le data;
	struct vmbus_channel *channel;
	void (*callback) (void *context);
};

#define MAX_SRV_VER	0x7ffffff
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
				      struct icmsg_negotiate *, u8 *, int,
				      int);

void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);

/*
 * Negotiated version with the Host.
 */

extern __u32 vmbus_proto_version;

#endif /* _HYPERV_H */