1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 /* 3 * 4 * Copyright (c) 2011, Microsoft Corporation. 5 * 6 * Authors: 7 * Haiyang Zhang <haiyangz@microsoft.com> 8 * Hank Janssen <hjanssen@microsoft.com> 9 * K. Y. Srinivasan <kys@microsoft.com> 10 */ 11 12 #ifndef _HYPERV_H 13 #define _HYPERV_H 14 15 #include <uapi/linux/hyperv.h> 16 17 #include <linux/types.h> 18 #include <linux/scatterlist.h> 19 #include <linux/list.h> 20 #include <linux/timer.h> 21 #include <linux/completion.h> 22 #include <linux/device.h> 23 #include <linux/mod_devicetable.h> 24 #include <linux/interrupt.h> 25 #include <linux/reciprocal_div.h> 26 27 #define MAX_PAGE_BUFFER_COUNT 32 28 #define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */ 29 30 #pragma pack(push, 1) 31 32 /* Single-page buffer */ 33 struct hv_page_buffer { 34 u32 len; 35 u32 offset; 36 u64 pfn; 37 }; 38 39 /* Multiple-page buffer */ 40 struct hv_multipage_buffer { 41 /* Length and Offset determines the # of pfns in the array */ 42 u32 len; 43 u32 offset; 44 u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT]; 45 }; 46 47 /* 48 * Multiple-page buffer array; the pfn array is variable size: 49 * The number of entries in the PFN array is determined by 50 * "len" and "offset". 51 */ 52 struct hv_mpb_array { 53 /* Length and Offset determines the # of pfns in the array */ 54 u32 len; 55 u32 offset; 56 u64 pfn_array[]; 57 }; 58 59 /* 0x18 includes the proprietary packet header */ 60 #define MAX_PAGE_BUFFER_PACKET (0x18 + \ 61 (sizeof(struct hv_page_buffer) * \ 62 MAX_PAGE_BUFFER_COUNT)) 63 #define MAX_MULTIPAGE_BUFFER_PACKET (0x18 + \ 64 sizeof(struct hv_multipage_buffer)) 65 66 67 #pragma pack(pop) 68 69 struct hv_ring_buffer { 70 /* Offset in bytes from the start of ring data below */ 71 u32 write_index; 72 73 /* Offset in bytes from the start of ring data below */ 74 u32 read_index; 75 76 u32 interrupt_mask; 77 78 /* 79 * WS2012/Win8 and later versions of Hyper-V implement interrupt 80 * driven flow management. The feature bit feat_pending_send_sz 81 * is set by the host on the host->guest ring buffer, and by the 82 * guest on the guest->host ring buffer. 83 * 84 * The meaning of the feature bit is a bit complex in that it has 85 * semantics that apply to both ring buffers. If the guest sets 86 * the feature bit in the guest->host ring buffer, the guest is 87 * telling the host that: 88 * 1) It will set the pending_send_sz field in the guest->host ring 89 * buffer when it is waiting for space to become available, and 90 * 2) It will read the pending_send_sz field in the host->guest 91 * ring buffer and interrupt the host when it frees enough space 92 * 93 * Similarly, if the host sets the feature bit in the host->guest 94 * ring buffer, the host is telling the guest that: 95 * 1) It will set the pending_send_sz field in the host->guest ring 96 * buffer when it is waiting for space to become available, and 97 * 2) It will read the pending_send_sz field in the guest->host 98 * ring buffer and interrupt the guest when it frees enough space 99 * 100 * If either the guest or host does not set the feature bit that it 101 * owns, that guest or host must do polling if it encounters a full 102 * ring buffer, and not signal the other end with an interrupt. 103 */ 104 u32 pending_send_sz; 105 u32 reserved1[12]; 106 union { 107 struct { 108 u32 feat_pending_send_sz:1; 109 }; 110 u32 value; 111 } feature_bits; 112 113 /* Pad it to PAGE_SIZE so that data starts on page boundary */ 114 u8 reserved2[4028]; 115 116 /* 117 * Ring data starts here + RingDataStartOffset 118 * !!! DO NOT place any fields below this !!! 119 */ 120 u8 buffer[0]; 121 } __packed; 122 123 struct hv_ring_buffer_info { 124 struct hv_ring_buffer *ring_buffer; 125 u32 ring_size; /* Include the shared header */ 126 struct reciprocal_value ring_size_div10_reciprocal; 127 spinlock_t ring_lock; 128 129 u32 ring_datasize; /* < ring_size */ 130 u32 priv_read_index; 131 /* 132 * The ring buffer mutex lock. This lock prevents the ring buffer from 133 * being freed while the ring buffer is being accessed. 134 */ 135 struct mutex ring_buffer_mutex; 136 }; 137 138 139 static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi) 140 { 141 u32 read_loc, write_loc, dsize, read; 142 143 dsize = rbi->ring_datasize; 144 read_loc = rbi->ring_buffer->read_index; 145 write_loc = READ_ONCE(rbi->ring_buffer->write_index); 146 147 read = write_loc >= read_loc ? (write_loc - read_loc) : 148 (dsize - read_loc) + write_loc; 149 150 return read; 151 } 152 153 static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi) 154 { 155 u32 read_loc, write_loc, dsize, write; 156 157 dsize = rbi->ring_datasize; 158 read_loc = READ_ONCE(rbi->ring_buffer->read_index); 159 write_loc = rbi->ring_buffer->write_index; 160 161 write = write_loc >= read_loc ? dsize - (write_loc - read_loc) : 162 read_loc - write_loc; 163 return write; 164 } 165 166 static inline u32 hv_get_avail_to_write_percent( 167 const struct hv_ring_buffer_info *rbi) 168 { 169 u32 avail_write = hv_get_bytes_to_write(rbi); 170 171 return reciprocal_divide( 172 (avail_write << 3) + (avail_write << 1), 173 rbi->ring_size_div10_reciprocal); 174 } 175 176 /* 177 * VMBUS version is 32 bit entity broken up into 178 * two 16 bit quantities: major_number. minor_number. 179 * 180 * 0 . 13 (Windows Server 2008) 181 * 1 . 1 (Windows 7) 182 * 2 . 4 (Windows 8) 183 * 3 . 0 (Windows 8 R2) 184 * 4 . 0 (Windows 10) 185 * 4 . 1 (Windows 10 RS3) 186 * 5 . 0 (Newer Windows 10) 187 * 5 . 1 (Windows 10 RS4) 188 * 5 . 2 (Windows Server 2019, RS5) 189 */ 190 191 #define VERSION_WS2008 ((0 << 16) | (13)) 192 #define VERSION_WIN7 ((1 << 16) | (1)) 193 #define VERSION_WIN8 ((2 << 16) | (4)) 194 #define VERSION_WIN8_1 ((3 << 16) | (0)) 195 #define VERSION_WIN10 ((4 << 16) | (0)) 196 #define VERSION_WIN10_V4_1 ((4 << 16) | (1)) 197 #define VERSION_WIN10_V5 ((5 << 16) | (0)) 198 #define VERSION_WIN10_V5_1 ((5 << 16) | (1)) 199 #define VERSION_WIN10_V5_2 ((5 << 16) | (2)) 200 201 /* Make maximum size of pipe payload of 16K */ 202 #define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384) 203 204 /* Define PipeMode values. */ 205 #define VMBUS_PIPE_TYPE_BYTE 0x00000000 206 #define VMBUS_PIPE_TYPE_MESSAGE 0x00000004 207 208 /* The size of the user defined data buffer for non-pipe offers. */ 209 #define MAX_USER_DEFINED_BYTES 120 210 211 /* The size of the user defined data buffer for pipe offers. */ 212 #define MAX_PIPE_USER_DEFINED_BYTES 116 213 214 /* 215 * At the center of the Channel Management library is the Channel Offer. This 216 * struct contains the fundamental information about an offer. 217 */ 218 struct vmbus_channel_offer { 219 guid_t if_type; 220 guid_t if_instance; 221 222 /* 223 * These two fields are not currently used. 224 */ 225 u64 reserved1; 226 u64 reserved2; 227 228 u16 chn_flags; 229 u16 mmio_megabytes; /* in bytes * 1024 * 1024 */ 230 231 union { 232 /* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */ 233 struct { 234 unsigned char user_def[MAX_USER_DEFINED_BYTES]; 235 } std; 236 237 /* 238 * Pipes: 239 * The following sructure is an integrated pipe protocol, which 240 * is implemented on top of standard user-defined data. Pipe 241 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own 242 * use. 243 */ 244 struct { 245 u32 pipe_mode; 246 unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES]; 247 } pipe; 248 } u; 249 /* 250 * The sub_channel_index is defined in Win8: a value of zero means a 251 * primary channel and a value of non-zero means a sub-channel. 252 * 253 * Before Win8, the field is reserved, meaning it's always zero. 254 */ 255 u16 sub_channel_index; 256 u16 reserved3; 257 } __packed; 258 259 /* Server Flags */ 260 #define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 1 261 #define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES 2 262 #define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS 4 263 #define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x10 264 #define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100 265 #define VMBUS_CHANNEL_PARENT_OFFER 0x200 266 #define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400 267 #define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 0x2000 268 269 struct vmpacket_descriptor { 270 u16 type; 271 u16 offset8; 272 u16 len8; 273 u16 flags; 274 u64 trans_id; 275 } __packed; 276 277 struct vmpacket_header { 278 u32 prev_pkt_start_offset; 279 struct vmpacket_descriptor descriptor; 280 } __packed; 281 282 struct vmtransfer_page_range { 283 u32 byte_count; 284 u32 byte_offset; 285 } __packed; 286 287 struct vmtransfer_page_packet_header { 288 struct vmpacket_descriptor d; 289 u16 xfer_pageset_id; 290 u8 sender_owns_set; 291 u8 reserved; 292 u32 range_cnt; 293 struct vmtransfer_page_range ranges[1]; 294 } __packed; 295 296 struct vmgpadl_packet_header { 297 struct vmpacket_descriptor d; 298 u32 gpadl; 299 u32 reserved; 300 } __packed; 301 302 struct vmadd_remove_transfer_page_set { 303 struct vmpacket_descriptor d; 304 u32 gpadl; 305 u16 xfer_pageset_id; 306 u16 reserved; 307 } __packed; 308 309 /* 310 * This structure defines a range in guest physical space that can be made to 311 * look virtually contiguous. 312 */ 313 struct gpa_range { 314 u32 byte_count; 315 u32 byte_offset; 316 u64 pfn_array[0]; 317 }; 318 319 /* 320 * This is the format for an Establish Gpadl packet, which contains a handle by 321 * which this GPADL will be known and a set of GPA ranges associated with it. 322 * This can be converted to a MDL by the guest OS. If there are multiple GPA 323 * ranges, then the resulting MDL will be "chained," representing multiple VA 324 * ranges. 325 */ 326 struct vmestablish_gpadl { 327 struct vmpacket_descriptor d; 328 u32 gpadl; 329 u32 range_cnt; 330 struct gpa_range range[1]; 331 } __packed; 332 333 /* 334 * This is the format for a Teardown Gpadl packet, which indicates that the 335 * GPADL handle in the Establish Gpadl packet will never be referenced again. 336 */ 337 struct vmteardown_gpadl { 338 struct vmpacket_descriptor d; 339 u32 gpadl; 340 u32 reserved; /* for alignment to a 8-byte boundary */ 341 } __packed; 342 343 /* 344 * This is the format for a GPA-Direct packet, which contains a set of GPA 345 * ranges, in addition to commands and/or data. 346 */ 347 struct vmdata_gpa_direct { 348 struct vmpacket_descriptor d; 349 u32 reserved; 350 u32 range_cnt; 351 struct gpa_range range[1]; 352 } __packed; 353 354 /* This is the format for a Additional Data Packet. */ 355 struct vmadditional_data { 356 struct vmpacket_descriptor d; 357 u64 total_bytes; 358 u32 offset; 359 u32 byte_cnt; 360 unsigned char data[1]; 361 } __packed; 362 363 union vmpacket_largest_possible_header { 364 struct vmpacket_descriptor simple_hdr; 365 struct vmtransfer_page_packet_header xfer_page_hdr; 366 struct vmgpadl_packet_header gpadl_hdr; 367 struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr; 368 struct vmestablish_gpadl establish_gpadl_hdr; 369 struct vmteardown_gpadl teardown_gpadl_hdr; 370 struct vmdata_gpa_direct data_gpa_direct_hdr; 371 }; 372 373 #define VMPACKET_DATA_START_ADDRESS(__packet) \ 374 (void *)(((unsigned char *)__packet) + \ 375 ((struct vmpacket_descriptor)__packet)->offset8 * 8) 376 377 #define VMPACKET_DATA_LENGTH(__packet) \ 378 ((((struct vmpacket_descriptor)__packet)->len8 - \ 379 ((struct vmpacket_descriptor)__packet)->offset8) * 8) 380 381 #define VMPACKET_TRANSFER_MODE(__packet) \ 382 (((struct IMPACT)__packet)->type) 383 384 enum vmbus_packet_type { 385 VM_PKT_INVALID = 0x0, 386 VM_PKT_SYNCH = 0x1, 387 VM_PKT_ADD_XFER_PAGESET = 0x2, 388 VM_PKT_RM_XFER_PAGESET = 0x3, 389 VM_PKT_ESTABLISH_GPADL = 0x4, 390 VM_PKT_TEARDOWN_GPADL = 0x5, 391 VM_PKT_DATA_INBAND = 0x6, 392 VM_PKT_DATA_USING_XFER_PAGES = 0x7, 393 VM_PKT_DATA_USING_GPADL = 0x8, 394 VM_PKT_DATA_USING_GPA_DIRECT = 0x9, 395 VM_PKT_CANCEL_REQUEST = 0xa, 396 VM_PKT_COMP = 0xb, 397 VM_PKT_DATA_USING_ADDITIONAL_PKT = 0xc, 398 VM_PKT_ADDITIONAL_DATA = 0xd 399 }; 400 401 #define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED 1 402 403 404 /* Version 1 messages */ 405 enum vmbus_channel_message_type { 406 CHANNELMSG_INVALID = 0, 407 CHANNELMSG_OFFERCHANNEL = 1, 408 CHANNELMSG_RESCIND_CHANNELOFFER = 2, 409 CHANNELMSG_REQUESTOFFERS = 3, 410 CHANNELMSG_ALLOFFERS_DELIVERED = 4, 411 CHANNELMSG_OPENCHANNEL = 5, 412 CHANNELMSG_OPENCHANNEL_RESULT = 6, 413 CHANNELMSG_CLOSECHANNEL = 7, 414 CHANNELMSG_GPADL_HEADER = 8, 415 CHANNELMSG_GPADL_BODY = 9, 416 CHANNELMSG_GPADL_CREATED = 10, 417 CHANNELMSG_GPADL_TEARDOWN = 11, 418 CHANNELMSG_GPADL_TORNDOWN = 12, 419 CHANNELMSG_RELID_RELEASED = 13, 420 CHANNELMSG_INITIATE_CONTACT = 14, 421 CHANNELMSG_VERSION_RESPONSE = 15, 422 CHANNELMSG_UNLOAD = 16, 423 CHANNELMSG_UNLOAD_RESPONSE = 17, 424 CHANNELMSG_18 = 18, 425 CHANNELMSG_19 = 19, 426 CHANNELMSG_20 = 20, 427 CHANNELMSG_TL_CONNECT_REQUEST = 21, 428 CHANNELMSG_COUNT 429 }; 430 431 /* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */ 432 #define INVALID_RELID U32_MAX 433 434 struct vmbus_channel_message_header { 435 enum vmbus_channel_message_type msgtype; 436 u32 padding; 437 } __packed; 438 439 /* Query VMBus Version parameters */ 440 struct vmbus_channel_query_vmbus_version { 441 struct vmbus_channel_message_header header; 442 u32 version; 443 } __packed; 444 445 /* VMBus Version Supported parameters */ 446 struct vmbus_channel_version_supported { 447 struct vmbus_channel_message_header header; 448 u8 version_supported; 449 } __packed; 450 451 /* Offer Channel parameters */ 452 struct vmbus_channel_offer_channel { 453 struct vmbus_channel_message_header header; 454 struct vmbus_channel_offer offer; 455 u32 child_relid; 456 u8 monitorid; 457 /* 458 * win7 and beyond splits this field into a bit field. 459 */ 460 u8 monitor_allocated:1; 461 u8 reserved:7; 462 /* 463 * These are new fields added in win7 and later. 464 * Do not access these fields without checking the 465 * negotiated protocol. 466 * 467 * If "is_dedicated_interrupt" is set, we must not set the 468 * associated bit in the channel bitmap while sending the 469 * interrupt to the host. 470 * 471 * connection_id is to be used in signaling the host. 472 */ 473 u16 is_dedicated_interrupt:1; 474 u16 reserved1:15; 475 u32 connection_id; 476 } __packed; 477 478 /* Rescind Offer parameters */ 479 struct vmbus_channel_rescind_offer { 480 struct vmbus_channel_message_header header; 481 u32 child_relid; 482 } __packed; 483 484 static inline u32 485 hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi) 486 { 487 return rbi->ring_buffer->pending_send_sz; 488 } 489 490 /* 491 * Request Offer -- no parameters, SynIC message contains the partition ID 492 * Set Snoop -- no parameters, SynIC message contains the partition ID 493 * Clear Snoop -- no parameters, SynIC message contains the partition ID 494 * All Offers Delivered -- no parameters, SynIC message contains the partition 495 * ID 496 * Flush Client -- no parameters, SynIC message contains the partition ID 497 */ 498 499 /* Open Channel parameters */ 500 struct vmbus_channel_open_channel { 501 struct vmbus_channel_message_header header; 502 503 /* Identifies the specific VMBus channel that is being opened. */ 504 u32 child_relid; 505 506 /* ID making a particular open request at a channel offer unique. */ 507 u32 openid; 508 509 /* GPADL for the channel's ring buffer. */ 510 u32 ringbuffer_gpadlhandle; 511 512 /* 513 * Starting with win8, this field will be used to specify 514 * the target virtual processor on which to deliver the interrupt for 515 * the host to guest communication. 516 * Prior to win8, incoming channel interrupts would only 517 * be delivered on cpu 0. Setting this value to 0 would 518 * preserve the earlier behavior. 519 */ 520 u32 target_vp; 521 522 /* 523 * The upstream ring buffer begins at offset zero in the memory 524 * described by RingBufferGpadlHandle. The downstream ring buffer 525 * follows it at this offset (in pages). 526 */ 527 u32 downstream_ringbuffer_pageoffset; 528 529 /* User-specific data to be passed along to the server endpoint. */ 530 unsigned char userdata[MAX_USER_DEFINED_BYTES]; 531 } __packed; 532 533 /* Open Channel Result parameters */ 534 struct vmbus_channel_open_result { 535 struct vmbus_channel_message_header header; 536 u32 child_relid; 537 u32 openid; 538 u32 status; 539 } __packed; 540 541 /* Close channel parameters; */ 542 struct vmbus_channel_close_channel { 543 struct vmbus_channel_message_header header; 544 u32 child_relid; 545 } __packed; 546 547 /* Channel Message GPADL */ 548 #define GPADL_TYPE_RING_BUFFER 1 549 #define GPADL_TYPE_SERVER_SAVE_AREA 2 550 #define GPADL_TYPE_TRANSACTION 8 551 552 /* 553 * The number of PFNs in a GPADL message is defined by the number of 554 * pages that would be spanned by ByteCount and ByteOffset. If the 555 * implied number of PFNs won't fit in this packet, there will be a 556 * follow-up packet that contains more. 557 */ 558 struct vmbus_channel_gpadl_header { 559 struct vmbus_channel_message_header header; 560 u32 child_relid; 561 u32 gpadl; 562 u16 range_buflen; 563 u16 rangecount; 564 struct gpa_range range[0]; 565 } __packed; 566 567 /* This is the followup packet that contains more PFNs. */ 568 struct vmbus_channel_gpadl_body { 569 struct vmbus_channel_message_header header; 570 u32 msgnumber; 571 u32 gpadl; 572 u64 pfn[0]; 573 } __packed; 574 575 struct vmbus_channel_gpadl_created { 576 struct vmbus_channel_message_header header; 577 u32 child_relid; 578 u32 gpadl; 579 u32 creation_status; 580 } __packed; 581 582 struct vmbus_channel_gpadl_teardown { 583 struct vmbus_channel_message_header header; 584 u32 child_relid; 585 u32 gpadl; 586 } __packed; 587 588 struct vmbus_channel_gpadl_torndown { 589 struct vmbus_channel_message_header header; 590 u32 gpadl; 591 } __packed; 592 593 struct vmbus_channel_relid_released { 594 struct vmbus_channel_message_header header; 595 u32 child_relid; 596 } __packed; 597 598 struct vmbus_channel_initiate_contact { 599 struct vmbus_channel_message_header header; 600 u32 vmbus_version_requested; 601 u32 target_vcpu; /* The VCPU the host should respond to */ 602 union { 603 u64 interrupt_page; 604 struct { 605 u8 msg_sint; 606 u8 padding1[3]; 607 u32 padding2; 608 }; 609 }; 610 u64 monitor_page1; 611 u64 monitor_page2; 612 } __packed; 613 614 /* Hyper-V socket: guest's connect()-ing to host */ 615 struct vmbus_channel_tl_connect_request { 616 struct vmbus_channel_message_header header; 617 guid_t guest_endpoint_id; 618 guid_t host_service_id; 619 } __packed; 620 621 struct vmbus_channel_version_response { 622 struct vmbus_channel_message_header header; 623 u8 version_supported; 624 625 u8 connection_state; 626 u16 padding; 627 628 /* 629 * On new hosts that support VMBus protocol 5.0, we must use 630 * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message, 631 * and for subsequent messages, we must use the Message Connection ID 632 * field in the host-returned Version Response Message. 633 * 634 * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1). 635 */ 636 u32 msg_conn_id; 637 } __packed; 638 639 enum vmbus_channel_state { 640 CHANNEL_OFFER_STATE, 641 CHANNEL_OPENING_STATE, 642 CHANNEL_OPEN_STATE, 643 CHANNEL_OPENED_STATE, 644 }; 645 646 /* 647 * Represents each channel msg on the vmbus connection This is a 648 * variable-size data structure depending on the msg type itself 649 */ 650 struct vmbus_channel_msginfo { 651 /* Bookkeeping stuff */ 652 struct list_head msglistentry; 653 654 /* So far, this is only used to handle gpadl body message */ 655 struct list_head submsglist; 656 657 /* Synchronize the request/response if needed */ 658 struct completion waitevent; 659 struct vmbus_channel *waiting_channel; 660 union { 661 struct vmbus_channel_version_supported version_supported; 662 struct vmbus_channel_open_result open_result; 663 struct vmbus_channel_gpadl_torndown gpadl_torndown; 664 struct vmbus_channel_gpadl_created gpadl_created; 665 struct vmbus_channel_version_response version_response; 666 } response; 667 668 u32 msgsize; 669 /* 670 * The channel message that goes out on the "wire". 671 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header 672 */ 673 unsigned char msg[0]; 674 }; 675 676 struct vmbus_close_msg { 677 struct vmbus_channel_msginfo info; 678 struct vmbus_channel_close_channel msg; 679 }; 680 681 /* Define connection identifier type. */ 682 union hv_connection_id { 683 u32 asu32; 684 struct { 685 u32 id:24; 686 u32 reserved:8; 687 } u; 688 }; 689 690 enum hv_numa_policy { 691 HV_BALANCED = 0, 692 HV_LOCALIZED, 693 }; 694 695 enum vmbus_device_type { 696 HV_IDE = 0, 697 HV_SCSI, 698 HV_FC, 699 HV_NIC, 700 HV_ND, 701 HV_PCIE, 702 HV_FB, 703 HV_KBD, 704 HV_MOUSE, 705 HV_KVP, 706 HV_TS, 707 HV_HB, 708 HV_SHUTDOWN, 709 HV_FCOPY, 710 HV_BACKUP, 711 HV_DM, 712 HV_UNKNOWN, 713 }; 714 715 struct vmbus_device { 716 u16 dev_type; 717 guid_t guid; 718 bool perf_device; 719 }; 720 721 struct vmbus_channel { 722 struct list_head listentry; 723 724 struct hv_device *device_obj; 725 726 enum vmbus_channel_state state; 727 728 struct vmbus_channel_offer_channel offermsg; 729 /* 730 * These are based on the OfferMsg.MonitorId. 731 * Save it here for easy access. 732 */ 733 u8 monitor_grp; 734 u8 monitor_bit; 735 736 bool rescind; /* got rescind msg */ 737 struct completion rescind_event; 738 739 u32 ringbuffer_gpadlhandle; 740 741 /* Allocated memory for ring buffer */ 742 struct page *ringbuffer_page; 743 u32 ringbuffer_pagecount; 744 u32 ringbuffer_send_offset; 745 struct hv_ring_buffer_info outbound; /* send to parent */ 746 struct hv_ring_buffer_info inbound; /* receive from parent */ 747 748 struct vmbus_close_msg close_msg; 749 750 /* Statistics */ 751 u64 interrupts; /* Host to Guest interrupts */ 752 u64 sig_events; /* Guest to Host events */ 753 754 /* 755 * Guest to host interrupts caused by the outbound ring buffer changing 756 * from empty to not empty. 757 */ 758 u64 intr_out_empty; 759 760 /* 761 * Indicates that a full outbound ring buffer was encountered. The flag 762 * is set to true when a full outbound ring buffer is encountered and 763 * set to false when a write to the outbound ring buffer is completed. 764 */ 765 bool out_full_flag; 766 767 /* Channel callback's invoked in softirq context */ 768 struct tasklet_struct callback_event; 769 void (*onchannel_callback)(void *context); 770 void *channel_callback_context; 771 772 /* 773 * A channel can be marked for one of three modes of reading: 774 * BATCHED - callback called from taslket and should read 775 * channel until empty. Interrupts from the host 776 * are masked while read is in process (default). 777 * DIRECT - callback called from tasklet (softirq). 778 * ISR - callback called in interrupt context and must 779 * invoke its own deferred processing. 780 * Host interrupts are disabled and must be re-enabled 781 * when ring is empty. 782 */ 783 enum hv_callback_mode { 784 HV_CALL_BATCHED, 785 HV_CALL_DIRECT, 786 HV_CALL_ISR 787 } callback_mode; 788 789 bool is_dedicated_interrupt; 790 u64 sig_event; 791 792 /* 793 * Starting with win8, this field will be used to specify 794 * the target virtual processor on which to deliver the interrupt for 795 * the host to guest communication. 796 * Prior to win8, incoming channel interrupts would only 797 * be delivered on cpu 0. Setting this value to 0 would 798 * preserve the earlier behavior. 799 */ 800 u32 target_vp; 801 /* The corresponding CPUID in the guest */ 802 u32 target_cpu; 803 /* 804 * State to manage the CPU affiliation of channels. 805 */ 806 struct cpumask alloced_cpus_in_node; 807 int numa_node; 808 /* 809 * Support for sub-channels. For high performance devices, 810 * it will be useful to have multiple sub-channels to support 811 * a scalable communication infrastructure with the host. 812 * The support for sub-channels is implemented as an extention 813 * to the current infrastructure. 814 * The initial offer is considered the primary channel and this 815 * offer message will indicate if the host supports sub-channels. 816 * The guest is free to ask for sub-channels to be offerred and can 817 * open these sub-channels as a normal "primary" channel. However, 818 * all sub-channels will have the same type and instance guids as the 819 * primary channel. Requests sent on a given channel will result in a 820 * response on the same channel. 821 */ 822 823 /* 824 * Sub-channel creation callback. This callback will be called in 825 * process context when a sub-channel offer is received from the host. 826 * The guest can open the sub-channel in the context of this callback. 827 */ 828 void (*sc_creation_callback)(struct vmbus_channel *new_sc); 829 830 /* 831 * Channel rescind callback. Some channels (the hvsock ones), need to 832 * register a callback which is invoked in vmbus_onoffer_rescind(). 833 */ 834 void (*chn_rescind_callback)(struct vmbus_channel *channel); 835 836 /* 837 * The spinlock to protect the structure. It is being used to protect 838 * test-and-set access to various attributes of the structure as well 839 * as all sc_list operations. 840 */ 841 spinlock_t lock; 842 /* 843 * All Sub-channels of a primary channel are linked here. 844 */ 845 struct list_head sc_list; 846 /* 847 * The primary channel this sub-channel belongs to. 848 * This will be NULL for the primary channel. 849 */ 850 struct vmbus_channel *primary_channel; 851 /* 852 * Support per-channel state for use by vmbus drivers. 853 */ 854 void *per_channel_state; 855 /* 856 * To support per-cpu lookup mapping of relid to channel, 857 * link up channels based on their CPU affinity. 858 */ 859 struct list_head percpu_list; 860 861 /* 862 * Defer freeing channel until after all cpu's have 863 * gone through grace period. 864 */ 865 struct rcu_head rcu; 866 867 /* 868 * For sysfs per-channel properties. 869 */ 870 struct kobject kobj; 871 872 /* 873 * For performance critical channels (storage, networking 874 * etc,), Hyper-V has a mechanism to enhance the throughput 875 * at the expense of latency: 876 * When the host is to be signaled, we just set a bit in a shared page 877 * and this bit will be inspected by the hypervisor within a certain 878 * window and if the bit is set, the host will be signaled. The window 879 * of time is the monitor latency - currently around 100 usecs. This 880 * mechanism improves throughput by: 881 * 882 * A) Making the host more efficient - each time it wakes up, 883 * potentially it will process morev number of packets. The 884 * monitor latency allows a batch to build up. 885 * B) By deferring the hypercall to signal, we will also minimize 886 * the interrupts. 887 * 888 * Clearly, these optimizations improve throughput at the expense of 889 * latency. Furthermore, since the channel is shared for both 890 * control and data messages, control messages currently suffer 891 * unnecessary latency adversley impacting performance and boot 892 * time. To fix this issue, permit tagging the channel as being 893 * in "low latency" mode. In this mode, we will bypass the monitor 894 * mechanism. 895 */ 896 bool low_latency; 897 898 /* 899 * NUMA distribution policy: 900 * We support two policies: 901 * 1) Balanced: Here all performance critical channels are 902 * distributed evenly amongst all the NUMA nodes. 903 * This policy will be the default policy. 904 * 2) Localized: All channels of a given instance of a 905 * performance critical service will be assigned CPUs 906 * within a selected NUMA node. 907 */ 908 enum hv_numa_policy affinity_policy; 909 910 bool probe_done; 911 912 /* 913 * We must offload the handling of the primary/sub channels 914 * from the single-threaded vmbus_connection.work_queue to 915 * two different workqueue, otherwise we can block 916 * vmbus_connection.work_queue and hang: see vmbus_process_offer(). 917 */ 918 struct work_struct add_channel_work; 919 920 /* 921 * Guest to host interrupts caused by the inbound ring buffer changing 922 * from full to not full while a packet is waiting. 923 */ 924 u64 intr_in_full; 925 926 /* 927 * The total number of write operations that encountered a full 928 * outbound ring buffer. 929 */ 930 u64 out_full_total; 931 932 /* 933 * The number of write operations that were the first to encounter a 934 * full outbound ring buffer. 935 */ 936 u64 out_full_first; 937 938 /* enabling/disabling fuzz testing on the channel (default is false)*/ 939 bool fuzz_testing_state; 940 941 /* 942 * Interrupt delay will delay the guest from emptying the ring buffer 943 * for a specific amount of time. The delay is in microseconds and will 944 * be between 1 to a maximum of 1000, its default is 0 (no delay). 945 * The Message delay will delay guest reading on a per message basis 946 * in microseconds between 1 to 1000 with the default being 0 947 * (no delay). 948 */ 949 u32 fuzz_testing_interrupt_delay; 950 u32 fuzz_testing_message_delay; 951 952 }; 953 954 static inline bool is_hvsock_channel(const struct vmbus_channel *c) 955 { 956 return !!(c->offermsg.offer.chn_flags & 957 VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER); 958 } 959 960 static inline bool is_sub_channel(const struct vmbus_channel *c) 961 { 962 return c->offermsg.offer.sub_channel_index != 0; 963 } 964 965 static inline void set_channel_affinity_state(struct vmbus_channel *c, 966 enum hv_numa_policy policy) 967 { 968 c->affinity_policy = policy; 969 } 970 971 static inline void set_channel_read_mode(struct vmbus_channel *c, 972 enum hv_callback_mode mode) 973 { 974 c->callback_mode = mode; 975 } 976 977 static inline void set_per_channel_state(struct vmbus_channel *c, void *s) 978 { 979 c->per_channel_state = s; 980 } 981 982 static inline void *get_per_channel_state(struct vmbus_channel *c) 983 { 984 return c->per_channel_state; 985 } 986 987 static inline void set_channel_pending_send_size(struct vmbus_channel *c, 988 u32 size) 989 { 990 unsigned long flags; 991 992 if (size) { 993 spin_lock_irqsave(&c->outbound.ring_lock, flags); 994 ++c->out_full_total; 995 996 if (!c->out_full_flag) { 997 ++c->out_full_first; 998 c->out_full_flag = true; 999 } 1000 spin_unlock_irqrestore(&c->outbound.ring_lock, flags); 1001 } else { 1002 c->out_full_flag = false; 1003 } 1004 1005 c->outbound.ring_buffer->pending_send_sz = size; 1006 } 1007 1008 static inline void set_low_latency_mode(struct vmbus_channel *c) 1009 { 1010 c->low_latency = true; 1011 } 1012 1013 static inline void clear_low_latency_mode(struct vmbus_channel *c) 1014 { 1015 c->low_latency = false; 1016 } 1017 1018 void vmbus_onmessage(void *context); 1019 1020 int vmbus_request_offers(void); 1021 1022 /* 1023 * APIs for managing sub-channels. 1024 */ 1025 1026 void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel, 1027 void (*sc_cr_cb)(struct vmbus_channel *new_sc)); 1028 1029 void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel, 1030 void (*chn_rescind_cb)(struct vmbus_channel *)); 1031 1032 /* 1033 * Check if sub-channels have already been offerred. This API will be useful 1034 * when the driver is unloaded after establishing sub-channels. In this case, 1035 * when the driver is re-loaded, the driver would have to check if the 1036 * subchannels have already been established before attempting to request 1037 * the creation of sub-channels. 1038 * This function returns TRUE to indicate that subchannels have already been 1039 * created. 1040 * This function should be invoked after setting the callback function for 1041 * sub-channel creation. 1042 */ 1043 bool vmbus_are_subchannels_present(struct vmbus_channel *primary); 1044 1045 /* The format must be the same as struct vmdata_gpa_direct */ 1046 struct vmbus_channel_packet_page_buffer { 1047 u16 type; 1048 u16 dataoffset8; 1049 u16 length8; 1050 u16 flags; 1051 u64 transactionid; 1052 u32 reserved; 1053 u32 rangecount; 1054 struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT]; 1055 } __packed; 1056 1057 /* The format must be the same as struct vmdata_gpa_direct */ 1058 struct vmbus_channel_packet_multipage_buffer { 1059 u16 type; 1060 u16 dataoffset8; 1061 u16 length8; 1062 u16 flags; 1063 u64 transactionid; 1064 u32 reserved; 1065 u32 rangecount; /* Always 1 in this case */ 1066 struct hv_multipage_buffer range; 1067 } __packed; 1068 1069 /* The format must be the same as struct vmdata_gpa_direct */ 1070 struct vmbus_packet_mpb_array { 1071 u16 type; 1072 u16 dataoffset8; 1073 u16 length8; 1074 u16 flags; 1075 u64 transactionid; 1076 u32 reserved; 1077 u32 rangecount; /* Always 1 in this case */ 1078 struct hv_mpb_array range; 1079 } __packed; 1080 1081 int vmbus_alloc_ring(struct vmbus_channel *channel, 1082 u32 send_size, u32 recv_size); 1083 void vmbus_free_ring(struct vmbus_channel *channel); 1084 1085 int vmbus_connect_ring(struct vmbus_channel *channel, 1086 void (*onchannel_callback)(void *context), 1087 void *context); 1088 int vmbus_disconnect_ring(struct vmbus_channel *channel); 1089 1090 extern int vmbus_open(struct vmbus_channel *channel, 1091 u32 send_ringbuffersize, 1092 u32 recv_ringbuffersize, 1093 void *userdata, 1094 u32 userdatalen, 1095 void (*onchannel_callback)(void *context), 1096 void *context); 1097 1098 extern void vmbus_close(struct vmbus_channel *channel); 1099 1100 extern int vmbus_sendpacket(struct vmbus_channel *channel, 1101 void *buffer, 1102 u32 bufferLen, 1103 u64 requestid, 1104 enum vmbus_packet_type type, 1105 u32 flags); 1106 1107 extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, 1108 struct hv_page_buffer pagebuffers[], 1109 u32 pagecount, 1110 void *buffer, 1111 u32 bufferlen, 1112 u64 requestid); 1113 1114 extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, 1115 struct vmbus_packet_mpb_array *mpb, 1116 u32 desc_size, 1117 void *buffer, 1118 u32 bufferlen, 1119 u64 requestid); 1120 1121 extern int vmbus_establish_gpadl(struct vmbus_channel *channel, 1122 void *kbuffer, 1123 u32 size, 1124 u32 *gpadl_handle); 1125 1126 extern int vmbus_teardown_gpadl(struct vmbus_channel *channel, 1127 u32 gpadl_handle); 1128 1129 void vmbus_reset_channel_cb(struct vmbus_channel *channel); 1130 1131 extern int vmbus_recvpacket(struct vmbus_channel *channel, 1132 void *buffer, 1133 u32 bufferlen, 1134 u32 *buffer_actual_len, 1135 u64 *requestid); 1136 1137 extern int vmbus_recvpacket_raw(struct vmbus_channel *channel, 1138 void *buffer, 1139 u32 bufferlen, 1140 u32 *buffer_actual_len, 1141 u64 *requestid); 1142 1143 1144 extern void vmbus_ontimer(unsigned long data); 1145 1146 /* Base driver object */ 1147 struct hv_driver { 1148 const char *name; 1149 1150 /* 1151 * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 1152 * channel flag, actually doesn't mean a synthetic device because the 1153 * offer's if_type/if_instance can change for every new hvsock 1154 * connection. 1155 * 1156 * However, to facilitate the notification of new-offer/rescind-offer 1157 * from vmbus driver to hvsock driver, we can handle hvsock offer as 1158 * a special vmbus device, and hence we need the below flag to 1159 * indicate if the driver is the hvsock driver or not: we need to 1160 * specially treat the hvosck offer & driver in vmbus_match(). 1161 */ 1162 bool hvsock; 1163 1164 /* the device type supported by this driver */ 1165 guid_t dev_type; 1166 const struct hv_vmbus_device_id *id_table; 1167 1168 struct device_driver driver; 1169 1170 /* dynamic device GUID's */ 1171 struct { 1172 spinlock_t lock; 1173 struct list_head list; 1174 } dynids; 1175 1176 int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *); 1177 int (*remove)(struct hv_device *); 1178 void (*shutdown)(struct hv_device *); 1179 1180 int (*suspend)(struct hv_device *); 1181 int (*resume)(struct hv_device *); 1182 1183 }; 1184 1185 /* Base device object */ 1186 struct hv_device { 1187 /* the device type id of this device */ 1188 guid_t dev_type; 1189 1190 /* the device instance id of this device */ 1191 guid_t dev_instance; 1192 u16 vendor_id; 1193 u16 device_id; 1194 1195 struct device device; 1196 char *driver_override; /* Driver name to force a match */ 1197 1198 struct vmbus_channel *channel; 1199 struct kset *channels_kset; 1200 1201 /* place holder to keep track of the dir for hv device in debugfs */ 1202 struct dentry *debug_dir; 1203 1204 }; 1205 1206 1207 static inline struct hv_device *device_to_hv_device(struct device *d) 1208 { 1209 return container_of(d, struct hv_device, device); 1210 } 1211 1212 static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d) 1213 { 1214 return container_of(d, struct hv_driver, driver); 1215 } 1216 1217 static inline void hv_set_drvdata(struct hv_device *dev, void *data) 1218 { 1219 dev_set_drvdata(&dev->device, data); 1220 } 1221 1222 static inline void *hv_get_drvdata(struct hv_device *dev) 1223 { 1224 return dev_get_drvdata(&dev->device); 1225 } 1226 1227 struct hv_ring_buffer_debug_info { 1228 u32 current_interrupt_mask; 1229 u32 current_read_index; 1230 u32 current_write_index; 1231 u32 bytes_avail_toread; 1232 u32 bytes_avail_towrite; 1233 }; 1234 1235 1236 int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info, 1237 struct hv_ring_buffer_debug_info *debug_info); 1238 1239 /* Vmbus interface */ 1240 #define vmbus_driver_register(driver) \ 1241 __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) 1242 int __must_check __vmbus_driver_register(struct hv_driver *hv_driver, 1243 struct module *owner, 1244 const char *mod_name); 1245 void vmbus_driver_unregister(struct hv_driver *hv_driver); 1246 1247 void vmbus_hvsock_device_unregister(struct vmbus_channel *channel); 1248 1249 int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, 1250 resource_size_t min, resource_size_t max, 1251 resource_size_t size, resource_size_t align, 1252 bool fb_overlap_ok); 1253 void vmbus_free_mmio(resource_size_t start, resource_size_t size); 1254 1255 /* 1256 * GUID definitions of various offer types - services offered to the guest. 1257 */ 1258 1259 /* 1260 * Network GUID 1261 * {f8615163-df3e-46c5-913f-f2d2f965ed0e} 1262 */ 1263 #define HV_NIC_GUID \ 1264 .guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \ 1265 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e) 1266 1267 /* 1268 * IDE GUID 1269 * {32412632-86cb-44a2-9b5c-50d1417354f5} 1270 */ 1271 #define HV_IDE_GUID \ 1272 .guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \ 1273 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5) 1274 1275 /* 1276 * SCSI GUID 1277 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} 1278 */ 1279 #define HV_SCSI_GUID \ 1280 .guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \ 1281 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f) 1282 1283 /* 1284 * Shutdown GUID 1285 * {0e0b6031-5213-4934-818b-38d90ced39db} 1286 */ 1287 #define HV_SHUTDOWN_GUID \ 1288 .guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \ 1289 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb) 1290 1291 /* 1292 * Time Synch GUID 1293 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF} 1294 */ 1295 #define HV_TS_GUID \ 1296 .guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \ 1297 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf) 1298 1299 /* 1300 * Heartbeat GUID 1301 * {57164f39-9115-4e78-ab55-382f3bd5422d} 1302 */ 1303 #define HV_HEART_BEAT_GUID \ 1304 .guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \ 1305 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d) 1306 1307 /* 1308 * KVP GUID 1309 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6} 1310 */ 1311 #define HV_KVP_GUID \ 1312 .guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \ 1313 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6) 1314 1315 /* 1316 * Dynamic memory GUID 1317 * {525074dc-8985-46e2-8057-a307dc18a502} 1318 */ 1319 #define HV_DM_GUID \ 1320 .guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \ 1321 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02) 1322 1323 /* 1324 * Mouse GUID 1325 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a} 1326 */ 1327 #define HV_MOUSE_GUID \ 1328 .guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \ 1329 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a) 1330 1331 /* 1332 * Keyboard GUID 1333 * {f912ad6d-2b17-48ea-bd65-f927a61c7684} 1334 */ 1335 #define HV_KBD_GUID \ 1336 .guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \ 1337 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84) 1338 1339 /* 1340 * VSS (Backup/Restore) GUID 1341 */ 1342 #define HV_VSS_GUID \ 1343 .guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \ 1344 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40) 1345 /* 1346 * Synthetic Video GUID 1347 * {DA0A7802-E377-4aac-8E77-0558EB1073F8} 1348 */ 1349 #define HV_SYNTHVID_GUID \ 1350 .guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \ 1351 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8) 1352 1353 /* 1354 * Synthetic FC GUID 1355 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda} 1356 */ 1357 #define HV_SYNTHFC_GUID \ 1358 .guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \ 1359 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda) 1360 1361 /* 1362 * Guest File Copy Service 1363 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192} 1364 */ 1365 1366 #define HV_FCOPY_GUID \ 1367 .guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \ 1368 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92) 1369 1370 /* 1371 * NetworkDirect. This is the guest RDMA service. 1372 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501} 1373 */ 1374 #define HV_ND_GUID \ 1375 .guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \ 1376 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01) 1377 1378 /* 1379 * PCI Express Pass Through 1380 * {44C4F61D-4444-4400-9D52-802E27EDE19F} 1381 */ 1382 1383 #define HV_PCIE_GUID \ 1384 .guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \ 1385 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f) 1386 1387 /* 1388 * Linux doesn't support the 3 devices: the first two are for 1389 * Automatic Virtual Machine Activation, and the third is for 1390 * Remote Desktop Virtualization. 1391 * {f8e65716-3cb3-4a06-9a60-1889c5cccab5} 1392 * {3375baf4-9e15-4b30-b765-67acb10d607b} 1393 * {276aacf4-ac15-426c-98dd-7521ad3f01fe} 1394 */ 1395 1396 #define HV_AVMA1_GUID \ 1397 .guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \ 1398 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5) 1399 1400 #define HV_AVMA2_GUID \ 1401 .guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \ 1402 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b) 1403 1404 #define HV_RDV_GUID \ 1405 .guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \ 1406 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe) 1407 1408 /* 1409 * Common header for Hyper-V ICs 1410 */ 1411 1412 #define ICMSGTYPE_NEGOTIATE 0 1413 #define ICMSGTYPE_HEARTBEAT 1 1414 #define ICMSGTYPE_KVPEXCHANGE 2 1415 #define ICMSGTYPE_SHUTDOWN 3 1416 #define ICMSGTYPE_TIMESYNC 4 1417 #define ICMSGTYPE_VSS 5 1418 1419 #define ICMSGHDRFLAG_TRANSACTION 1 1420 #define ICMSGHDRFLAG_REQUEST 2 1421 #define ICMSGHDRFLAG_RESPONSE 4 1422 1423 1424 /* 1425 * While we want to handle util services as regular devices, 1426 * there is only one instance of each of these services; so 1427 * we statically allocate the service specific state. 1428 */ 1429 1430 struct hv_util_service { 1431 u8 *recv_buffer; 1432 void *channel; 1433 void (*util_cb)(void *); 1434 int (*util_init)(struct hv_util_service *); 1435 void (*util_deinit)(void); 1436 }; 1437 1438 struct vmbuspipe_hdr { 1439 u32 flags; 1440 u32 msgsize; 1441 } __packed; 1442 1443 struct ic_version { 1444 u16 major; 1445 u16 minor; 1446 } __packed; 1447 1448 struct icmsg_hdr { 1449 struct ic_version icverframe; 1450 u16 icmsgtype; 1451 struct ic_version icvermsg; 1452 u16 icmsgsize; 1453 u32 status; 1454 u8 ictransaction_id; 1455 u8 icflags; 1456 u8 reserved[2]; 1457 } __packed; 1458 1459 struct icmsg_negotiate { 1460 u16 icframe_vercnt; 1461 u16 icmsg_vercnt; 1462 u32 reserved; 1463 struct ic_version icversion_data[1]; /* any size array */ 1464 } __packed; 1465 1466 struct shutdown_msg_data { 1467 u32 reason_code; 1468 u32 timeout_seconds; 1469 u32 flags; 1470 u8 display_message[2048]; 1471 } __packed; 1472 1473 struct heartbeat_msg_data { 1474 u64 seq_num; 1475 u32 reserved[8]; 1476 } __packed; 1477 1478 /* Time Sync IC defs */ 1479 #define ICTIMESYNCFLAG_PROBE 0 1480 #define ICTIMESYNCFLAG_SYNC 1 1481 #define ICTIMESYNCFLAG_SAMPLE 2 1482 1483 #ifdef __x86_64__ 1484 #define WLTIMEDELTA 116444736000000000L /* in 100ns unit */ 1485 #else 1486 #define WLTIMEDELTA 116444736000000000LL 1487 #endif 1488 1489 struct ictimesync_data { 1490 u64 parenttime; 1491 u64 childtime; 1492 u64 roundtriptime; 1493 u8 flags; 1494 } __packed; 1495 1496 struct ictimesync_ref_data { 1497 u64 parenttime; 1498 u64 vmreferencetime; 1499 u8 flags; 1500 char leapflags; 1501 char stratum; 1502 u8 reserved[3]; 1503 } __packed; 1504 1505 struct hyperv_service_callback { 1506 u8 msg_type; 1507 char *log_msg; 1508 guid_t data; 1509 struct vmbus_channel *channel; 1510 void (*callback)(void *context); 1511 }; 1512 1513 #define MAX_SRV_VER 0x7ffffff 1514 extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, 1515 const int *fw_version, int fw_vercnt, 1516 const int *srv_version, int srv_vercnt, 1517 int *nego_fw_version, int *nego_srv_version); 1518 1519 void hv_process_channel_removal(struct vmbus_channel *channel); 1520 1521 void vmbus_setevent(struct vmbus_channel *channel); 1522 /* 1523 * Negotiated version with the Host. 1524 */ 1525 1526 extern __u32 vmbus_proto_version; 1527 1528 int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id, 1529 const guid_t *shv_host_servie_id); 1530 void vmbus_set_event(struct vmbus_channel *channel); 1531 1532 /* Get the start of the ring buffer. */ 1533 static inline void * 1534 hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info) 1535 { 1536 return ring_info->ring_buffer->buffer; 1537 } 1538 1539 /* 1540 * Mask off host interrupt callback notifications 1541 */ 1542 static inline void hv_begin_read(struct hv_ring_buffer_info *rbi) 1543 { 1544 rbi->ring_buffer->interrupt_mask = 1; 1545 1546 /* make sure mask update is not reordered */ 1547 virt_mb(); 1548 } 1549 1550 /* 1551 * Re-enable host callback and return number of outstanding bytes 1552 */ 1553 static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi) 1554 { 1555 1556 rbi->ring_buffer->interrupt_mask = 0; 1557 1558 /* make sure mask update is not reordered */ 1559 virt_mb(); 1560 1561 /* 1562 * Now check to see if the ring buffer is still empty. 1563 * If it is not, we raced and we need to process new 1564 * incoming messages. 1565 */ 1566 return hv_get_bytes_to_read(rbi); 1567 } 1568 1569 /* 1570 * An API to support in-place processing of incoming VMBUS packets. 1571 */ 1572 1573 /* Get data payload associated with descriptor */ 1574 static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc) 1575 { 1576 return (void *)((unsigned long)desc + (desc->offset8 << 3)); 1577 } 1578 1579 /* Get data size associated with descriptor */ 1580 static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc) 1581 { 1582 return (desc->len8 << 3) - (desc->offset8 << 3); 1583 } 1584 1585 1586 struct vmpacket_descriptor * 1587 hv_pkt_iter_first(struct vmbus_channel *channel); 1588 1589 struct vmpacket_descriptor * 1590 __hv_pkt_iter_next(struct vmbus_channel *channel, 1591 const struct vmpacket_descriptor *pkt); 1592 1593 void hv_pkt_iter_close(struct vmbus_channel *channel); 1594 1595 /* 1596 * Get next packet descriptor from iterator 1597 * If at end of list, return NULL and update host. 1598 */ 1599 static inline struct vmpacket_descriptor * 1600 hv_pkt_iter_next(struct vmbus_channel *channel, 1601 const struct vmpacket_descriptor *pkt) 1602 { 1603 struct vmpacket_descriptor *nxt; 1604 1605 nxt = __hv_pkt_iter_next(channel, pkt); 1606 if (!nxt) 1607 hv_pkt_iter_close(channel); 1608 1609 return nxt; 1610 } 1611 1612 #define foreach_vmbus_pkt(pkt, channel) \ 1613 for (pkt = hv_pkt_iter_first(channel); pkt; \ 1614 pkt = hv_pkt_iter_next(channel, pkt)) 1615 1616 /* 1617 * Interface for passing data between SR-IOV PF and VF drivers. The VF driver 1618 * sends requests to read and write blocks. Each block must be 128 bytes or 1619 * smaller. Optionally, the VF driver can register a callback function which 1620 * will be invoked when the host says that one or more of the first 64 block 1621 * IDs is "invalid" which means that the VF driver should reread them. 1622 */ 1623 #define HV_CONFIG_BLOCK_SIZE_MAX 128 1624 1625 int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len, 1626 unsigned int block_id, unsigned int *bytes_returned); 1627 int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len, 1628 unsigned int block_id); 1629 int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context, 1630 void (*block_invalidate)(void *context, 1631 u64 block_mask)); 1632 1633 struct hyperv_pci_block_ops { 1634 int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len, 1635 unsigned int block_id, unsigned int *bytes_returned); 1636 int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len, 1637 unsigned int block_id); 1638 int (*reg_blk_invalidate)(struct pci_dev *dev, void *context, 1639 void (*block_invalidate)(void *context, 1640 u64 block_mask)); 1641 }; 1642 1643 extern struct hyperv_pci_block_ops hvpci_block_ops; 1644 1645 #endif /* _HYPERV_H */ 1646