/*
 *
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */

#ifndef _HYPERV_H
#define _HYPERV_H

#include <uapi/linux/hyperv.h>

#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/reciprocal_div.h>

#define MAX_PAGE_BUFFER_COUNT			32
#define MAX_MULTIPAGE_BUFFER_COUNT		32 /* 128K */

#pragma pack(push, 1)

/* Single-page buffer */
struct hv_page_buffer {
	u32 len;
	u32 offset;
	u64 pfn;
};

/* Multiple-page buffer */
struct hv_multipage_buffer {
	/* Length and Offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};

/*
 * Multiple-page buffer array; the pfn array is variable size:
 * The number of entries in the PFN array is determined by
 * "len" and "offset".
 */
struct hv_mpb_array {
	/* Length and Offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[];
};

/* 0x18 includes the proprietary packet header */
#define MAX_PAGE_BUFFER_PACKET		(0x18 +			\
					(sizeof(struct hv_page_buffer) * \
					 MAX_PAGE_BUFFER_COUNT))
#define MAX_MULTIPAGE_BUFFER_PACKET	(0x18 +			\
					 sizeof(struct hv_multipage_buffer))


#pragma pack(pop)

struct hv_ring_buffer {
	/* Offset in bytes from the start of ring data below */
	u32 write_index;

	/* Offset in bytes from the start of ring data below */
	u32 read_index;

	u32 interrupt_mask;

	/*
	 * WS2012/Win8 and later versions of Hyper-V implement interrupt
	 * driven flow management. The feature bit feat_pending_send_sz
	 * is set by the host on the host->guest ring buffer, and by the
	 * guest on the guest->host ring buffer.
	 *
	 * The meaning of the feature bit is a bit complex in that it has
	 * semantics that apply to both ring buffers.  If the guest sets
	 * the feature bit in the guest->host ring buffer, the guest is
	 * telling the host that:
	 * 1) It will set the pending_send_sz field in the guest->host ring
	 *    buffer when it is waiting for space to become available, and
	 * 2) It will read the pending_send_sz field in the host->guest
	 *    ring buffer and interrupt the host when it frees enough space
	 *
	 * Similarly, if the host sets the feature bit in the host->guest
	 * ring buffer, the host is telling the guest that:
	 * 1) It will set the pending_send_sz field in the host->guest ring
	 *    buffer when it is waiting for space to become available, and
	 * 2) It will read the pending_send_sz field in the guest->host
	 *    ring buffer and interrupt the guest when it frees enough space
	 *
	 * If either the guest or host does not set the feature bit that it
	 * owns, that guest or host must do polling if it encounters a full
	 * ring buffer, and not signal the other end with an interrupt.
	 */
	u32 pending_send_sz;
	u32 reserved1[12];
	union {
		struct {
			u32 feat_pending_send_sz:1;
		};
		u32 value;
	} feature_bits;

	/* Pad it to PAGE_SIZE so that data starts on page boundary */
	u8 reserved2[4028];

	/*
	 * Ring data starts here + RingDataStartOffset
	 * !!! DO NOT place any fields below this !!!
	 */
	u8 buffer[0];
} __packed;
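
/*
 * Illustrative writer-side use of the flow management described above
 * (a sketch, not kernel code): assuming the peer set feat_pending_send_sz,
 * a writer that finds the ring too full can advertise how much room it
 * needs and rely on the peer to interrupt it when that much space frees
 * up.  hv_get_bytes_to_write() and set_channel_pending_send_size() are
 * declared later in this header; "needed" is a hypothetical byte count.
 *
 *	if (hv_get_bytes_to_write(&channel->outbound) < needed) {
 *		set_channel_pending_send_size(channel, needed);
 *		return -EAGAIN;		// retry when the peer signals us
 *	}
 */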

struct hv_ring_buffer_info {
	struct hv_ring_buffer *ring_buffer;
	u32 ring_size;			/* Include the shared header */
	struct reciprocal_value ring_size_div10_reciprocal;
	spinlock_t ring_lock;

	u32 ring_datasize;		/* < ring_size */
	u32 priv_read_index;
};

static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, read;

	dsize = rbi->ring_datasize;
	read_loc = rbi->ring_buffer->read_index;
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	read = write_loc >= read_loc ? (write_loc - read_loc) :
		(dsize - read_loc) + write_loc;

	return read;
}

static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, write;

	dsize = rbi->ring_datasize;
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = rbi->ring_buffer->write_index;

	write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	return write;
}
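
/*
 * Worked example of the wraparound arithmetic above: with
 * ring_datasize = 4096, read_index = 3000 and write_index = 1000, the
 * unread data wraps past the end of the ring, so bytes to read =
 * (4096 - 3000) + 1000 = 2096 and bytes available to write =
 * 3000 - 1000 = 2000.  Note that to_read + to_write always equals
 * ring_datasize, so a writer must never fill the ring completely or a
 * full ring would be indistinguishable from an empty one.
 */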

static inline u32 hv_get_avail_to_write_percent(
		const struct hv_ring_buffer_info *rbi)
{
	u32 avail_write = hv_get_bytes_to_write(rbi);

	return reciprocal_divide(
			(avail_write << 3) + (avail_write << 1),
			rbi->ring_size_div10_reciprocal);
}
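
/*
 * Note on the computation above: (x << 3) + (x << 1) is x * 10, so the
 * function returns avail_write * 10 / (ring_size / 10), i.e. the
 * percentage of the ring that is free, with the division done via the
 * precomputed reciprocal instead of a runtime divide.  For example,
 * with ring_size = 40960 and avail_write = 10240:
 * 10240 * 10 / 4096 = 25 (percent).
 */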

/*
 * The VMBus version is a 32-bit entity broken up into two 16-bit
 * quantities: major_number.minor_number.
 *
 * 0 . 13 (Windows Server 2008)
 * 1 . 1  (Windows 7)
 * 2 . 4  (Windows 8)
 * 3 . 0  (Windows 8 R2)
 * 4 . 0  (Windows 10)
 * 5 . 0  (Newer Windows 10)
 */

#define VERSION_WS2008		((0 << 16) | (13))
#define VERSION_WIN7		((1 << 16) | (1))
#define VERSION_WIN8		((2 << 16) | (4))
#define VERSION_WIN8_1		((3 << 16) | (0))
#define VERSION_WIN10		((4 << 16) | (0))
#define VERSION_WIN10_V5	((5 << 16) | (0))

#define VERSION_INVAL		-1

#define VERSION_CURRENT		VERSION_WIN10_V5
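
/*
 * For reference (illustrative): the major number occupies the upper 16
 * bits and the minor number the lower 16 bits, so a negotiated version
 * such as vmbus_proto_version (declared later in this header) can be
 * decomposed as:
 *
 *	u16 major = version >> 16;
 *	u16 minor = version & 0xffff;
 *
 * e.g. VERSION_WIN8 is (2 << 16) | 4: major 2, minor 4.
 */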

/* The maximum size of a pipe payload is 16K */
#define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)

/* Define PipeMode values. */
#define VMBUS_PIPE_TYPE_BYTE		0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE		0x00000004

/* The size of the user defined data buffer for non-pipe offers. */
#define MAX_USER_DEFINED_BYTES		120

/* The size of the user defined data buffer for pipe offers. */
#define MAX_PIPE_USER_DEFINED_BYTES	116

/*
 * At the center of the Channel Management library is the Channel Offer. This
 * struct contains the fundamental information about an offer.
 */
struct vmbus_channel_offer {
	uuid_le if_type;
	uuid_le if_instance;

	/*
	 * These two fields are not currently used.
	 */
	u64 reserved1;
	u64 reserved2;

	u16 chn_flags;
	u16 mmio_megabytes;		/* in bytes * 1024 * 1024 */

	union {
		/* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
		struct {
			unsigned char user_def[MAX_USER_DEFINED_BYTES];
		} std;

		/*
		 * Pipes:
		 * The following structure is an integrated pipe protocol,
		 * which is implemented on top of standard user-defined data.
		 * Pipe clients have MAX_PIPE_USER_DEFINED_BYTES left for
		 * their own use.
		 */
		struct {
			u32 pipe_mode;
			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
		} pipe;
	} u;
	/*
	 * The sub_channel_index is defined in win8.
	 */
	u16 sub_channel_index;
	u16 reserved3;
} __packed;

/* Server Flags */
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE	1
#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES	2
#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS		4
#define VMBUS_CHANNEL_NAMED_PIPE_MODE			0x10
#define VMBUS_CHANNEL_LOOPBACK_OFFER			0x100
#define VMBUS_CHANNEL_PARENT_OFFER			0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400
#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER		0x2000

struct vmpacket_descriptor {
	u16 type;
	u16 offset8;
	u16 len8;
	u16 flags;
	u64 trans_id;
} __packed;

struct vmpacket_header {
	u32 prev_pkt_start_offset;
	struct vmpacket_descriptor descriptor;
} __packed;

struct vmtransfer_page_range {
	u32 byte_count;
	u32 byte_offset;
} __packed;

struct vmtransfer_page_packet_header {
	struct vmpacket_descriptor d;
	u16 xfer_pageset_id;
	u8 sender_owns_set;
	u8 reserved;
	u32 range_cnt;
	struct vmtransfer_page_range ranges[1];
} __packed;

struct vmgpadl_packet_header {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;
} __packed;

struct vmadd_remove_transfer_page_set {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u16 xfer_pageset_id;
	u16 reserved;
} __packed;

/*
 * This structure defines a range in guest physical space that can be made to
 * look virtually contiguous.
 */
struct gpa_range {
	u32 byte_count;
	u32 byte_offset;
	u64 pfn_array[0];
};

/*
 * This is the format for an Establish Gpadl packet, which contains a handle by
 * which this GPADL will be known and a set of GPA ranges associated with it.
 * This can be converted to a MDL by the guest OS.  If there are multiple GPA
 * ranges, then the resulting MDL will be "chained," representing multiple VA
 * ranges.
 */
struct vmestablish_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/*
 * This is the format for a Teardown Gpadl packet, which indicates that the
 * GPADL handle in the Establish Gpadl packet will never be referenced again.
 */
struct vmteardown_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;	/* for alignment to an 8-byte boundary */
} __packed;

/*
 * This is the format for a GPA-Direct packet, which contains a set of GPA
 * ranges, in addition to commands and/or data.
 */
struct vmdata_gpa_direct {
	struct vmpacket_descriptor d;
	u32 reserved;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/* This is the format for an Additional Data Packet. */
struct vmadditional_data {
	struct vmpacket_descriptor d;
	u64 total_bytes;
	u32 offset;
	u32 byte_cnt;
	unsigned char data[1];
} __packed;

union vmpacket_largest_possible_header {
	struct vmpacket_descriptor simple_hdr;
	struct vmtransfer_page_packet_header xfer_page_hdr;
	struct vmgpadl_packet_header gpadl_hdr;
	struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
	struct vmestablish_gpadl establish_gpadl_hdr;
	struct vmteardown_gpadl teardown_gpadl_hdr;
	struct vmdata_gpa_direct data_gpa_direct_hdr;
};

#define VMPACKET_DATA_START_ADDRESS(__packet)	\
	(void *)(((unsigned char *)__packet) +	\
	 ((struct vmpacket_descriptor *)__packet)->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)		\
	((((struct vmpacket_descriptor *)__packet)->len8 -	\
	  ((struct vmpacket_descriptor *)__packet)->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)	\
	(((struct vmpacket_descriptor *)__packet)->type)
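
/*
 * Worked example for the macros above (illustrative): offset8 and len8
 * are in units of 8 bytes.  A plain vmpacket_descriptor is 16 bytes, so
 * an inband packet with offset8 = 2 and len8 = 10 has its payload
 * starting 2 * 8 = 16 bytes into the packet, and the payload length is
 * (10 - 2) * 8 = 64 bytes; hv_pkt_data() and hv_pkt_datalen() later in
 * this header compute the same values.
 */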

enum vmbus_packet_type {
	VM_PKT_INVALID				= 0x0,
	VM_PKT_SYNCH				= 0x1,
	VM_PKT_ADD_XFER_PAGESET			= 0x2,
	VM_PKT_RM_XFER_PAGESET			= 0x3,
	VM_PKT_ESTABLISH_GPADL			= 0x4,
	VM_PKT_TEARDOWN_GPADL			= 0x5,
	VM_PKT_DATA_INBAND			= 0x6,
	VM_PKT_DATA_USING_XFER_PAGES		= 0x7,
	VM_PKT_DATA_USING_GPADL			= 0x8,
	VM_PKT_DATA_USING_GPA_DIRECT		= 0x9,
	VM_PKT_CANCEL_REQUEST			= 0xa,
	VM_PKT_COMP				= 0xb,
	VM_PKT_DATA_USING_ADDITIONAL_PKT	= 0xc,
	VM_PKT_ADDITIONAL_DATA			= 0xd
};

#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED	1


/* Version 1 messages */
enum vmbus_channel_message_type {
	CHANNELMSG_INVALID		=  0,
	CHANNELMSG_OFFERCHANNEL		=  1,
	CHANNELMSG_RESCIND_CHANNELOFFER	=  2,
	CHANNELMSG_REQUESTOFFERS	=  3,
	CHANNELMSG_ALLOFFERS_DELIVERED	=  4,
	CHANNELMSG_OPENCHANNEL		=  5,
	CHANNELMSG_OPENCHANNEL_RESULT	=  6,
	CHANNELMSG_CLOSECHANNEL		=  7,
	CHANNELMSG_GPADL_HEADER		=  8,
	CHANNELMSG_GPADL_BODY		=  9,
	CHANNELMSG_GPADL_CREATED	= 10,
	CHANNELMSG_GPADL_TEARDOWN	= 11,
	CHANNELMSG_GPADL_TORNDOWN	= 12,
	CHANNELMSG_RELID_RELEASED	= 13,
	CHANNELMSG_INITIATE_CONTACT	= 14,
	CHANNELMSG_VERSION_RESPONSE	= 15,
	CHANNELMSG_UNLOAD		= 16,
	CHANNELMSG_UNLOAD_RESPONSE	= 17,
	CHANNELMSG_18			= 18,
	CHANNELMSG_19			= 19,
	CHANNELMSG_20			= 20,
	CHANNELMSG_TL_CONNECT_REQUEST	= 21,
	CHANNELMSG_22			= 22,
	CHANNELMSG_TL_CONNECT_RESULT	= 23,
	CHANNELMSG_COUNT
};

struct vmbus_channel_message_header {
	enum vmbus_channel_message_type msgtype;
	u32 padding;
} __packed;

/* Query VMBus Version parameters */
struct vmbus_channel_query_vmbus_version {
	struct vmbus_channel_message_header header;
	u32 version;
} __packed;

/* VMBus Version Supported parameters */
struct vmbus_channel_version_supported {
	struct vmbus_channel_message_header header;
	u8 version_supported;
} __packed;

/* Offer Channel parameters */
struct vmbus_channel_offer_channel {
	struct vmbus_channel_message_header header;
	struct vmbus_channel_offer offer;
	u32 child_relid;
	u8 monitorid;
	/*
	 * win7 and beyond splits this field into a bit field.
	 */
	u8 monitor_allocated:1;
	u8 reserved:7;
	/*
	 * These are new fields added in win7 and later.
	 * Do not access these fields without checking the
	 * negotiated protocol.
	 *
	 * If "is_dedicated_interrupt" is set, we must not set the
	 * associated bit in the channel bitmap while sending the
	 * interrupt to the host.
	 *
	 * connection_id is to be used in signaling the host.
	 */
	u16 is_dedicated_interrupt:1;
	u16 reserved1:15;
	u32 connection_id;
} __packed;

/* Rescind Offer parameters */
struct vmbus_channel_rescind_offer {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

static inline u32
hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
{
	return rbi->ring_buffer->pending_send_sz;
}

/*
 * Request Offer -- no parameters, SynIC message contains the partition ID
 * Set Snoop -- no parameters, SynIC message contains the partition ID
 * Clear Snoop -- no parameters, SynIC message contains the partition ID
 * All Offers Delivered -- no parameters, SynIC message contains the partition
 *                         ID
 * Flush Client -- no parameters, SynIC message contains the partition ID
 */

/* Open Channel parameters */
struct vmbus_channel_open_channel {
	struct vmbus_channel_message_header header;

	/* Identifies the specific VMBus channel that is being opened. */
	u32 child_relid;

	/* ID making a particular open request at a channel offer unique. */
	u32 openid;

	/* GPADL for the channel's ring buffer. */
	u32 ringbuffer_gpadlhandle;

	/*
	 * Starting with win8, this field will be used to specify
	 * the target virtual processor on which to deliver the interrupt for
	 * the host to guest communication.
	 * Prior to win8, incoming channel interrupts would only
	 * be delivered on cpu 0. Setting this value to 0 would
	 * preserve the earlier behavior.
	 */
	u32 target_vp;

	/*
	 * The upstream ring buffer begins at offset zero in the memory
	 * described by RingBufferGpadlHandle. The downstream ring buffer
	 * follows it at this offset (in pages).
	 */
	u32 downstream_ringbuffer_pageoffset;

	/* User-specific data to be passed along to the server endpoint. */
	unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;

/* Open Channel Result parameters */
struct vmbus_channel_open_result {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 openid;
	u32 status;
} __packed;

/* Close channel parameters */
struct vmbus_channel_close_channel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

/* Channel Message GPADL */
#define GPADL_TYPE_RING_BUFFER		1
#define GPADL_TYPE_SERVER_SAVE_AREA	2
#define GPADL_TYPE_TRANSACTION		8

/*
 * The number of PFNs in a GPADL message is defined by the number of
 * pages that would be spanned by ByteCount and ByteOffset.  If the
 * implied number of PFNs won't fit in this packet, there will be a
 * follow-up packet that contains more.
 */
struct vmbus_channel_gpadl_header {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u16 range_buflen;
	u16 rangecount;
	struct gpa_range range[0];
} __packed;
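
/*
 * Illustrative PFN count computation for the message above, assuming
 * byte_offset < PAGE_SIZE:
 *
 *	pfn_count = (byte_offset + byte_count + PAGE_SIZE - 1) / PAGE_SIZE;
 *
 * e.g. an 8000-byte range starting at offset 512 with 4096-byte pages
 * spans (512 + 8000 + 4095) / 4096 = 3 pages, so 3 PFNs are sent.
 */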

/* This is the followup packet that contains more PFNs. */
struct vmbus_channel_gpadl_body {
	struct vmbus_channel_message_header header;
	u32 msgnumber;
	u32 gpadl;
	u64 pfn[0];
} __packed;

struct vmbus_channel_gpadl_created {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u32 creation_status;
} __packed;

struct vmbus_channel_gpadl_teardown {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
} __packed;

struct vmbus_channel_gpadl_torndown {
	struct vmbus_channel_message_header header;
	u32 gpadl;
} __packed;

struct vmbus_channel_relid_released {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

struct vmbus_channel_initiate_contact {
	struct vmbus_channel_message_header header;
	u32 vmbus_version_requested;
	u32 target_vcpu;	/* The VCPU the host should respond to */
	union {
		u64 interrupt_page;
		struct {
			u8 msg_sint;
			u8 padding1[3];
			u32 padding2;
		};
	};
	u64 monitor_page1;
	u64 monitor_page2;
} __packed;

/* Hyper-V socket: guest's connect()-ing to host */
struct vmbus_channel_tl_connect_request {
	struct vmbus_channel_message_header header;
	uuid_le guest_endpoint_id;
	uuid_le host_service_id;
} __packed;

struct vmbus_channel_version_response {
	struct vmbus_channel_message_header header;
	u8 version_supported;

	u8 connection_state;
	u16 padding;

	/*
	 * On new hosts that support VMBus protocol 5.0, we must use
	 * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
	 * and for subsequent messages, we must use the Message Connection ID
	 * field in the host-returned Version Response Message.
	 *
	 * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
	 */
	u32 msg_conn_id;
} __packed;

enum vmbus_channel_state {
	CHANNEL_OFFER_STATE,
	CHANNEL_OPENING_STATE,
	CHANNEL_OPEN_STATE,
	CHANNEL_OPENED_STATE,
};

/*
 * Represents each channel msg on the vmbus connection.  This is a
 * variable-size data structure depending on the msg type itself.
 */
struct vmbus_channel_msginfo {
	/* Bookkeeping stuff */
	struct list_head msglistentry;

	/* So far, this is only used to handle gpadl body message */
	struct list_head submsglist;

	/* Synchronize the request/response if needed */
	struct completion waitevent;
	struct vmbus_channel *waiting_channel;
	union {
		struct vmbus_channel_version_supported version_supported;
		struct vmbus_channel_open_result open_result;
		struct vmbus_channel_gpadl_torndown gpadl_torndown;
		struct vmbus_channel_gpadl_created gpadl_created;
		struct vmbus_channel_version_response version_response;
	} response;

	u32 msgsize;
	/*
	 * The channel message that goes out on the "wire".
	 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
	 */
	unsigned char msg[0];
};

struct vmbus_close_msg {
	struct vmbus_channel_msginfo info;
	struct vmbus_channel_close_channel msg;
};

/* Define connection identifier type. */
union hv_connection_id {
	u32 asu32;
	struct {
		u32 id:24;
		u32 reserved:8;
	} u;
};

enum hv_numa_policy {
	HV_BALANCED = 0,
	HV_LOCALIZED,
};

enum vmbus_device_type {
	HV_IDE = 0,
	HV_SCSI,
	HV_FC,
	HV_NIC,
	HV_ND,
	HV_PCIE,
	HV_FB,
	HV_KBD,
	HV_MOUSE,
	HV_KVP,
	HV_TS,
	HV_HB,
	HV_SHUTDOWN,
	HV_FCOPY,
	HV_BACKUP,
	HV_DM,
	HV_UNKNOWN,
};

struct vmbus_device {
	u16 dev_type;
	uuid_le guid;
	bool perf_device;
};

struct vmbus_channel {
	struct list_head listentry;

	struct hv_device *device_obj;

	enum vmbus_channel_state state;

	struct vmbus_channel_offer_channel offermsg;
	/*
	 * These are based on the OfferMsg.MonitorId.
	 * Save it here for easy access.
	 */
	u8 monitor_grp;
	u8 monitor_bit;

	bool rescind; /* got rescind msg */
	struct completion rescind_event;

	u32 ringbuffer_gpadlhandle;

	/* Allocated memory for ring buffer */
	struct page *ringbuffer_page;
	u32 ringbuffer_pagecount;
	struct hv_ring_buffer_info outbound;	/* send to parent */
	struct hv_ring_buffer_info inbound;	/* receive from parent */

	struct vmbus_close_msg close_msg;

	/* Statistics */
	u64 interrupts;	/* Host to Guest interrupts */
	u64 sig_events;	/* Guest to Host events */

	/* Channel callback's invoked in softirq context */
	struct tasklet_struct callback_event;
	void (*onchannel_callback)(void *context);
	void *channel_callback_context;

	/*
	 * A channel can be marked for one of three modes of reading:
	 *   BATCHED - callback called from tasklet and should read
	 *             channel until empty. Interrupts from the host
	 *             are masked while read is in process (default).
	 *   DIRECT - callback called from tasklet (softirq).
	 *   ISR - callback called in interrupt context and must
	 *         invoke its own deferred processing.
	 *         Host interrupts are disabled and must be re-enabled
	 *         when ring is empty.
	 */
	enum hv_callback_mode {
		HV_CALL_BATCHED,
		HV_CALL_DIRECT,
		HV_CALL_ISR
	} callback_mode;

	bool is_dedicated_interrupt;
	u64 sig_event;

	/*
	 * Starting with win8, this field will be used to specify
	 * the target virtual processor on which to deliver the interrupt for
	 * the host to guest communication.
	 * Prior to win8, incoming channel interrupts would only
	 * be delivered on cpu 0. Setting this value to 0 would
	 * preserve the earlier behavior.
	 */
	u32 target_vp;
	/* The corresponding CPUID in the guest */
	u32 target_cpu;
	/*
	 * State to manage the CPU affiliation of channels.
	 */
	struct cpumask alloced_cpus_in_node;
	int numa_node;
	/*
	 * Support for sub-channels. For high performance devices,
	 * it will be useful to have multiple sub-channels to support
	 * a scalable communication infrastructure with the host.
	 * The support for sub-channels is implemented as an extension
	 * to the current infrastructure.
	 * The initial offer is considered the primary channel and this
	 * offer message will indicate if the host supports sub-channels.
	 * The guest is free to ask for sub-channels to be offered and can
	 * open these sub-channels as a normal "primary" channel. However,
	 * all sub-channels will have the same type and instance guids as the
	 * primary channel. Requests sent on a given channel will result in a
	 * response on the same channel.
	 */

	/*
	 * Sub-channel creation callback. This callback will be called in
	 * process context when a sub-channel offer is received from the host.
	 * The guest can open the sub-channel in the context of this callback.
	 */
	void (*sc_creation_callback)(struct vmbus_channel *new_sc);

	/*
	 * Channel rescind callback. Some channels (the hvsock ones), need to
	 * register a callback which is invoked in vmbus_onoffer_rescind().
	 */
	void (*chn_rescind_callback)(struct vmbus_channel *channel);

	/*
	 * The spinlock to protect the structure. It is being used to protect
	 * test-and-set access to various attributes of the structure as well
	 * as all sc_list operations.
	 */
	spinlock_t lock;
	/*
	 * All Sub-channels of a primary channel are linked here.
	 */
	struct list_head sc_list;
	/*
	 * Current number of sub-channels.
	 */
	int num_sc;
	/*
	 * Number of a sub-channel (position within sc_list) which is supposed
	 * to be used as the next outgoing channel.
	 */
	int next_oc;
	/*
	 * The primary channel this sub-channel belongs to.
	 * This will be NULL for the primary channel.
	 */
	struct vmbus_channel *primary_channel;
	/*
	 * Support per-channel state for use by vmbus drivers.
	 */
	void *per_channel_state;
	/*
	 * To support per-cpu lookup mapping of relid to channel,
	 * link up channels based on their CPU affinity.
	 */
	struct list_head percpu_list;

	/*
	 * Defer freeing channel until after all cpu's have
	 * gone through grace period.
	 */
	struct rcu_head rcu;

	/*
	 * For sysfs per-channel properties.
	 */
	struct kobject kobj;

	/*
	 * For performance critical channels (storage, networking
	 * etc.), Hyper-V has a mechanism to enhance the throughput
	 * at the expense of latency:
	 * When the host is to be signaled, we just set a bit in a shared page
	 * and this bit will be inspected by the hypervisor within a certain
	 * window and if the bit is set, the host will be signaled. The window
	 * of time is the monitor latency - currently around 100 usecs. This
	 * mechanism improves throughput by:
	 *
	 * A) Making the host more efficient - each time it wakes up,
	 *    it will potentially process more packets. The
	 *    monitor latency allows a batch to build up.
	 * B) By deferring the hypercall to signal, we will also minimize
	 *    the interrupts.
	 *
	 * Clearly, these optimizations improve throughput at the expense of
	 * latency. Furthermore, since the channel is shared for both
	 * control and data messages, control messages currently suffer
	 * unnecessary latency adversely impacting performance and boot
	 * time. To fix this issue, permit tagging the channel as being
	 * in "low latency" mode. In this mode, we will bypass the monitor
	 * mechanism.
	 */
	bool low_latency;

	/*
	 * NUMA distribution policy:
	 * We support two policies:
	 * 1) Balanced: Here all performance critical channels are
	 *    distributed evenly amongst all the NUMA nodes.
	 *    This policy will be the default policy.
	 * 2) Localized: All channels of a given instance of a
	 *    performance critical service will be assigned CPUs
	 *    within a selected NUMA node.
	 */
	enum hv_numa_policy affinity_policy;

	bool probe_done;

	/*
	 * We must offload the handling of the primary/sub channels
	 * from the single-threaded vmbus_connection.work_queue to
	 * two different workqueues, otherwise we can block
	 * vmbus_connection.work_queue and hang: see vmbus_process_offer().
	 */
	struct work_struct add_channel_work;
};

static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
	return !!(c->offermsg.offer.chn_flags &
		  VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
}

static inline void set_channel_affinity_state(struct vmbus_channel *c,
					      enum hv_numa_policy policy)
{
	c->affinity_policy = policy;
}

static inline void set_channel_read_mode(struct vmbus_channel *c,
					 enum hv_callback_mode mode)
{
	c->callback_mode = mode;
}

static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
	c->per_channel_state = s;
}

static inline void *get_per_channel_state(struct vmbus_channel *c)
{
	return c->per_channel_state;
}

static inline void set_channel_pending_send_size(struct vmbus_channel *c,
						 u32 size)
{
	c->outbound.ring_buffer->pending_send_sz = size;
}

static inline void set_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = true;
}

static inline void clear_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = false;
}

void vmbus_onmessage(void *context);

int vmbus_request_offers(void);

/*
 * APIs for managing sub-channels.
 */

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
			void (*sc_cr_cb)(struct vmbus_channel *new_sc));

void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
		void (*chn_rescind_cb)(struct vmbus_channel *));

/*
 * Retrieve the (sub) channel on which to send an outgoing request.
 * When a primary channel has multiple sub-channels, we choose a
 * channel whose VCPU binding is closest to the VCPU on which
 * this call is being made.
 */
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary);

/*
 * Check if sub-channels have already been offered. This API will be useful
 * when the driver is unloaded after establishing sub-channels. In this case,
 * when the driver is re-loaded, the driver would have to check if the
 * subchannels have already been established before attempting to request
 * the creation of sub-channels.
 * This function returns true to indicate that subchannels have already been
 * created.
 * This function should be invoked after setting the callback function for
 * sub-channel creation.
 */
bool vmbus_are_subchannels_present(struct vmbus_channel *primary);

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;
	struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_multipage_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_multipage_buffer range;
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_packet_mpb_array {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_mpb_array range;
} __packed;


extern int vmbus_open(struct vmbus_channel *channel,
		      u32 send_ringbuffersize,
		      u32 recv_ringbuffersize,
		      void *userdata,
		      u32 userdatalen,
		      void (*onchannel_callback)(void *context),
		      void *context);
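
/*
 * Minimal usage sketch (illustrative, error handling elided): a driver's
 * probe routine typically opens its channel with equal-sized send and
 * receive rings and a per-channel callback.  "ring_size" and
 * "my_callback" are hypothetical names, not part of this API.
 *
 *	ret = vmbus_open(dev->channel, ring_size, ring_size, NULL, 0,
 *			 my_callback, dev->channel);
 *	if (ret)
 *		return ret;
 */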

extern void vmbus_close(struct vmbus_channel *channel);

extern int vmbus_sendpacket(struct vmbus_channel *channel,
			    void *buffer,
			    u32 bufferLen,
			    u64 requestid,
			    enum vmbus_packet_type type,
			    u32 flags);

extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
				       struct hv_page_buffer pagebuffers[],
				       u32 pagecount,
				       void *buffer,
				       u32 bufferlen,
				       u64 requestid);
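
/*
 * Illustrative sketch of describing a guest buffer to the host with a
 * single hv_page_buffer (how netvsc-style drivers use this call); the
 * buffer is assumed to fit in one page, and "data", "len", "hdr" and
 * "req_id" are hypothetical:
 *
 *	struct hv_page_buffer pb;
 *
 *	pb.pfn    = virt_to_phys(data) >> PAGE_SHIFT;
 *	pb.offset = offset_in_page(data);
 *	pb.len    = len;
 *
 *	ret = vmbus_sendpacket_pagebuffer(channel, &pb, 1,
 *					  &hdr, sizeof(hdr), req_id);
 */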

extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
				     struct vmbus_packet_mpb_array *mpb,
				     u32 desc_size,
				     void *buffer,
				     u32 bufferlen,
				     u64 requestid);

extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
				 void *kbuffer,
				 u32 size,
				 u32 *gpadl_handle);

extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
				u32 gpadl_handle);

void vmbus_reset_channel_cb(struct vmbus_channel *channel);

extern int vmbus_recvpacket(struct vmbus_channel *channel,
			    void *buffer,
			    u32 bufferlen,
			    u32 *buffer_actual_len,
			    u64 *requestid);

extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
				void *buffer,
				u32 bufferlen,
				u32 *buffer_actual_len,
				u64 *requestid);


extern void vmbus_ontimer(unsigned long data);

/* Base driver object */
struct hv_driver {
	const char *name;

	/*
	 * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
	 * channel flag, actually doesn't mean a synthetic device because the
	 * offer's if_type/if_instance can change for every new hvsock
	 * connection.
	 *
	 * However, to facilitate the notification of new-offer/rescind-offer
	 * from vmbus driver to hvsock driver, we can handle hvsock offer as
	 * a special vmbus device, and hence we need the below flag to
	 * indicate if the driver is the hvsock driver or not: we need to
	 * specially treat the hvsock offer & driver in vmbus_match().
	 */
	bool hvsock;

	/* the device type supported by this driver */
	uuid_le dev_type;
	const struct hv_vmbus_device_id *id_table;

	struct device_driver driver;

	/* dynamic device GUID's */
	struct {
		spinlock_t lock;
		struct list_head list;
	} dynids;

	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
	int (*remove)(struct hv_device *);
	void (*shutdown)(struct hv_device *);

};

/* Base device object */
struct hv_device {
	/* the device type id of this device */
	uuid_le dev_type;

	/* the device instance id of this device */
	uuid_le dev_instance;
	u16 vendor_id;
	u16 device_id;

	struct device device;

	struct vmbus_channel *channel;
	struct kset *channels_kset;
};


static inline struct hv_device *device_to_hv_device(struct device *d)
{
	return container_of(d, struct hv_device, device);
}

static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
	return container_of(d, struct hv_driver, driver);
}

static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
	dev_set_drvdata(&dev->device, data);
}

static inline void *hv_get_drvdata(struct hv_device *dev)
{
	return dev_get_drvdata(&dev->device);
}

struct hv_ring_buffer_debug_info {
	u32 current_interrupt_mask;
	u32 current_read_index;
	u32 current_write_index;
	u32 bytes_avail_toread;
	u32 bytes_avail_towrite;
};


int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info);

/* Vmbus interface */
#define vmbus_driver_register(driver)	\
	__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
					 struct module *owner,
					 const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
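
/*
 * Minimal registration sketch (illustrative): a VMBus driver fills in a
 * struct hv_driver and registers it from module init.  The GUID chosen
 * and the probe/remove callbacks are placeholders.
 *
 *	static const struct hv_vmbus_device_id my_id_table[] = {
 *		{ HV_NIC_GUID, },
 *		{ },
 *	};
 *
 *	static struct hv_driver my_drv = {
 *		.name     = "my_drv",
 *		.id_table = my_id_table,
 *		.probe    = my_probe,
 *		.remove   = my_remove,
 *	};
 *
 *	return vmbus_driver_register(&my_drv);
 */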

void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);

int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok);
void vmbus_free_mmio(resource_size_t start, resource_size_t size);

/*
 * GUID definitions of various offer types - services offered to the guest.
 */

/*
 * Network GUID
 * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
 */
#define HV_NIC_GUID \
	.guid = UUID_LE(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
			0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)

/*
 * IDE GUID
 * {32412632-86cb-44a2-9b5c-50d1417354f5}
 */
#define HV_IDE_GUID \
	.guid = UUID_LE(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
			0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)

/*
 * SCSI GUID
 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
 */
#define HV_SCSI_GUID \
	.guid = UUID_LE(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
			0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)

/*
 * Shutdown GUID
 * {0e0b6031-5213-4934-818b-38d90ced39db}
 */
#define HV_SHUTDOWN_GUID \
	.guid = UUID_LE(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
			0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)

/*
 * Time Synch GUID
 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
 */
#define HV_TS_GUID \
	.guid = UUID_LE(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
			0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)

/*
 * Heartbeat GUID
 * {57164f39-9115-4e78-ab55-382f3bd5422d}
 */
#define HV_HEART_BEAT_GUID \
	.guid = UUID_LE(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
			0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)

/*
 * KVP GUID
 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
 */
#define HV_KVP_GUID \
	.guid = UUID_LE(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
			0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)

/*
 * Dynamic memory GUID
 * {525074dc-8985-46e2-8057-a307dc18a502}
 */
#define HV_DM_GUID \
	.guid = UUID_LE(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
			0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)

/*
 * Mouse GUID
 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
 */
#define HV_MOUSE_GUID \
	.guid = UUID_LE(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
			0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)

/*
 * Keyboard GUID
 * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
 */
#define HV_KBD_GUID \
	.guid = UUID_LE(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
			0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)

/*
 * VSS (Backup/Restore) GUID
 */
#define HV_VSS_GUID \
	.guid = UUID_LE(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
			0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
/*
 * Synthetic Video GUID
 * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
 */
#define HV_SYNTHVID_GUID \
	.guid = UUID_LE(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
			0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)

/*
 * Synthetic FC GUID
 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
 */
#define HV_SYNTHFC_GUID \
	.guid = UUID_LE(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
			0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)

/*
 * Guest File Copy Service
 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
 */

#define HV_FCOPY_GUID \
	.guid = UUID_LE(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
			0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)

/*
 * NetworkDirect. This is the guest RDMA service.
 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
 */
#define HV_ND_GUID \
	.guid = UUID_LE(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
			0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)

/*
 * PCI Express Pass Through
 * {44C4F61D-4444-4400-9D52-802E27EDE19F}
 */

#define HV_PCIE_GUID \
	.guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
			0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)

/*
 * Linux doesn't support these 3 devices: the first two are for
 * Automatic Virtual Machine Activation, and the third is for
 * Remote Desktop Virtualization.
 * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
 * {3375baf4-9e15-4b30-b765-67acb10d607b}
 * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
 */

#define HV_AVMA1_GUID \
	.guid = UUID_LE(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
			0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)

#define HV_AVMA2_GUID \
	.guid = UUID_LE(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
			0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)

#define HV_RDV_GUID \
	.guid = UUID_LE(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
			0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)

/*
 * Common header for Hyper-V ICs
 */

#define ICMSGTYPE_NEGOTIATE		0
#define ICMSGTYPE_HEARTBEAT		1
#define ICMSGTYPE_KVPEXCHANGE		2
#define ICMSGTYPE_SHUTDOWN		3
#define ICMSGTYPE_TIMESYNC		4
#define ICMSGTYPE_VSS			5

#define ICMSGHDRFLAG_TRANSACTION	1
#define ICMSGHDRFLAG_REQUEST		2
#define ICMSGHDRFLAG_RESPONSE		4


/*
 * While we want to handle util services as regular devices,
 * there is only one instance of each of these services; so
 * we statically allocate the service specific state.
 */

struct hv_util_service {
	u8 *recv_buffer;
	void *channel;
	void (*util_cb)(void *);
	int (*util_init)(struct hv_util_service *);
	void (*util_deinit)(void);
};

struct vmbuspipe_hdr {
	u32 flags;
	u32 msgsize;
} __packed;

struct ic_version {
	u16 major;
	u16 minor;
} __packed;

struct icmsg_hdr {
	struct ic_version icverframe;
	u16 icmsgtype;
	struct ic_version icvermsg;
	u16 icmsgsize;
	u32 status;
	u8 ictransaction_id;
	u8 icflags;
	u8 reserved[2];
} __packed;

struct icmsg_negotiate {
	u16 icframe_vercnt;
	u16 icmsg_vercnt;
	u32 reserved;
	struct ic_version icversion_data[1]; /* any size array */
} __packed;

struct shutdown_msg_data {
	u32 reason_code;
	u32 timeout_seconds;
	u32 flags;
	u8 display_message[2048];
} __packed;

struct heartbeat_msg_data {
	u64 seq_num;
	u32 reserved[8];
} __packed;

/* Time Sync IC defs */
#define ICTIMESYNCFLAG_PROBE	0
#define ICTIMESYNCFLAG_SYNC	1
#define ICTIMESYNCFLAG_SAMPLE	2

#ifdef __x86_64__
#define WLTIMEDELTA	116444736000000000L	/* in 100ns unit */
#else
#define WLTIMEDELTA	116444736000000000LL
#endif

struct ictimesync_data {
	u64 parenttime;
	u64 childtime;
	u64 roundtriptime;
	u8 flags;
} __packed;

struct ictimesync_ref_data {
	u64 parenttime;
	u64 vmreferencetime;
	u8 flags;
	char leapflags;
	char stratum;
	u8 reserved[3];
} __packed;

struct hyperv_service_callback {
	u8 msg_type;
	char *log_msg;
	uuid_le data;
	struct vmbus_channel *channel;
	void (*callback)(void *context);
};

#define MAX_SRV_VER	0x7ffffff
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
				const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version);

void hv_process_channel_removal(u32 relid);

void vmbus_setevent(struct vmbus_channel *channel);
/*
 * Negotiated version with the Host.
 */

extern __u32 vmbus_proto_version;

int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
				  const uuid_le *shv_host_servie_id);
void vmbus_set_event(struct vmbus_channel *channel);

/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->buffer;
}

/*
 * Mask off host interrupt callback notifications
 */
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;

	/* make sure mask update is not reordered */
	virt_mb();
}

/*
 * Re-enable host callback and return number of outstanding bytes
 */
static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{

	rbi->ring_buffer->interrupt_mask = 0;

	/* make sure mask update is not reordered */
	virt_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	return hv_get_bytes_to_read(rbi);
}
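
/*
 * Typical masking pattern built on the two helpers above (illustrative):
 * a batched-mode reader masks host interrupts, drains the ring, then
 * unmasks and re-checks for packets that raced with the unmask.
 * "process_packets()" is a hypothetical drain routine.
 *
 *	again:
 *		hv_begin_read(rbi);
 *		process_packets(channel);
 *		if (hv_end_read(rbi) != 0)
 *			goto again;	// data arrived while unmasking
 */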

/*
 * An API to support in-place processing of incoming VMBUS packets.
 */

/* Get data payload associated with descriptor */
static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
{
	return (void *)((unsigned long)desc + (desc->offset8 << 3));
}

/* Get data size associated with descriptor */
static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
{
	return (desc->len8 << 3) - (desc->offset8 << 3);
}


struct vmpacket_descriptor *
hv_pkt_iter_first(struct vmbus_channel *channel);

struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *pkt);

void hv_pkt_iter_close(struct vmbus_channel *channel);

/*
 * Get next packet descriptor from iterator
 * If at end of list, return NULL and update host.
 */
static inline struct vmpacket_descriptor *
hv_pkt_iter_next(struct vmbus_channel *channel,
		 const struct vmpacket_descriptor *pkt)
{
	struct vmpacket_descriptor *nxt;

	nxt = __hv_pkt_iter_next(channel, pkt);
	if (!nxt)
		hv_pkt_iter_close(channel);

	return nxt;
}

#define foreach_vmbus_pkt(pkt, channel) \
	for (pkt = hv_pkt_iter_first(channel); pkt; \
	    pkt = hv_pkt_iter_next(channel, pkt))
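
/*
 * Illustrative channel callback built on the iterator above; the
 * payload handler "my_handle_pkt()" is hypothetical.
 *
 *	static void my_callback(void *context)
 *	{
 *		struct vmbus_channel *chan = context;
 *		struct vmpacket_descriptor *pkt;
 *
 *		foreach_vmbus_pkt(pkt, chan)
 *			my_handle_pkt(chan, hv_pkt_data(pkt),
 *				      hv_pkt_datalen(pkt));
 *	}
 */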

#endif /* _HYPERV_H */