/*
 *
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */

#ifndef _HYPERV_H
#define _HYPERV_H

#include <linux/types.h>

/*
 * An implementation of HyperV key value pair (KVP) functionality for Linux.
 *
 *
 * Copyright (C) 2010, Novell, Inc.
 * Author : K. Y. Srinivasan <ksrinivasan@novell.com>
 *
 */

/*
 * Maximum value size - used for both key names and value data, and includes
 * any applicable NULL terminators.
 *
 * Note: This limit is somewhat arbitrary, but falls easily within what is
 * supported for all native guests (back to Win 2000) and what is reasonable
 * for the IC KVP exchange functionality. Note that Windows Me/98/95 are
 * limited to 255 character key names.
 *
 * MSDN recommends not storing data values larger than 2048 bytes in the
 * registry.
 *
 * Note: This value is used in defining the KVP exchange message - this value
 * cannot be modified without affecting the message size and compatibility.
 */

/*
 * bytes, including any null terminators
 */
#define HV_KVP_EXCHANGE_MAX_VALUE_SIZE (2048)


/*
 * Maximum key size - the registry limit for the length of an entry name
 * is 256 characters, including the null terminator; the 512-byte buffer
 * leaves room for a utf16 encoding of such a name.
 */

#define HV_KVP_EXCHANGE_MAX_KEY_SIZE (512)

/*
 * In Linux, we implement the KVP functionality in two components:
 * 1) The kernel component, which is packaged as part of the hv_utils driver
 *    and is responsible for communicating with the host and for
 *    implementing the host/guest protocol.
 * 2) A user level daemon that is responsible for data gathering.
 *
 * Host/Guest Protocol: The host iterates over an index and expects the guest
 * to assign a key name to the index and also return the value corresponding to
 * the key. The host will have at most one KVP transaction outstanding at any
 * given point in time. The host side iteration stops when the guest returns
 * an error. Microsoft has specified the following mapping of key names to
 * host specified index:
 *
 *      Index           Key Name
 *      0               FullyQualifiedDomainName
 *      1               IntegrationServicesVersion
 *      2               NetworkAddressIPv4
 *      3               NetworkAddressIPv6
 *      4               OSBuildNumber
 *      5               OSName
 *      6               OSMajorVersion
 *      7               OSMinorVersion
 *      8               OSVersion
 *      9               ProcessorArchitecture
 *
 * The Windows host expects the Key Name and Key Value to be encoded in utf16.
 *
 * Guest Kernel/KVP Daemon Protocol: As noted earlier, we implement all of the
 * data gathering functionality in a user mode daemon. The user level daemon
 * is also responsible for binding the key name to the index. The kernel and
 * user-level daemon communicate using a connector channel.
 *
 * The user mode component first registers with the kernel component.
 * Subsequently, the kernel component requests data for the specified keys.
 * In response to this message the user mode component fills in the value
 * corresponding to the specified key. We overload the sequence field in the
 * cn_msg header to define our KVP message types.
 *
 * The kernel component simply acts as a conduit for communication between the
 * Windows host and the user-level daemon. The kernel component passes up the
 * index received from the Host to the user-level daemon. If the index is
 * valid (supported), the corresponding key as well as its
 * value (both are strings) is returned. If the index is invalid
 * (not supported), a NULL key string is returned.
 */

/*
 *
 * The following definitions are shared with the user-mode component; do not
 * change any of this without making the corresponding changes in
 * the KVP user-mode component.
 */

enum hv_ku_op {
        KVP_REGISTER = 0, /* Register the user mode component */
        KVP_KERNEL_GET, /* Kernel is requesting the value */
        KVP_KERNEL_SET, /* Kernel is providing the value */
        KVP_USER_GET, /* User is requesting the value */
        KVP_USER_SET /* User is providing the value */
};

struct hv_ku_msg {
        __u32 kvp_index; /* Key index */
        __u8 kvp_key[HV_KVP_EXCHANGE_MAX_KEY_SIZE]; /* Key name */
        __u8 kvp_value[HV_KVP_EXCHANGE_MAX_VALUE_SIZE]; /* Key value */
};
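/*
 * Illustrative sketch (not part of this header): a user-mode KVP daemon
 * talking to the kernel component over the connector channel registers once
 * and then answers KVP_KERNEL_GET requests by filling in a struct hv_ku_msg;
 * replying with seq = KVP_USER_SET tells the kernel the message carries the
 * value. The cn_msg framing is real connector plumbing, but hv_nl_send() and
 * the domain-name value below are hypothetical placeholders.
 *
 *      static void handle_kvp_request(struct cn_msg *req)
 *      {
 *              struct hv_ku_msg reply;
 *
 *              memset(&reply, 0, sizeof(reply));
 *              if (req->seq == KVP_KERNEL_GET) {
 *                      struct hv_ku_msg *get = (struct hv_ku_msg *)req->data;
 *
 *                      reply.kvp_index = get->kvp_index;
 *                      strncpy((char *)reply.kvp_key,
 *                              "FullyQualifiedDomainName",
 *                              HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1);
 *                      strncpy((char *)reply.kvp_value,
 *                              "guest.example.com",
 *                              HV_KVP_EXCHANGE_MAX_VALUE_SIZE - 1);
 *                      hv_nl_send(KVP_USER_SET, &reply, sizeof(reply));
 *              }
 *      }
 */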
/*
 * Registry value types.
 */

#define REG_SZ 1

enum hv_kvp_exchg_op {
        KVP_OP_GET = 0,
        KVP_OP_SET,
        KVP_OP_DELETE,
        KVP_OP_ENUMERATE,
        KVP_OP_COUNT /* Number of operations, must be last. */
};

enum hv_kvp_exchg_pool {
        KVP_POOL_EXTERNAL = 0,
        KVP_POOL_GUEST,
        KVP_POOL_AUTO,
        KVP_POOL_AUTO_EXTERNAL,
        KVP_POOL_AUTO_INTERNAL,
        KVP_POOL_COUNT /* Number of pools, must be last. */
};

struct hv_kvp_hdr {
        __u8 operation;
        __u8 pool;
        __u16 pad;
} __attribute__((packed));

struct hv_kvp_exchg_msg_value {
        __u32 value_type;
        __u32 key_size;
        __u32 value_size;
        __u8 key[HV_KVP_EXCHANGE_MAX_KEY_SIZE];
        __u8 value[HV_KVP_EXCHANGE_MAX_VALUE_SIZE];
} __attribute__((packed));

struct hv_kvp_msg_enumerate {
        __u32 index;
        struct hv_kvp_exchg_msg_value data;
} __attribute__((packed));

struct hv_kvp_msg {
        struct hv_kvp_hdr kvp_hdr;
        struct hv_kvp_msg_enumerate kvp_data;
} __attribute__((packed));

#ifdef __KERNEL__
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/uuid.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>


#define MAX_PAGE_BUFFER_COUNT 18
#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */

#pragma pack(push, 1)

/* Single-page buffer */
struct hv_page_buffer {
        u32 len;
        u32 offset;
        u64 pfn;
};

/* Multiple-page buffer */
struct hv_multipage_buffer {
        /* Length and Offset determine the # of pfns in the array */
        u32 len;
        u32 offset;
        u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};

/* 0x18 includes the proprietary packet header */
#define MAX_PAGE_BUFFER_PACKET (0x18 + \
                (sizeof(struct hv_page_buffer) * \
                 MAX_PAGE_BUFFER_COUNT))
#define MAX_MULTIPAGE_BUFFER_PACKET (0x18 + \
                sizeof(struct hv_multipage_buffer))


#pragma pack(pop)

struct hv_ring_buffer {
        /* Offset in bytes from the start of ring data below */
        u32 write_index;

        /* Offset in bytes from the start of ring data below */
        u32 read_index;

        u32 interrupt_mask;

        /* Pad it to PAGE_SIZE so that data starts on a page boundary */
        u8 reserved[4084];

        /* NOTE:
         * The interrupt_mask field is used only for channels, but since our
         * vmbus connection also uses this data structure and its data starts
         * here, we commented out this field.
         */

        /*
         * Ring data starts here + RingDataStartOffset
         * !!! DO NOT place any fields below this !!!
         */
        u8 buffer[0];
} __packed;

struct hv_ring_buffer_info {
        struct hv_ring_buffer *ring_buffer;
        u32 ring_size; /* Include the shared header */
        spinlock_t ring_lock;

        u32 ring_datasize; /* < ring_size */
        u32 ring_data_startoffset;
};

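/*
 * Illustrative sketch (not part of this header): given the read/write
 * indices above, which are offsets into the ring data area, the bytes
 * pending for the reader are those between read_index and write_index
 * (wrapping at ring_datasize), and the remainder of the ring is available
 * to the writer. The helper name below is hypothetical.
 *
 *      static void example_ring_avail(const struct hv_ring_buffer_info *rbi,
 *                                     u32 *toread, u32 *towrite)
 *      {
 *              u32 read_loc = rbi->ring_buffer->read_index;
 *              u32 write_loc = rbi->ring_buffer->write_index;
 *              u32 dsize = rbi->ring_datasize;
 *
 *              *toread = (write_loc >= read_loc) ?
 *                              (write_loc - read_loc) :
 *                              (dsize - read_loc + write_loc);
 *              *towrite = dsize - *toread;
 *      }
 */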
struct hv_ring_buffer_debug_info {
        u32 current_interrupt_mask;
        u32 current_read_index;
        u32 current_write_index;
        u32 bytes_avail_toread;
        u32 bytes_avail_towrite;
};

/*
 * We use the same version numbering for all Hyper-V modules.
 *
 * Definition of versioning is as follows:
 *
 *      Major Number    Changes for these scenarios:
 *                      1. When a new version of Windows Hyper-V
 *                         is released.
 *                      2. A major change has occurred in the
 *                         Linux ICs (for example, the first merge
 *                         into the kernel).
 *                      Every time the Major Number changes, the
 *                      Revision number is reset to 0.
 *      Minor Number    Changes when new functionality is added
 *                      to the Linux ICs that is not a bug fix.
 *
 * 3.1 - Added completed hv_utils driver. Shutdown/Heartbeat/Timesync
 */
#define HV_DRV_VERSION "3.1"


/*
 * A revision number of vmbus that is used for ensuring both ends on a
 * partition are using compatible versions.
 */
#define VMBUS_REVISION_NUMBER 13

/* Maximum size of the pipe payload is 16K */
#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)

/* Define PipeMode values. */
#define VMBUS_PIPE_TYPE_BYTE 0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE 0x00000004

/* The size of the user defined data buffer for non-pipe offers. */
#define MAX_USER_DEFINED_BYTES 120

/* The size of the user defined data buffer for pipe offers. */
#define MAX_PIPE_USER_DEFINED_BYTES 116

/*
 * At the center of the Channel Management library is the Channel Offer. This
 * struct contains the fundamental information about an offer.
 */
struct vmbus_channel_offer {
        uuid_le if_type;
        uuid_le if_instance;
        u64 int_latency; /* in 100ns units */
        u32 if_revision;
        u32 server_ctx_size; /* in bytes */
        u16 chn_flags;
        u16 mmio_megabytes; /* in megabytes (bytes * 1024 * 1024) */

        union {
                /* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
                struct {
                        unsigned char user_def[MAX_USER_DEFINED_BYTES];
                } std;

                /*
                 * Pipes:
                 * The following structure is an integrated pipe protocol, which
                 * is implemented on top of standard user-defined data. Pipe
                 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
                 * use.
                 */
                struct {
                        u32 pipe_mode;
                        unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
                } pipe;
        } u;
        u32 padding;
} __packed;

/* Server Flags */
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 1
#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES 2
#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS 4
#define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x10
#define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100
#define VMBUS_CHANNEL_PARENT_OFFER 0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400

struct vmpacket_descriptor {
        u16 type;
        u16 offset8;
        u16 len8;
        u16 flags;
        u64 trans_id;
} __packed;

struct vmpacket_header {
        u32 prev_pkt_start_offset;
        struct vmpacket_descriptor descriptor;
} __packed;

struct vmtransfer_page_range {
        u32 byte_count;
        u32 byte_offset;
} __packed;

struct vmtransfer_page_packet_header {
        struct vmpacket_descriptor d;
        u16 xfer_pageset_id;
        bool sender_owns_set;
        u8 reserved;
        u32 range_cnt;
        struct vmtransfer_page_range ranges[1];
} __packed;

struct vmgpadl_packet_header {
        struct vmpacket_descriptor d;
        u32 gpadl;
        u32 reserved;
} __packed;

struct vmadd_remove_transfer_page_set {
        struct vmpacket_descriptor d;
        u32 gpadl;
        u16 xfer_pageset_id;
        u16 reserved;
} __packed;

/*
 * This structure defines a range in guest physical space that can be made to
 * look virtually contiguous.
 */
struct gpa_range {
        u32 byte_count;
        u32 byte_offset;
        u64 pfn_array[0];
};

/*
 * This is the format for an Establish Gpadl packet, which contains a handle by
 * which this GPADL will be known and a set of GPA ranges associated with it.
 * This can be converted to an MDL by the guest OS. If there are multiple GPA
 * ranges, then the resulting MDL will be "chained," representing multiple VA
 * ranges.
 */
struct vmestablish_gpadl {
        struct vmpacket_descriptor d;
        u32 gpadl;
        u32 range_cnt;
        struct gpa_range range[1];
} __packed;

/*
 * This is the format for a Teardown Gpadl packet, which indicates that the
 * GPADL handle in the Establish Gpadl packet will never be referenced again.
 */
struct vmteardown_gpadl {
        struct vmpacket_descriptor d;
        u32 gpadl;
        u32 reserved; /* for alignment to an 8-byte boundary */
} __packed;

/*
 * This is the format for a GPA-Direct packet, which contains a set of GPA
 * ranges, in addition to commands and/or data.
 */
struct vmdata_gpa_direct {
        struct vmpacket_descriptor d;
        u32 reserved;
        u32 range_cnt;
        struct gpa_range range[1];
} __packed;

/* This is the format for an Additional Data Packet. */
struct vmadditional_data {
        struct vmpacket_descriptor d;
        u64 total_bytes;
        u32 offset;
        u32 byte_cnt;
        unsigned char data[1];
} __packed;

union vmpacket_largest_possible_header {
        struct vmpacket_descriptor simple_hdr;
        struct vmtransfer_page_packet_header xfer_page_hdr;
        struct vmgpadl_packet_header gpadl_hdr;
        struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
        struct vmestablish_gpadl establish_gpadl_hdr;
        struct vmteardown_gpadl teardown_gpadl_hdr;
        struct vmdata_gpa_direct data_gpa_direct_hdr;
};

#define VMPACKET_DATA_START_ADDRESS(__packet) \
        (void *)(((unsigned char *)__packet) + \
        ((struct vmpacket_descriptor *)__packet)->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet) \
        ((((struct vmpacket_descriptor *)__packet)->len8 - \
          ((struct vmpacket_descriptor *)__packet)->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet) \
        (((struct vmpacket_descriptor *)__packet)->type)

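/*
 * Worked example (illustrative, not part of this header): offset8 and len8
 * in the descriptor are expressed in units of 8 bytes. For a received packet
 * whose descriptor carries offset8 = 3 and len8 = 10, the payload starts
 * 3 * 8 = 24 bytes into the packet and is (10 - 3) * 8 = 56 bytes long,
 * so given a pointer "desc" to the received descriptor:
 *
 *      void *payload = VMPACKET_DATA_START_ADDRESS(desc);
 *      u32 payload_len = VMPACKET_DATA_LENGTH(desc);
 */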
enum vmbus_packet_type {
        VM_PKT_INVALID = 0x0,
        VM_PKT_SYNCH = 0x1,
        VM_PKT_ADD_XFER_PAGESET = 0x2,
        VM_PKT_RM_XFER_PAGESET = 0x3,
        VM_PKT_ESTABLISH_GPADL = 0x4,
        VM_PKT_TEARDOWN_GPADL = 0x5,
        VM_PKT_DATA_INBAND = 0x6,
        VM_PKT_DATA_USING_XFER_PAGES = 0x7,
        VM_PKT_DATA_USING_GPADL = 0x8,
        VM_PKT_DATA_USING_GPA_DIRECT = 0x9,
        VM_PKT_CANCEL_REQUEST = 0xa,
        VM_PKT_COMP = 0xb,
        VM_PKT_DATA_USING_ADDITIONAL_PKT = 0xc,
        VM_PKT_ADDITIONAL_DATA = 0xd
};

#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED 1


/* Version 1 messages */
enum vmbus_channel_message_type {
        CHANNELMSG_INVALID = 0,
        CHANNELMSG_OFFERCHANNEL = 1,
        CHANNELMSG_RESCIND_CHANNELOFFER = 2,
        CHANNELMSG_REQUESTOFFERS = 3,
        CHANNELMSG_ALLOFFERS_DELIVERED = 4,
        CHANNELMSG_OPENCHANNEL = 5,
        CHANNELMSG_OPENCHANNEL_RESULT = 6,
        CHANNELMSG_CLOSECHANNEL = 7,
        CHANNELMSG_GPADL_HEADER = 8,
        CHANNELMSG_GPADL_BODY = 9,
        CHANNELMSG_GPADL_CREATED = 10,
        CHANNELMSG_GPADL_TEARDOWN = 11,
        CHANNELMSG_GPADL_TORNDOWN = 12,
        CHANNELMSG_RELID_RELEASED = 13,
        CHANNELMSG_INITIATE_CONTACT = 14,
        CHANNELMSG_VERSION_RESPONSE = 15,
        CHANNELMSG_UNLOAD = 16,
#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
        CHANNELMSG_VIEWRANGE_ADD = 17,
        CHANNELMSG_VIEWRANGE_REMOVE = 18,
#endif
        CHANNELMSG_COUNT
};

struct vmbus_channel_message_header {
        enum vmbus_channel_message_type msgtype;
        u32 padding;
} __packed;

/* Query VMBus Version parameters */
struct vmbus_channel_query_vmbus_version {
        struct vmbus_channel_message_header header;
        u32 version;
} __packed;

/* VMBus Version Supported parameters */
struct vmbus_channel_version_supported {
        struct vmbus_channel_message_header header;
        bool version_supported;
} __packed;

/* Offer Channel parameters */
struct vmbus_channel_offer_channel {
        struct vmbus_channel_message_header header;
        struct vmbus_channel_offer offer;
        u32 child_relid;
        u8 monitorid;
        bool monitor_allocated;
} __packed;

/* Rescind Offer parameters */
struct vmbus_channel_rescind_offer {
        struct vmbus_channel_message_header header;
        u32 child_relid;
} __packed;

/*
 * Request Offer -- no parameters, SynIC message contains the partition ID
 * Set Snoop -- no parameters, SynIC message contains the partition ID
 * Clear Snoop -- no parameters, SynIC message contains the partition ID
 * All Offers Delivered -- no parameters, SynIC message contains the partition
 * ID
 * Flush Client -- no parameters, SynIC message contains the partition ID
 */

/* Open Channel parameters */
struct vmbus_channel_open_channel {
        struct vmbus_channel_message_header header;

        /* Identifies the specific VMBus channel that is being opened. */
        u32 child_relid;

        /* ID making a particular open request at a channel offer unique. */
        u32 openid;

        /* GPADL for the channel's ring buffer. */
        u32 ringbuffer_gpadlhandle;

        /* GPADL for the channel's server context save area. */
        u32 server_contextarea_gpadlhandle;

        /*
         * The upstream ring buffer begins at offset zero in the memory
         * described by RingBufferGpadlHandle. The downstream ring buffer
         * follows it at this offset (in pages).
         */
        u32 downstream_ringbuffer_pageoffset;

        /* User-specific data to be passed along to the server endpoint. */
        unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;

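/*
 * Worked example (illustrative, not part of this header): if the GPADL
 * registered for the ring buffers covers four pages and the upstream (send)
 * ring buffer occupies the first two of them, the downstream (receive) ring
 * buffer starts two pages in, so downstream_ringbuffer_pageoffset would be 2.
 */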
/* Open Channel Result parameters */
struct vmbus_channel_open_result {
        struct vmbus_channel_message_header header;
        u32 child_relid;
        u32 openid;
        u32 status;
} __packed;

/* Close channel parameters */
struct vmbus_channel_close_channel {
        struct vmbus_channel_message_header header;
        u32 child_relid;
} __packed;

/* Channel Message GPADL */
#define GPADL_TYPE_RING_BUFFER 1
#define GPADL_TYPE_SERVER_SAVE_AREA 2
#define GPADL_TYPE_TRANSACTION 8

/*
 * The number of PFNs in a GPADL message is defined by the number of
 * pages that would be spanned by ByteCount and ByteOffset. If the
 * implied number of PFNs won't fit in this packet, there will be a
 * follow-up packet that contains more.
 */
struct vmbus_channel_gpadl_header {
        struct vmbus_channel_message_header header;
        u32 child_relid;
        u32 gpadl;
        u16 range_buflen;
        u16 rangecount;
        struct gpa_range range[0];
} __packed;

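/*
 * Worked example (illustrative): with a 4096-byte page size, a gpa_range
 * describing byte_offset = 2048 and byte_count = 8192 starts halfway into
 * its first page and ends halfway into its third, so it spans three pages
 * and therefore carries three PFNs in pfn_array[].
 */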
/* This is the follow-up packet that contains more PFNs. */
struct vmbus_channel_gpadl_body {
        struct vmbus_channel_message_header header;
        u32 msgnumber;
        u32 gpadl;
        u64 pfn[0];
} __packed;

struct vmbus_channel_gpadl_created {
        struct vmbus_channel_message_header header;
        u32 child_relid;
        u32 gpadl;
        u32 creation_status;
} __packed;

struct vmbus_channel_gpadl_teardown {
        struct vmbus_channel_message_header header;
        u32 child_relid;
        u32 gpadl;
} __packed;

struct vmbus_channel_gpadl_torndown {
        struct vmbus_channel_message_header header;
        u32 gpadl;
} __packed;

#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
struct vmbus_channel_view_range_add {
        struct vmbus_channel_message_header header;
        PHYSICAL_ADDRESS viewrange_base;
        u64 viewrange_length;
        u32 child_relid;
} __packed;

struct vmbus_channel_view_range_remove {
        struct vmbus_channel_message_header header;
        PHYSICAL_ADDRESS viewrange_base;
        u32 child_relid;
} __packed;
#endif

struct vmbus_channel_relid_released {
        struct vmbus_channel_message_header header;
        u32 child_relid;
} __packed;

struct vmbus_channel_initiate_contact {
        struct vmbus_channel_message_header header;
        u32 vmbus_version_requested;
        u32 padding2;
        u64 interrupt_page;
        u64 monitor_page1;
        u64 monitor_page2;
} __packed;

struct vmbus_channel_version_response {
        struct vmbus_channel_message_header header;
        bool version_supported;
} __packed;

enum vmbus_channel_state {
        CHANNEL_OFFER_STATE,
        CHANNEL_OPENING_STATE,
        CHANNEL_OPEN_STATE,
};

struct vmbus_channel_debug_info {
        u32 relid;
        enum vmbus_channel_state state;
        uuid_le interfacetype;
        uuid_le interface_instance;
        u32 monitorid;
        u32 servermonitor_pending;
        u32 servermonitor_latency;
        u32 servermonitor_connectionid;
        u32 clientmonitor_pending;
        u32 clientmonitor_latency;
        u32 clientmonitor_connectionid;

        struct hv_ring_buffer_debug_info inbound;
        struct hv_ring_buffer_debug_info outbound;
};

/*
 * Represents each channel msg on the vmbus connection. This is a
 * variable-size data structure depending on the msg type itself.
 */
struct vmbus_channel_msginfo {
        /* Bookkeeping stuff */
        struct list_head msglistentry;

        /* So far, this is only used to handle gpadl body messages */
        struct list_head submsglist;

        /* Synchronize the request/response if needed */
        struct completion waitevent;
        union {
                struct vmbus_channel_version_supported version_supported;
                struct vmbus_channel_open_result open_result;
                struct vmbus_channel_gpadl_torndown gpadl_torndown;
                struct vmbus_channel_gpadl_created gpadl_created;
                struct vmbus_channel_version_response version_response;
        } response;

        u32 msgsize;
        /*
         * The channel message that goes out on the "wire".
         * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header.
         */
        unsigned char msg[0];
};

struct vmbus_close_msg {
        struct vmbus_channel_msginfo info;
        struct vmbus_channel_close_channel msg;
};

struct vmbus_channel {
        struct list_head listentry;

        struct hv_device *device_obj;

        struct work_struct work;

        enum vmbus_channel_state state;

        struct vmbus_channel_offer_channel offermsg;
        /*
         * These are based on the OfferMsg.MonitorId.
         * Save it here for easy access.
         */
        u8 monitor_grp;
        u8 monitor_bit;

        u32 ringbuffer_gpadlhandle;

        /* Allocated memory for ring buffer */
        void *ringbuffer_pages;
        u32 ringbuffer_pagecount;
        struct hv_ring_buffer_info outbound; /* send to parent */
        struct hv_ring_buffer_info inbound; /* receive from parent */
        spinlock_t inbound_lock;
        struct workqueue_struct *controlwq;

        struct vmbus_close_msg close_msg;

        /* Channel callbacks are invoked in this workqueue context */
        /* HANDLE dataWorkQueue; */

        void (*onchannel_callback)(void *context);
        void *channel_callback_context;
};

void vmbus_onmessage(void *context);

int vmbus_request_offers(void);

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
        u16 type;
        u16 dataoffset8;
        u16 length8;
        u16 flags;
        u64 transactionid;
        u32 reserved;
        u32 rangecount;
        struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_multipage_buffer {
        u16 type;
        u16 dataoffset8;
        u16 length8;
        u16 flags;
        u64 transactionid;
        u32 reserved;
        u32 rangecount; /* Always 1 in this case */
        struct hv_multipage_buffer range;
} __packed;


extern int vmbus_open(struct vmbus_channel *channel,
                        u32 send_ringbuffersize,
                        u32 recv_ringbuffersize,
                        void *userdata,
                        u32 userdatalen,
                        void (*onchannel_callback)(void *context),
                        void *context);

extern void vmbus_close(struct vmbus_channel *channel);

extern int vmbus_sendpacket(struct vmbus_channel *channel,
                        const void *buffer,
                        u32 bufferLen,
                        u64 requestid,
                        enum vmbus_packet_type type,
                        u32 flags);

extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
                        struct hv_page_buffer pagebuffers[],
                        u32 pagecount,
                        void *buffer,
                        u32 bufferlen,
                        u64 requestid);

extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
                        struct hv_multipage_buffer *mpb,
                        void *buffer,
                        u32 bufferlen,
                        u64 requestid);

extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
                        void *kbuffer,
                        u32 size,
                        u32 *gpadl_handle);

extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
                        u32 gpadl_handle);

extern int vmbus_recvpacket(struct vmbus_channel *channel,
                        void *buffer,
                        u32 bufferlen,
                        u32 *buffer_actual_len,
                        u64 *requestid);

extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
                        void *buffer,
                        u32 bufferlen,
                        u32 *buffer_actual_len,
                        u64 *requestid);

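/*
 * Illustrative usage sketch (not part of this header): a driver's probe
 * callback typically opens the channel offered to it and then exchanges
 * packets with the host via the functions declared above. The ring sizes,
 * the example_* names and the example_consume() helper are hypothetical
 * placeholders.
 *
 *      static void example_onchannel_callback(void *context)
 *      {
 *              struct vmbus_channel *channel = context;
 *              u8 buf[256];
 *              u32 actual_len;
 *              u64 req_id;
 *
 *              while (vmbus_recvpacket(channel, buf, sizeof(buf),
 *                                      &actual_len, &req_id) == 0 &&
 *                     actual_len > 0)
 *                      example_consume(buf, actual_len);
 *      }
 *
 *      static int example_probe(struct hv_device *dev,
 *                               const struct hv_vmbus_device_id *id)
 *      {
 *              return vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
 *                                NULL, 0, example_onchannel_callback,
 *                                dev->channel);
 *      }
 */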

extern void vmbus_get_debug_info(struct vmbus_channel *channel,
                        struct vmbus_channel_debug_info *debug);

extern void vmbus_ontimer(unsigned long data);

struct hv_dev_port_info {
        u32 int_mask;
        u32 read_idx;
        u32 write_idx;
        u32 bytes_avail_toread;
        u32 bytes_avail_towrite;
};

/* Base driver object */
struct hv_driver {
        const char *name;

        /* the device type supported by this driver */
        uuid_le dev_type;
        const struct hv_vmbus_device_id *id_table;

        struct device_driver driver;

        int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
        int (*remove)(struct hv_device *);
        void (*shutdown)(struct hv_device *);

};

/* Base device object */
struct hv_device {
        /* the device type id of this device */
        uuid_le dev_type;

        /* the device instance id of this device */
        uuid_le dev_instance;

        struct device device;

        struct vmbus_channel *channel;
};


static inline struct hv_device *device_to_hv_device(struct device *d)
{
        return container_of(d, struct hv_device, device);
}

static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
        return container_of(d, struct hv_driver, driver);
}

static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
        dev_set_drvdata(&dev->device, data);
}

static inline void *hv_get_drvdata(struct hv_device *dev)
{
        return dev_get_drvdata(&dev->device);
}

/* Vmbus interface */
#define vmbus_driver_register(driver) \
        __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
                                         struct module *owner,
                                         const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);

/**
 * VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device
 *
 * This macro is used to create a struct hv_vmbus_device_id that matches a
 * specific device.
 */
#define VMBUS_DEVICE(g0, g1, g2, g3, g4, g5, g6, g7, \
                     g8, g9, ga, gb, gc, gd, ge, gf) \
        .guid = { g0, g1, g2, g3, g4, g5, g6, g7, \
                  g8, g9, ga, gb, gc, gd, ge, gf },

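/*
 * Illustrative usage sketch (not part of this header): a driver matches on
 * its device GUID and registers through vmbus_driver_register(). The GUID
 * bytes, names and callbacks below (including the example_probe sketched
 * earlier) are placeholders.
 *
 *      static const struct hv_vmbus_device_id example_id_table[] = {
 *              { VMBUS_DEVICE(0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
 *                             0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10) },
 *              { },
 *      };
 *
 *      static struct hv_driver example_drv = {
 *              .name = "example_vmbus_drv",
 *              .id_table = example_id_table,
 *              .probe = example_probe,
 *              .remove = example_remove,
 *      };
 *
 *      static int __init example_init(void)
 *      {
 *              return vmbus_driver_register(&example_drv);
 *      }
 */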
/*
 * Common header for Hyper-V ICs
 */

#define ICMSGTYPE_NEGOTIATE 0
#define ICMSGTYPE_HEARTBEAT 1
#define ICMSGTYPE_KVPEXCHANGE 2
#define ICMSGTYPE_SHUTDOWN 3
#define ICMSGTYPE_TIMESYNC 4
#define ICMSGTYPE_VSS 5

#define ICMSGHDRFLAG_TRANSACTION 1
#define ICMSGHDRFLAG_REQUEST 2
#define ICMSGHDRFLAG_RESPONSE 4

#define HV_S_OK 0x00000000
#define HV_E_FAIL 0x80004005
#define HV_ERROR_NOT_SUPPORTED 0x80070032
#define HV_ERROR_MACHINE_LOCKED 0x800704F7

/*
 * While we want to handle util services as regular devices,
 * there is only one instance of each of these services, so
 * we statically allocate the service specific state.
 */

struct hv_util_service {
        u8 *recv_buffer;
        void (*util_cb)(void *);
        int (*util_init)(struct hv_util_service *);
        void (*util_deinit)(void);
};

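/*
 * Illustrative sketch (not part of this header): a util service such as
 * shutdown or heartbeat would be described by one statically allocated
 * instance of the structure above; the names below are placeholders.
 *
 *      static void example_util_onchannelcallback(void *context);
 *
 *      static struct hv_util_service util_example = {
 *              .util_cb = example_util_onchannelcallback,
 *      };
 */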
struct vmbuspipe_hdr {
        u32 flags;
        u32 msgsize;
} __packed;

struct ic_version {
        u16 major;
        u16 minor;
} __packed;

struct icmsg_hdr {
        struct ic_version icverframe;
        u16 icmsgtype;
        struct ic_version icvermsg;
        u16 icmsgsize;
        u32 status;
        u8 ictransaction_id;
        u8 icflags;
        u8 reserved[2];
} __packed;

struct icmsg_negotiate {
        u16 icframe_vercnt;
        u16 icmsg_vercnt;
        u32 reserved;
        struct ic_version icversion_data[1]; /* any size array */
} __packed;

struct shutdown_msg_data {
        u32 reason_code;
        u32 timeout_seconds;
        u32 flags;
        u8 display_message[2048];
} __packed;

struct heartbeat_msg_data {
        u64 seq_num;
        u32 reserved[8];
} __packed;

/* Time Sync IC defs */
#define ICTIMESYNCFLAG_PROBE 0
#define ICTIMESYNCFLAG_SYNC 1
#define ICTIMESYNCFLAG_SAMPLE 2

#ifdef __x86_64__
#define WLTIMEDELTA 116444736000000000L /* in 100ns units */
#else
#define WLTIMEDELTA 116444736000000000LL
#endif

struct ictimesync_data {
        u64 parenttime;
        u64 childtime;
        u64 roundtriptime;
        u8 flags;
} __packed;

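/*
 * Illustrative sketch (not part of this header): WLTIMEDELTA above is the
 * offset, in 100ns units, between the Windows epoch (1601-01-01) and the
 * Unix epoch (1970-01-01). A host timestamp such as parenttime from
 * struct ictimesync_data can therefore be converted to a Unix-epoch
 * timespec roughly as follows:
 *
 *      u64 unix_100ns = parenttime - WLTIMEDELTA;
 *      struct timespec ts = {
 *              .tv_sec  = unix_100ns / 10000000,
 *              .tv_nsec = (unix_100ns % 10000000) * 100,
 *      };
 */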
struct hyperv_service_callback {
        u8 msg_type;
        char *log_msg;
        uuid_le data;
        struct vmbus_channel *channel;
        void (*callback) (void *context);
};

extern void vmbus_prep_negotiate_resp(struct icmsg_hdr *,
                                      struct icmsg_negotiate *, u8 *);

int hv_kvp_init(struct hv_util_service *);
void hv_kvp_deinit(void);
void hv_kvp_onchannelcallback(void *);

#endif /* __KERNEL__ */
#endif /* _HYPERV_H */