/*
 *
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */

#ifndef _HYPERV_H
#define _HYPERV_H

#include <uapi/linux/hyperv.h>
#include <uapi/asm/hyperv.h>

#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>


#define MAX_PAGE_BUFFER_COUNT			32
#define MAX_MULTIPAGE_BUFFER_COUNT		32 /* 128K */

#pragma pack(push, 1)

/* Single-page buffer */
struct hv_page_buffer {
	u32 len;
	u32 offset;
	u64 pfn;
};

/* Multiple-page buffer */
struct hv_multipage_buffer {
	/* Length and offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};

/*
 * Multiple-page buffer array; the pfn array is variable size:
 * The number of entries in the PFN array is determined by
 * "len" and "offset".
 */
struct hv_mpb_array {
	/* Length and offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[];
};

/* 0x18 includes the proprietary packet header */
#define MAX_PAGE_BUFFER_PACKET		(0x18 +			\
					(sizeof(struct hv_page_buffer) * \
					 MAX_PAGE_BUFFER_COUNT))
#define MAX_MULTIPAGE_BUFFER_PACKET	(0x18 +			\
					 sizeof(struct hv_multipage_buffer))


#pragma pack(pop)

struct hv_ring_buffer {
	/* Offset in bytes from the start of ring data below */
	u32 write_index;

	/* Offset in bytes from the start of ring data below */
	u32 read_index;

	u32 interrupt_mask;

	/*
	 * Win8 uses some of the reserved bits to implement
	 * interrupt driven flow management. On the send side
	 * we can request that the receiver interrupt the sender
	 * when the ring transitions from being full to being able
	 * to handle a message of size "pending_send_sz".
	 *
	 * Add necessary state for this enhancement.
	 */
	u32 pending_send_sz;

	u32 reserved1[12];

	union {
		struct {
			u32 feat_pending_send_sz:1;
		};
		u32 value;
	} feature_bits;

	/* Pad it to PAGE_SIZE so that data starts on page boundary */
	u8	reserved2[4028];

	/*
	 * Ring data starts here + RingDataStartOffset
	 * !!! DO NOT place any fields below this !!!
	 */
	u8 buffer[0];
} __packed;

struct hv_ring_buffer_info {
	struct hv_ring_buffer *ring_buffer;
	u32 ring_size;			/* Include the shared header */
	spinlock_t ring_lock;

	u32 ring_datasize;		/* < ring_size */
	u32 ring_data_startoffset;
	u32 priv_write_index;
	u32 priv_read_index;
	u32 cached_read_index;
};

/*
 *
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read and to write
 * for the specified ring buffer
 */
static inline void
hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	/* Capture the read/write indices before they change */
	read_loc = rbi->ring_buffer->read_index;
	write_loc = rbi->ring_buffer->write_index;
	dsize = rbi->ring_datasize;

	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	*read = dsize - *write;
}

static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, read;

	dsize = rbi->ring_datasize;
	read_loc = rbi->ring_buffer->read_index;
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	read = write_loc >= read_loc ? (write_loc - read_loc) :
		(dsize - read_loc) + write_loc;

	return read;
}

static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, write;

	dsize = rbi->ring_datasize;
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = rbi->ring_buffer->write_index;

	write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	return write;
}

static inline u32 hv_get_cached_bytes_to_write(
	const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, write;

	dsize = rbi->ring_datasize;
	read_loc = rbi->cached_read_index;
	write_loc = rbi->ring_buffer->write_index;

	write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	return write;
}
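
/*
 * Illustrative helper (a sketch, not part of the original header):
 * before queuing a packet, a driver can use hv_get_bytes_to_write()
 * above to check that the outbound ring has room.  "pkt_size" is a
 * hypothetical packet size supplied by the caller.
 */
static inline bool my_ring_has_room(struct hv_ring_buffer_info *rbi,
				    u32 pkt_size)
{
	/* Strictly greater: the ring is never allowed to fill completely. */
	return hv_get_bytes_to_write(rbi) > pkt_size;
}
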
/*
 * The VMBUS version is a 32-bit entity broken up into
 * two 16-bit quantities: major_number.minor_number.
 *
 * 0 . 13 (Windows Server 2008)
 * 1 . 1  (Windows 7)
 * 2 . 4  (Windows 8)
 * 3 . 0  (Windows 8.1)
 * 4 . 0  (Windows 10)
 */

#define VERSION_WS2008	((0 << 16) | (13))
#define VERSION_WIN7	((1 << 16) | (1))
#define VERSION_WIN8	((2 << 16) | (4))
#define VERSION_WIN8_1	((3 << 16) | (0))
#define VERSION_WIN10	((4 << 16) | (0))

#define VERSION_INVAL -1

#define VERSION_CURRENT VERSION_WIN10
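
/*
 * Illustrative macros (a sketch, not part of the original header)
 * showing how the major.minor encoding described above unpacks, e.g.
 * VMBUS_VERSION_MAJOR(VERSION_WIN8) == 2, VMBUS_VERSION_MINOR(VERSION_WIN8) == 4.
 */
#define VMBUS_VERSION_MAJOR(v)	(((v) >> 16) & 0xffff)
#define VMBUS_VERSION_MINOR(v)	((v) & 0xffff)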

/* Maximum size of the pipe payload: 16K */
#define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)

/* Define PipeMode values. */
#define VMBUS_PIPE_TYPE_BYTE		0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE		0x00000004

/* The size of the user defined data buffer for non-pipe offers. */
#define MAX_USER_DEFINED_BYTES		120

/* The size of the user defined data buffer for pipe offers. */
#define MAX_PIPE_USER_DEFINED_BYTES	116

/*
 * At the center of the Channel Management library is the Channel Offer. This
 * struct contains the fundamental information about an offer.
 */
struct vmbus_channel_offer {
	uuid_le if_type;
	uuid_le if_instance;

	/*
	 * These two fields are not currently used.
	 */
	u64 reserved1;
	u64 reserved2;

	u16 chn_flags;
	u16 mmio_megabytes;		/* in bytes * 1024 * 1024 */

	union {
		/* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
		struct {
			unsigned char user_def[MAX_USER_DEFINED_BYTES];
		} std;

		/*
		 * Pipes:
		 * The following structure is an integrated pipe protocol, which
		 * is implemented on top of standard user-defined data. Pipe
		 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
		 * use.
		 */
		struct {
			u32  pipe_mode;
			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
		} pipe;
	} u;
	/*
	 * The sub_channel_index is defined in win8.
	 */
	u16 sub_channel_index;
	u16 reserved3;
} __packed;

/* Server Flags */
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE	1
#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES	2
#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS		4
#define VMBUS_CHANNEL_NAMED_PIPE_MODE			0x10
#define VMBUS_CHANNEL_LOOPBACK_OFFER			0x100
#define VMBUS_CHANNEL_PARENT_OFFER			0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400
#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER		0x2000

struct vmpacket_descriptor {
	u16 type;
	u16 offset8;
	u16 len8;
	u16 flags;
	u64 trans_id;
} __packed;

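/*
 * Illustrative helper (a sketch, not part of the original header):
 * offset8 and len8 above are expressed in units of 8 bytes, so the
 * payload length of a packet can be computed as below.
 */
static inline u32 hv_pkt_data_len(const struct vmpacket_descriptor *desc)
{
	/* Payload = total length minus header area, both in 8-byte units. */
	return (desc->len8 - desc->offset8) << 3;
}
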
struct vmpacket_header {
	u32 prev_pkt_start_offset;
	struct vmpacket_descriptor descriptor;
} __packed;

struct vmtransfer_page_range {
	u32 byte_count;
	u32 byte_offset;
} __packed;

struct vmtransfer_page_packet_header {
	struct vmpacket_descriptor d;
	u16 xfer_pageset_id;
	u8  sender_owns_set;
	u8 reserved;
	u32 range_cnt;
	struct vmtransfer_page_range ranges[1];
} __packed;

struct vmgpadl_packet_header {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;
} __packed;

struct vmadd_remove_transfer_page_set {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u16 xfer_pageset_id;
	u16 reserved;
} __packed;

/*
 * This structure defines a range in guest physical space that can be made to
 * look virtually contiguous.
 */
struct gpa_range {
	u32 byte_count;
	u32 byte_offset;
	u64 pfn_array[0];
};

/*
 * This is the format for an Establish Gpadl packet, which contains a handle by
 * which this GPADL will be known and a set of GPA ranges associated with it.
 * This can be converted to an MDL by the guest OS. If there are multiple GPA
 * ranges, then the resulting MDL will be "chained," representing multiple VA
 * ranges.
 */
struct vmestablish_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/*
 * This is the format for a Teardown Gpadl packet, which indicates that the
 * GPADL handle in the Establish Gpadl packet will never be referenced again.
 */
struct vmteardown_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;	/* for alignment to an 8-byte boundary */
} __packed;

/*
 * This is the format for a GPA-Direct packet, which contains a set of GPA
 * ranges, in addition to commands and/or data.
 */
struct vmdata_gpa_direct {
	struct vmpacket_descriptor d;
	u32 reserved;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/* This is the format for an Additional Data Packet. */
struct vmadditional_data {
	struct vmpacket_descriptor d;
	u64 total_bytes;
	u32 offset;
	u32 byte_cnt;
	unsigned char data[1];
} __packed;

union vmpacket_largest_possible_header {
	struct vmpacket_descriptor simple_hdr;
	struct vmtransfer_page_packet_header xfer_page_hdr;
	struct vmgpadl_packet_header gpadl_hdr;
	struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
	struct vmestablish_gpadl establish_gpadl_hdr;
	struct vmteardown_gpadl teardown_gpadl_hdr;
	struct vmdata_gpa_direct data_gpa_direct_hdr;
};

#define VMPACKET_DATA_START_ADDRESS(__packet)	\
	(void *)(((unsigned char *)__packet) +	\
	 ((struct vmpacket_descriptor *)__packet)->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)		\
	((((struct vmpacket_descriptor *)__packet)->len8 -	\
	  ((struct vmpacket_descriptor *)__packet)->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)	\
	(((struct vmpacket_descriptor *)__packet)->type)

enum vmbus_packet_type {
	VM_PKT_INVALID				= 0x0,
	VM_PKT_SYNCH				= 0x1,
	VM_PKT_ADD_XFER_PAGESET			= 0x2,
	VM_PKT_RM_XFER_PAGESET			= 0x3,
	VM_PKT_ESTABLISH_GPADL			= 0x4,
	VM_PKT_TEARDOWN_GPADL			= 0x5,
	VM_PKT_DATA_INBAND			= 0x6,
	VM_PKT_DATA_USING_XFER_PAGES		= 0x7,
	VM_PKT_DATA_USING_GPADL			= 0x8,
	VM_PKT_DATA_USING_GPA_DIRECT		= 0x9,
	VM_PKT_CANCEL_REQUEST			= 0xa,
	VM_PKT_COMP				= 0xb,
	VM_PKT_DATA_USING_ADDITIONAL_PKT	= 0xc,
	VM_PKT_ADDITIONAL_DATA			= 0xd
};

#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED	1


/* Version 1 messages */
enum vmbus_channel_message_type {
	CHANNELMSG_INVALID		=  0,
	CHANNELMSG_OFFERCHANNEL		=  1,
	CHANNELMSG_RESCIND_CHANNELOFFER	=  2,
	CHANNELMSG_REQUESTOFFERS	=  3,
	CHANNELMSG_ALLOFFERS_DELIVERED	=  4,
	CHANNELMSG_OPENCHANNEL		=  5,
	CHANNELMSG_OPENCHANNEL_RESULT	=  6,
	CHANNELMSG_CLOSECHANNEL		=  7,
	CHANNELMSG_GPADL_HEADER		=  8,
	CHANNELMSG_GPADL_BODY		=  9,
	CHANNELMSG_GPADL_CREATED	= 10,
	CHANNELMSG_GPADL_TEARDOWN	= 11,
	CHANNELMSG_GPADL_TORNDOWN	= 12,
	CHANNELMSG_RELID_RELEASED	= 13,
	CHANNELMSG_INITIATE_CONTACT	= 14,
	CHANNELMSG_VERSION_RESPONSE	= 15,
	CHANNELMSG_UNLOAD		= 16,
	CHANNELMSG_UNLOAD_RESPONSE	= 17,
	CHANNELMSG_18			= 18,
	CHANNELMSG_19			= 19,
	CHANNELMSG_20			= 20,
	CHANNELMSG_TL_CONNECT_REQUEST	= 21,
	CHANNELMSG_COUNT
};

struct vmbus_channel_message_header {
	enum vmbus_channel_message_type msgtype;
	u32 padding;
} __packed;

/* Query VMBus Version parameters */
struct vmbus_channel_query_vmbus_version {
	struct vmbus_channel_message_header header;
	u32 version;
} __packed;

/* VMBus Version Supported parameters */
struct vmbus_channel_version_supported {
	struct vmbus_channel_message_header header;
	u8 version_supported;
} __packed;

/* Offer Channel parameters */
struct vmbus_channel_offer_channel {
	struct vmbus_channel_message_header header;
	struct vmbus_channel_offer offer;
	u32 child_relid;
	u8 monitorid;
	/*
	 * win7 and beyond split this field into a bit field.
	 */
	u8 monitor_allocated:1;
	u8 reserved:7;
	/*
	 * These are new fields added in win7 and later.
	 * Do not access these fields without checking the
	 * negotiated protocol.
	 *
	 * If "is_dedicated_interrupt" is set, we must not set the
	 * associated bit in the channel bitmap while sending the
	 * interrupt to the host.
	 *
	 * connection_id is to be used in signaling the host.
	 */
	u16 is_dedicated_interrupt:1;
	u16 reserved1:15;
	u32 connection_id;
} __packed;

/* Rescind Offer parameters */
struct vmbus_channel_rescind_offer {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

/*
 * Request Offer -- no parameters, SynIC message contains the partition ID
 * Set Snoop -- no parameters, SynIC message contains the partition ID
 * Clear Snoop -- no parameters, SynIC message contains the partition ID
 * All Offers Delivered -- no parameters, SynIC message contains the partition
 *			   ID
 * Flush Client -- no parameters, SynIC message contains the partition ID
 */

/* Open Channel parameters */
struct vmbus_channel_open_channel {
	struct vmbus_channel_message_header header;

	/* Identifies the specific VMBus channel that is being opened. */
	u32 child_relid;

	/* ID making a particular open request at a channel offer unique. */
	u32 openid;

	/* GPADL for the channel's ring buffer. */
	u32 ringbuffer_gpadlhandle;

	/*
	 * Starting with win8, this field will be used to specify
	 * the target virtual processor on which to deliver the interrupt for
	 * the host to guest communication.
	 * Prior to win8, incoming channel interrupts would only
	 * be delivered on cpu 0. Setting this value to 0 would
	 * preserve the earlier behavior.
	 */
	u32 target_vp;

	/*
	 * The upstream ring buffer begins at offset zero in the memory
	 * described by RingBufferGpadlHandle. The downstream ring buffer
	 * follows it at this offset (in pages).
	 */
	u32 downstream_ringbuffer_pageoffset;

	/* User-specific data to be passed along to the server endpoint. */
	unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;

/* Open Channel Result parameters */
struct vmbus_channel_open_result {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 openid;
	u32 status;
} __packed;

/* Close channel parameters */
struct vmbus_channel_close_channel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

/* Channel Message GPADL */
#define GPADL_TYPE_RING_BUFFER		1
#define GPADL_TYPE_SERVER_SAVE_AREA	2
#define GPADL_TYPE_TRANSACTION		8

/*
 * The number of PFNs in a GPADL message is defined by the number of
 * pages that would be spanned by ByteCount and ByteOffset. If the
 * implied number of PFNs won't fit in this packet, there will be a
 * follow-up packet that contains more.
 */
struct vmbus_channel_gpadl_header {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u16 range_buflen;
	u16 rangecount;
	struct gpa_range range[0];
} __packed;

/* This is the followup packet that contains more PFNs. */
struct vmbus_channel_gpadl_body {
	struct vmbus_channel_message_header header;
	u32 msgnumber;
	u32 gpadl;
	u64 pfn[0];
} __packed;

struct vmbus_channel_gpadl_created {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u32 creation_status;
} __packed;

struct vmbus_channel_gpadl_teardown {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
} __packed;

struct vmbus_channel_gpadl_torndown {
	struct vmbus_channel_message_header header;
	u32 gpadl;
} __packed;

struct vmbus_channel_relid_released {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

struct vmbus_channel_initiate_contact {
	struct vmbus_channel_message_header header;
	u32 vmbus_version_requested;
	u32 target_vcpu;	/* The VCPU the host should respond to */
	u64 interrupt_page;
	u64 monitor_page1;
	u64 monitor_page2;
} __packed;

/* Hyper-V socket: guest's connect()-ing to host */
struct vmbus_channel_tl_connect_request {
	struct vmbus_channel_message_header header;
	uuid_le guest_endpoint_id;
	uuid_le host_service_id;
} __packed;

struct vmbus_channel_version_response {
	struct vmbus_channel_message_header header;
	u8 version_supported;
} __packed;

enum vmbus_channel_state {
	CHANNEL_OFFER_STATE,
	CHANNEL_OPENING_STATE,
	CHANNEL_OPEN_STATE,
	CHANNEL_OPENED_STATE,
};

/*
 * Represents each channel msg on the vmbus connection. This is a
 * variable-size data structure depending on the msg type itself.
 */
struct vmbus_channel_msginfo {
	/* Bookkeeping stuff */
	struct list_head msglistentry;

	/* So far, this is only used to handle gpadl body message */
	struct list_head submsglist;

	/* Synchronize the request/response if needed */
	struct completion waitevent;
	union {
		struct vmbus_channel_version_supported version_supported;
		struct vmbus_channel_open_result open_result;
		struct vmbus_channel_gpadl_torndown gpadl_torndown;
		struct vmbus_channel_gpadl_created gpadl_created;
		struct vmbus_channel_version_response version_response;
	} response;

	u32 msgsize;
	/*
	 * The channel message that goes out on the "wire".
	 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
	 */
	unsigned char msg[0];
};

struct vmbus_close_msg {
	struct vmbus_channel_msginfo info;
	struct vmbus_channel_close_channel msg;
};

/* Define connection identifier type. */
union hv_connection_id {
	u32 asu32;
	struct {
		u32 id:24;
		u32 reserved:8;
	} u;
};

/* Definition of the hv_signal_event hypercall input structure. */
struct hv_input_signal_event {
	union hv_connection_id connectionid;
	u16 flag_number;
	u16 rsvdz;
};

struct hv_input_signal_event_buffer {
	u64 align8;
	struct hv_input_signal_event event;
};

enum hv_signal_policy {
	HV_SIGNAL_POLICY_DEFAULT = 0,
	HV_SIGNAL_POLICY_EXPLICIT,
};

enum hv_numa_policy {
	HV_BALANCED = 0,
	HV_LOCALIZED,
};

enum vmbus_device_type {
	HV_IDE = 0,
	HV_SCSI,
	HV_FC,
	HV_NIC,
	HV_ND,
	HV_PCIE,
	HV_FB,
	HV_KBD,
	HV_MOUSE,
	HV_KVP,
	HV_TS,
	HV_HB,
	HV_SHUTDOWN,
	HV_FCOPY,
	HV_BACKUP,
	HV_DM,
	HV_UNKNOWN,
};

struct vmbus_device {
	u16 dev_type;
	uuid_le guid;
	bool perf_device;
};

struct vmbus_channel {
	struct list_head listentry;

	struct hv_device *device_obj;

	enum vmbus_channel_state state;

	struct vmbus_channel_offer_channel offermsg;
	/*
	 * These are based on the OfferMsg.MonitorId.
	 * Save it here for easy access.
	 */
	u8 monitor_grp;
	u8 monitor_bit;

	bool rescind; /* got rescind msg */

	u32 ringbuffer_gpadlhandle;

	/* Allocated memory for ring buffer */
	void *ringbuffer_pages;
	u32 ringbuffer_pagecount;
	struct hv_ring_buffer_info outbound;	/* send to parent */
	struct hv_ring_buffer_info inbound;	/* receive from parent */
	spinlock_t inbound_lock;

	struct vmbus_close_msg close_msg;

	/* Channel callbacks are invoked in this workqueue context */
	/* HANDLE dataWorkQueue; */

	void (*onchannel_callback)(void *context);
	void *channel_callback_context;

	/*
	 * A channel can be marked for efficient (batched)
	 * reading:
	 * If batched_reading is set to "true", we read until the
	 * channel is empty and hold off interrupts from the host
	 * during the entire read process.
	 * If batched_reading is set to "false", the client is not
	 * going to perform batched reading.
	 *
	 * By default we will enable batched reading; specific
	 * drivers that don't want this behavior can turn it off.
	 */

	bool batched_reading;

	bool is_dedicated_interrupt;
	struct hv_input_signal_event_buffer sig_buf;
	struct hv_input_signal_event *sig_event;

	/*
	 * Starting with win8, this field will be used to specify
	 * the target virtual processor on which to deliver the interrupt for
	 * the host to guest communication.
	 * Prior to win8, incoming channel interrupts would only
	 * be delivered on cpu 0. Setting this value to 0 would
	 * preserve the earlier behavior.
	 */
	u32 target_vp;
	/* The corresponding CPUID in the guest */
	u32 target_cpu;
	/*
	 * State to manage the CPU affiliation of channels.
	 */
	struct cpumask alloced_cpus_in_node;
	int numa_node;
	/*
	 * Support for sub-channels. For high performance devices,
	 * it will be useful to have multiple sub-channels to support
	 * a scalable communication infrastructure with the host.
	 * The support for sub-channels is implemented as an extension
	 * to the current infrastructure.
	 * The initial offer is considered the primary channel and this
	 * offer message will indicate if the host supports sub-channels.
	 * The guest is free to ask for sub-channels to be offered and can
	 * open these sub-channels as a normal "primary" channel. However,
	 * all sub-channels will have the same type and instance guids as the
	 * primary channel. Requests sent on a given channel will result in a
	 * response on the same channel.
	 */

	/*
	 * Sub-channel creation callback. This callback will be called in
	 * process context when a sub-channel offer is received from the host.
	 * The guest can open the sub-channel in the context of this callback.
	 */
	void (*sc_creation_callback)(struct vmbus_channel *new_sc);

	/*
	 * Channel rescind callback. Some channels (the hvsock ones) need to
	 * register a callback which is invoked in vmbus_onoffer_rescind().
	 */
	void (*chn_rescind_callback)(struct vmbus_channel *channel);

	/*
	 * The spinlock to protect the structure. It is being used to protect
	 * test-and-set access to various attributes of the structure as well
	 * as all sc_list operations.
	 */
	spinlock_t lock;
	/*
	 * All Sub-channels of a primary channel are linked here.
	 */
	struct list_head sc_list;
	/*
	 * Current number of sub-channels.
	 */
	int num_sc;
	/*
	 * Number of a sub-channel (position within sc_list) which is supposed
	 * to be used as the next outgoing channel.
	 */
	int next_oc;
	/*
	 * The primary channel this sub-channel belongs to.
	 * This will be NULL for the primary channel.
	 */
	struct vmbus_channel *primary_channel;
	/*
	 * Support per-channel state for use by vmbus drivers.
	 */
	void *per_channel_state;
	/*
	 * To support per-cpu lookup mapping of relid to channel,
	 * link up channels based on their CPU affinity.
	 */
	struct list_head percpu_list;
	/*
	 * Host signaling policy: The default policy will be
	 * based on the ring buffer state. We will also support
	 * a policy where the client driver can have explicit
	 * signaling control.
	 */
	enum hv_signal_policy signal_policy;
	/*
	 * On the channel send side, many of the VMBUS
	 * device drivers explicitly serialize access to the
	 * outgoing ring buffer. Give more control to the
	 * VMBUS device drivers in terms of how to serialize
	 * access to the outgoing ring buffer.
	 * The default behavior will be to acquire the
	 * ring lock to preserve the current behavior.
	 */
	bool acquire_ring_lock;
	/*
	 * For performance critical channels (storage, networking
	 * etc.), Hyper-V has a mechanism to enhance the throughput
	 * at the expense of latency:
	 * When the host is to be signaled, we just set a bit in a shared page
	 * and this bit will be inspected by the hypervisor within a certain
	 * window and if the bit is set, the host will be signaled. The window
	 * of time is the monitor latency - currently around 100 usecs. This
	 * mechanism improves throughput by:
	 *
	 * A) Making the host more efficient - each time it wakes up,
	 *    it will potentially process more packets. The
	 *    monitor latency allows a batch to build up.
	 * B) By deferring the hypercall to signal, we will also minimize
	 *    the interrupts.
	 *
	 * Clearly, these optimizations improve throughput at the expense of
	 * latency. Furthermore, since the channel is shared for both
	 * control and data messages, control messages currently suffer
	 * unnecessary latency adversely impacting performance and boot
	 * time. To fix this issue, permit tagging the channel as being
	 * in "low latency" mode. In this mode, we will bypass the monitor
	 * mechanism.
	 */
	bool low_latency;

	/*
	 * NUMA distribution policy:
	 * We support two policies:
	 * 1) Balanced: Here all performance critical channels are
	 *    distributed evenly amongst all the NUMA nodes.
	 *    This policy will be the default policy.
	 * 2) Localized: All channels of a given instance of a
	 *    performance critical service will be assigned CPUs
	 *    within a selected NUMA node.
	 */
	enum hv_numa_policy affinity_policy;

};

static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
{
	c->acquire_ring_lock = state;
}

static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
	return !!(c->offermsg.offer.chn_flags &
		  VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
}

static inline void set_channel_signal_state(struct vmbus_channel *c,
					    enum hv_signal_policy policy)
{
	c->signal_policy = policy;
}

static inline void set_channel_affinity_state(struct vmbus_channel *c,
					      enum hv_numa_policy policy)
{
	c->affinity_policy = policy;
}

static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
{
	c->batched_reading = state;
}

static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
	c->per_channel_state = s;
}

static inline void *get_per_channel_state(struct vmbus_channel *c)
{
	return c->per_channel_state;
}
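
/*
 * Illustrative use of the per-channel state (a sketch; "struct my_dev"
 * is hypothetical): a driver can attach its private data to a channel
 * and retrieve it later, e.g. in the channel callback:
 *
 *	set_per_channel_state(channel, my_dev);
 *	...
 *	struct my_dev *md = get_per_channel_state(channel);
 */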

static inline void set_channel_pending_send_size(struct vmbus_channel *c,
						 u32 size)
{
	c->outbound.ring_buffer->pending_send_sz = size;
}
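
/*
 * Illustrative producer-side use of pending_send_sz (a sketch, not part
 * of the original header): if the outbound ring is too full for a packet
 * of "pkt_size" bytes, ask the host to signal us once enough room has
 * opened up.
 */
static inline bool my_wait_for_room(struct vmbus_channel *c, u32 pkt_size)
{
	if (hv_get_bytes_to_write(&c->outbound) > pkt_size)
		return true;		/* room available now */

	set_channel_pending_send_size(c, pkt_size);
	return false;			/* caller should sleep until signaled */
}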

static inline void set_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = true;
}

static inline void clear_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = false;
}

void vmbus_onmessage(void *context);

int vmbus_request_offers(void);

/*
 * APIs for managing sub-channels.
 */

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
			void (*sc_cr_cb)(struct vmbus_channel *new_sc));

void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
			void (*chn_rescind_cb)(struct vmbus_channel *));

/*
 * Retrieve the (sub) channel on which to send an outgoing request.
 * When a primary channel has multiple sub-channels, we choose a
 * channel whose VCPU binding is closest to the VCPU on which
 * this call is being made.
 */
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary);

/*
 * Check if sub-channels have already been offered. This API will be useful
 * when the driver is unloaded after establishing sub-channels. In this case,
 * when the driver is re-loaded, the driver would have to check if the
 * subchannels have already been established before attempting to request
 * the creation of sub-channels.
 * This function returns TRUE to indicate that subchannels have already been
 * created.
 * This function should be invoked after setting the callback function for
 * sub-channel creation.
 */
bool vmbus_are_subchannels_present(struct vmbus_channel *primary);

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;
	struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_multipage_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_multipage_buffer range;
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_packet_mpb_array {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_mpb_array range;
} __packed;


extern int vmbus_open(struct vmbus_channel *channel,
		      u32 send_ringbuffersize,
		      u32 recv_ringbuffersize,
		      void *userdata,
		      u32 userdatalen,
		      void (*onchannel_callback)(void *context),
		      void *context);

extern void vmbus_close(struct vmbus_channel *channel);
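
/*
 * Typical open/close sequence (illustrative sketch; the ring sizes and
 * "my_onchannelcallback" are hypothetical):
 *
 *	ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
 *			 NULL, 0, my_onchannelcallback, dev->channel);
 *	if (ret)
 *		return ret;
 *	...
 *	vmbus_close(dev->channel);
 */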

extern int vmbus_sendpacket(struct vmbus_channel *channel,
			    void *buffer,
			    u32 bufferLen,
			    u64 requestid,
			    enum vmbus_packet_type type,
			    u32 flags);

extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
				void *buffer,
				u32 bufferLen,
				u64 requestid,
				enum vmbus_packet_type type,
				u32 flags,
				bool kick_q);

extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
				       struct hv_page_buffer pagebuffers[],
				       u32 pagecount,
				       void *buffer,
				       u32 bufferlen,
				       u64 requestid);

extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
					   struct hv_page_buffer pagebuffers[],
					   u32 pagecount,
					   void *buffer,
					   u32 bufferlen,
					   u64 requestid,
					   u32 flags,
					   bool kick_q);

extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
					    struct hv_multipage_buffer *mpb,
					    void *buffer,
					    u32 bufferlen,
					    u64 requestid);

extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
				     struct vmbus_packet_mpb_array *mpb,
				     u32 desc_size,
				     void *buffer,
				     u32 bufferlen,
				     u64 requestid);

extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
				 void *kbuffer,
				 u32 size,
				 u32 *gpadl_handle);

extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
				u32 gpadl_handle);
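
/*
 * Illustrative GPADL lifecycle (a sketch; "buf" and "buf_size" are
 * hypothetical): describe a guest buffer to the host, pass the resulting
 * handle in a channel message, then tear it down when done.
 *
 *	u32 gpadl;
 *
 *	ret = vmbus_establish_gpadl(channel, buf, buf_size, &gpadl);
 *	...
 *	vmbus_teardown_gpadl(channel, gpadl);
 */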

extern int vmbus_recvpacket(struct vmbus_channel *channel,
			    void *buffer,
			    u32 bufferlen,
			    u32 *buffer_actual_len,
			    u64 *requestid);

extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
				void *buffer,
				u32 bufferlen,
				u32 *buffer_actual_len,
				u64 *requestid);


extern void vmbus_ontimer(unsigned long data);

/* Base driver object */
struct hv_driver {
	const char *name;

	/*
	 * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
	 * channel flag, actually doesn't mean a synthetic device because the
	 * offer's if_type/if_instance can change for every new hvsock
	 * connection.
	 *
	 * However, to facilitate the notification of new-offer/rescind-offer
	 * from vmbus driver to hvsock driver, we can handle hvsock offer as
	 * a special vmbus device, and hence we need the below flag to
	 * indicate if the driver is the hvsock driver or not: we need to
	 * specially treat the hvsock offer & driver in vmbus_match().
	 */
	bool hvsock;

	/* the device type supported by this driver */
	uuid_le dev_type;
	const struct hv_vmbus_device_id *id_table;

	struct device_driver driver;

	/* dynamic device GUID's */
	struct {
		spinlock_t lock;
		struct list_head list;
	} dynids;

	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
	int (*remove)(struct hv_device *);
	void (*shutdown)(struct hv_device *);

};

/* Base device object */
struct hv_device {
	/* the device type id of this device */
	uuid_le dev_type;

	/* the device instance id of this device */
	uuid_le dev_instance;
	u16 vendor_id;
	u16 device_id;

	struct device device;

	struct vmbus_channel *channel;
};


static inline struct hv_device *device_to_hv_device(struct device *d)
{
	return container_of(d, struct hv_device, device);
}

static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
	return container_of(d, struct hv_driver, driver);
}

static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
	dev_set_drvdata(&dev->device, data);
}

static inline void *hv_get_drvdata(struct hv_device *dev)
{
	return dev_get_drvdata(&dev->device);
}

/* Vmbus interface */
#define vmbus_driver_register(driver)	\
	__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
					 struct module *owner,
					 const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
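
/*
 * Minimal driver skeleton (illustrative sketch; "my_probe", "my_remove"
 * and "my_id_table" are hypothetical):
 *
 *	static struct hv_driver my_drv = {
 *		.name = "my_drv",
 *		.id_table = my_id_table,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	ret = vmbus_driver_register(&my_drv);	(in module init)
 *	...
 *	vmbus_driver_unregister(&my_drv);	(in module exit)
 */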

void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);

int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok);
void vmbus_free_mmio(resource_size_t start, resource_size_t size);
int vmbus_cpu_number_to_vp_number(int cpu_number);
u64 hv_do_hypercall(u64 control, void *input, void *output);

/*
 * GUID definitions of various offer types - services offered to the guest.
 */

/*
 * Network GUID
 * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
 */
#define HV_NIC_GUID \
	.guid = UUID_LE(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
			0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)
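
/*
 * These GUID macros are meant to be used as initializers in a driver's
 * device-ID table (illustrative; the table name is hypothetical):
 *
 *	static const struct hv_vmbus_device_id id_table[] = {
 *		{ HV_NIC_GUID, },
 *		{ },
 *	};
 */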

/*
 * IDE GUID
 * {32412632-86cb-44a2-9b5c-50d1417354f5}
 */
#define HV_IDE_GUID \
	.guid = UUID_LE(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
			0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)

/*
 * SCSI GUID
 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
 */
#define HV_SCSI_GUID \
	.guid = UUID_LE(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
			0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)

/*
 * Shutdown GUID
 * {0e0b6031-5213-4934-818b-38d90ced39db}
 */
#define HV_SHUTDOWN_GUID \
	.guid = UUID_LE(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
			0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)

/*
 * Time Synch GUID
 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
 */
#define HV_TS_GUID \
	.guid = UUID_LE(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
			0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)

/*
 * Heartbeat GUID
 * {57164f39-9115-4e78-ab55-382f3bd5422d}
 */
#define HV_HEART_BEAT_GUID \
	.guid = UUID_LE(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
			0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)

/*
 * KVP GUID
 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
 */
#define HV_KVP_GUID \
	.guid = UUID_LE(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
			0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)

/*
 * Dynamic memory GUID
 * {525074dc-8985-46e2-8057-a307dc18a502}
 */
#define HV_DM_GUID \
	.guid = UUID_LE(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
			0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)

/*
 * Mouse GUID
 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
 */
#define HV_MOUSE_GUID \
	.guid = UUID_LE(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
			0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)

/*
 * Keyboard GUID
 * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
 */
#define HV_KBD_GUID \
	.guid = UUID_LE(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
			0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)

/*
 * VSS (Backup/Restore) GUID
 */
#define HV_VSS_GUID \
	.guid = UUID_LE(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
			0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
/*
 * Synthetic Video GUID
 * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
 */
#define HV_SYNTHVID_GUID \
	.guid = UUID_LE(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
			0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)

/*
 * Synthetic FC GUID
 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
 */
#define HV_SYNTHFC_GUID \
	.guid = UUID_LE(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
			0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)

/*
 * Guest File Copy Service
 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
 */

#define HV_FCOPY_GUID \
	.guid = UUID_LE(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
			0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)

/*
 * NetworkDirect. This is the guest RDMA service.
 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
 */
#define HV_ND_GUID \
	.guid = UUID_LE(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
			0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)

/*
 * PCI Express Pass Through
 * {44C4F61D-4444-4400-9D52-802E27EDE19F}
 */

#define HV_PCIE_GUID \
	.guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
			0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)

/*
 * Linux doesn't support these 3 devices: the first two are for
 * Automatic Virtual Machine Activation, and the third is for
 * Remote Desktop Virtualization.
 * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
 * {3375baf4-9e15-4b30-b765-67acb10d607b}
 * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
 */

#define HV_AVMA1_GUID \
	.guid = UUID_LE(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
			0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)

#define HV_AVMA2_GUID \
	.guid = UUID_LE(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
			0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)

#define HV_RDV_GUID \
	.guid = UUID_LE(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
			0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)

/*
 * Common header for Hyper-V ICs
 */

#define ICMSGTYPE_NEGOTIATE		0
#define ICMSGTYPE_HEARTBEAT		1
#define ICMSGTYPE_KVPEXCHANGE		2
#define ICMSGTYPE_SHUTDOWN		3
#define ICMSGTYPE_TIMESYNC		4
#define ICMSGTYPE_VSS			5

#define ICMSGHDRFLAG_TRANSACTION	1
#define ICMSGHDRFLAG_REQUEST		2
#define ICMSGHDRFLAG_RESPONSE		4


/*
 * While we want to handle util services as regular devices,
 * there is only one instance of each of these services; so
 * we statically allocate the service specific state.
 */

struct hv_util_service {
	u8 *recv_buffer;
	void *channel;
	void (*util_cb)(void *);
	int (*util_init)(struct hv_util_service *);
	void (*util_deinit)(void);
};

struct vmbuspipe_hdr {
	u32 flags;
	u32 msgsize;
} __packed;

struct ic_version {
	u16 major;
	u16 minor;
} __packed;

struct icmsg_hdr {
	struct ic_version icverframe;
	u16 icmsgtype;
	struct ic_version icvermsg;
	u16 icmsgsize;
	u32 status;
	u8 ictransaction_id;
	u8 icflags;
	u8 reserved[2];
} __packed;

struct icmsg_negotiate {
	u16 icframe_vercnt;
	u16 icmsg_vercnt;
	u32 reserved;
	struct ic_version icversion_data[1]; /* any size array */
} __packed;

struct shutdown_msg_data {
	u32 reason_code;
	u32 timeout_seconds;
	u32 flags;
	u8  display_message[2048];
} __packed;

struct heartbeat_msg_data {
	u64 seq_num;
	u32 reserved[8];
} __packed;

/* Time Sync IC defs */
#define ICTIMESYNCFLAG_PROBE	0
#define ICTIMESYNCFLAG_SYNC	1
#define ICTIMESYNCFLAG_SAMPLE	2

#ifdef __x86_64__
#define WLTIMEDELTA	116444736000000000L	/* in 100ns units: Windows epoch (1601) to Unix epoch (1970) */
#else
#define WLTIMEDELTA	116444736000000000LL
#endif

struct ictimesync_data {
	u64 parenttime;
	u64 childtime;
	u64 roundtriptime;
	u8 flags;
} __packed;

struct ictimesync_ref_data {
	u64 parenttime;
	u64 vmreferencetime;
	u8 flags;
	char leapflags;
	char stratum;
	u8 reserved[3];
} __packed;

struct hyperv_service_callback {
	u8 msg_type;
	char *log_msg;
	uuid_le data;
	struct vmbus_channel *channel;
	void (*callback) (void *context);
};

#define MAX_SRV_VER	0x7ffffff
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
				      struct icmsg_negotiate *, u8 *, int,
				      int);
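
/*
 * Typical use (illustrative sketch; "fw_version" and "srv_version" are
 * hypothetical caller-chosen versions): a util driver's channel callback
 * handles the negotiate message by letting this helper build the
 * response in place:
 *
 *	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE)
 *		vmbus_prep_negotiate_resp(icmsghdrp, negop, recv_buffer,
 *					  fw_version, srv_version);
 */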

void hv_event_tasklet_disable(struct vmbus_channel *channel);
void hv_event_tasklet_enable(struct vmbus_channel *channel);

void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);

void vmbus_setevent(struct vmbus_channel *channel);
/*
 * Negotiated version with the Host.
 */

extern __u32 vmbus_proto_version;

int vmbus_send_tl_connect_request(const uuid_le *shv_guest_service_id,
				  const uuid_le *shv_host_service_id);
void vmbus_set_event(struct vmbus_channel *channel);

/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}

/*
 * To optimize the flow management on the send-side,
 * when the sender is blocked because of lack of
 * sufficient space in the ring buffer, potentially the
 * consumer of the ring buffer can signal the producer.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: This is the size in bytes that the
 *    producer is trying to send.
 * 2. The feature bit feat_pending_send_sz is set to indicate if
 *    the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */

static inline void hv_signal_on_read(struct vmbus_channel *channel)
{
	u32 cur_write_sz, cached_write_sz;
	u32 pending_sz;
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * Here is the reason for having this barrier:
	 * If the reading of the pend_sz (in this function)
	 * were to be reordered and read before we commit the new read
	 * index (in the calling function) we could
	 * have a problem. If the host were to set the pending_sz after we
	 * have sampled pending_sz and go to sleep before we commit the
	 * read index, we could miss sending the interrupt. Issue a full
	 * memory barrier to address this.
	 */
	virt_mb();

	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	/* If the other end is not blocked on write don't bother. */
	if (pending_sz == 0)
		return;

	cur_write_sz = hv_get_bytes_to_write(rbi);

	if (cur_write_sz < pending_sz)
		return;

	cached_write_sz = hv_get_cached_bytes_to_write(rbi);
	if (cached_write_sz < pending_sz)
		vmbus_setevent(channel);

	return;
}

static inline void
init_cached_read_index(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	rbi->cached_read_index = rbi->ring_buffer->read_index;
}

/*
 * An API to support in-place processing of incoming VMBUS packets.
 */
#define VMBUS_PKT_TRAILER	8

static inline struct vmpacket_descriptor *
get_next_pkt_raw(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *ring_info = &channel->inbound;
	u32 priv_read_loc = ring_info->priv_read_index;
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 dsize = ring_info->ring_datasize;
	/*
	 * delta is the difference between what is available to read and
	 * what was already consumed in place. We commit read index after
	 * the whole batch is processed.
	 */
	u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ?
		priv_read_loc - ring_info->ring_buffer->read_index :
		(dsize - ring_info->ring_buffer->read_index) + priv_read_loc;
	u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);

	if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
		return NULL;

	return ring_buffer + priv_read_loc;
}

/*
 * A helper function to step through packets "in-place".
 * This API is to be called after each successful call to
 * get_next_pkt_raw().
 */
static inline void put_pkt_raw(struct vmbus_channel *channel,
			       struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *ring_info = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = ring_info->ring_datasize;

	/*
	 * Include the packet trailer.
	 */
	ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	ring_info->priv_read_index %= dsize;
}

/*
 * This call commits the read index and potentially signals the host.
 * Here is the pattern for using the "in-place" consumption APIs:
 *
 * init_cached_read_index();
 *
 * while (get_next_pkt_raw()) {
 *	process the packet "in-place";
 *	put_pkt_raw();
 * }
 * if (packets processed in place)
 *	commit_rd_index();
 */
static inline void commit_rd_index(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *ring_info = &channel->inbound;
	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	ring_info->ring_buffer->read_index = ring_info->priv_read_index;

	hv_signal_on_read(channel);
}
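
/*
 * Illustrative consumer loop (a sketch, not part of the original header)
 * tying the in-place APIs above together; "process_desc" is a
 * hypothetical per-packet handler.
 */
static inline void my_drain_channel(struct vmbus_channel *channel,
				    void (*process_desc)(struct vmpacket_descriptor *))
{
	struct vmpacket_descriptor *desc;
	bool processed = false;

	init_cached_read_index(channel);

	while ((desc = get_next_pkt_raw(channel)) != NULL) {
		process_desc(desc);		/* consume the packet in place */
		put_pkt_raw(channel, desc);
		processed = true;
	}

	if (processed)
		commit_rd_index(channel);	/* publish read index; may signal host */
}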


#endif /* _HYPERV_H */