include/linux/hyperv.h
1 /*
2 *
3 * Copyright (c) 2011, Microsoft Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 *
18 * Authors:
19 * Haiyang Zhang <haiyangz@microsoft.com>
20 * Hank Janssen <hjanssen@microsoft.com>
21 * K. Y. Srinivasan <kys@microsoft.com>
22 *
23 */
24
25 #ifndef _HYPERV_H
26 #define _HYPERV_H
27
28 #include <uapi/linux/hyperv.h>
29 #include <uapi/asm/hyperv.h>
30
31 #include <linux/types.h>
32 #include <linux/scatterlist.h>
33 #include <linux/list.h>
34 #include <linux/timer.h>
35 #include <linux/workqueue.h>
36 #include <linux/completion.h>
37 #include <linux/device.h>
38 #include <linux/mod_devicetable.h>
39
40
41 #define MAX_PAGE_BUFFER_COUNT 32
42 #define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */
43
44 #pragma pack(push, 1)
45
46 /* Single-page buffer */
47 struct hv_page_buffer {
48 u32 len;
49 u32 offset;
50 u64 pfn;
51 };
52
53 /* Multiple-page buffer */
54 struct hv_multipage_buffer {
55 /* Length and Offset determine the # of pfns in the array */
56 u32 len;
57 u32 offset;
58 u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
59 };
60
61 /*
62 * Multiple-page buffer array; the pfn array is variable size:
63 * The number of entries in the PFN array is determined by
64 * "len" and "offset".
65 */
66 struct hv_mpb_array {
67 /* Length and Offset determine the # of pfns in the array */
68 u32 len;
69 u32 offset;
70 u64 pfn_array[];
71 };
72
73 /* 0x18 includes the proprietary packet header */
74 #define MAX_PAGE_BUFFER_PACKET (0x18 + \
75 (sizeof(struct hv_page_buffer) * \
76 MAX_PAGE_BUFFER_COUNT))
77 #define MAX_MULTIPAGE_BUFFER_PACKET (0x18 + \
78 sizeof(struct hv_multipage_buffer))
79
80
81 #pragma pack(pop)
82
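/*
 * Illustrative sketch (not part of this header's API): filling an
 * hv_page_buffer to describe up to one page of a kernel-virtual
 * buffer. Assumes virt_to_phys(), offset_in_page() and PAGE_SHIFT
 * are visible to the includer (linux/mm.h, asm/io.h);
 * example_fill_page_buffer() itself is an assumption.
 */
static inline void example_fill_page_buffer(struct hv_page_buffer *pb,
					    void *va, u32 len)
{
	pb->pfn = virt_to_phys(va) >> PAGE_SHIFT;
	pb->offset = offset_in_page(va);
	pb->len = len;
}
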
83 struct hv_ring_buffer {
84 /* Offset in bytes from the start of ring data below */
85 u32 write_index;
86
87 /* Offset in bytes from the start of ring data below */
88 u32 read_index;
89
90 u32 interrupt_mask;
91
92 /*
93 * Win8 uses some of the reserved bits to implement
94 * interrupt driven flow management. On the send side
95 * we can request that the receiver interrupt the sender
96 * when the ring transitions from being full to being able
97 * to handle a message of size "pending_send_sz".
98 *
99 * Add necessary state for this enhancement.
100 */
101 u32 pending_send_sz;
102
103 u32 reserved1[12];
104
105 union {
106 struct {
107 u32 feat_pending_send_sz:1;
108 };
109 u32 value;
110 } feature_bits;
111
112 /* Pad it to PAGE_SIZE so that data starts on page boundary */
113 u8 reserved2[4028];
114
115 /*
116 * Ring data starts here + RingDataStartOffset
117 * !!! DO NOT place any fields below this !!!
118 */
119 u8 buffer[0];
120 } __packed;
121
122 struct hv_ring_buffer_info {
123 struct hv_ring_buffer *ring_buffer;
124 u32 ring_size; /* Includes the shared header */
125 spinlock_t ring_lock;
126
127 u32 ring_datasize; /* < ring_size */
128 u32 ring_data_startoffset;
129 };
130
131 /*
132 *
133 * hv_get_ringbuffer_availbytes()
134 *
135 * Get the number of bytes available to read from, and to write
136 * to, the specified ring buffer.
137 */
138 static inline void
139 hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
140 u32 *read, u32 *write)
141 {
142 u32 read_loc, write_loc, dsize;
143
144 /* Capture the read/write indices before they change */
145 read_loc = rbi->ring_buffer->read_index;
146 write_loc = rbi->ring_buffer->write_index;
147 dsize = rbi->ring_datasize;
148
149 *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
150 read_loc - write_loc;
151 *read = dsize - *write;
152 }
153
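/*
 * Illustrative sketch (not part of this header's API): how a driver
 * might use the snapshot above to poll for readable data;
 * example_ring_has_data() is an assumption. Note that *read and
 * *write always sum to ring_datasize.
 */
static inline bool example_ring_has_data(struct hv_ring_buffer_info *rbi)
{
	u32 read_avail, write_avail;

	hv_get_ringbuffer_availbytes(rbi, &read_avail, &write_avail);
	return read_avail > 0;
}
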
154 /*
155 * The VMBUS version is a 32 bit entity broken up into
156 * two 16 bit quantities: major_number.minor_number.
157 *
158 * 0 . 13 (Windows Server 2008)
159 * 1 . 1 (Windows 7)
160 * 2 . 4 (Windows 8)
161 * 3 . 0 (Windows 8.1)
162 * 4 . 0 (Windows 10)
163 */
164
165 #define VERSION_WS2008 ((0 << 16) | (13))
166 #define VERSION_WIN7 ((1 << 16) | (1))
167 #define VERSION_WIN8 ((2 << 16) | (4))
168 #define VERSION_WIN8_1 ((3 << 16) | (0))
169 #define VERSION_WIN10 ((4 << 16) | (0))
170
171 #define VERSION_INVAL -1
172
173 #define VERSION_CURRENT VERSION_WIN10
174
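/*
 * Illustrative sketch: hypothetical helpers (not defined by this
 * header) for unpacking the major/minor halves of a version word
 * such as vmbus_proto_version; the EXAMPLE_ names are assumptions.
 */
#define EXAMPLE_VERSION_MAJOR(ver) (((ver) >> 16) & 0xffff)
#define EXAMPLE_VERSION_MINOR(ver) ((ver) & 0xffff)
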
175 /* Maximum size of a pipe payload is 16K */
176 #define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
177
178 /* Define PipeMode values. */
179 #define VMBUS_PIPE_TYPE_BYTE 0x00000000
180 #define VMBUS_PIPE_TYPE_MESSAGE 0x00000004
181
182 /* The size of the user defined data buffer for non-pipe offers. */
183 #define MAX_USER_DEFINED_BYTES 120
184
185 /* The size of the user defined data buffer for pipe offers. */
186 #define MAX_PIPE_USER_DEFINED_BYTES 116
187
188 /*
189 * At the center of the Channel Management library is the Channel Offer. This
190 * struct contains the fundamental information about an offer.
191 */
192 struct vmbus_channel_offer {
193 uuid_le if_type;
194 uuid_le if_instance;
195
196 /*
197 * These two fields are not currently used.
198 */
199 u64 reserved1;
200 u64 reserved2;
201
202 u16 chn_flags;
203 u16 mmio_megabytes; /* in megabytes (1MB = 1024 * 1024 bytes) */
204
205 union {
206 /* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
207 struct {
208 unsigned char user_def[MAX_USER_DEFINED_BYTES];
209 } std;
210
211 /*
212 * Pipes:
213 * The following structure is an integrated pipe protocol, which
214 * is implemented on top of standard user-defined data. Pipe
215 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
216 * use.
217 */
218 struct {
219 u32 pipe_mode;
220 unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
221 } pipe;
222 } u;
223 /*
224 * The sub_channel_index is defined in win8.
225 */
226 u16 sub_channel_index;
227 u16 reserved3;
228 } __packed;
229
230 /* Server Flags */
231 #define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 1
232 #define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES 2
233 #define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS 4
234 #define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x10
235 #define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100
236 #define VMBUS_CHANNEL_PARENT_OFFER 0x200
237 #define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400
238 #define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 0x2000
239
240 struct vmpacket_descriptor {
241 u16 type;
242 u16 offset8;
243 u16 len8;
244 u16 flags;
245 u64 trans_id;
246 } __packed;
247
248 struct vmpacket_header {
249 u32 prev_pkt_start_offset;
250 struct vmpacket_descriptor descriptor;
251 } __packed;
252
253 struct vmtransfer_page_range {
254 u32 byte_count;
255 u32 byte_offset;
256 } __packed;
257
258 struct vmtransfer_page_packet_header {
259 struct vmpacket_descriptor d;
260 u16 xfer_pageset_id;
261 u8 sender_owns_set;
262 u8 reserved;
263 u32 range_cnt;
264 struct vmtransfer_page_range ranges[1];
265 } __packed;
266
267 struct vmgpadl_packet_header {
268 struct vmpacket_descriptor d;
269 u32 gpadl;
270 u32 reserved;
271 } __packed;
272
273 struct vmadd_remove_transfer_page_set {
274 struct vmpacket_descriptor d;
275 u32 gpadl;
276 u16 xfer_pageset_id;
277 u16 reserved;
278 } __packed;
279
280 /*
281 * This structure defines a range in guest physical space that can be made to
282 * look virtually contiguous.
283 */
284 struct gpa_range {
285 u32 byte_count;
286 u32 byte_offset;
287 u64 pfn_array[0];
288 };
289
290 /*
291 * This is the format for an Establish Gpadl packet, which contains a handle by
292 * which this GPADL will be known and a set of GPA ranges associated with it.
293 * This can be converted to a MDL by the guest OS. If there are multiple GPA
294 * ranges, then the resulting MDL will be "chained," representing multiple VA
295 * ranges.
296 */
297 struct vmestablish_gpadl {
298 struct vmpacket_descriptor d;
299 u32 gpadl;
300 u32 range_cnt;
301 struct gpa_range range[1];
302 } __packed;
303
304 /*
305 * This is the format for a Teardown Gpadl packet, which indicates that the
306 * GPADL handle in the Establish Gpadl packet will never be referenced again.
307 */
308 struct vmteardown_gpadl {
309 struct vmpacket_descriptor d;
310 u32 gpadl;
311 u32 reserved; /* for alignment to an 8-byte boundary */
312 } __packed;
313
314 /*
315 * This is the format for a GPA-Direct packet, which contains a set of GPA
316 * ranges, in addition to commands and/or data.
317 */
318 struct vmdata_gpa_direct {
319 struct vmpacket_descriptor d;
320 u32 reserved;
321 u32 range_cnt;
322 struct gpa_range range[1];
323 } __packed;
324
325 /* This is the format for an Additional Data Packet. */
326 struct vmadditional_data {
327 struct vmpacket_descriptor d;
328 u64 total_bytes;
329 u32 offset;
330 u32 byte_cnt;
331 unsigned char data[1];
332 } __packed;
333
334 union vmpacket_largest_possible_header {
335 struct vmpacket_descriptor simple_hdr;
336 struct vmtransfer_page_packet_header xfer_page_hdr;
337 struct vmgpadl_packet_header gpadl_hdr;
338 struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
339 struct vmestablish_gpadl establish_gpadl_hdr;
340 struct vmteardown_gpadl teardown_gpadl_hdr;
341 struct vmdata_gpa_direct data_gpa_direct_hdr;
342 };
343
344 #define VMPACKET_DATA_START_ADDRESS(__packet) \
345 (void *)(((unsigned char *)(__packet)) + \
346 ((struct vmpacket_descriptor *)(__packet))->offset8 * 8)
347
348 #define VMPACKET_DATA_LENGTH(__packet) \
349 ((((struct vmpacket_descriptor *)(__packet))->len8 - \
350 ((struct vmpacket_descriptor *)(__packet))->offset8) * 8)
351
352 #define VMPACKET_TRANSFER_MODE(__packet) \
353 (((struct vmpacket_descriptor *)(__packet))->type)
354
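/*
 * Illustrative sketch: reading a received packet's payload bounds
 * with the accessor macros above; example_payload_bounds() is an
 * assumption. offset8/len8 are in units of 8 bytes, hence the
 * "* 8" in the macros.
 */
static inline void example_payload_bounds(struct vmpacket_descriptor *desc,
					  void **data, u32 *len)
{
	*data = VMPACKET_DATA_START_ADDRESS(desc);
	*len = VMPACKET_DATA_LENGTH(desc);
}
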
355 enum vmbus_packet_type {
356 VM_PKT_INVALID = 0x0,
357 VM_PKT_SYNCH = 0x1,
358 VM_PKT_ADD_XFER_PAGESET = 0x2,
359 VM_PKT_RM_XFER_PAGESET = 0x3,
360 VM_PKT_ESTABLISH_GPADL = 0x4,
361 VM_PKT_TEARDOWN_GPADL = 0x5,
362 VM_PKT_DATA_INBAND = 0x6,
363 VM_PKT_DATA_USING_XFER_PAGES = 0x7,
364 VM_PKT_DATA_USING_GPADL = 0x8,
365 VM_PKT_DATA_USING_GPA_DIRECT = 0x9,
366 VM_PKT_CANCEL_REQUEST = 0xa,
367 VM_PKT_COMP = 0xb,
368 VM_PKT_DATA_USING_ADDITIONAL_PKT = 0xc,
369 VM_PKT_ADDITIONAL_DATA = 0xd
370 };
371
372 #define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED 1
373
374
375 /* Version 1 messages */
376 enum vmbus_channel_message_type {
377 CHANNELMSG_INVALID = 0,
378 CHANNELMSG_OFFERCHANNEL = 1,
379 CHANNELMSG_RESCIND_CHANNELOFFER = 2,
380 CHANNELMSG_REQUESTOFFERS = 3,
381 CHANNELMSG_ALLOFFERS_DELIVERED = 4,
382 CHANNELMSG_OPENCHANNEL = 5,
383 CHANNELMSG_OPENCHANNEL_RESULT = 6,
384 CHANNELMSG_CLOSECHANNEL = 7,
385 CHANNELMSG_GPADL_HEADER = 8,
386 CHANNELMSG_GPADL_BODY = 9,
387 CHANNELMSG_GPADL_CREATED = 10,
388 CHANNELMSG_GPADL_TEARDOWN = 11,
389 CHANNELMSG_GPADL_TORNDOWN = 12,
390 CHANNELMSG_RELID_RELEASED = 13,
391 CHANNELMSG_INITIATE_CONTACT = 14,
392 CHANNELMSG_VERSION_RESPONSE = 15,
393 CHANNELMSG_UNLOAD = 16,
394 CHANNELMSG_UNLOAD_RESPONSE = 17,
395 CHANNELMSG_18 = 18,
396 CHANNELMSG_19 = 19,
397 CHANNELMSG_20 = 20,
398 CHANNELMSG_TL_CONNECT_REQUEST = 21,
399 CHANNELMSG_COUNT
400 };
401
402 struct vmbus_channel_message_header {
403 enum vmbus_channel_message_type msgtype;
404 u32 padding;
405 } __packed;
406
407 /* Query VMBus Version parameters */
408 struct vmbus_channel_query_vmbus_version {
409 struct vmbus_channel_message_header header;
410 u32 version;
411 } __packed;
412
413 /* VMBus Version Supported parameters */
414 struct vmbus_channel_version_supported {
415 struct vmbus_channel_message_header header;
416 u8 version_supported;
417 } __packed;
418
419 /* Offer Channel parameters */
420 struct vmbus_channel_offer_channel {
421 struct vmbus_channel_message_header header;
422 struct vmbus_channel_offer offer;
423 u32 child_relid;
424 u8 monitorid;
425 /*
426 * win7 and beyond split this field into a bit field.
427 */
428 u8 monitor_allocated:1;
429 u8 reserved:7;
430 /*
431 * These are new fields added in win7 and later.
432 * Do not access these fields without checking the
433 * negotiated protocol.
434 *
435 * If "is_dedicated_interrupt" is set, we must not set the
436 * associated bit in the channel bitmap while sending the
437 * interrupt to the host.
438 *
439 * connection_id is to be used in signaling the host.
440 */
441 u16 is_dedicated_interrupt:1;
442 u16 reserved1:15;
443 u32 connection_id;
444 } __packed;
445
446 /* Rescind Offer parameters */
447 struct vmbus_channel_rescind_offer {
448 struct vmbus_channel_message_header header;
449 u32 child_relid;
450 } __packed;
451
452 /*
453 * Request Offer -- no parameters, SynIC message contains the partition ID
454 * Set Snoop -- no parameters, SynIC message contains the partition ID
455 * Clear Snoop -- no parameters, SynIC message contains the partition ID
456 * All Offers Delivered -- no parameters, SynIC message contains the partition
457 * ID
458 * Flush Client -- no parameters, SynIC message contains the partition ID
459 */
460
461 /* Open Channel parameters */
462 struct vmbus_channel_open_channel {
463 struct vmbus_channel_message_header header;
464
465 /* Identifies the specific VMBus channel that is being opened. */
466 u32 child_relid;
467
468 /* ID making a particular open request at a channel offer unique. */
469 u32 openid;
470
471 /* GPADL for the channel's ring buffer. */
472 u32 ringbuffer_gpadlhandle;
473
474 /*
475 * Starting with win8, this field will be used to specify
476 * the target virtual processor on which to deliver the interrupt for
477 * the host to guest communication.
478 * Prior to win8, incoming channel interrupts would only
479 * be delivered on cpu 0. Setting this value to 0 would
480 * preserve the earlier behavior.
481 */
482 u32 target_vp;
483
484 /*
485 * The upstream ring buffer begins at offset zero in the memory
486 * described by RingBufferGpadlHandle. The downstream ring buffer
487 * follows it at this offset (in pages).
488 */
489 u32 downstream_ringbuffer_pageoffset;
490
491 /* User-specific data to be passed along to the server endpoint. */
492 unsigned char userdata[MAX_USER_DEFINED_BYTES];
493 } __packed;
494
495 /* Open Channel Result parameters */
496 struct vmbus_channel_open_result {
497 struct vmbus_channel_message_header header;
498 u32 child_relid;
499 u32 openid;
500 u32 status;
501 } __packed;
502
503 /* Close channel parameters. */
504 struct vmbus_channel_close_channel {
505 struct vmbus_channel_message_header header;
506 u32 child_relid;
507 } __packed;
508
509 /* Channel Message GPADL */
510 #define GPADL_TYPE_RING_BUFFER 1
511 #define GPADL_TYPE_SERVER_SAVE_AREA 2
512 #define GPADL_TYPE_TRANSACTION 8
513
514 /*
515 * The number of PFNs in a GPADL message is defined by the number of
516 * pages that would be spanned by ByteCount and ByteOffset. If the
517 * implied number of PFNs won't fit in this packet, there will be a
518 * follow-up packet that contains more.
519 */
520 struct vmbus_channel_gpadl_header {
521 struct vmbus_channel_message_header header;
522 u32 child_relid;
523 u32 gpadl;
524 u16 range_buflen;
525 u16 rangecount;
526 struct gpa_range range[0];
527 } __packed;
528
529 /* This is the follow-up packet that contains more PFNs. */
530 struct vmbus_channel_gpadl_body {
531 struct vmbus_channel_message_header header;
532 u32 msgnumber;
533 u32 gpadl;
534 u64 pfn[0];
535 } __packed;
536
537 struct vmbus_channel_gpadl_created {
538 struct vmbus_channel_message_header header;
539 u32 child_relid;
540 u32 gpadl;
541 u32 creation_status;
542 } __packed;
543
544 struct vmbus_channel_gpadl_teardown {
545 struct vmbus_channel_message_header header;
546 u32 child_relid;
547 u32 gpadl;
548 } __packed;
549
550 struct vmbus_channel_gpadl_torndown {
551 struct vmbus_channel_message_header header;
552 u32 gpadl;
553 } __packed;
554
555 struct vmbus_channel_relid_released {
556 struct vmbus_channel_message_header header;
557 u32 child_relid;
558 } __packed;
559
560 struct vmbus_channel_initiate_contact {
561 struct vmbus_channel_message_header header;
562 u32 vmbus_version_requested;
563 u32 target_vcpu; /* The VCPU the host should respond to */
564 u64 interrupt_page;
565 u64 monitor_page1;
566 u64 monitor_page2;
567 } __packed;
568
569 /* Hyper-V socket: guest's connect()-ing to host */
570 struct vmbus_channel_tl_connect_request {
571 struct vmbus_channel_message_header header;
572 uuid_le guest_endpoint_id;
573 uuid_le host_service_id;
574 } __packed;
575
576 struct vmbus_channel_version_response {
577 struct vmbus_channel_message_header header;
578 u8 version_supported;
579 } __packed;
580
581 enum vmbus_channel_state {
582 CHANNEL_OFFER_STATE,
583 CHANNEL_OPENING_STATE,
584 CHANNEL_OPEN_STATE,
585 CHANNEL_OPENED_STATE,
586 };
587
588 /*
589 * Represents each channel msg on the vmbus connection. This is a
590 * variable-size data structure, depending on the msg type itself.
591 */
592 struct vmbus_channel_msginfo {
593 /* Bookkeeping stuff */
594 struct list_head msglistentry;
595
596 /* So far, this is only used to handle gpadl body message */
597 struct list_head submsglist;
598
599 /* Synchronize the request/response if needed */
600 struct completion waitevent;
601 union {
602 struct vmbus_channel_version_supported version_supported;
603 struct vmbus_channel_open_result open_result;
604 struct vmbus_channel_gpadl_torndown gpadl_torndown;
605 struct vmbus_channel_gpadl_created gpadl_created;
606 struct vmbus_channel_version_response version_response;
607 } response;
608
609 u32 msgsize;
610 /*
611 * The channel message that goes out on the "wire".
612 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
613 */
614 unsigned char msg[0];
615 };
616
617 struct vmbus_close_msg {
618 struct vmbus_channel_msginfo info;
619 struct vmbus_channel_close_channel msg;
620 };
621
622 /* Define connection identifier type. */
623 union hv_connection_id {
624 u32 asu32;
625 struct {
626 u32 id:24;
627 u32 reserved:8;
628 } u;
629 };
630
631 /* Definition of the hv_signal_event hypercall input structure. */
632 struct hv_input_signal_event {
633 union hv_connection_id connectionid;
634 u16 flag_number;
635 u16 rsvdz;
636 };
637
638 struct hv_input_signal_event_buffer {
639 u64 align8;
640 struct hv_input_signal_event event;
641 };
642
643 enum hv_signal_policy {
644 HV_SIGNAL_POLICY_DEFAULT = 0,
645 HV_SIGNAL_POLICY_EXPLICIT,
646 };
647
648 enum vmbus_device_type {
649 HV_IDE = 0,
650 HV_SCSI,
651 HV_FC,
652 HV_NIC,
653 HV_ND,
654 HV_PCIE,
655 HV_FB,
656 HV_KBD,
657 HV_MOUSE,
658 HV_KVP,
659 HV_TS,
660 HV_HB,
661 HV_SHUTDOWN,
662 HV_FCOPY,
663 HV_BACKUP,
664 HV_DM,
665 HV_UNKOWN,
666 };
667
668 struct vmbus_device {
669 u16 dev_type;
670 uuid_le guid;
671 bool perf_device;
672 };
673
674 struct vmbus_channel {
675 /* Unique channel id */
676 int id;
677
678 struct list_head listentry;
679
680 struct hv_device *device_obj;
681
682 enum vmbus_channel_state state;
683
684 struct vmbus_channel_offer_channel offermsg;
685 /*
686 * These are based on the OfferMsg.MonitorId.
687 * Save it here for easy access.
688 */
689 u8 monitor_grp;
690 u8 monitor_bit;
691
692 bool rescind; /* got rescind msg */
693
694 u32 ringbuffer_gpadlhandle;
695
696 /* Allocated memory for ring buffer */
697 void *ringbuffer_pages;
698 u32 ringbuffer_pagecount;
699 struct hv_ring_buffer_info outbound; /* send to parent */
700 struct hv_ring_buffer_info inbound; /* receive from parent */
701 spinlock_t inbound_lock;
702
703 struct vmbus_close_msg close_msg;
704
705 /* Channel callbacks are invoked in this workqueue context */
706 /* HANDLE dataWorkQueue; */
707
708 void (*onchannel_callback)(void *context);
709 void *channel_callback_context;
710
711 /*
712 * A channel can be marked for efficient (batched)
713 * reading:
714 * If batched_reading is set to "true", we read until the
715 * channel is empty and hold off interrupts from the host
716 * during the entire read process.
717 * If batched_reading is set to "false", the client is not
718 * going to perform batched reading.
719 *
720 * By default we will enable batched reading; specific
721 * drivers that don't want this behavior can turn it off.
722 */
723
724 bool batched_reading;
725
726 bool is_dedicated_interrupt;
727 struct hv_input_signal_event_buffer sig_buf;
728 struct hv_input_signal_event *sig_event;
729
730 /*
731 * Starting with win8, this field will be used to specify
732 * the target virtual processor on which to deliver the interrupt for
733 * the host to guest communication.
734 * Prior to win8, incoming channel interrupts would only
735 * be delivered on cpu 0. Setting this value to 0 would
736 * preserve the earlier behavior.
737 */
738 u32 target_vp;
739 /* The corresponding CPUID in the guest */
740 u32 target_cpu;
741 /*
742 * State to manage the CPU affiliation of channels.
743 */
744 struct cpumask alloced_cpus_in_node;
745 int numa_node;
746 /*
747 * Support for sub-channels. For high performance devices,
748 * it will be useful to have multiple sub-channels to support
749 * a scalable communication infrastructure with the host.
750 * The support for sub-channels is implemented as an extension
751 * to the current infrastructure.
752 * The initial offer is considered the primary channel and this
753 * offer message will indicate if the host supports sub-channels.
754 * The guest is free to ask for sub-channels to be offered and can
755 * open these sub-channels as a normal "primary" channel. However,
756 * all sub-channels will have the same type and instance guids as the
757 * primary channel. Requests sent on a given channel will result in a
758 * response on the same channel.
759 */
760
761 /*
762 * Sub-channel creation callback. This callback will be called in
763 * process context when a sub-channel offer is received from the host.
764 * The guest can open the sub-channel in the context of this callback.
765 */
766 void (*sc_creation_callback)(struct vmbus_channel *new_sc);
767
768 /*
769 * Channel rescind callback. Some channels (the hvsock ones), need to
770 * register a callback which is invoked in vmbus_onoffer_rescind().
771 */
772 void (*chn_rescind_callback)(struct vmbus_channel *channel);
773
774 /*
775 * The spinlock to protect the structure. It is being used to protect
776 * test-and-set access to various attributes of the structure as well
777 * as all sc_list operations.
778 */
779 spinlock_t lock;
780 /*
781 * All Sub-channels of a primary channel are linked here.
782 */
783 struct list_head sc_list;
784 /*
785 * Current number of sub-channels.
786 */
787 int num_sc;
788 /*
789 * Number of a sub-channel (position within sc_list) which is supposed
790 * to be used as the next outgoing channel.
791 */
792 int next_oc;
793 /*
794 * The primary channel this sub-channel belongs to.
795 * This will be NULL for the primary channel.
796 */
797 struct vmbus_channel *primary_channel;
798 /*
799 * Support per-channel state for use by vmbus drivers.
800 */
801 void *per_channel_state;
802 /*
803 * To support per-cpu lookup mapping of relid to channel,
804 * link up channels based on their CPU affinity.
805 */
806 struct list_head percpu_list;
807 /*
808 * Host signaling policy: The default policy will be
809 * based on the ring buffer state. We will also support
810 * a policy where the client driver can have explicit
811 * signaling control.
812 */
813 enum hv_signal_policy signal_policy;
814 /*
815 * On the channel send side, many of the VMBUS
816 * device drivers explicitly serialize access to the
817 * outgoing ring buffer. Give more control to the
818 * VMBUS device drivers in terms of how to serialize
819 * access to the outgoing ring buffer.
820 * The default behavior will be to acquire the
821 * ring lock to preserve the current behavior.
822 */
823 bool acquire_ring_lock;
824
825 };
826
827 static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
828 {
829 c->acquire_ring_lock = state;
830 }
831
832 static inline bool is_hvsock_channel(const struct vmbus_channel *c)
833 {
834 return !!(c->offermsg.offer.chn_flags &
835 VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
836 }
837
838 static inline void set_channel_signal_state(struct vmbus_channel *c,
839 enum hv_signal_policy policy)
840 {
841 c->signal_policy = policy;
842 }
843
844 static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
845 {
846 c->batched_reading = state;
847 }
848
849 static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
850 {
851 c->per_channel_state = s;
852 }
853
854 static inline void *get_per_channel_state(struct vmbus_channel *c)
855 {
856 return c->per_channel_state;
857 }
858
859 static inline void set_channel_pending_send_size(struct vmbus_channel *c,
860 u32 size)
861 {
862 c->outbound.ring_buffer->pending_send_sz = size;
863 }
864
865 void vmbus_onmessage(void *context);
866
867 int vmbus_request_offers(void);
868
869 /*
870 * APIs for managing sub-channels.
871 */
872
873 void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
874 void (*sc_cr_cb)(struct vmbus_channel *new_sc));
875
876 void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
877 void (*chn_rescind_cb)(struct vmbus_channel *));
878
879 /*
880 * Retrieve the (sub) channel on which to send an outgoing request.
881 * When a primary channel has multiple sub-channels, we choose a
882 * channel whose VCPU binding is closest to the VCPU on which
883 * this call is being made.
884 */
885 struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary);
886
887 /*
888 * Check if sub-channels have already been offered. This API will be useful
889 * when the driver is unloaded after establishing sub-channels. In this case,
890 * when the driver is re-loaded, the driver would have to check if the
891 * subchannels have already been established before attempting to request
892 * the creation of sub-channels.
893 * This function returns TRUE to indicate that subchannels have already been
894 * created.
895 * This function should be invoked after setting the callback function for
896 * sub-channel creation.
897 */
898 bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
899
900 /* The format must be the same as struct vmdata_gpa_direct */
901 struct vmbus_channel_packet_page_buffer {
902 u16 type;
903 u16 dataoffset8;
904 u16 length8;
905 u16 flags;
906 u64 transactionid;
907 u32 reserved;
908 u32 rangecount;
909 struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
910 } __packed;
911
912 /* The format must be the same as struct vmdata_gpa_direct */
913 struct vmbus_channel_packet_multipage_buffer {
914 u16 type;
915 u16 dataoffset8;
916 u16 length8;
917 u16 flags;
918 u64 transactionid;
919 u32 reserved;
920 u32 rangecount; /* Always 1 in this case */
921 struct hv_multipage_buffer range;
922 } __packed;
923
924 /* The format must be the same as struct vmdata_gpa_direct */
925 struct vmbus_packet_mpb_array {
926 u16 type;
927 u16 dataoffset8;
928 u16 length8;
929 u16 flags;
930 u64 transactionid;
931 u32 reserved;
932 u32 rangecount; /* Always 1 in this case */
933 struct hv_mpb_array range;
934 } __packed;
935
936
937 extern int vmbus_open(struct vmbus_channel *channel,
938 u32 send_ringbuffersize,
939 u32 recv_ringbuffersize,
940 void *userdata,
941 u32 userdatalen,
942 void(*onchannel_callback)(void *context),
943 void *context);
944
945 extern void vmbus_close(struct vmbus_channel *channel);
946
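/*
 * Illustrative sketch: opening a channel with 4-page ring buffers in
 * each direction and no per-open user data. The example_* names are
 * assumptions, and PAGE_SIZE is assumed to be visible via the
 * includes above; vmbus_close() undoes the open.
 */
static void example_onchannel_callback(void *context)
{
	/* drain packets from the channel here (see vmbus_recvpacket) */
}

static inline int example_open(struct vmbus_channel *chan)
{
	return vmbus_open(chan, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
			  example_onchannel_callback, chan);
}
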
947 extern int vmbus_sendpacket(struct vmbus_channel *channel,
948 void *buffer,
949 u32 bufferlen,
950 u64 requestid,
951 enum vmbus_packet_type type,
952 u32 flags);
953
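/*
 * Illustrative sketch: sending an in-band packet and requesting a
 * VM_PKT_COMP completion from the host; example_send() is an
 * assumption. requestid is echoed back in the completion packet.
 */
static inline int example_send(struct vmbus_channel *chan, void *data,
			       u32 len, u64 requestid)
{
	return vmbus_sendpacket(chan, data, len, requestid,
				VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}
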
954 extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
955 void *buffer,
956 u32 bufferlen,
957 u64 requestid,
958 enum vmbus_packet_type type,
959 u32 flags,
960 bool kick_q);
961
962 extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
963 struct hv_page_buffer pagebuffers[],
964 u32 pagecount,
965 void *buffer,
966 u32 bufferlen,
967 u64 requestid);
968
969 extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
970 struct hv_page_buffer pagebuffers[],
971 u32 pagecount,
972 void *buffer,
973 u32 bufferlen,
974 u64 requestid,
975 u32 flags,
976 bool kick_q);
977
978 extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
979 struct hv_multipage_buffer *mpb,
980 void *buffer,
981 u32 bufferlen,
982 u64 requestid);
983
984 extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
985 struct vmbus_packet_mpb_array *mpb,
986 u32 desc_size,
987 void *buffer,
988 u32 bufferlen,
989 u64 requestid);
990
991 extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
992 void *kbuffer,
993 u32 size,
994 u32 *gpadl_handle);
995
996 extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
997 u32 gpadl_handle);
998
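/*
 * Illustrative sketch: publishing a kernel buffer to the host as a
 * GPADL and tearing it down again; example_gpadl_roundtrip() is an
 * assumption, and "size" is assumed to be page aligned.
 */
static inline int example_gpadl_roundtrip(struct vmbus_channel *chan,
					  void *kbuffer, u32 size)
{
	u32 handle;
	int ret;

	ret = vmbus_establish_gpadl(chan, kbuffer, size, &handle);
	if (ret)
		return ret;

	/* ... hand "handle" to the host in a device-specific message ... */

	return vmbus_teardown_gpadl(chan, handle);
}
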
999 extern int vmbus_recvpacket(struct vmbus_channel *channel,
1000 void *buffer,
1001 u32 bufferlen,
1002 u32 *buffer_actual_len,
1003 u64 *requestid);
1004
1005 extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
1006 void *buffer,
1007 u32 bufferlen,
1008 u32 *buffer_actual_len,
1009 u64 *requestid);
1010
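/*
 * Illustrative sketch: draining readable packets from a channel
 * callback; example_drain() is an assumption, and a zero return
 * with no payload is treated as ring-empty in this sketch.
 */
static inline void example_drain(struct vmbus_channel *chan, void *buf,
				 u32 buflen)
{
	u32 actual;
	u64 requestid;

	while (vmbus_recvpacket(chan, buf, buflen, &actual, &requestid) == 0 &&
	       actual > 0) {
		/* process "actual" bytes of payload in buf */
	}
}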
1011
1012 extern void vmbus_ontimer(unsigned long data);
1013
1014 /* Base driver object */
1015 struct hv_driver {
1016 const char *name;
1017
1018 /*
1019 * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
1020 * channel flag, actually doesn't mean a synthetic device because the
1021 * offer's if_type/if_instance can change for every new hvsock
1022 * connection.
1023 *
1024 * However, to facilitate the notification of new-offer/rescind-offer
1025 * from vmbus driver to hvsock driver, we can handle hvsock offer as
1026 * a special vmbus device, and hence we need the below flag to
1027 * indicate if the driver is the hvsock driver or not: we need to
1028 * specially treat the hvsock offer & driver in vmbus_match().
1029 */
1030 bool hvsock;
1031
1032 /* the device type supported by this driver */
1033 uuid_le dev_type;
1034 const struct hv_vmbus_device_id *id_table;
1035
1036 struct device_driver driver;
1037
1038 int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
1039 int (*remove)(struct hv_device *);
1040 void (*shutdown)(struct hv_device *);
1041
1042 };
1043
1044 /* Base device object */
1045 struct hv_device {
1046 /* the device type id of this device */
1047 uuid_le dev_type;
1048
1049 /* the device instance id of this device */
1050 uuid_le dev_instance;
1051 u16 vendor_id;
1052 u16 device_id;
1053
1054 struct device device;
1055
1056 struct vmbus_channel *channel;
1057 };
1058
1059
1060 static inline struct hv_device *device_to_hv_device(struct device *d)
1061 {
1062 return container_of(d, struct hv_device, device);
1063 }
1064
1065 static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
1066 {
1067 return container_of(d, struct hv_driver, driver);
1068 }
1069
1070 static inline void hv_set_drvdata(struct hv_device *dev, void *data)
1071 {
1072 dev_set_drvdata(&dev->device, data);
1073 }
1074
1075 static inline void *hv_get_drvdata(struct hv_device *dev)
1076 {
1077 return dev_get_drvdata(&dev->device);
1078 }
1079
1080 /* Vmbus interface */
1081 #define vmbus_driver_register(driver) \
1082 __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
1083 int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
1084 struct module *owner,
1085 const char *mod_name);
1086 void vmbus_driver_unregister(struct hv_driver *hv_driver);
1087
1088 void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
1089
1090 int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
1091 resource_size_t min, resource_size_t max,
1092 resource_size_t size, resource_size_t align,
1093 bool fb_overlap_ok);
1094
1095 int vmbus_cpu_number_to_vp_number(int cpu_number);
1096 u64 hv_do_hypercall(u64 control, void *input, void *output);
1097
1098 /*
1099 * GUID definitions of various offer types - services offered to the guest.
1100 */
1101
1102 /*
1103 * Network GUID
1104 * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
1105 */
1106 #define HV_NIC_GUID \
1107 .guid = UUID_LE(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
1108 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)
1109
1110 /*
1111 * IDE GUID
1112 * {32412632-86cb-44a2-9b5c-50d1417354f5}
1113 */
1114 #define HV_IDE_GUID \
1115 .guid = UUID_LE(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
1116 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
1117
1118 /*
1119 * SCSI GUID
1120 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
1121 */
1122 #define HV_SCSI_GUID \
1123 .guid = UUID_LE(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
1124 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
1125
1126 /*
1127 * Shutdown GUID
1128 * {0e0b6031-5213-4934-818b-38d90ced39db}
1129 */
1130 #define HV_SHUTDOWN_GUID \
1131 .guid = UUID_LE(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
1132 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)
1133
1134 /*
1135 * Time Synch GUID
1136 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
1137 */
1138 #define HV_TS_GUID \
1139 .guid = UUID_LE(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
1140 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
1141
1142 /*
1143 * Heartbeat GUID
1144 * {57164f39-9115-4e78-ab55-382f3bd5422d}
1145 */
1146 #define HV_HEART_BEAT_GUID \
1147 .guid = UUID_LE(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
1148 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
1149
1150 /*
1151 * KVP GUID
1152 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
1153 */
1154 #define HV_KVP_GUID \
1155 .guid = UUID_LE(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
1156 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)
1157
1158 /*
1159 * Dynamic memory GUID
1160 * {525074dc-8985-46e2-8057-a307dc18a502}
1161 */
1162 #define HV_DM_GUID \
1163 .guid = UUID_LE(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
1164 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
1165
1166 /*
1167 * Mouse GUID
1168 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
1169 */
1170 #define HV_MOUSE_GUID \
1171 .guid = UUID_LE(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
1172 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)
1173
1174 /*
1175 * Keyboard GUID
1176 * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
1177 */
1178 #define HV_KBD_GUID \
1179 .guid = UUID_LE(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
1180 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)
1181
1182 /*
1183 * VSS (Backup/Restore) GUID
1184 */
1185 #define HV_VSS_GUID \
1186 .guid = UUID_LE(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
1187 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
1188 /*
1189 * Synthetic Video GUID
1190 * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
1191 */
1192 #define HV_SYNTHVID_GUID \
1193 .guid = UUID_LE(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
1194 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)
1195
1196 /*
1197 * Synthetic FC GUID
1198 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
1199 */
1200 #define HV_SYNTHFC_GUID \
1201 .guid = UUID_LE(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
1202 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)
1203
1204 /*
1205 * Guest File Copy Service
1206 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
1207 */
1208
1209 #define HV_FCOPY_GUID \
1210 .guid = UUID_LE(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
1211 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)
1212
1213 /*
1214 * NetworkDirect. This is the guest RDMA service.
1215 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
1216 */
1217 #define HV_ND_GUID \
1218 .guid = UUID_LE(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
1219 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)
1220
1221 /*
1222 * PCI Express Pass Through
1223 * {44C4F61D-4444-4400-9D52-802E27EDE19F}
1224 */
1225
1226 #define HV_PCIE_GUID \
1227 .guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
1228 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
1229
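/*
 * Illustrative sketch (not part of this header): registering a
 * driver for the synthetic NIC GUID above. All example_* names and
 * the probe/remove bodies are assumptions; see vmbus_open() and
 * vmbus_close() earlier in this header for channel setup.
 */
static const struct hv_vmbus_device_id example_id_table[] = {
	{ HV_NIC_GUID, },
	{ },
};

static int example_probe(struct hv_device *dev,
			 const struct hv_vmbus_device_id *dev_id)
{
	hv_set_drvdata(dev, dev->channel);	/* stash per-device state */
	return 0;				/* open dev->channel here */
}

static int example_remove(struct hv_device *dev)
{
	vmbus_close(dev->channel);
	return 0;
}

static struct hv_driver example_drv = {
	.name		= "example",
	.id_table	= example_id_table,
	.probe		= example_probe,
	.remove		= example_remove,
};

/* Module init would then call: vmbus_driver_register(&example_drv); */
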
1230 /*
1231 * Common header for Hyper-V ICs
1232 */
1233
1234 #define ICMSGTYPE_NEGOTIATE 0
1235 #define ICMSGTYPE_HEARTBEAT 1
1236 #define ICMSGTYPE_KVPEXCHANGE 2
1237 #define ICMSGTYPE_SHUTDOWN 3
1238 #define ICMSGTYPE_TIMESYNC 4
1239 #define ICMSGTYPE_VSS 5
1240
1241 #define ICMSGHDRFLAG_TRANSACTION 1
1242 #define ICMSGHDRFLAG_REQUEST 2
1243 #define ICMSGHDRFLAG_RESPONSE 4
1244
1245
1246 /*
1247 * While we want to handle util services as regular devices,
1248 * there is only one instance of each of these services; so
1249 * we statically allocate the service specific state.
1250 */
1251
1252 struct hv_util_service {
1253 u8 *recv_buffer;
1254 void *channel;
1255 void (*util_cb)(void *);
1256 int (*util_init)(struct hv_util_service *);
1257 void (*util_deinit)(void);
1258 };
1259
1260 struct vmbuspipe_hdr {
1261 u32 flags;
1262 u32 msgsize;
1263 } __packed;
1264
1265 struct ic_version {
1266 u16 major;
1267 u16 minor;
1268 } __packed;
1269
1270 struct icmsg_hdr {
1271 struct ic_version icverframe;
1272 u16 icmsgtype;
1273 struct ic_version icvermsg;
1274 u16 icmsgsize;
1275 u32 status;
1276 u8 ictransaction_id;
1277 u8 icflags;
1278 u8 reserved[2];
1279 } __packed;
1280
1281 struct icmsg_negotiate {
1282 u16 icframe_vercnt;
1283 u16 icmsg_vercnt;
1284 u32 reserved;
1285 struct ic_version icversion_data[1]; /* any size array */
1286 } __packed;
1287
1288 struct shutdown_msg_data {
1289 u32 reason_code;
1290 u32 timeout_seconds;
1291 u32 flags;
1292 u8 display_message[2048];
1293 } __packed;
1294
1295 struct heartbeat_msg_data {
1296 u64 seq_num;
1297 u32 reserved[8];
1298 } __packed;
1299
1300 /* Time Sync IC defs */
1301 #define ICTIMESYNCFLAG_PROBE 0
1302 #define ICTIMESYNCFLAG_SYNC 1
1303 #define ICTIMESYNCFLAG_SAMPLE 2
1304
1305 #ifdef __x86_64__
1306 #define WLTIMEDELTA 116444736000000000L /* in 100ns units */
1307 #else
1308 #define WLTIMEDELTA 116444736000000000LL
1309 #endif
1310
1311 struct ictimesync_data {
1312 u64 parenttime;
1313 u64 childtime;
1314 u64 roundtriptime;
1315 u8 flags;
1316 } __packed;
1317
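/*
 * Illustrative sketch: converting a Hyper-V time sample (100ns units
 * since 1601-01-01, per WLTIMEDELTA above) to nanoseconds since the
 * Unix epoch; example_hv_time_to_unix_ns() is an assumption.
 */
static inline u64 example_hv_time_to_unix_ns(u64 parenttime)
{
	return (parenttime - WLTIMEDELTA) * 100;
}
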
1318 struct hyperv_service_callback {
1319 u8 msg_type;
1320 char *log_msg;
1321 uuid_le data;
1322 struct vmbus_channel *channel;
1323 void (*callback) (void *context);
1324 };
1325
1326 #define MAX_SRV_VER 0x7ffffff
1327 extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
1328 struct icmsg_negotiate *negop,
1329 u8 *buf, int fw_version, int srv_version);
1330
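/*
 * Illustrative sketch: answering an ICMSGTYPE_NEGOTIATE request in a
 * util-service channel callback. example_negotiate() and the literal
 * framework/service version codes passed at the end are assumptions;
 * the header/negotiate offsets mirror the vmbuspipe_hdr + icmsg_hdr
 * layout defined above.
 */
static inline bool example_negotiate(u8 *recv_buf)
{
	struct icmsg_hdr *hdr = (struct icmsg_hdr *)(recv_buf +
				sizeof(struct vmbuspipe_hdr));
	struct icmsg_negotiate *neg = (struct icmsg_negotiate *)(recv_buf +
				sizeof(struct vmbuspipe_hdr) +
				sizeof(struct icmsg_hdr));

	return vmbus_prep_negotiate_resp(hdr, neg, recv_buf,
					 3 << 16, 3 << 16);
}
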
1331 void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
1332
1333 /*
1334 * Negotiated version with the Host.
1335 */
1336
1337 extern __u32 vmbus_proto_version;
1338
1339 int vmbus_send_tl_connect_request(const uuid_le *shv_guest_service_id,
1340 const uuid_le *shv_host_service_id);
1341 #endif /* _HYPERV_H */