/*
 *
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */

#ifndef _HYPERV_H
#define _HYPERV_H

#include <uapi/linux/hyperv.h>
#include <uapi/asm/hyperv.h>

#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>


#define MAX_PAGE_BUFFER_COUNT			32
#define MAX_MULTIPAGE_BUFFER_COUNT		32 /* 128K */

#pragma pack(push, 1)

/* Single-page buffer */
struct hv_page_buffer {
	u32 len;
	u32 offset;
	u64 pfn;
};

/* Multiple-page buffer */
struct hv_multipage_buffer {
	/* Length and Offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};

/*
 * Multiple-page buffer array; the pfn array is variable size:
 * The number of entries in the PFN array is determined by
 * "len" and "offset".
 */
struct hv_mpb_array {
	/* Length and Offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[];
};

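/*
 * Example (illustrative sketch only, not part of the VMBUS ABI): how a
 * hypothetical driver might describe a physically contiguous kernel buffer
 * with hv_page_buffer entries, e.g. before handing them to
 * vmbus_sendpacket_pagebuffer(). The function name and the assumption of a
 * physically contiguous buffer are made up for illustration.
 *
 *	static u32 example_fill_page_buffers(void *data, u32 len,
 *					     struct hv_page_buffer *pb)
 *	{
 *		unsigned long pa = virt_to_phys(data);
 *		u32 first = offset_in_page(data);
 *		u32 count = 0;
 *
 *		while (len) {
 *			u32 bytes = min_t(u32, len, PAGE_SIZE - first);
 *
 *			pb[count].pfn = pa >> PAGE_SHIFT;
 *			pb[count].offset = first;
 *			pb[count].len = bytes;
 *
 *			pa += bytes;
 *			len -= bytes;
 *			first = 0;
 *			count++;
 *		}
 *		return count;
 *	}
 */
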
/* 0x18 includes the proprietary packet header */
#define MAX_PAGE_BUFFER_PACKET		(0x18 + \
					 (sizeof(struct hv_page_buffer) * \
					  MAX_PAGE_BUFFER_COUNT))
#define MAX_MULTIPAGE_BUFFER_PACKET	(0x18 + \
					 sizeof(struct hv_multipage_buffer))


#pragma pack(pop)

struct hv_ring_buffer {
	/* Offset in bytes from the start of ring data below */
	u32 write_index;

	/* Offset in bytes from the start of ring data below */
	u32 read_index;

	u32 interrupt_mask;

	/*
	 * Win8 uses some of the reserved bits to implement
	 * interrupt driven flow management. On the send side
	 * we can request that the receiver interrupt the sender
	 * when the ring transitions from being full to being able
	 * to handle a message of size "pending_send_sz".
	 *
	 * Add necessary state for this enhancement.
	 */
	u32 pending_send_sz;

	u32 reserved1[12];

	union {
		struct {
			u32 feat_pending_send_sz:1;
		};
		u32 value;
	} feature_bits;

	/* Pad it to PAGE_SIZE so that data starts on page boundary */
	u8 reserved2[4028];

	/*
	 * Ring data starts here + RingDataStartOffset
	 * !!! DO NOT place any fields below this !!!
	 */
	u8 buffer[0];
} __packed;

struct hv_ring_buffer_info {
	struct hv_ring_buffer *ring_buffer;
	u32 ring_size;			/* Include the shared header */
	spinlock_t ring_lock;

	u32 ring_datasize;		/* < ring_size */
	u32 ring_data_startoffset;
	u32 priv_write_index;
	u32 priv_read_index;
};

/*
 *
 * hv_get_ringbuffer_availbytes()
 *
 * Get number of bytes available to read and to write to
 * for the specified ring buffer
 */
static inline void
hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	/* Capture the read/write indices before they change */
	read_loc = rbi->ring_buffer->read_index;
	write_loc = rbi->ring_buffer->write_index;
	dsize = rbi->ring_datasize;

	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	*read = dsize - *write;
}

static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, read;

	dsize = rbi->ring_datasize;
	read_loc = rbi->ring_buffer->read_index;
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	read = write_loc >= read_loc ? (write_loc - read_loc) :
		(dsize - read_loc) + write_loc;

	return read;
}

static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, write;

	dsize = rbi->ring_datasize;
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = rbi->ring_buffer->write_index;

	write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	return write;
}

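/*
 * Example (illustrative sketch only): the helpers above are what a sender
 * might use to decide whether an outgoing packet fits before trying to
 * write it. The function and its "pkt_size" parameter are hypothetical;
 * real producers also account for the packet descriptor and trailer.
 *
 *	static bool example_can_send(struct vmbus_channel *chan, u32 pkt_size)
 *	{
 *		return hv_get_bytes_to_write(&chan->outbound) > pkt_size;
 *	}
 */
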
/*
 * VMBUS version is a 32 bit entity broken up into
 * two 16 bit quantities: major_number.minor_number.
 *
 * 0 . 13 (Windows Server 2008)
 * 1 . 1  (Windows 7)
 * 2 . 4  (Windows 8)
 * 3 . 0  (Windows 8 R2)
 * 4 . 0  (Windows 10)
 */

#define VERSION_WS2008	((0 << 16) | (13))
#define VERSION_WIN7	((1 << 16) | (1))
#define VERSION_WIN8	((2 << 16) | (4))
#define VERSION_WIN8_1	((3 << 16) | (0))
#define VERSION_WIN10	((4 << 16) | (0))

#define VERSION_INVAL -1

#define VERSION_CURRENT VERSION_WIN10

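/*
 * Example (illustrative sketch only): extracting the major/minor halves of
 * a negotiated version. The helper macros are hypothetical; the encoding
 * itself follows the comment above, e.g. VERSION_WIN8 = (2 << 16) | 4, so
 * major = 2 and minor = 4.
 *
 *	#define EXAMPLE_VERSION_MAJOR(ver)	(((ver) >> 16) & 0xffff)
 *	#define EXAMPLE_VERSION_MINOR(ver)	((ver) & 0xffff)
 */
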
/* The maximum size of a pipe payload is 16K. */
#define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)

/* Define PipeMode values. */
#define VMBUS_PIPE_TYPE_BYTE		0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE		0x00000004

/* The size of the user defined data buffer for non-pipe offers. */
#define MAX_USER_DEFINED_BYTES		120

/* The size of the user defined data buffer for pipe offers. */
#define MAX_PIPE_USER_DEFINED_BYTES	116

/*
 * At the center of the Channel Management library is the Channel Offer. This
 * struct contains the fundamental information about an offer.
 */
struct vmbus_channel_offer {
	uuid_le if_type;
	uuid_le if_instance;

	/*
	 * These two fields are not currently used.
	 */
	u64 reserved1;
	u64 reserved2;

	u16 chn_flags;
	u16 mmio_megabytes;		/* in bytes * 1024 * 1024 */

	union {
		/* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
		struct {
			unsigned char user_def[MAX_USER_DEFINED_BYTES];
		} std;

		/*
		 * Pipes:
		 * The following structure is an integrated pipe protocol, which
		 * is implemented on top of standard user-defined data. Pipe
		 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
		 * use.
		 */
		struct {
			u32 pipe_mode;
			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
		} pipe;
	} u;
	/*
	 * The sub_channel_index is defined in win8.
	 */
	u16 sub_channel_index;
	u16 reserved3;
} __packed;

/* Server Flags */
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE	1
#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES	2
#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS		4
#define VMBUS_CHANNEL_NAMED_PIPE_MODE			0x10
#define VMBUS_CHANNEL_LOOPBACK_OFFER			0x100
#define VMBUS_CHANNEL_PARENT_OFFER			0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400
#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER		0x2000

struct vmpacket_descriptor {
	u16 type;
	u16 offset8;
	u16 len8;
	u16 flags;
	u64 trans_id;
} __packed;

struct vmpacket_header {
	u32 prev_pkt_start_offset;
	struct vmpacket_descriptor descriptor;
} __packed;

struct vmtransfer_page_range {
	u32 byte_count;
	u32 byte_offset;
} __packed;

struct vmtransfer_page_packet_header {
	struct vmpacket_descriptor d;
	u16 xfer_pageset_id;
	u8 sender_owns_set;
	u8 reserved;
	u32 range_cnt;
	struct vmtransfer_page_range ranges[1];
} __packed;

struct vmgpadl_packet_header {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;
} __packed;

struct vmadd_remove_transfer_page_set {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u16 xfer_pageset_id;
	u16 reserved;
} __packed;

/*
 * This structure defines a range in guest physical space that can be made to
 * look virtually contiguous.
 */
struct gpa_range {
	u32 byte_count;
	u32 byte_offset;
	u64 pfn_array[0];
};

/*
 * This is the format for an Establish Gpadl packet, which contains a handle by
 * which this GPADL will be known and a set of GPA ranges associated with it.
 * This can be converted to a MDL by the guest OS. If there are multiple GPA
 * ranges, then the resulting MDL will be "chained," representing multiple VA
 * ranges.
 */
struct vmestablish_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/*
 * This is the format for a Teardown Gpadl packet, which indicates that the
 * GPADL handle in the Establish Gpadl packet will never be referenced again.
 */
struct vmteardown_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;	/* for alignment to an 8-byte boundary */
} __packed;

/*
 * This is the format for a GPA-Direct packet, which contains a set of GPA
 * ranges, in addition to commands and/or data.
 */
struct vmdata_gpa_direct {
	struct vmpacket_descriptor d;
	u32 reserved;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/* This is the format for an Additional Data Packet. */
struct vmadditional_data {
	struct vmpacket_descriptor d;
	u64 total_bytes;
	u32 offset;
	u32 byte_cnt;
	unsigned char data[1];
} __packed;

union vmpacket_largest_possible_header {
	struct vmpacket_descriptor simple_hdr;
	struct vmtransfer_page_packet_header xfer_page_hdr;
	struct vmgpadl_packet_header gpadl_hdr;
	struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
	struct vmestablish_gpadl establish_gpadl_hdr;
	struct vmteardown_gpadl teardown_gpadl_hdr;
	struct vmdata_gpa_direct data_gpa_direct_hdr;
};

#define VMPACKET_DATA_START_ADDRESS(__packet)	\
	(void *)(((unsigned char *)__packet) +	\
	 ((struct vmpacket_descriptor *)__packet)->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)		\
	((((struct vmpacket_descriptor *)__packet)->len8 -	\
	  ((struct vmpacket_descriptor *)__packet)->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)	\
	(((struct vmpacket_descriptor *)__packet)->type)

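/*
 * Example (illustrative sketch only): locating the payload of a received
 * packet from its descriptor. offset8 and len8 are in units of 8 bytes,
 * which is why the macros above multiply by 8. The function name is
 * hypothetical.
 *
 *	static void example_dump_payload(struct vmpacket_descriptor *desc)
 *	{
 *		void *data = VMPACKET_DATA_START_ADDRESS(desc);
 *		u32 datalen = VMPACKET_DATA_LENGTH(desc);
 *
 *		print_hex_dump_bytes("vmbus pkt: ", DUMP_PREFIX_OFFSET,
 *				     data, datalen);
 *	}
 */
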
enum vmbus_packet_type {
	VM_PKT_INVALID				= 0x0,
	VM_PKT_SYNCH				= 0x1,
	VM_PKT_ADD_XFER_PAGESET			= 0x2,
	VM_PKT_RM_XFER_PAGESET			= 0x3,
	VM_PKT_ESTABLISH_GPADL			= 0x4,
	VM_PKT_TEARDOWN_GPADL			= 0x5,
	VM_PKT_DATA_INBAND			= 0x6,
	VM_PKT_DATA_USING_XFER_PAGES		= 0x7,
	VM_PKT_DATA_USING_GPADL			= 0x8,
	VM_PKT_DATA_USING_GPA_DIRECT		= 0x9,
	VM_PKT_CANCEL_REQUEST			= 0xa,
	VM_PKT_COMP				= 0xb,
	VM_PKT_DATA_USING_ADDITIONAL_PKT	= 0xc,
	VM_PKT_ADDITIONAL_DATA			= 0xd
};

#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED	1


/* Version 1 messages */
enum vmbus_channel_message_type {
	CHANNELMSG_INVALID			=  0,
	CHANNELMSG_OFFERCHANNEL			=  1,
	CHANNELMSG_RESCIND_CHANNELOFFER		=  2,
	CHANNELMSG_REQUESTOFFERS		=  3,
	CHANNELMSG_ALLOFFERS_DELIVERED		=  4,
	CHANNELMSG_OPENCHANNEL			=  5,
	CHANNELMSG_OPENCHANNEL_RESULT		=  6,
	CHANNELMSG_CLOSECHANNEL			=  7,
	CHANNELMSG_GPADL_HEADER			=  8,
	CHANNELMSG_GPADL_BODY			=  9,
	CHANNELMSG_GPADL_CREATED		= 10,
	CHANNELMSG_GPADL_TEARDOWN		= 11,
	CHANNELMSG_GPADL_TORNDOWN		= 12,
	CHANNELMSG_RELID_RELEASED		= 13,
	CHANNELMSG_INITIATE_CONTACT		= 14,
	CHANNELMSG_VERSION_RESPONSE		= 15,
	CHANNELMSG_UNLOAD			= 16,
	CHANNELMSG_UNLOAD_RESPONSE		= 17,
	CHANNELMSG_18				= 18,
	CHANNELMSG_19				= 19,
	CHANNELMSG_20				= 20,
	CHANNELMSG_TL_CONNECT_REQUEST		= 21,
	CHANNELMSG_COUNT
};

struct vmbus_channel_message_header {
	enum vmbus_channel_message_type msgtype;
	u32 padding;
} __packed;

/* Query VMBus Version parameters */
struct vmbus_channel_query_vmbus_version {
	struct vmbus_channel_message_header header;
	u32 version;
} __packed;

/* VMBus Version Supported parameters */
struct vmbus_channel_version_supported {
	struct vmbus_channel_message_header header;
	u8 version_supported;
} __packed;

/* Offer Channel parameters */
struct vmbus_channel_offer_channel {
	struct vmbus_channel_message_header header;
	struct vmbus_channel_offer offer;
	u32 child_relid;
	u8 monitorid;
	/*
	 * win7 and beyond splits this field into a bit field.
	 */
	u8 monitor_allocated:1;
	u8 reserved:7;
	/*
	 * These are new fields added in win7 and later.
	 * Do not access these fields without checking the
	 * negotiated protocol.
	 *
	 * If "is_dedicated_interrupt" is set, we must not set the
	 * associated bit in the channel bitmap while sending the
	 * interrupt to the host.
	 *
	 * connection_id is to be used in signaling the host.
	 */
	u16 is_dedicated_interrupt:1;
	u16 reserved1:15;
	u32 connection_id;
} __packed;

/* Rescind Offer parameters */
struct vmbus_channel_rescind_offer {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

/*
 * Request Offer -- no parameters, SynIC message contains the partition ID
 * Set Snoop -- no parameters, SynIC message contains the partition ID
 * Clear Snoop -- no parameters, SynIC message contains the partition ID
 * All Offers Delivered -- no parameters, SynIC message contains the partition
 *			   ID
 * Flush Client -- no parameters, SynIC message contains the partition ID
 */

/* Open Channel parameters */
struct vmbus_channel_open_channel {
	struct vmbus_channel_message_header header;

	/* Identifies the specific VMBus channel that is being opened. */
	u32 child_relid;

	/* ID making a particular open request at a channel offer unique. */
	u32 openid;

	/* GPADL for the channel's ring buffer. */
	u32 ringbuffer_gpadlhandle;

	/*
	 * Starting with win8, this field will be used to specify
	 * the target virtual processor on which to deliver the interrupt for
	 * the host to guest communication.
	 * Prior to win8, incoming channel interrupts would only
	 * be delivered on cpu 0. Setting this value to 0 would
	 * preserve the earlier behavior.
	 */
	u32 target_vp;

	/*
	 * The upstream ring buffer begins at offset zero in the memory
	 * described by RingBufferGpadlHandle. The downstream ring buffer
	 * follows it at this offset (in pages).
	 */
	u32 downstream_ringbuffer_pageoffset;

	/* User-specific data to be passed along to the server endpoint. */
	unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;

/* Open Channel Result parameters */
struct vmbus_channel_open_result {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 openid;
	u32 status;
} __packed;

/* Close channel parameters; */
struct vmbus_channel_close_channel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

/* Channel Message GPADL */
#define GPADL_TYPE_RING_BUFFER		1
#define GPADL_TYPE_SERVER_SAVE_AREA	2
#define GPADL_TYPE_TRANSACTION		8

/*
 * The number of PFNs in a GPADL message is defined by the number of
 * pages that would be spanned by ByteCount and ByteOffset. If the
 * implied number of PFNs won't fit in this packet, there will be a
 * follow-up packet that contains more.
 */
struct vmbus_channel_gpadl_header {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u16 range_buflen;
	u16 rangecount;
	struct gpa_range range[0];
} __packed;

/* This is the followup packet that contains more PFNs. */
struct vmbus_channel_gpadl_body {
	struct vmbus_channel_message_header header;
	u32 msgnumber;
	u32 gpadl;
	u64 pfn[0];
} __packed;

struct vmbus_channel_gpadl_created {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u32 creation_status;
} __packed;

struct vmbus_channel_gpadl_teardown {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
} __packed;

struct vmbus_channel_gpadl_torndown {
	struct vmbus_channel_message_header header;
	u32 gpadl;
} __packed;

struct vmbus_channel_relid_released {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

struct vmbus_channel_initiate_contact {
	struct vmbus_channel_message_header header;
	u32 vmbus_version_requested;
	u32 target_vcpu; /* The VCPU the host should respond to */
	u64 interrupt_page;
	u64 monitor_page1;
	u64 monitor_page2;
} __packed;

/* Hyper-V socket: guest's connect()-ing to host */
struct vmbus_channel_tl_connect_request {
	struct vmbus_channel_message_header header;
	uuid_le guest_endpoint_id;
	uuid_le host_service_id;
} __packed;

struct vmbus_channel_version_response {
	struct vmbus_channel_message_header header;
	u8 version_supported;
} __packed;

enum vmbus_channel_state {
	CHANNEL_OFFER_STATE,
	CHANNEL_OPENING_STATE,
	CHANNEL_OPEN_STATE,
	CHANNEL_OPENED_STATE,
};

/*
 * Represents each channel msg on the vmbus connection. This is a
 * variable-size data structure depending on the msg type itself.
 */
struct vmbus_channel_msginfo {
	/* Bookkeeping stuff */
	struct list_head msglistentry;

	/* So far, this is only used to handle gpadl body message */
	struct list_head submsglist;

	/* Synchronize the request/response if needed */
	struct completion waitevent;
	struct vmbus_channel *waiting_channel;
	union {
		struct vmbus_channel_version_supported version_supported;
		struct vmbus_channel_open_result open_result;
		struct vmbus_channel_gpadl_torndown gpadl_torndown;
		struct vmbus_channel_gpadl_created gpadl_created;
		struct vmbus_channel_version_response version_response;
	} response;

	u32 msgsize;
	/*
	 * The channel message that goes out on the "wire".
	 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
	 */
	unsigned char msg[0];
};

struct vmbus_close_msg {
	struct vmbus_channel_msginfo info;
	struct vmbus_channel_close_channel msg;
};

/* Define connection identifier type. */
union hv_connection_id {
	u32 asu32;
	struct {
		u32 id:24;
		u32 reserved:8;
	} u;
};

/* Definition of the hv_signal_event hypercall input structure. */
struct hv_input_signal_event {
	union hv_connection_id connectionid;
	u16 flag_number;
	u16 rsvdz;
};

struct hv_input_signal_event_buffer {
	u64 align8;
	struct hv_input_signal_event event;
};

enum hv_signal_policy {
	HV_SIGNAL_POLICY_DEFAULT = 0,
	HV_SIGNAL_POLICY_EXPLICIT,
};

enum hv_numa_policy {
	HV_BALANCED = 0,
	HV_LOCALIZED,
};

enum vmbus_device_type {
	HV_IDE = 0,
	HV_SCSI,
	HV_FC,
	HV_NIC,
	HV_ND,
	HV_PCIE,
	HV_FB,
	HV_KBD,
	HV_MOUSE,
	HV_KVP,
	HV_TS,
	HV_HB,
	HV_SHUTDOWN,
	HV_FCOPY,
	HV_BACKUP,
	HV_DM,
	HV_UNKNOWN,
};

struct vmbus_device {
	u16 dev_type;
	uuid_le guid;
	bool perf_device;
};

struct vmbus_channel {
	struct list_head listentry;

	struct hv_device *device_obj;

	enum vmbus_channel_state state;

	struct vmbus_channel_offer_channel offermsg;
	/*
	 * These are based on the OfferMsg.MonitorId.
	 * Save it here for easy access.
	 */
	u8 monitor_grp;
	u8 monitor_bit;

	bool rescind; /* got rescind msg */

	u32 ringbuffer_gpadlhandle;

	/* Allocated memory for ring buffer */
	void *ringbuffer_pages;
	u32 ringbuffer_pagecount;
	struct hv_ring_buffer_info outbound;	/* send to parent */
	struct hv_ring_buffer_info inbound;	/* receive from parent */
	spinlock_t inbound_lock;

	struct vmbus_close_msg close_msg;

	/* Channel callbacks are invoked in this workqueue context */
	/* HANDLE dataWorkQueue; */

	void (*onchannel_callback)(void *context);
	void *channel_callback_context;

	/*
	 * A channel can be marked for efficient (batched)
	 * reading:
	 * If batched_reading is set to "true", we read until the
	 * channel is empty and hold off interrupts from the host
	 * during the entire read process.
	 * If batched_reading is set to "false", the client is not
	 * going to perform batched reading.
	 *
	 * By default we will enable batched reading; specific
	 * drivers that don't want this behavior can turn it off.
	 */

	bool batched_reading;

	bool is_dedicated_interrupt;
	struct hv_input_signal_event_buffer sig_buf;
	struct hv_input_signal_event *sig_event;

	/*
	 * Starting with win8, this field will be used to specify
	 * the target virtual processor on which to deliver the interrupt for
	 * the host to guest communication.
	 * Prior to win8, incoming channel interrupts would only
	 * be delivered on cpu 0. Setting this value to 0 would
	 * preserve the earlier behavior.
	 */
	u32 target_vp;
	/* The corresponding CPUID in the guest */
	u32 target_cpu;
	/*
	 * State to manage the CPU affiliation of channels.
	 */
	struct cpumask alloced_cpus_in_node;
	int numa_node;
	/*
	 * Support for sub-channels. For high performance devices,
	 * it will be useful to have multiple sub-channels to support
	 * a scalable communication infrastructure with the host.
	 * The support for sub-channels is implemented as an extension
	 * to the current infrastructure.
	 * The initial offer is considered the primary channel and this
	 * offer message will indicate if the host supports sub-channels.
	 * The guest is free to ask for sub-channels to be offered and can
	 * open these sub-channels as a normal "primary" channel. However,
	 * all sub-channels will have the same type and instance guids as the
	 * primary channel. Requests sent on a given channel will result in a
	 * response on the same channel.
	 */

	/*
	 * Sub-channel creation callback. This callback will be called in
	 * process context when a sub-channel offer is received from the host.
	 * The guest can open the sub-channel in the context of this callback.
	 */
	void (*sc_creation_callback)(struct vmbus_channel *new_sc);

	/*
	 * Channel rescind callback. Some channels (the hvsock ones), need to
	 * register a callback which is invoked in vmbus_onoffer_rescind().
	 */
	void (*chn_rescind_callback)(struct vmbus_channel *channel);

	/*
	 * The spinlock to protect the structure. It is being used to protect
	 * test-and-set access to various attributes of the structure as well
	 * as all sc_list operations.
	 */
	spinlock_t lock;
	/*
	 * All Sub-channels of a primary channel are linked here.
	 */
	struct list_head sc_list;
	/*
	 * Current number of sub-channels.
	 */
	int num_sc;
	/*
	 * Number of a sub-channel (position within sc_list) which is supposed
	 * to be used as the next outgoing channel.
	 */
	int next_oc;
	/*
	 * The primary channel this sub-channel belongs to.
	 * This will be NULL for the primary channel.
	 */
	struct vmbus_channel *primary_channel;
	/*
	 * Support per-channel state for use by vmbus drivers.
	 */
	void *per_channel_state;
	/*
	 * To support per-cpu lookup mapping of relid to channel,
	 * link up channels based on their CPU affinity.
	 */
	struct list_head percpu_list;
	/*
	 * Host signaling policy: The default policy will be
	 * based on the ring buffer state. We will also support
	 * a policy where the client driver can have explicit
	 * signaling control.
	 */
	enum hv_signal_policy signal_policy;
	/*
	 * On the channel send side, many of the VMBUS
	 * device drivers explicitly serialize access to the
	 * outgoing ring buffer. Give more control to the
	 * VMBUS device drivers in terms of how to serialize
	 * access to the outgoing ring buffer.
	 * The default behavior will be to acquire the
	 * ring lock to preserve the current behavior.
	 */
	bool acquire_ring_lock;
	/*
	 * For performance critical channels (storage, networking
	 * etc,), Hyper-V has a mechanism to enhance the throughput
	 * at the expense of latency:
	 * When the host is to be signaled, we just set a bit in a shared page
	 * and this bit will be inspected by the hypervisor within a certain
	 * window and if the bit is set, the host will be signaled. The window
	 * of time is the monitor latency - currently around 100 usecs. This
	 * mechanism improves throughput by:
	 *
	 * A) Making the host more efficient - each time it wakes up,
	 *    potentially it will process more packets. The
	 *    monitor latency allows a batch to build up.
	 * B) By deferring the hypercall to signal, we will also minimize
	 *    the interrupts.
	 *
	 * Clearly, these optimizations improve throughput at the expense of
	 * latency. Furthermore, since the channel is shared for both
	 * control and data messages, control messages currently suffer
	 * unnecessary latency adversely impacting performance and boot
	 * time. To fix this issue, permit tagging the channel as being
	 * in "low latency" mode. In this mode, we will bypass the monitor
	 * mechanism.
	 */
	bool low_latency;

	/*
	 * NUMA distribution policy:
	 * We support two policies:
	 * 1) Balanced: Here all performance critical channels are
	 *    distributed evenly amongst all the NUMA nodes.
	 *    This policy will be the default policy.
	 * 2) Localized: All channels of a given instance of a
	 *    performance critical service will be assigned CPUs
	 *    within a selected NUMA node.
	 */
	enum hv_numa_policy affinity_policy;

};

static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
{
	c->acquire_ring_lock = state;
}

static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
	return !!(c->offermsg.offer.chn_flags &
		  VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
}

static inline void set_channel_signal_state(struct vmbus_channel *c,
					    enum hv_signal_policy policy)
{
	c->signal_policy = policy;
}

static inline void set_channel_affinity_state(struct vmbus_channel *c,
					      enum hv_numa_policy policy)
{
	c->affinity_policy = policy;
}

static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
{
	c->batched_reading = state;
}

static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
	c->per_channel_state = s;
}

static inline void *get_per_channel_state(struct vmbus_channel *c)
{
	return c->per_channel_state;
}

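/*
 * Example (illustrative sketch only): a driver attaching its own state to a
 * (sub-)channel and retrieving it from the channel callback. The
 * "example_dev" structure, the callback and the setup step are hypothetical.
 *
 *	struct example_dev {
 *		atomic_t packets_received;
 *	};
 *
 *	static void example_chan_callback(void *context)
 *	{
 *		struct vmbus_channel *chan = context;
 *		struct example_dev *edev = get_per_channel_state(chan);
 *
 *		atomic_inc(&edev->packets_received);
 *	}
 *
 * with set_per_channel_state(chan, edev) called once during device setup.
 */
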
static inline void set_channel_pending_send_size(struct vmbus_channel *c,
						 u32 size)
{
	c->outbound.ring_buffer->pending_send_sz = size;
}

static inline void set_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = true;
}

static inline void clear_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = false;
}

void vmbus_onmessage(void *context);

int vmbus_request_offers(void);

/*
 * APIs for managing sub-channels.
 */

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
			void (*sc_cr_cb)(struct vmbus_channel *new_sc));

void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
			void (*chn_rescind_cb)(struct vmbus_channel *));

/*
 * Retrieve the (sub) channel on which to send an outgoing request.
 * When a primary channel has multiple sub-channels, we choose a
 * channel whose VCPU binding is closest to the VCPU on which
 * this call is being made.
 */
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary);

/*
 * Check if sub-channels have already been offered. This API will be useful
 * when the driver is unloaded after establishing sub-channels. In this case,
 * when the driver is re-loaded, the driver would have to check if the
 * subchannels have already been established before attempting to request
 * the creation of sub-channels.
 * This function returns TRUE to indicate that subchannels have already been
 * created.
 * This function should be invoked after setting the callback function for
 * sub-channel creation.
 */
bool vmbus_are_subchannels_present(struct vmbus_channel *primary);

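/*
 * Example (illustrative sketch only): wiring up the sub-channel creation
 * callback on a primary channel. A driver typically registers the callback
 * before asking the host for sub-channels; how sub-channels are requested
 * is device specific (e.g. a vendor message in storvsc/netvsc), so only the
 * callback registration is sketched here and the names are hypothetical.
 *
 *	static void example_sc_created(struct vmbus_channel *new_sc)
 *	{
 *		// open new_sc with vmbus_open() and set up its callback
 *	}
 *
 * during probe, on the primary channel:
 *
 *	vmbus_set_sc_create_callback(primary, example_sc_created);
 *	if (vmbus_are_subchannels_present(primary))
 *		handle the sub-channels that were already offered;
 */
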
/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;
	struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_multipage_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_multipage_buffer range;
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_packet_mpb_array {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_mpb_array range;
} __packed;


extern int vmbus_open(struct vmbus_channel *channel,
		      u32 send_ringbuffersize,
		      u32 recv_ringbuffersize,
		      void *userdata,
		      u32 userdatalen,
		      void (*onchannel_callback)(void *context),
		      void *context);

extern void vmbus_close(struct vmbus_channel *channel);

extern int vmbus_sendpacket(struct vmbus_channel *channel,
			    void *buffer,
			    u32 bufferLen,
			    u64 requestid,
			    enum vmbus_packet_type type,
			    u32 flags);

extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
				void *buffer,
				u32 bufferLen,
				u64 requestid,
				enum vmbus_packet_type type,
				u32 flags,
				bool kick_q);

extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
				       struct hv_page_buffer pagebuffers[],
				       u32 pagecount,
				       void *buffer,
				       u32 bufferlen,
				       u64 requestid);

extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
					   struct hv_page_buffer pagebuffers[],
					   u32 pagecount,
					   void *buffer,
					   u32 bufferlen,
					   u64 requestid,
					   u32 flags,
					   bool kick_q);

extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
					    struct hv_multipage_buffer *mpb,
					    void *buffer,
					    u32 bufferlen,
					    u64 requestid);

extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
				     struct vmbus_packet_mpb_array *mpb,
				     u32 desc_size,
				     void *buffer,
				     u32 bufferlen,
				     u64 requestid);

extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
				 void *kbuffer,
				 u32 size,
				 u32 *gpadl_handle);

extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
				u32 gpadl_handle);

extern int vmbus_recvpacket(struct vmbus_channel *channel,
			    void *buffer,
			    u32 bufferlen,
			    u32 *buffer_actual_len,
			    u64 *requestid);

extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
				void *buffer,
				u32 bufferlen,
				u32 *buffer_actual_len,
				u64 *requestid);


extern void vmbus_ontimer(unsigned long data);

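/*
 * Example (illustrative sketch only): the typical open/send flow used by
 * VMBUS drivers. The ring sizes, the request id (1) and the u32 payload are
 * made-up values, and example_chan_callback()/example_connect() are
 * hypothetical; a real driver sizes its rings per protocol and tracks
 * outstanding request ids for completion handling.
 *
 *	static int example_connect(struct hv_device *dev)
 *	{
 *		u32 payload = 0;
 *		int ret;
 *
 *		ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
 *				 NULL, 0, example_chan_callback, dev->channel);
 *		if (ret)
 *			return ret;
 *
 *		ret = vmbus_sendpacket(dev->channel, &payload, sizeof(payload),
 *				       1, VM_PKT_DATA_INBAND,
 *				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 *		if (ret)
 *			vmbus_close(dev->channel);
 *		return ret;
 *	}
 */
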
/* Base driver object */
struct hv_driver {
	const char *name;

	/*
	 * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
	 * channel flag, actually doesn't mean a synthetic device because the
	 * offer's if_type/if_instance can change for every new hvsock
	 * connection.
	 *
	 * However, to facilitate the notification of new-offer/rescind-offer
	 * from vmbus driver to hvsock driver, we can handle hvsock offer as
	 * a special vmbus device, and hence we need the below flag to
	 * indicate if the driver is the hvsock driver or not: we need to
	 * specially treat the hvsock offer & driver in vmbus_match().
	 */
	bool hvsock;

	/* the device type supported by this driver */
	uuid_le dev_type;
	const struct hv_vmbus_device_id *id_table;

	struct device_driver driver;

	/* dynamic device GUID's */
	struct {
		spinlock_t lock;
		struct list_head list;
	} dynids;

	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
	int (*remove)(struct hv_device *);
	void (*shutdown)(struct hv_device *);

};

/* Base device object */
struct hv_device {
	/* the device type id of this device */
	uuid_le dev_type;

	/* the device instance id of this device */
	uuid_le dev_instance;
	u16 vendor_id;
	u16 device_id;

	struct device device;

	struct vmbus_channel *channel;
};


static inline struct hv_device *device_to_hv_device(struct device *d)
{
	return container_of(d, struct hv_device, device);
}

static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
	return container_of(d, struct hv_driver, driver);
}

static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
	dev_set_drvdata(&dev->device, data);
}

static inline void *hv_get_drvdata(struct hv_device *dev)
{
	return dev_get_drvdata(&dev->device);
}

/* Vmbus interface */
#define vmbus_driver_register(driver)	\
	__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
					 struct module *owner,
					 const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);

void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);

int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok);
void vmbus_free_mmio(resource_size_t start, resource_size_t size);
int vmbus_cpu_number_to_vp_number(int cpu_number);
u64 hv_do_hypercall(u64 control, void *input, void *output);

/*
 * GUID definitions of various offer types - services offered to the guest.
 */

/*
 * Network GUID
 * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
 */
#define HV_NIC_GUID \
	.guid = UUID_LE(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
			0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)

/*
 * IDE GUID
 * {32412632-86cb-44a2-9b5c-50d1417354f5}
 */
#define HV_IDE_GUID \
	.guid = UUID_LE(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
			0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)

/*
 * SCSI GUID
 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
 */
#define HV_SCSI_GUID \
	.guid = UUID_LE(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
			0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)

/*
 * Shutdown GUID
 * {0e0b6031-5213-4934-818b-38d90ced39db}
 */
#define HV_SHUTDOWN_GUID \
	.guid = UUID_LE(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
			0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)

/*
 * Time Synch GUID
 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
 */
#define HV_TS_GUID \
	.guid = UUID_LE(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
			0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)

/*
 * Heartbeat GUID
 * {57164f39-9115-4e78-ab55-382f3bd5422d}
 */
#define HV_HEART_BEAT_GUID \
	.guid = UUID_LE(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
			0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)

/*
 * KVP GUID
 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
 */
#define HV_KVP_GUID \
	.guid = UUID_LE(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
			0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)

/*
 * Dynamic memory GUID
 * {525074dc-8985-46e2-8057-a307dc18a502}
 */
#define HV_DM_GUID \
	.guid = UUID_LE(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
			0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)

/*
 * Mouse GUID
 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
 */
#define HV_MOUSE_GUID \
	.guid = UUID_LE(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
			0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)

/*
 * Keyboard GUID
 * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
 */
#define HV_KBD_GUID \
	.guid = UUID_LE(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
			0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)

/*
 * VSS (Backup/Restore) GUID
 */
#define HV_VSS_GUID \
	.guid = UUID_LE(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
			0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
/*
 * Synthetic Video GUID
 * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
 */
#define HV_SYNTHVID_GUID \
	.guid = UUID_LE(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
			0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)

/*
 * Synthetic FC GUID
 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
 */
#define HV_SYNTHFC_GUID \
	.guid = UUID_LE(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
			0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)

/*
 * Guest File Copy Service
 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
 */

#define HV_FCOPY_GUID \
	.guid = UUID_LE(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
			0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)

/*
 * NetworkDirect. This is the guest RDMA service.
 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
 */
#define HV_ND_GUID \
	.guid = UUID_LE(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
			0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)

/*
 * PCI Express Pass Through
 * {44C4F61D-4444-4400-9D52-802E27EDE19F}
 */

#define HV_PCIE_GUID \
	.guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
			0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)

/*
 * Linux doesn't support the 3 devices: the first two are for
 * Automatic Virtual Machine Activation, and the third is for
 * Remote Desktop Virtualization.
 * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
 * {3375baf4-9e15-4b30-b765-67acb10d607b}
 * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
 */

#define HV_AVMA1_GUID \
	.guid = UUID_LE(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
			0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)

#define HV_AVMA2_GUID \
	.guid = UUID_LE(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
			0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)

#define HV_RDV_GUID \
	.guid = UUID_LE(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
			0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)

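/*
 * Example (illustrative sketch only): how the GUID macros above are
 * typically consumed - as initializers in a driver's hv_vmbus_device_id
 * table, which vmbus_driver_register() then matches against offers.
 * "example_drv", its probe/remove functions and the use of HV_NIC_GUID
 * here are purely for illustration.
 *
 *	static const struct hv_vmbus_device_id example_id_table[] = {
 *		{ HV_NIC_GUID, },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(vmbus, example_id_table);
 *
 *	static struct hv_driver example_drv = {
 *		.name = "example_vmbus_drv",
 *		.id_table = example_id_table,
 *		.probe = example_probe,
 *		.remove = example_remove,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return vmbus_driver_register(&example_drv);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		vmbus_driver_unregister(&example_drv);
 *	}
 */
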
/*
 * Common header for Hyper-V ICs
 */

#define ICMSGTYPE_NEGOTIATE		0
#define ICMSGTYPE_HEARTBEAT		1
#define ICMSGTYPE_KVPEXCHANGE		2
#define ICMSGTYPE_SHUTDOWN		3
#define ICMSGTYPE_TIMESYNC		4
#define ICMSGTYPE_VSS			5

#define ICMSGHDRFLAG_TRANSACTION	1
#define ICMSGHDRFLAG_REQUEST		2
#define ICMSGHDRFLAG_RESPONSE		4


/*
 * While we want to handle util services as regular devices,
 * there is only one instance of each of these services; so
 * we statically allocate the service specific state.
 */

struct hv_util_service {
	u8 *recv_buffer;
	void *channel;
	void (*util_cb)(void *);
	int (*util_init)(struct hv_util_service *);
	void (*util_deinit)(void);
};

struct vmbuspipe_hdr {
	u32 flags;
	u32 msgsize;
} __packed;

struct ic_version {
	u16 major;
	u16 minor;
} __packed;

struct icmsg_hdr {
	struct ic_version icverframe;
	u16 icmsgtype;
	struct ic_version icvermsg;
	u16 icmsgsize;
	u32 status;
	u8 ictransaction_id;
	u8 icflags;
	u8 reserved[2];
} __packed;

struct icmsg_negotiate {
	u16 icframe_vercnt;
	u16 icmsg_vercnt;
	u32 reserved;
	struct ic_version icversion_data[1]; /* any size array */
} __packed;

struct shutdown_msg_data {
	u32 reason_code;
	u32 timeout_seconds;
	u32 flags;
	u8 display_message[2048];
} __packed;

struct heartbeat_msg_data {
	u64 seq_num;
	u32 reserved[8];
} __packed;

/* Time Sync IC defs */
#define ICTIMESYNCFLAG_PROBE	0
#define ICTIMESYNCFLAG_SYNC	1
#define ICTIMESYNCFLAG_SAMPLE	2

#ifdef __x86_64__
#define WLTIMEDELTA	116444736000000000L	/* in 100ns unit */
#else
#define WLTIMEDELTA	116444736000000000LL
#endif

struct ictimesync_data {
	u64 parenttime;
	u64 childtime;
	u64 roundtriptime;
	u8 flags;
} __packed;

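/*
 * Example (illustrative sketch only): "parenttime" above is a Windows-style
 * timestamp - 100ns units since January 1, 1601 - while WLTIMEDELTA is the
 * offset of the Unix epoch (January 1, 1970) in the same units. The
 * hv_utils time-sync code converts host time to nanoseconds since the Unix
 * epoch roughly like this (the helper name is hypothetical):
 *
 *	static u64 example_hv_time_to_ns(u64 parenttime)
 *	{
 *		return (parenttime - WLTIMEDELTA) * 100;
 *	}
 */
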
struct ictimesync_ref_data {
	u64 parenttime;
	u64 vmreferencetime;
	u8 flags;
	char leapflags;
	char stratum;
	u8 reserved[3];
} __packed;

struct hyperv_service_callback {
	u8 msg_type;
	char *log_msg;
	uuid_le data;
	struct vmbus_channel *channel;
	void (*callback) (void *context);
};

#define MAX_SRV_VER	0x7ffffff
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
				      struct icmsg_negotiate *, u8 *, int,
				      int);

void hv_event_tasklet_disable(struct vmbus_channel *channel);
void hv_event_tasklet_enable(struct vmbus_channel *channel);

void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);

void vmbus_setevent(struct vmbus_channel *channel);
/*
 * Negotiated version with the Host.
 */

extern __u32 vmbus_proto_version;

int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
				  const uuid_le *shv_host_servie_id);
void vmbus_set_event(struct vmbus_channel *channel);

/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}

/*
 * To optimize the flow management on the send-side,
 * when the sender is blocked because of lack of
 * sufficient space in the ring buffer, potentially the
 * consumer of the ring buffer can signal the producer.
 * This is controlled by the following parameters:
 *
 * 1. pending_send_sz: This is the size in bytes that the
 *    producer is trying to send.
 * 2. The feature bit feat_pending_send_sz set to indicate if
 *    the consumer of the ring will signal when the ring
 *    state transitions from being full to a state where
 *    there is room for the producer to send the pending packet.
 */

static inline void hv_signal_on_read(struct vmbus_channel *channel)
{
	u32 cur_write_sz;
	u32 pending_sz;
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * Here is the reason for having this barrier:
	 * If the reading of the pend_sz (in this function)
	 * were to be reordered and read before we commit the new read
	 * index (in the calling function) we could
	 * have a problem. If the host were to set the pending_sz after we
	 * have sampled pending_sz and go to sleep before we commit the
	 * read index, we could miss sending the interrupt. Issue a full
	 * memory barrier to address this.
	 */
	virt_mb();

	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	/* If the other end is not blocked on write don't bother. */
	if (pending_sz == 0)
		return;

	cur_write_sz = hv_get_bytes_to_write(rbi);

	if (cur_write_sz >= pending_sz)
		vmbus_setevent(channel);

	return;
}

/*
 * An API to support in-place processing of incoming VMBUS packets.
 */
#define VMBUS_PKT_TRAILER	8

static inline struct vmpacket_descriptor *
get_next_pkt_raw(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *ring_info = &channel->inbound;
	u32 priv_read_loc = ring_info->priv_read_index;
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 dsize = ring_info->ring_datasize;
	/*
	 * delta is the difference between what is available to read and
	 * what was already consumed in place. We commit read index after
	 * the whole batch is processed.
	 */
	u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ?
		priv_read_loc - ring_info->ring_buffer->read_index :
		(dsize - ring_info->ring_buffer->read_index) + priv_read_loc;
	u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);

	if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
		return NULL;

	return ring_buffer + priv_read_loc;
}

/*
 * A helper function to step through packets "in-place".
 * This API is to be called after each successful call to
 * get_next_pkt_raw().
 */
static inline void put_pkt_raw(struct vmbus_channel *channel,
			       struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *ring_info = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = ring_info->ring_datasize;

	/*
	 * Include the packet trailer.
	 */
	ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	ring_info->priv_read_index %= dsize;
}

/*
 * This call commits the read index and potentially signals the host.
 * Here is the pattern for using the "in-place" consumption APIs:
 *
 *	while (get_next_pkt_raw()) {
 *		process the packet "in-place";
 *		put_pkt_raw();
 *	}
 *	if (packets processed in place)
 *		commit_rd_index();
 */
static inline void commit_rd_index(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *ring_info = &channel->inbound;
	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	ring_info->ring_buffer->read_index = ring_info->priv_read_index;

	hv_signal_on_read(channel);
}


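/*
 * Example (illustrative sketch only): a concrete version of the pattern
 * described above commit_rd_index(). The packet handler is hypothetical;
 * the loop processes packets directly in the ring and commits the read
 * index (and possibly signals the host) once per batch.
 *
 *	static void example_drain_channel(struct vmbus_channel *chan)
 *	{
 *		struct vmpacket_descriptor *desc;
 *		bool processed = false;
 *
 *		while ((desc = get_next_pkt_raw(chan)) != NULL) {
 *			example_handle_packet(desc);
 *			put_pkt_raw(chan, desc);
 *			processed = true;
 *		}
 *
 *		if (processed)
 *			commit_rd_index(chan);
 *	}
 */
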
#endif /* _HYPERV_H */