/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#ifndef _HYPERV_H
#define _HYPERV_H

#include <uapi/linux/hyperv.h>

#include <linux/mm.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/reciprocal_div.h>
#include <asm/hyperv-tlfs.h>

#define MAX_PAGE_BUFFER_COUNT		32
#define MAX_MULTIPAGE_BUFFER_COUNT	32 /* 128K */

#pragma pack(push, 1)

/*
 * Types for GPADL, which decide how the GPADL header is created.
 *
 * It doesn't make much difference between BUFFER and RING if PAGE_SIZE is the
 * same as HV_HYP_PAGE_SIZE.
 *
 * If PAGE_SIZE is bigger than HV_HYP_PAGE_SIZE, the headers of ring buffers
 * will be of PAGE_SIZE, however, only the first HV_HYP_PAGE will be put
 * into the gpadl, therefore the number of HV_HYP_PAGEs and the indexes of
 * each HV_HYP_PAGE will differ between the types of GPADL, for example if
 * PAGE_SIZE is 64K:
 *
 * BUFFER:
 *
 * gva:    |--       64k      --|--       64k      --| ... |
 * gpa:    | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k |
 * index:  0    1    2     15   16   17   18 .. 31   32 ...
 *         |    |    ...   |    |    |    ...   |   ...
 *         v    V          V    V    V          V
 * gpadl:  | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k | ... |
 * index:  0    1    2 ... 15   16   17   18 .. 31   32 ...
 *
 * RING:
 *
 *         | header |     data     | header |     data     |
 * gva:    |-- 64k --|-- 64k --| ... |-- 64k --|-- 64k --| ... |
 * gpa:    | 4k | .. | 4k | 4k | ... | 4k | ... | 4k | .. | 4k | .. | ... |
 * index:  0    1    16   17   18    31   ...   n   n+1   n+16 ...   2n
 *         |         /    /          /          |         /          /
 *         |        /    /          /           |        /          /
 *         |       /    /   ...    /    ...     |       /    ...   /
 *         |      /    /          /             |      /          /
 *         |     /    /          /              |     /          /
 *         V    V    V          V               V    V          v
 * gpadl:  | 4k | 4k |   ...    |    ...        | 4k | 4k |    ...    |
 * index:  0    1    2   ...    16   ...       n-15 n-14 n-13  ...  2n-30
 */
enum hv_gpadl_type {
	HV_GPADL_BUFFER,
	HV_GPADL_RING
};

/* Single-page buffer */
struct hv_page_buffer {
	u32 len;
	u32 offset;
	u64 pfn;
};

/* Multiple-page buffer */
struct hv_multipage_buffer {
	/* Length and Offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};

/*
 * Multiple-page buffer array; the pfn array is variable size:
 * The number of entries in the PFN array is determined by
 * "len" and "offset".
 */
struct hv_mpb_array {
	/* Length and Offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[];
};

/* 0x18 includes the proprietary packet header */
#define MAX_PAGE_BUFFER_PACKET		(0x18 + \
					(sizeof(struct hv_page_buffer) * \
					 MAX_PAGE_BUFFER_COUNT))
#define MAX_MULTIPAGE_BUFFER_PACKET	(0x18 + \
					 sizeof(struct hv_multipage_buffer))


#pragma pack(pop)

struct hv_ring_buffer {
	/* Offset in bytes from the start of ring data below */
	u32 write_index;

	/* Offset in bytes from the start of ring data below */
	u32 read_index;

	u32 interrupt_mask;

	/*
	 * WS2012/Win8 and later versions of Hyper-V implement interrupt
	 * driven flow management. The feature bit feat_pending_send_sz
	 * is set by the host on the host->guest ring buffer, and by the
	 * guest on the guest->host ring buffer.
	 *
	 * The meaning of the feature bit is a bit complex in that it has
	 * semantics that apply to both ring buffers. If the guest sets
	 * the feature bit in the guest->host ring buffer, the guest is
	 * telling the host that:
	 * 1) It will set the pending_send_sz field in the guest->host ring
	 *    buffer when it is waiting for space to become available, and
	 * 2) It will read the pending_send_sz field in the host->guest
	 *    ring buffer and interrupt the host when it frees enough space
	 *
	 * Similarly, if the host sets the feature bit in the host->guest
	 * ring buffer, the host is telling the guest that:
	 * 1) It will set the pending_send_sz field in the host->guest ring
	 *    buffer when it is waiting for space to become available, and
	 * 2) It will read the pending_send_sz field in the guest->host
	 *    ring buffer and interrupt the guest when it frees enough space
	 *
	 * If either the guest or host does not set the feature bit that it
	 * owns, that guest or host must do polling if it encounters a full
	 * ring buffer, and not signal the other end with an interrupt.
	 */
	u32 pending_send_sz;
	u32 reserved1[12];
	union {
		struct {
			u32 feat_pending_send_sz:1;
		};
		u32 value;
	} feature_bits;

	/* Pad it to PAGE_SIZE so that data starts on page boundary */
	u8 reserved2[PAGE_SIZE - 68];

	/*
	 * Ring data starts here + RingDataStartOffset
	 * !!! DO NOT place any fields below this !!!
	 */
	u8 buffer[];
} __packed;

/* Calculate the proper size of a ring buffer; it must be page-aligned */
#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
					       (payload_sz))
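
/*
 * Example (illustrative only): a driver wanting 16K of ring data in each
 * direction could size its buffers as
 *
 *	send = VMBUS_RING_SIZE(16 * 1024);
 *	recv = VMBUS_RING_SIZE(16 * 1024);
 *
 * which adds room for the struct hv_ring_buffer header and rounds the
 * total up to a whole number of pages.
 */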

struct hv_ring_buffer_info {
	struct hv_ring_buffer *ring_buffer;
	u32 ring_size;			/* Include the shared header */
	struct reciprocal_value ring_size_div10_reciprocal;
	spinlock_t ring_lock;

	u32 ring_datasize;		/* < ring_size */
	u32 priv_read_index;
	/*
	 * The ring buffer mutex lock. This lock prevents the ring buffer from
	 * being freed while the ring buffer is being accessed.
	 */
	struct mutex ring_buffer_mutex;
};


static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, read;

	dsize = rbi->ring_datasize;
	read_loc = rbi->ring_buffer->read_index;
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	read = write_loc >= read_loc ? (write_loc - read_loc) :
		(dsize - read_loc) + write_loc;

	return read;
}

static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, write;

	dsize = rbi->ring_datasize;
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = rbi->ring_buffer->write_index;

	write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	return write;
}

static inline u32 hv_get_avail_to_write_percent(
		const struct hv_ring_buffer_info *rbi)
{
	u32 avail_write = hv_get_bytes_to_write(rbi);

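	/*
	 * (avail_write << 3) + (avail_write << 1) == avail_write * 10;
	 * dividing by ring_size / 10 therefore yields a percentage.
	 */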
	return reciprocal_divide(
			(avail_write << 3) + (avail_write << 1),
			rbi->ring_size_div10_reciprocal);
}

/*
 * The VMbus version is a 32-bit entity broken up into
 * two 16-bit quantities: major_number.minor_number.
 *
 * 0 . 13 (Windows Server 2008)
 * 1 . 1  (Windows 7)
 * 2 . 4  (Windows 8)
 * 3 . 0  (Windows 8.1)
 * 4 . 0  (Windows 10)
 * 4 . 1  (Windows 10 RS3)
 * 5 . 0  (Newer Windows 10)
 * 5 . 1  (Windows 10 RS4)
 * 5 . 2  (Windows Server 2019, RS5)
 */

#define VERSION_WS2008 ((0 << 16) | (13))
#define VERSION_WIN7 ((1 << 16) | (1))
#define VERSION_WIN8 ((2 << 16) | (4))
#define VERSION_WIN8_1 ((3 << 16) | (0))
#define VERSION_WIN10 ((4 << 16) | (0))
#define VERSION_WIN10_V4_1 ((4 << 16) | (1))
#define VERSION_WIN10_V5 ((5 << 16) | (0))
#define VERSION_WIN10_V5_1 ((5 << 16) | (1))
#define VERSION_WIN10_V5_2 ((5 << 16) | (2))
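
/*
 * Example (illustrative only): splitting a negotiated version such as
 * vmbus_proto_version back into its halves:
 *
 *	u16 major = version >> 16;
 *	u16 minor = version & 0xffff;
 */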

/* The maximum size of a pipe payload is 16K */
#define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)

/* Define PipeMode values. */
#define VMBUS_PIPE_TYPE_BYTE		0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE		0x00000004

/* The size of the user defined data buffer for non-pipe offers. */
#define MAX_USER_DEFINED_BYTES		120

/* The size of the user defined data buffer for pipe offers. */
#define MAX_PIPE_USER_DEFINED_BYTES	116

/*
 * At the center of the Channel Management library is the Channel Offer. This
 * struct contains the fundamental information about an offer.
 */
struct vmbus_channel_offer {
	guid_t if_type;
	guid_t if_instance;

	/*
	 * These two fields are not currently used.
	 */
	u64 reserved1;
	u64 reserved2;

	u16 chn_flags;
	u16 mmio_megabytes;	/* in bytes * 1024 * 1024 */

	union {
		/* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
		struct {
			unsigned char user_def[MAX_USER_DEFINED_BYTES];
		} std;

		/*
		 * Pipes:
		 * The following structure is an integrated pipe protocol, which
		 * is implemented on top of standard user-defined data. Pipe
		 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
		 * use.
		 */
		struct {
			u32 pipe_mode;
			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
		} pipe;
	} u;
	/*
	 * The sub_channel_index is defined in Win8: a value of zero means a
	 * primary channel and a value of non-zero means a sub-channel.
	 *
	 * Before Win8, the field is reserved, meaning it's always zero.
	 */
	u16 sub_channel_index;
	u16 reserved3;
} __packed;

/* Server Flags */
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE	1
#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES	2
#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS		4
#define VMBUS_CHANNEL_NAMED_PIPE_MODE			0x10
#define VMBUS_CHANNEL_LOOPBACK_OFFER			0x100
#define VMBUS_CHANNEL_PARENT_OFFER			0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400
#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER		0x2000

struct vmpacket_descriptor {
	u16 type;
	u16 offset8;
	u16 len8;
	u16 flags;
	u64 trans_id;
} __packed;

struct vmpacket_header {
	u32 prev_pkt_start_offset;
	struct vmpacket_descriptor descriptor;
} __packed;

struct vmtransfer_page_range {
	u32 byte_count;
	u32 byte_offset;
} __packed;

struct vmtransfer_page_packet_header {
	struct vmpacket_descriptor d;
	u16 xfer_pageset_id;
	u8 sender_owns_set;
	u8 reserved;
	u32 range_cnt;
	struct vmtransfer_page_range ranges[1];
} __packed;

struct vmgpadl_packet_header {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;
} __packed;

struct vmadd_remove_transfer_page_set {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u16 xfer_pageset_id;
	u16 reserved;
} __packed;

/*
 * This structure defines a range in guest physical space that can be made to
 * look virtually contiguous.
 */
struct gpa_range {
	u32 byte_count;
	u32 byte_offset;
	u64 pfn_array[];
};

/*
 * This is the format for an Establish Gpadl packet, which contains a handle by
 * which this GPADL will be known and a set of GPA ranges associated with it.
 * This can be converted to an MDL by the guest OS. If there are multiple GPA
 * ranges, then the resulting MDL will be "chained," representing multiple VA
 * ranges.
 */
struct vmestablish_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/*
 * This is the format for a Teardown Gpadl packet, which indicates that the
 * GPADL handle in the Establish Gpadl packet will never be referenced again.
 */
struct vmteardown_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;	/* for alignment to an 8-byte boundary */
} __packed;

/*
 * This is the format for a GPA-Direct packet, which contains a set of GPA
 * ranges, in addition to commands and/or data.
 */
struct vmdata_gpa_direct {
	struct vmpacket_descriptor d;
	u32 reserved;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/* This is the format for an Additional Data Packet. */
struct vmadditional_data {
	struct vmpacket_descriptor d;
	u64 total_bytes;
	u32 offset;
	u32 byte_cnt;
	unsigned char data[1];
} __packed;

union vmpacket_largest_possible_header {
	struct vmpacket_descriptor simple_hdr;
	struct vmtransfer_page_packet_header xfer_page_hdr;
	struct vmgpadl_packet_header gpadl_hdr;
	struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
	struct vmestablish_gpadl establish_gpadl_hdr;
	struct vmteardown_gpadl teardown_gpadl_hdr;
	struct vmdata_gpa_direct data_gpa_direct_hdr;
};

#define VMPACKET_DATA_START_ADDRESS(__packet)	\
	(void *)(((unsigned char *)__packet) +	\
	 ((struct vmpacket_descriptor *)__packet)->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)		\
	((((struct vmpacket_descriptor *)__packet)->len8 -	\
	  ((struct vmpacket_descriptor *)__packet)->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)	\
	(((struct vmpacket_descriptor *)__packet)->type)

enum vmbus_packet_type {
	VM_PKT_INVALID = 0x0,
	VM_PKT_SYNCH = 0x1,
	VM_PKT_ADD_XFER_PAGESET = 0x2,
	VM_PKT_RM_XFER_PAGESET = 0x3,
	VM_PKT_ESTABLISH_GPADL = 0x4,
	VM_PKT_TEARDOWN_GPADL = 0x5,
	VM_PKT_DATA_INBAND = 0x6,
	VM_PKT_DATA_USING_XFER_PAGES = 0x7,
	VM_PKT_DATA_USING_GPADL = 0x8,
	VM_PKT_DATA_USING_GPA_DIRECT = 0x9,
	VM_PKT_CANCEL_REQUEST = 0xa,
	VM_PKT_COMP = 0xb,
	VM_PKT_DATA_USING_ADDITIONAL_PKT = 0xc,
	VM_PKT_ADDITIONAL_DATA = 0xd
};

#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED	1


/* Version 1 messages */
enum vmbus_channel_message_type {
	CHANNELMSG_INVALID = 0,
	CHANNELMSG_OFFERCHANNEL = 1,
	CHANNELMSG_RESCIND_CHANNELOFFER = 2,
	CHANNELMSG_REQUESTOFFERS = 3,
	CHANNELMSG_ALLOFFERS_DELIVERED = 4,
	CHANNELMSG_OPENCHANNEL = 5,
	CHANNELMSG_OPENCHANNEL_RESULT = 6,
	CHANNELMSG_CLOSECHANNEL = 7,
	CHANNELMSG_GPADL_HEADER = 8,
	CHANNELMSG_GPADL_BODY = 9,
	CHANNELMSG_GPADL_CREATED = 10,
	CHANNELMSG_GPADL_TEARDOWN = 11,
	CHANNELMSG_GPADL_TORNDOWN = 12,
	CHANNELMSG_RELID_RELEASED = 13,
	CHANNELMSG_INITIATE_CONTACT = 14,
	CHANNELMSG_VERSION_RESPONSE = 15,
	CHANNELMSG_UNLOAD = 16,
	CHANNELMSG_UNLOAD_RESPONSE = 17,
	CHANNELMSG_18 = 18,
	CHANNELMSG_19 = 19,
	CHANNELMSG_20 = 20,
	CHANNELMSG_TL_CONNECT_REQUEST = 21,
	CHANNELMSG_MODIFYCHANNEL = 22,
	CHANNELMSG_TL_CONNECT_RESULT = 23,
	CHANNELMSG_COUNT
};

/* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */
#define INVALID_RELID	U32_MAX

struct vmbus_channel_message_header {
	enum vmbus_channel_message_type msgtype;
	u32 padding;
} __packed;

/* Query VMBus Version parameters */
struct vmbus_channel_query_vmbus_version {
	struct vmbus_channel_message_header header;
	u32 version;
} __packed;

/* VMBus Version Supported parameters */
struct vmbus_channel_version_supported {
	struct vmbus_channel_message_header header;
	u8 version_supported;
} __packed;

/* Offer Channel parameters */
struct vmbus_channel_offer_channel {
	struct vmbus_channel_message_header header;
	struct vmbus_channel_offer offer;
	u32 child_relid;
	u8 monitorid;
	/*
	 * win7 and beyond splits this field into a bit field.
	 */
	u8 monitor_allocated:1;
	u8 reserved:7;
	/*
	 * These are new fields added in win7 and later.
	 * Do not access these fields without checking the
	 * negotiated protocol.
	 *
	 * If "is_dedicated_interrupt" is set, we must not set the
	 * associated bit in the channel bitmap while sending the
	 * interrupt to the host.
	 *
	 * connection_id is to be used in signaling the host.
	 */
	u16 is_dedicated_interrupt:1;
	u16 reserved1:15;
	u32 connection_id;
} __packed;

/* Rescind Offer parameters */
struct vmbus_channel_rescind_offer {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

static inline u32
hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
{
	return rbi->ring_buffer->pending_send_sz;
}

/*
 * Request Offer -- no parameters, SynIC message contains the partition ID
 * Set Snoop -- no parameters, SynIC message contains the partition ID
 * Clear Snoop -- no parameters, SynIC message contains the partition ID
 * All Offers Delivered -- no parameters, SynIC message contains the partition
 *			   ID
 * Flush Client -- no parameters, SynIC message contains the partition ID
 */

/* Open Channel parameters */
struct vmbus_channel_open_channel {
	struct vmbus_channel_message_header header;

	/* Identifies the specific VMBus channel that is being opened. */
	u32 child_relid;

	/* ID making a particular open request at a channel offer unique. */
	u32 openid;

	/* GPADL for the channel's ring buffer. */
	u32 ringbuffer_gpadlhandle;

	/*
	 * Starting with win8, this field will be used to specify
	 * the target virtual processor on which to deliver the interrupt for
	 * the host to guest communication.
	 * Prior to win8, incoming channel interrupts would only
	 * be delivered on cpu 0. Setting this value to 0 would
	 * preserve the earlier behavior.
	 */
	u32 target_vp;

	/*
	 * The upstream ring buffer begins at offset zero in the memory
	 * described by RingBufferGpadlHandle. The downstream ring buffer
	 * follows it at this offset (in pages).
	 */
	u32 downstream_ringbuffer_pageoffset;

	/* User-specific data to be passed along to the server endpoint. */
	unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;

/* Open Channel Result parameters */
struct vmbus_channel_open_result {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 openid;
	u32 status;
} __packed;

/* Close channel parameters */
struct vmbus_channel_close_channel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

/* Channel Message GPADL */
#define GPADL_TYPE_RING_BUFFER		1
#define GPADL_TYPE_SERVER_SAVE_AREA	2
#define GPADL_TYPE_TRANSACTION		8

/*
 * The number of PFNs in a GPADL message is defined by the number of
 * pages that would be spanned by ByteCount and ByteOffset. If the
 * implied number of PFNs won't fit in this packet, there will be a
 * follow-up packet that contains more.
 */
struct vmbus_channel_gpadl_header {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u16 range_buflen;
	u16 rangecount;
	struct gpa_range range[];
} __packed;

/* This is the followup packet that contains more PFNs. */
struct vmbus_channel_gpadl_body {
	struct vmbus_channel_message_header header;
	u32 msgnumber;
	u32 gpadl;
	u64 pfn[];
} __packed;

struct vmbus_channel_gpadl_created {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u32 creation_status;
} __packed;

struct vmbus_channel_gpadl_teardown {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
} __packed;

struct vmbus_channel_gpadl_torndown {
	struct vmbus_channel_message_header header;
	u32 gpadl;
} __packed;

struct vmbus_channel_relid_released {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

struct vmbus_channel_initiate_contact {
	struct vmbus_channel_message_header header;
	u32 vmbus_version_requested;
	u32 target_vcpu; /* The VCPU the host should respond to */
	union {
		u64 interrupt_page;
		struct {
			u8 msg_sint;
			u8 padding1[3];
			u32 padding2;
		};
	};
	u64 monitor_page1;
	u64 monitor_page2;
} __packed;

/* Hyper-V socket: guest's connect()-ing to host */
struct vmbus_channel_tl_connect_request {
	struct vmbus_channel_message_header header;
	guid_t guest_endpoint_id;
	guid_t host_service_id;
} __packed;

/* Modify Channel parameters, cf. vmbus_send_modifychannel() */
struct vmbus_channel_modifychannel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 target_vp;
} __packed;

struct vmbus_channel_version_response {
	struct vmbus_channel_message_header header;
	u8 version_supported;

	u8 connection_state;
	u16 padding;

	/*
	 * On new hosts that support VMBus protocol 5.0, we must use
	 * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
	 * and for subsequent messages, we must use the Message Connection ID
	 * field in the host-returned Version Response Message.
	 *
	 * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
	 */
	u32 msg_conn_id;
} __packed;

enum vmbus_channel_state {
	CHANNEL_OFFER_STATE,
	CHANNEL_OPENING_STATE,
	CHANNEL_OPEN_STATE,
	CHANNEL_OPENED_STATE,
};

/*
 * Represents each channel msg on the vmbus connection. This is a
 * variable-size data structure depending on the msg type itself.
 */
struct vmbus_channel_msginfo {
	/* Bookkeeping stuff */
	struct list_head msglistentry;

	/* So far, this is only used to handle gpadl body message */
	struct list_head submsglist;

	/* Synchronize the request/response if needed */
	struct completion waitevent;
	struct vmbus_channel *waiting_channel;
	union {
		struct vmbus_channel_version_supported version_supported;
		struct vmbus_channel_open_result open_result;
		struct vmbus_channel_gpadl_torndown gpadl_torndown;
		struct vmbus_channel_gpadl_created gpadl_created;
		struct vmbus_channel_version_response version_response;
	} response;

	u32 msgsize;
	/*
	 * The channel message that goes out on the "wire".
	 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header.
	 */
	unsigned char msg[];
};

struct vmbus_close_msg {
	struct vmbus_channel_msginfo info;
	struct vmbus_channel_close_channel msg;
};

/* Define connection identifier type. */
union hv_connection_id {
	u32 asu32;
	struct {
		u32 id:24;
		u32 reserved:8;
	} u;
};

enum vmbus_device_type {
	HV_IDE = 0,
	HV_SCSI,
	HV_FC,
	HV_NIC,
	HV_ND,
	HV_PCIE,
	HV_FB,
	HV_KBD,
	HV_MOUSE,
	HV_KVP,
	HV_TS,
	HV_HB,
	HV_SHUTDOWN,
	HV_FCOPY,
	HV_BACKUP,
	HV_DM,
	HV_UNKNOWN,
};

/*
 * Provides request ids for VMBus. Encapsulates guest memory
 * addresses and stores the next available slot in req_arr
 * to generate new ids in constant time.
 */
struct vmbus_requestor {
	u64 *req_arr;
	unsigned long *req_bitmap; /* is a given slot available? */
	u32 size;
	u64 next_request_id;
	spinlock_t req_lock; /* provides atomicity */
};

#define VMBUS_NO_RQSTOR U64_MAX
#define VMBUS_RQST_ERROR (U64_MAX - 1)
#define VMBUS_RQST_ID_NO_RESPONSE (U64_MAX - 2)

struct vmbus_device {
	u16 dev_type;
	guid_t guid;
	bool perf_device;
};

struct vmbus_channel {
	struct list_head listentry;

	struct hv_device *device_obj;

	enum vmbus_channel_state state;

	struct vmbus_channel_offer_channel offermsg;
	/*
	 * These are based on the OfferMsg.MonitorId.
	 * Save it here for easy access.
	 */
	u8 monitor_grp;
	u8 monitor_bit;

	bool rescind; /* got rescind msg */
	struct completion rescind_event;

	u32 ringbuffer_gpadlhandle;

	/* Allocated memory for ring buffer */
	struct page *ringbuffer_page;
	u32 ringbuffer_pagecount;
	u32 ringbuffer_send_offset;
	struct hv_ring_buffer_info outbound;	/* send to parent */
	struct hv_ring_buffer_info inbound;	/* receive from parent */

	struct vmbus_close_msg close_msg;

	/* Statistics */
	u64 interrupts;	/* Host to Guest interrupts */
	u64 sig_events;	/* Guest to Host events */

	/*
	 * Guest to host interrupts caused by the outbound ring buffer changing
	 * from empty to not empty.
	 */
	u64 intr_out_empty;

	/*
	 * Indicates that a full outbound ring buffer was encountered. The flag
	 * is set to true when a full outbound ring buffer is encountered and
	 * set to false when a write to the outbound ring buffer is completed.
	 */
	bool out_full_flag;

	/* Channel callback is invoked in softirq context */
	struct tasklet_struct callback_event;
	void (*onchannel_callback)(void *context);
	void *channel_callback_context;

	void (*change_target_cpu_callback)(struct vmbus_channel *channel,
			u32 old, u32 new);

	/*
	 * Synchronize channel scheduling and channel removal; see the inline
	 * comments in vmbus_chan_sched() and vmbus_reset_channel_cb().
	 */
	spinlock_t sched_lock;

	/*
	 * A channel can be marked for one of three modes of reading:
	 *   BATCHED - callback called from tasklet and should read
	 *             channel until empty. Interrupts from the host
	 *             are masked while read is in process (default).
	 *   DIRECT - callback called from tasklet (softirq).
	 *   ISR - callback called in interrupt context and must
	 *         invoke its own deferred processing.
	 *         Host interrupts are disabled and must be re-enabled
	 *         when ring is empty.
	 */
	enum hv_callback_mode {
		HV_CALL_BATCHED,
		HV_CALL_DIRECT,
		HV_CALL_ISR
	} callback_mode;

	bool is_dedicated_interrupt;
	u64 sig_event;

	/*
	 * Starting with win8, this field will be used to specify the
	 * target CPU on which to deliver the interrupt for the host
	 * to guest communication.
	 *
	 * Prior to win8, incoming channel interrupts would only be
	 * delivered on CPU 0. Setting this value to 0 would preserve
	 * the earlier behavior.
	 */
	u32 target_cpu;
	/*
	 * Support for sub-channels. For high performance devices,
	 * it will be useful to have multiple sub-channels to support
	 * a scalable communication infrastructure with the host.
	 * The support for sub-channels is implemented as an extension
	 * to the current infrastructure.
	 * The initial offer is considered the primary channel and this
	 * offer message will indicate if the host supports sub-channels.
	 * The guest is free to ask for sub-channels to be offered and can
	 * open these sub-channels as a normal "primary" channel. However,
	 * all sub-channels will have the same type and instance guids as the
	 * primary channel. Requests sent on a given channel will result in a
	 * response on the same channel.
	 */

	/*
	 * Sub-channel creation callback. This callback will be called in
	 * process context when a sub-channel offer is received from the host.
	 * The guest can open the sub-channel in the context of this callback.
	 */
	void (*sc_creation_callback)(struct vmbus_channel *new_sc);

	/*
	 * Channel rescind callback. Some channels (the hvsock ones) need to
	 * register a callback which is invoked in vmbus_onoffer_rescind().
	 */
	void (*chn_rescind_callback)(struct vmbus_channel *channel);

	/*
	 * All Sub-channels of a primary channel are linked here.
	 */
	struct list_head sc_list;
	/*
	 * The primary channel this sub-channel belongs to.
	 * This will be NULL for the primary channel.
	 */
	struct vmbus_channel *primary_channel;
	/*
	 * Support per-channel state for use by vmbus drivers.
	 */
	void *per_channel_state;

	/*
	 * Defer freeing channel until after all CPUs have
	 * gone through grace period.
	 */
	struct rcu_head rcu;

	/*
	 * For sysfs per-channel properties.
	 */
	struct kobject kobj;

	/*
	 * For performance critical channels (storage, networking
	 * etc.), Hyper-V has a mechanism to enhance the throughput
	 * at the expense of latency:
	 * When the host is to be signaled, we just set a bit in a shared page
	 * and this bit will be inspected by the hypervisor within a certain
	 * window and if the bit is set, the host will be signaled. The window
	 * of time is the monitor latency - currently around 100 usecs. This
	 * mechanism improves throughput by:
	 *
	 * A) Making the host more efficient - each time it wakes up,
	 *    potentially it will process more packets. The
	 *    monitor latency allows a batch to build up.
	 * B) By deferring the hypercall to signal, we will also minimize
	 *    the interrupts.
	 *
	 * Clearly, these optimizations improve throughput at the expense of
	 * latency. Furthermore, since the channel is shared for both
	 * control and data messages, control messages currently suffer
	 * unnecessary latency adversely impacting performance and boot
	 * time. To fix this issue, permit tagging the channel as being
	 * in "low latency" mode. In this mode, we will bypass the monitor
	 * mechanism.
	 */
	bool low_latency;

	bool probe_done;

	/*
	 * Cache the device ID here for easy access; this is useful, in
	 * particular, in situations where the channel's device_obj has
	 * not been allocated/initialized yet.
	 */
	u16 device_id;

	/*
	 * We must offload the handling of the primary/sub channels
	 * from the single-threaded vmbus_connection.work_queue to
	 * two different workqueues, otherwise we can block
	 * vmbus_connection.work_queue and hang: see vmbus_process_offer().
	 */
	struct work_struct add_channel_work;

	/*
	 * Guest to host interrupts caused by the inbound ring buffer changing
	 * from full to not full while a packet is waiting.
	 */
	u64 intr_in_full;

	/*
	 * The total number of write operations that encountered a full
	 * outbound ring buffer.
	 */
	u64 out_full_total;

	/*
	 * The number of write operations that were the first to encounter a
	 * full outbound ring buffer.
	 */
	u64 out_full_first;

	/* enabling/disabling fuzz testing on the channel (default is false) */
	bool fuzz_testing_state;

	/*
	 * Interrupt delay will delay the guest from emptying the ring buffer
	 * for a specific amount of time. The delay is in microseconds and will
	 * be between 1 and a maximum of 1000; its default is 0 (no delay).
	 * The Message delay will delay guest reading on a per message basis
	 * in microseconds between 1 and 1000 with the default being 0
	 * (no delay).
	 */
	u32 fuzz_testing_interrupt_delay;
	u32 fuzz_testing_message_delay;

	/* request/transaction ids for VMBus */
	struct vmbus_requestor requestor;
	u32 rqstor_size;
};

u64 vmbus_next_request_id(struct vmbus_requestor *rqstor, u64 rqst_addr);
u64 vmbus_request_addr(struct vmbus_requestor *rqstor, u64 trans_id);
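
/*
 * Example (illustrative sketch): a driver's send path can map a guest
 * pointer to a request id before handing it to the host, then recover
 * the pointer when the completion arrives:
 *
 *	u64 rqst_id = vmbus_next_request_id(&channel->requestor, (u64)ptr);
 *
 *	if (rqst_id == VMBUS_RQST_ERROR)
 *		return -EAGAIN;
 *
 * and, in the completion handler,
 *
 *	ptr = (void *)vmbus_request_addr(&channel->requestor, rqst_id);
 */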

static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
	return !!(c->offermsg.offer.chn_flags &
		  VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
}

static inline bool is_sub_channel(const struct vmbus_channel *c)
{
	return c->offermsg.offer.sub_channel_index != 0;
}

static inline void set_channel_read_mode(struct vmbus_channel *c,
					enum hv_callback_mode mode)
{
	c->callback_mode = mode;
}

static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
	c->per_channel_state = s;
}

static inline void *get_per_channel_state(struct vmbus_channel *c)
{
	return c->per_channel_state;
}

static inline void set_channel_pending_send_size(struct vmbus_channel *c,
						u32 size)
{
	unsigned long flags;

	if (size) {
		spin_lock_irqsave(&c->outbound.ring_lock, flags);
		++c->out_full_total;

		if (!c->out_full_flag) {
			++c->out_full_first;
			c->out_full_flag = true;
		}
		spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
	} else {
		c->out_full_flag = false;
	}

	c->outbound.ring_buffer->pending_send_sz = size;
}
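
/*
 * Example (illustrative sketch): a sender that finds the outbound ring
 * too full can ask the host for an interrupt once 'bytes' of space have
 * been freed:
 *
 *	if (hv_get_bytes_to_write(&channel->outbound) < bytes)
 *		set_channel_pending_send_size(channel, bytes);
 */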

static inline void set_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = true;
}

static inline void clear_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = false;
}

void vmbus_onmessage(struct vmbus_channel_message_header *hdr);

int vmbus_request_offers(void);

/*
 * APIs for managing sub-channels.
 */

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
			void (*sc_cr_cb)(struct vmbus_channel *new_sc));

void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
		void (*chn_rescind_cb)(struct vmbus_channel *));

/*
 * Check if sub-channels have already been offered. This API will be useful
 * when the driver is unloaded after establishing sub-channels. In this case,
 * when the driver is re-loaded, the driver would have to check if the
 * subchannels have already been established before attempting to request
 * the creation of sub-channels.
 * This function returns TRUE to indicate that subchannels have already been
 * created.
 * This function should be invoked after setting the callback function for
 * sub-channel creation.
 */
bool vmbus_are_subchannels_present(struct vmbus_channel *primary);

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;
	struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_multipage_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_multipage_buffer range;
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_packet_mpb_array {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_mpb_array range;
} __packed;

int vmbus_alloc_ring(struct vmbus_channel *channel,
		     u32 send_size, u32 recv_size);
void vmbus_free_ring(struct vmbus_channel *channel);

int vmbus_connect_ring(struct vmbus_channel *channel,
		       void (*onchannel_callback)(void *context),
		       void *context);
int vmbus_disconnect_ring(struct vmbus_channel *channel);

extern int vmbus_open(struct vmbus_channel *channel,
			u32 send_ringbuffersize,
			u32 recv_ringbuffersize,
			void *userdata,
			u32 userdatalen,
			void (*onchannel_callback)(void *context),
			void *context);
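
/*
 * Example (illustrative sketch; my_onchannel_cb is a hypothetical
 * callback): opening a channel from a driver's probe routine:
 *
 *	ret = vmbus_open(dev->channel, VMBUS_RING_SIZE(16 * 1024),
 *			 VMBUS_RING_SIZE(16 * 1024), NULL, 0,
 *			 my_onchannel_cb, dev->channel);
 *	if (ret)
 *		return ret;
 */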

extern void vmbus_close(struct vmbus_channel *channel);

extern int vmbus_sendpacket(struct vmbus_channel *channel,
			void *buffer,
			u32 bufferLen,
			u64 requestid,
			enum vmbus_packet_type type,
			u32 flags);
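
/*
 * Example (illustrative sketch): sending an in-band message and asking
 * for a completion packet:
 *
 *	ret = vmbus_sendpacket(channel, &msg, sizeof(msg), rqst_id,
 *			       VM_PKT_DATA_INBAND,
 *			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 */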

extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
			struct hv_page_buffer pagebuffers[],
			u32 pagecount,
			void *buffer,
			u32 bufferlen,
			u64 requestid);

extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
			struct vmbus_packet_mpb_array *mpb,
			u32 desc_size,
			void *buffer,
			u32 bufferlen,
			u64 requestid);

extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
			void *kbuffer,
			u32 size,
			u32 *gpadl_handle);

extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
			u32 gpadl_handle);
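
/*
 * Example (illustrative sketch): making a guest buffer visible to the
 * host and releasing it again once the host no longer references it:
 *
 *	ret = vmbus_establish_gpadl(channel, buf, buf_size, &gpadl);
 *	...pass 'gpadl' to the host in a channel message...
 *	vmbus_teardown_gpadl(channel, gpadl);
 */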
1184
d3b26dd7
DC
1185void vmbus_reset_channel_cb(struct vmbus_channel *channel);
1186
c35470b2
S
1187extern int vmbus_recvpacket(struct vmbus_channel *channel,
1188 void *buffer,
1189 u32 bufferlen,
1190 u32 *buffer_actual_len,
1191 u64 *requestid);
1192
1193extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
1194 void *buffer,
1195 u32 bufferlen,
1196 u32 *buffer_actual_len,
1197 u64 *requestid);
1198
c35470b2 1199
c35470b2
S
1200extern void vmbus_ontimer(unsigned long data);
1201
35ea09c3
S
1202/* Base driver object */
1203struct hv_driver {
1204 const char *name;
1205
8981da32
DC
1206 /*
1207 * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
1208 * channel flag, actually doesn't mean a synthetic device because the
1209 * offer's if_type/if_instance can change for every new hvsock
1210 * connection.
1211 *
1212 * However, to facilitate the notification of new-offer/rescind-offer
1213 * from vmbus driver to hvsock driver, we can handle hvsock offer as
1214 * a special vmbus device, and hence we need the below flag to
1215 * indicate if the driver is the hvsock driver or not: we need to
1216 * specially treat the hvosck offer & driver in vmbus_match().
1217 */
1218 bool hvsock;
1219
35ea09c3 1220 /* the device type supported by this driver */
593db803 1221 guid_t dev_type;
2e2c1d17 1222 const struct hv_vmbus_device_id *id_table;
35ea09c3
S
1223
1224 struct device_driver driver;
1225
fc76936d
SH
1226 /* dynamic device GUID's */
1227 struct {
1228 spinlock_t lock;
1229 struct list_head list;
1230 } dynids;
1231
84946899 1232 int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
35ea09c3
S
1233 int (*remove)(struct hv_device *);
1234 void (*shutdown)(struct hv_device *);
1235
271b2224
DC
1236 int (*suspend)(struct hv_device *);
1237 int (*resume)(struct hv_device *);
35ea09c3
S
1238
1239};
1240
1241/* Base device object */
1242struct hv_device {
1243 /* the device type id of this device */
593db803 1244 guid_t dev_type;
35ea09c3
S
1245
1246 /* the device instance id of this device */
593db803 1247 guid_t dev_instance;
7047f17d
S
1248 u16 vendor_id;
1249 u16 device_id;
35ea09c3
S
1250
1251 struct device device;
d765edbb 1252 char *driver_override; /* Driver name to force a match */
35ea09c3
S
1253
1254 struct vmbus_channel *channel;
c2e5df61 1255 struct kset *channels_kset;
af9ca6f9
BB
1256
1257 /* place holder to keep track of the dir for hv device in debugfs */
1258 struct dentry *debug_dir;
1259
35ea09c3
S
1260};
1261
27b5b3ca
S
1262
1263static inline struct hv_device *device_to_hv_device(struct device *d)
1264{
1265 return container_of(d, struct hv_device, device);
1266}
1267
1268static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
1269{
1270 return container_of(d, struct hv_driver, driver);
1271}
1272
ab101e86
S
1273static inline void hv_set_drvdata(struct hv_device *dev, void *data)
1274{
1275 dev_set_drvdata(&dev->device, data);
1276}
1277
1278static inline void *hv_get_drvdata(struct hv_device *dev)
1279{
1280 return dev_get_drvdata(&dev->device);
1281}
27b5b3ca 1282
4827ee1d
SH
1283struct hv_ring_buffer_debug_info {
1284 u32 current_interrupt_mask;
1285 u32 current_read_index;
1286 u32 current_write_index;
1287 u32 bytes_avail_toread;
1288 u32 bytes_avail_towrite;
1289};
1290
ba50bf1c 1291
14948e39 1292int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
ba50bf1c 1293 struct hv_ring_buffer_debug_info *debug_info);
4827ee1d 1294
27b5b3ca 1295/* Vmbus interface */
768fa219
GKH
1296#define vmbus_driver_register(driver) \
1297 __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
1298int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
1299 struct module *owner,
1300 const char *mod_name);
1301void vmbus_driver_unregister(struct hv_driver *hv_driver);
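
/*
 * Example (illustrative sketch; all "my_*" names are hypothetical):
 * a minimal driver registration might look like
 *
 *	static const struct hv_vmbus_device_id my_id_table[] = {
 *		{ HV_NIC_GUID, },
 *		{ },
 *	};
 *
 *	static struct hv_driver my_drv = {
 *		.name = "my_drv",
 *		.id_table = my_id_table,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *
 * followed by vmbus_driver_register(&my_drv) in module init and
 * vmbus_driver_unregister(&my_drv) in module exit.
 */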

void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);

int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok);
void vmbus_free_mmio(resource_size_t start, resource_size_t size);

/*
 * GUID definitions of various offer types - services offered to the guest.
 */

/*
 * Network GUID
 * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
 */
#define HV_NIC_GUID \
	.guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
			  0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)

/*
 * IDE GUID
 * {32412632-86cb-44a2-9b5c-50d1417354f5}
 */
#define HV_IDE_GUID \
	.guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
			  0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)

/*
 * SCSI GUID
 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
 */
#define HV_SCSI_GUID \
	.guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
			  0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)

/*
 * Shutdown GUID
 * {0e0b6031-5213-4934-818b-38d90ced39db}
 */
#define HV_SHUTDOWN_GUID \
	.guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
			  0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)

/*
 * Time Synch GUID
 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
 */
#define HV_TS_GUID \
	.guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
			  0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)

/*
 * Heartbeat GUID
 * {57164f39-9115-4e78-ab55-382f3bd5422d}
 */
#define HV_HEART_BEAT_GUID \
	.guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
			  0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)

/*
 * KVP GUID
 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
 */
#define HV_KVP_GUID \
	.guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
			  0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)

/*
 * Dynamic memory GUID
 * {525074dc-8985-46e2-8057-a307dc18a502}
 */
#define HV_DM_GUID \
	.guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
			  0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)

/*
 * Mouse GUID
 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
 */
#define HV_MOUSE_GUID \
	.guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
			  0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)

/*
 * Keyboard GUID
 * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
 */
#define HV_KBD_GUID \
	.guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
			  0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)

/*
 * VSS (Backup/Restore) GUID
 */
#define HV_VSS_GUID \
	.guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
			  0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
/*
 * Synthetic Video GUID
 * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
 */
#define HV_SYNTHVID_GUID \
	.guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
			  0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)

/*
 * Synthetic FC GUID
 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
 */
#define HV_SYNTHFC_GUID \
	.guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
			  0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)

/*
 * Guest File Copy Service
 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
 */

#define HV_FCOPY_GUID \
	.guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
			  0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)

/*
 * NetworkDirect. This is the guest RDMA service.
 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
 */
#define HV_ND_GUID \
	.guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
			  0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)

/*
 * PCI Express Pass Through
 * {44C4F61D-4444-4400-9D52-802E27EDE19F}
 */

#define HV_PCIE_GUID \
	.guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
			  0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)

/*
 * Linux doesn't support these 3 devices: the first two are for
 * Automatic Virtual Machine Activation, and the third is for
 * Remote Desktop Virtualization.
 * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
 * {3375baf4-9e15-4b30-b765-67acb10d607b}
 * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
 */

#define HV_AVMA1_GUID \
	.guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
			  0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)

#define HV_AVMA2_GUID \
	.guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
			  0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)

#define HV_RDV_GUID \
	.guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
			  0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)

/*
 * Common header for Hyper-V ICs
 */

#define ICMSGTYPE_NEGOTIATE		0
#define ICMSGTYPE_HEARTBEAT		1
#define ICMSGTYPE_KVPEXCHANGE		2
#define ICMSGTYPE_SHUTDOWN		3
#define ICMSGTYPE_TIMESYNC		4
#define ICMSGTYPE_VSS			5

#define ICMSGHDRFLAG_TRANSACTION	1
#define ICMSGHDRFLAG_REQUEST		2
#define ICMSGHDRFLAG_RESPONSE		4


/*
 * While we want to handle util services as regular devices,
 * there is only one instance of each of these services; so
 * we statically allocate the service specific state.
 */

struct hv_util_service {
	u8 *recv_buffer;
	void *channel;
	void (*util_cb)(void *);
	int (*util_init)(struct hv_util_service *);
	void (*util_deinit)(void);
	int (*util_pre_suspend)(void);
	int (*util_pre_resume)(void);
};

struct vmbuspipe_hdr {
	u32 flags;
	u32 msgsize;
} __packed;

struct ic_version {
	u16 major;
	u16 minor;
} __packed;

struct icmsg_hdr {
	struct ic_version icverframe;
	u16 icmsgtype;
	struct ic_version icvermsg;
	u16 icmsgsize;
	u32 status;
	u8 ictransaction_id;
	u8 icflags;
	u8 reserved[2];
} __packed;

struct icmsg_negotiate {
	u16 icframe_vercnt;
	u16 icmsg_vercnt;
	u32 reserved;
	struct ic_version icversion_data[1]; /* any size array */
} __packed;

struct shutdown_msg_data {
	u32 reason_code;
	u32 timeout_seconds;
	u32 flags;
	u8 display_message[2048];
} __packed;

struct heartbeat_msg_data {
	u64 seq_num;
	u32 reserved[8];
} __packed;

/* Time Sync IC defs */
#define ICTIMESYNCFLAG_PROBE	0
#define ICTIMESYNCFLAG_SYNC	1
#define ICTIMESYNCFLAG_SAMPLE	2

#ifdef __x86_64__
#define WLTIMEDELTA	116444736000000000L	/* in 100ns unit */
#else
#define WLTIMEDELTA	116444736000000000LL
#endif

struct ictimesync_data {
	u64 parenttime;
	u64 childtime;
	u64 roundtriptime;
	u8 flags;
} __packed;

struct ictimesync_ref_data {
	u64 parenttime;
	u64 vmreferencetime;
	u8 flags;
	char leapflags;
	char stratum;
	u8 reserved[3];
} __packed;

struct hyperv_service_callback {
	u8 msg_type;
	char *log_msg;
	guid_t data;
	struct vmbus_channel *channel;
	void (*callback)(void *context);
};

#define MAX_SRV_VER	0x7ffffff
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
				const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version);

void hv_process_channel_removal(struct vmbus_channel *channel);

void vmbus_setevent(struct vmbus_channel *channel);
/*
 * Negotiated version with the Host.
 */

extern __u32 vmbus_proto_version;

int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
				  const guid_t *shv_host_servie_id);
int vmbus_send_modifychannel(u32 child_relid, u32 target_vp);
void vmbus_set_event(struct vmbus_channel *channel);

/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->buffer;
}

/*
 * Mask off host interrupt callback notifications
 */
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;

	/* make sure mask update is not reordered */
	virt_mb();
}

/*
 * Re-enable host callback and return number of outstanding bytes
 */
static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 0;

	/* make sure mask update is not reordered */
	virt_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	return hv_get_bytes_to_read(rbi);
}

/*
 * An API to support in-place processing of incoming VMBUS packets.
 */

/* Get data payload associated with descriptor */
static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
{
	return (void *)((unsigned long)desc + (desc->offset8 << 3));
}

/* Get data size associated with descriptor */
static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
{
	return (desc->len8 << 3) - (desc->offset8 << 3);
}


struct vmpacket_descriptor *
hv_pkt_iter_first(struct vmbus_channel *channel);

struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *pkt);

void hv_pkt_iter_close(struct vmbus_channel *channel);

/*
 * Get next packet descriptor from iterator.
 * If at end of list, return NULL and update host.
 */
static inline struct vmpacket_descriptor *
hv_pkt_iter_next(struct vmbus_channel *channel,
		 const struct vmpacket_descriptor *pkt)
{
	struct vmpacket_descriptor *nxt;

	nxt = __hv_pkt_iter_next(channel, pkt);
	if (!nxt)
		hv_pkt_iter_close(channel);

	return nxt;
}

#define foreach_vmbus_pkt(pkt, channel) \
	for (pkt = hv_pkt_iter_first(channel); pkt; \
	    pkt = hv_pkt_iter_next(channel, pkt))
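
/*
 * Example (illustrative sketch; my_onchannel_cb and my_handle_packet are
 * hypothetical driver functions): draining the inbound ring with the
 * packet iterator:
 *
 *	static void my_onchannel_cb(void *ctx)
 *	{
 *		struct vmbus_channel *chan = ctx;
 *		const struct vmpacket_descriptor *desc;
 *
 *		foreach_vmbus_pkt(desc, chan) {
 *			void *data = hv_pkt_data(desc);
 *			u32 len = hv_pkt_datalen(desc);
 *
 *			my_handle_packet(data, len);
 *		}
 *	}
 */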

/*
 * Interface for passing data between SR-IOV PF and VF drivers. The VF driver
 * sends requests to read and write blocks. Each block must be 128 bytes or
 * smaller. Optionally, the VF driver can register a callback function which
 * will be invoked when the host says that one or more of the first 64 block
 * IDs is "invalid" which means that the VF driver should reread them.
 */
#define HV_CONFIG_BLOCK_SIZE_MAX 128

int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
			unsigned int block_id, unsigned int *bytes_returned);
int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
			 unsigned int block_id);
int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
				void (*block_invalidate)(void *context,
							 u64 block_mask));
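
/*
 * Example (illustrative sketch; block id 0 is a hypothetical choice):
 * a VF driver reading one configuration block from the PF:
 *
 *	u8 buf[HV_CONFIG_BLOCK_SIZE_MAX];
 *	unsigned int returned;
 *	int ret;
 *
 *	ret = hyperv_read_cfg_blk(pdev, buf, sizeof(buf), 0, &returned);
 */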

struct hyperv_pci_block_ops {
	int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len,
			  unsigned int block_id, unsigned int *bytes_returned);
	int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len,
			   unsigned int block_id);
	int (*reg_blk_invalidate)(struct pci_dev *dev, void *context,
				  void (*block_invalidate)(void *context,
							   u64 block_mask));
};

extern struct hyperv_pci_block_ops hvpci_block_ops;

static inline unsigned long virt_to_hvpfn(void *addr)
{
	phys_addr_t paddr;

	if (is_vmalloc_addr(addr))
		paddr = page_to_phys(vmalloc_to_page(addr)) +
				     offset_in_page(addr);
	else
		paddr = __pa(addr);

	return paddr >> HV_HYP_PAGE_SHIFT;
}

#define NR_HV_HYP_PAGES_IN_PAGE	(PAGE_SIZE / HV_HYP_PAGE_SIZE)
#define offset_in_hvpage(ptr)	((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
#define HVPFN_UP(x)	(((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
#define page_to_hvpfn(page)	(page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
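
/*
 * Example (illustrative only): a buffer of 'len' bytes starting at 'ptr'
 * spans HVPFN_UP(offset_in_hvpage(ptr) + len) Hyper-V pages, which can
 * exceed the number of guest pages when PAGE_SIZE > HV_HYP_PAGE_SIZE.
 */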

#endif /* _HYPERV_H */