/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#ifndef _HYPERV_H
#define _HYPERV_H

#include <uapi/linux/hyperv.h>

#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/reciprocal_div.h>
#include <asm/hyperv-tlfs.h>
#define MAX_PAGE_BUFFER_COUNT				32
#define MAX_MULTIPAGE_BUFFER_COUNT			32 /* 128K */

#pragma pack(push, 1)
/*
 * Types for GPADL, decide how the GPADL header is created.
 *
 * It doesn't make much difference between BUFFER and RING if PAGE_SIZE is the
 * same as HV_HYP_PAGE_SIZE.
 *
 * If PAGE_SIZE is bigger than HV_HYP_PAGE_SIZE, the headers of ring buffers
 * will be of PAGE_SIZE, however, only the first HV_HYP_PAGE will be put
 * into the gpadl, therefore the number of HV_HYP_PAGEs and the indexes of
 * each HV_HYP_PAGE will differ between the types of GPADL, for example
 * if PAGE_SIZE is 64K:
 *
 * BUFFER:
 *
 * gva:    |--       64k      --|--       64k      --| ... |
 * gpa:    | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k |
 * index:  0    1    2     15   16   17   18 .. 31   32 ...
 *         |    |    ...   |    |    |   ...    |   ...
 *         v    v          v    v    v          v
 * gpadl:  | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k | ... |
 * index:  0    1    2 ... 15   16   17   18 .. 31   32 ...
 *
 * RING:
 *
 *         | header  |           data           | header  |     data      |
 * gva:    |-- 64k --|--       64k      --| ... |-- 64k --|-- 64k --| ... |
 * gpa:    | 4k | .. | 4k | 4k | ... | 4k | ... | 4k | .. | 4k | .. | ... |
 * index:  0    1    16   17   18    31   ...   n   n+1  n+16 ...   2n
 *         |         /    /          /          |         /           /
 *         |        /    /   ...    /    ...    |        /    ...    /
 *         v       v    v          v            v       v           v
 * gpadl:  | 4k | 4k |   ...    |    ...        | 4k | 4k |  ...  |
 * index:  0    1    2   ...    16   ...       n-15 n-14 n-13 ... 2n-30
 */
enum hv_gpadl_type {
	HV_GPADL_BUFFER,
	HV_GPADL_RING
};
/* Single-page buffer */
struct hv_page_buffer {
	u32 len;
	u32 offset;
	u64 pfn;
};

/* Multiple-page buffer */
struct hv_multipage_buffer {
	/* Length and offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};
/*
 * Multiple-page buffer array; the pfn array is variable size:
 * The number of entries in the PFN array is determined by
 * "len" and "offset".
 */
struct hv_mpb_array {
	/* Length and offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[];
};

#pragma pack(pop)
/* 0x18 includes the proprietary packet header */
#define MAX_PAGE_BUFFER_PACKET		(0x18 +			\
					(sizeof(struct hv_page_buffer) * \
					 MAX_PAGE_BUFFER_COUNT))
#define MAX_MULTIPAGE_BUFFER_PACKET	(0x18 +			\
					 sizeof(struct hv_multipage_buffer))
struct hv_ring_buffer {
	/* Offset in bytes from the start of ring data below */
	u32 write_index;

	/* Offset in bytes from the start of ring data below */
	u32 read_index;

	u32 interrupt_mask;

	/*
	 * WS2012/Win8 and later versions of Hyper-V implement interrupt
	 * driven flow management. The feature bit feat_pending_send_sz
	 * is set by the host on the host->guest ring buffer, and by the
	 * guest on the guest->host ring buffer.
	 *
	 * The meaning of the feature bit is a bit complex in that it has
	 * semantics that apply to both ring buffers.  If the guest sets
	 * the feature bit in the guest->host ring buffer, the guest is
	 * telling the host that:
	 * 1) It will set the pending_send_sz field in the guest->host ring
	 *    buffer when it is waiting for space to become available, and
	 * 2) It will read the pending_send_sz field in the host->guest
	 *    ring buffer and interrupt the host when it frees enough space
	 *
	 * Similarly, if the host sets the feature bit in the host->guest
	 * ring buffer, the host is telling the guest that:
	 * 1) It will set the pending_send_sz field in the host->guest ring
	 *    buffer when it is waiting for space to become available, and
	 * 2) It will read the pending_send_sz field in the guest->host
	 *    ring buffer and interrupt the guest when it frees enough space
	 *
	 * If either the guest or host does not set the feature bit that it
	 * owns, that guest or host must do polling if it encounters a full
	 * ring buffer, and not signal the other end with an interrupt.
	 */
	u32 pending_send_sz;
	u32 reserved1[12];
	union {
		struct {
			u32 feat_pending_send_sz:1;
		};
		u32 value;
	} feature_bits;

	/* Pad it to PAGE_SIZE so that data starts on page boundary */
	u8	reserved2[PAGE_SIZE - 68];

	/*
	 * Ring data starts here + RingDataStartOffset
	 * !!! DO NOT place any fields below this !!!
	 */
	u8 buffer[];
} __packed;
/* Calculate the proper size of a ringbuffer, it must be page-aligned */
#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
					       (payload_sz))
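/*
 * Example (illustrative): a driver that wants roughly 16K of usable ring
 * data would size its ring as below; the result is page-aligned and
 * includes the shared ring buffer header.
 *
 *	ring_size = VMBUS_RING_SIZE(16 * 1024);
 */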
struct hv_ring_buffer_info {
	struct hv_ring_buffer *ring_buffer;
	u32 ring_size;			/* Include the shared header */
	struct reciprocal_value ring_size_div10_reciprocal;
	spinlock_t ring_lock;

	u32 ring_datasize;		/* < ring_size */
	u32 priv_read_index;
	/*
	 * The ring buffer mutex lock. This lock prevents the ring buffer from
	 * being freed while the ring buffer is being accessed.
	 */
	struct mutex ring_buffer_mutex;
};
static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, read;

	dsize = rbi->ring_datasize;
	read_loc = rbi->ring_buffer->read_index;
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	read = write_loc >= read_loc ? (write_loc - read_loc) :
		(dsize - read_loc) + write_loc;

	return read;
}
static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, write;

	dsize = rbi->ring_datasize;
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = rbi->ring_buffer->write_index;

	write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;

	return write;
}
static inline u32 hv_get_avail_to_write_percent(
		const struct hv_ring_buffer_info *rbi)
{
	u32 avail_write = hv_get_bytes_to_write(rbi);

	/*
	 * (x << 3) + (x << 1) computes x * 10; dividing by the cached
	 * reciprocal of ring_size/10 yields the free space as a percentage.
	 */
	return reciprocal_divide(
			(avail_write << 3) + (avail_write << 1),
			rbi->ring_size_div10_reciprocal);
}
/*
 * VMBUS version is a 32 bit entity broken up into
 * two 16 bit quantities: major_number.minor_number.
 *
 * 0 . 13 (Windows Server 2008)
 * 1 . 1  (Windows 7)
 * 2 . 4  (Windows 8)
 * 3 . 0  (Windows 8.1)
 * 4 . 0  (Windows 10)
 * 4 . 1  (Windows 10 RS3)
 * 5 . 0  (Newer Windows 10)
 * 5 . 1  (Windows 10 RS4)
 * 5 . 2  (Windows Server 2019, RS5)
 * 5 . 3  (Windows Server 2022)
 */

#define VERSION_WS2008		((0 << 16) | (13))
#define VERSION_WIN7		((1 << 16) | (1))
#define VERSION_WIN8		((2 << 16) | (4))
#define VERSION_WIN8_1		((3 << 16) | (0))
#define VERSION_WIN10		((4 << 16) | (0))
#define VERSION_WIN10_V4_1	((4 << 16) | (1))
#define VERSION_WIN10_V5	((5 << 16) | (0))
#define VERSION_WIN10_V5_1	((5 << 16) | (1))
#define VERSION_WIN10_V5_2	((5 << 16) | (2))
#define VERSION_WIN10_V5_3	((5 << 16) | (3))
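/*
 * Example (illustrative): recovering the two 16-bit halves of a
 * negotiated version, e.g. for logging.
 *
 *	u16 major = vmbus_proto_version >> 16;
 *	u16 minor = vmbus_proto_version & 0xffff;
 */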
/* Maximum size of the pipe payload is 16K */
#define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)

/* Define PipeMode values. */
#define VMBUS_PIPE_TYPE_BYTE		0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE		0x00000004

/* The size of the user defined data buffer for non-pipe offers. */
#define MAX_USER_DEFINED_BYTES		120

/* The size of the user defined data buffer for pipe offers. */
#define MAX_PIPE_USER_DEFINED_BYTES	116
/*
 * At the center of the Channel Management library is the Channel Offer. This
 * struct contains the fundamental information about an offer.
 */
struct vmbus_channel_offer {
	guid_t if_type;
	guid_t if_instance;

	/*
	 * These two fields are not currently used.
	 */
	u64 reserved1;
	u64 reserved2;

	u16 chn_flags;
	u16 mmio_megabytes;		/* size in MB (bytes * 1024 * 1024) */

	union {
		/* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
		struct {
			unsigned char user_def[MAX_USER_DEFINED_BYTES];
		} std;

		/*
		 * Pipes:
		 * The following structure is an integrated pipe protocol, which
		 * is implemented on top of standard user-defined data. Pipe
		 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
		 * use.
		 */
		struct {
			u32  pipe_mode;
			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
		} pipe;
	} u;
	/*
	 * The sub_channel_index is defined in Win8: a value of zero means a
	 * primary channel and a value of non-zero means a sub-channel.
	 *
	 * Before Win8, the field is reserved, meaning it's always zero.
	 */
	u16 sub_channel_index;
	u16 reserved3;
} __packed;
/* Server Flags */
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE	1
#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES	2
#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS		4
#define VMBUS_CHANNEL_NAMED_PIPE_MODE			0x10
#define VMBUS_CHANNEL_LOOPBACK_OFFER			0x100
#define VMBUS_CHANNEL_PARENT_OFFER			0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400
#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER		0x2000
struct vmpacket_descriptor {
	u16 type;
	u16 offset8;
	u16 len8;
	u16 flags;
	u64 trans_id;
} __packed;

struct vmpacket_header {
	u32 prev_pkt_start_offset;
	struct vmpacket_descriptor descriptor;
} __packed;

struct vmtransfer_page_range {
	u32 byte_count;
	u32 byte_offset;
} __packed;

struct vmtransfer_page_packet_header {
	struct vmpacket_descriptor d;
	u16 xfer_pageset_id;
	u8  sender_owns_set;
	u8 reserved;
	u32 range_cnt;
	struct vmtransfer_page_range ranges[1];
} __packed;

struct vmgpadl_packet_header {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;
} __packed;

struct vmadd_remove_transfer_page_set {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u16 xfer_pageset_id;
	u16 reserved;
} __packed;
/*
 * This structure defines a range in guest physical space that can be made to
 * look virtually contiguous.
 */
struct gpa_range {
	u32 byte_count;
	u32 byte_offset;
	u64 pfn_array[];
};

/*
 * This is the format for an Establish Gpadl packet, which contains a handle by
 * which this GPADL will be known and a set of GPA ranges associated with it.
 * This can be converted to a MDL by the guest OS. If there are multiple GPA
 * ranges, then the resulting MDL will be "chained," representing multiple VA
 * ranges.
 */
struct vmestablish_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;
/*
 * This is the format for a Teardown Gpadl packet, which indicates that the
 * GPADL handle in the Establish Gpadl packet will never be referenced again.
 */
struct vmteardown_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;	/* for alignment to an 8-byte boundary */
} __packed;
/*
 * This is the format for a GPA-Direct packet, which contains a set of GPA
 * ranges, in addition to commands and/or data.
 */
struct vmdata_gpa_direct {
	struct vmpacket_descriptor d;
	u32 reserved;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/* This is the format for an Additional Data Packet. */
struct vmadditional_data {
	struct vmpacket_descriptor d;
	u64 total_bytes;
	u32 offset;
	u32 byte_count;
	unsigned char data[1];
} __packed;
union vmpacket_largest_possible_header {
	struct vmpacket_descriptor simple_hdr;
	struct vmtransfer_page_packet_header xfer_page_hdr;
	struct vmgpadl_packet_header gpadl_hdr;
	struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
	struct vmestablish_gpadl establish_gpadl_hdr;
	struct vmteardown_gpadl teardown_gpadl_hdr;
	struct vmdata_gpa_direct data_gpa_direct_hdr;
};
#define VMPACKET_DATA_START_ADDRESS(__packet)	\
	(void *)(((unsigned char *)__packet) +	\
	 ((struct vmpacket_descriptor *)__packet)->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)		\
	((((struct vmpacket_descriptor *)__packet)->len8 -	\
	  ((struct vmpacket_descriptor *)__packet)->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)	\
	(((struct vmpacket_descriptor *)__packet)->type)
enum vmbus_packet_type {
	VM_PKT_INVALID				= 0x0,
	VM_PKT_SYNCH				= 0x1,
	VM_PKT_ADD_XFER_PAGESET			= 0x2,
	VM_PKT_RM_XFER_PAGESET			= 0x3,
	VM_PKT_ESTABLISH_GPADL			= 0x4,
	VM_PKT_TEARDOWN_GPADL			= 0x5,
	VM_PKT_DATA_INBAND			= 0x6,
	VM_PKT_DATA_USING_XFER_PAGES		= 0x7,
	VM_PKT_DATA_USING_GPADL			= 0x8,
	VM_PKT_DATA_USING_GPA_DIRECT		= 0x9,
	VM_PKT_CANCEL_REQUEST			= 0xa,
	VM_PKT_COMP				= 0xb,
	VM_PKT_DATA_USING_ADDITIONAL_PKT	= 0xc,
	VM_PKT_ADDITIONAL_DATA			= 0xd
};
#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED	1
/* Version 1 messages */
enum vmbus_channel_message_type {
	CHANNELMSG_INVALID			=  0,
	CHANNELMSG_OFFERCHANNEL			=  1,
	CHANNELMSG_RESCIND_CHANNELOFFER		=  2,
	CHANNELMSG_REQUESTOFFERS		=  3,
	CHANNELMSG_ALLOFFERS_DELIVERED		=  4,
	CHANNELMSG_OPENCHANNEL			=  5,
	CHANNELMSG_OPENCHANNEL_RESULT		=  6,
	CHANNELMSG_CLOSECHANNEL			=  7,
	CHANNELMSG_GPADL_HEADER			=  8,
	CHANNELMSG_GPADL_BODY			=  9,
	CHANNELMSG_GPADL_CREATED		= 10,
	CHANNELMSG_GPADL_TEARDOWN		= 11,
	CHANNELMSG_GPADL_TORNDOWN		= 12,
	CHANNELMSG_RELID_RELEASED		= 13,
	CHANNELMSG_INITIATE_CONTACT		= 14,
	CHANNELMSG_VERSION_RESPONSE		= 15,
	CHANNELMSG_UNLOAD			= 16,
	CHANNELMSG_UNLOAD_RESPONSE		= 17,
	CHANNELMSG_18				= 18,
	CHANNELMSG_19				= 19,
	CHANNELMSG_20				= 20,
	CHANNELMSG_TL_CONNECT_REQUEST		= 21,
	CHANNELMSG_MODIFYCHANNEL		= 22,
	CHANNELMSG_TL_CONNECT_RESULT		= 23,
	CHANNELMSG_MODIFYCHANNEL_RESPONSE	= 24,
	CHANNELMSG_COUNT
};
/* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */
#define INVALID_RELID	U32_MAX

struct vmbus_channel_message_header {
	enum vmbus_channel_message_type msgtype;
	u32 padding;
} __packed;
/* Query VMBus Version parameters */
struct vmbus_channel_query_vmbus_version {
	struct vmbus_channel_message_header header;
	u32 version;
} __packed;

/* VMBus Version Supported parameters */
struct vmbus_channel_version_supported {
	struct vmbus_channel_message_header header;
	u8 version_supported;
} __packed;
/* Offer Channel parameters */
struct vmbus_channel_offer_channel {
	struct vmbus_channel_message_header header;
	struct vmbus_channel_offer offer;
	u32 child_relid;
	u8 monitorid;
	/*
	 * win7 and beyond splits this field into a bit field.
	 */
	u8 monitor_allocated:1;
	u8 reserved:7;
	/*
	 * These are new fields added in win7 and later.
	 * Do not access these fields without checking the
	 * negotiated protocol.
	 *
	 * If "is_dedicated_interrupt" is set, we must not set the
	 * associated bit in the channel bitmap while sending the
	 * interrupt to the host.
	 *
	 * connection_id is to be used in signaling the host.
	 */
	u16 is_dedicated_interrupt:1;
	u16 reserved1:15;
	u32 connection_id;
} __packed;
/* Rescind Offer parameters */
struct vmbus_channel_rescind_offer {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;
static inline u32
hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
{
	return rbi->ring_buffer->pending_send_sz;
}
/*
 * Request Offer -- no parameters, SynIC message contains the partition ID
 * Set Snoop -- no parameters, SynIC message contains the partition ID
 * Clear Snoop -- no parameters, SynIC message contains the partition ID
 * All Offers Delivered -- no parameters, SynIC message contains the partition
 *			   ID
 * Flush Client -- no parameters, SynIC message contains the partition ID
 */
/* Open Channel parameters */
struct vmbus_channel_open_channel {
	struct vmbus_channel_message_header header;

	/* Identifies the specific VMBus channel that is being opened. */
	u32 child_relid;

	/* ID making a particular open request at a channel offer unique. */
	u32 openid;

	/* GPADL for the channel's ring buffer. */
	u32 ringbuffer_gpadlhandle;

	/*
	 * Starting with win8, this field will be used to specify
	 * the target virtual processor on which to deliver the interrupt for
	 * the host to guest communication.
	 * Prior to win8, incoming channel interrupts would only
	 * be delivered on cpu 0. Setting this value to 0 would
	 * preserve the earlier behavior.
	 */
	u32 target_vp;

	/*
	 * The upstream ring buffer begins at offset zero in the memory
	 * described by RingBufferGpadlHandle. The downstream ring buffer
	 * follows it at this offset (in pages).
	 */
	u32 downstream_ringbuffer_pageoffset;

	/* User-specific data to be passed along to the server endpoint. */
	unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;
/* Open Channel Result parameters */
struct vmbus_channel_open_result {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 openid;
	u32 status;
} __packed;

/* Modify Channel Result parameters */
struct vmbus_channel_modifychannel_response {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 status;
} __packed;

/* Close channel parameters */
struct vmbus_channel_close_channel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;
/* Channel Message GPADL */
#define GPADL_TYPE_RING_BUFFER		1
#define GPADL_TYPE_SERVER_SAVE_AREA	2
#define GPADL_TYPE_TRANSACTION		8

/*
 * The number of PFNs in a GPADL message is defined by the number of
 * pages that would be spanned by ByteCount and ByteOffset.  If the
 * implied number of PFNs won't fit in this packet, there will be a
 * follow-up packet that contains more.
 */
struct vmbus_channel_gpadl_header {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u16 range_buflen;
	u16 rangecount;
	struct gpa_range range[];
} __packed;
/* This is the followup packet that contains more PFNs. */
struct vmbus_channel_gpadl_body {
	struct vmbus_channel_message_header header;
	u32 msgnumber;
	u32 gpadl;
	u64 pfn[];
} __packed;

struct vmbus_channel_gpadl_created {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u32 creation_status;
} __packed;

struct vmbus_channel_gpadl_teardown {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
} __packed;

struct vmbus_channel_gpadl_torndown {
	struct vmbus_channel_message_header header;
	u32 gpadl;
} __packed;

struct vmbus_channel_relid_released {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;
struct vmbus_channel_initiate_contact {
	struct vmbus_channel_message_header header;
	u32 vmbus_version_requested;
	u32 target_vcpu; /* The VCPU the host should respond to */
	union {
		u64 interrupt_page;
		struct {
			u8	msg_sint;
			u8	padding1[3];
			u32	padding2;
		};
	};
	u64 monitor_page1;
	u64 monitor_page2;
} __packed;
/* Hyper-V socket: guest's connect()-ing to host */
struct vmbus_channel_tl_connect_request {
	struct vmbus_channel_message_header header;
	guid_t guest_endpoint_id;
	guid_t host_service_id;
} __packed;
/* Modify Channel parameters, cf. vmbus_send_modifychannel() */
struct vmbus_channel_modifychannel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 target_vp;
} __packed;
struct vmbus_channel_version_response {
	struct vmbus_channel_message_header header;
	u8 version_supported;

	u8 connection_state;
	u16 padding;

	/*
	 * On new hosts that support VMBus protocol 5.0, we must use
	 * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
	 * and for subsequent messages, we must use the Message Connection ID
	 * field in the host-returned Version Response Message.
	 *
	 * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
	 */
	u32 msg_conn_id;
} __packed;
enum vmbus_channel_state {
	CHANNEL_OFFER_STATE,
	CHANNEL_OPENING_STATE,
	CHANNEL_OPEN_STATE,
	CHANNEL_OPENED_STATE,
};
/*
 * Represents each channel msg on the vmbus connection. This is a
 * variable-size data structure depending on the msg type itself.
 */
struct vmbus_channel_msginfo {
	/* Bookkeeping stuff */
	struct list_head msglistentry;

	/* So far, this is only used to handle gpadl body message */
	struct list_head submsglist;

	/* Synchronize the request/response if needed */
	struct completion  waitevent;
	struct vmbus_channel *waiting_channel;
	union {
		struct vmbus_channel_version_supported version_supported;
		struct vmbus_channel_open_result open_result;
		struct vmbus_channel_gpadl_torndown gpadl_torndown;
		struct vmbus_channel_gpadl_created gpadl_created;
		struct vmbus_channel_version_response version_response;
		struct vmbus_channel_modifychannel_response modify_response;
	} response;

	u32 msgsize;
	/*
	 * The channel message that goes out on the "wire".
	 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header.
	 */
	unsigned char msg[];
};

struct vmbus_close_msg {
	struct vmbus_channel_msginfo info;
	struct vmbus_channel_close_channel msg;
};
/* Define connection identifier type. */
union hv_connection_id {
	u32 asu32;
	struct {
		u32 id:24;
		u32 reserved:8;
	} u;
};

enum vmbus_device_type {
	HV_IDE = 0,
	HV_SCSI,
	HV_FC,
	HV_NIC,
	HV_ND,
	HV_PCIE,
	HV_FB,
	HV_KBD,
	HV_MOUSE,
	HV_KVP,
	HV_TS,
	HV_HB,
	HV_SHUTDOWN,
	HV_FCOPY,
	HV_BACKUP,
	HV_DM,
	HV_UNKNOWN,
};
/*
 * Provides request ids for VMBus. Encapsulates guest memory
 * addresses and stores the next available slot in req_arr
 * to generate new ids in constant time.
 */
struct vmbus_requestor {
	u64 *req_arr;
	unsigned long *req_bitmap; /* is a given slot available? */
	u32 size;
	u64 next_request_id;
	spinlock_t req_lock; /* provides atomicity */
};

#define VMBUS_NO_RQSTOR			U64_MAX
#define VMBUS_RQST_ERROR		(U64_MAX - 1)
#define VMBUS_RQST_ID_NO_RESPONSE	(U64_MAX - 2)
struct vmbus_device {
	u16 dev_type;
	guid_t guid;
	bool perf_device;
	bool allowed_in_isolated;
};
struct vmbus_channel {
	struct list_head listentry;

	struct hv_device *device_obj;

	enum vmbus_channel_state state;

	struct vmbus_channel_offer_channel offermsg;
	/*
	 * These are based on the OfferMsg.MonitorId.
	 * Save it here for easy access.
	 */
	u8 monitor_grp;
	u8 monitor_bit;

	bool rescind; /* got rescind msg */
	bool rescind_ref; /* got rescind msg, got channel reference */
	struct completion rescind_event;

	u32 ringbuffer_gpadlhandle;

	/* Allocated memory for ring buffer */
	struct page *ringbuffer_page;
	u32 ringbuffer_pagecount;
	u32 ringbuffer_send_offset;
	struct hv_ring_buffer_info outbound;	/* send to parent */
	struct hv_ring_buffer_info inbound;	/* receive from parent */

	struct vmbus_close_msg close_msg;

	/* Statistics */
	u64	interrupts;	/* Host to Guest interrupts */
	u64	sig_events;	/* Guest to Host events */

	/*
	 * Guest to host interrupts caused by the outbound ring buffer changing
	 * from empty to not empty.
	 */
	u64 intr_out_empty;

	/*
	 * Indicates that a full outbound ring buffer was encountered. The flag
	 * is set to true when a full outbound ring buffer is encountered and
	 * set to false when a write to the outbound ring buffer is completed.
	 */
	bool out_full_flag;
	/* Channel callback's invoked in softirq context */
	struct tasklet_struct callback_event;
	void (*onchannel_callback)(void *context);
	void *channel_callback_context;

	void (*change_target_cpu_callback)(struct vmbus_channel *channel,
					   u32 old, u32 new);

	/*
	 * Synchronize channel scheduling and channel removal; see the inline
	 * comments in vmbus_chan_sched() and vmbus_reset_channel_cb().
	 */
	spinlock_t sched_lock;

	/*
	 * A channel can be marked for one of three modes of reading:
	 *   BATCHED - callback called from tasklet and should read
	 *            channel until empty. Interrupts from the host
	 *            are masked while read is in process (default).
	 *   DIRECT - callback called from tasklet (softirq).
	 *   ISR - callback called in interrupt context and must
	 *         invoke its own deferred processing.
	 *         Host interrupts are disabled and must be re-enabled
	 *         when ring is empty.
	 */
	enum hv_callback_mode {
		HV_CALL_BATCHED,
		HV_CALL_DIRECT,
		HV_CALL_ISR
	} callback_mode;

	bool is_dedicated_interrupt;

	/*
	 * Starting with win8, this field will be used to specify the
	 * target CPU on which to deliver the interrupt for the host
	 * to guest communication.
	 *
	 * Prior to win8, incoming channel interrupts would only be
	 * delivered on CPU 0. Setting this value to 0 would preserve
	 * the earlier behavior.
	 */
	u32 target_cpu;
	/*
	 * Support for sub-channels. For high performance devices,
	 * it will be useful to have multiple sub-channels to support
	 * a scalable communication infrastructure with the host.
	 * The support for sub-channels is implemented as an extension
	 * to the current infrastructure.
	 * The initial offer is considered the primary channel and this
	 * offer message will indicate if the host supports sub-channels.
	 * The guest is free to ask for sub-channels to be offered and can
	 * open these sub-channels as a normal "primary" channel. However,
	 * all sub-channels will have the same type and instance guids as the
	 * primary channel. Requests sent on a given channel will result in a
	 * response on the same channel.
	 */

	/*
	 * Sub-channel creation callback. This callback will be called in
	 * process context when a sub-channel offer is received from the host.
	 * The guest can open the sub-channel in the context of this callback.
	 */
	void (*sc_creation_callback)(struct vmbus_channel *new_sc);

	/*
	 * Channel rescind callback. Some channels (the hvsock ones), need to
	 * register a callback which is invoked in vmbus_onoffer_rescind().
	 */
	void (*chn_rescind_callback)(struct vmbus_channel *channel);

	/*
	 * All Sub-channels of a primary channel are linked here.
	 */
	struct list_head sc_list;

	/*
	 * The primary channel this sub-channel belongs to.
	 * This will be NULL for the primary channel.
	 */
	struct vmbus_channel *primary_channel;

	/*
	 * Support per-channel state for use by vmbus drivers.
	 */
	void *per_channel_state;

	/*
	 * Defer freeing channel until after all cpu's have
	 * gone through grace period.
	 */
	struct rcu_head rcu;
	/*
	 * For sysfs per-channel properties.
	 */
	struct kobject			kobj;

	/*
	 * For performance critical channels (storage, networking
	 * etc.), Hyper-V has a mechanism to enhance the throughput
	 * at the expense of latency:
	 * When the host is to be signaled, we just set a bit in a shared page
	 * and this bit will be inspected by the hypervisor within a certain
	 * window and if the bit is set, the host will be signaled. The window
	 * of time is the monitor latency - currently around 100 usecs. This
	 * mechanism improves throughput by:
	 *
	 * A) Making the host more efficient - each time it wakes up,
	 *    potentially it will process more packets. The
	 *    monitor latency allows a batch to build up.
	 * B) By deferring the hypercall to signal, we will also minimize
	 *    the interrupts.
	 *
	 * Clearly, these optimizations improve throughput at the expense of
	 * latency. Furthermore, since the channel is shared for both
	 * control and data messages, control messages currently suffer
	 * unnecessary latency adversely impacting performance and boot
	 * time. To fix this issue, permit tagging the channel as being
	 * in "low latency" mode. In this mode, we will bypass the monitor
	 * mechanism.
	 */
	bool low_latency;

	/*
	 * Cache the device ID here for easy access; this is useful, in
	 * particular, in situations where the channel's device_obj has
	 * not been allocated/initialized yet.
	 */
	u16 device_id;

	/*
	 * We must offload the handling of the primary/sub channels
	 * from the single-threaded vmbus_connection.work_queue to
	 * two different workqueues, otherwise we can block
	 * vmbus_connection.work_queue and hang: see vmbus_process_offer().
	 */
	struct work_struct add_channel_work;

	/*
	 * Guest to host interrupts caused by the inbound ring buffer changing
	 * from full to not full while a packet is waiting.
	 */
	u64 intr_in_full;

	/*
	 * The total number of write operations that encountered a full
	 * outbound ring buffer.
	 */
	u64 out_full_total;

	/*
	 * The number of write operations that were the first to encounter a
	 * full outbound ring buffer.
	 */
	u64 out_full_first;

	/* enabling/disabling fuzz testing on the channel (default is false)*/
	bool fuzz_testing_state;

	/*
	 * Interrupt delay will delay the guest from emptying the ring buffer
	 * for a specific amount of time. The delay is in microseconds and will
	 * be between 1 and a maximum of 1000, its default is 0 (no delay).
	 * The message delay will delay guest reading on a per message basis
	 * in microseconds between 1 and 1000 with the default being 0
	 * (no delay).
	 */
	u32 fuzz_testing_interrupt_delay;
	u32 fuzz_testing_message_delay;

	/* request/transaction ids for VMBus */
	struct vmbus_requestor requestor;
};
u64 vmbus_next_request_id(struct vmbus_requestor *rqstor, u64 rqst_addr);
u64 vmbus_request_addr(struct vmbus_requestor *rqstor, u64 trans_id);
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
	return !!(c->offermsg.offer.chn_flags &
		  VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
}

static inline bool is_sub_channel(const struct vmbus_channel *c)
{
	return c->offermsg.offer.sub_channel_index != 0;
}

static inline void set_channel_read_mode(struct vmbus_channel *c,
					 enum hv_callback_mode mode)
{
	c->callback_mode = mode;
}

static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
	c->per_channel_state = s;
}

static inline void *get_per_channel_state(struct vmbus_channel *c)
{
	return c->per_channel_state;
}
static inline void set_channel_pending_send_size(struct vmbus_channel *c,
						 u32 size)
{
	unsigned long flags;

	if (size) {
		spin_lock_irqsave(&c->outbound.ring_lock, flags);
		++c->out_full_total;

		if (!c->out_full_flag) {
			++c->out_full_first;
			c->out_full_flag = true;
		}
		spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
	} else {
		c->out_full_flag = false;
	}

	c->outbound.ring_buffer->pending_send_sz = size;
}
static inline void set_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = true;
}

static inline void clear_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = false;
}

void vmbus_onmessage(struct vmbus_channel_message_header *hdr);
int vmbus_request_offers(void);

/*
 * APIs for managing sub-channels.
 */

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
			void (*sc_cr_cb)(struct vmbus_channel *new_sc));

void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
		void (*chn_rescind_cb)(struct vmbus_channel *));

/*
 * Check if sub-channels have already been offered. This API will be useful
 * when the driver is unloaded after establishing sub-channels. In this case,
 * when the driver is re-loaded, the driver would have to check if the
 * subchannels have already been established before attempting to request
 * the creation of sub-channels.
 * This function returns TRUE to indicate that subchannels have already been
 * created.
 * This function should be invoked after setting the callback function for
 * sub-channel creation.
 */
bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;
	struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_multipage_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_multipage_buffer range;
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_packet_mpb_array {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_mpb_array range;
} __packed;
int vmbus_alloc_ring(struct vmbus_channel *channel,
		     u32 send_size, u32 recv_size);
void vmbus_free_ring(struct vmbus_channel *channel);

int vmbus_connect_ring(struct vmbus_channel *channel,
		       void (*onchannel_callback)(void *context),
		       void *context);
int vmbus_disconnect_ring(struct vmbus_channel *channel);

extern int vmbus_open(struct vmbus_channel *channel,
			    u32 send_ringbuffersize,
			    u32 recv_ringbuffersize,
			    void *userdata,
			    u32 userdatalen,
			    void (*onchannel_callback)(void *context),
			    void *context);

extern void vmbus_close(struct vmbus_channel *channel);
extern int vmbus_sendpacket(struct vmbus_channel *channel,
				  void *buffer,
				  u32 bufferLen,
				  u64 requestid,
				  enum vmbus_packet_type type,
				  u32 flags);

extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
					    struct hv_page_buffer pagebuffers[],
					    u32 pagecount,
					    void *buffer,
					    u32 bufferlen,
					    u64 requestid);

extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
				     struct vmbus_packet_mpb_array *mpb,
				     u32 desc_size,
				     void *buffer,
				     u32 bufferlen,
				     u64 requestid);

extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
				      void *kbuffer,
				      u32 size,
				      u32 *gpadl_handle);

extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
				     u32 gpadl_handle);

void vmbus_reset_channel_cb(struct vmbus_channel *channel);
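/*
 * Example (illustrative sketch): sending an in-band request and asking
 * the host to post a completion packet. "msg" is a hypothetical,
 * driver-defined request structure.
 *
 *	ret = vmbus_sendpacket(channel, &msg, sizeof(msg),
 *			       (u64)(unsigned long)&msg,
 *			       VM_PKT_DATA_INBAND,
 *			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 */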
extern int vmbus_recvpacket(struct vmbus_channel *channel,
				  void *buffer,
				  u32 bufferlen,
				  u32 *buffer_actual_len,
				  u64 *requestid);

extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
				     void *buffer,
				     u32 bufferlen,
				     u32 *buffer_actual_len,
				     u64 *requestid);

extern void vmbus_ontimer(unsigned long data);
/* Base driver object */
struct hv_driver {
	const char *name;

	/*
	 * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
	 * channel flag, actually doesn't mean a synthetic device because the
	 * offer's if_type/if_instance can change for every new hvsock
	 * connection.
	 *
	 * However, to facilitate the notification of new-offer/rescind-offer
	 * from vmbus driver to hvsock driver, we can handle hvsock offer as
	 * a special vmbus device, and hence we need the below flag to
	 * indicate if the driver is the hvsock driver or not: we need to
	 * specially treat the hvsock offer & driver in vmbus_match().
	 */
	bool hvsock;

	/* the device type supported by this driver */
	guid_t dev_type;
	const struct hv_vmbus_device_id *id_table;

	struct device_driver driver;

	/* dynamic device GUID's */
	struct  {
		spinlock_t lock;
		struct list_head list;
	} dynids;

	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
	int (*remove)(struct hv_device *);
	void (*shutdown)(struct hv_device *);

	int (*suspend)(struct hv_device *);
	int (*resume)(struct hv_device *);

};
/* Base device object */
struct hv_device {
	/* the device type id of this device */
	guid_t dev_type;

	/* the device instance id of this device */
	guid_t dev_instance;
	u16 vendor_id;
	u16 device_id;

	struct device device;
	char *driver_override; /* Driver name to force a match */

	struct vmbus_channel *channel;
	struct kset	     *channels_kset;

	/* place holder to keep track of the dir for hv device in debugfs */
	struct dentry *debug_dir;

};
static inline struct hv_device *device_to_hv_device(struct device *d)
{
	return container_of(d, struct hv_device, device);
}

static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
	return container_of(d, struct hv_driver, driver);
}

static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
	dev_set_drvdata(&dev->device, data);
}

static inline void *hv_get_drvdata(struct hv_device *dev)
{
	return dev_get_drvdata(&dev->device);
}
struct hv_ring_buffer_debug_info {
	u32 current_interrupt_mask;
	u32 current_read_index;
	u32 current_write_index;
	u32 bytes_avail_toread;
	u32 bytes_avail_towrite;
};

int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info);
/* Vmbus interface */
#define vmbus_driver_register(driver)	\
	__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
					 struct module *owner,
					 const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);

void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok);
void vmbus_free_mmio(resource_size_t start, resource_size_t size);
/*
 * GUID definitions of various offer types - services offered to the guest.
 */

/*
 * Network GUID
 * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
 */
#define HV_NIC_GUID \
	.guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
			  0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)

/*
 * IDE GUID
 * {32412632-86cb-44a2-9b5c-50d1417354f5}
 */
#define HV_IDE_GUID \
	.guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
			  0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)

/*
 * SCSI GUID
 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
 */
#define HV_SCSI_GUID \
	.guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
			  0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)

/*
 * Shutdown GUID
 * {0e0b6031-5213-4934-818b-38d90ced39db}
 */
#define HV_SHUTDOWN_GUID \
	.guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
			  0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)

/*
 * Time Synch GUID
 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
 */
#define HV_TS_GUID \
	.guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
			  0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)

/*
 * Heartbeat GUID
 * {57164f39-9115-4e78-ab55-382f3bd5422d}
 */
#define HV_HEART_BEAT_GUID \
	.guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
			  0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)

/*
 * KVP GUID
 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
 */
#define HV_KVP_GUID \
	.guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
			  0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)

/*
 * Dynamic memory GUID
 * {525074dc-8985-46e2-8057-a307dc18a502}
 */
#define HV_DM_GUID \
	.guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
			  0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)

/*
 * Mouse GUID
 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
 */
#define HV_MOUSE_GUID \
	.guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
			  0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)

/*
 * Keyboard GUID
 * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
 */
#define HV_KBD_GUID \
	.guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
			  0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)

/*
 * VSS (Backup/Restore) GUID
 * {35fa2e29-ea23-4236-96ae-3a6ebacba440}
 */
#define HV_VSS_GUID \
	.guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
			  0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)

/*
 * Synthetic Video GUID
 * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
 */
#define HV_SYNTHVID_GUID \
	.guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
			  0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)

/*
 * Synthetic FC GUID
 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
 */
#define HV_SYNTHFC_GUID \
	.guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
			  0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)

/*
 * Guest File Copy Service
 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
 */
#define HV_FCOPY_GUID \
	.guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
			  0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)

/*
 * NetworkDirect. This is the guest RDMA service.
 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
 */
#define HV_ND_GUID \
	.guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
			  0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)

/*
 * PCI Express Pass Through
 * {44C4F61D-4444-4400-9D52-802E27EDE19F}
 */
#define HV_PCIE_GUID \
	.guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
			  0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)

/*
 * Linux doesn't support these 3 devices: the first two are for
 * Automatic Virtual Machine Activation, and the third is for
 * Remote Desktop Virtualization.
 * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
 * {3375baf4-9e15-4b30-b765-67acb10d607b}
 * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
 */

#define HV_AVMA1_GUID \
	.guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
			  0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)

#define HV_AVMA2_GUID \
	.guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
			  0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)

#define HV_RDV_GUID \
	.guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
			  0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
/*
 * Common header for Hyper-V ICs
 */

#define ICMSGTYPE_NEGOTIATE		0
#define ICMSGTYPE_HEARTBEAT		1
#define ICMSGTYPE_KVPEXCHANGE		2
#define ICMSGTYPE_SHUTDOWN		3
#define ICMSGTYPE_TIMESYNC		4
#define ICMSGTYPE_VSS			5
#define ICMSGTYPE_FCOPY			7

#define ICMSGHDRFLAG_TRANSACTION	1
#define ICMSGHDRFLAG_REQUEST		2
#define ICMSGHDRFLAG_RESPONSE		4
/*
 * While we want to handle util services as regular devices,
 * there is only one instance of each of these services; so
 * we statically allocate the service specific state.
 */

struct hv_util_service {
	u8 *recv_buffer;
	void *channel;
	void (*util_cb)(void *);
	int (*util_init)(struct hv_util_service *);
	void (*util_deinit)(void);
	int (*util_pre_suspend)(void);
	int (*util_pre_resume)(void);
};
struct vmbuspipe_hdr {
	u32 flags;
	u32 msgsize;
} __packed;

struct ic_version {
	u16 major;
	u16 minor;
} __packed;

struct icmsg_hdr {
	struct ic_version icverframe;
	u16 icmsgtype;
	struct ic_version icvermsg;
	u16 icmsgsize;
	u32 status;
	u8 ictransaction_id;
	u8 icflags;
	u8 reserved[2];
} __packed;

#define IC_VERSION_NEGOTIATION_MAX_VER_COUNT 100
#define ICMSG_HDR (sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr))
#define ICMSG_NEGOTIATE_PKT_SIZE(icframe_vercnt, icmsg_vercnt) \
	(ICMSG_HDR + sizeof(struct icmsg_negotiate) + \
	 (((icframe_vercnt) + (icmsg_vercnt)) * sizeof(struct ic_version)))
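/*
 * Example (illustrative): size of a negotiate packet that carries one
 * framework version and one message version.
 *
 *	len = ICMSG_NEGOTIATE_PKT_SIZE(1, 1);
 */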
struct icmsg_negotiate {
	u16 icframe_vercnt;
	u16 icmsg_vercnt;
	u32 reserved;
	struct ic_version icversion_data[]; /* any size array */
} __packed;

struct shutdown_msg_data {
	u32 reason_code;
	u32 timeout_seconds;
	u32 flags;
	u8  display_message[2048];
} __packed;

struct heartbeat_msg_data {
	u64 seq_num;
	u32 reserved[8];
} __packed;
/* Time Sync IC defs */
#define ICTIMESYNCFLAG_PROBE	0
#define ICTIMESYNCFLAG_SYNC	1
#define ICTIMESYNCFLAG_SAMPLE	2

#ifdef __x86_64__
#define WLTIMEDELTA	116444736000000000L	/* in 100ns unit */
#else
#define WLTIMEDELTA	116444736000000000LL
#endif

struct ictimesync_data {
	u64 parenttime;
	u64 childtime;
	u64 roundtriptime;
	u8 flags;
} __packed;

struct ictimesync_ref_data {
	u64 parenttime;
	u64 vmreferencetime;
	u8 flags;
	char leapflags;
	char stratum;
	u8 reserved[3];
} __packed;
struct hyperv_service_callback {
	u8 msg_type;
	char *log_msg;
	guid_t data;
	struct vmbus_channel *channel;
	void (*callback)(void *context);
};

#define MAX_SRV_VER	0x7ffffff
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, u32 buflen,
				const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version);

void hv_process_channel_removal(struct vmbus_channel *channel);

void vmbus_setevent(struct vmbus_channel *channel);
/*
 * Negotiated version with the Host.
 */
extern __u32 vmbus_proto_version;

int vmbus_send_tl_connect_request(const guid_t *shv_guest_service_id,
				  const guid_t *shv_host_service_id);
int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp);
void vmbus_set_event(struct vmbus_channel *channel);
/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}
/*
 * Mask off host interrupt callback notifications
 */
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;

	/* make sure mask update is not reordered */
	virt_mb();
}

/*
 * Re-enable host callback and return number of outstanding bytes
 */
static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 0;

	/* make sure mask update is not reordered */
	virt_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	return hv_get_bytes_to_read(rbi);
}
/*
 * An API to support in-place processing of incoming VMBUS packets.
 */

/* Get data payload associated with descriptor */
static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
{
	return (void *)((unsigned long)desc + (desc->offset8 << 3));
}

/* Get data size associated with descriptor */
static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
{
	return (desc->len8 << 3) - (desc->offset8 << 3);
}
struct vmpacket_descriptor *
hv_pkt_iter_first(struct vmbus_channel *channel);

struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *pkt);

void hv_pkt_iter_close(struct vmbus_channel *channel);

/*
 * Get next packet descriptor from iterator
 * If at end of list, return NULL and update host.
 */
static inline struct vmpacket_descriptor *
hv_pkt_iter_next(struct vmbus_channel *channel,
		 const struct vmpacket_descriptor *pkt)
{
	struct vmpacket_descriptor *nxt;

	nxt = __hv_pkt_iter_next(channel, pkt);
	if (!nxt)
		hv_pkt_iter_close(channel);

	return nxt;
}

#define foreach_vmbus_pkt(pkt, channel) \
	for (pkt = hv_pkt_iter_first(channel); pkt; \
	    pkt = hv_pkt_iter_next(channel, pkt))
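/*
 * Example (illustrative sketch): draining every pending packet from the
 * inbound ring in a channel callback. "my_handle_pkt" is a hypothetical
 * per-packet handler.
 *
 *	struct vmpacket_descriptor *pkt;
 *
 *	foreach_vmbus_pkt(pkt, channel)
 *		my_handle_pkt(channel, hv_pkt_data(pkt), hv_pkt_datalen(pkt));
 */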
/*
 * Interface for passing data between SR-IOV PF and VF drivers. The VF driver
 * sends requests to read and write blocks. Each block must be 128 bytes or
 * smaller. Optionally, the VF driver can register a callback function which
 * will be invoked when the host says that one or more of the first 64 block
 * IDs is "invalid" which means that the VF driver should reread them.
 */
#define HV_CONFIG_BLOCK_SIZE_MAX 128

int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
			unsigned int block_id, unsigned int *bytes_returned);
int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
			 unsigned int block_id);
int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
				void (*block_invalidate)(void *context,
							 u64 block_mask));
struct hyperv_pci_block_ops {
	int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len,
			  unsigned int block_id, unsigned int *bytes_returned);
	int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len,
			   unsigned int block_id);
	int (*reg_blk_invalidate)(struct pci_dev *dev, void *context,
				  void (*block_invalidate)(void *context,
							   u64 block_mask));
};

extern struct hyperv_pci_block_ops hvpci_block_ops;
static inline unsigned long virt_to_hvpfn(void *addr)
{
	phys_addr_t paddr;

	if (is_vmalloc_addr(addr))
		paddr = page_to_phys(vmalloc_to_page(addr)) +
				     offset_in_page(addr);
	else
		paddr = __pa(addr);

	return paddr >> HV_HYP_PAGE_SHIFT;
}
#define NR_HV_HYP_PAGES_IN_PAGE	(PAGE_SIZE / HV_HYP_PAGE_SIZE)
#define offset_in_hvpage(ptr)	((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
#define HVPFN_UP(x)	(((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
#define HVPFN_DOWN(x)	((x) >> HV_HYP_PAGE_SHIFT)
#define page_to_hvpfn(page)	(page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
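/*
 * Example (illustrative): number of Hyper-V pages needed to cover "len"
 * bytes starting at kernel address "va".
 *
 *	nr_hvpfns = HVPFN_UP(offset_in_hvpage(va) + len);
 */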
#endif /* _HYPERV_H */