/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/interrupt.h>

#include "hyperv_vmbus.h"

#define NUM_PAGES_SPANNED(addr, len) \
((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))

/*
 * vmbus_setevent - Trigger an event notification on the specified
 * channel.
 */
void vmbus_setevent(struct vmbus_channel *channel)
{
	struct hv_monitor_page *monitorpage;

	/*
	 * For channels marked as in "low latency" mode
	 * bypass the monitor page mechanism.
	 */
	if ((channel->offermsg.monitor_allocated) &&
	    (!channel->low_latency)) {
		/* Each u32 represents 32 channels */
		sync_set_bit(channel->offermsg.child_relid & 31,
			     (unsigned long *)vmbus_connection.send_int_page +
			     (channel->offermsg.child_relid >> 5));

		/* Get the child to parent monitor page */
		monitorpage = vmbus_connection.monitor_pages[1];

		sync_set_bit(channel->monitor_bit,
			     (unsigned long *)&monitorpage->trigger_group
					[channel->monitor_grp].pending);
	} else {
		vmbus_set_event(channel);
	}
}
EXPORT_SYMBOL_GPL(vmbus_setevent);

/*
 * vmbus_open - Open the specified channel.
 */
int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
	       u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
	       void (*onchannelcallback)(void *context), void *context)
{
	struct vmbus_channel_open_channel *open_msg;
	struct vmbus_channel_msginfo *open_info = NULL;
	unsigned long flags;
	int ret, err = 0;
	struct page *page;

	if (send_ringbuffer_size % PAGE_SIZE ||
	    recv_ringbuffer_size % PAGE_SIZE)
		return -EINVAL;

	spin_lock_irqsave(&newchannel->lock, flags);
	if (newchannel->state == CHANNEL_OPEN_STATE) {
		newchannel->state = CHANNEL_OPENING_STATE;
	} else {
		spin_unlock_irqrestore(&newchannel->lock, flags);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&newchannel->lock, flags);

	newchannel->onchannel_callback = onchannelcallback;
	newchannel->channel_callback_context = context;

	/* Allocate the ring buffer */
	page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
				GFP_KERNEL|__GFP_ZERO,
				get_order(send_ringbuffer_size +
					  recv_ringbuffer_size));

	if (!page)
		page = alloc_pages(GFP_KERNEL|__GFP_ZERO,
				   get_order(send_ringbuffer_size +
					     recv_ringbuffer_size));

	if (!page) {
		err = -ENOMEM;
		goto error_set_chnstate;
	}

	newchannel->ringbuffer_pages = page_address(page);
	newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
					    recv_ringbuffer_size) >> PAGE_SHIFT;

	ret = hv_ringbuffer_init(&newchannel->outbound, page,
				 send_ringbuffer_size >> PAGE_SHIFT);
	if (ret != 0) {
		err = ret;
		goto error_free_pages;
	}

	ret = hv_ringbuffer_init(&newchannel->inbound,
				 &page[send_ringbuffer_size >> PAGE_SHIFT],
				 recv_ringbuffer_size >> PAGE_SHIFT);
	if (ret != 0) {
		err = ret;
		goto error_free_pages;
	}

	/* Establish the gpadl for the ring buffer */
	newchannel->ringbuffer_gpadlhandle = 0;

	ret = vmbus_establish_gpadl(newchannel,
				    page_address(page),
				    send_ringbuffer_size +
				    recv_ringbuffer_size,
				    &newchannel->ringbuffer_gpadlhandle);
	if (ret != 0) {
		err = ret;
		goto error_free_pages;
	}

	/* Create and init the channel open message */
	open_info = kmalloc(sizeof(*open_info) +
			    sizeof(struct vmbus_channel_open_channel),
			    GFP_KERNEL);
	if (!open_info) {
		err = -ENOMEM;
		goto error_free_gpadl;
	}

	init_completion(&open_info->waitevent);
	open_info->waiting_channel = newchannel;

	open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
	open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
	open_msg->openid = newchannel->offermsg.child_relid;
	open_msg->child_relid = newchannel->offermsg.child_relid;
	open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
	open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
						     PAGE_SHIFT;
	open_msg->target_vp = newchannel->target_vp;

	if (userdatalen > MAX_USER_DEFINED_BYTES) {
		err = -EINVAL;
		goto error_free_gpadl;
	}

	if (userdatalen)
		memcpy(open_msg->userdata, userdata, userdatalen);

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&open_info->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	ret = vmbus_post_msg(open_msg,
			     sizeof(struct vmbus_channel_open_channel), true);
	if (ret != 0) {
		err = ret;
		goto error_clean_msglist;
	}

	wait_for_completion(&open_info->waitevent);

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&open_info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	if (newchannel->rescind) {
		err = -ENODEV;
		goto error_free_gpadl;
	}

	if (open_info->response.open_result.status) {
		err = -EAGAIN;
		goto error_free_gpadl;
	}

	newchannel->state = CHANNEL_OPENED_STATE;
	kfree(open_info);
	return 0;

error_clean_msglist:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&open_info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

error_free_gpadl:
	vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
	kfree(open_info);
error_free_pages:
	hv_ringbuffer_cleanup(&newchannel->outbound);
	hv_ringbuffer_cleanup(&newchannel->inbound);
	__free_pages(page,
		     get_order(send_ringbuffer_size + recv_ringbuffer_size));
error_set_chnstate:
	newchannel->state = CHANNEL_OPEN_STATE;
	return err;
}
EXPORT_SYMBOL_GPL(vmbus_open);

/* Used for Hyper-V Socket: a guest client's connect() to the host */
int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
				  const uuid_le *shv_host_servie_id)
{
	struct vmbus_channel_tl_connect_request conn_msg;

	memset(&conn_msg, 0, sizeof(conn_msg));
	conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
	conn_msg.guest_endpoint_id = *shv_guest_servie_id;
	conn_msg.host_service_id = *shv_host_servie_id;

	return vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
}
EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);

/*
 * create_gpadl_header - Creates a gpadl for the specified buffer
 */
static int create_gpadl_header(void *kbuffer, u32 size,
			       struct vmbus_channel_msginfo **msginfo)
{
	int i;
	int pagecount;
	struct vmbus_channel_gpadl_header *gpadl_header;
	struct vmbus_channel_gpadl_body *gpadl_body;
	struct vmbus_channel_msginfo *msgheader;
	struct vmbus_channel_msginfo *msgbody = NULL;
	u32 msgsize;

	int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;

	pagecount = size >> PAGE_SHIFT;

	/* do we need a gpadl body msg */
	pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
		  sizeof(struct vmbus_channel_gpadl_header) -
		  sizeof(struct gpa_range);
	pfncount = pfnsize / sizeof(u64);

	if (pagecount > pfncount) {
		/* we need a gpadl body */
		/* fill in the header */
		msgsize = sizeof(struct vmbus_channel_msginfo) +
			  sizeof(struct vmbus_channel_gpadl_header) +
			  sizeof(struct gpa_range) + pfncount * sizeof(u64);
		msgheader = kzalloc(msgsize, GFP_KERNEL);
		if (!msgheader)
			goto nomem;

		INIT_LIST_HEAD(&msgheader->submsglist);
		msgheader->msgsize = msgsize;

		gpadl_header = (struct vmbus_channel_gpadl_header *)
			msgheader->msg;
		gpadl_header->rangecount = 1;
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
					     pagecount * sizeof(u64);
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = size;
		for (i = 0; i < pfncount; i++)
			gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
				kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
		*msginfo = msgheader;

		pfnsum = pfncount;
		pfnleft = pagecount - pfncount;

		/* how many pfns can we fit */
		pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
			  sizeof(struct vmbus_channel_gpadl_body);
		pfncount = pfnsize / sizeof(u64);

		/* fill in the body */
		while (pfnleft) {
			if (pfnleft > pfncount)
				pfncurr = pfncount;
			else
				pfncurr = pfnleft;

			msgsize = sizeof(struct vmbus_channel_msginfo) +
				  sizeof(struct vmbus_channel_gpadl_body) +
				  pfncurr * sizeof(u64);
			msgbody = kzalloc(msgsize, GFP_KERNEL);

			if (!msgbody) {
				struct vmbus_channel_msginfo *pos = NULL;
				struct vmbus_channel_msginfo *tmp = NULL;
				/*
				 * Free up all the allocated messages.
				 */
				list_for_each_entry_safe(pos, tmp,
					&msgheader->submsglist,
					msglistentry) {

					list_del(&pos->msglistentry);
					kfree(pos);
				}

				goto nomem;
			}

			msgbody->msgsize = msgsize;
			gpadl_body =
				(struct vmbus_channel_gpadl_body *)msgbody->msg;

			/*
			 * Gpadl is u32 and we are using a pointer which could
			 * be 64-bit
			 * This is governed by the guest/host protocol and
			 * so the hypervisor guarantees that this is ok.
			 */
			for (i = 0; i < pfncurr; i++)
				gpadl_body->pfn[i] = slow_virt_to_phys(
					kbuffer + PAGE_SIZE * (pfnsum + i)) >>
					PAGE_SHIFT;

			/* add to msg header */
			list_add_tail(&msgbody->msglistentry,
				      &msgheader->submsglist);
			pfnsum += pfncurr;
			pfnleft -= pfncurr;
		}
	} else {
		/* everything fits in a header */
		msgsize = sizeof(struct vmbus_channel_msginfo) +
			  sizeof(struct vmbus_channel_gpadl_header) +
			  sizeof(struct gpa_range) + pagecount * sizeof(u64);
		msgheader = kzalloc(msgsize, GFP_KERNEL);
		if (msgheader == NULL)
			goto nomem;

		INIT_LIST_HEAD(&msgheader->submsglist);
		msgheader->msgsize = msgsize;

		gpadl_header = (struct vmbus_channel_gpadl_header *)
			msgheader->msg;
		gpadl_header->rangecount = 1;
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
					     pagecount * sizeof(u64);
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = size;
		for (i = 0; i < pagecount; i++)
			gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
				kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;

		*msginfo = msgheader;
	}

	return 0;
nomem:
	kfree(msgheader);
	kfree(msgbody);
	return -ENOMEM;
}

/*
 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
 *
 * @channel: a channel
 * @kbuffer: from kmalloc or vmalloc
 * @size: page-size multiple
 * @gpadl_handle: on return, the handle of the newly established GPADL
 */
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
			  u32 size, u32 *gpadl_handle)
{
	struct vmbus_channel_gpadl_header *gpadlmsg;
	struct vmbus_channel_gpadl_body *gpadl_body;
	struct vmbus_channel_msginfo *msginfo = NULL;
	struct vmbus_channel_msginfo *submsginfo, *tmp;
	struct list_head *curr;
	u32 next_gpadl_handle;
	unsigned long flags;
	int ret = 0;

	next_gpadl_handle =
		(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);

	ret = create_gpadl_header(kbuffer, size, &msginfo);
	if (ret)
		return ret;

	init_completion(&msginfo->waitevent);
	msginfo->waiting_channel = channel;

	gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
	gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
	gpadlmsg->child_relid = channel->offermsg.child_relid;
	gpadlmsg->gpadl = next_gpadl_handle;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&msginfo->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
			     sizeof(*msginfo), true);
	if (ret != 0)
		goto cleanup;

	list_for_each(curr, &msginfo->submsglist) {
		submsginfo = (struct vmbus_channel_msginfo *)curr;
		gpadl_body =
			(struct vmbus_channel_gpadl_body *)submsginfo->msg;

		gpadl_body->header.msgtype =
			CHANNELMSG_GPADL_BODY;
		gpadl_body->gpadl = next_gpadl_handle;

		ret = vmbus_post_msg(gpadl_body,
				     submsginfo->msgsize - sizeof(*submsginfo),
				     true);
		if (ret != 0)
			goto cleanup;
	}
	wait_for_completion(&msginfo->waitevent);

	if (channel->rescind) {
		ret = -ENODEV;
		goto cleanup;
	}

	/* At this point, we received the gpadl created msg */
	*gpadl_handle = gpadlmsg->gpadl;

cleanup:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&msginfo->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
	list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
				 msglistentry) {
		kfree(submsginfo);
	}

	kfree(msginfo);
	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);

/*
 * vmbus_teardown_gpadl - Teardown the specified GPADL handle
 */
int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
{
	struct vmbus_channel_gpadl_teardown *msg;
	struct vmbus_channel_msginfo *info;
	unsigned long flags;
	int ret;

	info = kmalloc(sizeof(*info) +
		       sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	init_completion(&info->waitevent);
	info->waiting_channel = channel;

	msg = (struct vmbus_channel_gpadl_teardown *)info->msg;

	msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
	msg->child_relid = channel->offermsg.child_relid;
	msg->gpadl = gpadl_handle;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&info->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
			     true);
	if (ret)
		goto post_msg_err;

	wait_for_completion(&info->waitevent);

	if (channel->rescind) {
		ret = -ENODEV;
		goto post_msg_err;
	}

post_msg_err:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	kfree(info);
	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);

static void reset_channel_cb(void *arg)
{
	struct vmbus_channel *channel = arg;

	channel->onchannel_callback = NULL;
}

static int vmbus_close_internal(struct vmbus_channel *channel)
{
	struct vmbus_channel_close_channel *msg;
	int ret;

	/*
	 * process_chn_event(), running in the tasklet, can race
	 * with vmbus_close_internal() in the case of SMP guest, e.g., when
	 * the former is accessing channel->inbound.ring_buffer, the latter
	 * could be freeing the ring_buffer pages.
	 *
	 * To resolve the race, we can serialize them by disabling the
	 * tasklet when the latter is running here.
	 */
	hv_event_tasklet_disable(channel);

	/*
	 * In case a device driver's probe() fails (e.g.,
	 * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
	 * rescinded later (e.g., we dynamically disable an Integrated Service
	 * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
	 * here we should skip most of the below cleanup work.
	 */
	if (channel->state != CHANNEL_OPENED_STATE) {
		ret = -EINVAL;
		goto out;
	}

	channel->state = CHANNEL_OPEN_STATE;
	channel->sc_creation_callback = NULL;
	/* Stop callback and cancel the timer asap */
	if (channel->target_cpu != get_cpu()) {
		/* The channel callback runs on a different CPU. */
		put_cpu();
		smp_call_function_single(channel->target_cpu, reset_channel_cb,
					 channel, true);
	} else {
		reset_channel_cb(channel);
		put_cpu();
	}

	/* Send a closing message */

	msg = &channel->close_msg.msg;

	msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
	msg->child_relid = channel->offermsg.child_relid;

	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel),
			     true);
	if (ret) {
		pr_err("Close failed: close post msg return is %d\n", ret);
		/*
		 * If we failed to post the close msg,
		 * it is perhaps better to leak memory.
		 */
		goto out;
	}

	/* Tear down the gpadl for the channel's ring buffer */
	if (channel->ringbuffer_gpadlhandle) {
		ret = vmbus_teardown_gpadl(channel,
					   channel->ringbuffer_gpadlhandle);
		if (ret) {
			pr_err("Close failed: teardown gpadl return %d\n", ret);
			/*
			 * If we failed to teardown gpadl,
			 * it is perhaps better to leak memory.
			 */
			goto out;
		}
	}

	/* Cleanup the ring buffers for this channel */
	hv_ringbuffer_cleanup(&channel->outbound);
	hv_ringbuffer_cleanup(&channel->inbound);

	free_pages((unsigned long)channel->ringbuffer_pages,
		   get_order(channel->ringbuffer_pagecount * PAGE_SIZE));

out:
	hv_event_tasklet_enable(channel);

	return ret;
}

/*
 * vmbus_close - Close the specified channel
 */
void vmbus_close(struct vmbus_channel *channel)
{
	struct list_head *cur, *tmp;
	struct vmbus_channel *cur_channel;

	if (channel->primary_channel != NULL) {
		/*
		 * We will only close sub-channels when
		 * the primary is closed.
		 */
		return;
	}
	/*
	 * Close all the sub-channels first and then close the
	 * primary channel.
	 */
	list_for_each_safe(cur, tmp, &channel->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
		if (cur_channel->state != CHANNEL_OPENED_STATE)
			continue;
		vmbus_close_internal(cur_channel);
	}
	/*
	 * Now close the primary.
	 */
	vmbus_close_internal(channel);
}
EXPORT_SYMBOL_GPL(vmbus_close);

int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
			 u32 bufferlen, u64 requestid,
			 enum vmbus_packet_type type, u32 flags, bool kick_q)
{
	struct vmpacket_descriptor desc;
	u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	bool lock = channel->acquire_ring_lock;
	int num_vecs = ((bufferlen != 0) ? 3 : 1);

	/* Setup the descriptor */
	desc.type = type; /* VmbusPacketTypeDataInBand; */
	desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
	/* in 8-bytes granularity */
	desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
	desc.len8 = (u16)(packetlen_aligned >> 3);
	desc.trans_id = requestid;

	bufferlist[0].iov_base = &desc;
	bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	return hv_ringbuffer_write(channel, bufferlist, num_vecs,
				   lock, kick_q);
}
EXPORT_SYMBOL(vmbus_sendpacket_ctl);

/**
 * vmbus_sendpacket() - Send the specified buffer on the given channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer containing the data to send.
 * @bufferlen: Length of the data in @buffer.
 * @requestid: Identifier of the request
 * @type: Type of packet that is being sent e.g. negotiate, time
 * packet etc.
 * @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
 *
 * Sends data in @buffer directly to hyper-v via the vmbus.
 * This will send the data unparsed to hyper-v.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
		     u32 bufferlen, u64 requestid,
		     enum vmbus_packet_type type, u32 flags)
{
	return vmbus_sendpacket_ctl(channel, buffer, bufferlen, requestid,
				    type, flags, true);
}
EXPORT_SYMBOL(vmbus_sendpacket);

/*
 * vmbus_sendpacket_pagebuffer_ctl - Send a range of single-page buffer
 * packets using a GPADL Direct packet type. This interface allows you
 * to control notifying the host. This will be useful for sending
 * batched data. Also the sender can control the send flags
 * explicitly.
 */
int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
				    struct hv_page_buffer pagebuffers[],
				    u32 pagecount, void *buffer, u32 bufferlen,
				    u64 requestid,
				    u32 flags,
				    bool kick_q)
{
	int i;
	struct vmbus_channel_packet_page_buffer desc;
	u32 descsize;
	u32 packetlen;
	u32 packetlen_aligned;
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	bool lock = channel->acquire_ring_lock;

	if (pagecount > MAX_PAGE_BUFFER_COUNT)
		return -EINVAL;

	/*
	 * Adjust the size down since vmbus_channel_packet_page_buffer is the
	 * largest size we support
	 */
	descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
		   ((MAX_PAGE_BUFFER_COUNT - pagecount) *
		   sizeof(struct hv_page_buffer));
	packetlen = descsize + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* Setup the descriptor */
	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc.flags = flags;
	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
	desc.length8 = (u16)(packetlen_aligned >> 3);
	desc.transactionid = requestid;
	desc.rangecount = pagecount;

	for (i = 0; i < pagecount; i++) {
		desc.range[i].len = pagebuffers[i].len;
		desc.range[i].offset = pagebuffers[i].offset;
		desc.range[i].pfn = pagebuffers[i].pfn;
	}

	bufferlist[0].iov_base = &desc;
	bufferlist[0].iov_len = descsize;
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	return hv_ringbuffer_write(channel, bufferlist, 3,
				   lock, kick_q);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);

/*
 * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
 * packets using a GPADL Direct packet type.
 */
int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
				struct hv_page_buffer pagebuffers[],
				u32 pagecount, void *buffer, u32 bufferlen,
				u64 requestid)
{
	u32 flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;

	return vmbus_sendpacket_pagebuffer_ctl(channel, pagebuffers, pagecount,
					       buffer, bufferlen, requestid,
					       flags, true);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);

/*
 * vmbus_sendpacket_mpb_desc - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 * The buffer includes the vmbus descriptor.
 */
int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
			      struct vmbus_packet_mpb_array *desc,
			      u32 desc_size,
			      void *buffer, u32 bufferlen, u64 requestid)
{
	u32 packetlen;
	u32 packetlen_aligned;
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	bool lock = channel->acquire_ring_lock;

	packetlen = desc_size + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* Setup the descriptor */
	desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
	desc->length8 = (u16)(packetlen_aligned >> 3);
	desc->transactionid = requestid;
	desc->rangecount = 1;

	bufferlist[0].iov_base = desc;
	bufferlist[0].iov_len = desc_size;
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	return hv_ringbuffer_write(channel, bufferlist, 3,
				   lock, true);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);

/*
 * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 */
int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
				     struct hv_multipage_buffer *multi_pagebuffer,
				     void *buffer, u32 bufferlen, u64 requestid)
{
	struct vmbus_channel_packet_multipage_buffer desc;
	u32 descsize;
	u32 packetlen;
	u32 packetlen_aligned;
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	bool lock = channel->acquire_ring_lock;
	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
					 multi_pagebuffer->len);

	if (pfncount > MAX_MULTIPAGE_BUFFER_COUNT)
		return -EINVAL;

	/*
	 * Adjust the size down since vmbus_channel_packet_multipage_buffer is
	 * the largest size we support
	 */
	descsize = sizeof(struct vmbus_channel_packet_multipage_buffer) -
		   ((MAX_MULTIPAGE_BUFFER_COUNT - pfncount) *
		   sizeof(u64));
	packetlen = descsize + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* Setup the descriptor */
	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
	desc.length8 = (u16)(packetlen_aligned >> 3);
	desc.transactionid = requestid;
	desc.rangecount = 1;

	desc.range.len = multi_pagebuffer->len;
	desc.range.offset = multi_pagebuffer->offset;

	memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array,
	       pfncount * sizeof(u64));

	bufferlist[0].iov_base = &desc;
	bufferlist[0].iov_len = descsize;
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	return hv_ringbuffer_write(channel, bufferlist, 3,
				   lock, true);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);

/**
 * vmbus_recvpacket() - Retrieve the user packet on the specified channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer you want to receive the data into.
 * @bufferlen: Maximum size of what the buffer will hold
 * @buffer_actual_len: The actual size of the data after it was received
 * @requestid: Identifier of the request
 *
 * Receives directly from the hyper-v vmbus and puts the data it received
 * into @buffer. This will receive the data unparsed from hyper-v.
 *
 * Mainly used by Hyper-V drivers.
 */
static inline int
__vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
		   u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
		   bool raw)
{
	return hv_ringbuffer_read(channel, buffer, bufferlen,
				  buffer_actual_len, requestid, raw);
}

int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
		     u32 bufferlen, u32 *buffer_actual_len,
		     u64 *requestid)
{
	return __vmbus_recvpacket(channel, buffer, bufferlen,
				  buffer_actual_len, requestid, false);
}
EXPORT_SYMBOL(vmbus_recvpacket);

/*
 * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
 */
int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
			 u32 bufferlen, u32 *buffer_actual_len,
			 u64 *requestid)
{
	return __vmbus_recvpacket(channel, buffer, bufferlen,
				  buffer_actual_len, requestid, true);
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);