/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/hyperv.h>
#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"
static void init_vp_index(struct vmbus_channel *channel, u16 dev_type);
static const struct vmbus_device vmbus_devs[] = {
	/* SCSI */
	{ .dev_type = HV_SCSI,
	  .perf_device = true,
	},

	/* PCIE */
	{ .dev_type = HV_PCIE,
	  .perf_device = true,
	},

	/* Synthetic Frame Buffer */
	{ .dev_type = HV_FB,
	  .perf_device = false,
	},

	/* Synthetic Keyboard */
	{ .dev_type = HV_KBD,
	  .perf_device = false,
	},

	/* Synthetic Mouse */
	{ .dev_type = HV_MOUSE,
	  .perf_device = false,
	},

	/* KVP */
	{ .dev_type = HV_KVP,
	  .perf_device = false,
	},

	/* Time Synch */
	{ .dev_type = HV_TS,
	  .perf_device = false,
	},

	/* Shutdown */
	{ .dev_type = HV_SHUTDOWN,
	  .perf_device = false,
	},

	/* File copy */
	{ .dev_type = HV_FCOPY,
	  .perf_device = false,
	},

	/* Backup */
	{ .dev_type = HV_BACKUP,
	  .perf_device = false,
	},

	/* Dynamic Memory */
	{ .dev_type = HV_DM,
	  .perf_device = false,
	},

	/* Unknown GUID */
	{ .dev_type = HV_UNKNOWN,
	  .perf_device = false,
	},
};
static const struct {
	uuid_le guid;
} vmbus_unsupported_devs[] = {
};
/*
 * The rescinded channel may be blocked waiting for a response from the host;
 * take care of that.
 */
static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
{
	struct vmbus_channel_msginfo *msginfo;
	unsigned long flags;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		if (msginfo->waiting_channel == channel) {
			complete(&msginfo->waitevent);
			break;
		}
	}

	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
static bool is_unsupported_vmbus_devs(const uuid_le *guid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++)
		if (!uuid_le_cmp(*guid, vmbus_unsupported_devs[i].guid))
			return true;

	return false;
}
static u16 hv_get_dev_type(const struct vmbus_channel *channel)
{
	const uuid_le *guid = &channel->offermsg.offer.if_type;
	u16 i;

	if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
		return HV_UNKNOWN;

	for (i = HV_IDE; i < HV_UNKNOWN; i++) {
		if (!uuid_le_cmp(*guid, vmbus_devs[i].guid))
			return i;
	}

	pr_info("Unknown GUID: %pUl\n", guid);
	return i;
}
/*
 * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
 * @icmsghdrp: Pointer to msg header structure
 * @icmsg_negotiate: Pointer to negotiate message structure
 * @buf: Raw buffer channel data
 *
 * @icmsghdrp is of type &struct icmsg_hdr.
 * Set up and fill in default negotiate response message.
 *
 * The fw_version and fw_vercnt specify the framework versions we can support.
 *
 * The srv_version and srv_vercnt specify the service versions we can support.
 *
 * Versions are given in decreasing order.
 *
 * nego_fw_version and nego_srv_version store the selected protocol versions.
 *
 * Mainly used by Hyper-V drivers.
 */
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
				u8 *buf, const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version)
{
	int icframe_major, icframe_minor;
	int icmsg_major, icmsg_minor;
	int fw_major, fw_minor;
	int srv_major, srv_minor;
	int i, j;
	bool found_match = false;
	struct icmsg_negotiate *negop;

	icmsghdrp->icmsgsize = 0x10;
	negop = (struct icmsg_negotiate *)&buf[
		sizeof(struct vmbuspipe_hdr) +
		sizeof(struct icmsg_hdr)];

	icframe_major = negop->icframe_vercnt;
	icframe_minor = 0;

	icmsg_major = negop->icmsg_vercnt;
	icmsg_minor = 0;

	/*
	 * Select the framework version number we will
	 * support.
	 */
	for (i = 0; i < fw_vercnt; i++) {
		fw_major = (fw_version[i] >> 16);
		fw_minor = (fw_version[i] & 0xFFFF);

		for (j = 0; j < negop->icframe_vercnt; j++) {
			if ((negop->icversion_data[j].major == fw_major) &&
			    (negop->icversion_data[j].minor == fw_minor)) {
				icframe_major = negop->icversion_data[j].major;
				icframe_minor = negop->icversion_data[j].minor;
				found_match = true;
			}
		}

		if (found_match)
			break;
	}

	if (!found_match)
		goto fw_error;

	found_match = false;

	for (i = 0; i < srv_vercnt; i++) {
		srv_major = (srv_version[i] >> 16);
		srv_minor = (srv_version[i] & 0xFFFF);

		for (j = negop->icframe_vercnt;
		     (j < negop->icframe_vercnt + negop->icmsg_vercnt);
		     j++) {
			if ((negop->icversion_data[j].major == srv_major) &&
			    (negop->icversion_data[j].minor == srv_minor)) {
				icmsg_major = negop->icversion_data[j].major;
				icmsg_minor = negop->icversion_data[j].minor;
				found_match = true;
			}
		}

		if (found_match)
			break;
	}

	/*
	 * Respond with the framework and service
	 * version numbers we can support.
	 */
fw_error:
	if (!found_match) {
		negop->icframe_vercnt = 0;
		negop->icmsg_vercnt = 0;
	} else {
		negop->icframe_vercnt = 1;
		negop->icmsg_vercnt = 1;
	}

	if (nego_fw_version)
		*nego_fw_version = (icframe_major << 16) | icframe_minor;

	if (nego_srv_version)
		*nego_srv_version = (icmsg_major << 16) | icmsg_minor;

	negop->icversion_data[0].major = icframe_major;
	negop->icversion_data[0].minor = icframe_minor;
	negop->icversion_data[1].major = icmsg_major;
	negop->icversion_data[1].minor = icmsg_minor;

	return found_match;
}
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
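/*
 * Usage sketch (illustrative only, not part of this file): an
 * integration-component driver's channel callback would typically call
 * vmbus_prep_negotiate_resp() after reading an ICMSGTYPE_NEGOTIATE packet
 * from the ring buffer. The version tables and buffer below are assumed
 * placeholders for that driver's own definitions.
 *
 *	static const int fw_versions[] = { UTIL_FW_VERSION };
 *	static const int srv_versions[] = { SRV_VERSION };
 *	int fw_vers, srv_vers;
 *
 *	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE)
 *		vmbus_prep_negotiate_resp(icmsghdrp, buf,
 *					  fw_versions, ARRAY_SIZE(fw_versions),
 *					  srv_versions, ARRAY_SIZE(srv_versions),
 *					  &fw_vers, &srv_vers);
 */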
/*
 * alloc_channel - Allocate and initialize a vmbus channel object
 */
static struct vmbus_channel *alloc_channel(void)
{
	struct vmbus_channel *channel;

	channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
	if (!channel)
		return NULL;

	channel->acquire_ring_lock = true;
	spin_lock_init(&channel->inbound_lock);
	spin_lock_init(&channel->lock);

	INIT_LIST_HEAD(&channel->sc_list);
	INIT_LIST_HEAD(&channel->percpu_list);

	return channel;
}
/*
 * free_channel - Release the resources used by the vmbus channel object
 */
static void free_channel(struct vmbus_channel *channel)
{
	kfree(channel);
}
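/*
 * percpu_channel_enq()/percpu_channel_deq() below are expected to run on
 * the channel's target CPU: callers invoke them directly when already on
 * that CPU, or via smp_call_function_single() otherwise, so the per-CPU
 * channel lists need no additional locking.
 */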
static void percpu_channel_enq(void *arg)
{
	struct vmbus_channel *channel = arg;
	int cpu = smp_processor_id();

	list_add_tail(&channel->percpu_list, &hv_context.percpu_list[cpu]);
}

static void percpu_channel_deq(void *arg)
{
	struct vmbus_channel *channel = arg;

	list_del(&channel->percpu_list);
}
static void vmbus_release_relid(u32 relid)
{
	struct vmbus_channel_relid_released msg;

	memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
	msg.child_relid = relid;
	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
	vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
		       true);
}
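/*
 * hv_event_tasklet_disable()/hv_event_tasklet_enable() quiesce the event
 * DPC (tasklet) of the channel's target CPU while the channel is being
 * added to or removed from the per-CPU lists, so the DPC never observes
 * those lists in a half-updated state.
 */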
void hv_event_tasklet_disable(struct vmbus_channel *channel)
{
	struct tasklet_struct *tasklet;

	tasklet = hv_context.event_dpc[channel->target_cpu];
	tasklet_disable(tasklet);
}

void hv_event_tasklet_enable(struct vmbus_channel *channel)
{
	struct tasklet_struct *tasklet;

	tasklet = hv_context.event_dpc[channel->target_cpu];
	tasklet_enable(tasklet);

	/* In case there is any pending event */
	tasklet_schedule(tasklet);
}
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
{
	unsigned long flags;
	struct vmbus_channel *primary_channel;

	BUG_ON(!channel->rescind);
	BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));

	hv_event_tasklet_disable(channel);
	if (channel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(channel->target_cpu,
					 percpu_channel_deq, channel, true);
	} else {
		percpu_channel_deq(channel);
		put_cpu();
	}
	hv_event_tasklet_enable(channel);

	if (channel->primary_channel == NULL) {
		list_del(&channel->listentry);

		primary_channel = channel;
	} else {
		primary_channel = channel->primary_channel;
		spin_lock_irqsave(&primary_channel->lock, flags);
		list_del(&channel->sc_list);
		primary_channel->num_sc--;
		spin_unlock_irqrestore(&primary_channel->lock, flags);
	}

	/*
	 * We need to free the bit for init_vp_index() to work in the case
	 * of sub-channel, when we reload drivers like hv_netvsc.
	 */
	if (channel->affinity_policy == HV_LOCALIZED)
		cpumask_clear_cpu(channel->target_cpu,
				  &primary_channel->alloced_cpus_in_node);

	vmbus_release_relid(relid);

	free_channel(channel);
}
void vmbus_free_channels(void)
{
	struct vmbus_channel *channel, *tmp;

	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
				 listentry) {
		/* hv_process_channel_removal() needs this */
		channel->rescind = true;

		vmbus_device_unregister(channel->device_obj);
	}
	mutex_unlock(&vmbus_connection.channel_mutex);
}
/*
 * vmbus_process_offer - Process the offer by creating a channel/device
 * associated with this offer
 */
static void vmbus_process_offer(struct vmbus_channel *newchannel)
{
	struct vmbus_channel *channel;
	bool fnew = true;
	unsigned long flags;
	u16 dev_type;
	int ret;

	/* Make sure this is a new offer */
	mutex_lock(&vmbus_connection.channel_mutex);

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!uuid_le_cmp(channel->offermsg.offer.if_type,
				 newchannel->offermsg.offer.if_type) &&
		    !uuid_le_cmp(channel->offermsg.offer.if_instance,
				 newchannel->offermsg.offer.if_instance)) {
			fnew = false;
			break;
		}
	}

	if (fnew)
		list_add_tail(&newchannel->listentry,
			      &vmbus_connection.chn_list);

	mutex_unlock(&vmbus_connection.channel_mutex);

	if (!fnew) {
		/*
		 * Check to see if this is a sub-channel.
		 */
		if (newchannel->offermsg.offer.sub_channel_index != 0) {
			/*
			 * Process the sub-channel.
			 */
			newchannel->primary_channel = channel;
			spin_lock_irqsave(&channel->lock, flags);
			list_add_tail(&newchannel->sc_list, &channel->sc_list);
			channel->num_sc++;
			spin_unlock_irqrestore(&channel->lock, flags);
		} else {
			goto err_free_chan;
		}
	}

	dev_type = hv_get_dev_type(newchannel);

	init_vp_index(newchannel, dev_type);

	hv_event_tasklet_disable(newchannel);
	if (newchannel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(newchannel->target_cpu,
					 percpu_channel_enq,
					 newchannel, true);
	} else {
		percpu_channel_enq(newchannel);
		put_cpu();
	}
	hv_event_tasklet_enable(newchannel);

	/*
	 * This state is used to indicate a successful open
	 * so that when we do close the channel normally, we
	 * can cleanup properly.
	 */
	newchannel->state = CHANNEL_OPEN_STATE;

	if (!fnew) {
		if (channel->sc_creation_callback != NULL)
			channel->sc_creation_callback(newchannel);
		return;
	}

	/*
	 * Start the process of binding this offer to the driver
	 * We need to set the DeviceObject field before calling
	 * vmbus_child_dev_add()
	 */
	newchannel->device_obj = vmbus_device_create(
		&newchannel->offermsg.offer.if_type,
		&newchannel->offermsg.offer.if_instance,
		newchannel);
	if (!newchannel->device_obj)
		goto err_deq_chan;

	newchannel->device_obj->device_id = dev_type;
	/*
	 * Add the new device to the bus. This will kick off device-driver
	 * binding which eventually invokes the device driver's AddDevice()
	 * method.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);
	ret = vmbus_device_register(newchannel->device_obj);
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (ret != 0) {
		pr_err("unable to add child device object (relid %d)\n",
		       newchannel->offermsg.child_relid);
		kfree(newchannel->device_obj);
		goto err_deq_chan;
	}

	return;

err_deq_chan:
	mutex_lock(&vmbus_connection.channel_mutex);
	list_del(&newchannel->listentry);
	mutex_unlock(&vmbus_connection.channel_mutex);

	hv_event_tasklet_disable(newchannel);
	if (newchannel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(newchannel->target_cpu,
					 percpu_channel_deq, newchannel, true);
	} else {
		percpu_channel_deq(newchannel);
		put_cpu();
	}
	hv_event_tasklet_enable(newchannel);

	vmbus_release_relid(newchannel->offermsg.child_relid);

err_free_chan:
	free_channel(newchannel);
}
/*
 * We use this state to statically distribute the channel interrupt load.
 */
static int next_numa_node_id;

/*
 * Starting with Win8, we can statically distribute the incoming
 * channel interrupt load by binding a channel to VCPU.
 * We do this in a hierarchical fashion:
 * First distribute the primary channels across available NUMA nodes
 * and then distribute the subchannels amongst the CPUs in the NUMA
 * node assigned to the primary channel.
 *
 * For pre-win8 hosts or non-performance critical channels we assign the
 * first CPU in the first NUMA node.
 */
static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
{
	u32 cur_cpu;
	bool perf_chn = vmbus_devs[dev_type].perf_device;
	struct vmbus_channel *primary = channel->primary_channel;
	int next_node;
	struct cpumask available_mask;
	struct cpumask *alloced_mask;

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
		/*
		 * Prior to win8, all channel interrupts are
		 * delivered on cpu 0.
		 * Also if the channel is not a performance critical
		 * channel, bind it to cpu 0.
		 */
		channel->numa_node = 0;
		channel->target_cpu = 0;
		channel->target_vp = hv_context.vp_index[0];
		return;
	}

	/*
	 * Based on the channel affinity policy, we will assign the NUMA
	 * nodes.
	 */
	if ((channel->affinity_policy == HV_BALANCED) || (!primary)) {
		while (true) {
			next_node = next_numa_node_id++;
			if (next_node == nr_node_ids) {
				next_node = next_numa_node_id = 0;
				continue;
			}
			if (cpumask_empty(cpumask_of_node(next_node)))
				continue;
			break;
		}
		channel->numa_node = next_node;
		primary = channel;
	}
	alloced_mask = &hv_context.hv_numa_map[primary->numa_node];

	if (cpumask_weight(alloced_mask) ==
	    cpumask_weight(cpumask_of_node(primary->numa_node))) {
		/*
		 * We have cycled through all the CPUs in the node;
		 * reset the alloced map.
		 */
		cpumask_clear(alloced_mask);
	}

	cpumask_xor(&available_mask, alloced_mask,
		    cpumask_of_node(primary->numa_node));

	cur_cpu = -1;

	if (primary->affinity_policy == HV_LOCALIZED) {
		/*
		 * Normally Hyper-V host doesn't create more subchannels
		 * than there are VCPUs on the node but it is possible when not
		 * all present VCPUs on the node are initialized by guest.
		 * Clear the alloced_cpus_in_node to start over.
		 */
		if (cpumask_equal(&primary->alloced_cpus_in_node,
				  cpumask_of_node(primary->numa_node)))
			cpumask_clear(&primary->alloced_cpus_in_node);
	}

	while (true) {
		cur_cpu = cpumask_next(cur_cpu, &available_mask);
		if (cur_cpu >= nr_cpu_ids) {
			cur_cpu = -1;
			cpumask_copy(&available_mask,
				     cpumask_of_node(primary->numa_node));
			continue;
		}

		if (primary->affinity_policy == HV_LOCALIZED) {
			/*
			 * NOTE: in the case of sub-channel, we clear the
			 * sub-channel related bit(s) in
			 * primary->alloced_cpus_in_node in
			 * hv_process_channel_removal(), so when we
			 * reload drivers like hv_netvsc in SMP guest, here
			 * we're able to re-allocate
			 * bit from primary->alloced_cpus_in_node.
			 */
			if (!cpumask_test_cpu(cur_cpu,
					      &primary->alloced_cpus_in_node)) {
				cpumask_set_cpu(cur_cpu,
						&primary->alloced_cpus_in_node);
				cpumask_set_cpu(cur_cpu, alloced_mask);
				break;
			}
		} else {
			cpumask_set_cpu(cur_cpu, alloced_mask);
			break;
		}
	}

	channel->target_cpu = cur_cpu;
	channel->target_vp = hv_context.vp_index[cur_cpu];
}
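/*
 * Illustrative example of the policy above (assumed topology, not taken
 * from this file): on a guest with two NUMA nodes of 8 VCPUs each, the
 * first performance-critical primary channel is bound to a CPU on node 0,
 * the next primary to node 1, and so on round-robin; each sub-channel is
 * then spread across the not-yet-used CPUs of its primary's node, and once
 * that node's CPUs are exhausted the per-node allocation mask is cleared
 * and assignment starts over.
 */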
static void vmbus_wait_for_unload(void)
{
	int cpu;
	void *page_addr;
	struct hv_message *msg;
	struct vmbus_channel_message_header *hdr;
	u32 message_type;

	/*
	 * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
	 * used for initial contact or to CPU0 depending on host version. When
	 * we're crashing on a different CPU let's hope that IRQ handler on
	 * the cpu which receives CHANNELMSG_UNLOAD_RESPONSE is still
	 * functional and vmbus_unload_response() will complete
	 * vmbus_connection.unload_event. If not, the last thing we can do is
	 * read message pages for all CPUs directly.
	 */
	while (1) {
		if (completion_done(&vmbus_connection.unload_event))
			break;

		for_each_online_cpu(cpu) {
			page_addr = hv_context.synic_message_page[cpu];
			msg = (struct hv_message *)page_addr +
				VMBUS_MESSAGE_SINT;

			message_type = READ_ONCE(msg->header.message_type);
			if (message_type == HVMSG_NONE)
				continue;

			hdr = (struct vmbus_channel_message_header *)
				msg->u.payload;

			if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
				complete(&vmbus_connection.unload_event);

			vmbus_signal_eom(msg, message_type);
		}

		mdelay(10);
	}

	/*
	 * We're crashing and already got the UNLOAD_RESPONSE, cleanup all
	 * maybe-pending messages on all CPUs to be able to receive new
	 * messages after we reconnect.
	 */
	for_each_online_cpu(cpu) {
		page_addr = hv_context.synic_message_page[cpu];
		msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
		msg->header.message_type = HVMSG_NONE;
	}
}
/*
 * vmbus_unload_response - Handler for the unload response.
 */
static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
{
	/*
	 * This is a global event; just wakeup the waiting thread.
	 * Once we successfully unload, we can cleanup the monitor state.
	 */
	complete(&vmbus_connection.unload_event);
}
void vmbus_initiate_unload(bool crash)
{
	struct vmbus_channel_message_header hdr;

	/* Pre-Win2012R2 hosts don't support reconnect */
	if (vmbus_proto_version < VERSION_WIN8_1)
		return;

	init_completion(&vmbus_connection.unload_event);
	memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
	hdr.msgtype = CHANNELMSG_UNLOAD;
	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
		       !crash);

	/*
	 * vmbus_initiate_unload() is also called on crash and the crash can be
	 * happening in an interrupt context, where scheduling is impossible.
	 */
	if (!crash)
		wait_for_completion(&vmbus_connection.unload_event);
	else
		vmbus_wait_for_unload();
}
/*
 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
 */
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_offer_channel *offer;
	struct vmbus_channel *newchannel;

	offer = (struct vmbus_channel_offer_channel *)hdr;

	/* Allocate the channel object and save this offer. */
	newchannel = alloc_channel();
	if (!newchannel) {
		vmbus_release_relid(offer->child_relid);
		pr_err("Unable to allocate channel object\n");
		return;
	}

	/*
	 * By default we setup state to enable batched
	 * reading. A specific service can choose to
	 * disable this prior to opening the channel.
	 */
	newchannel->batched_reading = true;

	/*
	 * Setup state for signalling the host.
	 */
	newchannel->sig_event = (struct hv_input_signal_event *)
				(ALIGN((unsigned long)
				&newchannel->sig_buf,
				HV_HYPERCALL_PARAM_ALIGN));

	newchannel->sig_event->connectionid.asu32 = 0;
	newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
	newchannel->sig_event->flag_number = 0;
	newchannel->sig_event->rsvdz = 0;

	if (vmbus_proto_version != VERSION_WS2008) {
		newchannel->is_dedicated_interrupt =
				(offer->is_dedicated_interrupt != 0);
		newchannel->sig_event->connectionid.u.id =
				offer->connection_id;
	}

	memcpy(&newchannel->offermsg, offer,
	       sizeof(struct vmbus_channel_offer_channel));
	newchannel->monitor_grp = (u8)offer->monitorid / 32;
	newchannel->monitor_bit = (u8)offer->monitorid % 32;

	vmbus_process_offer(newchannel);
}
/*
 * vmbus_onoffer_rescind - Rescind offer handler.
 *
 * We queue a work item to process this offer synchronously.
 */
static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_rescind_offer *rescind;
	struct vmbus_channel *channel;
	struct device *dev;
	unsigned long flags;

	rescind = (struct vmbus_channel_rescind_offer *)hdr;

	mutex_lock(&vmbus_connection.channel_mutex);
	channel = relid2channel(rescind->child_relid);

	if (channel == NULL) {
		/*
		 * This should never happen: in vmbus_process_offer(), we
		 * have already invoked vmbus_release_relid() on error.
		 */
		goto out;
	}

	spin_lock_irqsave(&channel->lock, flags);
	channel->rescind = true;
	spin_unlock_irqrestore(&channel->lock, flags);

	vmbus_rescind_cleanup(channel);

	if (channel->device_obj) {
		if (channel->chn_rescind_callback) {
			channel->chn_rescind_callback(channel);
			goto out;
		}
		/*
		 * We will have to unregister this device from the
		 * driver core.
		 */
		dev = get_device(&channel->device_obj->device);
		if (dev) {
			vmbus_device_unregister(channel->device_obj);
			put_device(dev);
		}
	} else {
		hv_process_channel_removal(channel,
					   channel->offermsg.child_relid);
	}

out:
	mutex_unlock(&vmbus_connection.channel_mutex);
}
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
{
	mutex_lock(&vmbus_connection.channel_mutex);

	BUG_ON(!is_hvsock_channel(channel));

	channel->rescind = true;
	vmbus_device_unregister(channel->device_obj);

	mutex_unlock(&vmbus_connection.channel_mutex);
}
EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
/*
 * vmbus_onoffers_delivered -
 * This is invoked when all offers have been delivered.
 *
 * Nothing to do here.
 */
static void vmbus_onoffers_delivered(
			struct vmbus_channel_message_header *hdr)
{
}
/*
 * vmbus_onopen_result - Open result handler.
 *
 * This is invoked when we receive a response to our channel open request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_open_result *result;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_open_channel *openmsg;
	unsigned long flags;

	result = (struct vmbus_channel_open_result *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
			openmsg =
			(struct vmbus_channel_open_channel *)msginfo->msg;
			if (openmsg->child_relid == result->child_relid &&
			    openmsg->openid == result->openid) {
				memcpy(&msginfo->response.open_result,
				       result,
				       sizeof(
					struct vmbus_channel_open_result));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
 * vmbus_ongpadl_created - GPADL created handler.
 *
 * This is invoked when we receive a response to our gpadl create request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_created *gpadlcreated;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_header *gpadlheader;
	unsigned long flags;

	gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;

	/*
	 * Find the establish msg, copy the result and signal/unblock the wait
	 * event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
			gpadlheader =
			(struct vmbus_channel_gpadl_header *)requestheader;

			if ((gpadlcreated->child_relid ==
			     gpadlheader->child_relid) &&
			    (gpadlcreated->gpadl == gpadlheader->gpadl)) {
				memcpy(&msginfo->response.gpadl_created,
				       gpadlcreated,
				       sizeof(
					struct vmbus_channel_gpadl_created));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
 * vmbus_ongpadl_torndown - GPADL torndown handler.
 *
 * This is invoked when we receive a response to our gpadl teardown request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_torndown(
			struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_torndown *gpadl_torndown;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_teardown *gpadl_teardown;
	unsigned long flags;

	gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
			gpadl_teardown =
			(struct vmbus_channel_gpadl_teardown *)requestheader;

			if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
				memcpy(&msginfo->response.gpadl_torndown,
				       gpadl_torndown,
				       sizeof(
					struct vmbus_channel_gpadl_torndown));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
 * vmbus_onversion_response - Version response handler
 *
 * This is invoked when we receive a response to our initiate contact request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onversion_response(
		struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_version_response *version_response;
	unsigned long flags;

	version_response = (struct vmbus_channel_version_response *)hdr;
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype ==
		    CHANNELMSG_INITIATE_CONTACT) {
			memcpy(&msginfo->response.version_response,
			       version_response,
			       sizeof(struct vmbus_channel_version_response));
			complete(&msginfo->waitevent);
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/* Channel message dispatch table */
struct vmbus_channel_message_table_entry
	channel_message_table[CHANNELMSG_COUNT] = {
	{CHANNELMSG_INVALID,			0, NULL},
	{CHANNELMSG_OFFERCHANNEL,		0, vmbus_onoffer},
	{CHANNELMSG_RESCIND_CHANNELOFFER,	0, vmbus_onoffer_rescind},
	{CHANNELMSG_REQUESTOFFERS,		0, NULL},
	{CHANNELMSG_ALLOFFERS_DELIVERED,	1, vmbus_onoffers_delivered},
	{CHANNELMSG_OPENCHANNEL,		0, NULL},
	{CHANNELMSG_OPENCHANNEL_RESULT,		1, vmbus_onopen_result},
	{CHANNELMSG_CLOSECHANNEL,		0, NULL},
	{CHANNELMSG_GPADL_HEADER,		0, NULL},
	{CHANNELMSG_GPADL_BODY,			0, NULL},
	{CHANNELMSG_GPADL_CREATED,		1, vmbus_ongpadl_created},
	{CHANNELMSG_GPADL_TEARDOWN,		0, NULL},
	{CHANNELMSG_GPADL_TORNDOWN,		1, vmbus_ongpadl_torndown},
	{CHANNELMSG_RELID_RELEASED,		0, NULL},
	{CHANNELMSG_INITIATE_CONTACT,		0, NULL},
	{CHANNELMSG_VERSION_RESPONSE,		1, vmbus_onversion_response},
	{CHANNELMSG_UNLOAD,			0, NULL},
	{CHANNELMSG_UNLOAD_RESPONSE,		1, vmbus_unload_response},
	{CHANNELMSG_18,				0, NULL},
	{CHANNELMSG_19,				0, NULL},
	{CHANNELMSG_20,				0, NULL},
	{CHANNELMSG_TL_CONNECT_REQUEST,		0, NULL},
};
/*
 * vmbus_onmessage - Handler for channel protocol messages.
 *
 * This is invoked in the vmbus worker thread context.
 */
void vmbus_onmessage(void *context)
{
	struct hv_message *msg = context;
	struct vmbus_channel_message_header *hdr;
	int size;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;
	size = msg->header.payload_size;

	if (hdr->msgtype >= CHANNELMSG_COUNT) {
		pr_err("Received invalid channel message type %d size %d\n",
		       hdr->msgtype, size);
		print_hex_dump_bytes("", DUMP_PREFIX_NONE,
				     (unsigned char *)msg->u.payload, size);
		return;
	}

	if (channel_message_table[hdr->msgtype].message_handler)
		channel_message_table[hdr->msgtype].message_handler(hdr);
	else
		pr_err("Unhandled channel message type %d\n", hdr->msgtype);
}
/*
 * vmbus_request_offers - Send a request to get all our pending offers.
 */
int vmbus_request_offers(void)
{
	struct vmbus_channel_message_header *msg;
	struct vmbus_channel_msginfo *msginfo;
	int ret;

	msginfo = kmalloc(sizeof(*msginfo) +
			  sizeof(struct vmbus_channel_message_header),
			  GFP_KERNEL);
	if (!msginfo)
		return -ENOMEM;

	msg = (struct vmbus_channel_message_header *)msginfo->msg;

	msg->msgtype = CHANNELMSG_REQUESTOFFERS;

	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
			     true);
	if (ret != 0)
		pr_err("Unable to request offers - %d\n", ret);

	kfree(msginfo);
	return ret;
}
/*
 * Retrieve the (sub) channel on which to send an outgoing request.
 * When a primary channel has multiple sub-channels, we try to
 * distribute the load equally amongst all available channels.
 */
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
{
	struct list_head *cur, *tmp;
	int cur_cpu;
	struct vmbus_channel *cur_channel;
	struct vmbus_channel *outgoing_channel = primary;
	int next_channel;
	int i = 1;

	if (list_empty(&primary->sc_list))
		return outgoing_channel;

	next_channel = primary->next_oc++;

	if (next_channel > (primary->num_sc)) {
		primary->next_oc = 0;
		return outgoing_channel;
	}

	cur_cpu = hv_context.vp_index[get_cpu()];
	put_cpu();

	list_for_each_safe(cur, tmp, &primary->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
		if (cur_channel->state != CHANNEL_OPENED_STATE)
			continue;

		if (cur_channel->target_vp == cur_cpu)
			return cur_channel;

		if (i == next_channel)
			return cur_channel;

		i++;
	}

	return outgoing_channel;
}
EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel);
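/*
 * Usage sketch (illustrative only, not part of this file): a multi-channel
 * driver such as a storage or network VSC would typically pick the outgoing
 * channel just before sending a request. The request structure and device
 * pointer below are assumed placeholders for that driver's own state.
 *
 *	struct vmbus_channel *outgoing;
 *	int ret;
 *
 *	outgoing = vmbus_get_outgoing_channel(device->channel);
 *	ret = vmbus_sendpacket(outgoing, &request, sizeof(request),
 *			       (unsigned long)&request, VM_PKT_DATA_INBAND,
 *			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 */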
static void invoke_sc_cb(struct vmbus_channel *primary_channel)
{
	struct list_head *cur, *tmp;
	struct vmbus_channel *cur_channel;

	if (primary_channel->sc_creation_callback == NULL)
		return;

	list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);

		primary_channel->sc_creation_callback(cur_channel);
	}
}
void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
				void (*sc_cr_cb)(struct vmbus_channel *new_sc))
{
	primary_channel->sc_creation_callback = sc_cr_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
{
	bool ret;

	ret = !list_empty(&primary->sc_list);

	if (ret) {
		/*
		 * Invoke the callback on sub-channel creation.
		 * This will present a uniform interface to the
		 * clients.
		 */
		invoke_sc_cb(primary);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
				void (*chn_rescind_cb)(struct vmbus_channel *))
{
	channel->chn_rescind_callback = chn_rescind_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);