/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"

static void init_vp_index(struct vmbus_channel *channel,
			  const uuid_le *type_guid);
/**
 * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
 * @icmsghdrp: Pointer to msg header structure
 * @negop: Pointer to negotiate message structure; the passed-in value is
 *         ignored, since the negotiate payload is located in @buf
 * @buf: Raw buffer containing the channel data, including the negotiate
 *       message
 * @fw_version: The framework version we can support
 * @srv_version: The service version we can support
 *
 * Set up and fill in default negotiate response message.
 *
 * Returns true if both the requested framework version and the requested
 * service version could be matched; false otherwise.
 *
 * Mainly used by Hyper-V drivers.
 */
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
			       struct icmsg_negotiate *negop, u8 *buf,
			       int fw_version, int srv_version)
{
	int icframe_major, icframe_minor;
	int icmsg_major, icmsg_minor;
	int fw_major, fw_minor;
	int srv_major, srv_minor;
	int i;
	bool found_match = false;

	icmsghdrp->icmsgsize = 0x10;
	fw_major = (fw_version >> 16);
	fw_minor = (fw_version & 0xFFFF);

	srv_major = (srv_version >> 16);
	srv_minor = (srv_version & 0xFFFF);

	negop = (struct icmsg_negotiate *)&buf[
		sizeof(struct vmbuspipe_hdr) +
		sizeof(struct icmsg_hdr)];

	icframe_major = negop->icframe_vercnt;
	icframe_minor = 0;

	icmsg_major = negop->icmsg_vercnt;
	icmsg_minor = 0;

	/*
	 * Select the framework version number we will
	 * support.
	 */

	for (i = 0; i < negop->icframe_vercnt; i++) {
		if ((negop->icversion_data[i].major == fw_major) &&
		    (negop->icversion_data[i].minor == fw_minor)) {
			icframe_major = negop->icversion_data[i].major;
			icframe_minor = negop->icversion_data[i].minor;
			found_match = true;
		}
	}

	if (!found_match)
		goto fw_error;

	found_match = false;

	for (i = negop->icframe_vercnt;
	     (i < negop->icframe_vercnt + negop->icmsg_vercnt); i++) {
		if ((negop->icversion_data[i].major == srv_major) &&
		    (negop->icversion_data[i].minor == srv_minor)) {
			icmsg_major = negop->icversion_data[i].major;
			icmsg_minor = negop->icversion_data[i].minor;
			found_match = true;
		}
	}

	/*
	 * Respond with the framework and service
	 * version numbers we can support.
	 */

fw_error:
	if (!found_match) {
		negop->icframe_vercnt = 0;
		negop->icmsg_vercnt = 0;
	} else {
		negop->icframe_vercnt = 1;
		negop->icmsg_vercnt = 1;
	}

	negop->icversion_data[0].major = icframe_major;
	negop->icversion_data[0].minor = icframe_minor;
	negop->icversion_data[1].major = icmsg_major;
	negop->icversion_data[1].minor = icmsg_minor;
	return found_match;
}
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
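
/*
 * Illustrative sketch (not part of this driver): an integration-services
 * driver typically calls vmbus_prep_negotiate_resp() from its channel
 * callback when an ICMSGTYPE_NEGOTIATE message arrives, then sends the
 * prepared buffer back as a transaction response.  recv_buffer (a
 * driver-owned receive buffer) and MY_FW_VERSION/MY_SRV_VERSION are
 * assumptions for illustration only:
 *
 *	static void my_util_onchannelcallback(void *context)
 *	{
 *		struct vmbus_channel *channel = context;
 *		struct icmsg_hdr *icmsghdrp;
 *		u32 recvlen;
 *		u64 requestid;
 *
 *		vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE, &recvlen,
 *				 &requestid);
 *		if (!recvlen)
 *			return;
 *
 *		icmsghdrp = (struct icmsg_hdr *)&recv_buffer[
 *				sizeof(struct vmbuspipe_hdr)];
 *		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE)
 *			vmbus_prep_negotiate_resp(icmsghdrp, NULL, recv_buffer,
 *						  MY_FW_VERSION,
 *						  MY_SRV_VERSION);
 *
 *		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
 *			| ICMSGHDRFLAG_RESPONSE;
 *		vmbus_sendpacket(channel, recv_buffer, recvlen, requestid,
 *				 VM_PKT_DATA_INBAND, 0);
 *	}
 *
 * The second argument is ignored by this implementation (the negotiate
 * payload is taken from the raw buffer), so NULL is acceptable here.
 */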

/*
 * alloc_channel - Allocate and initialize a vmbus channel object
 */
static struct vmbus_channel *alloc_channel(void)
{
	static atomic_t chan_num = ATOMIC_INIT(0);
	struct vmbus_channel *channel;

	channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
	if (!channel)
		return NULL;

	channel->id = atomic_inc_return(&chan_num);
	spin_lock_init(&channel->inbound_lock);
	spin_lock_init(&channel->lock);

	INIT_LIST_HEAD(&channel->sc_list);
	INIT_LIST_HEAD(&channel->percpu_list);

	return channel;
}

/*
 * free_channel - Release the resources used by the vmbus channel object
 */
static void free_channel(struct vmbus_channel *channel)
{
	kfree(channel);
}

static void percpu_channel_enq(void *arg)
{
	struct vmbus_channel *channel = arg;
	int cpu = smp_processor_id();

	list_add_tail(&channel->percpu_list, &hv_context.percpu_list[cpu]);
}

static void percpu_channel_deq(void *arg)
{
	struct vmbus_channel *channel = arg;

	list_del(&channel->percpu_list);
}

void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
{
	struct vmbus_channel_relid_released msg;
	unsigned long flags;
	struct vmbus_channel *primary_channel;

	memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
	msg.child_relid = relid;
	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
	vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released));

	if (channel == NULL)
		return;

	if (channel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(channel->target_cpu,
					 percpu_channel_deq, channel, true);
	} else {
		percpu_channel_deq(channel);
		put_cpu();
	}

	if (channel->primary_channel == NULL) {
		spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
		list_del(&channel->listentry);
		spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
	} else {
		primary_channel = channel->primary_channel;
		spin_lock_irqsave(&primary_channel->lock, flags);
		list_del(&channel->sc_list);
		primary_channel->num_sc--;
		spin_unlock_irqrestore(&primary_channel->lock, flags);
	}
	free_channel(channel);
}

void vmbus_free_channels(void)
{
	struct vmbus_channel *channel, *tmp;

	list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
				 listentry) {
		/*
		 * If we don't set rescind to true, vmbus_close_internal()
		 * won't invoke hv_process_channel_removal().
		 */
		channel->rescind = true;

		vmbus_device_unregister(channel->device_obj);
	}
}

/*
 * vmbus_process_offer - Process the offer by creating a channel/device
 * associated with this offer
 */
static void vmbus_process_offer(struct vmbus_channel *newchannel)
{
	struct vmbus_channel *channel;
	bool fnew = true;
	unsigned long flags;

	/* Make sure this is a new offer */
	spin_lock_irqsave(&vmbus_connection.channel_lock, flags);

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!uuid_le_cmp(channel->offermsg.offer.if_type,
				 newchannel->offermsg.offer.if_type) &&
		    !uuid_le_cmp(channel->offermsg.offer.if_instance,
				 newchannel->offermsg.offer.if_instance)) {
			fnew = false;
			break;
		}
	}

	if (fnew)
		list_add_tail(&newchannel->listentry,
			      &vmbus_connection.chn_list);

	spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);

	if (!fnew) {
		/*
		 * Check to see if this is a sub-channel.
		 */
		if (newchannel->offermsg.offer.sub_channel_index != 0) {
			/*
			 * Process the sub-channel.
			 */
			newchannel->primary_channel = channel;
			spin_lock_irqsave(&channel->lock, flags);
			list_add_tail(&newchannel->sc_list, &channel->sc_list);
			channel->num_sc++;
			spin_unlock_irqrestore(&channel->lock, flags);
		} else
			goto err_free_chan;
	}

	init_vp_index(newchannel, &newchannel->offermsg.offer.if_type);

	if (newchannel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(newchannel->target_cpu,
					 percpu_channel_enq,
					 newchannel, true);
	} else {
		percpu_channel_enq(newchannel);
		put_cpu();
	}

	/*
	 * This state is used to indicate a successful open
	 * so that when we do close the channel normally, we
	 * can cleanup properly
	 */
	newchannel->state = CHANNEL_OPEN_STATE;

	if (!fnew) {
		if (channel->sc_creation_callback != NULL)
			channel->sc_creation_callback(newchannel);
		return;
	}

	/*
	 * Start the process of binding this offer to the driver
	 * We need to set the DeviceObject field before calling
	 * vmbus_child_dev_add()
	 */
	newchannel->device_obj = vmbus_device_create(
		&newchannel->offermsg.offer.if_type,
		&newchannel->offermsg.offer.if_instance,
		newchannel);
	if (!newchannel->device_obj)
		goto err_deq_chan;

	/*
	 * Add the new device to the bus. This will kick off device-driver
	 * binding which eventually invokes the device driver's AddDevice()
	 * method.
	 */
	if (vmbus_device_register(newchannel->device_obj) != 0) {
		pr_err("unable to add child device object (relid %d)\n",
		       newchannel->offermsg.child_relid);
		kfree(newchannel->device_obj);
		goto err_deq_chan;
	}
	return;

err_deq_chan:
	spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
	list_del(&newchannel->listentry);
	spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);

	if (newchannel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(newchannel->target_cpu,
					 percpu_channel_deq, newchannel, true);
	} else {
		percpu_channel_deq(newchannel);
		put_cpu();
	}

err_free_chan:
	free_channel(newchannel);
}

enum {
	IDE = 0,
	SCSI,
	NIC,
	ND_NIC,
	MAX_PERF_CHN,
};

/*
 * This is an array of device_ids (device types) that are performance critical.
 * We attempt to distribute the interrupt load for these devices across
 * all available CPUs.
 */
static const struct hv_vmbus_device_id hp_devs[] = {
	/* IDE */
	{ HV_IDE_GUID, },
	/* Storage - SCSI */
	{ HV_SCSI_GUID, },
	/* Network */
	{ HV_NIC_GUID, },
	/* NetworkDirect Guest RDMA */
	{ HV_ND_GUID, },
};

/*
 * We use this state to statically distribute the channel interrupt load.
 */
static int next_numa_node_id;

/*
 * Starting with Win8, we can statically distribute the incoming
 * channel interrupt load by binding a channel to a VCPU.
 * We do this in a hierarchical fashion:
 * first distribute the primary channels across available NUMA nodes
 * and then distribute the subchannels amongst the CPUs in the NUMA
 * node assigned to the primary channel.
 *
 * For pre-win8 hosts or non-performance-critical channels we assign the
 * first CPU in the first NUMA node.
 */
static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_guid)
{
	u32 cur_cpu;
	int i;
	bool perf_chn = false;
	struct vmbus_channel *primary = channel->primary_channel;
	int next_node;
	struct cpumask available_mask;
	struct cpumask *alloced_mask;

	for (i = IDE; i < MAX_PERF_CHN; i++) {
		if (!memcmp(type_guid->b, hp_devs[i].guid,
			    sizeof(uuid_le))) {
			perf_chn = true;
			break;
		}
	}
	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
		/*
		 * Prior to win8, all channel interrupts are
		 * delivered on cpu 0.
		 * Also if the channel is not a performance critical
		 * channel, bind it to cpu 0.
		 */
		channel->numa_node = 0;
		channel->target_cpu = 0;
		channel->target_vp = hv_context.vp_index[0];
		return;
	}

	/*
	 * We distribute primary channels evenly across all the available
	 * NUMA nodes and within the assigned NUMA node we will assign the
	 * first available CPU to the primary channel.
	 * The sub-channels will be assigned to the CPUs available in the
	 * NUMA node evenly.
	 */
	if (!primary) {
		while (true) {
			next_node = next_numa_node_id++;
			if (next_node == nr_node_ids)
				next_node = next_numa_node_id = 0;
			if (cpumask_empty(cpumask_of_node(next_node)))
				continue;
			break;
		}
		channel->numa_node = next_node;
		primary = channel;
	}
	alloced_mask = &hv_context.hv_numa_map[primary->numa_node];

	if (cpumask_weight(alloced_mask) ==
	    cpumask_weight(cpumask_of_node(primary->numa_node))) {
		/*
		 * We have cycled through all the CPUs in the node;
		 * reset the alloced map.
		 */
		cpumask_clear(alloced_mask);
	}

	cpumask_xor(&available_mask, alloced_mask,
		    cpumask_of_node(primary->numa_node));

	cur_cpu = -1;
	while (true) {
		cur_cpu = cpumask_next(cur_cpu, &available_mask);
		if (cur_cpu >= nr_cpu_ids) {
			cur_cpu = -1;
			cpumask_copy(&available_mask,
				     cpumask_of_node(primary->numa_node));
			continue;
		}

		if (!cpumask_test_cpu(cur_cpu,
				      &primary->alloced_cpus_in_node)) {
			cpumask_set_cpu(cur_cpu,
					&primary->alloced_cpus_in_node);
			cpumask_set_cpu(cur_cpu, alloced_mask);
			break;
		}
	}

	channel->target_cpu = cur_cpu;
	channel->target_vp = hv_context.vp_index[cur_cpu];
}
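
/*
 * Worked example (illustrative, assuming a guest with two NUMA nodes of
 * four CPUs each): the first performance-critical primary channel is
 * assigned numa_node 0 and bound to the first CPU of that node, and the
 * next primary channel goes to node 1.  A sub-channel inherits numa_node
 * from its primary and is bound to the next CPU of that node not yet in
 * alloced_cpus_in_node, so three sub-channels of the first channel would
 * fill the remaining three CPUs of node 0.
 */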

/*
 * vmbus_unload_response - Handler for the unload response.
 */
static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
{
	/*
	 * This is a global event; just wakeup the waiting thread.
	 * Once we successfully unload, we can cleanup the monitor state.
	 */
	complete(&vmbus_connection.unload_event);
}

void vmbus_initiate_unload(void)
{
	struct vmbus_channel_message_header hdr;

	/* Pre-Win2012R2 hosts don't support reconnect */
	if (vmbus_proto_version < VERSION_WIN8_1)
		return;

	init_completion(&vmbus_connection.unload_event);
	memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
	hdr.msgtype = CHANNELMSG_UNLOAD;
	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));

	wait_for_completion(&vmbus_connection.unload_event);
}

/*
 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
 */
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_offer_channel *offer;
	struct vmbus_channel *newchannel;

	offer = (struct vmbus_channel_offer_channel *)hdr;

	/* Allocate the channel object and save this offer. */
	newchannel = alloc_channel();
	if (!newchannel) {
		pr_err("Unable to allocate channel object\n");
		return;
	}

	/*
	 * By default we set up state to enable batched
	 * reading. A specific service can choose to
	 * disable this prior to opening the channel.
	 */
	newchannel->batched_reading = true;

	/*
	 * Set up state for signalling the host.
	 */
	newchannel->sig_event = (struct hv_input_signal_event *)
		(ALIGN((unsigned long)
		       &newchannel->sig_buf,
		       HV_HYPERCALL_PARAM_ALIGN));

	newchannel->sig_event->connectionid.asu32 = 0;
	newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
	newchannel->sig_event->flag_number = 0;
	newchannel->sig_event->rsvdz = 0;

	if (vmbus_proto_version != VERSION_WS2008) {
		newchannel->is_dedicated_interrupt =
			(offer->is_dedicated_interrupt != 0);
		newchannel->sig_event->connectionid.u.id =
			offer->connection_id;
	}

	memcpy(&newchannel->offermsg, offer,
	       sizeof(struct vmbus_channel_offer_channel));
	newchannel->monitor_grp = (u8)offer->monitorid / 32;
	newchannel->monitor_bit = (u8)offer->monitorid % 32;

	vmbus_process_offer(newchannel);
}

/*
 * vmbus_onoffer_rescind - Rescind offer handler.
 *
 * The rescinded offer is processed synchronously in this handler.
 */
static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_rescind_offer *rescind;
	struct vmbus_channel *channel;
	unsigned long flags;
	struct device *dev;

	rescind = (struct vmbus_channel_rescind_offer *)hdr;
	channel = relid2channel(rescind->child_relid);

	if (channel == NULL) {
		hv_process_channel_removal(NULL, rescind->child_relid);
		return;
	}

	spin_lock_irqsave(&channel->lock, flags);
	channel->rescind = true;
	spin_unlock_irqrestore(&channel->lock, flags);

	if (channel->device_obj) {
		/*
		 * We will have to unregister this device from the
		 * driver core.
		 */
		dev = get_device(&channel->device_obj->device);
		if (dev) {
			vmbus_device_unregister(channel->device_obj);
			put_device(dev);
		}
	} else {
		hv_process_channel_removal(channel,
					   channel->offermsg.child_relid);
	}
}

/*
 * vmbus_onoffers_delivered -
 * This is invoked when all offers have been delivered.
 *
 * Nothing to do here.
 */
static void vmbus_onoffers_delivered(
			struct vmbus_channel_message_header *hdr)
{
}

/*
 * vmbus_onopen_result - Open result handler.
 *
 * This is invoked when we receive a response to our channel open request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_open_result *result;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_open_channel *openmsg;
	unsigned long flags;

	result = (struct vmbus_channel_open_result *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
			openmsg =
			(struct vmbus_channel_open_channel *)msginfo->msg;
			if (openmsg->child_relid == result->child_relid &&
			    openmsg->openid == result->openid) {
				memcpy(&msginfo->response.open_result,
				       result,
				       sizeof(
					struct vmbus_channel_open_result));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_ongpadl_created - GPADL created handler.
 *
 * This is invoked when we receive a response to our gpadl create request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_created *gpadlcreated;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_header *gpadlheader;
	unsigned long flags;

	gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;

	/*
	 * Find the establish msg, copy the result and signal/unblock the wait
	 * event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
			gpadlheader =
			(struct vmbus_channel_gpadl_header *)requestheader;

			if ((gpadlcreated->child_relid ==
			     gpadlheader->child_relid) &&
			    (gpadlcreated->gpadl == gpadlheader->gpadl)) {
				memcpy(&msginfo->response.gpadl_created,
				       gpadlcreated,
				       sizeof(
					struct vmbus_channel_gpadl_created));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_ongpadl_torndown - GPADL torndown handler.
 *
 * This is invoked when we receive a response to our gpadl teardown request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_torndown(
			struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_torndown *gpadl_torndown;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_teardown *gpadl_teardown;
	unsigned long flags;

	gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
			gpadl_teardown =
			(struct vmbus_channel_gpadl_teardown *)requestheader;

			if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
				memcpy(&msginfo->response.gpadl_torndown,
				       gpadl_torndown,
				       sizeof(
					struct vmbus_channel_gpadl_torndown));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_onversion_response - Version response handler.
 *
 * This is invoked when we receive a response to our initiate contact request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onversion_response(
		struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_version_response *version_response;
	unsigned long flags;

	version_response = (struct vmbus_channel_version_response *)hdr;
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype ==
		    CHANNELMSG_INITIATE_CONTACT) {
			memcpy(&msginfo->response.version_response,
			       version_response,
			       sizeof(struct vmbus_channel_version_response));
			complete(&msginfo->waitevent);
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/* Channel message dispatch table */
struct vmbus_channel_message_table_entry
	channel_message_table[CHANNELMSG_COUNT] = {
	{CHANNELMSG_INVALID,			0, NULL},
	{CHANNELMSG_OFFERCHANNEL,		0, vmbus_onoffer},
	{CHANNELMSG_RESCIND_CHANNELOFFER,	0, vmbus_onoffer_rescind},
	{CHANNELMSG_REQUESTOFFERS,		0, NULL},
	{CHANNELMSG_ALLOFFERS_DELIVERED,	1, vmbus_onoffers_delivered},
	{CHANNELMSG_OPENCHANNEL,		0, NULL},
	{CHANNELMSG_OPENCHANNEL_RESULT,		1, vmbus_onopen_result},
	{CHANNELMSG_CLOSECHANNEL,		0, NULL},
	{CHANNELMSG_GPADL_HEADER,		0, NULL},
	{CHANNELMSG_GPADL_BODY,			0, NULL},
	{CHANNELMSG_GPADL_CREATED,		1, vmbus_ongpadl_created},
	{CHANNELMSG_GPADL_TEARDOWN,		0, NULL},
	{CHANNELMSG_GPADL_TORNDOWN,		1, vmbus_ongpadl_torndown},
	{CHANNELMSG_RELID_RELEASED,		0, NULL},
	{CHANNELMSG_INITIATE_CONTACT,		0, NULL},
	{CHANNELMSG_VERSION_RESPONSE,		1, vmbus_onversion_response},
	{CHANNELMSG_UNLOAD,			0, NULL},
	{CHANNELMSG_UNLOAD_RESPONSE,		1, vmbus_unload_response},
};

/*
 * vmbus_onmessage - Handler for channel protocol messages.
 *
 * This is invoked in the vmbus worker thread context.
 */
void vmbus_onmessage(void *context)
{
	struct hv_message *msg = context;
	struct vmbus_channel_message_header *hdr;
	int size;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;
	size = msg->header.payload_size;

	if (hdr->msgtype >= CHANNELMSG_COUNT) {
		pr_err("Received invalid channel message type %d size %d\n",
		       hdr->msgtype, size);
		print_hex_dump_bytes("", DUMP_PREFIX_NONE,
				     (unsigned char *)msg->u.payload, size);
		return;
	}

	if (channel_message_table[hdr->msgtype].message_handler)
		channel_message_table[hdr->msgtype].message_handler(hdr);
	else
		pr_err("Unhandled channel message type %d\n", hdr->msgtype);
}

/*
 * vmbus_request_offers - Send a request to get all our pending offers.
 */
int vmbus_request_offers(void)
{
	struct vmbus_channel_message_header *msg;
	struct vmbus_channel_msginfo *msginfo;
	int ret;

	msginfo = kmalloc(sizeof(*msginfo) +
			  sizeof(struct vmbus_channel_message_header),
			  GFP_KERNEL);
	if (!msginfo)
		return -ENOMEM;

	msg = (struct vmbus_channel_message_header *)msginfo->msg;

	msg->msgtype = CHANNELMSG_REQUESTOFFERS;

	ret = vmbus_post_msg(msg,
			     sizeof(struct vmbus_channel_message_header));
	if (ret != 0) {
		pr_err("Unable to request offers - %d\n", ret);
		goto cleanup;
	}

cleanup:
	kfree(msginfo);

	return ret;
}

/*
 * Retrieve the (sub) channel on which to send an outgoing request.
 * When a primary channel has multiple sub-channels, we try to
 * distribute the load equally amongst all available channels.
 */
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
{
	struct list_head *cur, *tmp;
	int cur_cpu;
	struct vmbus_channel *cur_channel;
	struct vmbus_channel *outgoing_channel = primary;
	int next_channel;
	int i = 1;

	if (list_empty(&primary->sc_list))
		return outgoing_channel;

	next_channel = primary->next_oc++;

	if (next_channel > (primary->num_sc)) {
		primary->next_oc = 0;
		return outgoing_channel;
	}

	cur_cpu = hv_context.vp_index[get_cpu()];
	put_cpu();
	list_for_each_safe(cur, tmp, &primary->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
		if (cur_channel->state != CHANNEL_OPENED_STATE)
			continue;

		if (cur_channel->target_vp == cur_cpu)
			return cur_channel;

		if (i == next_channel)
			return cur_channel;

		i++;
	}

	return outgoing_channel;
}
EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel);
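
/*
 * Illustrative sketch (not part of this driver): a client such as a
 * storage driver picks the outgoing channel immediately before posting a
 * request.  device, my_request and the completion-flag usage are
 * assumptions for illustration only:
 *
 *	struct vmbus_channel *outgoing_channel;
 *	int ret;
 *
 *	outgoing_channel = vmbus_get_outgoing_channel(device->channel);
 *	ret = vmbus_sendpacket(outgoing_channel, &my_request,
 *			       sizeof(my_request),
 *			       (unsigned long)&my_request,
 *			       VM_PKT_DATA_INBAND,
 *			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 */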

static void invoke_sc_cb(struct vmbus_channel *primary_channel)
{
	struct list_head *cur, *tmp;
	struct vmbus_channel *cur_channel;

	if (primary_channel->sc_creation_callback == NULL)
		return;

	list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);

		primary_channel->sc_creation_callback(cur_channel);
	}
}

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
				  void (*sc_cr_cb)(struct vmbus_channel *new_sc))
{
	primary_channel->sc_creation_callback = sc_cr_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
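
/*
 * Illustrative sketch (not part of this driver): a driver that wants
 * sub-channels registers this callback on its primary channel before
 * asking the host to create them, and can later call
 * vmbus_are_subchannels_present() below to both test for and deliver
 * pending sub-channels.  handle_sc() and the request step are assumptions
 * for illustration only:
 *
 *	static void handle_sc(struct vmbus_channel *new_sc)
 *	{
 *		(open new_sc with vmbus_open() and set up its callback)
 *	}
 *
 *	vmbus_set_sc_create_callback(device->channel, handle_sc);
 *	(then send the driver-specific "create sub-channel" request to the
 *	 host; each offered sub-channel arrives via handle_sc())
 */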

bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
{
	bool ret;

	ret = !list_empty(&primary->sc_list);

	if (ret) {
		/*
		 * Invoke the callback on sub-channel creation.
		 * This will present a uniform interface to the
		 * clients.
		 */
		invoke_sc_cb(primary);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);