// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/sched/task_stack.h>

#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
#include <linux/efi.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h>
#include "hyperv_vmbus.h"

struct vmbus_dynid {
        struct list_head node;
        struct hv_vmbus_device_id id;
};

static struct acpi_device  *hv_acpi_dev;

static struct completion probe_event;

static int hyperv_cpuhp_online;

static void *hv_panic_page;
/* Values parsed from ACPI DSDT */
int vmbus_irq;
int vmbus_interrupt;

/*
 * Boolean to control whether to report panic messages over Hyper-V.
 *
 * It can be set via /proc/sys/kernel/hyperv_record_panic_msg
 */
static int sysctl_record_panic_msg = 1;
static int hyperv_report_reg(void)
{
        return !sysctl_record_panic_msg || !hv_panic_page;
}
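
/*
 * Note: the crash-MSR notification paths below are gated on
 * hyperv_report_reg(): when the kmsg-dump path is active (recording
 * enabled and a panic page allocated), the panic is reported once via
 * hyperv_report_panic_msg() instead of the MSR-based hyperv_report_panic().
 */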
static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
                              void *args)
{
        struct pt_regs *regs;

        vmbus_initiate_unload(true);

        /*
         * Hyper-V should be notified only once about a panic.  If we will be
         * doing hyperv_report_panic_msg() later with kmsg data, don't do
         * the notification here.
         */
        if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
            && hyperv_report_reg()) {
                regs = current_pt_regs();
                hyperv_report_panic(regs, val, false);
        }
        return NOTIFY_DONE;
}

static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
                            void *args)
{
        struct die_args *die = args;
        struct pt_regs *regs = die->regs;

        /* Don't notify Hyper-V if the die event is other than oops */
        if (val != DIE_OOPS)
                return NOTIFY_DONE;

        /*
         * Hyper-V should be notified only once about a panic.  If we will be
         * doing hyperv_report_panic_msg() later with kmsg data, don't do
         * the notification here.
         */
        if (hyperv_report_reg())
                hyperv_report_panic(regs, val, true);
        return NOTIFY_DONE;
}

static struct notifier_block hyperv_die_block = {
        .notifier_call = hyperv_die_event,
};
static struct notifier_block hyperv_panic_block = {
        .notifier_call = hyperv_panic_event,
};

static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_MUTEX(hyperv_mmio_lock);

static int vmbus_exists(void)
{
        if (hv_acpi_dev == NULL)
                return -ENODEV;

        return 0;
}
static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
        return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(const struct vmbus_channel *channel)
{
        return (u8)channel->offermsg.monitorid % 32;
}

static u32 channel_pending(const struct vmbus_channel *channel,
                           const struct hv_monitor_page *monitor_page)
{
        u8 monitor_group = channel_monitor_group(channel);

        return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(const struct vmbus_channel *channel,
                           const struct hv_monitor_page *monitor_page)
{
        u8 monitor_group = channel_monitor_group(channel);
        u8 monitor_offset = channel_monitor_offset(channel);

        return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
                           struct hv_monitor_page *monitor_page)
{
        u8 monitor_group = channel_monitor_group(channel);
        u8 monitor_offset = channel_monitor_offset(channel);

        return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}
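
/*
 * Worked example (values illustrative): a channel with
 * offermsg.monitorid == 69 lands in trigger group 69 / 32 == 2 at bit
 * offset 69 % 32 == 5 within the monitor page arrays used above.
 */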
static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
                       char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
                          char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
                               struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
                             struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "{%pUl}\n",
                       &hv_dev->channel->offermsg.offer.if_type);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
                              struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "{%pUl}\n",
                       &hv_dev->channel->offermsg.offer.if_instance);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
                             struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        return sprintf(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
}
static DEVICE_ATTR_RO(modalias);

#ifdef CONFIG_NUMA
static ssize_t numa_node_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;

        return sprintf(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
}
static DEVICE_ATTR_RO(numa_node);
#endif
static ssize_t server_monitor_pending_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_pending(hv_dev->channel,
                                       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_pending(hv_dev->channel,
                                       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_latency(hv_dev->channel,
                                       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_latency(hv_dev->channel,
                                       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_conn_id(hv_dev->channel,
                                       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
                                           struct device_attribute *dev_attr,
                                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        if (!hv_dev->channel)
                return -ENODEV;
        return sprintf(buf, "%d\n",
                       channel_conn_id(hv_dev->channel,
                                       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);
static ssize_t out_intr_mask_show(struct device *dev,
                                  struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
                                          &outbound);
        if (ret < 0)
                return ret;

        return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
                                   struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
                                          &outbound);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
                                    struct device_attribute *dev_attr,
                                    char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
                                          &outbound);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
                                         struct device_attribute *dev_attr,
                                         char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
                                          &outbound);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
                                          struct device_attribute *dev_attr,
                                          char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info outbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
                                          &outbound);
        if (ret < 0)
                return ret;
        return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
                                 struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        if (ret < 0)
                return ret;

        return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
                                  struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        if (ret < 0)
                return ret;

        return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
                                   struct device_attribute *dev_attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        if (ret < 0)
                return ret;

        return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
                                        struct device_attribute *dev_attr,
                                        char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        if (ret < 0)
                return ret;

        return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
                                         struct device_attribute *dev_attr,
                                         char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct hv_ring_buffer_debug_info inbound;
        int ret;

        if (!hv_dev->channel)
                return -ENODEV;

        ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        if (ret < 0)
                return ret;

        return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);
static ssize_t channel_vp_mapping_show(struct device *dev,
                                       struct device_attribute *dev_attr,
                                       char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
        int buf_size = PAGE_SIZE, n_written, tot_written;
        struct list_head *cur;

        if (!channel)
                return -ENODEV;

        mutex_lock(&vmbus_connection.channel_mutex);

        tot_written = snprintf(buf, buf_size, "%u:%u\n",
                channel->offermsg.child_relid, channel->target_cpu);

        list_for_each(cur, &channel->sc_list) {
                if (tot_written >= buf_size - 1)
                        break;

                cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
                n_written = scnprintf(buf + tot_written,
                                      buf_size - tot_written,
                                      "%u:%u\n",
                                      cur_sc->offermsg.child_relid,
                                      cur_sc->target_cpu);
                tot_written += n_written;
        }

        mutex_unlock(&vmbus_connection.channel_mutex);

        return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);
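
/*
 * Example channel_vp_mapping output for a primary channel (relid 15) with
 * two sub-channels; the relids and CPU numbers are illustrative:
 *
 *      15:0
 *      16:1
 *      17:2
 */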
static ssize_t vendor_show(struct device *dev,
                           struct device_attribute *dev_attr,
                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
                           struct device_attribute *dev_attr,
                           char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);

        return sprintf(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);
static ssize_t driver_override_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        char *driver_override, *old, *cp;

        /* We need to keep extra room for a newline */
        if (count >= (PAGE_SIZE - 1))
                return -EINVAL;

        driver_override = kstrndup(buf, count, GFP_KERNEL);
        if (!driver_override)
                return -ENOMEM;

        cp = strchr(driver_override, '\n');
        if (cp)
                *cp = '\0';

        device_lock(dev);
        old = hv_dev->driver_override;
        if (strlen(driver_override)) {
                hv_dev->driver_override = driver_override;
        } else {
                kfree(driver_override);
                hv_dev->driver_override = NULL;
        }
        device_unlock(dev);

        kfree(old);

        return count;
}

static ssize_t driver_override_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct hv_device *hv_dev = device_to_hv_device(dev);
        ssize_t len;

        device_lock(dev);
        len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
        device_unlock(dev);

        return len;
}
static DEVICE_ATTR_RW(driver_override);
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_dev_attrs[] = {
        &dev_attr_id.attr,
        &dev_attr_state.attr,
        &dev_attr_monitor_id.attr,
        &dev_attr_class_id.attr,
        &dev_attr_device_id.attr,
        &dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
        &dev_attr_numa_node.attr,
#endif
        &dev_attr_server_monitor_pending.attr,
        &dev_attr_client_monitor_pending.attr,
        &dev_attr_server_monitor_latency.attr,
        &dev_attr_client_monitor_latency.attr,
        &dev_attr_server_monitor_conn_id.attr,
        &dev_attr_client_monitor_conn_id.attr,
        &dev_attr_out_intr_mask.attr,
        &dev_attr_out_read_index.attr,
        &dev_attr_out_write_index.attr,
        &dev_attr_out_read_bytes_avail.attr,
        &dev_attr_out_write_bytes_avail.attr,
        &dev_attr_in_intr_mask.attr,
        &dev_attr_in_read_index.attr,
        &dev_attr_in_write_index.attr,
        &dev_attr_in_read_bytes_avail.attr,
        &dev_attr_in_write_bytes_avail.attr,
        &dev_attr_channel_vp_mapping.attr,
        &dev_attr_vendor.attr,
        &dev_attr_device.attr,
        &dev_attr_driver_override.attr,
        NULL,
};
/*
 * Device-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj,
                                         struct attribute *attr, int idx)
{
        struct device *dev = kobj_to_dev(kobj);
        const struct hv_device *hv_dev = device_to_hv_device(dev);

        /* Hide the monitor attributes if the monitor mechanism is not used. */
        if (!hv_dev->channel->offermsg.monitor_allocated &&
            (attr == &dev_attr_monitor_id.attr ||
             attr == &dev_attr_server_monitor_pending.attr ||
             attr == &dev_attr_client_monitor_pending.attr ||
             attr == &dev_attr_server_monitor_latency.attr ||
             attr == &dev_attr_client_monitor_latency.attr ||
             attr == &dev_attr_server_monitor_conn_id.attr ||
             attr == &dev_attr_client_monitor_conn_id.attr))
                return 0;

        return attr->mode;
}

static const struct attribute_group vmbus_dev_group = {
        .attrs = vmbus_dev_attrs,
        .is_visible = vmbus_dev_attr_is_visible
};
__ATTRIBUTE_GROUPS(vmbus_dev);
/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in the userspace. The udev will then look at its
 * rule and the uevent generated here to load the appropriate driver
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
        struct hv_device *dev = device_to_hv_device(device);
        const char *format = "MODALIAS=vmbus:%*phN";

        return add_uevent_var(env, format, UUID_SIZE, &dev->dev_type);
}
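
/*
 * For example, the synthetic-NIC class GUID
 * f8615163-df3e-46c5-913f-f2d2f965ed0e produces
 * "MODALIAS=vmbus:635161f83edfc546913ff2d2f965ed0e": %*phN emits the 16
 * GUID bytes in memory order, two hex characters per byte.
 */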
static const struct hv_vmbus_device_id *
hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const guid_t *guid)
{
        if (id == NULL)
                return NULL; /* empty device table */

        for (; !guid_is_null(&id->guid); id++)
                if (guid_equal(&id->guid, guid))
                        return id;

        return NULL;
}
static const struct hv_vmbus_device_id *
hv_vmbus_dynid_match(struct hv_driver *drv, const guid_t *guid)
{
        const struct hv_vmbus_device_id *id = NULL;
        struct vmbus_dynid *dynid;

        spin_lock(&drv->dynids.lock);
        list_for_each_entry(dynid, &drv->dynids.list, node) {
                if (guid_equal(&dynid->id.guid, guid)) {
                        id = &dynid->id;
                        break;
                }
        }
        spin_unlock(&drv->dynids.lock);

        return id;
}
static const struct hv_vmbus_device_id vmbus_device_null;

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
                                                        struct hv_device *dev)
{
        const guid_t *guid = &dev->dev_type;
        const struct hv_vmbus_device_id *id;

        /* When driver_override is set, only bind to the matching driver */
        if (dev->driver_override && strcmp(dev->driver_override, drv->name))
                return NULL;

        /* Look at the dynamic ids first, before the static ones */
        id = hv_vmbus_dynid_match(drv, guid);
        if (!id)
                id = hv_vmbus_dev_match(drv->id_table, guid);

        /* driver_override will always match, send a dummy id */
        if (!id && dev->driver_override)
                id = &vmbus_device_null;

        return id;
}
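
/*
 * Match precedence implemented above: a non-matching driver_override
 * vetoes the driver outright; otherwise dynamic (sysfs new_id) GUIDs are
 * consulted before the static id_table; and a matching driver_override
 * with no id hit falls back to the dummy vmbus_device_null entry.
 */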
/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
{
        struct vmbus_dynid *dynid;

        dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
        if (!dynid)
                return -ENOMEM;

        dynid->id.guid = *guid;

        spin_lock(&drv->dynids.lock);
        list_add_tail(&dynid->node, &drv->dynids.list);
        spin_unlock(&drv->dynids.lock);

        return driver_attach(&drv->driver);
}
static void vmbus_free_dynids(struct hv_driver *drv)
{
        struct vmbus_dynid *dynid, *n;

        spin_lock(&drv->dynids.lock);
        list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
                list_del(&dynid->node);
                kfree(dynid);
        }
        spin_unlock(&drv->dynids.lock);
}
/*
 * new_id_store - sysfs frontend to vmbus_add_dynid()
 *
 * Allow GUIDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
                            size_t count)
{
        struct hv_driver *drv = drv_to_hv_drv(driver);
        guid_t guid;
        ssize_t retval;

        retval = guid_parse(buf, &guid);
        if (retval)
                return retval;

        if (hv_vmbus_dynid_match(drv, &guid))
                return -EEXIST;

        retval = vmbus_add_dynid(drv, &guid);
        if (retval)
                return retval;
        return count;
}
static DRIVER_ATTR_WO(new_id);
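
/*
 * Usage sketch (driver name and GUID are illustrative):
 *
 *      echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" \
 *              > /sys/bus/vmbus/drivers/hv_netvsc/new_id
 */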
/*
 * remove_id_store - remove a device ID from this driver
 *
 * Removes a dynamic device ID from this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
                               size_t count)
{
        struct hv_driver *drv = drv_to_hv_drv(driver);
        struct vmbus_dynid *dynid, *n;
        guid_t guid;
        ssize_t retval;

        retval = guid_parse(buf, &guid);
        if (retval)
                return retval;

        retval = -ENODEV;
        spin_lock(&drv->dynids.lock);
        list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
                struct hv_vmbus_device_id *id = &dynid->id;

                if (guid_equal(&id->guid, &guid)) {
                        list_del(&dynid->node);
                        kfree(dynid);
                        retval = count;
                        break;
                }
        }
        spin_unlock(&drv->dynids.lock);

        return retval;
}
static DRIVER_ATTR_WO(remove_id);
static struct attribute *vmbus_drv_attrs[] = {
        &driver_attr_new_id.attr,
        &driver_attr_remove_id.attr,
        NULL,
};
ATTRIBUTE_GROUPS(vmbus_drv);
/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
        struct hv_driver *drv = drv_to_hv_drv(driver);
        struct hv_device *hv_dev = device_to_hv_device(device);

        /* The hv_sock driver handles all hv_sock offers. */
        if (is_hvsock_channel(hv_dev->channel))
                return drv->hvsock;

        if (hv_vmbus_get_id(drv, hv_dev))
                return 1;

        return 0;
}
/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
        int ret = 0;
        struct hv_driver *drv =
                        drv_to_hv_drv(child_device->driver);
        struct hv_device *dev = device_to_hv_device(child_device);
        const struct hv_vmbus_device_id *dev_id;

        dev_id = hv_vmbus_get_id(drv, dev);
        if (drv->probe) {
                ret = drv->probe(dev, dev_id);
                if (ret != 0)
                        pr_err("probe failed for device %s (%d)\n",
                               dev_name(child_device), ret);

        } else {
                pr_err("probe not set for driver %s\n",
                       dev_name(child_device));
                ret = -ENODEV;
        }
        return ret;
}
/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
        struct hv_driver *drv;
        struct hv_device *dev = device_to_hv_device(child_device);

        if (child_device->driver) {
                drv = drv_to_hv_drv(child_device->driver);
                if (drv->remove)
                        drv->remove(dev);
        }

        return 0;
}
/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
        struct hv_driver *drv;
        struct hv_device *dev = device_to_hv_device(child_device);


        /* The device may not be attached yet */
        if (!child_device->driver)
                return;

        drv = drv_to_hv_drv(child_device->driver);

        if (drv->shutdown)
                drv->shutdown(dev);
}
#ifdef CONFIG_PM_SLEEP
/*
 * vmbus_suspend - Suspend a vmbus device
 */
static int vmbus_suspend(struct device *child_device)
{
        struct hv_driver *drv;
        struct hv_device *dev = device_to_hv_device(child_device);

        /* The device may not be attached yet */
        if (!child_device->driver)
                return 0;

        drv = drv_to_hv_drv(child_device->driver);
        if (!drv->suspend)
                return -EOPNOTSUPP;

        return drv->suspend(dev);
}

/*
 * vmbus_resume - Resume a vmbus device
 */
static int vmbus_resume(struct device *child_device)
{
        struct hv_driver *drv;
        struct hv_device *dev = device_to_hv_device(child_device);

        /* The device may not be attached yet */
        if (!child_device->driver)
                return 0;

        drv = drv_to_hv_drv(child_device->driver);
        if (!drv->resume)
                return -EOPNOTSUPP;

        return drv->resume(dev);
}
#else
#define vmbus_suspend NULL
#define vmbus_resume NULL
#endif /* CONFIG_PM_SLEEP */
/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
        struct hv_device *hv_dev = device_to_hv_device(device);
        struct vmbus_channel *channel = hv_dev->channel;

        hv_debug_rm_dev_dir(hv_dev);

        mutex_lock(&vmbus_connection.channel_mutex);
        hv_process_channel_removal(channel);
        mutex_unlock(&vmbus_connection.channel_mutex);
        kfree(hv_dev);
}
/*
 * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
 *
 * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
 * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
 * is no way to wake up a Generation-2 VM.
 *
 * The other 4 ops are for hibernation.
 */
static const struct dev_pm_ops vmbus_pm = {
        .suspend_noirq  = NULL,
        .resume_noirq   = NULL,
        .freeze_noirq   = vmbus_suspend,
        .thaw_noirq     = vmbus_resume,
        .poweroff_noirq = vmbus_suspend,
        .restore_noirq  = vmbus_resume,
};
/* The one and only one */
static struct bus_type hv_bus = {
        .name =         "vmbus",
        .match =                vmbus_match,
        .shutdown =             vmbus_shutdown,
        .remove =               vmbus_remove,
        .probe =                vmbus_probe,
        .uevent =               vmbus_uevent,
        .dev_groups =           vmbus_dev_groups,
        .drv_groups =           vmbus_drv_groups,
        .pm =                   &vmbus_pm,
};
struct onmessage_work_context {
        struct work_struct work;
        struct {
                struct hv_message_header header;
                u8 payload[];
        } msg;
};
static void vmbus_onmessage_work(struct work_struct *work)
{
        struct onmessage_work_context *ctx;

        /* Do not process messages if we're in DISCONNECTED state */
        if (vmbus_connection.conn_state == DISCONNECTED)
                return;

        ctx = container_of(work, struct onmessage_work_context,
                           work);
        vmbus_onmessage((struct vmbus_channel_message_header *)
                        &ctx->msg.payload);
        kfree(ctx);
}
void vmbus_on_msg_dpc(unsigned long data)
{
        struct hv_per_cpu_context *hv_cpu = (void *)data;
        void *page_addr = hv_cpu->synic_message_page;
        struct hv_message *msg = (struct hv_message *)page_addr +
                                  VMBUS_MESSAGE_SINT;
        struct vmbus_channel_message_header *hdr;
        const struct vmbus_channel_message_table_entry *entry;
        struct onmessage_work_context *ctx;
        u32 message_type = msg->header.message_type;

        /*
         * 'enum vmbus_channel_message_type' is supposed to always be 'u32' as
         * it is being used in 'struct vmbus_channel_message_header' definition
         * which is supposed to match hypervisor ABI.
         */
        BUILD_BUG_ON(sizeof(enum vmbus_channel_message_type) != sizeof(u32));

        if (message_type == HVMSG_NONE)
                /* no msg */
                return;

        hdr = (struct vmbus_channel_message_header *)msg->u.payload;

        trace_vmbus_on_msg_dpc(hdr);

        if (hdr->msgtype >= CHANNELMSG_COUNT) {
                WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
                goto msg_handled;
        }

        if (msg->header.payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
                WARN_ONCE(1, "payload size is too large (%d)\n",
                          msg->header.payload_size);
                goto msg_handled;
        }

        entry = &channel_message_table[hdr->msgtype];

        if (!entry->message_handler)
                goto msg_handled;

        if (msg->header.payload_size < entry->min_payload_len) {
                WARN_ONCE(1, "message too short: msgtype=%d len=%d\n",
                          hdr->msgtype, msg->header.payload_size);
                goto msg_handled;
        }

        if (entry->handler_type == VMHT_BLOCKING) {
                ctx = kmalloc(sizeof(*ctx) + msg->header.payload_size,
                              GFP_ATOMIC);
                if (ctx == NULL)
                        return;

                INIT_WORK(&ctx->work, vmbus_onmessage_work);
                memcpy(&ctx->msg, msg, sizeof(msg->header) +
                       msg->header.payload_size);

                /*
                 * The host can generate a rescind message while we
                 * may still be handling the original offer. We deal with
                 * this condition by relying on the synchronization provided
                 * by offer_in_progress and by channel_mutex.  See also the
                 * inline comments in vmbus_onoffer_rescind().
                 */
                switch (hdr->msgtype) {
                case CHANNELMSG_RESCIND_CHANNELOFFER:
                        /*
                         * If we are handling the rescind message;
                         * schedule the work on the global work queue.
                         *
                         * The OFFER message and the RESCIND message should
                         * not be handled by the same serialized work queue,
                         * because the OFFER handler may call vmbus_open(),
                         * which tries to open the channel by sending an
                         * OPEN_CHANNEL message to the host and waits for
                         * the host's response; however, if the host has
                         * rescinded the channel before it receives the
                         * OPEN_CHANNEL message, the host just silently
                         * ignores the OPEN_CHANNEL message; as a result,
                         * the guest's OFFER handler hangs for ever, if we
                         * handle the RESCIND message in the same serialized
                         * work queue: the RESCIND handler can not start to
                         * run before the OFFER handler finishes.
                         */
                        schedule_work(&ctx->work);
                        break;

                case CHANNELMSG_OFFERCHANNEL:
                        /*
                         * The host sends the offer message of a given channel
                         * before sending the rescind message of the same
                         * channel.  These messages are sent to the guest's
                         * connect CPU; the guest then starts processing them
                         * in the tasklet handler on this CPU:
                         *
                         * VMBUS_CONNECT_CPU
                         *
                         * [vmbus_on_msg_dpc()]
                         * atomic_inc()  // CHANNELMSG_OFFERCHANNEL
                         * queue_work()
                         * ...
                         * [vmbus_on_msg_dpc()]
                         * schedule_work()  // CHANNELMSG_RESCIND_CHANNELOFFER
                         *
                         * We rely on the memory-ordering properties of the
                         * queue_work() and schedule_work() primitives, which
                         * guarantee that the atomic increment will be visible
                         * to the CPUs which will execute the offer & rescind
                         * works by the time these works will start execution.
                         */
                        atomic_inc(&vmbus_connection.offer_in_progress);
                        fallthrough;

                default:
                        queue_work(vmbus_connection.work_queue, &ctx->work);
                }
        } else
                entry->message_handler(hdr);

msg_handled:
        vmbus_signal_eom(msg, message_type);
}
#ifdef CONFIG_PM_SLEEP
/*
 * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
 * hibernation, because hv_sock connections can not persist across hibernation.
 */
static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
{
        struct onmessage_work_context *ctx;
        struct vmbus_channel_rescind_offer *rescind;

        WARN_ON(!is_hvsock_channel(channel));

        /*
         * Allocation size is small and the allocation should really not fail,
         * otherwise the state of the hv_sock connections ends up in limbo.
         */
        ctx = kzalloc(sizeof(*ctx) + sizeof(*rescind),
                      GFP_KERNEL | __GFP_NOFAIL);

        /*
         * So far, these are not really used by Linux.  Just set them to the
         * reasonable values conforming to the definitions of the fields.
         */
        ctx->msg.header.message_type = 1;
        ctx->msg.header.payload_size = sizeof(*rescind);

        /* These values are actually used by Linux. */
        rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.payload;
        rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
        rescind->child_relid = channel->offermsg.child_relid;

        INIT_WORK(&ctx->work, vmbus_onmessage_work);

        queue_work(vmbus_connection.work_queue, &ctx->work);
}
#endif /* CONFIG_PM_SLEEP */
/*
 * Schedule all channels with events pending
 */
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
{
        unsigned long *recv_int_page;
        u32 maxbits, relid;

        if (vmbus_proto_version < VERSION_WIN8) {
                maxbits = MAX_NUM_CHANNELS_SUPPORTED;
                recv_int_page = vmbus_connection.recv_int_page;
        } else {
                /*
                 * When the host is win8 and beyond, the event page
                 * can be directly checked to get the id of the channel
                 * that has the interrupt pending.
                 */
                void *page_addr = hv_cpu->synic_event_page;
                union hv_synic_event_flags *event
                        = (union hv_synic_event_flags *)page_addr +
                                                 VMBUS_MESSAGE_SINT;

                maxbits = HV_EVENT_FLAGS_COUNT;
                recv_int_page = event->flags;
        }

        if (unlikely(!recv_int_page))
                return;

        for_each_set_bit(relid, recv_int_page, maxbits) {
                void (*callback_fn)(void *context);
                struct vmbus_channel *channel;

                if (!sync_test_and_clear_bit(relid, recv_int_page))
                        continue;

                /* Special case - vmbus channel protocol msg */
                if (relid == 0)
                        continue;

                /*
                 * Pairs with the kfree_rcu() in vmbus_chan_release().
                 * Guarantees that the channel data structure doesn't
                 * get freed while the channel pointer below is being
                 * dereferenced.
                 */
                rcu_read_lock();

                /* Find channel based on relid */
                channel = relid2channel(relid);
                if (channel == NULL)
                        goto sched_unlock_rcu;

                if (channel->rescind)
                        goto sched_unlock_rcu;

                /*
                 * Make sure that the ring buffer data structure doesn't get
                 * freed while we dereference the ring buffer pointer.  Test
                 * for the channel's onchannel_callback being NULL within a
                 * sched_lock critical section.  See also the inline comments
                 * in vmbus_reset_channel_cb().
                 */
                spin_lock(&channel->sched_lock);

                callback_fn = channel->onchannel_callback;
                if (unlikely(callback_fn == NULL))
                        goto sched_unlock;

                trace_vmbus_chan_sched(channel);

                ++channel->interrupts;

                switch (channel->callback_mode) {
                case HV_CALL_ISR:
                        (*callback_fn)(channel->channel_callback_context);
                        break;

                case HV_CALL_BATCHED:
                        hv_begin_read(&channel->inbound);
                        fallthrough;
                case HV_CALL_DIRECT:
                        tasklet_schedule(&channel->callback_event);
                }

sched_unlock:
                spin_unlock(&channel->sched_lock);
sched_unlock_rcu:
                rcu_read_unlock();
        }
}
static void vmbus_isr(void)
{
        struct hv_per_cpu_context *hv_cpu
                = this_cpu_ptr(hv_context.cpu_context);
        void *page_addr = hv_cpu->synic_event_page;
        struct hv_message *msg;
        union hv_synic_event_flags *event;
        bool handled = false;

        if (unlikely(page_addr == NULL))
                return;

        event = (union hv_synic_event_flags *)page_addr +
                                         VMBUS_MESSAGE_SINT;
        /*
         * Check for events before checking for messages. This is the order
         * in which events and messages are checked in Windows guests on
         * Hyper-V, and the Windows team suggested we do the same.
         */

        if ((vmbus_proto_version == VERSION_WS2008) ||
            (vmbus_proto_version == VERSION_WIN7)) {

                /* Since we are a child, we only need to check bit 0 */
                if (sync_test_and_clear_bit(0, event->flags))
                        handled = true;
        } else {
                /*
                 * Our host is win8 or above. The signaling mechanism
                 * has changed and we can directly look at the event page.
                 * If bit n is set then we have an interrupt on the channel
                 * whose id is n.
                 */
                handled = true;
        }

        if (handled)
                vmbus_chan_sched(hv_cpu);

        page_addr = hv_cpu->synic_message_page;
        msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

        /* Check if there are actual msgs to be processed */
        if (msg->header.message_type != HVMSG_NONE) {
                if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
                        hv_stimer0_isr();
                        vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
                } else
                        tasklet_schedule(&hv_cpu->msg_dpc);
        }

        add_interrupt_randomness(hv_get_vector(), 0);
}
/*
 * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
 * buffer and call into Hyper-V to transfer the data.
 */
static void hv_kmsg_dump(struct kmsg_dumper *dumper,
                         enum kmsg_dump_reason reason)
{
        size_t bytes_written;
        phys_addr_t panic_pa;

        /* We are only interested in panics. */
        if ((reason != KMSG_DUMP_PANIC) || (!sysctl_record_panic_msg))
                return;

        panic_pa = virt_to_phys(hv_panic_page);

        /*
         * Write dump contents to the page. No need to synchronize; panic should
         * be single-threaded.
         */
        kmsg_dump_get_buffer(dumper, false, hv_panic_page, HV_HYP_PAGE_SIZE,
                             &bytes_written);
        if (bytes_written)
                hyperv_report_panic_msg(panic_pa, bytes_written);
}

static struct kmsg_dumper hv_kmsg_dumper = {
        .dump = hv_kmsg_dump,
};
static struct ctl_table_header *hv_ctl_table_hdr;

/*
 * sysctl option to allow the user to control whether kmsg data should be
 * reported to Hyper-V on panic.
 */
static struct ctl_table hv_ctl_table[] = {
        {
                .procname       = "hyperv_record_panic_msg",
                .data           = &sysctl_record_panic_msg,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE
        },
        {}
};

static struct ctl_table hv_root_table[] = {
        {
                .procname       = "kernel",
                .mode           = 0555,
                .child          = hv_ctl_table
        },
        {}
};
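
/*
 * The two tables above surface a single knob; for example, recording can
 * be disabled at runtime with:
 *
 *      sysctl kernel.hyperv_record_panic_msg=0
 */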
/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *      - initialize the vmbus driver context
 *      - invoke the vmbus hv main init routine
 *      - retrieve the channel offers
 */
static int vmbus_bus_init(void)
{
        int ret;

        ret = hv_init();
        if (ret != 0) {
                pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
                return ret;
        }

        ret = bus_register(&hv_bus);
        if (ret)
                return ret;

        ret = hv_setup_vmbus_irq(vmbus_irq, vmbus_isr);
        if (ret)
                goto err_setup;

        ret = hv_synic_alloc();
        if (ret)
                goto err_alloc;

        /*
         * Initialize the per-cpu interrupt state and stimer state.
         * Then connect to the host.
         */
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
                                hv_synic_init, hv_synic_cleanup);
        if (ret < 0)
                goto err_cpuhp;
        hyperv_cpuhp_online = ret;

        ret = vmbus_connect();
        if (ret)
                goto err_connect;

        /*
         * Only register if the crash MSRs are available
         */
        if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
                u64 hyperv_crash_ctl;
                /*
                 * Sysctl registration is not fatal, since by default
                 * reporting is enabled.
                 */
                hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
                if (!hv_ctl_table_hdr)
                        pr_err("Hyper-V: sysctl table register error");

                /*
                 * Register for panic kmsg callback only if the right
                 * capability is supported by the hypervisor.
                 */
                hv_get_crash_ctl(hyperv_crash_ctl);
                if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG) {
                        hv_panic_page = (void *)hv_alloc_hyperv_zeroed_page();
                        if (hv_panic_page) {
                                ret = kmsg_dump_register(&hv_kmsg_dumper);
                                if (ret) {
                                        pr_err("Hyper-V: kmsg dump register "
                                                "error 0x%x\n", ret);
                                        hv_free_hyperv_page(
                                            (unsigned long)hv_panic_page);
                                        hv_panic_page = NULL;
                                }
                        } else
                                pr_err("Hyper-V: panic message page memory "
                                        "allocation failed");
                }

                register_die_notifier(&hyperv_die_block);
        }

        /*
         * Always register the panic notifier because we need to unload
         * the VMbus channel connection to prevent any VMbus
         * activity after the VM panics.
         */
        atomic_notifier_chain_register(&panic_notifier_list,
                                       &hyperv_panic_block);

        vmbus_request_offers();

        return 0;

err_connect:
        cpuhp_remove_state(hyperv_cpuhp_online);
err_cpuhp:
        hv_synic_free();
err_alloc:
        hv_remove_vmbus_irq();
err_setup:
        bus_unregister(&hv_bus);
        unregister_sysctl_table(hv_ctl_table_hdr);
        hv_ctl_table_hdr = NULL;
        return ret;
}
/**
 * __vmbus_child_driver_register() - Register a vmbus's driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the drv
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 *
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
        int ret;

        pr_info("registering driver %s\n", hv_driver->name);

        ret = vmbus_exists();
        if (ret < 0)
                return ret;

        hv_driver->driver.name = hv_driver->name;
        hv_driver->driver.owner = owner;
        hv_driver->driver.mod_name = mod_name;
        hv_driver->driver.bus = &hv_bus;

        spin_lock_init(&hv_driver->dynids.lock);
        INIT_LIST_HEAD(&hv_driver->dynids.list);

        ret = driver_register(&hv_driver->driver);

        return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
/**
 * vmbus_driver_unregister() - Unregister a vmbus's driver
 * @hv_driver: Pointer to driver structure you want to
 *             un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
        pr_info("unregistering driver %s\n", hv_driver->name);

        if (!vmbus_exists()) {
                driver_unregister(&hv_driver->driver);
                vmbus_free_dynids(hv_driver);
        }
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
/*
 * Called when last reference to channel is gone.
 */
static void vmbus_chan_release(struct kobject *kobj)
{
        struct vmbus_channel *channel
                = container_of(kobj, struct vmbus_channel, kobj);

        kfree_rcu(channel, rcu);
}
struct vmbus_chan_attribute {
        struct attribute attr;
        ssize_t (*show)(struct vmbus_channel *chan, char *buf);
        ssize_t (*store)(struct vmbus_channel *chan,
                         const char *buf, size_t count);
};
#define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
        struct vmbus_chan_attribute chan_attr_##_name \
                = __ATTR(_name, _mode, _show, _store)
#define VMBUS_CHAN_ATTR_RW(_name) \
        struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
#define VMBUS_CHAN_ATTR_RO(_name) \
        struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
#define VMBUS_CHAN_ATTR_WO(_name) \
        struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)
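
/*
 * For example, VMBUS_CHAN_ATTR_RO(subchannel_id) further below expands to
 *
 *      struct vmbus_chan_attribute chan_attr_subchannel_id =
 *              __ATTR_RO(subchannel_id);
 *
 * i.e. a read-only attribute dispatched to subchannel_id_show() through
 * vmbus_chan_attr_show().
 */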
static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
                                    struct attribute *attr, char *buf)
{
        const struct vmbus_chan_attribute *attribute
                = container_of(attr, struct vmbus_chan_attribute, attr);
        struct vmbus_channel *chan
                = container_of(kobj, struct vmbus_channel, kobj);

        if (!attribute->show)
                return -EIO;

        return attribute->show(chan, buf);
}

static ssize_t vmbus_chan_attr_store(struct kobject *kobj,
                                     struct attribute *attr, const char *buf,
                                     size_t count)
{
        const struct vmbus_chan_attribute *attribute
                = container_of(attr, struct vmbus_chan_attribute, attr);
        struct vmbus_channel *chan
                = container_of(kobj, struct vmbus_channel, kobj);

        if (!attribute->store)
                return -EIO;

        return attribute->store(chan, buf, count);
}

static const struct sysfs_ops vmbus_chan_sysfs_ops = {
        .show = vmbus_chan_attr_show,
        .store = vmbus_chan_attr_store,
};
static ssize_t out_mask_show(struct vmbus_channel *channel, char *buf)
{
        struct hv_ring_buffer_info *rbi = &channel->outbound;
        ssize_t ret;

        mutex_lock(&rbi->ring_buffer_mutex);
        if (!rbi->ring_buffer) {
                mutex_unlock(&rbi->ring_buffer_mutex);
                return -EINVAL;
        }

        ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
        mutex_unlock(&rbi->ring_buffer_mutex);
        return ret;
}
static VMBUS_CHAN_ATTR_RO(out_mask);

static ssize_t in_mask_show(struct vmbus_channel *channel, char *buf)
{
        struct hv_ring_buffer_info *rbi = &channel->inbound;
        ssize_t ret;

        mutex_lock(&rbi->ring_buffer_mutex);
        if (!rbi->ring_buffer) {
                mutex_unlock(&rbi->ring_buffer_mutex);
                return -EINVAL;
        }

        ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
        mutex_unlock(&rbi->ring_buffer_mutex);
        return ret;
}
static VMBUS_CHAN_ATTR_RO(in_mask);

static ssize_t read_avail_show(struct vmbus_channel *channel, char *buf)
{
        struct hv_ring_buffer_info *rbi = &channel->inbound;
        ssize_t ret;

        mutex_lock(&rbi->ring_buffer_mutex);
        if (!rbi->ring_buffer) {
                mutex_unlock(&rbi->ring_buffer_mutex);
                return -EINVAL;
        }

        ret = sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
        mutex_unlock(&rbi->ring_buffer_mutex);
        return ret;
}
static VMBUS_CHAN_ATTR_RO(read_avail);

static ssize_t write_avail_show(struct vmbus_channel *channel, char *buf)
{
        struct hv_ring_buffer_info *rbi = &channel->outbound;
        ssize_t ret;

        mutex_lock(&rbi->ring_buffer_mutex);
        if (!rbi->ring_buffer) {
                mutex_unlock(&rbi->ring_buffer_mutex);
                return -EINVAL;
        }

        ret = sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
        mutex_unlock(&rbi->ring_buffer_mutex);
        return ret;
}
static VMBUS_CHAN_ATTR_RO(write_avail);
static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
{
        return sprintf(buf, "%u\n", channel->target_cpu);
}

static ssize_t target_cpu_store(struct vmbus_channel *channel,
                                const char *buf, size_t count)
{
        u32 target_cpu, origin_cpu;
        ssize_t ret = count;

        if (vmbus_proto_version < VERSION_WIN10_V4_1)
                return -EIO;

        if (sscanf(buf, "%uu", &target_cpu) != 1)
                return -EIO;

        /* Validate target_cpu for the cpumask_test_cpu() operation below. */
        if (target_cpu >= nr_cpumask_bits)
                return -EINVAL;

        /* No CPUs should come up or down during this. */
        cpus_read_lock();

        if (!cpu_online(target_cpu)) {
                cpus_read_unlock();
                return -EINVAL;
        }

        /*
         * Synchronizes target_cpu_store() and channel closure:
         *
         * { Initially: state = CHANNEL_OPENED }
         *
         * CPU1                         CPU2
         *
         * [target_cpu_store()]         [vmbus_disconnect_ring()]
         *
         * LOCK channel_mutex           LOCK channel_mutex
         * LOAD r1 = state              LOAD r2 = state
         * IF (r1 == CHANNEL_OPENED)    IF (r2 == CHANNEL_OPENED)
         *   SEND MODIFYCHANNEL           STORE state = CHANNEL_OPEN
         *   [...]                        SEND CLOSECHANNEL
         * UNLOCK channel_mutex         UNLOCK channel_mutex
         *
         * Forbids: r1 == r2 == CHANNEL_OPENED (i.e., CPU1's LOCK precedes
         *              CPU2's LOCK) && CPU2's SEND precedes CPU1's SEND
         *
         * Note.  The host processes the channel messages "sequentially", in
         * the order in which they are received on a per-partition basis.
         */
        mutex_lock(&vmbus_connection.channel_mutex);

        /*
         * Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels;
         * avoid sending the message and fail here for such channels.
         */
        if (channel->state != CHANNEL_OPENED_STATE) {
                ret = -EIO;
                goto cpu_store_unlock;
        }

        origin_cpu = channel->target_cpu;
        if (target_cpu == origin_cpu)
                goto cpu_store_unlock;

        if (vmbus_send_modifychannel(channel->offermsg.child_relid,
                                     hv_cpu_number_to_vp_number(target_cpu))) {
                ret = -EIO;
                goto cpu_store_unlock;
        }

        /*
         * Warning.  At this point, there is *no* guarantee that the host will
         * have successfully processed the vmbus_send_modifychannel() request.
         * See the header comment of vmbus_send_modifychannel() for more info.
         *
         * Lags in the processing of the above vmbus_send_modifychannel() can
         * result in missed interrupts if the "old" target CPU is taken offline
         * before Hyper-V starts sending interrupts to the "new" target CPU.
         * But apart from this offlining scenario, the code tolerates such
         * lags.  It will function correctly even if a channel interrupt comes
         * in on a CPU that is different from the channel target_cpu value.
         */

        channel->target_cpu = target_cpu;

        /* See init_vp_index(). */
        if (hv_is_perf_channel(channel))
                hv_update_alloced_cpus(origin_cpu, target_cpu);

        /* Currently set only for storvsc channels. */
        if (channel->change_target_cpu_callback) {
                (*channel->change_target_cpu_callback)(channel,
                                origin_cpu, target_cpu);
        }

cpu_store_unlock:
        mutex_unlock(&vmbus_connection.channel_mutex);
        cpus_read_unlock();
        return ret;
}
static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
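
/*
 * Usage sketch (device GUID, relid and CPU number are illustrative):
 *
 *      echo 3 > /sys/bus/vmbus/devices/<device GUID>/channels/15/cpu
 *
 * The write fails with -EIO on hosts older than VERSION_WIN10_V4_1 or if
 * the channel is not in CHANNEL_OPENED_STATE.
 */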
static ssize_t channel_pending_show(struct vmbus_channel *channel,
                                    char *buf)
{
        return sprintf(buf, "%d\n",
                       channel_pending(channel,
                                       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(pending, S_IRUGO, channel_pending_show, NULL);

static ssize_t channel_latency_show(struct vmbus_channel *channel,
                                    char *buf)
{
        return sprintf(buf, "%d\n",
                       channel_latency(channel,
                                       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(latency, S_IRUGO, channel_latency_show, NULL);

static ssize_t channel_interrupts_show(struct vmbus_channel *channel, char *buf)
{
        return sprintf(buf, "%llu\n", channel->interrupts);
}
static VMBUS_CHAN_ATTR(interrupts, S_IRUGO, channel_interrupts_show, NULL);

static ssize_t channel_events_show(struct vmbus_channel *channel, char *buf)
{
        return sprintf(buf, "%llu\n", channel->sig_events);
}
static VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL);

static ssize_t channel_intr_in_full_show(struct vmbus_channel *channel,
                                         char *buf)
{
        return sprintf(buf, "%llu\n",
                       (unsigned long long)channel->intr_in_full);
}
static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);

static ssize_t channel_intr_out_empty_show(struct vmbus_channel *channel,
                                           char *buf)
{
        return sprintf(buf, "%llu\n",
                       (unsigned long long)channel->intr_out_empty);
}
static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);

static ssize_t channel_out_full_first_show(struct vmbus_channel *channel,
                                           char *buf)
{
        return sprintf(buf, "%llu\n",
                       (unsigned long long)channel->out_full_first);
}
static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);

static ssize_t channel_out_full_total_show(struct vmbus_channel *channel,
                                           char *buf)
{
        return sprintf(buf, "%llu\n",
                       (unsigned long long)channel->out_full_total);
}
static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);

static ssize_t subchannel_monitor_id_show(struct vmbus_channel *channel,
                                          char *buf)
{
        return sprintf(buf, "%u\n", channel->offermsg.monitorid);
}
static VMBUS_CHAN_ATTR(monitor_id, S_IRUGO, subchannel_monitor_id_show, NULL);

static ssize_t subchannel_id_show(struct vmbus_channel *channel,
                                  char *buf)
{
        return sprintf(buf, "%u\n",
                       channel->offermsg.offer.sub_channel_index);
}
static VMBUS_CHAN_ATTR_RO(subchannel_id);
static struct attribute *vmbus_chan_attrs[] = {
        &chan_attr_out_mask.attr,
        &chan_attr_in_mask.attr,
        &chan_attr_read_avail.attr,
        &chan_attr_write_avail.attr,
        &chan_attr_cpu.attr,
        &chan_attr_pending.attr,
        &chan_attr_latency.attr,
        &chan_attr_interrupts.attr,
        &chan_attr_events.attr,
        &chan_attr_intr_in_full.attr,
        &chan_attr_intr_out_empty.attr,
        &chan_attr_out_full_first.attr,
        &chan_attr_out_full_total.attr,
        &chan_attr_monitor_id.attr,
        &chan_attr_subchannel_id.attr,
        NULL
};
/*
 * Channel-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
                                          struct attribute *attr, int idx)
{
        const struct vmbus_channel *channel =
                container_of(kobj, struct vmbus_channel, kobj);

        /* Hide the monitor attributes if the monitor mechanism is not used. */
        if (!channel->offermsg.monitor_allocated &&
            (attr == &chan_attr_pending.attr ||
             attr == &chan_attr_latency.attr ||
             attr == &chan_attr_monitor_id.attr))
                return 0;

        return attr->mode;
}

static struct attribute_group vmbus_chan_group = {
        .attrs = vmbus_chan_attrs,
        .is_visible = vmbus_chan_attr_is_visible
};

static struct kobj_type vmbus_chan_ktype = {
        .sysfs_ops = &vmbus_chan_sysfs_ops,
        .release = vmbus_chan_release,
};
/*
 * vmbus_add_channel_kobj - setup a sub-directory under device/channels
 */
int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
{
        const struct device *device = &dev->device;
        struct kobject *kobj = &channel->kobj;
        u32 relid = channel->offermsg.child_relid;
        int ret;

        kobj->kset = dev->channels_kset;
        ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
                                   "%u", relid);
        if (ret)
                return ret;

        ret = sysfs_create_group(kobj, &vmbus_chan_group);

        if (ret) {
                /*
                 * The calling functions' error handling paths will cleanup the
                 * empty channel directory.
                 */
                dev_err(device, "Unable to set up channel sysfs files\n");
                return ret;
        }

        kobject_uevent(kobj, KOBJ_ADD);

        return 0;
}
/*
 * vmbus_remove_channel_attr_group - remove the channel's attribute group
 */
void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
{
        sysfs_remove_group(&channel->kobj, &vmbus_chan_group);
}
/*
 * vmbus_device_create - Creates and registers a new child device
 * on the vmbus.
 */
struct hv_device *vmbus_device_create(const guid_t *type,
                                      const guid_t *instance,
                                      struct vmbus_channel *channel)
{
        struct hv_device *child_device_obj;

        child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
        if (!child_device_obj) {
                pr_err("Unable to allocate device object for child device\n");
                return NULL;
        }

        child_device_obj->channel = channel;
        guid_copy(&child_device_obj->dev_type, type);
        guid_copy(&child_device_obj->dev_instance, instance);
        child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */

        return child_device_obj;
}
/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
        struct kobject *kobj = &child_device_obj->device.kobj;
        int ret;

        dev_set_name(&child_device_obj->device, "%pUl",
                     &child_device_obj->channel->offermsg.offer.if_instance);

        child_device_obj->device.bus = &hv_bus;
        child_device_obj->device.parent = &hv_acpi_dev->dev;
        child_device_obj->device.release = vmbus_device_release;

        /*
         * Register with the LDM. This will kick off the driver/device
         * binding...which will eventually call vmbus_match() and vmbus_probe()
         */
        ret = device_register(&child_device_obj->device);
        if (ret) {
                pr_err("Unable to register child device\n");
                return ret;
        }

        child_device_obj->channels_kset = kset_create_and_add("channels",
                                                              NULL, kobj);
        if (!child_device_obj->channels_kset) {
                ret = -ENOMEM;
                goto err_dev_unregister;
        }

        ret = vmbus_add_channel_kobj(child_device_obj,
                                     child_device_obj->channel);
        if (ret) {
                pr_err("Unable to register primary channel\n");
                goto err_kset_unregister;
        }
        hv_debug_add_dev_dir(child_device_obj);

        return 0;

err_kset_unregister:
        kset_unregister(child_device_obj->channels_kset);

err_dev_unregister:
        device_unregister(&child_device_obj->device);
        return ret;
}
/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
        pr_debug("child device %s unregistered\n",
                dev_name(&device_obj->device));

        kset_unregister(device_obj->channels_kset);

        /*
         * Kick off the process of unregistering the device.
         * This will call vmbus_remove() and eventually vmbus_device_release()
         */
        device_unregister(&device_obj->device);
}
/*
 * VMBUS is an acpi enumerated device. Get the information we
 * need from DSDT.
 */
#define VTPM_BASE_ADDRESS 0xfed40000
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
        resource_size_t start = 0;
        resource_size_t end = 0;
        struct resource *new_res;
        struct resource **old_res = &hyperv_mmio;
        struct resource **prev_res = NULL;
        struct resource r;

        switch (res->type) {

        /*
         * "Address" descriptors are for bus windows. Ignore
         * "memory" descriptors, which are for registers on
         * devices.
         */
        case ACPI_RESOURCE_TYPE_ADDRESS32:
                start = res->data.address32.address.minimum;
                end = res->data.address32.address.maximum;
                break;

        case ACPI_RESOURCE_TYPE_ADDRESS64:
                start = res->data.address64.address.minimum;
                end = res->data.address64.address.maximum;
                break;

        /*
         * The IRQ information is needed only on ARM64, which Hyper-V
         * sets up in the extended format. IRQ information is present
         * on x86/x64 in the non-extended format but it is not used by
         * Linux. So don't bother checking for the non-extended format.
         */
        case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
                if (!acpi_dev_resource_interrupt(res, 0, &r)) {
                        pr_err("Unable to parse Hyper-V ACPI interrupt\n");
                        return AE_ERROR;
                }
                /* ARM64 INTID for VMbus */
                vmbus_interrupt = res->data.extended_irq.interrupts[0];
                /* Linux IRQ number */
                vmbus_irq = r.start;
                return AE_OK;

        default:
                /* Unused resource type */
                return AE_OK;

        }
        /*
         * Ignore ranges that are below 1MB, as they're not
         * necessary or useful here.
         */
        if (end < 0x100000)
                return AE_OK;

        new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
        if (!new_res)
                return AE_NO_MEMORY;

        /* If this range overlaps the virtual TPM, truncate it. */
        if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
                end = VTPM_BASE_ADDRESS;

        new_res->name = "hyperv mmio";
        new_res->flags = IORESOURCE_MEM;
        new_res->start = start;
        new_res->end = end;

        /*
         * If two ranges are adjacent, merge them.
         */
        do {
                if (!*old_res) {
                        *old_res = new_res;
                        break;
                }

                if (((*old_res)->end + 1) == new_res->start) {
                        (*old_res)->end = new_res->end;
                        kfree(new_res);
                        break;
                }

                if ((*old_res)->start == new_res->end + 1) {
                        (*old_res)->start = new_res->start;
                        kfree(new_res);
                        break;
                }

                if ((*old_res)->start > new_res->end) {
                        new_res->sibling = *old_res;
                        if (prev_res)
                                (*prev_res)->sibling = new_res;
                        *old_res = new_res;
                        break;
                }

                prev_res = old_res;
                old_res = &(*old_res)->sibling;

        } while (1);

        return AE_OK;
}
*device
)
2179 struct resource
*cur_res
;
2180 struct resource
*next_res
;
2184 __release_region(hyperv_mmio
, fb_mmio
->start
,
2185 resource_size(fb_mmio
));
2189 for (cur_res
= hyperv_mmio
; cur_res
; cur_res
= next_res
) {
2190 next_res
= cur_res
->sibling
;
static void vmbus_reserve_fb(void)
{
        int size;
        /*
         * Make a claim for the frame buffer in the resource tree under the
         * first node, which will be the one below 4GB.  The length seems to
         * be underreported, particularly in a Generation 1 VM.  So start out
         * reserving a larger area and make it smaller until it succeeds.
         */

        if (screen_info.lfb_base) {
                if (efi_enabled(EFI_BOOT))
                        size = max_t(__u32, screen_info.lfb_size, 0x800000);
                else
                        size = max_t(__u32, screen_info.lfb_size, 0x4000000);

                for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
                        fb_mmio = __request_region(hyperv_mmio,
                                                   screen_info.lfb_base, size,
                                                   fb_mmio_name, 0);
                }
        }
}
2223 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
2224 * @new: If successful, supplied a pointer to the
2225 * allocated MMIO space.
2226 * @device_obj: Identifies the caller
2227 * @min: Minimum guest physical address of the
2229 * @max: Maximum guest physical address
2230 * @size: Size of the range to be allocated
2231 * @align: Alignment of the range to be allocated
2232 * @fb_overlap_ok: Whether this allocation can be allowed
2233 * to overlap the video frame buffer.
2235 * This function walks the resources granted to VMBus by the
2236 * _CRS object in the ACPI namespace underneath the parent
2237 * "bridge" whether that's a root PCI bus in the Generation 1
2238 * case or a Module Device in the Generation 2 case. It then
2239 * attempts to allocate from the global MMIO pool in a way that
2240 * matches the constraints supplied in these parameters and by
2243 * Return: 0 on success, -errno on failure
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok)
{
	struct resource *iter, *shadow;
	resource_size_t range_min, range_max, start;
	const char *dev_n = dev_name(&device_obj->device);
	int retval;

	retval = -ENXIO;
	mutex_lock(&hyperv_mmio_lock);

	/*
	 * If overlaps with frame buffers are allowed, then first attempt to
	 * make the allocation from within the reserved region. Because it
	 * is already reserved, no shadow allocation is necessary.
	 */
	if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
	    !(max < fb_mmio->start)) {
		range_min = fb_mmio->start;
		range_max = fb_mmio->end;
		/* Round the candidate address up to the requested alignment. */
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				retval = 0;
				goto exit;
			}
		}
	}

	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= max) || (iter->end <= min))
			continue;

		range_min = iter->start;
		range_max = iter->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			shadow = __request_region(iter, start, size, NULL,
						  IORESOURCE_BUSY);
			if (!shadow)
				continue;

			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				shadow->name = (char *)*new;
				retval = 0;
				goto exit;
			}

			__release_region(iter, start, size);
		}
	}

exit:
	mutex_unlock(&hyperv_mmio_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
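
/*
 * For illustration (hypothetical caller, not part of this file): the
 * round-up "(range_min + align - 1) & ~(align - 1)" assumes align is a
 * power of two; e.g. range_min = 0x100010 with align = 0x1000 yields
 * 0x101000. A typical caller pairs this function with vmbus_free_mmio():
 *
 *	struct resource *res;
 *
 *	if (!vmbus_allocate_mmio(&res, hdev, 0, -1, 0x100000, 0x1000,
 *				 false)) {
 *		(map and use res->start .. res->end here)
 *		vmbus_free_mmio(res->start, resource_size(res));
 *	}
 */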
/**
 * vmbus_free_mmio() - Free a memory-mapped I/O range.
 * @start:		Base address of region to release.
 * @size:		Size of the range to be released.
 *
 * This function releases anything requested by
 * vmbus_allocate_mmio().
 */
void vmbus_free_mmio(resource_size_t start, resource_size_t size)
{
	struct resource *iter;

	mutex_lock(&hyperv_mmio_lock);
	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= start + size) || (iter->end <= start))
			continue;

		__release_region(iter, start, size);
	}
	release_mem_region(start, size);
	mutex_unlock(&hyperv_mmio_lock);
}
EXPORT_SYMBOL_GPL(vmbus_free_mmio);
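
/*
 * Callers are expected to release exactly the range they were granted:
 * both the shadow claim in the hyperv_mmio tree and the exclusive
 * region in the global resource tree are keyed by the same start/size
 * pair that vmbus_allocate_mmio() returned.
 */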
static int vmbus_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	int ret_val = -ENODEV;
	struct acpi_device *ancestor;

	hv_acpi_dev = device;

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				     vmbus_walk_resources, NULL);

	if (ACPI_FAILURE(result))
		goto acpi_walk_err;
	/*
	 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
	 * firmware) is the VMOD that has the mmio ranges. Get that.
	 */
	for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
					     vmbus_walk_resources, NULL);

		if (ACPI_FAILURE(result))
			continue;
		if (hyperv_mmio) {
			vmbus_reserve_fb();
			break;
		}
	}
	ret_val = 0;

acpi_walk_err:
	complete(&probe_event);
	if (ret_val)
		vmbus_acpi_remove(device);
	return ret_val;
}
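
/*
 * complete(&probe_event) must fire on both the success and the failure
 * path: hv_acpi_init() below blocks on probe_event with a 5 second
 * timeout, so signalling it unconditionally lets module init proceed,
 * or fail fast, as soon as the ACPI probe has run.
 */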
#ifdef CONFIG_PM_SLEEP
static int vmbus_bus_suspend(struct device *dev)
{
	struct vmbus_channel *channel, *sc;

	while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
		/*
		 * We wait here until the completion of any channel
		 * offers that are currently in progress.
		 */
		msleep(1);
	}

	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!is_hvsock_channel(channel))
			continue;

		vmbus_force_channel_rescinded(channel);
	}
	mutex_unlock(&vmbus_connection.channel_mutex);

	/*
	 * Wait until all the sub-channels and hv_sock channels have been
	 * cleaned up. Sub-channels should be destroyed upon suspend, otherwise
	 * they would conflict with the new sub-channels that will be created
	 * in the resume path. hv_sock channels should also be destroyed, but
	 * a hv_sock channel of an established hv_sock connection can not be
	 * really destroyed since it may still be referenced by the userspace
	 * application, so we just force the hv_sock channel to be rescinded
	 * by vmbus_force_channel_rescinded(), and the userspace application
	 * will thoroughly destroy the channel after hibernation.
	 *
	 * Note: the counter nr_chan_close_on_suspend may never go above 0 if
	 * the VM has no sub-channel and hv_sock channel, e.g. a 1-vCPU VM.
	 */
	if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
		wait_for_completion(&vmbus_connection.ready_for_suspend_event);

	if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
		pr_err("Can not suspend due to a previous failed resuming\n");
		return -EBUSY;
	}

	mutex_lock(&vmbus_connection.channel_mutex);

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		/*
		 * Remove the channel from the array of channels and invalidate
		 * the channel's relid. Upon resume, vmbus_onoffer() will fix
		 * up the relid (and other fields, if necessary) and add the
		 * channel back to the array.
		 */
		vmbus_channel_unmap_relid(channel);
		channel->offermsg.child_relid = INVALID_RELID;

		if (is_hvsock_channel(channel)) {
			if (!channel->rescind) {
				pr_err("hv_sock channel not rescinded!\n");
				WARN_ON_ONCE(1);
			}
			continue;
		}

		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			pr_err("Sub-channel not deleted!\n");
			WARN_ON_ONCE(1);
		}

		atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
	}

	mutex_unlock(&vmbus_connection.channel_mutex);

	vmbus_initiate_unload(false);

	/* Reset the event for the next resume. */
	reinit_completion(&vmbus_connection.ready_for_resume_event);

	return 0;
}
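
/*
 * nr_chan_fixup_on_resume, incremented above once per primary channel
 * whose relid was invalidated, is what vmbus_bus_resume() checks: the
 * resume path warns if it is still zero, re-requests offers, and waits
 * on ready_for_resume_event until the host's re-offers have fixed up
 * every counted channel (presumably decrementing the counter as each
 * offer is processed in vmbus_onoffer()).
 */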
static int vmbus_bus_resume(struct device *dev)
{
	struct vmbus_channel_msginfo *msginfo;
	size_t msgsize;
	int ret;

	/*
	 * We only use the 'vmbus_proto_version', which was in use before
	 * hibernation, to re-negotiate with the host.
	 */
	if (!vmbus_proto_version) {
		pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version);
		return -EINVAL;
	}

	msgsize = sizeof(*msginfo) +
		  sizeof(struct vmbus_channel_initiate_contact);

	msginfo = kzalloc(msgsize, GFP_KERNEL);

	if (msginfo == NULL)
		return -ENOMEM;

	ret = vmbus_negotiate_version(msginfo, vmbus_proto_version);

	kfree(msginfo);

	if (ret != 0)
		return ret;

	WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0);

	vmbus_request_offers();

	if (wait_for_completion_timeout(
		&vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
		pr_err("Some vmbus device is missing after suspending?\n");

	/* Reset the event for the next suspend. */
	reinit_completion(&vmbus_connection.ready_for_suspend_event);

	return 0;
}
#else
#define vmbus_bus_suspend NULL
#define vmbus_bus_resume NULL
#endif /* CONFIG_PM_SLEEP */
static const struct acpi_device_id vmbus_acpi_device_ids[] = {
	{"VMBUS", 0},
	{"VMBus", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
/*
 * Note: we must use the "noirq" ops, otherwise hibernation can not work with
 * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in
 * the resume path, the pci "noirq" restore op runs before "non-noirq" op (see
 * resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() ->
 * dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's
 * resume callback must also run via the "noirq" ops.
 *
 * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment
 * earlier in this file before vmbus_pm.
 */
static const struct dev_pm_ops vmbus_bus_pm = {
	.suspend_noirq	= NULL,
	.resume_noirq	= NULL,
	.freeze_noirq	= vmbus_bus_suspend,
	.thaw_noirq	= vmbus_bus_resume,
	.poweroff_noirq	= vmbus_bus_suspend,
	.restore_noirq	= vmbus_bus_resume
};

static struct acpi_driver vmbus_acpi_driver = {
	.name = "vmbus",
	.ids = vmbus_acpi_device_ids,
	.ops = {
		.add = vmbus_acpi_add,
		.remove = vmbus_acpi_remove,
	},
	.drv.pm = &vmbus_bus_pm,
};
static void hv_kexec_handler(void)
{
	hv_stimer_global_cleanup();
	vmbus_initiate_unload(false);
	/* Make sure conn_state is set as hv_synic_cleanup checks for it */
	mb();
	cpuhp_remove_state(hyperv_cpuhp_online);
	hyperv_cleanup();
}
static void hv_crash_handler(struct pt_regs *regs)
{
	int cpu;

	vmbus_initiate_unload(true);
	/*
	 * In crash handler we can't schedule synic cleanup for all CPUs,
	 * doing the cleanup for current CPU only. This should be sufficient
	 * for kdump.
	 */
	cpu = smp_processor_id();
	hv_stimer_cleanup(cpu);
	hv_synic_disable_regs(cpu);
	hyperv_cleanup();
}
static int hv_synic_suspend(void)
{
	/*
	 * When we reach here, all the non-boot CPUs have been offlined.
	 * If we're in a legacy configuration where stimer Direct Mode is
	 * not enabled, the stimers on the non-boot CPUs have been unbound
	 * in hv_synic_cleanup() -> hv_stimer_legacy_cleanup() ->
	 * hv_stimer_cleanup() -> clockevents_unbind_device().
	 *
	 * hv_synic_suspend() only runs on CPU0 with interrupts disabled.
	 * Here we do not call hv_stimer_legacy_cleanup() on CPU0 because:
	 * 1) it's unnecessary as interrupts remain disabled between
	 * syscore_suspend() and syscore_resume(): see create_image() and
	 * resume_target_kernel();
	 * 2) the stimer on CPU0 is automatically disabled later by
	 * syscore_suspend() -> timekeeping_suspend() -> tick_suspend() -> ...
	 * -> clockevents_shutdown() -> ... -> hv_ce_shutdown();
	 * 3) a warning would be triggered if we call
	 * clockevents_unbind_device(), which may sleep, in an
	 * interrupts-disabled context.
	 */

	hv_synic_disable_regs(0);

	return 0;
}
static void hv_synic_resume(void)
{
	hv_synic_enable_regs(0);

	/*
	 * Note: we don't need to call hv_stimer_init(0), because the timer
	 * on CPU0 is not unbound in hv_synic_suspend(), and the timer is
	 * automatically re-enabled in timekeeping_resume().
	 */
}

/* The callbacks run only on CPU0, with irqs_disabled. */
static struct syscore_ops hv_synic_syscore_ops = {
	.suspend = hv_synic_suspend,
	.resume = hv_synic_resume,
};
static int __init hv_acpi_init(void)
{
	int ret, t;

	if (!hv_is_hyperv_initialized())
		return -ENODEV;

	init_completion(&probe_event);

	/*
	 * Get ACPI resources first.
	 */
	ret = acpi_bus_register_driver(&vmbus_acpi_driver);

	if (ret)
		return ret;

	t = wait_for_completion_timeout(&probe_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}
	hv_debug_init();

	ret = vmbus_bus_init();
	if (ret)
		goto cleanup;

	hv_setup_kexec_handler(hv_kexec_handler);
	hv_setup_crash_handler(hv_crash_handler);

	register_syscore_ops(&hv_synic_syscore_ops);

	return 0;

cleanup:
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	hv_acpi_dev = NULL;
	return ret;
}
static void __exit vmbus_exit(void)
{
	int cpu;

	unregister_syscore_ops(&hv_synic_syscore_ops);

	hv_remove_kexec_handler();
	hv_remove_crash_handler();
	vmbus_connection.conn_state = DISCONNECTED;
	hv_stimer_global_cleanup();
	vmbus_disconnect();
	hv_remove_vmbus_irq();
	for_each_online_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		tasklet_kill(&hv_cpu->msg_dpc);
	}
	hv_debug_rm_all_dir();

	vmbus_free_channels();
	kfree(vmbus_connection.channels);

	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		kmsg_dump_unregister(&hv_kmsg_dumper);
		unregister_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &hyperv_panic_block);
	}

	free_page((unsigned long)hv_panic_page);
	unregister_sysctl_table(hv_ctl_table_hdr);
	hv_ctl_table_hdr = NULL;
	bus_unregister(&hv_bus);

	cpuhp_remove_state(hyperv_cpuhp_online);
	hv_synic_free();
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
}
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V VMBus Driver");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);