// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/sched/task_stack.h>

#include <asm/mshyperv.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
#include <linux/efi.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h>
#include "hyperv_vmbus.h"
struct vmbus_dynid {
	struct list_head node;
	struct hv_vmbus_device_id id;
};

static struct acpi_device  *hv_acpi_dev;

static struct completion probe_event;

static int hyperv_cpuhp_online;

static void *hv_panic_page;
/*
 * Boolean to control whether to report panic messages over Hyper-V.
 *
 * It can be set via /proc/sys/kernel/hyperv_record_panic_msg
 */
static int sysctl_record_panic_msg = 1;
static int hyperv_report_reg(void)
{
	return !sysctl_record_panic_msg || !hv_panic_page;
}
static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
			      void *args)
{
	struct pt_regs *regs;

	vmbus_initiate_unload(true);

	/*
	 * Hyper-V should be notified only once about a panic.  If we will be
	 * doing hyperv_report_panic_msg() later with kmsg data, don't do
	 * the notification here.
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
	    && hyperv_report_reg()) {
		regs = current_pt_regs();
		hyperv_report_panic(regs, val, false);
	}
	return NOTIFY_DONE;
}
static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
			    void *args)
{
	struct die_args *die = (struct die_args *)args;
	struct pt_regs *regs = die->regs;

	/*
	 * Hyper-V should be notified only once about a panic.  If we will be
	 * doing hyperv_report_panic_msg() later with kmsg data, don't do
	 * the notification here.
	 */
	if (hyperv_report_reg())
		hyperv_report_panic(regs, val, true);
	return NOTIFY_DONE;
}
static struct notifier_block hyperv_die_block = {
	.notifier_call = hyperv_die_event,
};
static struct notifier_block hyperv_panic_block = {
	.notifier_call = hyperv_panic_event,
};

static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_MUTEX(hyperv_mmio_lock);
static int vmbus_exists(void)
{
	if (hv_acpi_dev == NULL)
		return -ENODEV;

	return 0;
}
static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid % 32;
}
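
/*
 * Example (illustrative): a channel whose offermsg.monitorid is 69 maps to
 * trigger group 69 / 32 == 2 and bit offset 69 % 32 == 5 within that group
 * of the monitor page.
 */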
static u32 channel_pending(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);

	return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}
static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       &hv_dev->channel->offermsg.offer.if_type);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       &hv_dev->channel->offermsg.offer.if_instance);
}
static DEVICE_ATTR_RO(device_id);
static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
}
static DEVICE_ATTR_RO(modalias);
#ifdef CONFIG_NUMA
static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;

	return sprintf(buf, "%d\n", hv_dev->channel->numa_node);
}
static DEVICE_ATTR_RO(numa_node);
#endif
static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);
static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);
static ssize_t channel_vp_mapping_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
	unsigned long flags;
	int buf_size = PAGE_SIZE, n_written, tot_written;
	struct list_head *cur;

	if (!channel)
		return -ENODEV;

	tot_written = snprintf(buf, buf_size, "%u:%u\n",
		channel->offermsg.child_relid, channel->target_cpu);

	spin_lock_irqsave(&channel->lock, flags);

	list_for_each(cur, &channel->sc_list) {
		if (tot_written >= buf_size - 1)
			break;

		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
		n_written = scnprintf(buf + tot_written,
				     buf_size - tot_written,
				     "%u:%u\n",
				     cur_sc->offermsg.child_relid,
				     cur_sc->target_cpu);
		tot_written += n_written;
	}

	spin_unlock_irqrestore(&channel->lock, flags);

	return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);
static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = hv_dev->driver_override;
	if (strlen(driver_override)) {
		hv_dev->driver_override = driver_override;
	} else {
		kfree(driver_override);
		hv_dev->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
	device_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(driver_override);
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_dev_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	&dev_attr_channel_vp_mapping.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_driver_override.attr,
	NULL,
};
/*
 * Device-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj,
					 struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	const struct hv_device *hv_dev = device_to_hv_device(dev);

	/* Hide the monitor attributes if the monitor mechanism is not used. */
	if (!hv_dev->channel->offermsg.monitor_allocated &&
	    (attr == &dev_attr_monitor_id.attr ||
	     attr == &dev_attr_server_monitor_pending.attr ||
	     attr == &dev_attr_client_monitor_pending.attr ||
	     attr == &dev_attr_server_monitor_latency.attr ||
	     attr == &dev_attr_client_monitor_latency.attr ||
	     attr == &dev_attr_server_monitor_conn_id.attr ||
	     attr == &dev_attr_client_monitor_conn_id.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group vmbus_dev_group = {
	.attrs = vmbus_dev_attrs,
	.is_visible = vmbus_dev_attr_is_visible
};
__ATTRIBUTE_GROUPS(vmbus_dev);
/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in the userspace. The udev will then look at its
 * rule and the uevent generated here to load the appropriate driver
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
	struct hv_device *dev = device_to_hv_device(device);
	const char *format = "MODALIAS=vmbus:%*phN";

	return add_uevent_var(env, format, UUID_SIZE, &dev->dev_type);
}
static const struct hv_vmbus_device_id *
hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const guid_t *guid)
{
	if (id == NULL)
		return NULL; /* empty device table */

	for (; !guid_is_null(&id->guid); id++)
		if (guid_equal(&id->guid, guid))
			return id;

	return NULL;
}
static const struct hv_vmbus_device_id *
hv_vmbus_dynid_match(struct hv_driver *drv, const guid_t *guid)
{
	const struct hv_vmbus_device_id *id = NULL;
	struct vmbus_dynid *dynid;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry(dynid, &drv->dynids.list, node) {
		if (guid_equal(&dynid->id.guid, guid)) {
			id = &dynid->id;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return id;
}
static const struct hv_vmbus_device_id vmbus_device_null;

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
							struct hv_device *dev)
{
	const guid_t *guid = &dev->dev_type;
	const struct hv_vmbus_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (dev->driver_override && strcmp(dev->driver_override, drv->name))
		return NULL;

	/* Look at the dynamic ids first, before the static ones */
	id = hv_vmbus_dynid_match(drv, guid);
	if (!id)
		id = hv_vmbus_dev_match(drv->id_table, guid);

	/* driver_override will always match, send a dummy id */
	if (!id && dev->driver_override)
		id = &vmbus_device_null;

	return id;
}
/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
{
	struct vmbus_dynid *dynid;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;

	dynid->id.guid = *guid;

	spin_lock(&drv->dynids.lock);
	list_add_tail(&dynid->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	return driver_attach(&drv->driver);
}
static void vmbus_free_dynids(struct hv_driver *drv)
{
	struct vmbus_dynid *dynid, *n;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		list_del(&dynid->node);
		kfree(dynid);
	}
	spin_unlock(&drv->dynids.lock);
}
/*
 * store_new_id - sysfs frontend to vmbus_add_dynid()
 *
 * Allow GUIDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
			    size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	guid_t guid;
	ssize_t retval;

	retval = guid_parse(buf, &guid);
	if (retval)
		return retval;

	if (hv_vmbus_dynid_match(drv, &guid))
		return -EEXIST;

	retval = vmbus_add_dynid(drv, &guid);
	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR_WO(new_id);
/*
 * store_remove_id - remove a vmbus device ID from this driver
 *
 * Removes a dynamic vmbus device ID from this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
			       size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct vmbus_dynid *dynid, *n;
	guid_t guid;
	ssize_t retval;

	retval = guid_parse(buf, &guid);
	if (retval)
		return retval;

	retval = -ENODEV;
	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		struct hv_vmbus_device_id *id = &dynid->id;

		if (guid_equal(&id->guid, &guid)) {
			list_del(&dynid->node);
			kfree(dynid);
			retval = count;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return retval;
}
static DRIVER_ATTR_WO(remove_id);
static struct attribute *vmbus_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus_drv);
/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct hv_device *hv_dev = device_to_hv_device(device);

	/* The hv_sock driver handles all hv_sock offers. */
	if (is_hvsock_channel(hv_dev->channel))
		return drv->hvsock;

	if (hv_vmbus_get_id(drv, hv_dev))
		return 1;

	return 0;
}
/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
	int ret = 0;
	struct hv_driver *drv =
			drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;

	dev_id = hv_vmbus_get_id(drv, dev);
	if (drv->probe) {
		ret = drv->probe(dev, dev_id);
		if (ret != 0)
			pr_err("probe failed for device %s (%d)\n",
			       dev_name(child_device), ret);
	} else {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		ret = -ENODEV;
	}
	return ret;
}
/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	if (child_device->driver) {
		drv = drv_to_hv_drv(child_device->driver);
		if (drv->remove)
			drv->remove(dev);
	}

	return 0;
}
/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return;

	drv = drv_to_hv_drv(child_device->driver);

	if (drv->shutdown)
		drv->shutdown(dev);
}
#ifdef CONFIG_PM_SLEEP
/*
 * vmbus_suspend - Suspend a vmbus device
 */
static int vmbus_suspend(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return 0;

	drv = drv_to_hv_drv(child_device->driver);
	if (!drv->suspend)
		return -EOPNOTSUPP;

	return drv->suspend(dev);
}

/*
 * vmbus_resume - Resume a vmbus device
 */
static int vmbus_resume(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return 0;

	drv = drv_to_hv_drv(child_device->driver);
	if (!drv->resume)
		return -EOPNOTSUPP;

	return drv->resume(dev);
}
#else
#define vmbus_suspend NULL
#define vmbus_resume NULL
#endif /* CONFIG_PM_SLEEP */
/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);
	struct vmbus_channel *channel = hv_dev->channel;

	hv_debug_rm_dev_dir(hv_dev);

	mutex_lock(&vmbus_connection.channel_mutex);
	hv_process_channel_removal(channel);
	mutex_unlock(&vmbus_connection.channel_mutex);
	kfree(hv_dev);
}
/*
 * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
 *
 * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
 * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
 * is no way to wake up a Generation-2 VM.
 *
 * The other 4 ops are for hibernation.
 */
static const struct dev_pm_ops vmbus_pm = {
	.suspend_noirq	= NULL,
	.resume_noirq	= NULL,
	.freeze_noirq	= vmbus_suspend,
	.thaw_noirq	= vmbus_resume,
	.poweroff_noirq	= vmbus_suspend,
	.restore_noirq	= vmbus_resume,
};
/* The one and only one */
static struct bus_type hv_bus = {
	.name =			"vmbus",
	.match =		vmbus_match,
	.shutdown =		vmbus_shutdown,
	.remove =		vmbus_remove,
	.probe =		vmbus_probe,
	.uevent =		vmbus_uevent,
	.dev_groups =		vmbus_dev_groups,
	.drv_groups =		vmbus_drv_groups,
	.pm =			&vmbus_pm,
};
struct onmessage_work_context {
	struct work_struct work;
	struct {
		struct hv_message_header header;
		u8 payload[];
	} msg;
};
static void vmbus_onmessage_work(struct work_struct *work)
{
	struct onmessage_work_context *ctx;

	/* Do not process messages if we're in DISCONNECTED state */
	if (vmbus_connection.conn_state == DISCONNECTED)
		return;

	ctx = container_of(work, struct onmessage_work_context,
			   work);
	vmbus_onmessage((struct vmbus_channel_message_header *)
			&ctx->msg.payload);
	kfree(ctx);
}
void vmbus_on_msg_dpc(unsigned long data)
{
	struct hv_per_cpu_context *hv_cpu = (void *)data;
	void *page_addr = hv_cpu->synic_message_page;
	struct hv_message *msg = (struct hv_message *)page_addr +
				  VMBUS_MESSAGE_SINT;
	struct vmbus_channel_message_header *hdr;
	const struct vmbus_channel_message_table_entry *entry;
	struct onmessage_work_context *ctx;
	u32 message_type = msg->header.message_type;

	/*
	 * 'enum vmbus_channel_message_type' is supposed to always be 'u32' as
	 * it is being used in 'struct vmbus_channel_message_header' definition
	 * which is supposed to match hypervisor ABI.
	 */
	BUILD_BUG_ON(sizeof(enum vmbus_channel_message_type) != sizeof(u32));

	if (message_type == HVMSG_NONE)
		/* no msg */
		return;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;

	trace_vmbus_on_msg_dpc(hdr);

	if (hdr->msgtype >= CHANNELMSG_COUNT) {
		WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
		goto msg_handled;
	}

	if (msg->header.payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
		WARN_ONCE(1, "payload size is too large (%d)\n",
			  msg->header.payload_size);
		goto msg_handled;
	}

	entry = &channel_message_table[hdr->msgtype];

	if (!entry->message_handler)
		goto msg_handled;

	if (msg->header.payload_size < entry->min_payload_len) {
		WARN_ONCE(1, "message too short: msgtype=%d len=%d\n",
			  hdr->msgtype, msg->header.payload_size);
		goto msg_handled;
	}

	if (entry->handler_type == VMHT_BLOCKING) {
		ctx = kmalloc(sizeof(*ctx) + msg->header.payload_size,
			      GFP_ATOMIC);
		if (ctx == NULL)
			return;

		INIT_WORK(&ctx->work, vmbus_onmessage_work);
		memcpy(&ctx->msg, msg, sizeof(msg->header) +
		       msg->header.payload_size);

		/*
		 * The host can generate a rescind message while we
		 * may still be handling the original offer. We deal with
		 * this condition by relying on the synchronization provided
		 * by offer_in_progress and by channel_mutex.  See also the
		 * inline comments in vmbus_onoffer_rescind().
		 */
		switch (hdr->msgtype) {
		case CHANNELMSG_RESCIND_CHANNELOFFER:
			/*
			 * If we are handling the rescind message;
			 * schedule the work on the global work queue.
			 *
			 * The OFFER message and the RESCIND message should
			 * not be handled by the same serialized work queue,
			 * because the OFFER handler may call vmbus_open(),
			 * which tries to open the channel by sending an
			 * OPEN_CHANNEL message to the host and waits for
			 * the host's response; however, if the host has
			 * rescinded the channel before it receives the
			 * OPEN_CHANNEL message, the host just silently
			 * ignores the OPEN_CHANNEL message; as a result,
			 * the guest's OFFER handler hangs for ever, if we
			 * handle the RESCIND message in the same serialized
			 * work queue: the RESCIND handler can not start to
			 * run before the OFFER handler finishes.
			 */
			schedule_work(&ctx->work);
			break;

		case CHANNELMSG_OFFERCHANNEL:
			/*
			 * The host sends the offer message of a given channel
			 * before sending the rescind message of the same
			 * channel.  These messages are sent to the guest's
			 * connect CPU; the guest then starts processing them
			 * in the tasklet handler on this CPU:
			 *
			 * VMBUS_CONNECT_CPU
			 *
			 * [vmbus_on_msg_dpc()]
			 * atomic_inc()  // CHANNELMSG_OFFERCHANNEL
			 * queue_work()
			 * ...
			 * [vmbus_on_msg_dpc()]
			 * schedule_work()  // CHANNELMSG_RESCIND_CHANNELOFFER
			 *
			 * We rely on the memory-ordering properties of the
			 * queue_work() and schedule_work() primitives, which
			 * guarantee that the atomic increment will be visible
			 * to the CPUs which will execute the offer & rescind
			 * works by the time these works will start execution.
			 */
			atomic_inc(&vmbus_connection.offer_in_progress);
			fallthrough;

		default:
			queue_work(vmbus_connection.work_queue, &ctx->work);
		}
	} else
		entry->message_handler(hdr);

msg_handled:
	vmbus_signal_eom(msg, message_type);
}
#ifdef CONFIG_PM_SLEEP
/*
 * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
 * hibernation, because hv_sock connections can not persist across hibernation.
 */
static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
{
	struct onmessage_work_context *ctx;
	struct vmbus_channel_rescind_offer *rescind;

	WARN_ON(!is_hvsock_channel(channel));

	/*
	 * Allocation size is small and the allocation should really not fail,
	 * otherwise the state of the hv_sock connections ends up in limbo.
	 */
	ctx = kzalloc(sizeof(*ctx) + sizeof(*rescind),
		      GFP_KERNEL | __GFP_NOFAIL);

	/*
	 * So far, these are not really used by Linux. Just set them to the
	 * reasonable values conforming to the definitions of the fields.
	 */
	ctx->msg.header.message_type = 1;
	ctx->msg.header.payload_size = sizeof(*rescind);

	/* These values are actually used by Linux. */
	rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.payload;
	rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
	rescind->child_relid = channel->offermsg.child_relid;

	INIT_WORK(&ctx->work, vmbus_onmessage_work);

	queue_work(vmbus_connection.work_queue, &ctx->work);
}
#endif /* CONFIG_PM_SLEEP */
/*
 * Schedule all channels with events pending
 */
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
{
	unsigned long *recv_int_page;
	u32 maxbits, relid;

	if (vmbus_proto_version < VERSION_WIN8) {
		maxbits = MAX_NUM_CHANNELS_SUPPORTED;
		recv_int_page = vmbus_connection.recv_int_page;
	} else {
		/*
		 * When the host is win8 and beyond, the event page
		 * can be directly checked to get the id of the channel
		 * that has the interrupt pending.
		 */
		void *page_addr = hv_cpu->synic_event_page;
		union hv_synic_event_flags *event
			= (union hv_synic_event_flags *)page_addr +
						 VMBUS_MESSAGE_SINT;

		maxbits = HV_EVENT_FLAGS_COUNT;
		recv_int_page = event->flags;
	}

	if (unlikely(!recv_int_page))
		return;

	for_each_set_bit(relid, recv_int_page, maxbits) {
		void (*callback_fn)(void *context);
		struct vmbus_channel *channel;

		if (!sync_test_and_clear_bit(relid, recv_int_page))
			continue;

		/* Special case - vmbus channel protocol msg */
		if (relid == 0)
			continue;

		/*
		 * Pairs with the kfree_rcu() in vmbus_chan_release().
		 * Guarantees that the channel data structure doesn't
		 * get freed while the channel pointer below is being
		 * dereferenced.
		 */
		rcu_read_lock();

		/* Find channel based on relid */
		channel = relid2channel(relid);
		if (channel == NULL)
			goto sched_unlock_rcu;

		if (channel->rescind)
			goto sched_unlock_rcu;

		/*
		 * Make sure that the ring buffer data structure doesn't get
		 * freed while we dereference the ring buffer pointer.  Test
		 * for the channel's onchannel_callback being NULL within a
		 * sched_lock critical section.  See also the inline comments
		 * in vmbus_reset_channel_cb().
		 */
		spin_lock(&channel->sched_lock);

		callback_fn = channel->onchannel_callback;
		if (unlikely(callback_fn == NULL))
			goto sched_unlock;

		trace_vmbus_chan_sched(channel);

		++channel->interrupts;

		switch (channel->callback_mode) {
		case HV_CALL_ISR:
			(*callback_fn)(channel->channel_callback_context);
			break;

		case HV_CALL_BATCHED:
			hv_begin_read(&channel->inbound);
			fallthrough;
		case HV_CALL_DIRECT:
			tasklet_schedule(&channel->callback_event);
		}

sched_unlock:
		spin_unlock(&channel->sched_lock);
sched_unlock_rcu:
		rcu_read_unlock();
	}
}
static void vmbus_isr(void)
{
	struct hv_per_cpu_context *hv_cpu
		= this_cpu_ptr(hv_context.cpu_context);
	void *page_addr = hv_cpu->synic_event_page;
	struct hv_message *msg;
	union hv_synic_event_flags *event;
	bool handled = false;

	if (unlikely(page_addr == NULL))
		return;

	event = (union hv_synic_event_flags *)page_addr +
					 VMBUS_MESSAGE_SINT;
	/*
	 * Check for events before checking for messages. This is the order
	 * in which events and messages are checked in Windows guests on
	 * Hyper-V, and the Windows team suggested we do the same.
	 */

	if ((vmbus_proto_version == VERSION_WS2008) ||
		(vmbus_proto_version == VERSION_WIN7)) {

		/* Since we are a child, we only need to check bit 0 */
		if (sync_test_and_clear_bit(0, event->flags))
			handled = true;
	} else {
		/*
		 * Our host is win8 or above. The signaling mechanism
		 * has changed and we can directly look at the event page.
		 * If bit n is set then we have an interrupt on the channel
		 * whose id is n.
		 */
		handled = true;
	}

	if (handled)
		vmbus_chan_sched(hv_cpu);

	page_addr = hv_cpu->synic_message_page;
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
			hv_stimer0_isr();
			vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
		} else
			tasklet_schedule(&hv_cpu->msg_dpc);
	}

	add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
}
/*
 * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
 * buffer and call into Hyper-V to transfer the data.
 */
static void hv_kmsg_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason)
{
	size_t bytes_written;
	phys_addr_t panic_pa;

	/* We are only interested in panics. */
	if ((reason != KMSG_DUMP_PANIC) || (!sysctl_record_panic_msg))
		return;

	panic_pa = virt_to_phys(hv_panic_page);

	/*
	 * Write dump contents to the page. No need to synchronize; panic should
	 * be single-threaded.
	 */
	kmsg_dump_get_buffer(dumper, true, hv_panic_page, HV_HYP_PAGE_SIZE,
			     &bytes_written);
	if (bytes_written)
		hyperv_report_panic_msg(panic_pa, bytes_written);
}

static struct kmsg_dumper hv_kmsg_dumper = {
	.dump = hv_kmsg_dump,
};
static struct ctl_table_header *hv_ctl_table_hdr;

/*
 * sysctl option to allow the user to control whether kmsg data should be
 * reported to Hyper-V on panic.
 */
static struct ctl_table hv_ctl_table[] = {
	{
		.procname	= "hyperv_record_panic_msg",
		.data		= &sysctl_record_panic_msg,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE
	},
	{}
};

static struct ctl_table hv_root_table[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= hv_ctl_table
	},
	{}
};
/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- retrieve the channel offers
 */
static int vmbus_bus_init(void)
{
	int ret;

	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	ret = bus_register(&hv_bus);
	if (ret)
		return ret;

	hv_setup_vmbus_irq(vmbus_isr);

	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;

	/*
	 * Initialize the per-cpu interrupt state and stimer state.
	 * Then connect to the host.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
				hv_synic_init, hv_synic_cleanup);
	if (ret < 0)
		goto err_cpuhp;
	hyperv_cpuhp_online = ret;

	ret = vmbus_connect();
	if (ret)
		goto err_connect;

	/*
	 * Only register if the crash MSRs are available
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		u64 hyperv_crash_ctl;
		/*
		 * Sysctl registration is not fatal, since by default
		 * reporting is enabled.
		 */
		hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
		if (!hv_ctl_table_hdr)
			pr_err("Hyper-V: sysctl table register error");

		/*
		 * Register for panic kmsg callback only if the right
		 * capability is supported by the hypervisor.
		 */
		hv_get_crash_ctl(hyperv_crash_ctl);
		if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG) {
			hv_panic_page = (void *)hv_alloc_hyperv_zeroed_page();
			if (hv_panic_page) {
				ret = kmsg_dump_register(&hv_kmsg_dumper);
				if (ret) {
					pr_err("Hyper-V: kmsg dump register "
						"error 0x%x\n", ret);
					hv_free_hyperv_page(
					    (unsigned long)hv_panic_page);
					hv_panic_page = NULL;
				}
			} else
				pr_err("Hyper-V: panic message page memory "
					"allocation failed");
		}

		register_die_notifier(&hyperv_die_block);
	}

	/*
	 * Always register the panic notifier because we need to unload
	 * the VMbus channel connection to prevent any VMbus
	 * activity after the VM panics.
	 */
	atomic_notifier_chain_register(&panic_notifier_list,
				       &hyperv_panic_block);

	vmbus_request_offers();

	return 0;

err_connect:
	cpuhp_remove_state(hyperv_cpuhp_online);
err_cpuhp:
	hv_synic_free();
err_alloc:
	hv_remove_vmbus_irq();

	bus_unregister(&hv_bus);
	unregister_sysctl_table(hv_ctl_table_hdr);
	hv_ctl_table_hdr = NULL;
	return ret;
}
/**
 * __vmbus_driver_register() - Register a vmbus's driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the drv
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 *
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
	int ret;

	pr_info("registering driver %s\n", hv_driver->name);

	ret = vmbus_exists();
	if (ret < 0)
		return ret;

	hv_driver->driver.name = hv_driver->name;
	hv_driver->driver.owner = owner;
	hv_driver->driver.mod_name = mod_name;
	hv_driver->driver.bus = &hv_bus;

	spin_lock_init(&hv_driver->dynids.lock);
	INIT_LIST_HEAD(&hv_driver->dynids.list);

	ret = driver_register(&hv_driver->driver);

	return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
/**
 * vmbus_driver_unregister() - Unregister a vmbus's driver
 * @hv_driver: Pointer to driver structure you want to
 *             un-register
 *
 * Un-register the given driver that was previous registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists()) {
		driver_unregister(&hv_driver->driver);
		vmbus_free_dynids(hv_driver);
	}
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
/*
 * Called when last reference to channel is gone.
 */
static void vmbus_chan_release(struct kobject *kobj)
{
	struct vmbus_channel *channel
		= container_of(kobj, struct vmbus_channel, kobj);

	kfree_rcu(channel, rcu);
}
struct vmbus_chan_attribute {
	struct attribute attr;
	ssize_t (*show)(struct vmbus_channel *chan, char *buf);
	ssize_t (*store)(struct vmbus_channel *chan,
			 const char *buf, size_t count);
};
#define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
	struct vmbus_chan_attribute chan_attr_##_name \
		= __ATTR(_name, _mode, _show, _store)
#define VMBUS_CHAN_ATTR_RW(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
#define VMBUS_CHAN_ATTR_RO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
#define VMBUS_CHAN_ATTR_WO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)
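
/*
 * For example (illustrative), VMBUS_CHAN_ATTR_RO(out_mask) below expands to
 *
 *	struct vmbus_chan_attribute chan_attr_out_mask = __ATTR_RO(out_mask);
 *
 * i.e. a read-only (0444) attribute named "out_mask" wired to
 * out_mask_show().
 */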
static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	const struct vmbus_chan_attribute *attribute
		= container_of(attr, struct vmbus_chan_attribute, attr);
	struct vmbus_channel *chan
		= container_of(kobj, struct vmbus_channel, kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(chan, buf);
}
static ssize_t vmbus_chan_attr_store(struct kobject *kobj,
				     struct attribute *attr, const char *buf,
				     size_t count)
{
	const struct vmbus_chan_attribute *attribute
		= container_of(attr, struct vmbus_chan_attribute, attr);
	struct vmbus_channel *chan
		= container_of(kobj, struct vmbus_channel, kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(chan, buf, count);
}
static const struct sysfs_ops vmbus_chan_sysfs_ops = {
	.show = vmbus_chan_attr_show,
	.store = vmbus_chan_attr_store,
};
static ssize_t out_mask_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(out_mask);
static ssize_t in_mask_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(in_mask);
static ssize_t read_avail_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(read_avail);
static ssize_t write_avail_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(write_avail);
static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%u\n", channel->target_cpu);
}
static ssize_t target_cpu_store(struct vmbus_channel *channel,
				const char *buf, size_t count)
{
	u32 target_cpu, origin_cpu;
	ssize_t ret = count;

	if (vmbus_proto_version < VERSION_WIN10_V4_1)
		return -EIO;

	if (sscanf(buf, "%uu", &target_cpu) != 1)
		return -EIO;

	/* Validate target_cpu for the cpumask_test_cpu() operation below. */
	if (target_cpu >= nr_cpumask_bits)
		return -EINVAL;

	/* No CPUs should come up or down during this. */
	cpus_read_lock();

	if (!cpumask_test_cpu(target_cpu, cpu_online_mask)) {
		cpus_read_unlock();
		return -EINVAL;
	}

	/*
	 * Synchronizes target_cpu_store() and channel closure:
	 *
	 * { Initially: state = CHANNEL_OPENED }
	 *
	 * CPU1				CPU2
	 *
	 * [target_cpu_store()]		[vmbus_disconnect_ring()]
	 *
	 * LOCK channel_mutex		LOCK channel_mutex
	 * LOAD r1 = state		LOAD r2 = state
	 * IF (r1 == CHANNEL_OPENED)	IF (r2 == CHANNEL_OPENED)
	 *   SEND MODIFYCHANNEL		  STORE state = CHANNEL_OPEN
	 *   [...]			  SEND CLOSECHANNEL
	 * UNLOCK channel_mutex		UNLOCK channel_mutex
	 *
	 * Forbids: r1 == r2 == CHANNEL_OPENED (i.e., CPU1's LOCK precedes
	 * 		CPU2's LOCK) && CPU2's SEND precedes CPU1's SEND
	 *
	 * Note.  The host processes the channel messages "sequentially", in
	 * the order in which they are received on a per-partition basis.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);

	/*
	 * Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels;
	 * avoid sending the message and fail here for such channels.
	 */
	if (channel->state != CHANNEL_OPENED_STATE) {
		ret = -EIO;
		goto cpu_store_unlock;
	}

	origin_cpu = channel->target_cpu;
	if (target_cpu == origin_cpu)
		goto cpu_store_unlock;

	if (vmbus_send_modifychannel(channel->offermsg.child_relid,
				     hv_cpu_number_to_vp_number(target_cpu))) {
		ret = -EIO;
		goto cpu_store_unlock;
	}

	/*
	 * Warning.  At this point, there is *no* guarantee that the host will
	 * have successfully processed the vmbus_send_modifychannel() request.
	 * See the header comment of vmbus_send_modifychannel() for more info.
	 *
	 * Lags in the processing of the above vmbus_send_modifychannel() can
	 * result in missed interrupts if the "old" target CPU is taken offline
	 * before Hyper-V starts sending interrupts to the "new" target CPU.
	 * But apart from this offlining scenario, the code tolerates such
	 * lags.  It will function correctly even if a channel interrupt comes
	 * in on a CPU that is different from the channel target_cpu value.
	 */

	channel->target_cpu = target_cpu;
	channel->target_vp = hv_cpu_number_to_vp_number(target_cpu);
	channel->numa_node = cpu_to_node(target_cpu);

	/* See init_vp_index(). */
	if (hv_is_perf_channel(channel))
		hv_update_alloced_cpus(origin_cpu, target_cpu);

	/* Currently set only for storvsc channels. */
	if (channel->change_target_cpu_callback) {
		(*channel->change_target_cpu_callback)(channel,
				origin_cpu, target_cpu);
	}

cpu_store_unlock:
	mutex_unlock(&vmbus_connection.channel_mutex);
	cpus_read_unlock();
	return ret;
}
static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
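
/*
 * Usage sketch (illustrative): move a channel's interrupt handling to
 * CPU 3 and read the mapping back:
 *
 *	$ echo 3 > /sys/bus/vmbus/devices/<device GUID>/channels/<relid>/cpu
 *	$ cat /sys/bus/vmbus/devices/<device GUID>/channels/<relid>/cpu
 *	3
 */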
static ssize_t channel_pending_show(struct vmbus_channel *channel,
				    char *buf)
{
	return sprintf(buf, "%d\n",
		       channel_pending(channel,
				       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(pending, S_IRUGO, channel_pending_show, NULL);

static ssize_t channel_latency_show(struct vmbus_channel *channel,
				    char *buf)
{
	return sprintf(buf, "%d\n",
		       channel_latency(channel,
				       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(latency, S_IRUGO, channel_latency_show, NULL);
static ssize_t channel_interrupts_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%llu\n", channel->interrupts);
}
static VMBUS_CHAN_ATTR(interrupts, S_IRUGO, channel_interrupts_show, NULL);

static ssize_t channel_events_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%llu\n", channel->sig_events);
}
static VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL);
static ssize_t channel_intr_in_full_show(struct vmbus_channel *channel,
					 char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->intr_in_full);
}
static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);

static ssize_t channel_intr_out_empty_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->intr_out_empty);
}
static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);

static ssize_t channel_out_full_first_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->out_full_first);
}
static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);

static ssize_t channel_out_full_total_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->out_full_total);
}
static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);
static ssize_t subchannel_monitor_id_show(struct vmbus_channel *channel,
					  char *buf)
{
	return sprintf(buf, "%u\n", channel->offermsg.monitorid);
}
static VMBUS_CHAN_ATTR(monitor_id, S_IRUGO, subchannel_monitor_id_show, NULL);

static ssize_t subchannel_id_show(struct vmbus_channel *channel,
				  char *buf)
{
	return sprintf(buf, "%u\n",
		       channel->offermsg.offer.sub_channel_index);
}
static VMBUS_CHAN_ATTR_RO(subchannel_id);
static struct attribute *vmbus_chan_attrs[] = {
	&chan_attr_out_mask.attr,
	&chan_attr_in_mask.attr,
	&chan_attr_read_avail.attr,
	&chan_attr_write_avail.attr,
	&chan_attr_cpu.attr,
	&chan_attr_pending.attr,
	&chan_attr_latency.attr,
	&chan_attr_interrupts.attr,
	&chan_attr_events.attr,
	&chan_attr_intr_in_full.attr,
	&chan_attr_intr_out_empty.attr,
	&chan_attr_out_full_first.attr,
	&chan_attr_out_full_total.attr,
	&chan_attr_monitor_id.attr,
	&chan_attr_subchannel_id.attr,
	NULL
};
/*
 * Channel-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
					  struct attribute *attr, int idx)
{
	const struct vmbus_channel *channel =
		container_of(kobj, struct vmbus_channel, kobj);

	/* Hide the monitor attributes if the monitor mechanism is not used. */
	if (!channel->offermsg.monitor_allocated &&
	    (attr == &chan_attr_pending.attr ||
	     attr == &chan_attr_latency.attr ||
	     attr == &chan_attr_monitor_id.attr))
		return 0;

	return attr->mode;
}
= {
1919 .attrs
= vmbus_chan_attrs
,
1920 .is_visible
= vmbus_chan_attr_is_visible
1923 static struct kobj_type vmbus_chan_ktype
= {
1924 .sysfs_ops
= &vmbus_chan_sysfs_ops
,
1925 .release
= vmbus_chan_release
,
/*
 * vmbus_add_channel_kobj - setup a sub-directory under device/channels
 */
int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
{
	const struct device *device = &dev->device;
	struct kobject *kobj = &channel->kobj;
	u32 relid = channel->offermsg.child_relid;
	int ret;

	kobj->kset = dev->channels_kset;
	ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
				   "%u", relid);
	if (ret)
		return ret;

	ret = sysfs_create_group(kobj, &vmbus_chan_group);

	if (ret) {
		/*
		 * The calling functions' error handling paths will cleanup the
		 * empty channel directory.
		 */
		dev_err(device, "Unable to set up channel sysfs files\n");
		return ret;
	}

	kobject_uevent(kobj, KOBJ_ADD);

	return 0;
}
/*
 * vmbus_remove_channel_attr_group - remove the channel's attribute group
 */
void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
{
	sysfs_remove_group(&channel->kobj, &vmbus_chan_group);
}
/*
 * vmbus_device_create - Creates and registers a new child device
 * on the vmbus.
 */
struct hv_device *vmbus_device_create(const guid_t *type,
				      const guid_t *instance,
				      struct vmbus_channel *channel)
{
	struct hv_device *child_device_obj;

	child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
	if (!child_device_obj) {
		pr_err("Unable to allocate device object for child device\n");
		return NULL;
	}

	child_device_obj->channel = channel;
	guid_copy(&child_device_obj->dev_type, type);
	guid_copy(&child_device_obj->dev_instance, instance);
	child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */

	return child_device_obj;
}
/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
	struct kobject *kobj = &child_device_obj->device.kobj;
	int ret;

	dev_set_name(&child_device_obj->device, "%pUl",
		     &child_device_obj->channel->offermsg.offer.if_instance);

	child_device_obj->device.bus = &hv_bus;
	child_device_obj->device.parent = &hv_acpi_dev->dev;
	child_device_obj->device.release = vmbus_device_release;

	/*
	 * Register with the LDM. This will kick off the driver/device
	 * binding...which will eventually call vmbus_match() and vmbus_probe()
	 */
	ret = device_register(&child_device_obj->device);
	if (ret) {
		pr_err("Unable to register child device\n");
		return ret;
	}

	child_device_obj->channels_kset = kset_create_and_add("channels",
							      NULL, kobj);
	if (!child_device_obj->channels_kset) {
		ret = -ENOMEM;
		goto err_dev_unregister;
	}

	ret = vmbus_add_channel_kobj(child_device_obj,
				     child_device_obj->channel);
	if (ret) {
		pr_err("Unable to register primary channel\n");
		goto err_kset_unregister;
	}
	hv_debug_add_dev_dir(child_device_obj);

	return 0;

err_kset_unregister:
	kset_unregister(child_device_obj->channels_kset);

err_dev_unregister:
	device_unregister(&child_device_obj->device);
	return ret;
}
/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
	pr_debug("child device %s unregistered\n",
		dev_name(&device_obj->device));

	kset_unregister(device_obj->channels_kset);

	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release()
	 */
	device_unregister(&device_obj->device);
}
/*
 * VMBUS is an acpi enumerated device. Get the information we
 * need from DSDT.
 */
#define VTPM_BASE_ADDRESS 0xfed40000
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
	resource_size_t start = 0;
	resource_size_t end = 0;
	struct resource *new_res;
	struct resource **old_res = &hyperv_mmio;
	struct resource **prev_res = NULL;

	switch (res->type) {

	/*
	 * "Address" descriptors are for bus windows. Ignore
	 * "memory" descriptors, which are for registers on
	 * devices.
	 */
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		start = res->data.address32.address.minimum;
		end = res->data.address32.address.maximum;
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		start = res->data.address64.address.minimum;
		end = res->data.address64.address.maximum;
		break;

	default:
		/* Unused resource type */
		return AE_OK;

	}
	/*
	 * Ignore ranges that are below 1MB, as they're not
	 * necessary or useful here.
	 */
	if (end < 0x100000)
		return AE_OK;

	new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
	if (!new_res)
		return AE_NO_MEMORY;

	/* If this range overlaps the virtual TPM, truncate it. */
	if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
		end = VTPM_BASE_ADDRESS;

	new_res->name = "hyperv mmio";
	new_res->flags = IORESOURCE_MEM;
	new_res->start = start;
	new_res->end = end;

	/*
	 * If two ranges are adjacent, merge them.
	 */
	do {
		if (!*old_res) {
			*old_res = new_res;
			break;
		}

		if (((*old_res)->end + 1) == new_res->start) {
			(*old_res)->end = new_res->end;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start == new_res->end + 1) {
			(*old_res)->start = new_res->start;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start > new_res->end) {
			new_res->sibling = *old_res;
			if (prev_res)
				(*prev_res)->sibling = new_res;
			*old_res = new_res;
			break;
		}

		prev_res = old_res;
		old_res = &(*old_res)->sibling;

	} while (1);

	return AE_OK;
}
static int vmbus_acpi_remove(struct acpi_device *device)
{
	struct resource *cur_res;
	struct resource *next_res;

	if (hyperv_mmio) {
		if (fb_mmio) {
			__release_region(hyperv_mmio, fb_mmio->start,
					 resource_size(fb_mmio));
			fb_mmio = NULL;
		}

		for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
			next_res = cur_res->sibling;
			kfree(cur_res);
		}
	}

	return 0;
}
static void vmbus_reserve_fb(void)
{
	int size;
	/*
	 * Make a claim for the frame buffer in the resource tree under the
	 * first node, which will be the one below 4GB.  The length seems to
	 * be underreported, particularly in a Generation 1 VM.  So start out
	 * reserving a larger area and make it smaller until it succeeds.
	 */

	if (screen_info.lfb_base) {
		if (efi_enabled(EFI_BOOT))
			size = max_t(__u32, screen_info.lfb_size, 0x800000);
		else
			size = max_t(__u32, screen_info.lfb_size, 0x4000000);

		for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
			fb_mmio = __request_region(hyperv_mmio,
						   screen_info.lfb_base, size,
						   fb_mmio_name, 0);
		}
	}
}
/**
 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
 * @new:		If successful, supplies a pointer to the
 *			allocated MMIO space.
 * @device_obj:		Identifies the caller
 * @min:		Minimum guest physical address of the
 *			allocation
 * @max:		Maximum guest physical address
 * @size:		Size of the range to be allocated
 * @align:		Alignment of the range to be allocated
 * @fb_overlap_ok:	Whether this allocation can be allowed
 *			to overlap the video frame buffer.
 *
 * This function walks the resources granted to VMBus by the
 * _CRS object in the ACPI namespace underneath the parent
 * "bridge" whether that's a root PCI bus in the Generation 1
 * case or a Module Device in the Generation 2 case.  It then
 * attempts to allocate from the global MMIO pool in a way that
 * matches the constraints supplied in these parameters and by
 * the caller's needs.
 *
 * Return: 0 on success, -errno on failure
 */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok)
{
	struct resource *iter, *shadow;
	resource_size_t range_min, range_max, start;
	const char *dev_n = dev_name(&device_obj->device);
	int retval;

	retval = -ENXIO;
	mutex_lock(&hyperv_mmio_lock);

	/*
	 * If overlaps with frame buffers are allowed, then first attempt to
	 * make the allocation from within the reserved region.  Because it
	 * is already reserved, no shadow allocation is necessary.
	 */
	if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
	    !(max < fb_mmio->start)) {
		range_min = fb_mmio->start;
		range_max = fb_mmio->end;
		/* Round up to the next align boundary (align is a power of 2). */
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				retval = 0;
				goto exit;
			}
		}
	}

	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= max) || (iter->end <= min))
			continue;

		range_min = iter->start;
		range_max = iter->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			shadow = __request_region(iter, start, size, NULL,
						  IORESOURCE_BUSY);
			if (!shadow)
				continue;

			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				shadow->name = (char *)*new;
				retval = 0;
				goto exit;
			}

			__release_region(iter, start, size);
		}
	}

exit:
	mutex_unlock(&hyperv_mmio_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
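/*
 * Usage sketch (hypothetical caller, not part of this file): a VMBus
 * driver needing a 1MB, 1MB-aligned MMIO window anywhere in guest
 * physical address space could do:
 *
 *	struct resource *res;
 *	int ret;
 *
 *	ret = vmbus_allocate_mmio(&res, hdev, 0, -1, 0x100000, 0x100000,
 *				  false);
 *	if (ret)
 *		return ret;
 *	(ioremap res->start and use the region, then)
 *	vmbus_free_mmio(res->start, resource_size(res));
 *
 * This mirrors how in-tree consumers such as hyperv_fb use the API;
 * exact parameters vary by driver.
 */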
/**
 * vmbus_free_mmio() - Free a memory-mapped I/O range.
 * @start:		Base address of region to release.
 * @size:		Size of the range to be released.
 *
 * This function releases anything requested by
 * vmbus_allocate_mmio().
 */
void vmbus_free_mmio(resource_size_t start, resource_size_t size)
{
	struct resource *iter;

	mutex_lock(&hyperv_mmio_lock);
	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= start + size) || (iter->end <= start))
			continue;

		__release_region(iter, start, size);
	}
	release_mem_region(start, size);
	mutex_unlock(&hyperv_mmio_lock);
}
EXPORT_SYMBOL_GPL(vmbus_free_mmio);
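/*
 * Design note: vmbus_allocate_mmio() records each allocation twice, once
 * as a "shadow" child of the hyperv_mmio tracking tree and once as an
 * exclusive region in the global iomem resource tree, so the release above
 * must undo both: __release_region() drops the shadow, and
 * release_mem_region() drops the exclusive claim.
 */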
static int vmbus_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	int ret_val = -ENODEV;
	struct acpi_device *ancestor;

	hv_acpi_dev = device;

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				     vmbus_walk_resources, NULL);

	if (ACPI_FAILURE(result))
		goto acpi_walk_err;
	/*
	 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
	 * firmware) is the VMOD that has the mmio ranges. Get that.
	 */
	for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
					     vmbus_walk_resources, NULL);

		if (ACPI_FAILURE(result))
			continue;
		if (hyperv_mmio) {
			vmbus_reserve_fb();
			break;
		}
	}
	ret_val = 0;

acpi_walk_err:
	complete(&probe_event);
	if (ret_val)
		vmbus_acpi_remove(device);
	return ret_val;
}
#ifdef CONFIG_PM_SLEEP
static int vmbus_bus_suspend(struct device *dev)
{
	struct vmbus_channel *channel, *sc;
	unsigned long flags;

	while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
		/*
		 * We wait here until the completion of any channel
		 * offers that are currently in progress.
		 */
		msleep(1);
	}

	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!is_hvsock_channel(channel))
			continue;

		vmbus_force_channel_rescinded(channel);
	}
	mutex_unlock(&vmbus_connection.channel_mutex);

	/*
	 * Wait until all the sub-channels and hv_sock channels have been
	 * cleaned up. Sub-channels should be destroyed upon suspend, otherwise
	 * they would conflict with the new sub-channels that will be created
	 * in the resume path. hv_sock channels should also be destroyed, but
	 * a hv_sock channel of an established hv_sock connection cannot
	 * really be destroyed since it may still be referenced by the
	 * userspace application, so we just force the hv_sock channel to be
	 * rescinded by vmbus_force_channel_rescinded(), and the userspace
	 * application will thoroughly destroy the channel after hibernation.
	 *
	 * Note: the counter nr_chan_close_on_suspend may never go above 0 if
	 * the VM has no sub-channel and hv_sock channel, e.g. a 1-vCPU VM.
	 */
	if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
		wait_for_completion(&vmbus_connection.ready_for_suspend_event);

	WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0);

	mutex_lock(&vmbus_connection.channel_mutex);

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		/*
		 * Remove the channel from the array of channels and invalidate
		 * the channel's relid.  Upon resume, vmbus_onoffer() will fix
		 * up the relid (and other fields, if necessary) and add the
		 * channel back to the array.
		 */
		vmbus_channel_unmap_relid(channel);
		channel->offermsg.child_relid = INVALID_RELID;

		if (is_hvsock_channel(channel)) {
			if (!channel->rescind) {
				pr_err("hv_sock channel not rescinded!\n");
				WARN_ON_ONCE(1);
			}
			continue;
		}

		spin_lock_irqsave(&channel->lock, flags);
		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			pr_err("Sub-channel not deleted!\n");
			WARN_ON_ONCE(1);
		}
		spin_unlock_irqrestore(&channel->lock, flags);

		atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
	}

	mutex_unlock(&vmbus_connection.channel_mutex);

	vmbus_initiate_unload(false);

	/* Reset the event for the next resume. */
	reinit_completion(&vmbus_connection.ready_for_resume_event);

	return 0;
}
static int vmbus_bus_resume(struct device *dev)
{
	struct vmbus_channel_msginfo *msginfo;
	size_t msgsize;
	int ret;

	/*
	 * We only use the 'vmbus_proto_version', which was in use before
	 * hibernation, to re-negotiate with the host.
	 */
	if (!vmbus_proto_version) {
		pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version);
		return -EINVAL;
	}

	msgsize = sizeof(*msginfo) +
		  sizeof(struct vmbus_channel_initiate_contact);

	msginfo = kzalloc(msgsize, GFP_KERNEL);

	if (msginfo == NULL)
		return -ENOMEM;

	ret = vmbus_negotiate_version(msginfo, vmbus_proto_version);

	kfree(msginfo);

	if (ret != 0)
		return ret;

	WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0);

	vmbus_request_offers();

	wait_for_completion(&vmbus_connection.ready_for_resume_event);

	/* Reset the event for the next suspend. */
	reinit_completion(&vmbus_connection.ready_for_suspend_event);

	return 0;
}
#else
#define vmbus_bus_suspend NULL
#define vmbus_bus_resume NULL
#endif /* CONFIG_PM_SLEEP */
static const struct acpi_device_id vmbus_acpi_device_ids[] = {
	{"VMBUS", 0},
	{"VMBus", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
/*
 * Note: we must use the "no_irq" ops, otherwise hibernation cannot work with
 * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in
 * the resume path, the pci "noirq" restore op runs before the "non-noirq"
 * op (see resume_target_kernel() -> dpm_resume_start(), and
 * hibernation_restore() -> dpm_resume_end()). This means vmbus_bus_resume()
 * and the pci-hyperv's resume callback must also run via the "noirq" ops.
 *
 * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment
 * earlier in this file before vmbus_pm.
 */
static const struct dev_pm_ops vmbus_bus_pm = {
	.suspend_noirq	= NULL,
	.resume_noirq	= NULL,
	.freeze_noirq	= vmbus_bus_suspend,
	.thaw_noirq	= vmbus_bus_resume,
	.poweroff_noirq	= vmbus_bus_suspend,
	.restore_noirq	= vmbus_bus_resume,
};

static struct acpi_driver vmbus_acpi_driver = {
	.name = "vmbus",
	.ids = vmbus_acpi_device_ids,
	.ops = {
		.add = vmbus_acpi_add,
		.remove = vmbus_acpi_remove,
	},
	.drv.pm = &vmbus_bus_pm,
};
static void hv_kexec_handler(void)
{
	hv_stimer_global_cleanup();
	vmbus_initiate_unload(false);
	vmbus_connection.conn_state = DISCONNECTED;
	/* Make sure conn_state is set as hv_synic_cleanup checks for it */
	mb();
	cpuhp_remove_state(hyperv_cpuhp_online);
	hyperv_cleanup();
};

static void hv_crash_handler(struct pt_regs *regs)
{
	int cpu;

	vmbus_initiate_unload(true);
	/*
	 * In crash handler we can't schedule synic cleanup for all CPUs,
	 * doing the cleanup for current CPU only. This should be sufficient
	 * for kdump.
	 */
	vmbus_connection.conn_state = DISCONNECTED;
	cpu = smp_processor_id();
	hv_stimer_cleanup(cpu);
	hv_synic_disable_regs(cpu);
	hyperv_cleanup();
};
static int hv_synic_suspend(void)
{
	/*
	 * When we reach here, all the non-boot CPUs have been offlined.
	 * If we're in a legacy configuration where stimer Direct Mode is
	 * not enabled, the stimers on the non-boot CPUs have been unbound
	 * in hv_synic_cleanup() -> hv_stimer_legacy_cleanup() ->
	 * hv_stimer_cleanup() -> clockevents_unbind_device().
	 *
	 * hv_synic_suspend() only runs on CPU0 with interrupts disabled.
	 * Here we do not call hv_stimer_legacy_cleanup() on CPU0 because:
	 * 1) it's unnecessary as interrupts remain disabled between
	 * syscore_suspend() and syscore_resume(): see create_image() and
	 * resume_target_kernel()
	 * 2) the stimer on CPU0 is automatically disabled later by
	 * syscore_suspend() -> timekeeping_suspend() -> tick_suspend() -> ...
	 * -> clockevents_shutdown() -> ... -> hv_ce_shutdown()
	 * 3) a warning would be triggered if we call
	 * clockevents_unbind_device(), which may sleep, in an
	 * interrupts-disabled context.
	 */
	hv_synic_disable_regs(0);

	return 0;
}

static void hv_synic_resume(void)
{
	hv_synic_enable_regs(0);

	/*
	 * Note: we don't need to call hv_stimer_init(0), because the timer
	 * on CPU0 is not unbound in hv_synic_suspend(), and the timer is
	 * automatically re-enabled in timekeeping_resume().
	 */
}

/* The callbacks run only on CPU0, with irqs_disabled. */
static struct syscore_ops hv_synic_syscore_ops = {
	.suspend = hv_synic_suspend,
	.resume = hv_synic_resume,
};
static int __init hv_acpi_init(void)
{
	int ret, t;

	if (!hv_is_hyperv_initialized())
		return -ENODEV;

	init_completion(&probe_event);

	/*
	 * Get ACPI resources first.
	 */
	ret = acpi_bus_register_driver(&vmbus_acpi_driver);

	if (ret)
		return ret;

	t = wait_for_completion_timeout(&probe_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}
	hv_debug_init();

	ret = vmbus_bus_init();
	if (ret)
		goto cleanup;

	hv_setup_kexec_handler(hv_kexec_handler);
	hv_setup_crash_handler(hv_crash_handler);

	register_syscore_ops(&hv_synic_syscore_ops);

	return 0;

cleanup:
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	hv_acpi_dev = NULL;
	return ret;
}
static void __exit vmbus_exit(void)
{
	int cpu;

	unregister_syscore_ops(&hv_synic_syscore_ops);

	hv_remove_kexec_handler();
	hv_remove_crash_handler();
	vmbus_connection.conn_state = DISCONNECTED;
	hv_stimer_global_cleanup();
	vmbus_disconnect();
	hv_remove_vmbus_irq();
	for_each_online_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		tasklet_kill(&hv_cpu->msg_dpc);
	}
	hv_debug_rm_all_dir();

	vmbus_free_channels();
	kfree(vmbus_connection.channels);

	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		kmsg_dump_unregister(&hv_kmsg_dumper);
		unregister_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &hyperv_panic_block);
	}

	free_page((unsigned long)hv_panic_page);
	unregister_sysctl_table(hv_ctl_table_hdr);
	hv_ctl_table_hdr = NULL;
	bus_unregister(&hv_bus);

	cpuhp_remove_state(hyperv_cpuhp_online);
	hv_synic_free();
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
}
2664 MODULE_LICENSE("GPL");
2665 MODULE_DESCRIPTION("Microsoft Hyper-V VMBus Driver");
2667 subsys_initcall(hv_acpi_init
);
2668 module_exit(vmbus_exit
);