// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Microsoft Corporation.
 *
 * Author:
 *   Jake Oshins <jakeo@microsoft.com>
 *
 * This driver acts as a paravirtual front-end for PCI Express root buses.
 * When a PCI Express function (either an entire device or an SR-IOV
 * Virtual Function) is being passed through to the VM, this driver exposes
 * a new bus to the guest VM. This is modeled as a root PCI bus because
 * no bridges are being exposed to the VM. In fact, with a "Generation 2"
 * VM within Hyper-V, there may seem to be no PCI bus at all in the VM
 * until a device has been exposed using this driver.
 *
 * Each root PCI bus has its own PCI domain, which is called "Segment" in
 * the PCI Firmware Specifications. Thus while each device passed through
 * to the VM using this front-end will appear at "device 0", the domain will
 * be unique. Typically, each bus will have one PCI function on it, though
 * this driver does support more than one.
 *
 * In order to map the interrupts from the device through to the guest VM,
 * this driver also implements an IRQ Domain, which handles interrupts (either
 * MSI or MSI-X) associated with the functions on the bus. As interrupts are
 * set up, torn down, or reaffined, this driver communicates with the
 * underlying hypervisor to adjust the mappings in the I/O MMU so that each
 * interrupt will be delivered to the correct virtual processor at the right
 * vector. This driver does not support level-triggered (line-based)
 * interrupts, and will report that the Interrupt Line register in the
 * function's configuration space is zero.
 *
 * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V
 * facilities. For instance, the configuration space of a function exposed
 * by Hyper-V is mapped into a single page of memory space, and the
 * read and write handlers for config space must be aware of this mechanism.
 * Similarly, device setup and teardown involves messages sent to and from
 * the PCI back-end driver in Hyper-V.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/delay.h>
#include <linux/semaphore.h>
#include <linux/irqdomain.h>
#include <asm/irqdomain.h>
#include <asm/apic.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/hyperv.h>
#include <linux/refcount.h>
#include <asm/mshyperv.h>
/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 */

#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
#define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
#define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff)

enum pci_protocol_version_t {
	PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),	/* Win10 */
	PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2),	/* RS1 */
	PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3),	/* Vibranium */
	PCI_PROTOCOL_VERSION_1_4 = PCI_MAKE_VERSION(1, 4),	/* WS2022 */
};
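/*
 * Worked example (illustrative arithmetic only): PCI_MAKE_VERSION(1, 4)
 * encodes as (1 << 16) | 4 == 0x00010004, so PCI_MAJOR_VERSION() recovers
 * 1 and PCI_MINOR_VERSION() recovers 4.
 */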
#define CPU_AFFINITY_ALL -1ULL

/*
 * Supported protocol versions in the order of probing - highest go
 * first.
 */
static enum pci_protocol_version_t pci_protocol_versions[] = {
	PCI_PROTOCOL_VERSION_1_4,
	PCI_PROTOCOL_VERSION_1_3,
	PCI_PROTOCOL_VERSION_1_2,
	PCI_PROTOCOL_VERSION_1_1,
};
#define PCI_CONFIG_MMIO_LENGTH	0x2000
#define CFG_PAGE_OFFSET		0x1000
#define CFG_PAGE_SIZE		(PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)

#define MAX_SUPPORTED_MSI_MESSAGES 0x400

#define STATUS_REVISION_MISMATCH 0xC0000059

/* space for 32bit serial number as string */
#define SLOT_NAME_SIZE 11
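/*
 * Illustrative note: PCI_CONFIG_MMIO_LENGTH covers two 4 KiB pages. The
 * first page (offset 0) is the index page that selects a function, and the
 * second page (CFG_PAGE_OFFSET 0x1000, CFG_PAGE_SIZE 0x2000 - 0x1000 =
 * 0x1000) windows that function's configuration space; see the config
 * space accessors further below.
 */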
/*
 * Message Types
 */

enum pci_message_type {
	/*
	 * Version 1.1
	 */
	PCI_MESSAGE_BASE                = 0x42490000,
	PCI_BUS_RELATIONS               = PCI_MESSAGE_BASE + 0,
	PCI_QUERY_BUS_RELATIONS         = PCI_MESSAGE_BASE + 1,
	PCI_POWER_STATE_CHANGE          = PCI_MESSAGE_BASE + 4,
	PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5,
	PCI_QUERY_RESOURCE_RESOURCES    = PCI_MESSAGE_BASE + 6,
	PCI_BUS_D0ENTRY                 = PCI_MESSAGE_BASE + 7,
	PCI_BUS_D0EXIT                  = PCI_MESSAGE_BASE + 8,
	PCI_READ_BLOCK                  = PCI_MESSAGE_BASE + 9,
	PCI_WRITE_BLOCK                 = PCI_MESSAGE_BASE + 0xA,
	PCI_EJECT                       = PCI_MESSAGE_BASE + 0xB,
	PCI_QUERY_STOP                  = PCI_MESSAGE_BASE + 0xC,
	PCI_REENABLE                    = PCI_MESSAGE_BASE + 0xD,
	PCI_QUERY_STOP_FAILED           = PCI_MESSAGE_BASE + 0xE,
	PCI_EJECTION_COMPLETE           = PCI_MESSAGE_BASE + 0xF,
	PCI_RESOURCES_ASSIGNED          = PCI_MESSAGE_BASE + 0x10,
	PCI_RESOURCES_RELEASED          = PCI_MESSAGE_BASE + 0x11,
	PCI_INVALIDATE_BLOCK            = PCI_MESSAGE_BASE + 0x12,
	PCI_QUERY_PROTOCOL_VERSION      = PCI_MESSAGE_BASE + 0x13,
	PCI_CREATE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x14,
	PCI_DELETE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x15,
	PCI_RESOURCES_ASSIGNED2         = PCI_MESSAGE_BASE + 0x16,
	PCI_CREATE_INTERRUPT_MESSAGE2   = PCI_MESSAGE_BASE + 0x17,
	PCI_DELETE_INTERRUPT_MESSAGE2   = PCI_MESSAGE_BASE + 0x18, /* unused */
	PCI_BUS_RELATIONS2              = PCI_MESSAGE_BASE + 0x19,
	PCI_RESOURCES_ASSIGNED3         = PCI_MESSAGE_BASE + 0x1A,
	PCI_CREATE_INTERRUPT_MESSAGE3   = PCI_MESSAGE_BASE + 0x1B,
	PCI_MESSAGE_MAXIMUM
};
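/*
 * For example (illustrative arithmetic only), PCI_READ_BLOCK encodes as
 * PCI_MESSAGE_BASE + 9 == 0x42490009; both sides demultiplex incoming
 * packets on this message type value.
 */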
/*
 * Structures defining the virtual PCI Express protocol.
 */

union pci_version {
	struct {
		u16 minor_version;
		u16 major_version;
	} parts;
	u32 version;
} __packed;

/*
 * Function numbers are 8-bits wide on Express, as interpreted through ARI,
 * which is all this driver does. This representation is the one used in
 * Windows, which is what is expected when sending this back and forth with
 * the Hyper-V parent partition.
 */
union win_slot_encoding {
	struct {
		u32	dev:5;
		u32	func:3;
		u32	reserved:24;
	} bits;
	u32 slot;
} __packed;

/*
 * Pretty much as defined in the PCI Specifications.
 */
struct pci_function_description {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;
	u8	prog_intf;
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union win_slot_encoding win_slot;
	u32	ser;	/* serial number */
} __packed;
enum pci_device_description_flags {
	HV_PCI_DEVICE_FLAG_NONE			= 0x0,
	HV_PCI_DEVICE_FLAG_NUMA_AFFINITY	= 0x1,
};
struct pci_function_description2 {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;
	u8	prog_intf;
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union win_slot_encoding win_slot;
	u32	ser;	/* serial number */
	u32	flags;
	u16	virtual_numa_node;
	u16	reserved;
} __packed;
/**
 * struct hv_msi_desc
 * @vector:		IDT entry
 * @delivery_mode:	As defined in Intel's Programmer's
 *			Reference Manual, Volume 3, Chapter 8.
 * @vector_count:	Number of contiguous entries in the
 *			Interrupt Descriptor Table that are
 *			occupied by this Message-Signaled
 *			Interrupt. For "MSI", as first defined
 *			in PCI 2.2, this can be between 1 and
 *			32. For "MSI-X," as first defined in PCI
 *			3.0, this must be 1, as each MSI-X table
 *			entry would have its own descriptor.
 * @reserved:		Empty space
 * @cpu_mask:		All the target virtual processors.
 */
struct hv_msi_desc {
	u8	vector;
	u8	delivery_mode;
	u16	vector_count;
	u32	reserved;
	u64	cpu_mask;
} __packed;

/**
 * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
 * @vector:		IDT entry
 * @delivery_mode:	As defined in Intel's Programmer's
 *			Reference Manual, Volume 3, Chapter 8.
 * @vector_count:	Number of contiguous entries in the
 *			Interrupt Descriptor Table that are
 *			occupied by this Message-Signaled
 *			Interrupt. For "MSI", as first defined
 *			in PCI 2.2, this can be between 1 and
 *			32. For "MSI-X," as first defined in PCI
 *			3.0, this must be 1, as each MSI-X table
 *			entry would have its own descriptor.
 * @processor_count:	number of bits enabled in array.
 * @processor_array:	All the target virtual processors.
 */
struct hv_msi_desc2 {
	u8	vector;
	u8	delivery_mode;
	u16	vector_count;
	u16	processor_count;
	u16	processor_array[32];
} __packed;
/**
 * struct hv_msi_desc3 - 1.3 version of hv_msi_desc
 * Everything is the same as in 'hv_msi_desc2' except that the size of the
 * 'vector' field is larger to support bigger vector values. For ex: LPI
 * vectors on ARM.
 */
struct hv_msi_desc3 {
	u32	vector;
	u8	delivery_mode;
	u8	reserved;
	u16	vector_count;
	u16	processor_count;
	u16	processor_array[32];
} __packed;
/**
 * struct tran_int_desc
 * @reserved:		unused, padding
 * @vector_count:	same as in hv_msi_desc
 * @data:		This is the "data payload" value that is
 *			written by the device when it generates
 *			a message-signaled interrupt, either MSI
 *			or MSI-X.
 * @address:		This is the address to which the data
 *			payload is written on interrupt
 *			generation.
 */
struct tran_int_desc {
	u16	reserved;
	u16	vector_count;
	u32	data;
	u64	address;
} __packed;
/*
 * A generic message format for virtual PCI.
 * Specific message formats are defined later in the file.
 */

struct pci_message {
	u32 type;
} __packed;

struct pci_child_message {
	struct pci_message message_type;
	union win_slot_encoding wslot;
} __packed;

struct pci_incoming_message {
	struct vmpacket_descriptor hdr;
	struct pci_message message_type;
} __packed;

struct pci_response {
	struct vmpacket_descriptor hdr;
	s32 status;			/* negative values are failures */
} __packed;

struct pci_packet {
	void (*completion_func)(void *context, struct pci_response *resp,
				int resp_packet_size);
	void *compl_ctxt;

	struct pci_message message[];
};
/*
 * Specific message types supporting the PCI protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * pci_version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version guest will request.
 * reservedz: Reserved field, set to zero.
 */

struct pci_version_request {
	struct pci_message message_type;
	u32 protocol_version;
} __packed;
/*
 * Bus D0 Entry. This is sent from the guest to the host when the virtual
 * bus (PCI Express port) is ready for action.
 */

struct pci_bus_d0_entry {
	struct pci_message message_type;
	u32 reserved;
	u64 mmio_base;
} __packed;
struct pci_bus_relations {
	struct pci_incoming_message incoming;
	u32 device_count;
	struct pci_function_description func[];
} __packed;

struct pci_bus_relations2 {
	struct pci_incoming_message incoming;
	u32 device_count;
	struct pci_function_description2 func[];
} __packed;
struct pci_q_res_req_response {
	struct vmpacket_descriptor hdr;
	s32 status;			/* negative values are failures */
	u32 probed_bar[PCI_STD_NUM_BARS];
} __packed;
struct pci_set_power {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u32 power_state;		/* In Windows terms */
	u32 reserved;
} __packed;

struct pci_set_power_response {
	struct vmpacket_descriptor hdr;
	s32 status;			/* negative values are failures */
	union win_slot_encoding wslot;
	u32 resultant_state;		/* In Windows terms */
	u32 reserved;
} __packed;
struct pci_resources_assigned {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u8 memory_range[0x14][6];	/* not used here */
	u32 msi_descriptors;
	u32 reserved[4];
} __packed;

struct pci_resources_assigned2 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u8 memory_range[0x14][6];	/* not used here */
	u32 msi_descriptor_count;
	u8 reserved[70];
} __packed;
struct pci_create_interrupt {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc int_desc;
} __packed;

struct pci_create_int_response {
	struct pci_response response;
	u32 reserved;
	struct tran_int_desc int_desc;
} __packed;

struct pci_create_interrupt2 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc2 int_desc;
} __packed;

struct pci_create_interrupt3 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc3 int_desc;
} __packed;

struct pci_delete_interrupt {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct tran_int_desc int_desc;
} __packed;
/*
 * Note: the VM must pass a valid block id, wslot and bytes_requested.
 */
struct pci_read_block {
	struct pci_message message_type;
	u32 block_id;
	union win_slot_encoding wslot;
	u32 bytes_requested;
} __packed;

struct pci_read_block_response {
	struct vmpacket_descriptor hdr;
	u32 status;
	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
} __packed;

/*
 * Note: the VM must pass a valid block id, wslot and byte_count.
 */
struct pci_write_block {
	struct pci_message message_type;
	u32 block_id;
	union win_slot_encoding wslot;
	u32 byte_count;
	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
} __packed;
struct pci_dev_inval_block {
	struct pci_incoming_message incoming;
	union win_slot_encoding wslot;
	u64 block_mask;
} __packed;

struct pci_dev_incoming {
	struct pci_incoming_message incoming;
	union win_slot_encoding wslot;
} __packed;

struct pci_eject_response {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u32 status;
} __packed;
static int pci_ring_size = (4 * PAGE_SIZE);
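/*
 * Illustrative note: with the common 4 KiB PAGE_SIZE this requests a
 * 16 KiB VMBus ring buffer in each direction (PAGE_SIZE is architecture
 * dependent, so the absolute size may differ).
 */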
/*
 * Driver specific state.
 */

enum hv_pcibus_state {
	hv_pcibus_init = 0,
	hv_pcibus_probed,
	hv_pcibus_installed,
	hv_pcibus_removing,
	hv_pcibus_maximum
};
struct hv_pcibus_device {
#ifdef CONFIG_X86
	struct pci_sysdata sysdata;
#elif defined(CONFIG_ARM64)
	struct pci_config_window sysdata;
#endif
	struct pci_host_bridge *bridge;
	struct fwnode_handle *fwnode;
	/* Protocol version negotiated with the host */
	enum pci_protocol_version_t protocol_version;
	enum hv_pcibus_state state;
	struct hv_device *hdev;
	resource_size_t low_mmio_space;
	resource_size_t high_mmio_space;
	struct resource *mem_config;
	struct resource *low_mmio_res;
	struct resource *high_mmio_res;
	struct completion *survey_event;
	struct pci_bus *pci_bus;
	spinlock_t config_lock;		/* Avoid two threads writing index page */
	spinlock_t device_list_lock;	/* Protect lists below */
	void __iomem *cfg_addr;

	struct list_head children;
	struct list_head dr_list;

	struct msi_domain_info msi_info;
	struct irq_domain *irq_domain;

	spinlock_t retarget_msi_interrupt_lock;

	struct workqueue_struct *wq;

	/* Highest slot of child device with resources allocated */
	int wslot_res_allocated;

	/* hypercall arg, must not cross page boundary */
	struct hv_retarget_device_interrupt retarget_msi_interrupt_params;

	/*
	 * Don't put anything here: retarget_msi_interrupt_params must be last
	 */
};
/*
 * Tracks "Device Relations" messages from the host, which must be both
 * processed in order and deferred so that they don't run in the context
 * of the incoming packet callback.
 */
struct hv_dr_work {
	struct work_struct wrk;
	struct hv_pcibus_device *bus;
};
struct hv_pcidev_description {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;
	u8	prog_intf;
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union win_slot_encoding win_slot;
	u32	ser;	/* serial number */
	u32	flags;
	u16	virtual_numa_node;
	u16	reserved;
} __packed;

struct hv_dr_state {
	struct list_head list_entry;
	u32 device_count;
	struct hv_pcidev_description func[];
};
enum hv_pcichild_state {
	hv_pcichild_init = 0,
	hv_pcichild_requirements,
	hv_pcichild_resourced,
	hv_pcichild_ejecting,
	hv_pcichild_maximum
};
struct hv_pci_dev {
	/* List protected by pci_rescan_remove_lock */
	struct list_head list_entry;
	refcount_t refs;
	enum hv_pcichild_state state;
	struct pci_slot *pci_slot;
	struct hv_pcidev_description desc;
	bool reported_missing;
	struct hv_pcibus_device *hbus;
	struct work_struct wrk;

	void (*block_invalidate)(void *context, u64 block_mask);
	void *invalidate_context;

	/*
	 * What would be observed if one wrote 0xFFFFFFFF to a BAR and then
	 * read it back, for each of the BAR offsets within config space.
	 */
	u32 probed_bar[PCI_STD_NUM_BARS];
};
struct hv_pci_compl {
	struct completion host_event;
	s32 completion_status;
};

static void hv_pci_onchannelcallback(void *context);
/**
 * hv_pci_generic_compl() - Invoked for a completion packet
 * @context:		Set up by the sender of the packet.
 * @resp:		The response packet
 * @resp_packet_size:	Size in bytes of the packet
 *
 * This function is used to trigger an event and report status
 * for any message for which the completion packet contains a
 * status and nothing else.
 */
static void hv_pci_generic_compl(void *context, struct pci_response *resp,
				 int resp_packet_size)
{
	struct hv_pci_compl *comp_pkt = context;

	if (resp_packet_size >= offsetofend(struct pci_response, status))
		comp_pkt->completion_status = resp->status;
	else
		comp_pkt->completion_status = -1;

	complete(&comp_pkt->host_event);
}
static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
					     u32 wslot);

static void get_pcichild(struct hv_pci_dev *hpdev)
{
	refcount_inc(&hpdev->refs);
}

static void put_pcichild(struct hv_pci_dev *hpdev)
{
	if (refcount_dec_and_test(&hpdev->refs))
		kfree(hpdev);
}
/*
 * There is no good way to get notified from vmbus_onoffer_rescind(),
 * so let's use polling here, since this is not a hot path.
 */
static int wait_for_response(struct hv_device *hdev,
			     struct completion *comp)
{
	while (true) {
		if (hdev->channel->rescind) {
			dev_warn_once(&hdev->device, "The device is gone.\n");
			return -ENODEV;
		}

		if (wait_for_completion_timeout(comp, HZ / 10))
			break;
	}

	return 0;
}
/**
 * devfn_to_wslot() - Convert from Linux PCI slot to Windows
 * @devfn: The Linux representation of PCI slot
 *
 * Windows uses a slightly different representation of PCI slot.
 *
 * Return: The Windows representation
 */
static u32 devfn_to_wslot(int devfn)
{
	union win_slot_encoding wslot;

	wslot.slot = 0;
	wslot.bits.dev = PCI_SLOT(devfn);
	wslot.bits.func = PCI_FUNC(devfn);

	return wslot.slot;
}
/**
 * wslot_to_devfn() - Convert from Windows PCI slot to Linux
 * @wslot: The Windows representation of PCI slot
 *
 * Windows uses a slightly different representation of PCI slot.
 *
 * Return: The Linux representation
 */
static int wslot_to_devfn(u32 wslot)
{
	union win_slot_encoding slot_no;

	slot_no.slot = wslot;
	return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
}
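/*
 * Worked example (illustrative only): Linux devfn 0x0a is slot 1,
 * function 2, so devfn_to_wslot(0x0a) packs dev = 1 into bits 0-4 and
 * func = 2 into bits 5-7 of win_slot_encoding, yielding wslot 0x41;
 * wslot_to_devfn(0x41) recovers devfn 0x0a.
 */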
/*
 * PCI Configuration Space for these root PCI buses is implemented as a pair
 * of pages in memory-mapped I/O space. Writing to the first page chooses
 * the PCI function being written or read. Once the first page has been
 * written to, the following page maps in the entire configuration space of
 * the function.
 */
/**
 * _hv_pcifront_read_config() - Internal PCI config read
 * @hpdev:	The PCI driver's representation of the device
 * @where:	Offset within config space
 * @size:	Size of the transfer
 * @val:	Pointer to the buffer receiving the data
 */
static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
				     int size, u32 *val)
{
	unsigned long flags;
	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;

	/*
	 * If the attempt is to read the IDs or the ROM BAR, simulate that.
	 */
	if (where + size <= PCI_COMMAND) {
		memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size);
	} else if (where >= PCI_CLASS_REVISION && where + size <=
		   PCI_CACHE_LINE_SIZE) {
		memcpy(val, ((u8 *)&hpdev->desc.rev) + where -
		       PCI_CLASS_REVISION, size);
	} else if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <=
		   PCI_ROM_ADDRESS) {
		memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where -
		       PCI_SUBSYSTEM_VENDOR_ID, size);
	} else if (where >= PCI_ROM_ADDRESS && where + size <=
		   PCI_CAPABILITY_LIST) {
		/* ROM BARs are unimplemented */
		*val = 0;
	} else if (where >= PCI_INTERRUPT_LINE && where + size <=
		   PCI_INTERRUPT_PIN) {
		/*
		 * Interrupt Line and Interrupt PIN are hard-wired to zero
		 * because this front-end only supports message-signaled
		 * interrupts.
		 */
		*val = 0;
	} else if (where + size <= CFG_PAGE_SIZE) {
		spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
		/* Choose the function to be read. (See comment above) */
		writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
		/* Make sure the function was chosen before we start reading. */
		mb();
		/* Read from that function's config space. */
		switch (size) {
		case 1:
			*val = readb(addr);
			break;
		case 2:
			*val = readw(addr);
			break;
		default:
			*val = readl(addr);
			break;
		}
		/*
		 * Make sure the read was done before we release the spinlock
		 * allowing consecutive reads/writes.
		 */
		mb();
		spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
	} else {
		dev_err(&hpdev->hbus->hdev->device,
			"Attempt to read beyond a function's config space.\n");
	}
}
static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
{
	u16 ret;
	unsigned long flags;
	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET +
			     PCI_VENDOR_ID;

	spin_lock_irqsave(&hpdev->hbus->config_lock, flags);

	/* Choose the function to be read. (See comment above) */
	writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
	/* Make sure the function was chosen before we start reading. */
	mb();
	/* Read from that function's config space. */
	ret = readw(addr);
	/*
	 * mb() is not required here, because the spin_unlock_irqrestore()
	 * is a barrier.
	 */

	spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);

	return ret;
}
/**
 * _hv_pcifront_write_config() - Internal PCI config write
 * @hpdev:	The PCI driver's representation of the device
 * @where:	Offset within config space
 * @size:	Size of the transfer
 * @val:	The data being transferred
 */
static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,
				      int size, u32 val)
{
	unsigned long flags;
	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;

	if (where >= PCI_SUBSYSTEM_VENDOR_ID &&
	    where + size <= PCI_CAPABILITY_LIST) {
		/* SSIDs and ROM BARs are read-only */
	} else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) {
		spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
		/* Choose the function to be written. (See comment above) */
		writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
		/* Make sure the function was chosen before we start writing. */
		wmb();
		/* Write to that function's config space. */
		switch (size) {
		case 1:
			writeb(val, addr);
			break;
		case 2:
			writew(val, addr);
			break;
		default:
			writel(val, addr);
			break;
		}
		/*
		 * Make sure the write was done before we release the spinlock
		 * allowing consecutive reads/writes.
		 */
		mb();
		spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
	} else {
		dev_err(&hpdev->hbus->hdev->device,
			"Attempt to write beyond a function's config space.\n");
	}
}
/**
 * hv_pcifront_read_config() - Read configuration space
 * @bus: PCI Bus structure
 * @devfn: Device/function
 * @where: Offset from base
 * @size: Byte/word/dword
 * @val: Value to be read
 *
 * Return: PCIBIOS_SUCCESSFUL on success
 *	   PCIBIOS_DEVICE_NOT_FOUND on failure
 */
static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 *val)
{
	struct hv_pcibus_device *hbus =
		container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
	if (!hpdev)
		return PCIBIOS_DEVICE_NOT_FOUND;

	_hv_pcifront_read_config(hpdev, where, size, val);

	put_pcichild(hpdev);
	return PCIBIOS_SUCCESSFUL;
}
/**
 * hv_pcifront_write_config() - Write configuration space
 * @bus: PCI Bus structure
 * @devfn: Device/function
 * @where: Offset from base
 * @size: Byte/word/dword
 * @val: Value to be written to device
 *
 * Return: PCIBIOS_SUCCESSFUL on success
 *	   PCIBIOS_DEVICE_NOT_FOUND on failure
 */
static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
				    int where, int size, u32 val)
{
	struct hv_pcibus_device *hbus =
		container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
	if (!hpdev)
		return PCIBIOS_DEVICE_NOT_FOUND;

	_hv_pcifront_write_config(hpdev, where, size, val);

	put_pcichild(hpdev);
	return PCIBIOS_SUCCESSFUL;
}
/* PCIe operations */
static struct pci_ops hv_pcifront_ops = {
	.read  = hv_pcifront_read_config,
	.write = hv_pcifront_write_config,
};
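/*
 * These ops are invoked by the PCI core rather than called directly. For
 * instance, a config read like the sketch below (hypothetical caller)
 * lands in hv_pcifront_read_config() via this bus's pci_ops:
 *
 *	u16 vid;
 *	pci_bus_read_config_word(bus, devfn, PCI_VENDOR_ID, &vid);
 */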
/*
 * Paravirtual backchannel
 *
 * Hyper-V SR-IOV provides a backchannel mechanism in software for
 * communication between a VF driver and a PF driver. These
 * "configuration blocks" are similar in concept to PCI configuration space,
 * but instead of doing reads and writes in 32-bit chunks through a very slow
 * path, packets of up to 128 bytes can be sent or received asynchronously.
 *
 * Nearly every SR-IOV device contains just such a communications channel in
 * hardware, so using this one in software is usually optional. Using the
 * software channel, however, allows driver implementers to leverage software
 * tools that fuzz the communications channel looking for vulnerabilities.
 *
 * The usage model for these packets puts the responsibility for reading or
 * writing on the VF driver. The VF driver sends a read or a write packet,
 * indicating which "block" is being referred to by number.
 *
 * If the PF driver wishes to initiate communication, it can "invalidate" one or
 * more of the first 64 blocks. This invalidation is delivered via a callback
 * supplied by the VF driver by this driver.
 *
 * No protocol is implied, except that supplied by the PF and VF drivers.
 */
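/*
 * Illustrative sketch of the VF-driver side (hypothetical variables and
 * callback; in mainline these entry points are reached through an ops
 * table exposed to the VF driver rather than called directly):
 *
 *	u8 blk[HV_CONFIG_BLOCK_SIZE_MAX];
 *	unsigned int got;
 *
 *	if (!hv_read_config_block(pdev, blk, sizeof(blk), 0, &got))
 *		pr_info("config block 0: %u bytes\n", got);
 *	hv_register_block_invalidate(pdev, vf_ctx, vf_invalidate_cb);
 */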
struct hv_read_config_compl {
	struct hv_pci_compl comp_pkt;
	void *buf;
	unsigned int len;
	unsigned int bytes_returned;
};
/**
 * hv_pci_read_config_compl() - Invoked when a response packet
 * for a read config block operation arrives.
 * @context:		Identifies the read config operation
 * @resp:		The response packet itself
 * @resp_packet_size:	Size in bytes of the response packet
 */
static void hv_pci_read_config_compl(void *context, struct pci_response *resp,
				     int resp_packet_size)
{
	struct hv_read_config_compl *comp = context;
	struct pci_read_block_response *read_resp =
		(struct pci_read_block_response *)resp;
	unsigned int data_len, hdr_len;

	hdr_len = offsetof(struct pci_read_block_response, bytes);
	if (resp_packet_size < hdr_len) {
		comp->comp_pkt.completion_status = -1;
		goto out;
	}

	data_len = resp_packet_size - hdr_len;
	if (data_len > 0 && read_resp->status == 0) {
		comp->bytes_returned = min(comp->len, data_len);
		memcpy(comp->buf, read_resp->bytes, comp->bytes_returned);
	} else {
		comp->bytes_returned = 0;
	}

	comp->comp_pkt.completion_status = read_resp->status;
out:
	complete(&comp->comp_pkt.host_event);
}
/**
 * hv_read_config_block() - Sends a read config block request to
 * the back-end driver running in the Hyper-V parent partition.
 * @pdev:		The PCI driver's representation for this device.
 * @buf:		Buffer into which the config block will be copied.
 * @len:		Size in bytes of buf.
 * @block_id:		Identifies the config block which has been requested.
 * @bytes_returned:	Size which came back from the back-end driver.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_read_config_block(struct pci_dev *pdev, void *buf,
				unsigned int len, unsigned int block_id,
				unsigned int *bytes_returned)
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct {
		struct pci_packet pkt;
		char buf[sizeof(struct pci_read_block)];
	} pkt;
	struct hv_read_config_compl comp_pkt;
	struct pci_read_block *read_blk;
	int ret;

	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
		return -EINVAL;

	init_completion(&comp_pkt.comp_pkt.host_event);

	memset(&pkt, 0, sizeof(pkt));
	pkt.pkt.completion_func = hv_pci_read_config_compl;
	pkt.pkt.compl_ctxt = &comp_pkt;
	read_blk = (struct pci_read_block *)&pkt.pkt.message;
	read_blk->message_type.type = PCI_READ_BLOCK;
	read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
	read_blk->block_id = block_id;
	read_blk->bytes_requested = len;

	ret = vmbus_sendpacket(hbus->hdev->channel, read_blk,
			       sizeof(*read_blk), (unsigned long)&pkt.pkt,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	ret = wait_for_response(hbus->hdev, &comp_pkt.comp_pkt.host_event);
	if (ret)
		return ret;

	if (comp_pkt.comp_pkt.completion_status != 0 ||
	    comp_pkt.bytes_returned == 0) {
		dev_err(&hbus->hdev->device,
			"Read Config Block failed: 0x%x, bytes_returned=%d\n",
			comp_pkt.comp_pkt.completion_status,
			comp_pkt.bytes_returned);
		return -EIO;
	}

	*bytes_returned = comp_pkt.bytes_returned;
	return 0;
}
/**
 * hv_pci_write_config_compl() - Invoked when a response packet for a write
 * config block operation arrives.
 * @context:		Identifies the write config operation
 * @resp:		The response packet itself
 * @resp_packet_size:	Size in bytes of the response packet
 */
static void hv_pci_write_config_compl(void *context, struct pci_response *resp,
				      int resp_packet_size)
{
	struct hv_pci_compl *comp_pkt = context;

	comp_pkt->completion_status = resp->status;
	complete(&comp_pkt->host_event);
}
/**
 * hv_write_config_block() - Sends a write config block request to the
 * back-end driver running in the Hyper-V parent partition.
 * @pdev:		The PCI driver's representation for this device.
 * @buf:		Buffer from which the config block will be copied.
 * @len:		Size in bytes of buf.
 * @block_id:		Identifies the config block which is being written.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_write_config_block(struct pci_dev *pdev, void *buf,
				 unsigned int len, unsigned int block_id)
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct {
		struct pci_packet pkt;
		char buf[sizeof(struct pci_write_block)];
		u32 reserved;
	} pkt;
	struct hv_pci_compl comp_pkt;
	struct pci_write_block *write_blk;
	u32 pkt_size;
	int ret;

	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
		return -EINVAL;

	init_completion(&comp_pkt.host_event);

	memset(&pkt, 0, sizeof(pkt));
	pkt.pkt.completion_func = hv_pci_write_config_compl;
	pkt.pkt.compl_ctxt = &comp_pkt;
	write_blk = (struct pci_write_block *)&pkt.pkt.message;
	write_blk->message_type.type = PCI_WRITE_BLOCK;
	write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
	write_blk->block_id = block_id;
	write_blk->byte_count = len;
	memcpy(write_blk->bytes, buf, len);
	pkt_size = offsetof(struct pci_write_block, bytes) + len;
	/*
	 * This quirk is required on some hosts shipped around 2018, because
	 * these hosts don't check the pkt_size correctly (new hosts have been
	 * fixed since early 2019). The quirk is also safe on very old hosts
	 * and new hosts, because, on them, what really matters is the length
	 * specified in write_blk->byte_count.
	 */
	pkt_size += sizeof(pkt.reserved);

	ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size,
			       (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	ret = wait_for_response(hbus->hdev, &comp_pkt.host_event);
	if (ret)
		return ret;

	if (comp_pkt.completion_status != 0) {
		dev_err(&hbus->hdev->device,
			"Write Config Block failed: 0x%x\n",
			comp_pkt.completion_status);
		return -EIO;
	}

	return 0;
}
/**
 * hv_register_block_invalidate() - Invoked when a config block invalidation
 * arrives from the back-end driver.
 * @pdev:		The PCI driver's representation for this device.
 * @context:		Identifies the device.
 * @block_invalidate:	Identifies all of the blocks being invalidated.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_register_block_invalidate(struct pci_dev *pdev, void *context,
					void (*block_invalidate)(void *context,
								 u64 block_mask))
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
	if (!hpdev)
		return -ENODEV;

	hpdev->block_invalidate = block_invalidate;
	hpdev->invalidate_context = context;

	put_pcichild(hpdev);
	return 0;
}
/* Interrupt management hooks */
static void hv_int_desc_free(struct hv_pci_dev *hpdev,
			     struct tran_int_desc *int_desc)
{
	struct pci_delete_interrupt *int_pkt;
	struct {
		struct pci_packet pkt;
		u8 buffer[sizeof(struct pci_delete_interrupt)];
	} ctxt;

	memset(&ctxt, 0, sizeof(ctxt));
	int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
	int_pkt->message_type.type = PCI_DELETE_INTERRUPT_MESSAGE;
	int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
	int_pkt->int_desc = *int_desc;
	vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt),
			 (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND, 0);
	kfree(int_desc);
}
/**
 * hv_msi_free() - Free the MSI.
 * @domain:	The interrupt domain pointer
 * @info:	Extra MSI-related context
 * @irq:	Identifies the IRQ.
 *
 * The Hyper-V parent partition and hypervisor are tracking the
 * messages that are in use, keeping the interrupt redirection
 * table up to date. This callback sends a message that frees
 * the IRT entry and related tracking nonsense.
 */
static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
			unsigned int irq)
{
	struct hv_pcibus_device *hbus;
	struct hv_pci_dev *hpdev;
	struct pci_dev *pdev;
	struct tran_int_desc *int_desc;
	struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq);
	struct msi_desc *msi = irq_data_get_msi_desc(irq_data);

	pdev = msi_desc_to_pci_dev(msi);
	hbus = info->data;
	int_desc = irq_data_get_irq_chip_data(irq_data);
	if (!int_desc)
		return;

	irq_data->chip_data = NULL;
	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
	if (!hpdev) {
		kfree(int_desc);
		return;
	}

	hv_int_desc_free(hpdev, int_desc);
	put_pcichild(hpdev);
}
static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest,
			   bool force)
{
	struct irq_data *parent = data->parent_data;

	return parent->chip->irq_set_affinity(parent, dest, force);
}
static void hv_irq_mask(struct irq_data *data)
{
	pci_msi_mask_irq(data);
}
/**
 * hv_irq_unmask() - "Unmask" the IRQ by setting its current
 * affinity.
 * @data:	Describes the IRQ
 *
 * Build a new destination for the MSI and make a hypercall to
 * update the Interrupt Redirection Table. "Device Logical ID"
 * is built out of this PCI bus's instance GUID and the function
 * number of the device.
 */
static void hv_irq_unmask(struct irq_data *data)
{
	struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
	struct irq_cfg *cfg = irqd_cfg(data);
	struct hv_retarget_device_interrupt *params;
	struct hv_pcibus_device *hbus;
	struct cpumask *dest;
	cpumask_var_t tmp;
	struct pci_bus *pbus;
	struct pci_dev *pdev;
	unsigned long flags;
	u32 var_size = 0;
	int cpu, nr_bank;
	u64 res;

	dest = irq_data_get_effective_affinity_mask(data);
	pdev = msi_desc_to_pci_dev(msi_desc);
	pbus = pdev->bus;
	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);

	spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);

	params = &hbus->retarget_msi_interrupt_params;
	memset(params, 0, sizeof(*params));
	params->partition_id = HV_PARTITION_ID_SELF;
	params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
	hv_set_msi_entry_from_desc(&params->int_entry.msi_entry, msi_desc);
	params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
			   (hbus->hdev->dev_instance.b[4] << 16) |
			   (hbus->hdev->dev_instance.b[7] << 8) |
			   (hbus->hdev->dev_instance.b[6] & 0xf8) |
			   PCI_FUNC(pdev->devfn);
	params->int_target.vector = cfg->vector;

	/*
	 * Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by
	 * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
	 * spurious interrupt storm. Not doing so does not seem to have a
	 * negative effect (yet?).
	 */

	if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
		/*
		 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
		 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
		 * with >64 VP support.
		 * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
		 * is not sufficient for this hypercall.
		 */
		params->int_target.flags |=
			HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;

		if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
			res = 1;
			goto exit_unlock;
		}

		cpumask_and(tmp, dest, cpu_online_mask);
		nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
		free_cpumask_var(tmp);

		if (nr_bank <= 0) {
			res = 1;
			goto exit_unlock;
		}

		/*
		 * var-sized hypercall, var-size starts after vp_mask (thus
		 * vp_set.format does not count, but vp_set.valid_bank_mask
		 * does).
		 */
		var_size = 1 + nr_bank;
	} else {
		for_each_cpu_and(cpu, dest, cpu_online_mask) {
			params->int_target.vp_mask |=
				(1ULL << hv_cpu_number_to_vp_number(cpu));
		}
	}

	res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
			      params, NULL);

exit_unlock:
	spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);

	/*
	 * During hibernation, when a CPU is offlined, the kernel tries
	 * to move the interrupt to the remaining CPUs that haven't
	 * been offlined yet. In this case, the hv_do_hypercall() above
	 * always fails since the vmbus channel has been closed:
	 * refer to cpu_disable_common() -> fixup_irqs() ->
	 * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
	 *
	 * Suppress the error message for hibernation because the failure
	 * during hibernation does not matter (at this time all the devices
	 * have been frozen). Note: the correct affinity info is still updated
	 * into the irqdata data structure in migrate_one_irq() ->
	 * irq_do_set_affinity() -> hv_set_affinity(), so later when the VM
	 * resumes, hv_pci_restore_msi_state() is able to correctly restore
	 * the interrupt with the correct affinity.
	 */
	if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
		dev_err(&hbus->hdev->device,
			"%s() failed: %#llx", __func__, res);

	pci_msi_unmask_irq(data);
}
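/*
 * Illustrative example of the device_id composed above: if the VMBus
 * instance GUID bytes were b[4] = 0x22, b[5] = 0x11, b[6] = 0x33 and
 * b[7] = 0x44 (hypothetical values), a function 0 device would get
 * (0x11 << 24) | (0x22 << 16) | (0x44 << 8) | (0x33 & 0xf8) == 0x11224430.
 */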
struct compose_comp_ctxt {
	struct hv_pci_compl comp_pkt;
	struct tran_int_desc int_desc;
};

static void hv_pci_compose_compl(void *context, struct pci_response *resp,
				 int resp_packet_size)
{
	struct compose_comp_ctxt *comp_pkt = context;
	struct pci_create_int_response *int_resp =
		(struct pci_create_int_response *)resp;

	comp_pkt->comp_pkt.completion_status = resp->status;
	comp_pkt->int_desc = int_resp->int_desc;
	complete(&comp_pkt->comp_pkt.host_event);
}
static u32 hv_compose_msi_req_v1(
	struct pci_create_interrupt *int_pkt, struct cpumask *affinity,
	u32 slot, u8 vector)
{
	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
	int_pkt->wslot.slot = slot;
	int_pkt->int_desc.vector = vector;
	int_pkt->int_desc.vector_count = 1;
	int_pkt->int_desc.delivery_mode = APIC_DELIVERY_MODE_FIXED;

	/*
	 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
	 * hv_irq_unmask().
	 */
	int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;

	return sizeof(*int_pkt);
}
/*
 * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
 * by subsequent retarget in hv_irq_unmask().
 */
static int hv_compose_msi_req_get_cpu(struct cpumask *affinity)
{
	return cpumask_first_and(affinity, cpu_online_mask);
}
static u32 hv_compose_msi_req_v2(
	struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
	u32 slot, u8 vector)
{
	int cpu;

	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
	int_pkt->wslot.slot = slot;
	int_pkt->int_desc.vector = vector;
	int_pkt->int_desc.vector_count = 1;
	int_pkt->int_desc.delivery_mode = APIC_DELIVERY_MODE_FIXED;
	cpu = hv_compose_msi_req_get_cpu(affinity);
	int_pkt->int_desc.processor_array[0] =
		hv_cpu_number_to_vp_number(cpu);
	int_pkt->int_desc.processor_count = 1;

	return sizeof(*int_pkt);
}
static u32 hv_compose_msi_req_v3(
	struct pci_create_interrupt3 *int_pkt, struct cpumask *affinity,
	u32 slot, u32 vector)
{
	int cpu;

	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3;
	int_pkt->wslot.slot = slot;
	int_pkt->int_desc.vector = vector;
	int_pkt->int_desc.reserved = 0;
	int_pkt->int_desc.vector_count = 1;
	int_pkt->int_desc.delivery_mode = APIC_DELIVERY_MODE_FIXED;
	cpu = hv_compose_msi_req_get_cpu(affinity);
	int_pkt->int_desc.processor_array[0] =
		hv_cpu_number_to_vp_number(cpu);
	int_pkt->int_desc.processor_count = 1;

	return sizeof(*int_pkt);
}
/**
 * hv_compose_msi_msg() - Supplies a valid MSI address/data
 * @data:	Everything about this MSI
 * @msg:	Buffer that is filled in by this function
 *
 * This function unpacks the IRQ looking for target CPU set, IDT
 * vector and mode and sends a message to the parent partition
 * asking for a mapping for that tuple in this partition. The
 * response supplies a data value and address to which that data
 * should be written to trigger that interrupt.
 */
static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_cfg *cfg = irqd_cfg(data);
	struct hv_pcibus_device *hbus;
	struct vmbus_channel *channel;
	struct hv_pci_dev *hpdev;
	struct pci_bus *pbus;
	struct pci_dev *pdev;
	struct cpumask *dest;
	struct compose_comp_ctxt comp;
	struct tran_int_desc *int_desc;
	struct {
		struct pci_packet pci_pkt;
		union {
			struct pci_create_interrupt v1;
			struct pci_create_interrupt2 v2;
			struct pci_create_interrupt3 v3;
		} int_pkts;
	} __packed ctxt;

	u32 size;
	int ret;

	pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));
	dest = irq_data_get_effective_affinity_mask(data);
	pbus = pdev->bus;
	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
	channel = hbus->hdev->channel;
	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
	if (!hpdev)
		goto return_null_message;

	/* Free any previous message that might have already been composed. */
	if (data->chip_data) {
		int_desc = data->chip_data;
		data->chip_data = NULL;
		hv_int_desc_free(hpdev, int_desc);
	}

	int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
	if (!int_desc)
		goto drop_reference;

	memset(&ctxt, 0, sizeof(ctxt));
	init_completion(&comp.comp_pkt.host_event);
	ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
	ctxt.pci_pkt.compl_ctxt = &comp;

	switch (hbus->protocol_version) {
	case PCI_PROTOCOL_VERSION_1_1:
		size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
					dest,
					hpdev->desc.win_slot.slot,
					cfg->vector);
		break;

	case PCI_PROTOCOL_VERSION_1_2:
	case PCI_PROTOCOL_VERSION_1_3:
		size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
					dest,
					hpdev->desc.win_slot.slot,
					cfg->vector);
		break;

	case PCI_PROTOCOL_VERSION_1_4:
		size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
					dest,
					hpdev->desc.win_slot.slot,
					cfg->vector);
		break;

	default:
		/* As we only negotiate protocol versions known to this driver,
		 * this path should never hit. However, this is not a hot
		 * path so we print a message to aid future updates.
		 */
		dev_err(&hbus->hdev->device,
			"Unexpected vPCI protocol, update driver.");
		goto free_int_desc;
	}

	ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
			       size, (unsigned long)&ctxt.pci_pkt,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret) {
		dev_err(&hbus->hdev->device,
			"Sending request for interrupt failed: 0x%x",
			comp.comp_pkt.completion_status);
		goto free_int_desc;
	}

	/*
	 * Prevents hv_pci_onchannelcallback() from running concurrently
	 * in the tasklet.
	 */
	tasklet_disable_in_atomic(&channel->callback_event);

	/*
	 * Since this function is called with IRQ locks held, can't
	 * do normal wait for completion; instead poll.
	 */
	while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
		unsigned long flags;

		/* 0xFFFF means an invalid PCI VENDOR ID. */
		if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
			dev_err_once(&hbus->hdev->device,
				     "the device has gone\n");
			goto enable_tasklet;
		}

		/*
		 * Make sure that the ring buffer data structure doesn't get
		 * freed while we dereference the ring buffer pointer. Test
		 * for the channel's onchannel_callback being NULL within a
		 * sched_lock critical section. See also the inline comments
		 * in vmbus_reset_channel_cb().
		 */
		spin_lock_irqsave(&channel->sched_lock, flags);
		if (unlikely(channel->onchannel_callback == NULL)) {
			spin_unlock_irqrestore(&channel->sched_lock, flags);
			goto enable_tasklet;
		}
		hv_pci_onchannelcallback(hbus);
		spin_unlock_irqrestore(&channel->sched_lock, flags);

		if (hpdev->state == hv_pcichild_ejecting) {
			dev_err_once(&hbus->hdev->device,
				     "the device is being ejected\n");
			goto enable_tasklet;
		}

		udelay(100);
	}

	tasklet_enable(&channel->callback_event);

	if (comp.comp_pkt.completion_status < 0) {
		dev_err(&hbus->hdev->device,
			"Request for interrupt failed: 0x%x",
			comp.comp_pkt.completion_status);
		goto free_int_desc;
	}

	/*
	 * Record the assignment so that this can be unwound later. Using
	 * irq_set_chip_data() here would be appropriate, but the lock it takes
	 * is already held.
	 */
	*int_desc = comp.int_desc;
	data->chip_data = int_desc;

	/* Pass up the result. */
	msg->address_hi = comp.int_desc.address >> 32;
	msg->address_lo = comp.int_desc.address & 0xffffffff;
	msg->data = comp.int_desc.data;

	put_pcichild(hpdev);
	return;

enable_tasklet:
	tasklet_enable(&channel->callback_event);
free_int_desc:
	kfree(int_desc);
drop_reference:
	put_pcichild(hpdev);
return_null_message:
	msg->address_hi = 0;
	msg->address_lo = 0;
	msg->data = 0;
}
/* HW Interrupt Chip Descriptor */
static struct irq_chip hv_msi_irq_chip = {
	.name			= "Hyper-V PCIe MSI",
	.irq_compose_msi_msg	= hv_compose_msi_msg,
	.irq_set_affinity	= hv_set_affinity,
	.irq_ack		= irq_chip_ack_parent,
	.irq_mask		= hv_irq_mask,
	.irq_unmask		= hv_irq_unmask,
};

static struct msi_domain_ops hv_msi_ops = {
	.msi_prepare	= pci_msi_prepare,
	.msi_free	= hv_msi_free,
};
/**
 * hv_pcie_init_irq_domain() - Initialize IRQ domain
 * @hbus:	The root PCI bus
 *
 * This function creates an IRQ domain which will be used for
 * interrupts from devices that have been passed through. These
 * devices only support MSI and MSI-X, not line-based interrupts
 * or simulations of line-based interrupts through PCIe's
 * fabric-layer messages. Because interrupts are remapped, we
 * can support multi-message MSI here.
 *
 * Return: '0' on success and error value on failure
 */
static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
{
	hbus->msi_info.chip = &hv_msi_irq_chip;
	hbus->msi_info.ops = &hv_msi_ops;
	hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
		MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
		MSI_FLAG_PCI_MSIX);
	hbus->msi_info.handler = handle_edge_irq;
	hbus->msi_info.handler_name = "edge";
	hbus->msi_info.data = hbus;
	hbus->irq_domain = pci_msi_create_irq_domain(hbus->fwnode,
						     &hbus->msi_info,
						     x86_vector_domain);
	if (!hbus->irq_domain) {
		dev_err(&hbus->hdev->device,
			"Failed to build an MSI IRQ domain\n");
		return -ENODEV;
	}

	dev_set_msi_domain(&hbus->bridge->dev, hbus->irq_domain);

	return 0;
}
/**
 * get_bar_size() - Get the address space consumed by a BAR
 * @bar_val:	Value that a BAR returned after -1 was written
 *		to it.
 *
 * This function returns the size of the BAR, rounded up to 1
 * page. It has to be rounded up because the hypervisor's page
 * table entry that maps the BAR into the VM can't specify an
 * offset within a page. The invariant is that the hypervisor
 * must place any BARs smaller than a page at the beginning of
 * a page.
 *
 * Return:	Size in bytes of the consumed MMIO space.
 */
static u64 get_bar_size(u64 bar_val)
{
	return round_up((1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK)),
			PAGE_SIZE);
}
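/*
 * Worked example (illustrative only): a 32-bit memory BAR that reads back
 * 0xfffff000 after all-ones was written is first widened by the callers
 * below to 0xfffffffffffff000, so 1 + ~(bar_val & mask) == 0x1000 and the
 * BAR consumes one 4 KiB page.
 */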
/**
 * survey_child_resources() - Total all MMIO requirements
 * @hbus:	Root PCI bus, as understood by this driver
 */
static void survey_child_resources(struct hv_pcibus_device *hbus)
{
	struct hv_pci_dev *hpdev;
	resource_size_t bar_size = 0;
	unsigned long flags;
	struct completion *event;
	u64 bar_val;
	int i;

	/* If nobody is waiting on the answer, don't compute it. */
	event = xchg(&hbus->survey_event, NULL);
	if (!event)
		return;

	/* If the answer has already been computed, go with it. */
	if (hbus->low_mmio_space || hbus->high_mmio_space) {
		complete(event);
		return;
	}

	spin_lock_irqsave(&hbus->device_list_lock, flags);

	/*
	 * Due to an interesting quirk of the PCI spec, all memory regions
	 * for a child device are a power of 2 in size and aligned in memory,
	 * so it's sufficient to just add them up without tracking alignment.
	 */
	list_for_each_entry(hpdev, &hbus->children, list_entry) {
		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
			if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
				dev_err(&hbus->hdev->device,
					"There's an I/O BAR in this list!\n");

			if (hpdev->probed_bar[i] != 0) {
				/*
				 * A probed BAR has all the upper bits set that
				 * can be changed.
				 */

				bar_val = hpdev->probed_bar[i];
				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
					bar_val |=
					((u64)hpdev->probed_bar[++i] << 32);
				else
					bar_val |= 0xffffffff00000000ULL;

				bar_size = get_bar_size(bar_val);

				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
					hbus->high_mmio_space += bar_size;
				else
					hbus->low_mmio_space += bar_size;
			}
		}
	}

	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
	complete(event);
}
/**
 * prepopulate_bars() - Fill in BARs with defaults
 * @hbus:	Root PCI bus, as understood by this driver
 *
 * The core PCI driver code seems much, much happier if the BARs
 * for a device have values upon first scan. So fill them in.
 * The algorithm below works down from large sizes to small,
 * attempting to pack the assignments optimally. The assumption,
 * enforced in other parts of the code, is that the beginning of
 * the memory-mapped I/O space will be aligned on the largest
 * BAR size.
 */
static void prepopulate_bars(struct hv_pcibus_device *hbus)
{
	resource_size_t high_size = 0;
	resource_size_t low_size = 0;
	resource_size_t high_base = 0;
	resource_size_t low_base = 0;
	resource_size_t bar_size;
	struct hv_pci_dev *hpdev;
	unsigned long flags;
	u64 bar_val;
	u32 command;
	bool high;
	int i;

	if (hbus->low_mmio_space) {
		low_size = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
		low_base = hbus->low_mmio_res->start;
	}

	if (hbus->high_mmio_space) {
		high_size = 1ULL <<
			(63 - __builtin_clzll(hbus->high_mmio_space));
		high_base = hbus->high_mmio_res->start;
	}

	spin_lock_irqsave(&hbus->device_list_lock, flags);

	/*
	 * Clear the memory enable bit, in case it's already set. This occurs
	 * in the suspend path of hibernation, where the device is suspended,
	 * resumed and suspended again: see hibernation_snapshot() and
	 * hibernation_platform_enter().
	 *
	 * If the memory enable bit is already set, Hyper-V silently ignores
	 * the below BAR updates, and the related PCI device driver can not
	 * work, because reading from the device register(s) always returns
	 * 0xFFFFFFFF.
	 */
	list_for_each_entry(hpdev, &hbus->children, list_entry) {
		_hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command);
		command &= ~PCI_COMMAND_MEMORY;
		_hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, command);
	}

	/* Pick addresses for the BARs. */
	do {
		list_for_each_entry(hpdev, &hbus->children, list_entry) {
			for (i = 0; i < PCI_STD_NUM_BARS; i++) {
				bar_val = hpdev->probed_bar[i];
				if (bar_val == 0)
					continue;
				high = bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64;
				if (high) {
					bar_val |=
						((u64)hpdev->probed_bar[i + 1]
						 << 32);
				} else {
					bar_val |= 0xffffffffULL << 32;
				}
				bar_size = get_bar_size(bar_val);
				if (high) {
					if (high_size != bar_size) {
						i++;
						continue;
					}
					_hv_pcifront_write_config(hpdev,
						PCI_BASE_ADDRESS_0 + (4 * i),
						4,
						(u32)(high_base & 0xffffff00));
					i++;
					_hv_pcifront_write_config(hpdev,
						PCI_BASE_ADDRESS_0 + (4 * i),
						4, (u32)(high_base >> 32));
					high_base += bar_size;
				} else {
					if (low_size != bar_size)
						continue;
					_hv_pcifront_write_config(hpdev,
						PCI_BASE_ADDRESS_0 + (4 * i),
						4,
						(u32)(low_base & 0xffffff00));
					low_base += bar_size;
				}
			}
			if (high_size <= 1 && low_size <= 1) {
				/* Set the memory enable bit. */
				_hv_pcifront_read_config(hpdev, PCI_COMMAND, 2,
							 &command);
				command |= PCI_COMMAND_MEMORY;
				_hv_pcifront_write_config(hpdev, PCI_COMMAND, 2,
							  command);
				break;
			}
		}

		high_size >>= 1;
		low_size >>= 1;
	} while (high_size || low_size);

	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
}
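/*
 * Worked example for the sizing pass above (illustrative only): with
 * low_mmio_space == 0x6000 (a 16 KiB BAR plus an 8 KiB BAR), low_size
 * starts at 1ULL << (63 - __builtin_clzll(0x6000)) == 0x4000, so the
 * 16 KiB BAR is placed on the first pass and the 8 KiB BAR on the next,
 * keeping each BAR naturally aligned.
 */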
/*
 * Assign entries in sysfs pci slot directory.
 *
 * Note that this function does not need to lock the children list
 * because it is called from pci_devices_present_work which
 * is serialized with hv_eject_device_work because they are on the
 * same ordered workqueue. Therefore hbus->children list will not change
 * even when pci_create_slot sleeps.
 */
static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
{
	struct hv_pci_dev *hpdev;
	char name[SLOT_NAME_SIZE];
	int slot_nr;

	list_for_each_entry(hpdev, &hbus->children, list_entry) {
		if (hpdev->pci_slot)
			continue;

		slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
		snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
		hpdev->pci_slot = pci_create_slot(hbus->bridge->bus, slot_nr,
					  name, NULL);
		if (IS_ERR(hpdev->pci_slot)) {
			pr_warn("pci_create slot %s failed\n", name);
			hpdev->pci_slot = NULL;
		}
	}
}
/*
 * Remove entries in sysfs pci slot directory.
 */
static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
{
	struct hv_pci_dev *hpdev;

	list_for_each_entry(hpdev, &hbus->children, list_entry) {
		if (!hpdev->pci_slot)
			continue;
		pci_destroy_slot(hpdev->pci_slot);
		hpdev->pci_slot = NULL;
	}
}
/*
 * Set NUMA node for the devices on the bus
 */
static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
{
	struct pci_dev *dev;
	struct pci_bus *bus = hbus->bridge->bus;
	struct hv_pci_dev *hv_dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		hv_dev = get_pcichild_wslot(hbus, devfn_to_wslot(dev->devfn));
		if (!hv_dev)
			continue;

		if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
		    hv_dev->desc.virtual_numa_node < num_possible_nodes())
			/*
			 * The kernel may boot with some NUMA nodes offline
			 * (e.g. in a KDUMP kernel) or with NUMA disabled via
			 * "numa=off". In those cases, adjust the host provided
			 * NUMA node to a valid NUMA node used by the kernel.
			 */
			set_dev_node(&dev->dev,
				     numa_map_to_online_node(
					     hv_dev->desc.virtual_numa_node));

		put_pcichild(hv_dev);
	}
}
/**
 * create_root_hv_pci_bus() - Expose a new root PCI bus
 * @hbus:	Root PCI bus, as understood by this driver
 *
 * Return: 0 on success, -errno on failure
 */
static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
{
	int error;
	struct pci_host_bridge *bridge = hbus->bridge;

	bridge->dev.parent = &hbus->hdev->device;
	bridge->sysdata = &hbus->sysdata;
	bridge->ops = &hv_pcifront_ops;

	error = pci_scan_root_bus_bridge(bridge);
	if (error)
		return error;

	pci_lock_rescan_remove();
	hv_pci_assign_numa_node(hbus);
	pci_bus_assign_resources(bridge->bus);
	hv_pci_assign_slots(hbus);
	pci_bus_add_devices(bridge->bus);
	pci_unlock_rescan_remove();
	hbus->state = hv_pcibus_installed;
	return 0;
}
struct q_res_req_compl {
	struct completion host_event;
	struct hv_pci_dev *hpdev;
};
/**
 * q_resource_requirements() - Query Resource Requirements
 * @context:		The completion context.
 * @resp:		The response that came from the host.
 * @resp_packet_size:	The size in bytes of resp.
 *
 * This function is invoked on completion of a Query Resource
 * Requirements packet.
 */
static void q_resource_requirements(void *context, struct pci_response *resp,
				    int resp_packet_size)
{
	struct q_res_req_compl *completion = context;
	struct pci_q_res_req_response *q_res_req =
		(struct pci_q_res_req_response *)resp;
	int i;

	if (resp->status < 0) {
		dev_err(&completion->hpdev->hbus->hdev->device,
			"query resource requirements failed: %x\n",
			resp->status);
	} else {
		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
			completion->hpdev->probed_bar[i] =
				q_res_req->probed_bar[i];
		}
	}

	complete(&completion->host_event);
}
/**
 * new_pcichild_device() - Create a new child device
 * @hbus:	The internal struct tracking this root PCI bus.
 * @desc:	The information supplied so far from the host
 *		about the device.
 *
 * This function creates the tracking structure for a new child
 * device and kicks off the process of figuring out what it is.
 *
 * Return: Pointer to the new tracking struct
 */
static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
					      struct hv_pcidev_description *desc)
{
	struct hv_pci_dev *hpdev;
	struct pci_child_message *res_req;
	struct q_res_req_compl comp_pkt;
	struct {
		struct pci_packet init_packet;
		u8 buffer[sizeof(struct pci_child_message)];
	} pkt;
	unsigned long flags;
	int ret;

	hpdev = kzalloc(sizeof(*hpdev), GFP_KERNEL);
	if (!hpdev)
		return NULL;

	hpdev->hbus = hbus;

	memset(&pkt, 0, sizeof(pkt));
	init_completion(&comp_pkt.host_event);
	comp_pkt.hpdev = hpdev;
	pkt.init_packet.compl_ctxt = &comp_pkt;
	pkt.init_packet.completion_func = q_resource_requirements;
	res_req = (struct pci_child_message *)&pkt.init_packet.message;
	res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
	res_req->wslot.slot = desc->win_slot.slot;

	ret = vmbus_sendpacket(hbus->hdev->channel, res_req,
			       sizeof(struct pci_child_message),
			       (unsigned long)&pkt.init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		goto error;

	if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
		goto error;

	hpdev->desc = *desc;
	refcount_set(&hpdev->refs, 1);
	get_pcichild(hpdev);
	spin_lock_irqsave(&hbus->device_list_lock, flags);

	list_add_tail(&hpdev->list_entry, &hbus->children);
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
	return hpdev;

error:
	kfree(hpdev);
	return NULL;
}
2048 * get_pcichild_wslot() - Find device from slot
2049 * @hbus: Root PCI bus, as understood by this driver
2050 * @wslot: Location on the bus
2052 * This function looks up a PCI device and returns the internal
2053 * representation of it. It acquires a reference on it, so that
2054 * the device won't be deleted while somebody is using it. The
2055 * caller is responsible for calling put_pcichild() to release
2058 * Return: Internal representation of a PCI device
static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
					     u32 wslot)
{
	unsigned long flags;
	struct hv_pci_dev *iter, *hpdev = NULL;

	spin_lock_irqsave(&hbus->device_list_lock, flags);
	list_for_each_entry(iter, &hbus->children, list_entry) {
		if (iter->desc.win_slot.slot == wslot) {
			hpdev = iter;
			get_pcichild(hpdev);
			break;
		}
	}
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	return hpdev;
}
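/*
 * Minimal usage sketch for the lookup helper (illustrative only, not an
 * actual caller in this file):
 *
 *	hpdev = get_pcichild_wslot(hbus, wslot);
 *	if (hpdev) {
 *		... use hpdev->desc, hpdev->probed_bar[], etc. ...
 *		put_pcichild(hpdev);
 *	}
 *
 * Skipping the put_pcichild() would keep the tracking structure alive
 * after the host ejects the device.
 */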
/**
 * pci_devices_present_work() - Handle new list of child devices
 * @work:	Work struct embedded in struct hv_dr_work
 *
 * "Bus Relations" is the Windows term for "children of this
 * bus."  The terminology is preserved here for people trying to
 * debug the interaction between Hyper-V and Linux.  This
 * function is called when the parent partition reports a list
 * of functions that should be observed under this PCI Express
 * port (bus).
 *
 * This function updates the list, and must tolerate being
 * called multiple times with the same information.  The typical
 * number of child devices is one, with very atypical cases
 * involving three or four, so the algorithms used here can be
 * simple and inefficient.
 *
 * It must also treat the omission of a previously observed device as
 * notification that the device no longer exists.
 *
 * Note that this function is serialized with hv_eject_device_work(),
 * because both are pushed to the ordered workqueue hbus->wq.
 */
static void pci_devices_present_work(struct work_struct *work)
{
	u32 child_no;
	bool found;
	struct hv_pcidev_description *new_desc;
	struct hv_pci_dev *hpdev;
	struct hv_pcibus_device *hbus;
	struct list_head removed;
	struct hv_dr_work *dr_wrk;
	struct hv_dr_state *dr = NULL;
	unsigned long flags;

	dr_wrk = container_of(work, struct hv_dr_work, wrk);
	hbus = dr_wrk->bus;
	kfree(dr_wrk);

	INIT_LIST_HEAD(&removed);

	/* Pull this off the queue and process it if it was the last one. */
	spin_lock_irqsave(&hbus->device_list_lock, flags);
	while (!list_empty(&hbus->dr_list)) {
		dr = list_first_entry(&hbus->dr_list, struct hv_dr_state,
				      list_entry);
		list_del(&dr->list_entry);

		/* Throw this away if the list still has stuff in it. */
		if (!list_empty(&hbus->dr_list)) {
			kfree(dr);
			continue;
		}
	}
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	if (!dr)
		return;

	/* First, mark all existing children as reported missing. */
	spin_lock_irqsave(&hbus->device_list_lock, flags);
	list_for_each_entry(hpdev, &hbus->children, list_entry) {
		hpdev->reported_missing = true;
	}
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	/* Next, add back any reported devices. */
	for (child_no = 0; child_no < dr->device_count; child_no++) {
		found = false;
		new_desc = &dr->func[child_no];

		spin_lock_irqsave(&hbus->device_list_lock, flags);
		list_for_each_entry(hpdev, &hbus->children, list_entry) {
			if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) &&
			    (hpdev->desc.v_id == new_desc->v_id) &&
			    (hpdev->desc.d_id == new_desc->d_id) &&
			    (hpdev->desc.ser == new_desc->ser)) {
				hpdev->reported_missing = false;
				found = true;
			}
		}
		spin_unlock_irqrestore(&hbus->device_list_lock, flags);

		if (!found) {
			hpdev = new_pcichild_device(hbus, new_desc);
			if (!hpdev)
				dev_err(&hbus->hdev->device,
					"couldn't record a child device.\n");
		}
	}

	/* Move missing children to a list on the stack. */
	spin_lock_irqsave(&hbus->device_list_lock, flags);
	do {
		found = false;
		list_for_each_entry(hpdev, &hbus->children, list_entry) {
			if (hpdev->reported_missing) {
				found = true;
				put_pcichild(hpdev);
				list_move_tail(&hpdev->list_entry, &removed);
				break;
			}
		}
	} while (found);
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	/* Delete everything that should no longer exist. */
	while (!list_empty(&removed)) {
		hpdev = list_first_entry(&removed, struct hv_pci_dev,
					 list_entry);
		list_del(&hpdev->list_entry);

		if (hpdev->pci_slot)
			pci_destroy_slot(hpdev->pci_slot);

		put_pcichild(hpdev);
	}

	switch (hbus->state) {
	case hv_pcibus_installed:
		/*
		 * Tell the core to rescan bus
		 * because there may have been changes.
		 */
		pci_lock_rescan_remove();
		pci_scan_child_bus(hbus->bridge->bus);
		hv_pci_assign_numa_node(hbus);
		hv_pci_assign_slots(hbus);
		pci_unlock_rescan_remove();
		break;

	case hv_pcibus_init:
	case hv_pcibus_probed:
		survey_child_resources(hbus);
		break;

	default:
		break;
	}

	kfree(dr);
}
/**
 * hv_pci_start_relations_work() - Queue work to start device discovery
 * @hbus:	Root PCI bus, as understood by this driver
 * @dr:		The list of children returned from host
 *
 * Return:  0 on success, -errno on failure
 */
static int hv_pci_start_relations_work(struct hv_pcibus_device *hbus,
				       struct hv_dr_state *dr)
{
	struct hv_dr_work *dr_wrk;
	unsigned long flags;
	bool pending_dr;

	if (hbus->state == hv_pcibus_removing) {
		dev_info(&hbus->hdev->device,
			 "PCI VMBus BUS_RELATIONS: ignored\n");
		return -ENOENT;
	}

	dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
	if (!dr_wrk)
		return -ENOMEM;

	INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);
	dr_wrk->bus = hbus;

	spin_lock_irqsave(&hbus->device_list_lock, flags);
	/*
	 * If pending_dr is true, we have already queued a work,
	 * which will see the new dr. Otherwise, we need to
	 * queue a new work.
	 */
	pending_dr = !list_empty(&hbus->dr_list);
	list_add_tail(&dr->list_entry, &hbus->dr_list);
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	if (pending_dr)
		kfree(dr_wrk);
	else
		queue_work(hbus->wq, &dr_wrk->wrk);

	return 0;
}
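/*
 * hbus->dr_list is a coalescing queue: every BUS_RELATIONS message adds an
 * hv_dr_state, but a work item is queued only when the list was previously
 * empty.  pci_devices_present_work() then discards all but the newest
 * entry, so a burst of relations messages collapses into one rescan based
 * on the most recent child list.
 */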
/**
 * hv_pci_devices_present() - Handle list of new children
 * @hbus:	Root PCI bus, as understood by this driver
 * @relations:	Packet from host listing children
 *
 * Process a new list of devices on the bus. The list of devices is
 * discovered by VSP and sent to us via VSP message PCI_BUS_RELATIONS,
 * whenever a new list of devices for this bus appears.
 */
static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
				   struct pci_bus_relations *relations)
{
	struct hv_dr_state *dr;
	int i;

	dr = kzalloc(struct_size(dr, func, relations->device_count),
		     GFP_NOWAIT);
	if (!dr)
		return;

	dr->device_count = relations->device_count;
	for (i = 0; i < dr->device_count; i++) {
		dr->func[i].v_id = relations->func[i].v_id;
		dr->func[i].d_id = relations->func[i].d_id;
		dr->func[i].rev = relations->func[i].rev;
		dr->func[i].prog_intf = relations->func[i].prog_intf;
		dr->func[i].subclass = relations->func[i].subclass;
		dr->func[i].base_class = relations->func[i].base_class;
		dr->func[i].subsystem_id = relations->func[i].subsystem_id;
		dr->func[i].win_slot = relations->func[i].win_slot;
		dr->func[i].ser = relations->func[i].ser;
	}

	if (hv_pci_start_relations_work(hbus, dr))
		kfree(dr);
}
/**
 * hv_pci_devices_present2() - Handle list of new children
 * @hbus:	Root PCI bus, as understood by this driver
 * @relations:	Packet from host listing children
 *
 * This function is the v2 version of hv_pci_devices_present()
 */
static void hv_pci_devices_present2(struct hv_pcibus_device *hbus,
				    struct pci_bus_relations2 *relations)
{
	struct hv_dr_state *dr;
	int i;

	dr = kzalloc(struct_size(dr, func, relations->device_count),
		     GFP_NOWAIT);
	if (!dr)
		return;

	dr->device_count = relations->device_count;
	for (i = 0; i < dr->device_count; i++) {
		dr->func[i].v_id = relations->func[i].v_id;
		dr->func[i].d_id = relations->func[i].d_id;
		dr->func[i].rev = relations->func[i].rev;
		dr->func[i].prog_intf = relations->func[i].prog_intf;
		dr->func[i].subclass = relations->func[i].subclass;
		dr->func[i].base_class = relations->func[i].base_class;
		dr->func[i].subsystem_id = relations->func[i].subsystem_id;
		dr->func[i].win_slot = relations->func[i].win_slot;
		dr->func[i].ser = relations->func[i].ser;
		dr->func[i].flags = relations->func[i].flags;
		dr->func[i].virtual_numa_node =
			relations->func[i].virtual_numa_node;
	}

	if (hv_pci_start_relations_work(hbus, dr))
		kfree(dr);
}
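/*
 * The v2 relations payload is a superset of v1: beyond the fields copied in
 * hv_pci_devices_present(), it carries per-function flags and a virtual
 * NUMA node, which the driver consults when assigning NUMA nodes to the
 * resulting PCI devices (see the hv_pci_assign_numa_node() call in
 * pci_devices_present_work()).
 */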
/**
 * hv_eject_device_work() - Asynchronously handles ejection
 * @work:	Work struct embedded in internal device struct
 *
 * This function handles ejecting a device.  Windows will
 * attempt to gracefully eject a device, waiting 60 seconds to
 * hear back from the guest OS that this completed successfully.
 * If this timer expires, the device will be forcibly removed.
 */
static void hv_eject_device_work(struct work_struct *work)
{
	struct pci_eject_response *ejct_pkt;
	struct hv_pcibus_device *hbus;
	struct hv_pci_dev *hpdev;
	struct pci_dev *pdev;
	unsigned long flags;
	int wslot;
	struct {
		struct pci_packet pkt;
		u8 buffer[sizeof(struct pci_eject_response)];
	} ctxt;

	hpdev = container_of(work, struct hv_pci_dev, wrk);
	hbus = hpdev->hbus;

	WARN_ON(hpdev->state != hv_pcichild_ejecting);

	/*
	 * Ejection can come before or after the PCI bus has been set up, so
	 * attempt to find it and tear down the bus state, if it exists.  This
	 * must be done without constructs like pci_domain_nr(hbus->bridge->bus)
	 * because hbus->bridge->bus may not exist yet.
	 */
	wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
	pdev = pci_get_domain_bus_and_slot(hbus->bridge->domain_nr, 0, wslot);
	if (pdev) {
		pci_lock_rescan_remove();
		pci_stop_and_remove_bus_device(pdev);
		pci_dev_put(pdev);
		pci_unlock_rescan_remove();
	}

	spin_lock_irqsave(&hbus->device_list_lock, flags);
	list_del(&hpdev->list_entry);
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	if (hpdev->pci_slot)
		pci_destroy_slot(hpdev->pci_slot);

	memset(&ctxt, 0, sizeof(ctxt));
	ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
	ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
	ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
	vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
			 sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
			 VM_PKT_DATA_INBAND, 0);

	/* For the get_pcichild() in hv_pci_eject_device() */
	put_pcichild(hpdev);
	/* For the two refs got in new_pcichild_device() */
	put_pcichild(hpdev);
	put_pcichild(hpdev);
	/* hpdev has been freed. Do not use it any more. */
}
/**
 * hv_pci_eject_device() - Handles device ejection
 * @hpdev:	Internal device tracking struct
 *
 * This function is invoked when an ejection packet arrives.  It
 * just schedules work so that we don't re-enter the packet
 * delivery code handling the ejection.
 */
static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct hv_device *hdev = hbus->hdev;

	if (hbus->state == hv_pcibus_removing) {
		dev_info(&hdev->device, "PCI VMBus EJECT: ignored\n");
		return;
	}

	hpdev->state = hv_pcichild_ejecting;
	get_pcichild(hpdev);
	INIT_WORK(&hpdev->wrk, hv_eject_device_work);
	queue_work(hbus->wq, &hpdev->wrk);
}
/**
 * hv_pci_onchannelcallback() - Handles incoming packets
 * @context:	Internal bus tracking struct
 *
 * This function is invoked whenever the host sends a packet to
 * this channel (which is private to this root PCI bus).
 */
static void hv_pci_onchannelcallback(void *context)
{
	const int packet_size = 0x100;
	int ret;
	struct hv_pcibus_device *hbus = context;
	u32 bytes_recvd;
	u64 req_id;
	struct vmpacket_descriptor *desc;
	unsigned char *buffer;
	int bufferlen = packet_size;
	struct pci_packet *comp_packet;
	struct pci_response *response;
	struct pci_incoming_message *new_message;
	struct pci_bus_relations *bus_rel;
	struct pci_bus_relations2 *bus_rel2;
	struct pci_dev_inval_block *inval;
	struct pci_dev_incoming *dev_message;
	struct hv_pci_dev *hpdev;

	buffer = kmalloc(bufferlen, GFP_ATOMIC);
	if (!buffer)
		return;

	while (1) {
		ret = vmbus_recvpacket_raw(hbus->hdev->channel, buffer,
					   bufferlen, &bytes_recvd, &req_id);

		if (ret == -ENOBUFS) {
			kfree(buffer);
			/* Handle large packet */
			bufferlen = bytes_recvd;
			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
			if (!buffer)
				return;
			continue;
		}

		/* Zero length indicates there are no more packets. */
		if (ret || !bytes_recvd)
			break;

		/*
		 * All incoming packets must be at least as large as a
		 * response.
		 */
		if (bytes_recvd <= sizeof(struct pci_response))
			continue;
		desc = (struct vmpacket_descriptor *)buffer;

		switch (desc->type) {
		case VM_PKT_COMP:

			/*
			 * The host is trusted, and thus it's safe to interpret
			 * this transaction ID as a pointer.
			 */
			comp_packet = (struct pci_packet *)req_id;
			response = (struct pci_response *)buffer;
			comp_packet->completion_func(comp_packet->compl_ctxt,
						     response,
						     bytes_recvd);
			break;

		case VM_PKT_DATA_INBAND:

			new_message = (struct pci_incoming_message *)buffer;
			switch (new_message->message_type.type) {
			case PCI_BUS_RELATIONS:

				bus_rel = (struct pci_bus_relations *)buffer;
				if (bytes_recvd <
					struct_size(bus_rel, func,
						    bus_rel->device_count)) {
					dev_err(&hbus->hdev->device,
						"bus relations too small\n");
					break;
				}

				hv_pci_devices_present(hbus, bus_rel);
				break;

			case PCI_BUS_RELATIONS2:

				bus_rel2 = (struct pci_bus_relations2 *)buffer;
				if (bytes_recvd <
					struct_size(bus_rel2, func,
						    bus_rel2->device_count)) {
					dev_err(&hbus->hdev->device,
						"bus relations v2 too small\n");
					break;
				}

				hv_pci_devices_present2(hbus, bus_rel2);
				break;

			case PCI_EJECT:

				dev_message = (struct pci_dev_incoming *)buffer;
				hpdev = get_pcichild_wslot(hbus,
						      dev_message->wslot.slot);
				if (hpdev) {
					hv_pci_eject_device(hpdev);
					put_pcichild(hpdev);
				}
				break;

			case PCI_INVALIDATE_BLOCK:

				inval = (struct pci_dev_inval_block *)buffer;
				hpdev = get_pcichild_wslot(hbus,
							   inval->wslot.slot);
				if (hpdev) {
					if (hpdev->block_invalidate) {
						hpdev->block_invalidate(
						    hpdev->invalidate_context,
						    inval->block_mask);
					}
					put_pcichild(hpdev);
				}
				break;

			default:
				dev_warn(&hbus->hdev->device,
					"Unimplemented protocol message %x\n",
					new_message->message_type.type);
				break;
			}
			break;

		default:
			dev_err(&hbus->hdev->device,
				"unhandled packet type %d, tid %llx len %d\n",
				desc->type, req_id, bytes_recvd);
			break;
		}
	}

	kfree(buffer);
}
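/*
 * Dispatch summary: VM_PKT_COMP packets complete a request that this driver
 * sent earlier (the transaction ID is the sender's pci_packet), while
 * VM_PKT_DATA_INBAND packets are unsolicited host messages: child list
 * updates (PCI_BUS_RELATIONS and PCI_BUS_RELATIONS2), device ejection
 * (PCI_EJECT), and config block invalidation (PCI_INVALIDATE_BLOCK).
 */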
/**
 * hv_pci_protocol_negotiation() - Set up protocol
 * @hdev:		VMBus's tracking struct for this root PCI bus.
 * @version:		Array of supported channel protocol versions in
 *			the order of probing - highest go first.
 * @num_version:	Number of elements in the version array.
 *
 * This driver is intended to support running on Windows 10
 * (server) and later versions. It will not run on earlier
 * versions, as they assume that many of the operations which
 * Linux needs accomplished with a spinlock held were done via
 * asynchronous messaging via VMBus.  Windows 10 increases the
 * surface area of PCI emulation so that these actions can take
 * place by suspending a virtual processor for their duration.
 *
 * This function negotiates the channel protocol version,
 * failing if the host doesn't support the necessary protocol
 * level.
 */
static int hv_pci_protocol_negotiation(struct hv_device *hdev,
				       enum pci_protocol_version_t version[],
				       int num_version)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_version_request *version_req;
	struct hv_pci_compl comp_pkt;
	struct pci_packet *pkt;
	int ret;
	int i;

	/*
	 * Initiate the handshake with the host and negotiate
	 * a version that the host can support. We start with the
	 * highest version number and go down if the host cannot
	 * support it.
	 */
	pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	init_completion(&comp_pkt.host_event);
	pkt->completion_func = hv_pci_generic_compl;
	pkt->compl_ctxt = &comp_pkt;
	version_req = (struct pci_version_request *)&pkt->message;
	version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;

	for (i = 0; i < num_version; i++) {
		version_req->protocol_version = version[i];
		ret = vmbus_sendpacket(hdev->channel, version_req,
				sizeof(struct pci_version_request),
				(unsigned long)pkt, VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
		if (!ret)
			ret = wait_for_response(hdev, &comp_pkt.host_event);

		if (ret) {
			dev_err(&hdev->device,
				"PCI Pass-through VSP failed to request version: %d",
				ret);
			goto exit;
		}

		if (comp_pkt.completion_status >= 0) {
			hbus->protocol_version = version[i];
			dev_info(&hdev->device,
				"PCI VMBus probing: Using version %#x\n",
				hbus->protocol_version);
			goto exit;
		}

		if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) {
			dev_err(&hdev->device,
				"PCI Pass-through VSP failed version request: %#x",
				comp_pkt.completion_status);
			ret = -EPROTO;
			goto exit;
		}

		reinit_completion(&comp_pkt.host_event);
	}

	dev_err(&hdev->device,
		"PCI pass-through VSP failed to find supported version");
	ret = -EPROTO;

exit:
	kfree(pkt);
	return ret;
}
/**
 * hv_pci_free_bridge_windows() - Release memory regions for the
 * bus
 * @hbus:	Root PCI bus, as understood by this driver
 */
static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus)
{
	/*
	 * Set the resources back to the way they looked when they
	 * were allocated by setting IORESOURCE_BUSY again.
	 */

	if (hbus->low_mmio_space && hbus->low_mmio_res) {
		hbus->low_mmio_res->flags |= IORESOURCE_BUSY;
		vmbus_free_mmio(hbus->low_mmio_res->start,
				resource_size(hbus->low_mmio_res));
	}

	if (hbus->high_mmio_space && hbus->high_mmio_res) {
		hbus->high_mmio_res->flags |= IORESOURCE_BUSY;
		vmbus_free_mmio(hbus->high_mmio_res->start,
				resource_size(hbus->high_mmio_res));
	}
}
/**
 * hv_pci_allocate_bridge_windows() - Allocate memory regions
 * for the bus
 * @hbus:	Root PCI bus, as understood by this driver
 *
 * This function calls vmbus_allocate_mmio(), which is itself a
 * bit of a compromise.  Ideally, we might change the pnp layer
 * in the kernel such that it comprehends either PCI devices
 * which are "grandchildren of ACPI," with some intermediate bus
 * node (in this case, VMBus) or change it such that it
 * understands VMBus.  The pnp layer, however, has been declared
 * deprecated, and not subject to change.
 *
 * The workaround, implemented here, is to ask VMBus to allocate
 * MMIO space for this bus.  VMBus itself knows which ranges are
 * appropriate by looking at its own ACPI objects.  Then, after
 * these ranges are claimed, they're modified to look like they
 * would have looked if the ACPI and pnp code had allocated
 * bridge windows.  These descriptors have to exist in this form
 * in order to satisfy the code which will get invoked when the
 * endpoint PCI function driver calls request_mem_region() or
 * request_mem_region_exclusive().
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
{
	resource_size_t align;
	int ret;

	if (hbus->low_mmio_space) {
		align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
		ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0,
					  (u64)(u32)0xffffffff,
					  hbus->low_mmio_space,
					  align, false);
		if (ret) {
			dev_err(&hbus->hdev->device,
				"Need %#llx of low MMIO space. Consider reconfiguring the VM.\n",
				hbus->low_mmio_space);
			return ret;
		}

		/* Modify this resource to become a bridge window. */
		hbus->low_mmio_res->flags |= IORESOURCE_WINDOW;
		hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY;
		pci_add_resource(&hbus->bridge->windows, hbus->low_mmio_res);
	}

	if (hbus->high_mmio_space) {
		align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space));
		ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev,
					  0x100000000, -1,
					  hbus->high_mmio_space, align,
					  false);
		if (ret) {
			dev_err(&hbus->hdev->device,
				"Need %#llx of high MMIO space. Consider reconfiguring the VM.\n",
				hbus->high_mmio_space);
			goto release_low_mmio;
		}

		/* Modify this resource to become a bridge window. */
		hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
		hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
		pci_add_resource(&hbus->bridge->windows, hbus->high_mmio_res);
	}

	return 0;

release_low_mmio:
	if (hbus->low_mmio_res) {
		vmbus_free_mmio(hbus->low_mmio_res->start,
				resource_size(hbus->low_mmio_res));
	}

	return ret;
}
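/*
 * The alignment used above, 1ULL << (63 - __builtin_clzll(size)), is the
 * largest power of two not exceeding the requested size.  For example, a
 * low MMIO request of 0x180000 (1.5 MiB) has its highest set bit at
 * position 20, so the window is allocated with 0x100000 (1 MiB) alignment.
 */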
/**
 * hv_allocate_config_window() - Find MMIO space for PCI Config
 * @hbus:	Root PCI bus, as understood by this driver
 *
 * This function claims memory-mapped I/O space for accessing
 * configuration space for the functions on this bus.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_allocate_config_window(struct hv_pcibus_device *hbus)
{
	int ret;

	/*
	 * Set up a region of MMIO space to use for accessing configuration
	 * space.
	 */
	ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1,
				  PCI_CONFIG_MMIO_LENGTH, 0x1000, false);
	if (ret)
		return ret;

	/*
	 * vmbus_allocate_mmio() gets used for allocating both device endpoint
	 * resource claims (those which cannot be overlapped) and the ranges
	 * which are valid for the children of this bus, which are intended
	 * to be overlapped by those children.  Set the flag on this claim
	 * meaning that this region can't be overlapped.
	 */

	hbus->mem_config->flags |= IORESOURCE_BUSY;

	return 0;
}

static void hv_free_config_window(struct hv_pcibus_device *hbus)
{
	vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
}
static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs);
/**
 * hv_pci_enter_d0() - Bring the "bus" into the D0 power state
 * @hdev:	VMBus's tracking struct for this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_enter_d0(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_bus_d0_entry *d0_entry;
	struct hv_pci_compl comp_pkt;
	struct pci_packet *pkt;
	int ret;

	/*
	 * Tell the host that the bus is ready to use, and moved into the
	 * powered-on state.  This includes telling the host which region
	 * of memory-mapped I/O space has been chosen for configuration space
	 * access.
	 */
	pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	init_completion(&comp_pkt.host_event);
	pkt->completion_func = hv_pci_generic_compl;
	pkt->compl_ctxt = &comp_pkt;
	d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
	d0_entry->message_type.type = PCI_BUS_D0ENTRY;
	d0_entry->mmio_base = hbus->mem_config->start;

	ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
			       (unsigned long)pkt, VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (!ret)
		ret = wait_for_response(hdev, &comp_pkt.host_event);

	if (ret)
		goto exit;

	if (comp_pkt.completion_status < 0) {
		dev_err(&hdev->device,
			"PCI Pass-through VSP failed D0 Entry with status %x\n",
			comp_pkt.completion_status);
		ret = -EPROTO;
		goto exit;
	}

	ret = 0;

exit:
	kfree(pkt);
	return ret;
}
/**
 * hv_pci_query_relations() - Ask host to send list of child
 * devices
 * @hdev:	VMBus's tracking struct for this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_query_relations(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_message message;
	struct completion comp;
	int ret;

	/* Ask the host to send along the list of child devices */
	init_completion(&comp);
	if (cmpxchg(&hbus->survey_event, NULL, &comp))
		return -ENOTEMPTY;

	memset(&message, 0, sizeof(message));
	message.type = PCI_QUERY_BUS_RELATIONS;

	ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
			       0, VM_PKT_DATA_INBAND, 0);
	if (!ret)
		ret = wait_for_response(hdev, &comp);

	return ret;
}
/**
 * hv_send_resources_allocated() - Report local resource choices
 * @hdev:	VMBus's tracking struct for this root PCI bus
 *
 * The host OS is expecting to be sent a request as a message
 * which contains all the resources that the device will use.
 * The response contains those same resources, "translated"
 * which is to say, the values which should be used by the
 * hardware, when it delivers an interrupt.  (MMIO resources are
 * used in local terms.)  This is nice for Windows, and lines up
 * with the FDO/PDO split, which doesn't exist in Linux.  Linux
 * is deeply expecting to scan an emulated PCI configuration
 * space.  So this message is sent here only to drive the state
 * machine on the host forward.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_send_resources_allocated(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_resources_assigned *res_assigned;
	struct pci_resources_assigned2 *res_assigned2;
	struct hv_pci_compl comp_pkt;
	struct hv_pci_dev *hpdev;
	struct pci_packet *pkt;
	size_t size_res;
	int wslot;
	int ret;

	size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2)
			? sizeof(*res_assigned) : sizeof(*res_assigned2);
	ret = 0;

	pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	for (wslot = 0; wslot < 256; wslot++) {
		hpdev = get_pcichild_wslot(hbus, wslot);
		if (!hpdev)
			continue;

		memset(pkt, 0, sizeof(*pkt) + size_res);
		init_completion(&comp_pkt.host_event);
		pkt->completion_func = hv_pci_generic_compl;
		pkt->compl_ctxt = &comp_pkt;

		if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
			res_assigned =
				(struct pci_resources_assigned *)&pkt->message;
			res_assigned->message_type.type =
				PCI_RESOURCES_ASSIGNED;
			res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
		} else {
			res_assigned2 =
				(struct pci_resources_assigned2 *)&pkt->message;
			res_assigned2->message_type.type =
				PCI_RESOURCES_ASSIGNED2;
			res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
		}
		put_pcichild(hpdev);

		ret = vmbus_sendpacket(hdev->channel, &pkt->message,
				size_res, (unsigned long)pkt,
				VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
		if (!ret)
			ret = wait_for_response(hdev, &comp_pkt.host_event);
		if (ret)
			break;

		if (comp_pkt.completion_status < 0) {
			ret = -EPROTO;
			dev_err(&hdev->device,
				"resource allocated returned 0x%x",
				comp_pkt.completion_status);
			break;
		}

		hbus->wslot_res_allocated = wslot;
	}

	kfree(pkt);
	return ret;
}
/**
 * hv_send_resources_released() - Report local resources
 * released
 * @hdev:	VMBus's tracking struct for this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_send_resources_released(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_child_message pkt;
	struct hv_pci_dev *hpdev;
	int wslot;
	int ret;

	for (wslot = hbus->wslot_res_allocated; wslot >= 0; wslot--) {
		hpdev = get_pcichild_wslot(hbus, wslot);
		if (!hpdev)
			continue;

		memset(&pkt, 0, sizeof(pkt));
		pkt.message_type.type = PCI_RESOURCES_RELEASED;
		pkt.wslot.slot = hpdev->desc.win_slot.slot;

		put_pcichild(hpdev);

		ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
				       VM_PKT_DATA_INBAND, 0);
		if (ret)
			return ret;

		hbus->wslot_res_allocated = wslot - 1;
	}

	hbus->wslot_res_allocated = -1;

	return 0;
}
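/*
 * hbus->wslot_res_allocated tracks the highest slot for which a "resources
 * assigned" message reached the host, which is why the release loop above
 * walks downward from it.  The kdump retry path in hv_pci_probe() relies on
 * this by forcing the value to 255 so that every possible slot is swept.
 */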
#define HVPCI_DOM_MAP_SIZE	(64 * 1024)
static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE);

/*
 * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define 0
 * as invalid for passthrough PCI devices of this driver.
 */
#define HVPCI_DOM_INVALID 0
/**
 * hv_get_dom_num() - Get a valid PCI domain number
 * Check if the PCI domain number is in use, and return another number if
 * it is in use.
 *
 * @dom: Requested domain number
 *
 * return: domain number on success, HVPCI_DOM_INVALID on failure
 */
static u16 hv_get_dom_num(u16 dom)
{
	unsigned int i;

	if (test_and_set_bit(dom, hvpci_dom_map) == 0)
		return dom;

	for_each_clear_bit(i, hvpci_dom_map, HVPCI_DOM_MAP_SIZE) {
		if (test_and_set_bit(i, hvpci_dom_map) == 0)
			return i;
	}

	return HVPCI_DOM_INVALID;
}
/**
 * hv_put_dom_num() - Mark the PCI domain number as free
 * @dom: Domain number to be freed
 */
static void hv_put_dom_num(u16 dom)
{
	clear_bit(dom, hvpci_dom_map);
}
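/*
 * Example of the domain allocation policy: if the instance GUID yields a
 * dom_req of, say, 0x9D52 and that bit is already set in hvpci_dom_map,
 * hv_get_dom_num() falls back to the first clear bit and hv_pci_probe()
 * logs the collision.  Domain 0 is never handed out, because
 * init_hv_pci_drv() sets the HVPCI_DOM_INVALID bit up front.
 */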
/**
 * hv_pci_probe() - New VMBus channel probe, for a root PCI bus
 * @hdev:	VMBus's tracking struct for this root PCI bus
 * @dev_id:	Identifies the device itself
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_probe(struct hv_device *hdev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct pci_host_bridge *bridge;
	struct hv_pcibus_device *hbus;
	u16 dom_req, dom;
	char *name;
	bool enter_d0_retry = true;
	int ret;

	/*
	 * hv_pcibus_device contains the hypercall arguments for retargeting in
	 * hv_irq_unmask(). Those must not cross a page boundary.
	 */
	BUILD_BUG_ON(sizeof(*hbus) > HV_HYP_PAGE_SIZE);

	bridge = devm_pci_alloc_host_bridge(&hdev->device, 0);
	if (!bridge)
		return -ENOMEM;

	/*
	 * With the recent 59bb47985c1d ("mm, sl[aou]b: guarantee natural
	 * alignment for kmalloc(power-of-two)"), kzalloc() is able to allocate
	 * a 4KB buffer that is guaranteed to be 4KB-aligned. Here the size and
	 * alignment of hbus is important because hbus's field
	 * retarget_msi_interrupt_params must not cross a 4KB page boundary.
	 *
	 * Here we prefer kzalloc to get_zeroed_page(), because a buffer
	 * allocated by the latter is not tracked and scanned by kmemleak, and
	 * hence kmemleak reports the pointer contained in the hbus buffer
	 * (i.e. the hpdev struct, which is created in new_pcichild_device() and
	 * is tracked by hbus->children) as memory leak (false positive).
	 *
	 * If the kernel doesn't have 59bb47985c1d, get_zeroed_page() *must* be
	 * used to allocate the hbus buffer and we can avoid the kmemleak false
	 * positive by using kmemleak_alloc() and kmemleak_free() to ask
	 * kmemleak to track and scan the hbus buffer.
	 */
	hbus = kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
	if (!hbus)
		return -ENOMEM;

	hbus->bridge = bridge;
	hbus->state = hv_pcibus_init;
	hbus->wslot_res_allocated = -1;

	/*
	 * The PCI bus "domain" is what is called "segment" in ACPI and other
	 * specs. Pull it from the instance ID, to get something usually
	 * unique. In rare cases of collision, we will find out another number
	 * not in use.
	 *
	 * Note that, since this code only runs in a Hyper-V VM, Hyper-V
	 * together with this guest driver can guarantee that (1) The only
	 * domain used by Gen1 VMs for something that looks like a physical
	 * PCI bus (which is actually emulated by the hypervisor) is domain 0.
	 * (2) There will be no overlap between domains (after fixing possible
	 * collisions) in the same VM.
	 */
	dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4];
	dom = hv_get_dom_num(dom_req);

	if (dom == HVPCI_DOM_INVALID) {
		dev_err(&hdev->device,
			"Unable to use dom# 0x%hx or other numbers", dom_req);
		ret = -EINVAL;
		goto free_bus;
	}

	if (dom != dom_req)
		dev_info(&hdev->device,
			 "PCI dom# 0x%hx has collision, using 0x%hx",
			 dom_req, dom);

	hbus->bridge->domain_nr = dom;
#ifdef CONFIG_X86
	hbus->sysdata.domain = dom;
#elif defined(CONFIG_ARM64)
	/*
	 * Set the PCI bus parent to be the corresponding VMbus
	 * device. Then the VMbus device will be assigned as the
	 * ACPI companion in pcibios_root_bridge_prepare() and
	 * pci_dma_configure() will propagate device coherence
	 * information to devices created on the bus.
	 */
	hbus->sysdata.parent = hdev->device.parent;
#endif

	hbus->hdev = hdev;
	INIT_LIST_HEAD(&hbus->children);
	INIT_LIST_HEAD(&hbus->dr_list);
	spin_lock_init(&hbus->config_lock);
	spin_lock_init(&hbus->device_list_lock);
	spin_lock_init(&hbus->retarget_msi_interrupt_lock);
	hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
					   hbus->bridge->domain_nr);
	if (!hbus->wq) {
		ret = -ENOMEM;
		goto free_dom;
	}

	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
			 hv_pci_onchannelcallback, hbus);
	if (ret)
		goto destroy_wq;

	hv_set_drvdata(hdev, hbus);

	ret = hv_pci_protocol_negotiation(hdev, pci_protocol_versions,
					  ARRAY_SIZE(pci_protocol_versions));
	if (ret)
		goto close;

	ret = hv_allocate_config_window(hbus);
	if (ret)
		goto close;

	hbus->cfg_addr = ioremap(hbus->mem_config->start,
				 PCI_CONFIG_MMIO_LENGTH);
	if (!hbus->cfg_addr) {
		dev_err(&hdev->device,
			"Unable to map a virtual address for config space\n");
		ret = -ENOMEM;
		goto free_config;
	}

	name = kasprintf(GFP_KERNEL, "%pUL", &hdev->dev_instance);
	if (!name) {
		ret = -ENOMEM;
		goto unmap;
	}

	hbus->fwnode = irq_domain_alloc_named_fwnode(name);
	kfree(name);
	if (!hbus->fwnode) {
		ret = -ENOMEM;
		goto unmap;
	}

	ret = hv_pcie_init_irq_domain(hbus);
	if (ret)
		goto free_fwnode;

retry:
	ret = hv_pci_query_relations(hdev);
	if (ret)
		goto free_irq_domain;

	ret = hv_pci_enter_d0(hdev);
	/*
	 * In certain case (Kdump) the pci device of interest was
	 * not cleanly shut down and resource is still held on host
	 * side, the host could return invalid device status.
	 * We need to explicitly request host to release the resource
	 * and try to enter D0 again.
	 * Since the hv_pci_bus_exit() call releases structures
	 * of all its child devices, we need to start the retry from
	 * hv_pci_query_relations() call, requesting host to send
	 * the synchronous child device relations message before this
	 * information is needed in hv_send_resources_allocated()
	 * call later.
	 */
	if (ret == -EPROTO && enter_d0_retry) {
		enter_d0_retry = false;

		dev_err(&hdev->device, "Retrying D0 Entry\n");

		/*
		 * Hv_pci_bus_exit() calls hv_send_resources_released()
		 * to free up resources of its child devices.
		 * In the kdump kernel we need to set the
		 * wslot_res_allocated to 255 so it scans all child
		 * devices to release resources allocated in the
		 * normal kernel before panic happened.
		 */
		hbus->wslot_res_allocated = 255;
		ret = hv_pci_bus_exit(hdev, true);

		if (ret == 0)
			goto retry;

		dev_err(&hdev->device,
			"Retrying D0 failed with ret %d\n", ret);
	}
	if (ret)
		goto free_irq_domain;

	ret = hv_pci_allocate_bridge_windows(hbus);
	if (ret)
		goto exit_d0;

	ret = hv_send_resources_allocated(hdev);
	if (ret)
		goto free_windows;

	prepopulate_bars(hbus);

	hbus->state = hv_pcibus_probed;

	ret = create_root_hv_pci_bus(hbus);
	if (ret)
		goto free_windows;

	return 0;

free_windows:
	hv_pci_free_bridge_windows(hbus);
exit_d0:
	(void) hv_pci_bus_exit(hdev, true);
free_irq_domain:
	irq_domain_remove(hbus->irq_domain);
free_fwnode:
	irq_domain_free_fwnode(hbus->fwnode);
unmap:
	iounmap(hbus->cfg_addr);
free_config:
	hv_free_config_window(hbus);
close:
	vmbus_close(hdev->channel);
destroy_wq:
	destroy_workqueue(hbus->wq);
free_dom:
	hv_put_dom_num(hbus->bridge->domain_nr);
free_bus:
	kfree(hbus);
	return ret;
}
static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct {
		struct pci_packet teardown_packet;
		u8 buffer[sizeof(struct pci_message)];
	} pkt;
	struct hv_pci_compl comp_pkt;
	struct hv_pci_dev *hpdev, *tmp;
	unsigned long flags;
	int ret;

	/*
	 * After the host sends the RESCIND_CHANNEL message, it doesn't
	 * access the per-channel ringbuffer any longer.
	 */
	if (hdev->channel->rescind)
		return 0;

	if (!keep_devs) {
		struct list_head removed;

		/* Move all present children to the list on stack */
		INIT_LIST_HEAD(&removed);
		spin_lock_irqsave(&hbus->device_list_lock, flags);
		list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry)
			list_move_tail(&hpdev->list_entry, &removed);
		spin_unlock_irqrestore(&hbus->device_list_lock, flags);

		/* Remove all children in the list */
		list_for_each_entry_safe(hpdev, tmp, &removed, list_entry) {
			list_del(&hpdev->list_entry);
			if (hpdev->pci_slot)
				pci_destroy_slot(hpdev->pci_slot);
			/* For the two refs got in new_pcichild_device() */
			put_pcichild(hpdev);
			put_pcichild(hpdev);
		}
	}

	ret = hv_send_resources_released(hdev);
	if (ret) {
		dev_err(&hdev->device,
			"Couldn't send resources released packet(s)\n");
		return ret;
	}

	memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
	init_completion(&comp_pkt.host_event);
	pkt.teardown_packet.completion_func = hv_pci_generic_compl;
	pkt.teardown_packet.compl_ctxt = &comp_pkt;
	pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;

	ret = vmbus_sendpacket(hdev->channel, &pkt.teardown_packet.message,
			       sizeof(struct pci_message),
			       (unsigned long)&pkt.teardown_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0)
		return -ETIMEDOUT;

	return 0;
}
/**
 * hv_pci_remove() - Remove routine for this VMBus channel
 * @hdev:	VMBus's tracking struct for this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_remove(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus;
	int ret;

	hbus = hv_get_drvdata(hdev);
	if (hbus->state == hv_pcibus_installed) {
		tasklet_disable(&hdev->channel->callback_event);
		hbus->state = hv_pcibus_removing;
		tasklet_enable(&hdev->channel->callback_event);
		destroy_workqueue(hbus->wq);
		hbus->wq = NULL;
		/*
		 * At this point, no work is running or can be scheduled
		 * on hbus->wq. We can't race with hv_pci_devices_present()
		 * or hv_pci_eject_device(), it's safe to proceed.
		 */

		/* Remove the bus from PCI's point of view. */
		pci_lock_rescan_remove();
		pci_stop_root_bus(hbus->bridge->bus);
		hv_pci_remove_slots(hbus);
		pci_remove_root_bus(hbus->bridge->bus);
		pci_unlock_rescan_remove();
	}

	ret = hv_pci_bus_exit(hdev, false);

	vmbus_close(hdev->channel);

	iounmap(hbus->cfg_addr);
	hv_free_config_window(hbus);
	hv_pci_free_bridge_windows(hbus);
	irq_domain_remove(hbus->irq_domain);
	irq_domain_free_fwnode(hbus->fwnode);

	hv_put_dom_num(hbus->bridge->domain_nr);

	kfree(hbus);
	return ret;
}
static int hv_pci_suspend(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	enum hv_pcibus_state old_state;
	int ret;

	/*
	 * hv_pci_suspend() must make sure there are no pending work items
	 * before calling vmbus_close(), since it runs in a process context
	 * as a callback in dpm_suspend().  When it starts to run, the channel
	 * callback hv_pci_onchannelcallback(), which runs in a tasklet
	 * context, can be still running concurrently and scheduling new work
	 * items onto hbus->wq in hv_pci_devices_present() and
	 * hv_pci_eject_device(), and the work item handlers can access the
	 * vmbus channel, which can be being closed by hv_pci_suspend(), e.g.
	 * the work item handler pci_devices_present_work() ->
	 * new_pcichild_device() writes to the vmbus channel.
	 *
	 * To eliminate the race, hv_pci_suspend() disables the channel
	 * callback tasklet, sets hbus->state to hv_pcibus_removing, and
	 * re-enables the tasklet. This way, when hv_pci_suspend() proceeds,
	 * it knows that no new work item can be scheduled, and then it flushes
	 * hbus->wq and safely closes the vmbus channel.
	 */
	tasklet_disable(&hdev->channel->callback_event);

	/* Change the hbus state to prevent new work items. */
	old_state = hbus->state;
	if (hbus->state == hv_pcibus_installed)
		hbus->state = hv_pcibus_removing;

	tasklet_enable(&hdev->channel->callback_event);

	if (old_state != hv_pcibus_installed)
		return -EINVAL;

	flush_workqueue(hbus->wq);

	ret = hv_pci_bus_exit(hdev, true);
	if (ret)
		return ret;

	vmbus_close(hdev->channel);

	return 0;
}
*pdev
, void *arg
)
3466 struct msi_desc
*entry
;
3467 struct irq_data
*irq_data
;
3469 for_each_pci_msi_entry(entry
, pdev
) {
3470 irq_data
= irq_get_irq_data(entry
->irq
);
3471 if (WARN_ON_ONCE(!irq_data
))
3474 hv_compose_msi_msg(irq_data
, &entry
->msg
);
3481 * Upon resume, pci_restore_msi_state() -> ... -> __pci_write_msi_msg()
3482 * directly writes the MSI/MSI-X registers via MMIO, but since Hyper-V
3483 * doesn't trap and emulate the MMIO accesses, here hv_compose_msi_msg()
3484 * must be used to ask Hyper-V to re-create the IOMMU Interrupt Remapping
3487 static void hv_pci_restore_msi_state(struct hv_pcibus_device
*hbus
)
3489 pci_walk_bus(hbus
->bridge
->bus
, hv_pci_restore_msi_msg
, NULL
);
static int hv_pci_resume(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	enum pci_protocol_version_t version[1];
	int ret;

	hbus->state = hv_pcibus_init;

	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
			 hv_pci_onchannelcallback, hbus);
	if (ret)
		return ret;

	/* Only use the version that was in use before hibernation. */
	version[0] = hbus->protocol_version;
	ret = hv_pci_protocol_negotiation(hdev, version, 1);
	if (ret)
		goto out;

	ret = hv_pci_query_relations(hdev);
	if (ret)
		goto out;

	ret = hv_pci_enter_d0(hdev);
	if (ret)
		goto out;

	ret = hv_send_resources_allocated(hdev);
	if (ret)
		goto out;

	prepopulate_bars(hbus);

	hv_pci_restore_msi_state(hbus);

	hbus->state = hv_pcibus_installed;
	return 0;
out:
	vmbus_close(hdev->channel);
	return ret;
}
static const struct hv_vmbus_device_id hv_pci_id_table[] = {
	/* PCI Pass-through Class ID */
	/* 44C4F61D-4444-4400-9D52-802E27EDE19F */
	{ HV_PCIE_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table);
static struct hv_driver hv_pci_drv = {
	.name		= "hv_pci",
	.id_table	= hv_pci_id_table,
	.probe		= hv_pci_probe,
	.remove		= hv_pci_remove,
	.suspend	= hv_pci_suspend,
	.resume		= hv_pci_resume,
};
static void __exit exit_hv_pci_drv(void)
{
	vmbus_driver_unregister(&hv_pci_drv);

	hvpci_block_ops.read_block = NULL;
	hvpci_block_ops.write_block = NULL;
	hvpci_block_ops.reg_blk_invalidate = NULL;
}
static int __init init_hv_pci_drv(void)
{
	if (!hv_is_hyperv_initialized())
		return -ENODEV;

	/* Set the invalid domain number's bit, so it will not be used */
	set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);

	/* Initialize PCI block r/w interface */
	hvpci_block_ops.read_block = hv_read_config_block;
	hvpci_block_ops.write_block = hv_write_config_block;
	hvpci_block_ops.reg_blk_invalidate = hv_register_block_invalidate;

	return vmbus_driver_register(&hv_pci_drv);
}

module_init(init_hv_pci_drv);
module_exit(exit_hv_pci_drv);

MODULE_DESCRIPTION("Hyper-V PCI");
MODULE_LICENSE("GPL v2");