4 * Copyright 2014 IBM Corp.
5 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
6 * Hong Bo Li <lihbbj@cn.ibm.com>
7 * Yi Min Zhao <zyimin@cn.ibm.com>
9 * This work is licensed under the terms of the GNU GPL, version 2 or (at
10 * your option) any later version. See the COPYING file in the top-level
14 #include "qemu/osdep.h"
15 #include "qapi/error.h"
16 #include "qapi/visitor.h"
17 #include "hw/s390x/s390-pci-bus.h"
18 #include "hw/s390x/s390-pci-inst.h"
19 #include "hw/s390x/s390-pci-kvm.h"
20 #include "hw/s390x/s390-pci-vfio.h"
21 #include "hw/pci/pci_bus.h"
22 #include "hw/qdev-properties.h"
23 #include "hw/pci/pci_bridge.h"
24 #include "hw/pci/msi.h"
25 #include "qemu/error-report.h"
26 #include "qemu/module.h"
27 #include "sysemu/reset.h"
28 #include "sysemu/runstate.h"
32 S390pciState
*s390_get_phb(void)
34 static S390pciState
*phb
;
37 phb
= S390_PCI_HOST_BRIDGE(
38 object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE
, NULL
));
45 int pci_chsc_sei_nt2_get_event(void *res
)
47 ChscSeiNt2Res
*nt2_res
= (ChscSeiNt2Res
*)res
;
51 SeiContainer
*sei_cont
;
52 S390pciState
*s
= s390_get_phb();
54 sei_cont
= QTAILQ_FIRST(&s
->pending_sei
);
56 QTAILQ_REMOVE(&s
->pending_sei
, sei_cont
, link
);
58 nt2_res
->cc
= sei_cont
->cc
;
59 nt2_res
->length
= cpu_to_be16(sizeof(ChscSeiNt2Res
));
60 switch (sei_cont
->cc
) {
61 case 1: /* error event */
62 eccdf
= (PciCcdfErr
*)nt2_res
->ccdf
;
63 eccdf
->fid
= cpu_to_be32(sei_cont
->fid
);
64 eccdf
->fh
= cpu_to_be32(sei_cont
->fh
);
65 eccdf
->e
= cpu_to_be32(sei_cont
->e
);
66 eccdf
->faddr
= cpu_to_be64(sei_cont
->faddr
);
67 eccdf
->pec
= cpu_to_be16(sei_cont
->pec
);
69 case 2: /* availability event */
70 accdf
= (PciCcdfAvail
*)nt2_res
->ccdf
;
71 accdf
->fid
= cpu_to_be32(sei_cont
->fid
);
72 accdf
->fh
= cpu_to_be32(sei_cont
->fh
);
73 accdf
->pec
= cpu_to_be16(sei_cont
->pec
);
85 int pci_chsc_sei_nt2_have_event(void)
87 S390pciState
*s
= s390_get_phb();
89 return !QTAILQ_EMPTY(&s
->pending_sei
);
92 S390PCIBusDevice
*s390_pci_find_next_avail_dev(S390pciState
*s
,
93 S390PCIBusDevice
*pbdev
)
95 S390PCIBusDevice
*ret
= pbdev
? QTAILQ_NEXT(pbdev
, link
) :
96 QTAILQ_FIRST(&s
->zpci_devs
);
98 while (ret
&& ret
->state
== ZPCI_FS_RESERVED
) {
99 ret
= QTAILQ_NEXT(ret
, link
);
105 S390PCIBusDevice
*s390_pci_find_dev_by_fid(S390pciState
*s
, uint32_t fid
)
107 S390PCIBusDevice
*pbdev
;
109 QTAILQ_FOREACH(pbdev
, &s
->zpci_devs
, link
) {
110 if (pbdev
->fid
== fid
) {
118 void s390_pci_sclp_configure(SCCB
*sccb
)
120 IoaCfgSccb
*psccb
= (IoaCfgSccb
*)sccb
;
121 S390PCIBusDevice
*pbdev
= s390_pci_find_dev_by_fid(s390_get_phb(),
122 be32_to_cpu(psccb
->aid
));
126 trace_s390_pci_sclp_nodev("configure", be32_to_cpu(psccb
->aid
));
127 rc
= SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED
;
131 switch (pbdev
->state
) {
132 case ZPCI_FS_RESERVED
:
133 rc
= SCLP_RC_ADAPTER_IN_RESERVED_STATE
;
135 case ZPCI_FS_STANDBY
:
136 pbdev
->state
= ZPCI_FS_DISABLED
;
137 rc
= SCLP_RC_NORMAL_COMPLETION
;
140 rc
= SCLP_RC_NO_ACTION_REQUIRED
;
143 psccb
->header
.response_code
= cpu_to_be16(rc
);
146 static void s390_pci_shutdown_notifier(Notifier
*n
, void *opaque
)
148 S390PCIBusDevice
*pbdev
= container_of(n
, S390PCIBusDevice
,
151 pci_device_reset(pbdev
->pdev
);
154 static void s390_pci_reset_cb(void *opaque
)
156 S390PCIBusDevice
*pbdev
= opaque
;
158 pci_device_reset(pbdev
->pdev
);
161 static void s390_pci_perform_unplug(S390PCIBusDevice
*pbdev
)
163 HotplugHandler
*hotplug_ctrl
;
165 if (pbdev
->pft
== ZPCI_PFT_ISM
) {
166 notifier_remove(&pbdev
->shutdown_notifier
);
167 qemu_unregister_reset(s390_pci_reset_cb
, pbdev
);
170 /* Unplug the PCI device */
172 DeviceState
*pdev
= DEVICE(pbdev
->pdev
);
174 hotplug_ctrl
= qdev_get_hotplug_handler(pdev
);
175 hotplug_handler_unplug(hotplug_ctrl
, pdev
, &error_abort
);
176 object_unparent(OBJECT(pdev
));
179 /* Unplug the zPCI device */
180 hotplug_ctrl
= qdev_get_hotplug_handler(DEVICE(pbdev
));
181 hotplug_handler_unplug(hotplug_ctrl
, DEVICE(pbdev
), &error_abort
);
182 object_unparent(OBJECT(pbdev
));
185 void s390_pci_sclp_deconfigure(SCCB
*sccb
)
187 IoaCfgSccb
*psccb
= (IoaCfgSccb
*)sccb
;
188 S390PCIBusDevice
*pbdev
= s390_pci_find_dev_by_fid(s390_get_phb(),
189 be32_to_cpu(psccb
->aid
));
193 trace_s390_pci_sclp_nodev("deconfigure", be32_to_cpu(psccb
->aid
));
194 rc
= SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED
;
198 switch (pbdev
->state
) {
199 case ZPCI_FS_RESERVED
:
200 rc
= SCLP_RC_ADAPTER_IN_RESERVED_STATE
;
202 case ZPCI_FS_STANDBY
:
203 rc
= SCLP_RC_NO_ACTION_REQUIRED
;
206 if (pbdev
->interp
&& (pbdev
->fh
& FH_MASK_ENABLE
)) {
207 /* Interpreted devices were using interrupt forwarding */
208 s390_pci_kvm_aif_disable(pbdev
);
209 } else if (pbdev
->summary_ind
) {
210 pci_dereg_irqs(pbdev
);
212 if (pbdev
->iommu
->enabled
) {
213 pci_dereg_ioat(pbdev
->iommu
);
215 pbdev
->state
= ZPCI_FS_STANDBY
;
216 rc
= SCLP_RC_NORMAL_COMPLETION
;
218 if (pbdev
->unplug_requested
) {
219 s390_pci_perform_unplug(pbdev
);
223 psccb
->header
.response_code
= cpu_to_be16(rc
);
226 static S390PCIBusDevice
*s390_pci_find_dev_by_uid(S390pciState
*s
, uint16_t uid
)
228 S390PCIBusDevice
*pbdev
;
230 QTAILQ_FOREACH(pbdev
, &s
->zpci_devs
, link
) {
231 if (pbdev
->uid
== uid
) {
239 S390PCIBusDevice
*s390_pci_find_dev_by_target(S390pciState
*s
,
242 S390PCIBusDevice
*pbdev
;
248 QTAILQ_FOREACH(pbdev
, &s
->zpci_devs
, link
) {
249 if (!strcmp(pbdev
->target
, target
)) {
257 static S390PCIBusDevice
*s390_pci_find_dev_by_pci(S390pciState
*s
,
260 S390PCIBusDevice
*pbdev
;
266 QTAILQ_FOREACH(pbdev
, &s
->zpci_devs
, link
) {
267 if (pbdev
->pdev
== pci_dev
) {
275 S390PCIBusDevice
*s390_pci_find_dev_by_idx(S390pciState
*s
, uint32_t idx
)
277 return g_hash_table_lookup(s
->zpci_table
, &idx
);
280 S390PCIBusDevice
*s390_pci_find_dev_by_fh(S390pciState
*s
, uint32_t fh
)
282 uint32_t idx
= FH_MASK_INDEX
& fh
;
283 S390PCIBusDevice
*pbdev
= s390_pci_find_dev_by_idx(s
, idx
);
285 if (pbdev
&& pbdev
->fh
== fh
) {
292 static void s390_pci_generate_event(uint8_t cc
, uint16_t pec
, uint32_t fh
,
293 uint32_t fid
, uint64_t faddr
, uint32_t e
)
295 SeiContainer
*sei_cont
;
296 S390pciState
*s
= s390_get_phb();
298 sei_cont
= g_new0(SeiContainer
, 1);
303 sei_cont
->faddr
= faddr
;
306 QTAILQ_INSERT_TAIL(&s
->pending_sei
, sei_cont
, link
);
307 css_generate_css_crws(0);
310 static void s390_pci_generate_plug_event(uint16_t pec
, uint32_t fh
,
313 s390_pci_generate_event(2, pec
, fh
, fid
, 0, 0);
316 void s390_pci_generate_error_event(uint16_t pec
, uint32_t fh
, uint32_t fid
,
317 uint64_t faddr
, uint32_t e
)
319 s390_pci_generate_event(1, pec
, fh
, fid
, faddr
, e
);
/* zPCI does not use INTx wires; nothing to do. */
static void s390_pci_set_irq(void *opaque, int irq, int level)
{
    /* nothing to do */
}
327 static int s390_pci_map_irq(PCIDevice
*pci_dev
, int irq_num
)
333 static uint64_t s390_pci_get_table_origin(uint64_t iota
)
335 return iota
& ~ZPCI_IOTA_RTTO_FLAG
;
338 static unsigned int calc_rtx(dma_addr_t ptr
)
340 return ((unsigned long) ptr
>> ZPCI_RT_SHIFT
) & ZPCI_INDEX_MASK
;
343 static unsigned int calc_sx(dma_addr_t ptr
)
345 return ((unsigned long) ptr
>> ZPCI_ST_SHIFT
) & ZPCI_INDEX_MASK
;
348 static unsigned int calc_px(dma_addr_t ptr
)
350 return ((unsigned long) ptr
>> TARGET_PAGE_BITS
) & ZPCI_PT_MASK
;
353 static uint64_t get_rt_sto(uint64_t entry
)
355 return ((entry
& ZPCI_TABLE_TYPE_MASK
) == ZPCI_TABLE_TYPE_RTX
)
356 ? (entry
& ZPCI_RTE_ADDR_MASK
)
360 static uint64_t get_st_pto(uint64_t entry
)
362 return ((entry
& ZPCI_TABLE_TYPE_MASK
) == ZPCI_TABLE_TYPE_SX
)
363 ? (entry
& ZPCI_STE_ADDR_MASK
)
367 static bool rt_entry_isvalid(uint64_t entry
)
369 return (entry
& ZPCI_TABLE_VALID_MASK
) == ZPCI_TABLE_VALID
;
372 static bool pt_entry_isvalid(uint64_t entry
)
374 return (entry
& ZPCI_PTE_VALID_MASK
) == ZPCI_PTE_VALID
;
377 static bool entry_isprotected(uint64_t entry
)
379 return (entry
& ZPCI_TABLE_PROT_MASK
) == ZPCI_TABLE_PROTECTED
;
382 /* ett is expected table type, -1 page table, 0 segment table, 1 region table */
383 static uint64_t get_table_index(uint64_t iova
, int8_t ett
)
387 return calc_px(iova
);
389 return calc_sx(iova
);
391 return calc_rtx(iova
);
397 static bool entry_isvalid(uint64_t entry
, int8_t ett
)
401 return pt_entry_isvalid(entry
);
404 return rt_entry_isvalid(entry
);
410 /* Return true if address translation is done */
411 static bool translate_iscomplete(uint64_t entry
, int8_t ett
)
415 return (entry
& ZPCI_TABLE_FC
) ? true : false;
423 static uint64_t get_frame_size(int8_t ett
)
437 static uint64_t get_next_table_origin(uint64_t entry
, int8_t ett
)
441 return entry
& ZPCI_PTE_ADDR_MASK
;
443 return get_st_pto(entry
);
445 return get_rt_sto(entry
);
452 * table_translate: do translation within one table and return the following
455 * @entry: the entry being translated, the result is stored in this.
456 * @to: the address of table origin.
457 * @ett: expected table type, 1 region table, 0 segment table and -1 page table.
460 static uint64_t table_translate(S390IOTLBEntry
*entry
, uint64_t to
, int8_t ett
,
463 uint64_t tx
, te
, nto
= 0;
466 tx
= get_table_index(entry
->iova
, ett
);
467 te
= address_space_ldq(&address_space_memory
, to
+ tx
* sizeof(uint64_t),
468 MEMTXATTRS_UNSPECIFIED
, NULL
);
471 err
= ERR_EVENT_INVALTE
;
475 if (!entry_isvalid(te
, ett
)) {
476 entry
->perm
&= IOMMU_NONE
;
480 if (ett
== ZPCI_ETT_RT
&& ((te
& ZPCI_TABLE_LEN_RTX
) != ZPCI_TABLE_LEN_RTX
481 || te
& ZPCI_TABLE_OFFSET_MASK
)) {
482 err
= ERR_EVENT_INVALTL
;
486 nto
= get_next_table_origin(te
, ett
);
492 if (entry_isprotected(te
)) {
493 entry
->perm
&= IOMMU_RO
;
495 entry
->perm
&= IOMMU_RW
;
498 if (translate_iscomplete(te
, ett
)) {
501 entry
->translated_addr
= te
& ZPCI_PTE_ADDR_MASK
;
504 entry
->translated_addr
= (te
& ZPCI_SFAA_MASK
) |
505 (entry
->iova
& ~ZPCI_SFAA_MASK
);
512 entry
->perm
= IOMMU_NONE
;
515 entry
->len
= get_frame_size(ett
);
519 uint16_t s390_guest_io_table_walk(uint64_t g_iota
, hwaddr addr
,
520 S390IOTLBEntry
*entry
)
522 uint64_t to
= s390_pci_get_table_origin(g_iota
);
526 entry
->iova
= addr
& TARGET_PAGE_MASK
;
527 entry
->translated_addr
= 0;
528 entry
->perm
= IOMMU_RW
;
530 if (entry_isprotected(g_iota
)) {
531 entry
->perm
&= IOMMU_RO
;
535 to
= table_translate(entry
, to
, ett
--, &error
);
541 static IOMMUTLBEntry
s390_translate_iommu(IOMMUMemoryRegion
*mr
, hwaddr addr
,
542 IOMMUAccessFlags flag
, int iommu_idx
)
544 S390PCIIOMMU
*iommu
= container_of(mr
, S390PCIIOMMU
, iommu_mr
);
545 S390IOTLBEntry
*entry
;
546 uint64_t iova
= addr
& TARGET_PAGE_MASK
;
548 IOMMUTLBEntry ret
= {
549 .target_as
= &address_space_memory
,
551 .translated_addr
= 0,
552 .addr_mask
= ~(hwaddr
)0,
556 switch (iommu
->pbdev
->state
) {
557 case ZPCI_FS_ENABLED
:
558 case ZPCI_FS_BLOCKED
:
559 if (!iommu
->enabled
) {
567 trace_s390_pci_iommu_xlate(addr
);
569 if (addr
< iommu
->pba
|| addr
> iommu
->pal
) {
570 error
= ERR_EVENT_OORANGE
;
574 entry
= g_hash_table_lookup(iommu
->iotlb
, &iova
);
576 ret
.iova
= entry
->iova
;
577 ret
.translated_addr
= entry
->translated_addr
;
578 ret
.addr_mask
= entry
->len
- 1;
579 ret
.perm
= entry
->perm
;
582 ret
.addr_mask
= ~TARGET_PAGE_MASK
;
583 ret
.perm
= IOMMU_NONE
;
586 if (flag
!= IOMMU_NONE
&& !(flag
& ret
.perm
)) {
587 error
= ERR_EVENT_TPROTE
;
591 iommu
->pbdev
->state
= ZPCI_FS_ERROR
;
592 s390_pci_generate_error_event(error
, iommu
->pbdev
->fh
,
593 iommu
->pbdev
->fid
, addr
, 0);
598 static void s390_pci_iommu_replay(IOMMUMemoryRegion
*iommu
,
599 IOMMUNotifier
*notifier
)
601 /* It's impossible to plug a pci device on s390x that already has iommu
602 * mappings which need to be replayed, that is due to the "one iommu per
603 * zpci device" construct. But when we support migration of vfio-pci
604 * devices in future, we need to revisit this.
609 static S390PCIIOMMU
*s390_pci_get_iommu(S390pciState
*s
, PCIBus
*bus
,
612 uint64_t key
= (uintptr_t)bus
;
613 S390PCIIOMMUTable
*table
= g_hash_table_lookup(s
->iommu_table
, &key
);
617 table
= g_new0(S390PCIIOMMUTable
, 1);
619 g_hash_table_insert(s
->iommu_table
, &table
->key
, table
);
622 iommu
= table
->iommu
[PCI_SLOT(devfn
)];
624 iommu
= S390_PCI_IOMMU(object_new(TYPE_S390_PCI_IOMMU
));
626 char *mr_name
= g_strdup_printf("iommu-root-%02x:%02x.%01x",
630 char *as_name
= g_strdup_printf("iommu-pci-%02x:%02x.%01x",
634 memory_region_init(&iommu
->mr
, OBJECT(iommu
), mr_name
, UINT64_MAX
);
635 address_space_init(&iommu
->as
, &iommu
->mr
, as_name
);
636 iommu
->iotlb
= g_hash_table_new_full(g_int64_hash
, g_int64_equal
,
638 table
->iommu
[PCI_SLOT(devfn
)] = iommu
;
647 static AddressSpace
*s390_pci_dma_iommu(PCIBus
*bus
, void *opaque
, int devfn
)
649 S390pciState
*s
= opaque
;
650 S390PCIIOMMU
*iommu
= s390_pci_get_iommu(s
, bus
, devfn
);
655 static const PCIIOMMUOps s390_iommu_ops
= {
656 .get_address_space
= s390_pci_dma_iommu
,
659 static uint8_t set_ind_atomic(uint64_t ind_loc
, uint8_t to_be_set
)
661 uint8_t expected
, actual
;
663 /* avoid multiple fetches */
664 uint8_t volatile *ind_addr
;
666 ind_addr
= cpu_physical_memory_map(ind_loc
, &len
, true);
668 s390_pci_generate_error_event(ERR_EVENT_AIRERR
, 0, 0, 0, 0);
674 actual
= qatomic_cmpxchg(ind_addr
, expected
, expected
| to_be_set
);
675 } while (actual
!= expected
);
676 cpu_physical_memory_unmap((void *)ind_addr
, len
, 1, len
);
681 static void s390_msi_ctrl_write(void *opaque
, hwaddr addr
, uint64_t data
,
684 S390PCIBusDevice
*pbdev
= opaque
;
685 uint32_t vec
= data
& ZPCI_MSI_VEC_MASK
;
691 trace_s390_pci_msi_ctrl_write(data
, pbdev
->idx
, vec
);
693 if (pbdev
->state
!= ZPCI_FS_ENABLED
) {
697 ind_bit
= pbdev
->routes
.adapter
.ind_offset
;
698 sum_bit
= pbdev
->routes
.adapter
.summary_offset
;
700 set_ind_atomic(pbdev
->routes
.adapter
.ind_addr
+ (ind_bit
+ vec
) / 8,
701 0x80 >> ((ind_bit
+ vec
) % 8));
702 if (!set_ind_atomic(pbdev
->routes
.adapter
.summary_addr
+ sum_bit
/ 8,
703 0x80 >> (sum_bit
% 8))) {
704 css_adapter_interrupt(CSS_IO_ADAPTER_PCI
, pbdev
->isc
);
708 static uint64_t s390_msi_ctrl_read(void *opaque
, hwaddr addr
, unsigned size
)
713 static const MemoryRegionOps s390_msi_ctrl_ops
= {
714 .write
= s390_msi_ctrl_write
,
715 .read
= s390_msi_ctrl_read
,
716 .endianness
= DEVICE_LITTLE_ENDIAN
,
719 void s390_pci_iommu_enable(S390PCIIOMMU
*iommu
)
722 * The iommu region is initialized against a 0-mapped address space,
723 * so the smallest IOMMU region we can define runs from 0 to the end
724 * of the PCI address space.
726 char *name
= g_strdup_printf("iommu-s390-%04x", iommu
->pbdev
->uid
);
727 memory_region_init_iommu(&iommu
->iommu_mr
, sizeof(iommu
->iommu_mr
),
728 TYPE_S390_IOMMU_MEMORY_REGION
, OBJECT(&iommu
->mr
),
729 name
, iommu
->pal
+ 1);
730 iommu
->enabled
= true;
731 memory_region_add_subregion(&iommu
->mr
, 0, MEMORY_REGION(&iommu
->iommu_mr
));
735 void s390_pci_iommu_disable(S390PCIIOMMU
*iommu
)
737 iommu
->enabled
= false;
738 g_hash_table_remove_all(iommu
->iotlb
);
739 memory_region_del_subregion(&iommu
->mr
, MEMORY_REGION(&iommu
->iommu_mr
));
740 object_unparent(OBJECT(&iommu
->iommu_mr
));
743 static void s390_pci_iommu_free(S390pciState
*s
, PCIBus
*bus
, int32_t devfn
)
745 uint64_t key
= (uintptr_t)bus
;
746 S390PCIIOMMUTable
*table
= g_hash_table_lookup(s
->iommu_table
, &key
);
747 S390PCIIOMMU
*iommu
= table
? table
->iommu
[PCI_SLOT(devfn
)] : NULL
;
749 if (!table
|| !iommu
) {
753 table
->iommu
[PCI_SLOT(devfn
)] = NULL
;
754 g_hash_table_destroy(iommu
->iotlb
);
756 * An attached PCI device may have memory listeners, eg. VFIO PCI.
757 * The associated subregion will already have been unmapped in
758 * s390_pci_iommu_disable in response to the guest deconfigure request.
759 * Remove the listeners now before destroying the address space.
761 address_space_remove_listeners(&iommu
->as
);
762 address_space_destroy(&iommu
->as
);
763 object_unparent(OBJECT(&iommu
->mr
));
764 object_unparent(OBJECT(iommu
));
765 object_unref(OBJECT(iommu
));
768 S390PCIGroup
*s390_group_create(int id
, int host_id
)
771 S390pciState
*s
= s390_get_phb();
773 group
= g_new0(S390PCIGroup
, 1);
775 group
->host_id
= host_id
;
776 QTAILQ_INSERT_TAIL(&s
->zpci_groups
, group
, link
);
780 S390PCIGroup
*s390_group_find(int id
)
783 S390pciState
*s
= s390_get_phb();
785 QTAILQ_FOREACH(group
, &s
->zpci_groups
, link
) {
786 if (group
->id
== id
) {
793 S390PCIGroup
*s390_group_find_host_sim(int host_id
)
796 S390pciState
*s
= s390_get_phb();
798 QTAILQ_FOREACH(group
, &s
->zpci_groups
, link
) {
799 if (group
->id
>= ZPCI_SIM_GRP_START
&& group
->host_id
== host_id
) {
806 static void s390_pci_init_default_group(void)
809 ClpRspQueryPciGrp
*resgrp
;
811 group
= s390_group_create(ZPCI_DEFAULT_FN_GRP
, ZPCI_DEFAULT_FN_GRP
);
812 resgrp
= &group
->zpci_group
;
815 resgrp
->msia
= ZPCI_MSI_ADDR
;
816 resgrp
->mui
= DEFAULT_MUI
;
818 resgrp
->maxstbl
= 128;
820 resgrp
->dtsm
= ZPCI_DTSM
;
823 static void set_pbdev_info(S390PCIBusDevice
*pbdev
)
825 pbdev
->zpci_fn
.sdma
= ZPCI_SDMA_ADDR
;
826 pbdev
->zpci_fn
.edma
= ZPCI_EDMA_ADDR
;
827 pbdev
->zpci_fn
.pchid
= 0;
828 pbdev
->zpci_fn
.pfgid
= ZPCI_DEFAULT_FN_GRP
;
829 pbdev
->zpci_fn
.fid
= pbdev
->fid
;
830 pbdev
->zpci_fn
.uid
= pbdev
->uid
;
831 pbdev
->pci_group
= s390_group_find(ZPCI_DEFAULT_FN_GRP
);
834 static void s390_pcihost_realize(DeviceState
*dev
, Error
**errp
)
838 PCIHostState
*phb
= PCI_HOST_BRIDGE(dev
);
839 S390pciState
*s
= S390_PCI_HOST_BRIDGE(dev
);
841 trace_s390_pcihost("realize");
843 b
= pci_register_root_bus(dev
, NULL
, s390_pci_set_irq
, s390_pci_map_irq
,
844 NULL
, get_system_memory(), get_system_io(), 0,
846 pci_setup_iommu(b
, &s390_iommu_ops
, s
);
849 qbus_set_hotplug_handler(bus
, OBJECT(dev
));
852 s
->bus
= S390_PCI_BUS(qbus_new(TYPE_S390_PCI_BUS
, dev
, NULL
));
853 qbus_set_hotplug_handler(BUS(s
->bus
), OBJECT(dev
));
855 s
->iommu_table
= g_hash_table_new_full(g_int64_hash
, g_int64_equal
,
857 s
->zpci_table
= g_hash_table_new_full(g_int_hash
, g_int_equal
, NULL
, NULL
);
859 s
->next_sim_grp
= ZPCI_SIM_GRP_START
;
860 QTAILQ_INIT(&s
->pending_sei
);
861 QTAILQ_INIT(&s
->zpci_devs
);
862 QTAILQ_INIT(&s
->zpci_dma_limit
);
863 QTAILQ_INIT(&s
->zpci_groups
);
865 s390_pci_init_default_group();
866 css_register_io_adapters(CSS_IO_ADAPTER_PCI
, true, false,
867 S390_ADAPTER_SUPPRESSIBLE
, errp
);
870 static void s390_pcihost_unrealize(DeviceState
*dev
)
873 S390pciState
*s
= S390_PCI_HOST_BRIDGE(dev
);
875 while (!QTAILQ_EMPTY(&s
->zpci_groups
)) {
876 group
= QTAILQ_FIRST(&s
->zpci_groups
);
877 QTAILQ_REMOVE(&s
->zpci_groups
, group
, link
);
881 static int s390_pci_msix_init(S390PCIBusDevice
*pbdev
)
888 pos
= pci_find_capability(pbdev
->pdev
, PCI_CAP_ID_MSIX
);
893 ctrl
= pci_host_config_read_common(pbdev
->pdev
, pos
+ PCI_MSIX_FLAGS
,
894 pci_config_size(pbdev
->pdev
), sizeof(ctrl
));
895 table
= pci_host_config_read_common(pbdev
->pdev
, pos
+ PCI_MSIX_TABLE
,
896 pci_config_size(pbdev
->pdev
), sizeof(table
));
897 pba
= pci_host_config_read_common(pbdev
->pdev
, pos
+ PCI_MSIX_PBA
,
898 pci_config_size(pbdev
->pdev
), sizeof(pba
));
900 pbdev
->msix
.table_bar
= table
& PCI_MSIX_FLAGS_BIRMASK
;
901 pbdev
->msix
.table_offset
= table
& ~PCI_MSIX_FLAGS_BIRMASK
;
902 pbdev
->msix
.pba_bar
= pba
& PCI_MSIX_FLAGS_BIRMASK
;
903 pbdev
->msix
.pba_offset
= pba
& ~PCI_MSIX_FLAGS_BIRMASK
;
904 pbdev
->msix
.entries
= (ctrl
& PCI_MSIX_FLAGS_QSIZE
) + 1;
906 name
= g_strdup_printf("msix-s390-%04x", pbdev
->uid
);
907 memory_region_init_io(&pbdev
->msix_notify_mr
, OBJECT(pbdev
),
908 &s390_msi_ctrl_ops
, pbdev
, name
, TARGET_PAGE_SIZE
);
909 memory_region_add_subregion(&pbdev
->iommu
->mr
,
910 pbdev
->pci_group
->zpci_group
.msia
,
911 &pbdev
->msix_notify_mr
);
917 static void s390_pci_msix_free(S390PCIBusDevice
*pbdev
)
919 if (pbdev
->msix
.entries
== 0) {
923 memory_region_del_subregion(&pbdev
->iommu
->mr
, &pbdev
->msix_notify_mr
);
924 object_unparent(OBJECT(&pbdev
->msix_notify_mr
));
927 static S390PCIBusDevice
*s390_pci_device_new(S390pciState
*s
,
928 const char *target
, Error
**errp
)
930 Error
*local_err
= NULL
;
933 dev
= qdev_try_new(TYPE_S390_PCI_DEVICE
);
935 error_setg(errp
, "zPCI device could not be created");
939 if (!object_property_set_str(OBJECT(dev
), "target", target
, &local_err
)) {
940 object_unparent(OBJECT(dev
));
941 error_propagate_prepend(errp
, local_err
,
942 "zPCI device could not be created: ");
945 if (!qdev_realize_and_unref(dev
, BUS(s
->bus
), &local_err
)) {
946 object_unparent(OBJECT(dev
));
947 error_propagate_prepend(errp
, local_err
,
948 "zPCI device could not be created: ");
952 return S390_PCI_DEVICE(dev
);
955 static bool s390_pci_alloc_idx(S390pciState
*s
, S390PCIBusDevice
*pbdev
)
960 while (s390_pci_find_dev_by_idx(s
, idx
)) {
961 idx
= (idx
+ 1) & FH_MASK_INDEX
;
962 if (idx
== s
->next_idx
) {
971 static void s390_pcihost_pre_plug(HotplugHandler
*hotplug_dev
, DeviceState
*dev
,
974 S390pciState
*s
= S390_PCI_HOST_BRIDGE(hotplug_dev
);
976 if (!s390_has_feat(S390_FEAT_ZPCI
)) {
977 warn_report("Plugging a PCI/zPCI device without the 'zpci' CPU "
978 "feature enabled; the guest will not be able to see/use "
982 if (object_dynamic_cast(OBJECT(dev
), TYPE_PCI_DEVICE
)) {
983 PCIDevice
*pdev
= PCI_DEVICE(dev
);
985 if (pdev
->cap_present
& QEMU_PCI_CAP_MULTIFUNCTION
) {
986 error_setg(errp
, "multifunction not supported in s390");
989 } else if (object_dynamic_cast(OBJECT(dev
), TYPE_S390_PCI_DEVICE
)) {
990 S390PCIBusDevice
*pbdev
= S390_PCI_DEVICE(dev
);
992 if (!s390_pci_alloc_idx(s
, pbdev
)) {
993 error_setg(errp
, "no slot for plugging zpci device");
999 static void s390_pci_update_subordinate(PCIDevice
*dev
, uint32_t nr
)
1003 pci_default_write_config(dev
, PCI_SUBORDINATE_BUS
, nr
, 1);
1004 while (!pci_bus_is_root(pci_get_bus(dev
))) {
1005 dev
= pci_get_bus(dev
)->parent_dev
;
1007 old_nr
= pci_default_read_config(dev
, PCI_SUBORDINATE_BUS
, 1);
1009 pci_default_write_config(dev
, PCI_SUBORDINATE_BUS
, nr
, 1);
1014 static int s390_pci_interp_plug(S390pciState
*s
, S390PCIBusDevice
*pbdev
)
1018 if (!s390_pci_get_host_fh(pbdev
, &fh
)) {
1023 * The host device is already in an enabled state, but we always present
1024 * the initial device state to the guest as disabled (ZPCI_FS_DISABLED).
1025 * Therefore, mask off the enable bit from the passthrough handle until
1026 * the guest issues a CLP SET PCI FN later to enable the device.
1028 pbdev
->fh
= fh
& ~FH_MASK_ENABLE
;
1030 /* Next, see if the idx is already in-use */
1031 idx
= pbdev
->fh
& FH_MASK_INDEX
;
1032 if (pbdev
->idx
!= idx
) {
1033 if (s390_pci_find_dev_by_idx(s
, idx
)) {
1037 * Update the idx entry with the passed through idx
1038 * If the relinquished idx is lower than next_idx, use it
1039 * to replace next_idx
1041 g_hash_table_remove(s
->zpci_table
, &pbdev
->idx
);
1042 if (idx
< s
->next_idx
) {
1046 g_hash_table_insert(s
->zpci_table
, &pbdev
->idx
, pbdev
);
1052 static void s390_pcihost_plug(HotplugHandler
*hotplug_dev
, DeviceState
*dev
,
1055 S390pciState
*s
= S390_PCI_HOST_BRIDGE(hotplug_dev
);
1056 PCIDevice
*pdev
= NULL
;
1057 S390PCIBusDevice
*pbdev
= NULL
;
1060 if (object_dynamic_cast(OBJECT(dev
), TYPE_PCI_BRIDGE
)) {
1061 PCIBridge
*pb
= PCI_BRIDGE(dev
);
1063 pdev
= PCI_DEVICE(dev
);
1064 pci_bridge_map_irq(pb
, dev
->id
, s390_pci_map_irq
);
1065 pci_setup_iommu(&pb
->sec_bus
, &s390_iommu_ops
, s
);
1067 qbus_set_hotplug_handler(BUS(&pb
->sec_bus
), OBJECT(s
));
1069 if (dev
->hotplugged
) {
1070 pci_default_write_config(pdev
, PCI_PRIMARY_BUS
,
1071 pci_dev_bus_num(pdev
), 1);
1073 pci_default_write_config(pdev
, PCI_SECONDARY_BUS
, s
->bus_no
, 1);
1075 s390_pci_update_subordinate(pdev
, s
->bus_no
);
1077 } else if (object_dynamic_cast(OBJECT(dev
), TYPE_PCI_DEVICE
)) {
1078 pdev
= PCI_DEVICE(dev
);
1081 /* In the case the PCI device does not define an id */
1082 /* we generate one based on the PCI address */
1083 dev
->id
= g_strdup_printf("auto_%02x:%02x.%01x",
1084 pci_dev_bus_num(pdev
),
1085 PCI_SLOT(pdev
->devfn
),
1086 PCI_FUNC(pdev
->devfn
));
1089 pbdev
= s390_pci_find_dev_by_target(s
, dev
->id
);
1091 pbdev
= s390_pci_device_new(s
, dev
->id
, errp
);
1098 pbdev
->iommu
= s390_pci_get_iommu(s
, pci_get_bus(pdev
), pdev
->devfn
);
1099 pbdev
->iommu
->pbdev
= pbdev
;
1100 pbdev
->state
= ZPCI_FS_DISABLED
;
1101 set_pbdev_info(pbdev
);
1103 if (object_dynamic_cast(OBJECT(dev
), "vfio-pci")) {
1105 * By default, interpretation is always requested; if the available
1106 * facilities indicate it is not available, fallback to the
1107 * interception model.
1109 if (pbdev
->interp
) {
1110 if (s390_pci_kvm_interp_allowed()) {
1111 rc
= s390_pci_interp_plug(s
, pbdev
);
1113 error_setg(errp
, "Plug failed for zPCI device in "
1114 "interpretation mode: %d", rc
);
1118 trace_s390_pcihost("zPCI interpretation missing");
1119 pbdev
->interp
= false;
1120 pbdev
->forwarding_assist
= false;
1123 pbdev
->iommu
->dma_limit
= s390_pci_start_dma_count(s
, pbdev
);
1124 /* Fill in CLP information passed via the vfio region */
1125 s390_pci_get_clp_info(pbdev
);
1126 if (!pbdev
->interp
) {
1127 /* Do vfio passthrough but intercept for I/O */
1128 pbdev
->fh
|= FH_SHM_VFIO
;
1129 pbdev
->forwarding_assist
= false;
1131 /* Register shutdown notifier and reset callback for ISM devices */
1132 if (pbdev
->pft
== ZPCI_PFT_ISM
) {
1133 pbdev
->shutdown_notifier
.notify
= s390_pci_shutdown_notifier
;
1134 qemu_register_shutdown_notifier(&pbdev
->shutdown_notifier
);
1135 qemu_register_reset(s390_pci_reset_cb
, pbdev
);
1138 pbdev
->fh
|= FH_SHM_EMUL
;
1139 /* Always intercept emulated devices */
1140 pbdev
->interp
= false;
1141 pbdev
->forwarding_assist
= false;
1144 if (s390_pci_msix_init(pbdev
) && !pbdev
->interp
) {
1145 error_setg(errp
, "MSI-X support is mandatory "
1146 "in the S390 architecture");
1150 if (dev
->hotplugged
) {
1151 s390_pci_generate_plug_event(HP_EVENT_TO_CONFIGURED
,
1152 pbdev
->fh
, pbdev
->fid
);
1154 } else if (object_dynamic_cast(OBJECT(dev
), TYPE_S390_PCI_DEVICE
)) {
1155 pbdev
= S390_PCI_DEVICE(dev
);
1157 /* the allocated idx is actually getting used */
1158 s
->next_idx
= (pbdev
->idx
+ 1) & FH_MASK_INDEX
;
1159 pbdev
->fh
= pbdev
->idx
;
1160 QTAILQ_INSERT_TAIL(&s
->zpci_devs
, pbdev
, link
);
1161 g_hash_table_insert(s
->zpci_table
, &pbdev
->idx
, pbdev
);
1163 g_assert_not_reached();
1167 static void s390_pcihost_unplug(HotplugHandler
*hotplug_dev
, DeviceState
*dev
,
1170 S390pciState
*s
= S390_PCI_HOST_BRIDGE(hotplug_dev
);
1171 S390PCIBusDevice
*pbdev
= NULL
;
1173 if (object_dynamic_cast(OBJECT(dev
), TYPE_PCI_DEVICE
)) {
1174 PCIDevice
*pci_dev
= PCI_DEVICE(dev
);
1178 pbdev
= s390_pci_find_dev_by_pci(s
, PCI_DEVICE(dev
));
1181 s390_pci_generate_plug_event(HP_EVENT_STANDBY_TO_RESERVED
,
1182 pbdev
->fh
, pbdev
->fid
);
1183 bus
= pci_get_bus(pci_dev
);
1184 devfn
= pci_dev
->devfn
;
1185 qdev_unrealize(dev
);
1187 s390_pci_msix_free(pbdev
);
1188 s390_pci_iommu_free(s
, bus
, devfn
);
1190 pbdev
->state
= ZPCI_FS_RESERVED
;
1191 } else if (object_dynamic_cast(OBJECT(dev
), TYPE_S390_PCI_DEVICE
)) {
1192 pbdev
= S390_PCI_DEVICE(dev
);
1194 QTAILQ_REMOVE(&s
->zpci_devs
, pbdev
, link
);
1195 g_hash_table_remove(s
->zpci_table
, &pbdev
->idx
);
1196 if (pbdev
->iommu
->dma_limit
) {
1197 s390_pci_end_dma_count(s
, pbdev
->iommu
->dma_limit
);
1199 qdev_unrealize(dev
);
1203 static void s390_pcihost_unplug_request(HotplugHandler
*hotplug_dev
,
1207 S390pciState
*s
= S390_PCI_HOST_BRIDGE(hotplug_dev
);
1208 S390PCIBusDevice
*pbdev
;
1210 if (object_dynamic_cast(OBJECT(dev
), TYPE_PCI_BRIDGE
)) {
1211 error_setg(errp
, "PCI bridge hot unplug currently not supported");
1212 } else if (object_dynamic_cast(OBJECT(dev
), TYPE_PCI_DEVICE
)) {
1214 * Redirect the unplug request to the zPCI device and remember that
1215 * we've checked the PCI device already (to prevent endless recursion).
1217 pbdev
= s390_pci_find_dev_by_pci(s
, PCI_DEVICE(dev
));
1219 pbdev
->pci_unplug_request_processed
= true;
1220 qdev_unplug(DEVICE(pbdev
), errp
);
1221 } else if (object_dynamic_cast(OBJECT(dev
), TYPE_S390_PCI_DEVICE
)) {
1222 pbdev
= S390_PCI_DEVICE(dev
);
1225 * If unplug was initially requested for the zPCI device, we
1226 * first have to redirect to the PCI device, which will in return
1227 * redirect back to us after performing its checks (if the request
1228 * is not blocked, e.g. because it's a PCI bridge).
1230 if (pbdev
->pdev
&& !pbdev
->pci_unplug_request_processed
) {
1231 qdev_unplug(DEVICE(pbdev
->pdev
), errp
);
1234 pbdev
->pci_unplug_request_processed
= false;
1236 switch (pbdev
->state
) {
1237 case ZPCI_FS_STANDBY
:
1238 case ZPCI_FS_RESERVED
:
1239 s390_pci_perform_unplug(pbdev
);
1243 * Allow to send multiple requests, e.g. if the guest crashed
1244 * before releasing the device, we would not be able to send
1245 * another request to the same VM (e.g. fresh OS).
1247 pbdev
->unplug_requested
= true;
1248 s390_pci_generate_plug_event(HP_EVENT_DECONFIGURE_REQUEST
,
1249 pbdev
->fh
, pbdev
->fid
);
1252 g_assert_not_reached();
1256 static void s390_pci_enumerate_bridge(PCIBus
*bus
, PCIDevice
*pdev
,
1259 S390pciState
*s
= opaque
;
1260 PCIBus
*sec_bus
= NULL
;
1262 if ((pci_default_read_config(pdev
, PCI_HEADER_TYPE
, 1) !=
1263 PCI_HEADER_TYPE_BRIDGE
)) {
1268 pci_default_write_config(pdev
, PCI_PRIMARY_BUS
, pci_dev_bus_num(pdev
), 1);
1269 pci_default_write_config(pdev
, PCI_SECONDARY_BUS
, s
->bus_no
, 1);
1270 pci_default_write_config(pdev
, PCI_SUBORDINATE_BUS
, s
->bus_no
, 1);
1272 sec_bus
= pci_bridge_get_sec_bus(PCI_BRIDGE(pdev
));
1277 /* Assign numbers to all child bridges. The last is the highest number. */
1278 pci_for_each_device_under_bus(sec_bus
, s390_pci_enumerate_bridge
, s
);
1279 pci_default_write_config(pdev
, PCI_SUBORDINATE_BUS
, s
->bus_no
, 1);
1282 static void s390_pcihost_reset(DeviceState
*dev
)
1284 S390pciState
*s
= S390_PCI_HOST_BRIDGE(dev
);
1285 PCIBus
*bus
= s
->parent_obj
.bus
;
1286 S390PCIBusDevice
*pbdev
, *next
;
1288 /* Process all pending unplug requests */
1289 QTAILQ_FOREACH_SAFE(pbdev
, &s
->zpci_devs
, link
, next
) {
1290 if (pbdev
->unplug_requested
) {
1291 if (pbdev
->interp
&& (pbdev
->fh
& FH_MASK_ENABLE
)) {
1292 /* Interpreted devices were using interrupt forwarding */
1293 s390_pci_kvm_aif_disable(pbdev
);
1294 } else if (pbdev
->summary_ind
) {
1295 pci_dereg_irqs(pbdev
);
1297 if (pbdev
->iommu
->enabled
) {
1298 pci_dereg_ioat(pbdev
->iommu
);
1300 pbdev
->state
= ZPCI_FS_STANDBY
;
1301 s390_pci_perform_unplug(pbdev
);
1306 * When resetting a PCI bridge, the assigned numbers are set to 0. So
1307 * on every system reset, we also have to reassign numbers.
1310 pci_for_each_device_under_bus(bus
, s390_pci_enumerate_bridge
, s
);
1313 static void s390_pcihost_class_init(ObjectClass
*klass
, void *data
)
1315 DeviceClass
*dc
= DEVICE_CLASS(klass
);
1316 HotplugHandlerClass
*hc
= HOTPLUG_HANDLER_CLASS(klass
);
1318 dc
->reset
= s390_pcihost_reset
;
1319 dc
->realize
= s390_pcihost_realize
;
1320 dc
->unrealize
= s390_pcihost_unrealize
;
1321 hc
->pre_plug
= s390_pcihost_pre_plug
;
1322 hc
->plug
= s390_pcihost_plug
;
1323 hc
->unplug_request
= s390_pcihost_unplug_request
;
1324 hc
->unplug
= s390_pcihost_unplug
;
1325 msi_nonbroken
= true;
1328 static const TypeInfo s390_pcihost_info
= {
1329 .name
= TYPE_S390_PCI_HOST_BRIDGE
,
1330 .parent
= TYPE_PCI_HOST_BRIDGE
,
1331 .instance_size
= sizeof(S390pciState
),
1332 .class_init
= s390_pcihost_class_init
,
1333 .interfaces
= (InterfaceInfo
[]) {
1334 { TYPE_HOTPLUG_HANDLER
},
1339 static const TypeInfo s390_pcibus_info
= {
1340 .name
= TYPE_S390_PCI_BUS
,
1342 .instance_size
= sizeof(S390PCIBus
),
1345 static uint16_t s390_pci_generate_uid(S390pciState
*s
)
1351 if (!s390_pci_find_dev_by_uid(s
, uid
)) {
1354 } while (uid
< ZPCI_MAX_UID
);
1356 return UID_UNDEFINED
;
1359 static uint32_t s390_pci_generate_fid(S390pciState
*s
, Error
**errp
)
1364 if (!s390_pci_find_dev_by_fid(s
, fid
)) {
1367 } while (fid
++ != ZPCI_MAX_FID
);
1369 error_setg(errp
, "no free fid could be found");
1373 static void s390_pci_device_realize(DeviceState
*dev
, Error
**errp
)
1375 S390PCIBusDevice
*zpci
= S390_PCI_DEVICE(dev
);
1376 S390pciState
*s
= s390_get_phb();
1378 if (!zpci
->target
) {
1379 error_setg(errp
, "target must be defined");
1383 if (s390_pci_find_dev_by_target(s
, zpci
->target
)) {
1384 error_setg(errp
, "target %s already has an associated zpci device",
1389 if (zpci
->uid
== UID_UNDEFINED
) {
1390 zpci
->uid
= s390_pci_generate_uid(s
);
1392 error_setg(errp
, "no free uid could be found");
1395 } else if (s390_pci_find_dev_by_uid(s
, zpci
->uid
)) {
1396 error_setg(errp
, "uid %u already in use", zpci
->uid
);
1400 if (!zpci
->fid_defined
) {
1401 Error
*local_error
= NULL
;
1403 zpci
->fid
= s390_pci_generate_fid(s
, &local_error
);
1405 error_propagate(errp
, local_error
);
1408 } else if (s390_pci_find_dev_by_fid(s
, zpci
->fid
)) {
1409 error_setg(errp
, "fid %u already in use", zpci
->fid
);
1413 zpci
->state
= ZPCI_FS_RESERVED
;
1414 zpci
->fmb
.format
= ZPCI_FMB_FORMAT
;
1417 static void s390_pci_device_reset(DeviceState
*dev
)
1419 S390PCIBusDevice
*pbdev
= S390_PCI_DEVICE(dev
);
1421 switch (pbdev
->state
) {
1422 case ZPCI_FS_RESERVED
:
1424 case ZPCI_FS_STANDBY
:
1427 pbdev
->fh
&= ~FH_MASK_ENABLE
;
1428 pbdev
->state
= ZPCI_FS_DISABLED
;
1432 if (pbdev
->interp
&& (pbdev
->fh
& FH_MASK_ENABLE
)) {
1433 /* Interpreted devices were using interrupt forwarding */
1434 s390_pci_kvm_aif_disable(pbdev
);
1435 } else if (pbdev
->summary_ind
) {
1436 pci_dereg_irqs(pbdev
);
1438 if (pbdev
->iommu
->enabled
) {
1439 pci_dereg_ioat(pbdev
->iommu
);
1442 fmb_timer_free(pbdev
);
1445 static void s390_pci_get_fid(Object
*obj
, Visitor
*v
, const char *name
,
1446 void *opaque
, Error
**errp
)
1448 Property
*prop
= opaque
;
1449 uint32_t *ptr
= object_field_prop_ptr(obj
, prop
);
1451 visit_type_uint32(v
, name
, ptr
, errp
);
1454 static void s390_pci_set_fid(Object
*obj
, Visitor
*v
, const char *name
,
1455 void *opaque
, Error
**errp
)
1457 S390PCIBusDevice
*zpci
= S390_PCI_DEVICE(obj
);
1458 Property
*prop
= opaque
;
1459 uint32_t *ptr
= object_field_prop_ptr(obj
, prop
);
1461 if (!visit_type_uint32(v
, name
, ptr
, errp
)) {
1464 zpci
->fid_defined
= true;
1467 static const PropertyInfo s390_pci_fid_propinfo
= {
1469 .get
= s390_pci_get_fid
,
1470 .set
= s390_pci_set_fid
,
1473 #define DEFINE_PROP_S390_PCI_FID(_n, _s, _f) \
1474 DEFINE_PROP(_n, _s, _f, s390_pci_fid_propinfo, uint32_t)
1476 static Property s390_pci_device_properties
[] = {
1477 DEFINE_PROP_UINT16("uid", S390PCIBusDevice
, uid
, UID_UNDEFINED
),
1478 DEFINE_PROP_S390_PCI_FID("fid", S390PCIBusDevice
, fid
),
1479 DEFINE_PROP_STRING("target", S390PCIBusDevice
, target
),
1480 DEFINE_PROP_BOOL("interpret", S390PCIBusDevice
, interp
, true),
1481 DEFINE_PROP_BOOL("forwarding-assist", S390PCIBusDevice
, forwarding_assist
,
1483 DEFINE_PROP_END_OF_LIST(),
1486 static const VMStateDescription s390_pci_device_vmstate
= {
1487 .name
= TYPE_S390_PCI_DEVICE
,
1489 * TODO: add state handling here, so migration works at least with
1490 * emulated pci devices on s390x
1495 static void s390_pci_device_class_init(ObjectClass
*klass
, void *data
)
1497 DeviceClass
*dc
= DEVICE_CLASS(klass
);
1499 dc
->desc
= "zpci device";
1500 set_bit(DEVICE_CATEGORY_MISC
, dc
->categories
);
1501 dc
->reset
= s390_pci_device_reset
;
1502 dc
->bus_type
= TYPE_S390_PCI_BUS
;
1503 dc
->realize
= s390_pci_device_realize
;
1504 device_class_set_props(dc
, s390_pci_device_properties
);
1505 dc
->vmsd
= &s390_pci_device_vmstate
;
1508 static const TypeInfo s390_pci_device_info
= {
1509 .name
= TYPE_S390_PCI_DEVICE
,
1510 .parent
= TYPE_DEVICE
,
1511 .instance_size
= sizeof(S390PCIBusDevice
),
1512 .class_init
= s390_pci_device_class_init
,
1515 static const TypeInfo s390_pci_iommu_info
= {
1516 .name
= TYPE_S390_PCI_IOMMU
,
1517 .parent
= TYPE_OBJECT
,
1518 .instance_size
= sizeof(S390PCIIOMMU
),
1521 static void s390_iommu_memory_region_class_init(ObjectClass
*klass
, void *data
)
1523 IOMMUMemoryRegionClass
*imrc
= IOMMU_MEMORY_REGION_CLASS(klass
);
1525 imrc
->translate
= s390_translate_iommu
;
1526 imrc
->replay
= s390_pci_iommu_replay
;
1529 static const TypeInfo s390_iommu_memory_region_info
= {
1530 .parent
= TYPE_IOMMU_MEMORY_REGION
,
1531 .name
= TYPE_S390_IOMMU_MEMORY_REGION
,
1532 .class_init
= s390_iommu_memory_region_class_init
,
1535 static void s390_pci_register_types(void)
1537 type_register_static(&s390_pcihost_info
);
1538 type_register_static(&s390_pcibus_info
);
1539 type_register_static(&s390_pci_device_info
);
1540 type_register_static(&s390_pci_iommu_info
);
1541 type_register_static(&s390_iommu_memory_region_info
);
1544 type_init(s390_pci_register_types
)