/*
 * s390 PCI BUS
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
 *            Hong Bo Li <lihbbj@cn.ibm.com>
 *            Yi Min Zhao <zyimin@cn.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-kvm.h"
#include "hw/s390x/s390-pci-vfio.h"
#include "hw/pci/pci_bus.h"
#include "hw/qdev-properties.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/msi.h"
#include "qemu/error-report.h"
#include "qemu/module.h"

#ifndef DEBUG_S390PCI_BUS
#define DEBUG_S390PCI_BUS  0
#endif

#define DPRINTF(fmt, ...)                                         \
    do {                                                          \
        if (DEBUG_S390PCI_BUS) {                                  \
            fprintf(stderr, "S390pci-bus: " fmt, ## __VA_ARGS__); \
        }                                                         \
    } while (0)

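/*
 * Return the (single) zPCI host bridge. The object path is resolved once
 * and cached; the bridge is expected to exist whenever zPCI code runs.
 */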
S390pciState *s390_get_phb(void)
{
    static S390pciState *phb;

    if (!phb) {
        phb = S390_PCI_HOST_BRIDGE(
            object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL));
        assert(phb != NULL);
    }

    return phb;
}

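/*
 * Fill a CHSC store-event-information NT2 response from the oldest pending
 * SEI container, distinguishing error events (cc 1) from availability
 * events (cc 2). Returns 0 if an event was delivered, 1 if none is pending.
 */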
int pci_chsc_sei_nt2_get_event(void *res)
{
    ChscSeiNt2Res *nt2_res = (ChscSeiNt2Res *)res;
    PciCcdfAvail *accdf;
    PciCcdfErr *eccdf;
    int rc = 1;
    SeiContainer *sei_cont;
    S390pciState *s = s390_get_phb();

    sei_cont = QTAILQ_FIRST(&s->pending_sei);
    if (sei_cont) {
        QTAILQ_REMOVE(&s->pending_sei, sei_cont, link);
        nt2_res->nt = 2;
        nt2_res->cc = sei_cont->cc;
        nt2_res->length = cpu_to_be16(sizeof(ChscSeiNt2Res));
        switch (sei_cont->cc) {
        case 1: /* error event */
            eccdf = (PciCcdfErr *)nt2_res->ccdf;
            eccdf->fid = cpu_to_be32(sei_cont->fid);
            eccdf->fh = cpu_to_be32(sei_cont->fh);
            eccdf->e = cpu_to_be32(sei_cont->e);
            eccdf->faddr = cpu_to_be64(sei_cont->faddr);
            eccdf->pec = cpu_to_be16(sei_cont->pec);
            break;
        case 2: /* availability event */
            accdf = (PciCcdfAvail *)nt2_res->ccdf;
            accdf->fid = cpu_to_be32(sei_cont->fid);
            accdf->fh = cpu_to_be32(sei_cont->fh);
            accdf->pec = cpu_to_be16(sei_cont->pec);
            break;
        default:
            abort();
        }
        g_free(sei_cont);
        rc = 0;
    }

    return rc;
}

int pci_chsc_sei_nt2_have_event(void)
{
    S390pciState *s = s390_get_phb();

    return !QTAILQ_EMPTY(&s->pending_sei);
}

S390PCIBusDevice *s390_pci_find_next_avail_dev(S390pciState *s,
                                               S390PCIBusDevice *pbdev)
{
    S390PCIBusDevice *ret = pbdev ? QTAILQ_NEXT(pbdev, link) :
        QTAILQ_FIRST(&s->zpci_devs);

    while (ret && ret->state == ZPCI_FS_RESERVED) {
        ret = QTAILQ_NEXT(ret, link);
    }

    return ret;
}

S390PCIBusDevice *s390_pci_find_dev_by_fid(S390pciState *s, uint32_t fid)
{
    S390PCIBusDevice *pbdev;

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (pbdev->fid == fid) {
            return pbdev;
        }
    }

    return NULL;
}

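/*
 * SCLP "configure I/O adapter" handler: look up the function by FID and,
 * if it is in standby, move it to the disabled (configured) state.
 */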
void s390_pci_sclp_configure(SCCB *sccb)
{
    IoaCfgSccb *psccb = (IoaCfgSccb *)sccb;
    S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(s390_get_phb(),
                                                       be32_to_cpu(psccb->aid));
    uint16_t rc;

    if (!pbdev) {
        DPRINTF("sclp config no dev found\n");
        rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED;
        goto out;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        rc = SCLP_RC_ADAPTER_IN_RESERVED_STATE;
        break;
    case ZPCI_FS_STANDBY:
        pbdev->state = ZPCI_FS_DISABLED;
        rc = SCLP_RC_NORMAL_COMPLETION;
        break;
    default:
        rc = SCLP_RC_NO_ACTION_REQUIRED;
    }
out:
    psccb->header.response_code = cpu_to_be16(rc);
}

static void s390_pci_perform_unplug(S390PCIBusDevice *pbdev)
{
    HotplugHandler *hotplug_ctrl;

    /* Unplug the PCI device */
    if (pbdev->pdev) {
        DeviceState *pdev = DEVICE(pbdev->pdev);

        hotplug_ctrl = qdev_get_hotplug_handler(pdev);
        hotplug_handler_unplug(hotplug_ctrl, pdev, &error_abort);
        object_unparent(OBJECT(pdev));
    }

    /* Unplug the zPCI device */
    hotplug_ctrl = qdev_get_hotplug_handler(DEVICE(pbdev));
    hotplug_handler_unplug(hotplug_ctrl, DEVICE(pbdev), &error_abort);
    object_unparent(OBJECT(pbdev));
}

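/*
 * SCLP "deconfigure I/O adapter" handler: tear down any registered
 * interrupts and I/O address translation, return the function to standby,
 * and finish a deferred hot unplug if one was requested.
 */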
void s390_pci_sclp_deconfigure(SCCB *sccb)
{
    IoaCfgSccb *psccb = (IoaCfgSccb *)sccb;
    S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(s390_get_phb(),
                                                       be32_to_cpu(psccb->aid));
    uint16_t rc;

    if (!pbdev) {
        DPRINTF("sclp deconfig no dev found\n");
        rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED;
        goto out;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        rc = SCLP_RC_ADAPTER_IN_RESERVED_STATE;
        break;
    case ZPCI_FS_STANDBY:
        rc = SCLP_RC_NO_ACTION_REQUIRED;
        break;
    default:
        if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
            /* Interpreted devices were using interrupt forwarding */
            s390_pci_kvm_aif_disable(pbdev);
        } else if (pbdev->summary_ind) {
            pci_dereg_irqs(pbdev);
        }
        if (pbdev->iommu->enabled) {
            pci_dereg_ioat(pbdev->iommu);
        }
        pbdev->state = ZPCI_FS_STANDBY;
        rc = SCLP_RC_NORMAL_COMPLETION;

        if (pbdev->unplug_requested) {
            s390_pci_perform_unplug(pbdev);
        }
    }
out:
    psccb->header.response_code = cpu_to_be16(rc);
}

static S390PCIBusDevice *s390_pci_find_dev_by_uid(S390pciState *s, uint16_t uid)
{
    S390PCIBusDevice *pbdev;

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (pbdev->uid == uid) {
            return pbdev;
        }
    }

    return NULL;
}

S390PCIBusDevice *s390_pci_find_dev_by_target(S390pciState *s,
                                              const char *target)
{
    S390PCIBusDevice *pbdev;

    if (!target) {
        return NULL;
    }

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (!strcmp(pbdev->target, target)) {
            return pbdev;
        }
    }

    return NULL;
}

static S390PCIBusDevice *s390_pci_find_dev_by_pci(S390pciState *s,
                                                  PCIDevice *pci_dev)
{
    S390PCIBusDevice *pbdev;

    if (!pci_dev) {
        return NULL;
    }

    QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
        if (pbdev->pdev == pci_dev) {
            return pbdev;
        }
    }

    return NULL;
}

S390PCIBusDevice *s390_pci_find_dev_by_idx(S390pciState *s, uint32_t idx)
{
    return g_hash_table_lookup(s->zpci_table, &idx);
}

S390PCIBusDevice *s390_pci_find_dev_by_fh(S390pciState *s, uint32_t fh)
{
    uint32_t idx = FH_MASK_INDEX & fh;
    S390PCIBusDevice *pbdev = s390_pci_find_dev_by_idx(s, idx);

    if (pbdev && pbdev->fh == fh) {
        return pbdev;
    }

    return NULL;
}

static void s390_pci_generate_event(uint8_t cc, uint16_t pec, uint32_t fh,
                                    uint32_t fid, uint64_t faddr, uint32_t e)
{
    SeiContainer *sei_cont;
    S390pciState *s = s390_get_phb();

    sei_cont = g_new0(SeiContainer, 1);
    sei_cont->fh = fh;
    sei_cont->fid = fid;
    sei_cont->cc = cc;
    sei_cont->pec = pec;
    sei_cont->faddr = faddr;
    sei_cont->e = e;

    QTAILQ_INSERT_TAIL(&s->pending_sei, sei_cont, link);
    css_generate_css_crws(0);
}

static void s390_pci_generate_plug_event(uint16_t pec, uint32_t fh,
                                         uint32_t fid)
{
    s390_pci_generate_event(2, pec, fh, fid, 0, 0);
}

void s390_pci_generate_error_event(uint16_t pec, uint32_t fh, uint32_t fid,
                                   uint64_t faddr, uint32_t e)
{
    s390_pci_generate_event(1, pec, fh, fid, faddr, e);
}

static void s390_pci_set_irq(void *opaque, int irq, int level)
{
    /* nothing to do */
}

static int s390_pci_map_irq(PCIDevice *pci_dev, int irq_num)
{
    /* nothing to do */
    return 0;
}

static uint64_t s390_pci_get_table_origin(uint64_t iota)
{
    return iota & ~ZPCI_IOTA_RTTO_FLAG;
}

static unsigned int calc_rtx(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
}

static unsigned int calc_sx(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
}

static unsigned int calc_px(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> TARGET_PAGE_BITS) & ZPCI_PT_MASK;
}

static uint64_t get_rt_sto(uint64_t entry)
{
    return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
           ? (entry & ZPCI_RTE_ADDR_MASK)
           : 0;
}

static uint64_t get_st_pto(uint64_t entry)
{
    return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
           ? (entry & ZPCI_STE_ADDR_MASK)
           : 0;
}

static bool rt_entry_isvalid(uint64_t entry)
{
    return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
}

static bool pt_entry_isvalid(uint64_t entry)
{
    return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
}

static bool entry_isprotected(uint64_t entry)
{
    return (entry & ZPCI_TABLE_PROT_MASK) == ZPCI_TABLE_PROTECTED;
}

/* ett is expected table type, -1 page table, 0 segment table, 1 region table */
static uint64_t get_table_index(uint64_t iova, int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return calc_px(iova);
    case ZPCI_ETT_ST:
        return calc_sx(iova);
    case ZPCI_ETT_RT:
        return calc_rtx(iova);
    }

    return -1;
}

static bool entry_isvalid(uint64_t entry, int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return pt_entry_isvalid(entry);
    case ZPCI_ETT_ST:
    case ZPCI_ETT_RT:
        return rt_entry_isvalid(entry);
    }

    return false;
}

/* Return true if address translation is done */
static bool translate_iscomplete(uint64_t entry, int8_t ett)
{
    switch (ett) {
    case 0:
        /* Segment table entry: final only if the frame-control bit is set */
        return (entry & ZPCI_TABLE_FC) ? true : false;
    case 1:
        /* Region table entry: never final */
        return false;
    }

    /* Page table entries always complete the translation */
    return true;
}

static uint64_t get_frame_size(int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return 1ULL << 12;
    case ZPCI_ETT_ST:
        return 1ULL << 20;
    case ZPCI_ETT_RT:
        return 1ULL << 31;
    }

    return 0;
}

static uint64_t get_next_table_origin(uint64_t entry, int8_t ett)
{
    switch (ett) {
    case ZPCI_ETT_PT:
        return entry & ZPCI_PTE_ADDR_MASK;
    case ZPCI_ETT_ST:
        return get_st_pto(entry);
    case ZPCI_ETT_RT:
        return get_rt_sto(entry);
    }

    return 0;
}

/**
 * table_translate: do translation within one table and return the following
 * table origin
 *
 * @entry: the entry being translated, the result is stored in this.
 * @to: the address of table origin.
 * @ett: expected table type, 1 region table, 0 segment table and -1 page table.
 * @error: error code
 */
static uint64_t table_translate(S390IOTLBEntry *entry, uint64_t to, int8_t ett,
                                uint16_t *error)
{
    uint64_t tx, te, nto = 0;
    uint16_t err = 0;

    tx = get_table_index(entry->iova, ett);
    te = address_space_ldq(&address_space_memory, to + tx * sizeof(uint64_t),
                           MEMTXATTRS_UNSPECIFIED, NULL);

    if (!te) {
        err = ERR_EVENT_INVALTE;
        goto out;
    }

    if (!entry_isvalid(te, ett)) {
        entry->perm &= IOMMU_NONE;
        goto out;
    }

    if (ett == ZPCI_ETT_RT && ((te & ZPCI_TABLE_LEN_RTX) != ZPCI_TABLE_LEN_RTX
                               || te & ZPCI_TABLE_OFFSET_MASK)) {
        err = ERR_EVENT_INVALTL;
        goto out;
    }

    nto = get_next_table_origin(te, ett);
    if (!nto) {
        err = ERR_EVENT_TT;
        goto out;
    }

    if (entry_isprotected(te)) {
        entry->perm &= IOMMU_RO;
    } else {
        entry->perm &= IOMMU_RW;
    }

    if (translate_iscomplete(te, ett)) {
        switch (ett) {
        case ZPCI_ETT_PT:
            entry->translated_addr = te & ZPCI_PTE_ADDR_MASK;
            break;
        case ZPCI_ETT_ST:
            entry->translated_addr = (te & ZPCI_SFAA_MASK) |
                                     (entry->iova & ~ZPCI_SFAA_MASK);
            break;
        }
        nto = 0;
    }
out:
    if (err) {
        entry->perm = IOMMU_NONE;
        *error = err;
    }
    entry->len = get_frame_size(ett);
    return nto;
}

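/*
 * Walk the guest DMA translation tables for one IOVA, starting at the
 * region table indicated by the guest IOTA and descending until a table
 * level reports the translation complete. Returns 0 on success or an
 * ERR_EVENT_* code describing the failure.
 */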
uint16_t s390_guest_io_table_walk(uint64_t g_iota, hwaddr addr,
                                  S390IOTLBEntry *entry)
{
    uint64_t to = s390_pci_get_table_origin(g_iota);
    int8_t ett = 1;
    uint16_t error = 0;

    entry->iova = addr & TARGET_PAGE_MASK;
    entry->translated_addr = 0;
    entry->perm = IOMMU_RW;

    if (entry_isprotected(g_iota)) {
        entry->perm &= IOMMU_RO;
    }

    while (to) {
        to = table_translate(entry, to, ett--, &error);
    }

    return error;
}

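/*
 * IOMMU translate callback. Only functions in the enabled or blocked state
 * with translation enabled are handled; accesses outside the PBA/PAL window
 * or not permitted by the cached IOTLB entry put the function into the
 * error state and raise an error event for the guest.
 */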
static IOMMUTLBEntry s390_translate_iommu(IOMMUMemoryRegion *mr, hwaddr addr,
                                          IOMMUAccessFlags flag, int iommu_idx)
{
    S390PCIIOMMU *iommu = container_of(mr, S390PCIIOMMU, iommu_mr);
    S390IOTLBEntry *entry;
    uint64_t iova = addr & TARGET_PAGE_MASK;
    uint16_t error = 0;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = 0,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    switch (iommu->pbdev->state) {
    case ZPCI_FS_ENABLED:
    case ZPCI_FS_BLOCKED:
        if (!iommu->enabled) {
            return ret;
        }
        break;
    default:
        return ret;
    }

    DPRINTF("iommu trans addr 0x%" PRIx64 "\n", addr);

    if (addr < iommu->pba || addr > iommu->pal) {
        error = ERR_EVENT_OORANGE;
        goto err;
    }

    entry = g_hash_table_lookup(iommu->iotlb, &iova);
    if (entry) {
        ret.iova = entry->iova;
        ret.translated_addr = entry->translated_addr;
        ret.addr_mask = entry->len - 1;
        ret.perm = entry->perm;
    } else {
        ret.iova = iova;
        ret.addr_mask = ~TARGET_PAGE_MASK;
        ret.perm = IOMMU_NONE;
    }

    if (flag != IOMMU_NONE && !(flag & ret.perm)) {
        error = ERR_EVENT_TPROTE;
    }
err:
    if (error) {
        iommu->pbdev->state = ZPCI_FS_ERROR;
        s390_pci_generate_error_event(error, iommu->pbdev->fh,
                                      iommu->pbdev->fid, addr, 0);
    }
    return ret;
}

static void s390_pci_iommu_replay(IOMMUMemoryRegion *iommu,
                                  IOMMUNotifier *notifier)
{
    /* It's impossible to plug a pci device on s390x that already has iommu
     * mappings which need to be replayed, that is due to the "one iommu per
     * zpci device" construct. But when we support migration of vfio-pci
     * devices in future, we need to revisit this.
     */
    return;
}

static S390PCIIOMMU *s390_pci_get_iommu(S390pciState *s, PCIBus *bus,
                                        int devfn)
{
    uint64_t key = (uintptr_t)bus;
    S390PCIIOMMUTable *table = g_hash_table_lookup(s->iommu_table, &key);
    S390PCIIOMMU *iommu;

    if (!table) {
        table = g_new0(S390PCIIOMMUTable, 1);
        table->key = key;
        g_hash_table_insert(s->iommu_table, &table->key, table);
    }

    iommu = table->iommu[PCI_SLOT(devfn)];
    if (!iommu) {
        iommu = S390_PCI_IOMMU(object_new(TYPE_S390_PCI_IOMMU));

        char *mr_name = g_strdup_printf("iommu-root-%02x:%02x.%01x",
                                        pci_bus_num(bus),
                                        PCI_SLOT(devfn),
                                        PCI_FUNC(devfn));
        char *as_name = g_strdup_printf("iommu-pci-%02x:%02x.%01x",
                                        pci_bus_num(bus),
                                        PCI_SLOT(devfn),
                                        PCI_FUNC(devfn));
        memory_region_init(&iommu->mr, OBJECT(iommu), mr_name, UINT64_MAX);
        address_space_init(&iommu->as, &iommu->mr, as_name);
        iommu->iotlb = g_hash_table_new_full(g_int64_hash, g_int64_equal,
                                             NULL, g_free);
        table->iommu[PCI_SLOT(devfn)] = iommu;

        g_free(mr_name);
        g_free(as_name);
    }

    return iommu;
}

static AddressSpace *s390_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    S390pciState *s = opaque;
    S390PCIIOMMU *iommu = s390_pci_get_iommu(s, bus, devfn);

    return &iommu->as;
}

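/*
 * Atomically OR indicator bits into a byte of guest memory, using a
 * compare-and-swap loop so concurrent updates are not lost. Returns the
 * previous byte value, or -1 (0xff) if the location could not be mapped.
 */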
static uint8_t set_ind_atomic(uint64_t ind_loc, uint8_t to_be_set)
{
    uint8_t expected, actual;
    hwaddr len = 1;
    /* avoid multiple fetches */
    uint8_t volatile *ind_addr;

    ind_addr = cpu_physical_memory_map(ind_loc, &len, true);
    if (!ind_addr) {
        s390_pci_generate_error_event(ERR_EVENT_AIRERR, 0, 0, 0, 0);
        return -1;
    }
    actual = *ind_addr;
    do {
        expected = actual;
        actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set);
    } while (actual != expected);
    cpu_physical_memory_unmap((void *)ind_addr, len, 1, len);

    return actual;
}

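/*
 * Trapped MSI-X message write: convert the MSI vector into an adapter
 * interrupt by setting the per-vector bit in the adapter indicators and
 * the summary bit, injecting the adapter interrupt only if the summary
 * bit was not already pending.
 */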
static void s390_msi_ctrl_write(void *opaque, hwaddr addr, uint64_t data,
                                unsigned int size)
{
    S390PCIBusDevice *pbdev = opaque;
    uint32_t vec = data & ZPCI_MSI_VEC_MASK;
    uint64_t ind_bit;
    uint32_t sum_bit;

    assert(pbdev);
    DPRINTF("write_msix data 0x%" PRIx64 " idx %d vec 0x%x\n", data,
            pbdev->idx, vec);

    if (pbdev->state != ZPCI_FS_ENABLED) {
        return;
    }

    ind_bit = pbdev->routes.adapter.ind_offset;
    sum_bit = pbdev->routes.adapter.summary_offset;

    set_ind_atomic(pbdev->routes.adapter.ind_addr + (ind_bit + vec) / 8,
                   0x80 >> ((ind_bit + vec) % 8));
    if (!set_ind_atomic(pbdev->routes.adapter.summary_addr + sum_bit / 8,
                        0x80 >> (sum_bit % 8))) {
        css_adapter_interrupt(CSS_IO_ADAPTER_PCI, pbdev->isc);
    }
}

static uint64_t s390_msi_ctrl_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0xffffffff;
}

static const MemoryRegionOps s390_msi_ctrl_ops = {
    .write = s390_msi_ctrl_write,
    .read = s390_msi_ctrl_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

void s390_pci_iommu_enable(S390PCIIOMMU *iommu)
{
    /*
     * The iommu region is initialized against a 0-mapped address space,
     * so the smallest IOMMU region we can define runs from 0 to the end
     * of the PCI address space.
     */
    char *name = g_strdup_printf("iommu-s390-%04x", iommu->pbdev->uid);
    memory_region_init_iommu(&iommu->iommu_mr, sizeof(iommu->iommu_mr),
                             TYPE_S390_IOMMU_MEMORY_REGION, OBJECT(&iommu->mr),
                             name, iommu->pal + 1);
    iommu->enabled = true;
    memory_region_add_subregion(&iommu->mr, 0, MEMORY_REGION(&iommu->iommu_mr));
    g_free(name);
}

void s390_pci_iommu_disable(S390PCIIOMMU *iommu)
{
    iommu->enabled = false;
    g_hash_table_remove_all(iommu->iotlb);
    memory_region_del_subregion(&iommu->mr, MEMORY_REGION(&iommu->iommu_mr));
    object_unparent(OBJECT(&iommu->iommu_mr));
}

static void s390_pci_iommu_free(S390pciState *s, PCIBus *bus, int32_t devfn)
{
    uint64_t key = (uintptr_t)bus;
    S390PCIIOMMUTable *table = g_hash_table_lookup(s->iommu_table, &key);
    S390PCIIOMMU *iommu = table ? table->iommu[PCI_SLOT(devfn)] : NULL;

    if (!table || !iommu) {
        return;
    }

    table->iommu[PCI_SLOT(devfn)] = NULL;
    g_hash_table_destroy(iommu->iotlb);
    /*
     * An attached PCI device may have memory listeners, eg. VFIO PCI.
     * The associated subregion will already have been unmapped in
     * s390_pci_iommu_disable in response to the guest deconfigure request.
     * Remove the listeners now before destroying the address space.
     */
    address_space_remove_listeners(&iommu->as);
    address_space_destroy(&iommu->as);
    object_unparent(OBJECT(&iommu->mr));
    object_unparent(OBJECT(iommu));
    object_unref(OBJECT(iommu));
}

S390PCIGroup *s390_group_create(int id)
{
    S390PCIGroup *group;
    S390pciState *s = s390_get_phb();

    group = g_new0(S390PCIGroup, 1);
    group->id = id;
    QTAILQ_INSERT_TAIL(&s->zpci_groups, group, link);
    return group;
}

S390PCIGroup *s390_group_find(int id)
{
    S390PCIGroup *group;
    S390pciState *s = s390_get_phb();

    QTAILQ_FOREACH(group, &s->zpci_groups, link) {
        if (group->id == id) {
            return group;
        }
    }
    return NULL;
}

static void s390_pci_init_default_group(void)
{
    S390PCIGroup *group;
    ClpRspQueryPciGrp *resgrp;

    group = s390_group_create(ZPCI_DEFAULT_FN_GRP);
    resgrp = &group->zpci_group;
    resgrp->fr = 1;
    resgrp->dasm = 0;
    resgrp->msia = ZPCI_MSI_ADDR;
    resgrp->mui = DEFAULT_MUI;
    resgrp->i = 128;
    resgrp->maxstbl = 128;
    resgrp->version = 0;
    resgrp->dtsm = ZPCI_DTSM;
}

static void set_pbdev_info(S390PCIBusDevice *pbdev)
{
    pbdev->zpci_fn.sdma = ZPCI_SDMA_ADDR;
    pbdev->zpci_fn.edma = ZPCI_EDMA_ADDR;
    pbdev->zpci_fn.pchid = 0;
    pbdev->zpci_fn.pfgid = ZPCI_DEFAULT_FN_GRP;
    pbdev->zpci_fn.fid = pbdev->fid;
    pbdev->zpci_fn.uid = pbdev->uid;
    pbdev->pci_group = s390_group_find(ZPCI_DEFAULT_FN_GRP);
}

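/*
 * Realize the zPCI host bridge: register the root PCI bus with the zPCI
 * IOMMU hook, create the s390 PCI bus that carries the zpci devices, set
 * up the lookup tables and queues, and register the PCI I/O adapter with
 * the channel subsystem.
 */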
static void s390_pcihost_realize(DeviceState *dev, Error **errp)
{
    PCIBus *b;
    BusState *bus;
    PCIHostState *phb = PCI_HOST_BRIDGE(dev);
    S390pciState *s = S390_PCI_HOST_BRIDGE(dev);

    DPRINTF("host_init\n");

    b = pci_register_root_bus(dev, NULL, s390_pci_set_irq, s390_pci_map_irq,
                              NULL, get_system_memory(), get_system_io(), 0,
                              64, TYPE_PCI_BUS);
    pci_setup_iommu(b, s390_pci_dma_iommu, s);

    bus = BUS(b);
    qbus_set_hotplug_handler(bus, OBJECT(dev));
    phb->bus = b;

    s->bus = S390_PCI_BUS(qbus_new(TYPE_S390_PCI_BUS, dev, NULL));
    qbus_set_hotplug_handler(BUS(s->bus), OBJECT(dev));

    s->iommu_table = g_hash_table_new_full(g_int64_hash, g_int64_equal,
                                           NULL, g_free);
    s->zpci_table = g_hash_table_new_full(g_int_hash, g_int_equal, NULL, NULL);
    s->bus_no = 0;
    QTAILQ_INIT(&s->pending_sei);
    QTAILQ_INIT(&s->zpci_devs);
    QTAILQ_INIT(&s->zpci_dma_limit);
    QTAILQ_INIT(&s->zpci_groups);

    s390_pci_init_default_group();
    css_register_io_adapters(CSS_IO_ADAPTER_PCI, true, false,
                             S390_ADAPTER_SUPPRESSIBLE, errp);
}

static void s390_pcihost_unrealize(DeviceState *dev)
{
    S390PCIGroup *group;
    S390pciState *s = S390_PCI_HOST_BRIDGE(dev);

    while (!QTAILQ_EMPTY(&s->zpci_groups)) {
        group = QTAILQ_FIRST(&s->zpci_groups);
        QTAILQ_REMOVE(&s->zpci_groups, group, link);
    }
}

static int s390_pci_msix_init(S390PCIBusDevice *pbdev)
{
    char *name;
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;

    pos = pci_find_capability(pbdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return -1;
    }

    ctrl = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_FLAGS,
                                       pci_config_size(pbdev->pdev),
                                       sizeof(ctrl));
    table = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_TABLE,
                                        pci_config_size(pbdev->pdev),
                                        sizeof(table));
    pba = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_PBA,
                                      pci_config_size(pbdev->pdev),
                                      sizeof(pba));

    pbdev->msix.table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    name = g_strdup_printf("msix-s390-%04x", pbdev->uid);
    memory_region_init_io(&pbdev->msix_notify_mr, OBJECT(pbdev),
                          &s390_msi_ctrl_ops, pbdev, name, TARGET_PAGE_SIZE);
    memory_region_add_subregion(&pbdev->iommu->mr,
                                pbdev->pci_group->zpci_group.msia,
                                &pbdev->msix_notify_mr);
    g_free(name);

    return 0;
}

static void s390_pci_msix_free(S390PCIBusDevice *pbdev)
{
    if (pbdev->msix.entries == 0) {
        return;
    }

    memory_region_del_subregion(&pbdev->iommu->mr, &pbdev->msix_notify_mr);
    object_unparent(OBJECT(&pbdev->msix_notify_mr));
}

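/*
 * Create an implicit zpci device for a PCI device that was plugged without
 * an explicitly defined zpci companion, using the PCI device's id as target.
 */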
static S390PCIBusDevice *s390_pci_device_new(S390pciState *s,
                                             const char *target, Error **errp)
{
    Error *local_err = NULL;
    DeviceState *dev;

    dev = qdev_try_new(TYPE_S390_PCI_DEVICE);
    if (!dev) {
        error_setg(errp, "zPCI device could not be created");
        return NULL;
    }

    if (!object_property_set_str(OBJECT(dev), "target", target, &local_err)) {
        object_unparent(OBJECT(dev));
        error_propagate_prepend(errp, local_err,
                                "zPCI device could not be created: ");
        return NULL;
    }
    if (!qdev_realize_and_unref(dev, BUS(s->bus), &local_err)) {
        object_unparent(OBJECT(dev));
        error_propagate_prepend(errp, local_err,
                                "zPCI device could not be created: ");
        return NULL;
    }

    return S390_PCI_DEVICE(dev);
}

static bool s390_pci_alloc_idx(S390pciState *s, S390PCIBusDevice *pbdev)
{
    uint32_t idx;

    idx = s->next_idx;
    while (s390_pci_find_dev_by_idx(s, idx)) {
        idx = (idx + 1) & FH_MASK_INDEX;
        if (idx == s->next_idx) {
            return false;
        }
    }

    pbdev->idx = idx;
    return true;
}

static void s390_pcihost_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);

    if (!s390_has_feat(S390_FEAT_ZPCI)) {
        warn_report("Plugging a PCI/zPCI device without the 'zpci' CPU "
                    "feature enabled; the guest will not be able to see/use "
                    "this device");
    }

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pdev = PCI_DEVICE(dev);

        if (pdev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
            error_setg(errp, "multifunction not supported in s390");
            return;
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        S390PCIBusDevice *pbdev = S390_PCI_DEVICE(dev);

        if (!s390_pci_alloc_idx(s, pbdev)) {
            error_setg(errp, "no slot for plugging zpci device");
            return;
        }
    }
}

static void s390_pci_update_subordinate(PCIDevice *dev, uint32_t nr)
{
    uint32_t old_nr;

    pci_default_write_config(dev, PCI_SUBORDINATE_BUS, nr, 1);
    while (!pci_bus_is_root(pci_get_bus(dev))) {
        dev = pci_get_bus(dev)->parent_dev;

        old_nr = pci_default_read_config(dev, PCI_SUBORDINATE_BUS, 1);
        if (old_nr < nr) {
            pci_default_write_config(dev, PCI_SUBORDINATE_BUS, nr, 1);
        }
    }
}

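/*
 * Plug a device that will use zPCI interpretation: take over the host's
 * function handle (with the enable bit masked off until the guest enables
 * the function) and reconcile the handle's index with the index allocated
 * at pre-plug time.
 */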
static int s390_pci_interp_plug(S390pciState *s, S390PCIBusDevice *pbdev)
{
    uint32_t idx, fh;

    if (!s390_pci_get_host_fh(pbdev, &fh)) {
        return -EPERM;
    }

    /*
     * The host device is already in an enabled state, but we always present
     * the initial device state to the guest as disabled (ZPCI_FS_DISABLED).
     * Therefore, mask off the enable bit from the passthrough handle until
     * the guest issues a CLP SET PCI FN later to enable the device.
     */
    pbdev->fh = fh & ~FH_MASK_ENABLE;

    /* Next, see if the idx is already in-use */
    idx = pbdev->fh & FH_MASK_INDEX;
    if (pbdev->idx != idx) {
        if (s390_pci_find_dev_by_idx(s, idx)) {
            return -EINVAL;
        }
        /*
         * Update the idx entry with the passed through idx
         * If the relinquished idx is lower than next_idx, use it
         * to replace next_idx
         */
        g_hash_table_remove(s->zpci_table, &pbdev->idx);
        if (idx < s->next_idx) {
            s->next_idx = idx;
        }
        pbdev->idx = idx;
        g_hash_table_insert(s->zpci_table, &pbdev->idx, pbdev);
    }

    return 0;
}

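/*
 * Plug handler for the host bridge. PCI bridges get bus numbers assigned,
 * PCI devices are bound to their (possibly implicitly created) zpci device
 * and have their IOMMU, CLP data and MSI-X state set up, and zpci devices
 * themselves are registered in the function handle/index tables.
 */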
static void s390_pcihost_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                              Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    PCIDevice *pdev = NULL;
    S390PCIBusDevice *pbdev = NULL;
    int rc;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
        PCIBridge *pb = PCI_BRIDGE(dev);

        pdev = PCI_DEVICE(dev);
        pci_bridge_map_irq(pb, dev->id, s390_pci_map_irq);
        pci_setup_iommu(&pb->sec_bus, s390_pci_dma_iommu, s);

        qbus_set_hotplug_handler(BUS(&pb->sec_bus), OBJECT(s));

        if (dev->hotplugged) {
            pci_default_write_config(pdev, PCI_PRIMARY_BUS,
                                     pci_dev_bus_num(pdev), 1);
            s->bus_no += 1;
            pci_default_write_config(pdev, PCI_SECONDARY_BUS, s->bus_no, 1);

            s390_pci_update_subordinate(pdev, s->bus_no);
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        pdev = PCI_DEVICE(dev);

        if (!dev->id) {
            /* In the case the PCI device does not define an id */
            /* we generate one based on the PCI address */
            dev->id = g_strdup_printf("auto_%02x:%02x.%01x",
                                      pci_dev_bus_num(pdev),
                                      PCI_SLOT(pdev->devfn),
                                      PCI_FUNC(pdev->devfn));
        }

        pbdev = s390_pci_find_dev_by_target(s, dev->id);
        if (!pbdev) {
            pbdev = s390_pci_device_new(s, dev->id, errp);
            if (!pbdev) {
                return;
            }
        }

        pbdev->pdev = pdev;
        pbdev->iommu = s390_pci_get_iommu(s, pci_get_bus(pdev), pdev->devfn);
        pbdev->iommu->pbdev = pbdev;
        pbdev->state = ZPCI_FS_DISABLED;
        set_pbdev_info(pbdev);

        if (object_dynamic_cast(OBJECT(dev), "vfio-pci")) {
            /*
             * By default, interpretation is always requested; if the available
             * facilities indicate it is not available, fallback to the
             * interception model.
             */
            if (pbdev->interp) {
                if (s390_pci_kvm_interp_allowed()) {
                    rc = s390_pci_interp_plug(s, pbdev);
                    if (rc) {
                        error_setg(errp, "Plug failed for zPCI device in "
                                   "interpretation mode: %d", rc);
                        return;
                    }
                } else {
                    DPRINTF("zPCI interpretation facilities missing.\n");
                    pbdev->interp = false;
                    pbdev->forwarding_assist = false;
                }
            }
            pbdev->iommu->dma_limit = s390_pci_start_dma_count(s, pbdev);
            /* Fill in CLP information passed via the vfio region */
            s390_pci_get_clp_info(pbdev);
            if (!pbdev->interp) {
                /* Do vfio passthrough but intercept for I/O */
                pbdev->fh |= FH_SHM_VFIO;
                pbdev->forwarding_assist = false;
            }
        } else {
            pbdev->fh |= FH_SHM_EMUL;
            /* Always intercept emulated devices */
            pbdev->interp = false;
            pbdev->forwarding_assist = false;
        }

        if (s390_pci_msix_init(pbdev) && !pbdev->interp) {
            error_setg(errp, "MSI-X support is mandatory "
                       "in the S390 architecture");
            return;
        }

        if (dev->hotplugged) {
            s390_pci_generate_plug_event(HP_EVENT_TO_CONFIGURED,
                                         pbdev->fh, pbdev->fid);
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);

        /* the allocated idx is actually getting used */
        s->next_idx = (pbdev->idx + 1) & FH_MASK_INDEX;
        pbdev->fh = pbdev->idx;
        QTAILQ_INSERT_TAIL(&s->zpci_devs, pbdev, link);
        g_hash_table_insert(s->zpci_table, &pbdev->idx, pbdev);
    } else {
        g_assert_not_reached();
    }
}

static void s390_pcihost_unplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    S390PCIBusDevice *pbdev = NULL;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        PCIBus *bus;
        int32_t devfn;

        pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev));
        g_assert(pbdev);

        s390_pci_generate_plug_event(HP_EVENT_STANDBY_TO_RESERVED,
                                     pbdev->fh, pbdev->fid);
        bus = pci_get_bus(pci_dev);
        devfn = pci_dev->devfn;
        qdev_unrealize(dev);

        s390_pci_msix_free(pbdev);
        s390_pci_iommu_free(s, bus, devfn);
        pbdev->pdev = NULL;
        pbdev->state = ZPCI_FS_RESERVED;
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);
        pbdev->fid = 0;
        QTAILQ_REMOVE(&s->zpci_devs, pbdev, link);
        g_hash_table_remove(s->zpci_table, &pbdev->idx);
        if (pbdev->iommu->dma_limit) {
            s390_pci_end_dma_count(s, pbdev->iommu->dma_limit);
        }
        qdev_unrealize(dev);
    }
}

static void s390_pcihost_unplug_request(HotplugHandler *hotplug_dev,
                                        DeviceState *dev,
                                        Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    S390PCIBusDevice *pbdev;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
        error_setg(errp, "PCI bridge hot unplug currently not supported");
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        /*
         * Redirect the unplug request to the zPCI device and remember that
         * we've checked the PCI device already (to prevent endless recursion).
         */
        pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev));
        g_assert(pbdev);
        pbdev->pci_unplug_request_processed = true;
        qdev_unplug(DEVICE(pbdev), errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);

        /*
         * If unplug was initially requested for the zPCI device, we
         * first have to redirect to the PCI device, which will in return
         * redirect back to us after performing its checks (if the request
         * is not blocked, e.g. because it's a PCI bridge).
         */
        if (pbdev->pdev && !pbdev->pci_unplug_request_processed) {
            qdev_unplug(DEVICE(pbdev->pdev), errp);
            return;
        }
        pbdev->pci_unplug_request_processed = false;

        switch (pbdev->state) {
        case ZPCI_FS_STANDBY:
        case ZPCI_FS_RESERVED:
            s390_pci_perform_unplug(pbdev);
            break;
        default:
            /*
             * Allow to send multiple requests, e.g. if the guest crashed
             * before releasing the device, we would not be able to send
             * another request to the same VM (e.g. fresh OS).
             */
            pbdev->unplug_requested = true;
            s390_pci_generate_plug_event(HP_EVENT_DECONFIGURE_REQUEST,
                                         pbdev->fh, pbdev->fid);
        }
    } else {
        g_assert_not_reached();
    }
}

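/*
 * pci_for_each_device callback used to (re)assign secondary and subordinate
 * bus numbers below a bridge, recursing into child buses.
 */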
static void s390_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev,
                                      void *opaque)
{
    S390pciState *s = opaque;
    PCIBus *sec_bus = NULL;

    if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) !=
         PCI_HEADER_TYPE_BRIDGE)) {
        return;
    }

    (s->bus_no)++;
    pci_default_write_config(pdev, PCI_PRIMARY_BUS, pci_dev_bus_num(pdev), 1);
    pci_default_write_config(pdev, PCI_SECONDARY_BUS, s->bus_no, 1);
    pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1);

    sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
    if (!sec_bus) {
        return;
    }

    /* Assign numbers to all child bridges. The last is the highest number. */
    pci_for_each_device_under_bus(sec_bus, s390_pci_enumerate_bridge, s);
    pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1);
}

static void s390_pcihost_reset(DeviceState *dev)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(dev);
    PCIBus *bus = s->parent_obj.bus;
    S390PCIBusDevice *pbdev, *next;

    /* Process all pending unplug requests */
    QTAILQ_FOREACH_SAFE(pbdev, &s->zpci_devs, link, next) {
        if (pbdev->unplug_requested) {
            if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
                /* Interpreted devices were using interrupt forwarding */
                s390_pci_kvm_aif_disable(pbdev);
            } else if (pbdev->summary_ind) {
                pci_dereg_irqs(pbdev);
            }
            if (pbdev->iommu->enabled) {
                pci_dereg_ioat(pbdev->iommu);
            }
            pbdev->state = ZPCI_FS_STANDBY;
            s390_pci_perform_unplug(pbdev);
        }
    }

    /*
     * When resetting a PCI bridge, the assigned numbers are set to 0. So
     * on every system reset, we also have to reassign numbers.
     */
    s->bus_no = 0;
    pci_for_each_device_under_bus(bus, s390_pci_enumerate_bridge, s);
}

static void s390_pcihost_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    dc->reset = s390_pcihost_reset;
    dc->realize = s390_pcihost_realize;
    dc->unrealize = s390_pcihost_unrealize;
    hc->pre_plug = s390_pcihost_pre_plug;
    hc->plug = s390_pcihost_plug;
    hc->unplug_request = s390_pcihost_unplug_request;
    hc->unplug = s390_pcihost_unplug;
    msi_nonbroken = true;
}

static const TypeInfo s390_pcihost_info = {
    .name = TYPE_S390_PCI_HOST_BRIDGE,
    .parent = TYPE_PCI_HOST_BRIDGE,
    .instance_size = sizeof(S390pciState),
    .class_init = s390_pcihost_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};

static const TypeInfo s390_pcibus_info = {
    .name = TYPE_S390_PCI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(S390PCIBus),
};

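/* Allocate the lowest UID in [1, ZPCI_MAX_UID] that is not yet in use */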
static uint16_t s390_pci_generate_uid(S390pciState *s)
{
    uint16_t uid = 0;

    do {
        uid++;
        if (!s390_pci_find_dev_by_uid(s, uid)) {
            return uid;
        }
    } while (uid < ZPCI_MAX_UID);

    return UID_UNDEFINED;
}

static uint32_t s390_pci_generate_fid(S390pciState *s, Error **errp)
{
    uint32_t fid = 0;

    do {
        if (!s390_pci_find_dev_by_fid(s, fid)) {
            return fid;
        }
    } while (fid++ != ZPCI_MAX_FID);

    error_setg(errp, "no free fid could be found");
    return 0;
}

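/*
 * Realize a zpci device: the target must be set and unique, uid and fid are
 * either validated for uniqueness or auto-generated, and the function starts
 * out in the reserved state.
 */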
static void s390_pci_device_realize(DeviceState *dev, Error **errp)
{
    S390PCIBusDevice *zpci = S390_PCI_DEVICE(dev);
    S390pciState *s = s390_get_phb();

    if (!zpci->target) {
        error_setg(errp, "target must be defined");
        return;
    }

    if (s390_pci_find_dev_by_target(s, zpci->target)) {
        error_setg(errp, "target %s already has an associated zpci device",
                   zpci->target);
        return;
    }

    if (zpci->uid == UID_UNDEFINED) {
        zpci->uid = s390_pci_generate_uid(s);
        if (!zpci->uid) {
            error_setg(errp, "no free uid could be found");
            return;
        }
    } else if (s390_pci_find_dev_by_uid(s, zpci->uid)) {
        error_setg(errp, "uid %u already in use", zpci->uid);
        return;
    }

    if (!zpci->fid_defined) {
        Error *local_error = NULL;

        zpci->fid = s390_pci_generate_fid(s, &local_error);
        if (local_error) {
            error_propagate(errp, local_error);
            return;
        }
    } else if (s390_pci_find_dev_by_fid(s, zpci->fid)) {
        error_setg(errp, "fid %u already in use", zpci->fid);
        return;
    }

    zpci->state = ZPCI_FS_RESERVED;
    zpci->fmb.format = ZPCI_FMB_FORMAT;
}

static void s390_pci_device_reset(DeviceState *dev)
{
    S390PCIBusDevice *pbdev = S390_PCI_DEVICE(dev);

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        return;
    case ZPCI_FS_STANDBY:
        break;
    default:
        pbdev->fh &= ~FH_MASK_ENABLE;
        pbdev->state = ZPCI_FS_DISABLED;
        break;
    }

    if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
        /* Interpreted devices were using interrupt forwarding */
        s390_pci_kvm_aif_disable(pbdev);
    } else if (pbdev->summary_ind) {
        pci_dereg_irqs(pbdev);
    }
    if (pbdev->iommu->enabled) {
        pci_dereg_ioat(pbdev->iommu);
    }

    fmb_timer_free(pbdev);
}

static void s390_pci_get_fid(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    Property *prop = opaque;
    uint32_t *ptr = object_field_prop_ptr(obj, prop);

    visit_type_uint32(v, name, ptr, errp);
}

static void s390_pci_set_fid(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    S390PCIBusDevice *zpci = S390_PCI_DEVICE(obj);
    Property *prop = opaque;
    uint32_t *ptr = object_field_prop_ptr(obj, prop);

    if (!visit_type_uint32(v, name, ptr, errp)) {
        return;
    }
    zpci->fid_defined = true;
}

static const PropertyInfo s390_pci_fid_propinfo = {
    .name = "zpci_fid",
    .get = s390_pci_get_fid,
    .set = s390_pci_set_fid,
};

#define DEFINE_PROP_S390_PCI_FID(_n, _s, _f) \
    DEFINE_PROP(_n, _s, _f, s390_pci_fid_propinfo, uint32_t)

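/*
 * Illustrative pairing of a zpci device with the PCI device it exposes on
 * the command line; "hostdev0" and the host address are placeholders chosen
 * here, not values taken from this file:
 *
 *   -device zpci,uid=1,fid=0,target=hostdev0 \
 *   -device vfio-pci,host=0000:00:02.0,id=hostdev0
 *
 * "target" must match the id of the PCI device; "uid" and "fid" are
 * auto-generated when omitted (see s390_pci_device_realize above).
 */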
static Property s390_pci_device_properties[] = {
    DEFINE_PROP_UINT16("uid", S390PCIBusDevice, uid, UID_UNDEFINED),
    DEFINE_PROP_S390_PCI_FID("fid", S390PCIBusDevice, fid),
    DEFINE_PROP_STRING("target", S390PCIBusDevice, target),
    DEFINE_PROP_BOOL("interpret", S390PCIBusDevice, interp, true),
    DEFINE_PROP_BOOL("forwarding-assist", S390PCIBusDevice, forwarding_assist,
                     true),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription s390_pci_device_vmstate = {
    .name = TYPE_S390_PCI_DEVICE,
    /*
     * TODO: add state handling here, so migration works at least with
     * emulated pci devices on s390x
     */
    .unmigratable = 1,
};

static void s390_pci_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "zpci device";
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->reset = s390_pci_device_reset;
    dc->bus_type = TYPE_S390_PCI_BUS;
    dc->realize = s390_pci_device_realize;
    device_class_set_props(dc, s390_pci_device_properties);
    dc->vmsd = &s390_pci_device_vmstate;
}

static const TypeInfo s390_pci_device_info = {
    .name = TYPE_S390_PCI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(S390PCIBusDevice),
    .class_init = s390_pci_device_class_init,
};

static const TypeInfo s390_pci_iommu_info = {
    .name = TYPE_S390_PCI_IOMMU,
    .parent = TYPE_OBJECT,
    .instance_size = sizeof(S390PCIIOMMU),
};

static void s390_iommu_memory_region_class_init(ObjectClass *klass, void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = s390_translate_iommu;
    imrc->replay = s390_pci_iommu_replay;
}

static const TypeInfo s390_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_S390_IOMMU_MEMORY_REGION,
    .class_init = s390_iommu_memory_region_class_init,
};

static void s390_pci_register_types(void)
{
    type_register_static(&s390_pcihost_info);
    type_register_static(&s390_pcibus_info);
    type_register_static(&s390_pci_device_info);
    type_register_static(&s390_pci_iommu_info);
    type_register_static(&s390_iommu_memory_region_info);
}

type_init(s390_pci_register_types)