/*
 * Copyright (c) 2007, Neocleus Corporation.
 * Copyright (c) 2007, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Alex Novik <alex@neocleus.com>
 * Allen Kay <allen.m.kay@intel.com>
 * Guy Zana <guy@neocleus.com>
 *
 * This file implements direct PCI assignment to a HVM guest
 */

/*
 * Interrupt Disable policy:
 *
 * INTx interrupt:
 *   Initialize(register_real_device)
 *     Map INTx(xc_physdev_map_pirq):
 *       <fail>
 *         - Set real Interrupt Disable bit to '1'.
 *         - Set machine_irq and assigned_device->machine_irq to '0'.
 *         * Don't bind INTx.
 *
 *     Bind INTx(xc_domain_bind_pt_pci_irq):
 *       <fail>
 *         - Set real Interrupt Disable bit to '1'.
 *         - Unmap INTx.
 *         - Decrement xen_pt_mapped_machine_irq[machine_irq]
 *         - Set assigned_device->machine_irq to '0'.
 *
 *   Write to Interrupt Disable bit by guest software(xen_pt_cmd_reg_write)
 *     Write '0'
 *       - Set real bit to '0' if assigned_device->machine_irq isn't '0'.
 *
 *     Write '1'
 *       - Set real bit to '1'.
 *
 * MSI interrupt:
 *   Initialize MSI register(xen_pt_msi_setup, xen_pt_msi_update)
 *     Bind MSI(xc_domain_update_msi_irq)
 *       <fail>
 *         - Unmap MSI.
 *         - Set dev->msi->pirq to '-1'.
 *
 * MSI-X interrupt:
 *   Initialize MSI-X register(xen_pt_msix_update_one)
 *     Bind MSI-X(xc_domain_update_msi_irq)
 *       <fail>
 *         - Unmap MSI-X.
 *         - Set entry->pirq to '-1'.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include <sys/ioctl.h>

#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "xen_pt.h"
#include "hw/xen/xen.h"
#include "hw/xen/xen-legacy-backend.h"
#include "qemu/range.h"

static bool has_igd_gfx_passthru;

bool xen_igd_gfx_pt_enabled(void)
{
    return has_igd_gfx_passthru;
}

void xen_igd_gfx_pt_set(bool value, Error **errp)
{
    has_igd_gfx_passthru = value;
}

#define XEN_PT_NR_IRQS (256)
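/*
 * Reference count of passed-through devices per mapped machine IRQ;
 * the pirq is only unmapped again once this count drops back to zero.
 */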
static uint8_t xen_pt_mapped_machine_irq[XEN_PT_NR_IRQS] = {0};

void xen_pt_log(const PCIDevice *d, const char *f, ...)
{
    va_list ap;

    va_start(ap, f);
    if (d) {
        fprintf(stderr, "[%02x:%02x.%d] ", pci_dev_bus_num(d),
                PCI_SLOT(d->devfn), PCI_FUNC(d->devfn));
    }
    vfprintf(stderr, f, ap);
    va_end(ap);
}

/* Config Space */

static int xen_pt_pci_config_access_check(PCIDevice *d, uint32_t addr, int len)
{
    /* check offset range */
    if (addr > 0xFF) {
        XEN_PT_ERR(d, "Failed to access register with offset exceeding 0xFF. "
                   "(addr: 0x%02x, len: %d)\n", addr, len);
        return -1;
    }

    /* check read size */
    if ((len != 1) && (len != 2) && (len != 4)) {
        XEN_PT_ERR(d, "Failed to access register with invalid access length. "
                   "(addr: 0x%02x, len: %d)\n", addr, len);
        return -1;
    }

    /* check offset alignment */
    if (addr & (len - 1)) {
        XEN_PT_ERR(d, "Failed to access register with invalid access size "
                   "alignment. (addr: 0x%02x, len: %d)\n", addr, len);
        return -1;
    }

    return 0;
}

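/*
 * Map a config-space offset to a BAR index: PCI_BASE_ADDRESS_0 (0x10) maps
 * to index 0, each further BAR is 4 bytes higher, and PCI_ROM_ADDRESS (0x30)
 * maps to PCI_ROM_SLOT.
 */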
int xen_pt_bar_offset_to_index(uint32_t offset)
{
    int index = 0;

    /* check Exp ROM BAR */
    if (offset == PCI_ROM_ADDRESS) {
        return PCI_ROM_SLOT;
    }

    /* calculate BAR index */
    index = (offset - PCI_BASE_ADDRESS_0) >> 2;
    if (index >= PCI_NUM_REGIONS) {
        return -1;
    }

    return index;
}

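/*
 * Emulated config-space read: the value is first read from the real device,
 * then every emulated register that falls inside the accessed window gets a
 * chance to overlay its emulated bits through its ->read handler.
 */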
static uint32_t xen_pt_pci_read_config(PCIDevice *d, uint32_t addr, int len)
{
    XenPCIPassthroughState *s = XEN_PT_DEVICE(d);
    uint32_t val = 0;
    XenPTRegGroup *reg_grp_entry = NULL;
    XenPTReg *reg_entry = NULL;
    int rc = 0;
    int emul_len = 0;
    uint32_t find_addr = addr;

    if (xen_pt_pci_config_access_check(d, addr, len)) {
        goto exit;
    }

    /* find register group entry */
    reg_grp_entry = xen_pt_find_reg_grp(s, addr);
    if (reg_grp_entry) {
        /* check 0-Hardwired register group */
        if (reg_grp_entry->reg_grp->grp_type == XEN_PT_GRP_TYPE_HARDWIRED) {
            /* no need to emulate, just return 0 */
            val = 0;
            goto exit;
        }
    }

    /* read I/O device register value */
    rc = xen_host_pci_get_block(&s->real_device, addr, (uint8_t *)&val, len);
    if (rc < 0) {
        XEN_PT_ERR(d, "pci_read_block failed. return value: %d.\n", rc);
        memset(&val, 0xff, len);
    }

    /* just return the I/O device register value for
     * passthrough type register group */
    if (reg_grp_entry == NULL) {
        goto exit;
    }

    /* adjust the read value to appropriate CFC-CFF window */
    val <<= (addr & 3) << 3;
    emul_len = len;

    /* loop around the guest requested size */
    while (emul_len > 0) {
        /* find register entry to be emulated */
        reg_entry = xen_pt_find_reg(reg_grp_entry, find_addr);
        if (reg_entry) {
            XenPTRegInfo *reg = reg_entry->reg;
            uint32_t real_offset = reg_grp_entry->base_offset + reg->offset;
            uint32_t valid_mask = 0xFFFFFFFF >> ((4 - emul_len) << 3);
            uint8_t *ptr_val = NULL;

            valid_mask <<= (find_addr - real_offset) << 3;
            ptr_val = (uint8_t *)&val + (real_offset & 3);

            /* do emulation based on register size */
            switch (reg->size) {
            case 1:
                if (reg->u.b.read) {
                    rc = reg->u.b.read(s, reg_entry, ptr_val, valid_mask);
                }
                break;
            case 2:
                if (reg->u.w.read) {
                    rc = reg->u.w.read(s, reg_entry,
                                       (uint16_t *)ptr_val, valid_mask);
                }
                break;
            case 4:
                if (reg->u.dw.read) {
                    rc = reg->u.dw.read(s, reg_entry,
                                        (uint32_t *)ptr_val, valid_mask);
                }
                break;
            }

            if (rc < 0) {
                xen_shutdown_fatal_error("Internal error: Invalid read "
                                         "emulation. (%s, rc: %d)\n",
                                         __func__, rc);
                return 0;
            }

            /* calculate next address to find */
            emul_len -= reg->size;
            if (emul_len > 0) {
                find_addr = real_offset + reg->size;
            }
        } else {
            /* nothing to do with passthrough type register,
             * continue to find next byte */
            emul_len--;
            find_addr++;
        }
    }

    /* need to shift back before returning them to pci bus emulator */
    val >>= ((addr & 3) << 3);

exit:
    XEN_PT_LOG_CONFIG(d, addr, val, len);
    return val;
}

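/*
 * Emulated config-space write: the current host value is read back, each
 * emulated register in the accessed window filters the guest value through
 * its ->write handler and clears from wb_mask the bits that must not reach
 * the hardware; the bytes that remain writable are then forwarded to the
 * real device.
 */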
static void xen_pt_pci_write_config(PCIDevice *d, uint32_t addr,
                                    uint32_t val, int len)
{
    XenPCIPassthroughState *s = XEN_PT_DEVICE(d);
    int index = 0;
    XenPTRegGroup *reg_grp_entry = NULL;
    int rc = 0;
    uint32_t read_val = 0, wb_mask;
    int emul_len = 0;
    XenPTReg *reg_entry = NULL;
    uint32_t find_addr = addr;
    XenPTRegInfo *reg = NULL;
    bool wp_flag = false;

    if (xen_pt_pci_config_access_check(d, addr, len)) {
        return;
    }

    XEN_PT_LOG_CONFIG(d, addr, val, len);

    /* check unused BAR register */
    index = xen_pt_bar_offset_to_index(addr);
    if ((index >= 0) && (val != 0)) {
        uint32_t chk = val;

        if (index == PCI_ROM_SLOT)
            chk |= (uint32_t)~PCI_ROM_ADDRESS_MASK;

        if ((chk != XEN_PT_BAR_ALLF) &&
            (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED)) {
            XEN_PT_WARN(d, "Guest attempt to set address to unused "
                        "Base Address Register. (addr: 0x%02x, len: %d)\n",
                        addr, len);
        }
    }

    /* find register group entry */
    reg_grp_entry = xen_pt_find_reg_grp(s, addr);
    if (reg_grp_entry) {
        /* check 0-Hardwired register group */
        if (reg_grp_entry->reg_grp->grp_type == XEN_PT_GRP_TYPE_HARDWIRED) {
            /* ignore silently */
            XEN_PT_WARN(d, "Access to 0-Hardwired register. "
                        "(addr: 0x%02x, len: %d)\n", addr, len);
            return;
        }
    }

    rc = xen_host_pci_get_block(&s->real_device, addr,
                                (uint8_t *)&read_val, len);
    if (rc < 0) {
        XEN_PT_ERR(d, "pci_read_block failed. return value: %d.\n", rc);
        memset(&read_val, 0xff, len);
        wb_mask = 0;
    } else {
        wb_mask = 0xFFFFFFFF >> ((4 - len) << 3);
    }

    /* pass directly to the real device for passthrough type register group */
    if (reg_grp_entry == NULL) {
        if (!s->permissive) {
            wb_mask = 0;
            wp_flag = true;
        }
        goto out;
    }

    memory_region_transaction_begin();
    pci_default_write_config(d, addr, val, len);

    /* adjust the read and write value to appropriate CFC-CFF window */
    read_val <<= (addr & 3) << 3;
    val <<= (addr & 3) << 3;
    emul_len = len;

    /* loop around the guest requested size */
    while (emul_len > 0) {
        /* find register entry to be emulated */
        reg_entry = xen_pt_find_reg(reg_grp_entry, find_addr);
        if (reg_entry) {
            reg = reg_entry->reg;
            uint32_t real_offset = reg_grp_entry->base_offset + reg->offset;
            uint32_t valid_mask = 0xFFFFFFFF >> ((4 - emul_len) << 3);
            uint8_t *ptr_val = NULL;
            uint32_t wp_mask = reg->emu_mask | reg->ro_mask;

            valid_mask <<= (find_addr - real_offset) << 3;
            ptr_val = (uint8_t *)&val + (real_offset & 3);
            if (!s->permissive) {
                wp_mask |= reg->res_mask;
            }
            if (wp_mask == (0xFFFFFFFF >> ((4 - reg->size) << 3))) {
                wb_mask &= ~((wp_mask >> ((find_addr - real_offset) << 3))
                             << ((len - emul_len) << 3));
            }

            /* do emulation based on register size */
            switch (reg->size) {
            case 1:
                if (reg->u.b.write) {
                    rc = reg->u.b.write(s, reg_entry, ptr_val,
                                        read_val >> ((real_offset & 3) << 3),
                                        valid_mask);
                }
                break;
            case 2:
                if (reg->u.w.write) {
                    rc = reg->u.w.write(s, reg_entry, (uint16_t *)ptr_val,
                                        (read_val >> ((real_offset & 3) << 3)),
                                        valid_mask);
                }
                break;
            case 4:
                if (reg->u.dw.write) {
                    rc = reg->u.dw.write(s, reg_entry, (uint32_t *)ptr_val,
                                         (read_val >> ((real_offset & 3) << 3)),
                                         valid_mask);
                }
                break;
            }

            if (rc < 0) {
                xen_shutdown_fatal_error("Internal error: Invalid write"
                                         " emulation. (%s, rc: %d)\n",
                                         __func__, rc);
                return;
            }

            /* calculate next address to find */
            emul_len -= reg->size;
            if (emul_len > 0) {
                find_addr = real_offset + reg->size;
            }
        } else {
            /* nothing to do with passthrough type register,
             * continue to find next byte */
            if (!s->permissive) {
                wb_mask &= ~(0xff << ((len - emul_len) << 3));
                /* Unused BARs will make it here, but we don't want to issue
                 * warnings for writes to them (bogus writes get dealt with
                 * above).
                 */
                if (index < 0) {
                    wp_flag = true;
                }
            }
            emul_len--;
            find_addr++;
        }
    }

    /* need to shift back before passing them to xen_host_pci_set_block. */
    val >>= (addr & 3) << 3;

    memory_region_transaction_commit();

out:
    if (wp_flag && !s->permissive_warned) {
        s->permissive_warned = true;
        xen_pt_log(d, "Write-back to unknown field 0x%02x (partially) inhibited (0x%0*x)\n",
                   addr, len * 2, wb_mask);
        xen_pt_log(d, "If the device doesn't work, try enabling permissive mode\n");
        xen_pt_log(d, "(unsafe) and if it helps report the problem to xen-devel\n");
    }
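    /*
     * Forward each remaining contiguous run of write-back bytes (bits still
     * set in wb_mask) to the real device.
     */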
    for (index = 0; wb_mask; index += len) {
        /* unknown regs are passed through */
        while (!(wb_mask & 0xff)) {
            index++;
            wb_mask >>= 8;
        }
        len = 0;
        do {
            len++;
            wb_mask >>= 8;
        } while (wb_mask & 0xff);
        rc = xen_host_pci_set_block(&s->real_device, addr + index,
                                    (uint8_t *)&val + index, len);

        if (rc < 0) {
            XEN_PT_ERR(d, "xen_host_pci_set_block failed. return value: %d.\n", rc);
        }
    }
}

/* register regions */

static uint64_t xen_pt_bar_read(void *o, hwaddr addr,
                                unsigned size)
{
    PCIDevice *d = o;
    /* if this function is called, that probably means that there is a
     * misconfiguration of the IOMMU. */
    XEN_PT_ERR(d, "Should not read BAR through QEMU. @0x"HWADDR_FMT_plx"\n",
               addr);
    return 0;
}
static void xen_pt_bar_write(void *o, hwaddr addr, uint64_t val,
                             unsigned size)
{
    PCIDevice *d = o;
    /* Same comment as xen_pt_bar_read function */
    XEN_PT_ERR(d, "Should not write BAR through QEMU. @0x"HWADDR_FMT_plx"\n",
               addr);
}

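/*
 * Guest BAR accesses are expected to hit the hardware directly through the
 * mappings established in xen_pt_region_update(); these handlers only run
 * (and complain) when such a mapping is missing.
 */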
static const MemoryRegionOps ops = {
    .endianness = DEVICE_NATIVE_ENDIAN,
    .read = xen_pt_bar_read,
    .write = xen_pt_bar_write,
};

static int xen_pt_register_regions(XenPCIPassthroughState *s, uint16_t *cmd)
{
    int i = 0;
    XenHostPCIDevice *d = &s->real_device;

    /* Register PIO/MMIO BARs */
    for (i = 0; i < PCI_ROM_SLOT; i++) {
        XenHostPCIIORegion *r = &d->io_regions[i];
        uint8_t type;

        if (r->base_addr == 0 || r->size == 0) {
            continue;
        }

        s->bases[i].access.u = r->base_addr;

        if (r->type & XEN_HOST_PCI_REGION_TYPE_IO) {
            type = PCI_BASE_ADDRESS_SPACE_IO;
            *cmd |= PCI_COMMAND_IO;
        } else {
            type = PCI_BASE_ADDRESS_SPACE_MEMORY;
            if (r->type & XEN_HOST_PCI_REGION_TYPE_PREFETCH) {
                type |= PCI_BASE_ADDRESS_MEM_PREFETCH;
            }
            if (r->type & XEN_HOST_PCI_REGION_TYPE_MEM_64) {
                type |= PCI_BASE_ADDRESS_MEM_TYPE_64;
            }
            *cmd |= PCI_COMMAND_MEMORY;
        }

        memory_region_init_io(&s->bar[i], OBJECT(s), &ops, &s->dev,
                              "xen-pci-pt-bar", r->size);
        pci_register_bar(&s->dev, i, type, &s->bar[i]);

        XEN_PT_LOG(&s->dev, "IO region %i registered (size=0x%08"PRIx64
                   " base_addr=0x%08"PRIx64" type: 0x%x)\n",
                   i, r->size, r->base_addr, type);
    }

    /* Register expansion ROM address */
    if (d->rom.base_addr && d->rom.size) {
        uint32_t bar_data = 0;

        /* Re-set BAR reported by OS, otherwise ROM can't be read. */
        if (xen_host_pci_get_long(d, PCI_ROM_ADDRESS, &bar_data)) {
            return 0;
        }
        if ((bar_data & PCI_ROM_ADDRESS_MASK) == 0) {
            bar_data |= d->rom.base_addr & PCI_ROM_ADDRESS_MASK;
            xen_host_pci_set_long(d, PCI_ROM_ADDRESS, bar_data);
        }

        s->bases[PCI_ROM_SLOT].access.maddr = d->rom.base_addr;

        memory_region_init_io(&s->rom, OBJECT(s), &ops, &s->dev,
                              "xen-pci-pt-rom", d->rom.size);
        pci_register_bar(&s->dev, PCI_ROM_SLOT, PCI_BASE_ADDRESS_MEM_PREFETCH,
                         &s->rom);

        XEN_PT_LOG(&s->dev, "Expansion ROM registered (size=0x%08"PRIx64
                   " base_addr=0x%08"PRIx64")\n",
                   d->rom.size, d->rom.base_addr);
    }

    xen_pt_register_vga_regions(d);
    return 0;
}

/* region mapping */

static int xen_pt_bar_from_region(XenPCIPassthroughState *s, MemoryRegion *mr)
{
    int i = 0;

    for (i = 0; i < PCI_NUM_REGIONS - 1; i++) {
        if (mr == &s->bar[i]) {
            return i;
        }
    }
    if (mr == &s->rom) {
        return PCI_ROM_SLOT;
    }
    return -1;
}

/*
 * This function checks if an io_region overlaps an io_region from another
 * device. The io_region to check is provided with (addr, size and type).
 * A callback can be provided and will be called for every region that is
 * overlapped.
 * The return value indicates if the region is overlapped. */
struct CheckBarArgs {
    XenPCIPassthroughState *s;
    pcibus_t addr;
    pcibus_t size;
    uint8_t type;
    bool rc;
};
static void xen_pt_check_bar_overlap(PCIBus *bus, PCIDevice *d, void *opaque)
{
    struct CheckBarArgs *arg = opaque;
    XenPCIPassthroughState *s = arg->s;
    uint8_t type = arg->type;
    int i;

    if (d->devfn == s->dev.devfn) {
        return;
    }

    /* xxx: This ignores bridges. */
    for (i = 0; i < PCI_NUM_REGIONS; i++) {
        const PCIIORegion *r = &d->io_regions[i];

        if (!r->size) {
            continue;
        }
        if ((type & PCI_BASE_ADDRESS_SPACE_IO)
            != (r->type & PCI_BASE_ADDRESS_SPACE_IO)) {
            continue;
        }

        if (ranges_overlap(arg->addr, arg->size, r->addr, r->size)) {
            XEN_PT_WARN(&s->dev,
                        "Overlapped to device [%02x:%02x.%d] Region: %i"
                        " (addr: 0x%"FMT_PCIBUS", len: 0x%"FMT_PCIBUS")\n",
                        pci_bus_num(bus), PCI_SLOT(d->devfn),
                        PCI_FUNC(d->devfn), i, r->addr, r->size);
            arg->rc = true;
        }
    }
}

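/*
 * Propagate a BAR (or MSI-X table) mapping change to Xen: I/O port BARs go
 * through xc_domain_ioport_mapping(), memory BARs through
 * xc_domain_memory_mapping(), adding or removing the mapping as the guest
 * moves the BAR.
 */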
static void xen_pt_region_update(XenPCIPassthroughState *s,
                                 MemoryRegionSection *sec, bool adding)
{
    PCIDevice *d = &s->dev;
    MemoryRegion *mr = sec->mr;
    int bar = -1;
    int rc;
    int op = adding ? DPCI_ADD_MAPPING : DPCI_REMOVE_MAPPING;
    struct CheckBarArgs args = {
        .s = s,
        .addr = sec->offset_within_address_space,
        .size = int128_get64(sec->size),
        .rc = false,
    };

    bar = xen_pt_bar_from_region(s, mr);
    if (bar == -1 && (!s->msix || &s->msix->mmio != mr)) {
        return;
    }

    if (s->msix && &s->msix->mmio == mr) {
        if (adding) {
            s->msix->mmio_base_addr = sec->offset_within_address_space;
            rc = xen_pt_msix_update_remap(s, s->msix->bar_index);
        }
        return;
    }

    args.type = d->io_regions[bar].type;
    pci_for_each_device_under_bus(pci_get_bus(d),
                                  xen_pt_check_bar_overlap, &args);
    if (args.rc) {
        XEN_PT_WARN(d, "Region: %d (addr: 0x%"FMT_PCIBUS
                    ", len: 0x%"FMT_PCIBUS") is overlapped.\n",
                    bar, sec->offset_within_address_space,
                    int128_get64(sec->size));
    }

    if (d->io_regions[bar].type & PCI_BASE_ADDRESS_SPACE_IO) {
        uint32_t guest_port = sec->offset_within_address_space;
        uint32_t machine_port = s->bases[bar].access.pio_base;
        uint32_t size = int128_get64(sec->size);
        rc = xc_domain_ioport_mapping(xen_xc, xen_domid,
                                      guest_port, machine_port, size,
                                      op);
        if (rc) {
            XEN_PT_ERR(d, "%s ioport mapping failed! (err: %i)\n",
                       adding ? "create new" : "remove old", errno);
        }
    } else {
        pcibus_t guest_addr = sec->offset_within_address_space;
        pcibus_t machine_addr = s->bases[bar].access.maddr
            + sec->offset_within_region;
        pcibus_t size = int128_get64(sec->size);
        rc = xc_domain_memory_mapping(xen_xc, xen_domid,
                                      XEN_PFN(guest_addr + XC_PAGE_SIZE - 1),
                                      XEN_PFN(machine_addr + XC_PAGE_SIZE - 1),
                                      XEN_PFN(size + XC_PAGE_SIZE - 1),
                                      op);
        if (rc) {
            XEN_PT_ERR(d, "%s mem mapping failed! (err: %i)\n",
                       adding ? "create new" : "remove old", errno);
        }
    }
}

static void xen_pt_region_add(MemoryListener *l, MemoryRegionSection *sec)
{
    XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
                                             memory_listener);

    memory_region_ref(sec->mr);
    xen_pt_region_update(s, sec, true);
}

static void xen_pt_region_del(MemoryListener *l, MemoryRegionSection *sec)
{
    XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
                                             memory_listener);

    xen_pt_region_update(s, sec, false);
    memory_region_unref(sec->mr);
}

static void xen_pt_io_region_add(MemoryListener *l, MemoryRegionSection *sec)
{
    XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
                                             io_listener);

    memory_region_ref(sec->mr);
    xen_pt_region_update(s, sec, true);
}

static void xen_pt_io_region_del(MemoryListener *l, MemoryRegionSection *sec)
{
    XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
                                             io_listener);

    xen_pt_region_update(s, sec, false);
    memory_region_unref(sec->mr);
}

static const MemoryListener xen_pt_memory_listener = {
    .name = "xen-pt-mem",
    .region_add = xen_pt_region_add,
    .region_del = xen_pt_region_del,
    .priority = MEMORY_LISTENER_PRIORITY_ACCEL,
};

static const MemoryListener xen_pt_io_listener = {
    .name = "xen-pt-io",
    .region_add = xen_pt_io_region_add,
    .region_del = xen_pt_io_region_del,
    .priority = MEMORY_LISTENER_PRIORITY_ACCEL,
};

/* destroy. */
static void xen_pt_destroy(PCIDevice *d) {

    XenPCIPassthroughState *s = XEN_PT_DEVICE(d);
    XenHostPCIDevice *host_dev = &s->real_device;
    uint8_t machine_irq = s->machine_irq;
    uint8_t intx;
    int rc;

    if (machine_irq && !xen_host_pci_device_closed(&s->real_device)) {
        intx = xen_pt_pci_intx(s);
        rc = xc_domain_unbind_pt_irq(xen_xc, xen_domid, machine_irq,
                                     PT_IRQ_TYPE_PCI,
                                     pci_dev_bus_num(d),
                                     PCI_SLOT(s->dev.devfn),
                                     intx,
                                     0 /* isa_irq */);
        if (rc < 0) {
            XEN_PT_ERR(d, "unbinding of interrupt INT%c failed."
                       " (machine irq: %i, err: %d)"
                       " But bravely continuing on..\n",
                       'a' + intx, machine_irq, errno);
        }
    }

    /* N.B. xen_pt_config_delete takes care of freeing them. */
    if (s->msi) {
        xen_pt_msi_disable(s);
    }
    if (s->msix) {
        xen_pt_msix_disable(s);
    }

    if (machine_irq) {
        xen_pt_mapped_machine_irq[machine_irq]--;

        if (xen_pt_mapped_machine_irq[machine_irq] == 0) {
            rc = xc_physdev_unmap_pirq(xen_xc, xen_domid, machine_irq);

            if (rc < 0) {
                XEN_PT_ERR(d, "unmapping of interrupt %i failed. (err: %d)"
                           " But bravely continuing on..\n",
                           machine_irq, errno);
            }
        }
        s->machine_irq = 0;
    }

    /* delete all emulated config registers */
    xen_pt_config_delete(s);

    xen_pt_unregister_vga_regions(host_dev);

    if (s->listener_set) {
        memory_listener_unregister(&s->memory_listener);
        memory_listener_unregister(&s->io_listener);
        s->listener_set = false;
    }
    if (!xen_host_pci_device_closed(&s->real_device)) {
        xen_host_pci_device_put(&s->real_device);
    }
}
/* init */

static void xen_pt_realize(PCIDevice *d, Error **errp)
{
    ERRP_GUARD();
    XenPCIPassthroughState *s = XEN_PT_DEVICE(d);
    int i, rc = 0;
    uint8_t machine_irq = 0, scratch;
    uint16_t cmd = 0;
    int pirq = XEN_PT_UNASSIGNED_PIRQ;

    /* register real device */
    XEN_PT_LOG(d, "Assigning real physical device %02x:%02x.%d"
               " to devfn 0x%x\n",
               s->hostaddr.bus, s->hostaddr.slot, s->hostaddr.function,
               s->dev.devfn);

    s->is_virtfn = s->real_device.is_virtfn;
    if (s->is_virtfn) {
        XEN_PT_LOG(d, "%04x:%02x:%02x.%d is a SR-IOV Virtual Function\n",
                   s->real_device.domain, s->real_device.bus,
                   s->real_device.dev, s->real_device.func);
    }

    /* Initialize virtualized PCI configuration (Extended 256 Bytes) */
    memset(d->config, 0, PCI_CONFIG_SPACE_SIZE);

    s->memory_listener = xen_pt_memory_listener;
    s->io_listener = xen_pt_io_listener;

    /* Setup VGA bios for passthrough GFX */
    if ((s->real_device.domain == XEN_PCI_IGD_DOMAIN) &&
        (s->real_device.bus == XEN_PCI_IGD_BUS) &&
        (s->real_device.dev == XEN_PCI_IGD_DEV) &&
        (s->real_device.func == XEN_PCI_IGD_FN)) {
        if (!is_igd_vga_passthrough(&s->real_device)) {
            error_setg(errp, "Need to enable igd-passthru if you're trying"
                       " to pass through IGD GFX");
            xen_host_pci_device_put(&s->real_device);
            return;
        }

        xen_pt_setup_vga(s, &s->real_device, errp);
        if (*errp) {
            error_append_hint(errp, "Setup VGA BIOS of passthrough"
                              " GFX failed");
            xen_host_pci_device_put(&s->real_device);
            return;
        }

        /* Register ISA bridge for passthrough GFX. */
        xen_igd_passthrough_isa_bridge_create(s, &s->real_device);
    }

    /* Handle real device's MMIO/PIO BARs */
    xen_pt_register_regions(s, &cmd);

    /* reinitialize each config register to be emulated */
    xen_pt_config_init(s, errp);
    if (*errp) {
        error_append_hint(errp, "PCI Config space initialisation failed");
        rc = -1;
        goto err_out;
    }

    /* Bind interrupt */
    rc = xen_host_pci_get_byte(&s->real_device, PCI_INTERRUPT_PIN, &scratch);
    if (rc) {
        error_setg_errno(errp, errno, "Failed to read PCI_INTERRUPT_PIN");
        goto err_out;
    }
    if (!scratch) {
        XEN_PT_LOG(d, "no pin interrupt\n");
        goto out;
    }

    machine_irq = s->real_device.irq;
    if (machine_irq == 0) {
        XEN_PT_LOG(d, "machine irq is 0\n");
        cmd |= PCI_COMMAND_INTX_DISABLE;
        goto out;
    }

    rc = xc_physdev_map_pirq(xen_xc, xen_domid, machine_irq, &pirq);
    if (rc < 0) {
        XEN_PT_ERR(d, "Mapping machine irq %u to pirq %i failed, (err: %d)\n",
                   machine_irq, pirq, errno);

        /* Disable PCI intx assertion (turn on bit10 of devctl) */
        cmd |= PCI_COMMAND_INTX_DISABLE;
        machine_irq = 0;
        s->machine_irq = 0;
    } else {
        machine_irq = pirq;
        s->machine_irq = pirq;
        xen_pt_mapped_machine_irq[machine_irq]++;
    }

    /* bind machine_irq to device */
    if (machine_irq != 0) {
        uint8_t e_intx = xen_pt_pci_intx(s);

        rc = xc_domain_bind_pt_pci_irq(xen_xc, xen_domid, machine_irq,
                                       pci_dev_bus_num(d),
                                       PCI_SLOT(d->devfn),
                                       e_intx);
        if (rc < 0) {
            XEN_PT_ERR(d, "Binding of interrupt %i failed! (err: %d)\n",
                       e_intx, errno);

            /* Disable PCI intx assertion (turn on bit10 of devctl) */
            cmd |= PCI_COMMAND_INTX_DISABLE;
            xen_pt_mapped_machine_irq[machine_irq]--;

            if (xen_pt_mapped_machine_irq[machine_irq] == 0) {
                if (xc_physdev_unmap_pirq(xen_xc, xen_domid, machine_irq)) {
                    XEN_PT_ERR(d, "Unmapping of machine interrupt %i failed!"
                               " (err: %d)\n", machine_irq, errno);
                }
            }
            s->machine_irq = 0;
        }
    }

out:
    if (cmd) {
        uint16_t val;

        rc = xen_host_pci_get_word(&s->real_device, PCI_COMMAND, &val);
        if (rc) {
            error_setg_errno(errp, errno, "Failed to read PCI_COMMAND");
            goto err_out;
        } else {
            val |= cmd;
            rc = xen_host_pci_set_word(&s->real_device, PCI_COMMAND, val);
            if (rc) {
                error_setg_errno(errp, errno, "Failed to write PCI_COMMAND"
                                 " val = 0x%x", val);
                goto err_out;
            }
        }
    }

    memory_listener_register(&s->memory_listener, &address_space_memory);
    memory_listener_register(&s->io_listener, &address_space_io);
    s->listener_set = true;
    XEN_PT_LOG(d,
               "Real physical device %02x:%02x.%d registered successfully\n",
               s->hostaddr.bus, s->hostaddr.slot, s->hostaddr.function);

    return;

err_out:
    for (i = 0; i < PCI_ROM_SLOT; i++) {
        object_unparent(OBJECT(&s->bar[i]));
    }
    object_unparent(OBJECT(&s->rom));

    xen_pt_destroy(d);
    assert(rc);
}

static void xen_pt_unregister_device(PCIDevice *d)
{
    xen_pt_destroy(d);
}

static Property xen_pci_passthrough_properties[] = {
    DEFINE_PROP_PCI_HOST_DEVADDR("hostaddr", XenPCIPassthroughState, hostaddr),
    DEFINE_PROP_BOOL("permissive", XenPCIPassthroughState, permissive, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void xen_pci_passthrough_instance_init(Object *obj)
{
    /* QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command
     * line, therefore, no need to wait to realize like other devices */
    PCI_DEVICE(obj)->cap_present |= QEMU_PCI_CAP_EXPRESS;
}

void xen_igd_reserve_slot(PCIBus *pci_bus)
{
    if (!xen_igd_gfx_pt_enabled()) {
        return;
    }

    XEN_PT_LOG(0, "Reserving PCI slot 2 for IGD\n");
    pci_bus_set_slot_reserved_mask(pci_bus, XEN_PCI_IGD_SLOT_MASK);
}

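/*
 * Installed as the device's qdev realize hook: it opens the real host device
 * and, if the Intel IGD is being passed through, clears the slot 2
 * reservation before chaining to the original realize.
 */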
static void xen_igd_clear_slot(DeviceState *qdev, Error **errp)
{
    ERRP_GUARD();
    PCIDevice *pci_dev = (PCIDevice *)qdev;
    XenPCIPassthroughState *s = XEN_PT_DEVICE(pci_dev);
    XenPTDeviceClass *xpdc = XEN_PT_DEVICE_GET_CLASS(s);
    PCIBus *pci_bus = pci_get_bus(pci_dev);

    xen_host_pci_device_get(&s->real_device,
                            s->hostaddr.domain, s->hostaddr.bus,
                            s->hostaddr.slot, s->hostaddr.function,
                            errp);
    if (*errp) {
        error_append_hint(errp, "Failed to \"open\" the real pci device");
        return;
    }

    if (!(pci_bus_get_slot_reserved_mask(pci_bus) & XEN_PCI_IGD_SLOT_MASK)) {
        xpdc->pci_qdev_realize(qdev, errp);
        return;
    }

    if (is_igd_vga_passthrough(&s->real_device) &&
        s->real_device.domain == XEN_PCI_IGD_DOMAIN &&
        s->real_device.bus == XEN_PCI_IGD_BUS &&
        s->real_device.dev == XEN_PCI_IGD_DEV &&
        s->real_device.func == XEN_PCI_IGD_FN &&
        s->real_device.vendor_id == PCI_VENDOR_ID_INTEL) {
        pci_bus_clear_slot_reserved_mask(pci_bus, XEN_PCI_IGD_SLOT_MASK);
        XEN_PT_LOG(pci_dev, "Intel IGD found, using slot 2\n");
    }
    xpdc->pci_qdev_realize(qdev, errp);
}

static void xen_pci_passthrough_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    XenPTDeviceClass *xpdc = XEN_PT_DEVICE_CLASS(klass);
    xpdc->pci_qdev_realize = dc->realize;
    dc->realize = xen_igd_clear_slot;
    k->realize = xen_pt_realize;
    k->exit = xen_pt_unregister_device;
    k->config_read = xen_pt_pci_read_config;
    k->config_write = xen_pt_pci_write_config;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->desc = "Assign a host PCI device with Xen";
    device_class_set_props(dc, xen_pci_passthrough_properties);
}

static void xen_pci_passthrough_finalize(Object *obj)
{
    XenPCIPassthroughState *s = XEN_PT_DEVICE(obj);

    xen_pt_msix_delete(s);
}

static const TypeInfo xen_pci_passthrough_info = {
    .name = TYPE_XEN_PT_DEVICE,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(XenPCIPassthroughState),
    .instance_finalize = xen_pci_passthrough_finalize,
    .class_init = xen_pci_passthrough_class_init,
    .class_size = sizeof(XenPTDeviceClass),
    .instance_init = xen_pci_passthrough_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { INTERFACE_PCIE_DEVICE },
        { },
    },
};

static void xen_pci_passthrough_register_types(void)
{
    type_register_static(&xen_pci_passthrough_info);
}

type_init(xen_pci_passthrough_register_types)