#ifndef QEMU_HW_XEN_COMMON_H
#define QEMU_HW_XEN_COMMON_H

/*
 * If we have a new enough libxenctrl then we do not want/need these compat
 * interfaces, despite what the user-supplied cflags might say. They
 * must be undefined before including xenctrl.h.
 */
#undef XC_WANT_COMPAT_EVTCHN_API
#undef XC_WANT_COMPAT_GNTTAB_API
#undef XC_WANT_COMPAT_MAP_FOREIGN_API

#include <xenctrl.h>
#include <xenstore.h>
#include <xen/io/xenbus.h>

#include "hw/hw.h"
#include "hw/xen/xen.h"
#include "hw/pci/pci.h"
#include "qemu/queue.h"
#include "hw/xen/trace.h"

extern xc_interface *xen_xc;

/*
 * We don't support Xen prior to 4.2.0.
 */
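
/*
 * CONFIG_XEN_CTRL_INTERFACE_VERSION encodes the libxenctrl version as a
 * single decimal number (e.g. 40701 for Xen 4.7.1); the version gates
 * below compare against that encoding.
 */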

/* Xen 4.2 through 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701

typedef xc_interface xenforeignmemory_handle;
typedef xc_evtchn xenevtchn_handle;
typedef xc_gnttab xengnttab_handle;
typedef evtchn_port_or_error_t xenevtchn_port_or_error_t;

#define xenevtchn_open(l, f) xc_evtchn_open(l, f)
#define xenevtchn_close(h) xc_evtchn_close(h)
#define xenevtchn_fd(h) xc_evtchn_fd(h)
#define xenevtchn_pending(h) xc_evtchn_pending(h)
#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)

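/*
 * A minimal event-channel sketch against this (compat or native) API;
 * "dom" and "remote_port" are placeholders, not values from this file:
 *
 *     xenevtchn_handle *xeh = xenevtchn_open(NULL, 0);
 *     xenevtchn_port_or_error_t port =
 *         xenevtchn_bind_interdomain(xeh, dom, remote_port);
 *     if (port >= 0) {
 *         xenevtchn_notify(xeh, port);
 *         xenevtchn_unbind(xeh, port);
 *     }
 *     xenevtchn_close(xeh);
 */
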
#define xengnttab_open(l, f) xc_gnttab_open(l, f)
#define xengnttab_close(h) xc_gnttab_close(h)
#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
#define xengnttab_map_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_grant_refs(h, c, d, r, p)
#define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_domain_grant_refs(h, c, d, r, p)

#define xenforeignmemory_open(l, f) xen_xc
#define xenforeignmemory_close(h)

static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
                                         int prot, size_t pages,
                                         const xen_pfn_t arr[/*pages*/],
                                         int err[/*pages*/])
{
    if (err) {
        return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
    } else {
        return xc_map_foreign_pages(h, dom, prot, arr, pages);
    }
}

#define xenforeignmemory_unmap(h, p, s) munmap(p, (s) * XC_PAGE_SIZE)

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */

#include <xenevtchn.h>
#include <xengnttab.h>
#include <xenforeignmemory.h>

#endif

extern xenforeignmemory_handle *xen_fmem;

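/*
 * A minimal foreign-mapping sketch (a usage illustration, not code from
 * this file); "dom" and "pfn" are placeholders:
 *
 *     int err;
 *     void *p = xenforeignmemory_map(xen_fmem, dom, PROT_READ | PROT_WRITE,
 *                                    1, &pfn, &err);
 *     if (p && !err) {
 *         ...
 *         xenforeignmemory_unmap(xen_fmem, p, 1);
 *     }
 */
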
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900

typedef xc_interface xendevicemodel_handle;

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */

#undef XC_WANT_COMPAT_DEVICEMODEL_API
#include <xendevicemodel.h>

#endif

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100

/*
 * Older Xen has no relocate operation; emulate it by moving the guest
 * frames one at a time with an XENMAPSPACE_gmfn add-to-physmap.
 */
static inline int xendevicemodel_relocate_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint32_t size, uint64_t src_gfn,
    uint64_t dst_gfn)
{
    uint32_t i;
    int rc;

    for (i = 0; i < size; i++) {
        unsigned long idx = src_gfn + i;
        xen_pfn_t gpfn = dst_gfn + i;

        rc = xc_domain_add_to_physmap(xen_xc, domid, XENMAPSPACE_gmfn, idx,
                                      gpfn);
        if (rc) {
            return rc;
        }
    }

    return 0;
}

static inline int xendevicemodel_pin_memory_cacheattr(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t start, uint64_t end,
    uint32_t type)
{
    return xc_domain_pin_memory_cacheattr(xen_xc, domid, start, end, type);
}

typedef void xenforeignmemory_resource_handle;

#define XENMEM_resource_ioreq_server 0

#define XENMEM_resource_ioreq_server_frame_bufioreq 0
#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))

/*
 * Mapping ioreq-server resources is not available before Xen 4.11; fail
 * with EOPNOTSUPP so that callers can fall back to the legacy
 * get_ioreq_server_info() path.
 */
static inline xenforeignmemory_resource_handle *xenforeignmemory_map_resource(
    xenforeignmemory_handle *fmem, domid_t domid, unsigned int type,
    unsigned int id, unsigned long frame, unsigned long nr_frames,
    void **paddr, int prot, int flags)
{
    errno = EOPNOTSUPP;
    return NULL;
}

#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100 */

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000

#define XEN_COMPAT_PHYSMAP
static inline void *xenforeignmemory_map2(xenforeignmemory_handle *h,
                                          uint32_t dom, void *addr,
                                          int prot, int flags, size_t pages,
                                          const xen_pfn_t arr[/*pages*/],
                                          int err[/*pages*/])
{
    assert(addr == NULL && flags == 0);
    return xenforeignmemory_map(h, dom, prot, pages, arr, err);
}

static inline int xentoolcore_restrict_all(domid_t domid)
{
    errno = ENOTTY;
    return -1;
}

static inline int xendevicemodel_shutdown(xendevicemodel_handle *dmod,
                                          domid_t domid, unsigned int reason)
{
    errno = ENOTTY;
    return -1;
}

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 41000 */

#include <xentoolcore.h>

#endif

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900

static inline xendevicemodel_handle *xendevicemodel_open(
    struct xentoollog_logger *logger, unsigned int open_flags)
{
    return xen_xc;
}

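/*
 * Forward the xendevicemodel_* calls introduced in Xen 4.9 to their older
 * xc_hvm_* equivalents.
 */
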
#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500

static inline int xendevicemodel_create_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
    ioservid_t *id)
{
    return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq, id);
}

static inline int xendevicemodel_get_ioreq_server_info(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
    evtchn_port_t *bufioreq_port)
{
    return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn,
                                        bufioreq_pfn, bufioreq_port);
}

static inline int xendevicemodel_map_io_range_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio,
                                               start, end);
}

static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio,
                                                   start, end);
}

static inline int xendevicemodel_map_pcidev_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment,
                                             bus, device, function);
}

static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment,
                                                 bus, device, function);
}

static inline int xendevicemodel_destroy_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
{
    return xc_hvm_destroy_ioreq_server(dmod, domid, id);
}

static inline int xendevicemodel_set_ioreq_server_state(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
{
    return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
}

#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */

static inline int xendevicemodel_set_pci_intx_level(
    xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
    uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
{
    return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device,
                                     intx, level);
}

static inline int xendevicemodel_set_isa_irq_level(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
    unsigned int level)
{
    return xc_hvm_set_isa_irq_level(dmod, domid, irq, level);
}

static inline int xendevicemodel_set_pci_link_route(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
{
    return xc_hvm_set_pci_link_route(dmod, domid, link, irq);
}

static inline int xendevicemodel_inject_msi(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
    uint32_t msi_data)
{
    return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data);
}

static inline int xendevicemodel_track_dirty_vram(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr, unsigned long *dirty_bitmap)
{
    return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr, dirty_bitmap);
}

static inline int xendevicemodel_modified_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr)
{
    return xc_hvm_modified_memory(dmod, domid, first_pfn, nr);
}

static inline int xendevicemodel_set_mem_type(
    xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
    uint64_t first_pfn, uint32_t nr)
{
    return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr);
}

#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900 */

extern xendevicemodel_handle *xen_dmod;

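/*
 * Convenience wrappers around the device-model API, using the global
 * xen_dmod handle.
 */
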
static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
                                   uint64_t first_pfn, uint32_t nr)
{
    return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn, nr);
}

static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
                                         uint8_t bus, uint8_t device,
                                         uint8_t intx, unsigned int level)
{
    return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus,
                                             device, intx, level);
}

static inline int xen_set_pci_link_route(domid_t domid, uint8_t link,
                                         uint8_t irq)
{
    return xendevicemodel_set_pci_link_route(xen_dmod, domid, link, irq);
}

static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
                                 uint32_t msi_data)
{
    return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data);
}

static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
                                        unsigned int level)
{
    return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level);
}

static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
                                       uint32_t nr, unsigned long *bitmap)
{
    return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr,
                                           bitmap);
}

static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
                                      uint32_t nr)
{
    return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr);
}

static inline int xen_restrict(domid_t domid)
{
    int rc;

    rc = xentoolcore_restrict_all(domid);
    trace_xen_domid_restrict(rc ? errno : 0);
    return rc;
}

void destroy_hvm_domain(bool reboot);

/* shutdown/destroy current domain because of an error */
void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);

#ifdef HVM_PARAM_VMPORT_REGS_PFN
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    int rc;
    uint64_t value;

    rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
    if (rc >= 0) {
        *vmport_regs_pfn = (xen_pfn_t) value;
    }
    return rc;
}
#else
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
#endif

/* Xen before 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600

#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
#endif

#endif
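
/*
 * The "default" ioreq server predates the explicit ioreq servers added in
 * Xen 4.5: the hypervisor advertises a single set of ioreq pages and a
 * buffered-ioreq event channel through HVM params rather than per-server
 * ids.
 */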

static inline int xen_get_default_ioreq_server_info(domid_t dom,
                                                    xen_pfn_t *ioreq_pfn,
                                                    xen_pfn_t *bufioreq_pfn,
                                                    evtchn_port_t
                                                    *bufioreq_evtchn)
{
    unsigned long param;
    int rc;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
        return -1;
    }

    *ioreq_pfn = param;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
        return -1;
    }

    *bufioreq_pfn = param;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }

    *bufioreq_evtchn = param;

    return 0;
}

/* Xen before 4.5 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500

#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

#define IOREQ_TYPE_PCI_CONFIG 2

typedef uint16_t ioservid_t;

static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}

static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}

static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}

static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}

static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}

static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}

static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
}

static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
}

static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xen_get_default_ioreq_server_info(dom, ioreq_pfn, bufioreq_pfn,
                                             bufioreq_evtchn);
}

static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    return 0;
}

/* Xen 4.5 */
#else

/*
 * Set when creating an explicit ioreq server fails; we then fall back to
 * the default ioreq server, whose pfns and event channel come from the
 * HVM params (see xen_get_default_ioreq_server_info()).
 */
static bool use_default_ioreq_server;

static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1,
                                                start_addr, end_addr);
}

static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    1, start_addr, end_addr);
}

static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                start_addr, end_addr);
}

static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    0, start_addr, end_addr);
}

static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_pcidev(ioservid, pci_dev_bus_num(pci_dev),
                         PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                              pci_dev_bus_num(pci_dev),
                                              PCI_SLOT(pci_dev->devfn),
                                              PCI_FUNC(pci_dev->devfn));
}

static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_pcidev(ioservid, pci_dev_bus_num(pci_dev),
                           PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                  pci_dev_bus_num(pci_dev),
                                                  PCI_SLOT(pci_dev->devfn),
                                                  PCI_FUNC(pci_dev->devfn));
}

static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
    int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
                                                HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                                                ioservid);

    if (rc == 0) {
        trace_xen_ioreq_server_create(*ioservid);
        return;
    }

    *ioservid = 0;
    use_default_ioreq_server = true;
    trace_xen_default_ioreq_server();
}

static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_ioreq_server_destroy(ioservid);
    xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid);
}

static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    if (use_default_ioreq_server) {
        return xen_get_default_ioreq_server_info(dom, ioreq_pfn, bufioreq_pfn,
                                                 bufioreq_evtchn);
    }

    return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid,
                                                ioreq_pfn, bufioreq_pfn,
                                                bufioreq_evtchn);
}

static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    if (use_default_ioreq_server) {
        return 0;
    }

    trace_xen_ioreq_server_state(ioservid, enable);
    return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid,
                                                 enable);
}

#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500 */
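
/*
 * A minimal lifecycle sketch for the helpers above (error handling
 * omitted; "dom" is a placeholder):
 *
 *     ioservid_t ioservid;
 *     xen_pfn_t ioreq_pfn, bufioreq_pfn;
 *     evtchn_port_t bufioreq_evtchn;
 *
 *     xen_create_ioreq_server(dom, &ioservid);
 *     xen_get_ioreq_server_info(dom, ioservid, &ioreq_pfn, &bufioreq_pfn,
 *                               &bufioreq_evtchn);
 *     xen_set_ioreq_server_state(dom, ioservid, true);
 *     ...
 *     xen_set_ioreq_server_state(dom, ioservid, false);
 *     xen_destroy_ioreq_server(dom, ioservid);
 */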

/* Xen before 4.8 */

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800

struct xengnttab_grant_copy_segment {
    union xengnttab_copy_ptr {
        void *virt;
        struct {
            uint32_t ref;
            uint16_t offset;
            uint16_t domid;
        } foreign;
    } source, dest;
    uint16_t len;
    uint16_t flags;
    int16_t status;
};

typedef struct xengnttab_grant_copy_segment xengnttab_grant_copy_segment_t;

/* Grant copy is not available before Xen 4.8; the stub always fails. */
static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
                                       xengnttab_grant_copy_segment_t *segs)
{
    return -ENOSYS;
}
#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800 */

#endif /* QEMU_HW_XEN_COMMON_H */