#ifndef QEMU_HW_XEN_COMMON_H
#define QEMU_HW_XEN_COMMON_H

/*
 * If we have new enough libxenctrl then we do not want/need these compat
 * interfaces, despite what the user-supplied cflags might say. They
 * must be undefined before including xenctrl.h.
 */
#undef XC_WANT_COMPAT_EVTCHN_API
#undef XC_WANT_COMPAT_GNTTAB_API
#undef XC_WANT_COMPAT_MAP_FOREIGN_API

#include <xenctrl.h>
#include <xenstore.h>
#include "hw/xen/interface/io/xenbus.h"

#include "hw/xen/xen.h"
#include "hw/pci/pci_device.h"
#include "hw/xen/trace.h"

extern xc_interface *xen_xc;

/*
 * We don't support Xen prior to 4.2.0.
 */

/* Xen 4.2 through 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701

typedef xc_interface xenforeignmemory_handle;
typedef xc_evtchn xenevtchn_handle;
typedef xc_gnttab xengnttab_handle;
typedef evtchn_port_or_error_t xenevtchn_port_or_error_t;

#define xenevtchn_open(l, f) xc_evtchn_open(l, f)
#define xenevtchn_close(h) xc_evtchn_close(h)
#define xenevtchn_fd(h) xc_evtchn_fd(h)
#define xenevtchn_pending(h) xc_evtchn_pending(h)
#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)

#define xengnttab_open(l, f) xc_gnttab_open(l, f)
#define xengnttab_close(h) xc_gnttab_close(h)
#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
#define xengnttab_map_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_grant_refs(h, c, d, r, p)
#define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_domain_grant_refs(h, c, d, r, p)

#define xenforeignmemory_open(l, f) xen_xc
#define xenforeignmemory_close(h)
static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
                                         int prot, size_t pages,
                                         const xen_pfn_t arr[/*pages*/],
                                         int err[/*pages*/])
{
    if (err) {
        return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
    } else {
        return xc_map_foreign_pages(h, dom, prot, arr, pages);
    }
}

#define xenforeignmemory_unmap(h, p, s) munmap((p), (s) * XC_PAGE_SIZE)

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */

#include <xenevtchn.h>
#include <xengnttab.h>
#include <xenforeignmemory.h>

#endif

extern xenforeignmemory_handle *xen_fmem;

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900

typedef xc_interface xendevicemodel_handle;

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */

#undef XC_WANT_COMPAT_DEVICEMODEL_API
#include <xendevicemodel.h>

#endif

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100

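/*
 * Older libxendevicemodel has no relocate call, so fall back to moving
 * the guest frames one page at a time with XENMAPSPACE_gmfn.
 */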
static inline int xendevicemodel_relocate_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint32_t size, uint64_t src_gfn,
    uint64_t dst_gfn)
{
    uint32_t i;
    int rc;

    for (i = 0; i < size; i++) {
        unsigned long idx = src_gfn + i;
        xen_pfn_t gpfn = dst_gfn + i;

        rc = xc_domain_add_to_physmap(xen_xc, domid, XENMAPSPACE_gmfn, idx,
                                      gpfn);
        if (rc) {
            return rc;
        }
    }

    return 0;
}

static inline int xendevicemodel_pin_memory_cacheattr(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t start, uint64_t end,
    uint32_t type)
{
    return xc_domain_pin_memory_cacheattr(xen_xc, domid, start, end, type);
}

typedef void xenforeignmemory_resource_handle;

#define XENMEM_resource_ioreq_server 0

#define XENMEM_resource_ioreq_server_frame_bufioreq 0
#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))

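/*
 * Mapping ioreq server pages as a resource is not available here; fail
 * with EOPNOTSUPP so callers can fall back to the legacy HVM-param
 * based mapping.
 */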
static inline xenforeignmemory_resource_handle *xenforeignmemory_map_resource(
    xenforeignmemory_handle *fmem, domid_t domid, unsigned int type,
    unsigned int id, unsigned long frame, unsigned long nr_frames,
    void **paddr, int prot, int flags)
{
    errno = EOPNOTSUPP;
    return NULL;
}

static inline int xenforeignmemory_unmap_resource(
    xenforeignmemory_handle *fmem, xenforeignmemory_resource_handle *fres)
{
    return 0;
}

#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100 */

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000

#define XEN_COMPAT_PHYSMAP
static inline void *xenforeignmemory_map2(xenforeignmemory_handle *h,
                                          uint32_t dom, void *addr,
                                          int prot, int flags, size_t pages,
                                          const xen_pfn_t arr[/*pages*/],
                                          int err[/*pages*/])
{
    assert(addr == NULL && flags == 0);
    return xenforeignmemory_map(h, dom, prot, pages, arr, err);
}

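/*
 * Handle restriction does not exist before 4.10; report ENOTTY so
 * callers can distinguish "unsupported" from a real failure.
 */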
static inline int xentoolcore_restrict_all(domid_t domid)
{
    errno = ENOTTY;
    return -1;
}

static inline int xendevicemodel_shutdown(xendevicemodel_handle *dmod,
                                          domid_t domid, unsigned int reason)
{
    errno = ENOTTY;
    return -1;
}

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 41000 */

#include <xentoolcore.h>

#endif

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900

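/*
 * Before 4.9 there is no libxendevicemodel; each xendevicemodel_* call
 * below simply forwards to the matching xc_hvm_* wrapper in libxenctrl,
 * with xc_interface standing in for the handle type.
 */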
static inline xendevicemodel_handle *xendevicemodel_open(
    struct xentoollog_logger *logger, unsigned int open_flags)
{
    return xen_xc;
}

#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500

static inline int xendevicemodel_create_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
    ioservid_t *id)
{
    return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq, id);
}

static inline int xendevicemodel_get_ioreq_server_info(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
    evtchn_port_t *bufioreq_port)
{
    return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn,
                                        bufioreq_pfn, bufioreq_port);
}

static inline int xendevicemodel_map_io_range_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio,
                                               start, end);
}

static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio,
                                                   start, end);
}

static inline int xendevicemodel_map_pcidev_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment,
                                             bus, device, function);
}

static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment,
                                                 bus, device, function);
}

static inline int xendevicemodel_destroy_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
{
    return xc_hvm_destroy_ioreq_server(dmod, domid, id);
}

static inline int xendevicemodel_set_ioreq_server_state(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
{
    return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
}

#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */

static inline int xendevicemodel_set_pci_intx_level(
    xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
    uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
{
    return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device,
                                     intx, level);
}

static inline int xendevicemodel_set_isa_irq_level(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
    unsigned int level)
{
    return xc_hvm_set_isa_irq_level(dmod, domid, irq, level);
}

static inline int xendevicemodel_set_pci_link_route(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
{
    return xc_hvm_set_pci_link_route(dmod, domid, link, irq);
}

static inline int xendevicemodel_inject_msi(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
    uint32_t msi_data)
{
    return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data);
}

static inline int xendevicemodel_track_dirty_vram(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr, unsigned long *dirty_bitmap)
{
    return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr, dirty_bitmap);
}

static inline int xendevicemodel_modified_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr)
{
    return xc_hvm_modified_memory(dmod, domid, first_pfn, nr);
}

static inline int xendevicemodel_set_mem_type(
    xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
    uint64_t first_pfn, uint32_t nr)
{
    return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr);
}

#endif

extern xendevicemodel_handle *xen_dmod;

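/*
 * Thin convenience wrappers that route device model calls through the
 * global xen_dmod handle, so callers don't pass it around explicitly.
 */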
static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
                                   uint64_t first_pfn, uint32_t nr)
{
    return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn, nr);
}

static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
                                         uint8_t bus, uint8_t device,
                                         uint8_t intx, unsigned int level)
{
    return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus,
                                             device, intx, level);
}

static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
                                 uint32_t msi_data)
{
    return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data);
}

static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
                                        unsigned int level)
{
    return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level);
}

static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
                                       uint32_t nr, unsigned long *bitmap)
{
    return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr,
                                           bitmap);
}

static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
                                      uint32_t nr)
{
    return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr);
}

static inline int xen_restrict(domid_t domid)
{
    int rc;
    rc = xentoolcore_restrict_all(domid);
    trace_xen_domid_restrict(rc ? errno : 0);
    return rc;
}

void destroy_hvm_domain(bool reboot);

/* shutdown/destroy current domain because of an error */
void xen_shutdown_fatal_error(const char *fmt, ...) G_GNUC_PRINTF(1, 2);

#ifdef HVM_PARAM_VMPORT_REGS_PFN
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    int rc;
    uint64_t value;
    rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
    if (rc >= 0) {
        *vmport_regs_pfn = (xen_pfn_t) value;
    }
    return rc;
}
#else
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
#endif

/* Xen before 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600

#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
#endif

#endif

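/*
 * The legacy "default" ioreq server is described by HVM params rather
 * than by an ioservid; fetch its pfns and event channel from there.
 */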
static inline int xen_get_default_ioreq_server_info(domid_t dom,
                                                    xen_pfn_t *ioreq_pfn,
                                                    xen_pfn_t *bufioreq_pfn,
                                                    evtchn_port_t
                                                    *bufioreq_evtchn)
{
    unsigned long param;
    int rc;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
        return -1;
    }

    *ioreq_pfn = param;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
        return -1;
    }

    *bufioreq_pfn = param;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }

    *bufioreq_evtchn = param;

    return 0;
}

/* Xen before 4.5 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500

#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

#define IOREQ_TYPE_PCI_CONFIG 2

typedef uint16_t ioservid_t;

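/*
 * Before 4.5 only the default ioreq path exists, so the ioreq server
 * hooks below are no-ops and the server info is always the default one.
 */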
static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}

static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}

static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}

static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}

static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}

static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}

static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
}

static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
}

static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                             bufioreq_pfn,
                                             bufioreq_evtchn);
}

static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    return 0;
}

/* Xen 4.5 */
#else

static bool use_default_ioreq_server;

static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1,
                                                start_addr, end_addr);
}

static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    1, start_addr, end_addr);
}

static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                start_addr, end_addr);
}

static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    0, start_addr, end_addr);
}

static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_pcidev(ioservid, pci_dev_bus_num(pci_dev),
                         PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                              pci_dev_bus_num(pci_dev),
                                              PCI_SLOT(pci_dev->devfn),
                                              PCI_FUNC(pci_dev->devfn));
}

static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_pcidev(ioservid, pci_dev_bus_num(pci_dev),
                           PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                  pci_dev_bus_num(pci_dev),
                                                  PCI_SLOT(pci_dev->devfn),
                                                  PCI_FUNC(pci_dev->devfn));
}

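/*
 * Try to create a real ioreq server; if the hypervisor refuses (for
 * example an older Xen without ioreq server support), fall back to the
 * default ioreq server and remember that choice for the other hooks.
 */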
static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
    int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
                                                HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                                                ioservid);

    if (rc == 0) {
        trace_xen_ioreq_server_create(*ioservid);
        return;
    }

    *ioservid = 0;
    use_default_ioreq_server = true;
    trace_xen_default_ioreq_server();
}

static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_ioreq_server_destroy(ioservid);
    xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid);
}

static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    if (use_default_ioreq_server) {
        return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                                 bufioreq_pfn,
                                                 bufioreq_evtchn);
    }

    return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid,
                                                ioreq_pfn, bufioreq_pfn,
                                                bufioreq_evtchn);
}

static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    if (use_default_ioreq_server) {
        return 0;
    }

    trace_xen_ioreq_server_state(ioservid, enable);
    return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid,
                                                 enable);
}

#endif

/* Xen before 4.8 */

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800

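/*
 * Grant copy was only added in 4.8; provide the segment layout so
 * callers still compile, but fail the operation with -ENOSYS.
 */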
struct xengnttab_grant_copy_segment {
    union xengnttab_copy_ptr {
        void *virt;
        struct {
            uint32_t ref;
            uint16_t offset;
            uint16_t domid;
        } foreign;
    } source, dest;
    uint16_t len;
    uint16_t flags;
    int16_t status;
};

typedef struct xengnttab_grant_copy_segment xengnttab_grant_copy_segment_t;

static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
                                       xengnttab_grant_copy_segment_t *segs)
{
    return -ENOSYS;
}
#endif

#endif /* QEMU_HW_XEN_COMMON_H */