git.proxmox.com Git - mirror_qemu.git/blob - include/hw/xen/xen_common.h
xen: Remove now-obsolete xen_xc_domain_add_to_physmap
[mirror_qemu.git] / include / hw / xen / xen_common.h
1 #ifndef QEMU_HW_XEN_COMMON_H
2 #define QEMU_HW_XEN_COMMON_H
3
4 /*
5 * If we have new enough libxenctrl then we do not want/need these compat
6 * interfaces, despite what the user supplied cflags might say. They
7 * must be undefined before including xenctrl.h
8 */
9 #undef XC_WANT_COMPAT_EVTCHN_API
10 #undef XC_WANT_COMPAT_GNTTAB_API
11 #undef XC_WANT_COMPAT_MAP_FOREIGN_API
12
13 #include <xenctrl.h>
14 #include <xenstore.h>
15 #include <xen/io/xenbus.h>
16
17 #include "hw/hw.h"
18 #include "hw/xen/xen.h"
19 #include "hw/pci/pci.h"
20 #include "qemu/queue.h"
21 #include "hw/xen/trace.h"
22
23 extern xc_interface *xen_xc;
24
25 /*
26 * We don't support Xen prior to 4.2.0.
27 */
28
29 /* Xen 4.2 through 4.6 */
30 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701
31
32 typedef xc_interface xenforeignmemory_handle;
33 typedef xc_evtchn xenevtchn_handle;
34 typedef xc_gnttab xengnttab_handle;
35
/*
 * Map the stable xenevtchn_* API onto the pre-4.7 xc_evtchn_* calls.
 * Note: the expansion of xenevtchn_open() must NOT end in a semicolon,
 * otherwise it cannot be used inside an expression such as
 * "if ((h = xenevtchn_open(...)) == NULL)".
 */
36 #define xenevtchn_open(l, f) xc_evtchn_open(l, f)
37 #define xenevtchn_close(h) xc_evtchn_close(h)
38 #define xenevtchn_fd(h) xc_evtchn_fd(h)
39 #define xenevtchn_pending(h) xc_evtchn_pending(h)
40 #define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
41 #define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
42 #define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
43 #define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
44
/* Map the stable xengnttab_* API onto the pre-4.7 xc_gnttab_* calls. */
45 #define xengnttab_open(l, f) xc_gnttab_open(l, f)
46 #define xengnttab_close(h) xc_gnttab_close(h)
47 #define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
48 #define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
49 #define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
50 #define xengnttab_map_grant_refs(h, c, d, r, p) \
51 xc_gnttab_map_grant_refs(h, c, d, r, p)
52 #define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
53 xc_gnttab_map_domain_grant_refs(h, c, d, r, p)
54
/*
 * Pre-4.7 has no separate foreign-memory library; reuse the global
 * xc_interface handle (xen_xc).  close() is therefore a no-op.
 */
55 #define xenforeignmemory_open(l, f) xen_xc
56 #define xenforeignmemory_close(h)
57
/*
 * Compat xenforeignmemory_map() for Xen before 4.7.1: map 'pages' guest
 * frames listed in arr[] from domain 'dom' with protection 'prot'.
 * When err[] is non-NULL, per-page errors are reported there (bulk map);
 * otherwise the whole mapping fails if any single page cannot be mapped.
 * Returns the mapped address, or NULL on failure.
 */
58 static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
59 int prot, size_t pages,
60 const xen_pfn_t arr[/*pages*/],
61 int err[/*pages*/])
62 {
    /* QEMU coding style: conditional bodies are always braced. */
63 if (err) {
64 return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
65 } else {
66 return xc_map_foreign_pages(h, dom, prot, arr, pages);
67 }
}
68
69 #define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE)
70
71 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */
72
73 #include <xenevtchn.h>
74 #include <xengnttab.h>
75 #include <xenforeignmemory.h>
76
77 #endif
78
79 extern xenforeignmemory_handle *xen_fmem;
80
81 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
82
83 typedef xc_interface xendevicemodel_handle;
84
85 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */
86
87 #undef XC_WANT_COMPAT_DEVICEMODEL_API
88 #include <xendevicemodel.h>
89
90 #endif
91
92 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100
93
/*
 * Compat xendevicemodel_relocate_memory() for Xen before 4.11: move
 * 'size' frames from src_gfn to dst_gfn in the guest physmap, one
 * frame at a time, via XENMAPSPACE_gmfn.  Returns 0 on success or the
 * first non-zero error from xc_domain_add_to_physmap().  Note this is
 * not atomic: on error, frames already moved stay moved.
 */
94 static inline int xendevicemodel_relocate_memory(
95 xendevicemodel_handle *dmod, domid_t domid, uint32_t size, uint64_t src_gfn,
96 uint64_t dst_gfn)
97 {
98 uint32_t i;
99 int rc;
100
101 for (i = 0; i < size; i++) {
102 unsigned long idx = src_gfn + i;
103 xen_pfn_t gpfn = dst_gfn + i;
104
105 rc = xc_domain_add_to_physmap(xen_xc, domid, XENMAPSPACE_gmfn, idx,
106 gpfn);
107 if (rc) {
108 return rc;
109 }
110 }
111
112 return 0;
113 }
114
/*
 * Compat wrapper for Xen before 4.11: pin the cache attribute 'type'
 * on guest frames [start, end] by forwarding to libxenctrl.  The dmod
 * argument is ignored; the global xen_xc handle is used instead.
 */
115 static inline int xendevicemodel_pin_memory_cacheattr(
116 xendevicemodel_handle *dmod, domid_t domid, uint64_t start, uint64_t end,
117 uint32_t type)
118 {
119 return xc_domain_pin_memory_cacheattr(xen_xc, domid, start, end, type);
120 }
121
122 #endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100 */
123
124 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000
125
126 #define XEN_COMPAT_PHYSMAP
/*
 * Compat xenforeignmemory_map2() for Xen before 4.10, which lacks the
 * addr/flags parameters.  Only addr == NULL and flags == 0 are
 * supported (asserted); everything else forwards to the plain
 * xenforeignmemory_map().
 */
127 static inline void *xenforeignmemory_map2(xenforeignmemory_handle *h,
128 uint32_t dom, void *addr,
129 int prot, int flags, size_t pages,
130 const xen_pfn_t arr[/*pages*/],
131 int err[/*pages*/])
132 {
133 assert(addr == NULL && flags == 0);
134 return xenforeignmemory_map(h, dom, prot, pages, arr, err);
135 }
136
/*
 * Stub for Xen before 4.10: privilege restriction is unavailable, so
 * always fail with ENOTTY (mirroring the real API's errno convention).
 */
137 static inline int xentoolcore_restrict_all(domid_t domid)
138 {
139 errno = ENOTTY;
140 return -1;
141 }
142
/*
 * Stub for Xen before 4.10: the shutdown devicemodel op does not
 * exist, so always fail with ENOTTY.
 */
143 static inline int xendevicemodel_shutdown(xendevicemodel_handle *dmod,
144 domid_t domid, unsigned int reason)
145 {
146 errno = ENOTTY;
147 return -1;
148 }
149
150 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 41000 */
151
152 #include <xentoolcore.h>
153
154 #endif
155
156 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
157
/*
 * Compat xendevicemodel_open() for Xen before 4.9: there is no
 * separate devicemodel library, so hand back the global libxenctrl
 * handle (logger/open_flags are ignored).
 */
158 static inline xendevicemodel_handle *xendevicemodel_open(
159 struct xentoollog_logger *logger, unsigned int open_flags)
160 {
161 return xen_xc;
162 }
163
164 #if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500
165
/*
 * Pre-4.9 compat: express the xendevicemodel_* ioreq-server API in
 * terms of the old xc_hvm_* libxenctrl calls (only available from
 * Xen 4.5, hence the surrounding version guard).  Each wrapper is a
 * straight 1:1 forward; dmod is the xc_interface handle here.
 */
166 static inline int xendevicemodel_create_ioreq_server(
167 xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
168 ioservid_t *id)
169 {
170 return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq,
171 id);
172 }
173
174 static inline int xendevicemodel_get_ioreq_server_info(
175 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
176 xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
177 evtchn_port_t *bufioreq_port)
178 {
179 return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn,
180 bufioreq_pfn, bufioreq_port);
181 }
182
183 static inline int xendevicemodel_map_io_range_to_ioreq_server(
184 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
185 uint64_t start, uint64_t end)
186 {
187 return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio,
188 start, end);
189 }
190
191 static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
192 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
193 uint64_t start, uint64_t end)
194 {
195 return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio,
196 start, end);
197 }
198
199 static inline int xendevicemodel_map_pcidev_to_ioreq_server(
200 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
201 uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
202 {
203 return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment,
204 bus, device, function);
205 }
206
207 static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
208 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
209 uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
210 {
211 return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment,
212 bus, device, function);
213 }
214
215 static inline int xendevicemodel_destroy_ioreq_server(
216 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
217 {
218 return xc_hvm_destroy_ioreq_server(dmod, domid, id);
219 }
220
221 static inline int xendevicemodel_set_ioreq_server_state(
222 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
223 {
224 return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
225 }
226
227 #endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */
228
/*
 * These interrupt / dirty-tracking / memory-type wrappers have xc_hvm_*
 * equivalents on all supported Xen versions, so they sit outside the
 * 4.5 guard above.
 */
229 static inline int xendevicemodel_set_pci_intx_level(
230 xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
231 uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
232 {
233 return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device,
234 intx, level);
235 }
236
237 static inline int xendevicemodel_set_isa_irq_level(
238 xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
239 unsigned int level)
240 {
241 return xc_hvm_set_isa_irq_level(dmod, domid, irq, level);
242 }
243
244 static inline int xendevicemodel_set_pci_link_route(
245 xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
246 {
247 return xc_hvm_set_pci_link_route(dmod, domid, link, irq);
248 }
249
250 static inline int xendevicemodel_inject_msi(
251 xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
252 uint32_t msi_data)
253 {
254 return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data);
255 }
256
257 static inline int xendevicemodel_track_dirty_vram(
258 xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
259 uint32_t nr, unsigned long *dirty_bitmap)
260 {
261 return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr,
262 dirty_bitmap);
263 }
264
265 static inline int xendevicemodel_modified_memory(
266 xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
267 uint32_t nr)
268 {
269 return xc_hvm_modified_memory(dmod, domid, first_pfn, nr);
270 }
271
272 static inline int xendevicemodel_set_mem_type(
273 xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
274 uint64_t first_pfn, uint32_t nr)
275 {
276 return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr);
277 }
278
279 #endif
280
281 extern xendevicemodel_handle *xen_dmod;
282
/*
 * Convenience front-ends used by the rest of QEMU: same operations as
 * the xendevicemodel_* calls above, with the global xen_dmod handle
 * filled in implicitly.
 */
283 static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
284 uint64_t first_pfn, uint32_t nr)
285 {
286 return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn,
287 nr);
288 }
289
290 static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
291 uint8_t bus, uint8_t device,
292 uint8_t intx, unsigned int level)
293 {
294 return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus,
295 device, intx, level);
296 }
297
298 static inline int xen_set_pci_link_route(domid_t domid, uint8_t link,
299 uint8_t irq)
300 {
301 return xendevicemodel_set_pci_link_route(xen_dmod, domid, link, irq);
302 }
303
304 static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
305 uint32_t msi_data)
306 {
307 return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data);
308 }
309
310 static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
311 unsigned int level)
312 {
313 return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level);
314 }
315
316 static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
317 uint32_t nr, unsigned long *bitmap)
318 {
319 return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr,
320 bitmap);
321 }
322
323 static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
324 uint32_t nr)
325 {
326 return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr);
327 }
328
/*
 * Drop privileges so this process can only affect 'domid'.  Traces the
 * errno on failure (0 on success) and returns the underlying result,
 * which is -1/errno on pre-4.10 Xen where restriction is unsupported.
 */
329 static inline int xen_restrict(domid_t domid)
330 {
331 int rc;
332 rc = xentoolcore_restrict_all(domid);
333 trace_xen_domid_restrict(rc ? errno : 0);
334 return rc;
335 }
336
337 void destroy_hvm_domain(bool reboot);
338
339 /* shutdown/destroy current domain because of an error */
340 void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
341
342 #ifdef HVM_PARAM_VMPORT_REGS_PFN
/*
 * Fetch the guest PFN holding the VMware vmport register page.
 * Returns the xc_hvm_param_get() result (>= 0 on success, with
 * *vmport_regs_pfn filled in), or -ENOSYS when the headers predate
 * HVM_PARAM_VMPORT_REGS_PFN and the feature cannot exist.
 */
343 static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
344 xen_pfn_t *vmport_regs_pfn)
345 {
346 int rc;
347 uint64_t value;
348 rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
349 if (rc >= 0) {
350 *vmport_regs_pfn = (xen_pfn_t) value;
351 }
352 return rc;
353 }
354 #else
355 static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
356 xen_pfn_t *vmport_regs_pfn)
357 {
358 return -ENOSYS;
359 }
360 #endif
361
362 /* Xen before 4.6 */
363 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600
364
365 #ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
366 #define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
367 #endif
368
369 #endif
370
/*
 * Query the legacy (default) ioreq server's shared pages and event
 * channel via HVM params.  Fills *ioreq_pfn, *bufioreq_pfn and
 * *bufioreq_evtchn; returns 0 on success or -1 after logging which
 * parameter could not be read.  Used when no explicit ioreq server
 * was created (Xen < 4.5, or creation failed).
 */
371 static inline int xen_get_default_ioreq_server_info(domid_t dom,
372 xen_pfn_t *ioreq_pfn,
373 xen_pfn_t *bufioreq_pfn,
374 evtchn_port_t
375 *bufioreq_evtchn)
376 {
377 unsigned long param;
378 int rc;
379
380 rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param);
381 if (rc < 0) {
382 fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
383 return -1;
384 }
385
386 *ioreq_pfn = param;
387
388 rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
389 if (rc < 0) {
390 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
391 return -1;
392 }
393
394 *bufioreq_pfn = param;
395
396 rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
397 &param);
398 if (rc < 0) {
399 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
400 return -1;
401 }
402
403 *bufioreq_evtchn = param;
404
405 return 0;
406 }
407
408 /* Xen before 4.5 */
409 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500
410
411 #ifndef HVM_PARAM_BUFIOREQ_EVTCHN
412 #define HVM_PARAM_BUFIOREQ_EVTCHN 26
413 #endif
414
415 #define IOREQ_TYPE_PCI_CONFIG 2
416
417 typedef uint16_t ioservid_t;
418
/*
 * Xen before 4.5 has no explicit ioreq servers: mapping/creation calls
 * are no-ops, and info queries fall back to the default (legacy) ioreq
 * server parameters.
 */
419 static inline void xen_map_memory_section(domid_t dom,
420 ioservid_t ioservid,
421 MemoryRegionSection *section)
422 {
423 }
424
425 static inline void xen_unmap_memory_section(domid_t dom,
426 ioservid_t ioservid,
427 MemoryRegionSection *section)
428 {
429 }
430
431 static inline void xen_map_io_section(domid_t dom,
432 ioservid_t ioservid,
433 MemoryRegionSection *section)
434 {
435 }
436
437 static inline void xen_unmap_io_section(domid_t dom,
438 ioservid_t ioservid,
439 MemoryRegionSection *section)
440 {
441 }
442
443 static inline void xen_map_pcidev(domid_t dom,
444 ioservid_t ioservid,
445 PCIDevice *pci_dev)
446 {
447 }
448
449 static inline void xen_unmap_pcidev(domid_t dom,
450 ioservid_t ioservid,
451 PCIDevice *pci_dev)
452 {
453 }
454
455 static inline void xen_create_ioreq_server(domid_t dom,
456 ioservid_t *ioservid)
457 {
458 }
459
460 static inline void xen_destroy_ioreq_server(domid_t dom,
461 ioservid_t ioservid)
462 {
463 }
464
/* Always answer with the default ioreq server's pages/evtchn. */
465 static inline int xen_get_ioreq_server_info(domid_t dom,
466 ioservid_t ioservid,
467 xen_pfn_t *ioreq_pfn,
468 xen_pfn_t *bufioreq_pfn,
469 evtchn_port_t *bufioreq_evtchn)
470 {
471 return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
472 bufioreq_pfn,
473 bufioreq_evtchn);
474 }
475
/* State changes don't apply to the default server; report success. */
476 static inline int xen_set_ioreq_server_state(domid_t dom,
477 ioservid_t ioservid,
478 bool enable)
479 {
480 return 0;
481 }
482
483 /* Xen 4.5 */
484 #else
485
/*
 * Set by xen_create_ioreq_server() when explicit server creation fails;
 * every helper below then degrades to default-server behaviour (no-op
 * mapping calls, default info, unconditional-success state changes).
 */
486 static bool use_default_ioreq_server;
487
/* Register an MMIO range (inclusive end) with our ioreq server. */
488 static inline void xen_map_memory_section(domid_t dom,
489 ioservid_t ioservid,
490 MemoryRegionSection *section)
491 {
492 hwaddr start_addr = section->offset_within_address_space;
493 ram_addr_t size = int128_get64(section->size);
494 hwaddr end_addr = start_addr + size - 1;
495
496 if (use_default_ioreq_server) {
497 return;
498 }
499
500 trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
501 xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1,
502 start_addr, end_addr);
503 }
504
505 static inline void xen_unmap_memory_section(domid_t dom,
506 ioservid_t ioservid,
507 MemoryRegionSection *section)
508 {
509 hwaddr start_addr = section->offset_within_address_space;
510 ram_addr_t size = int128_get64(section->size);
511 hwaddr end_addr = start_addr + size - 1;
512
513 if (use_default_ioreq_server) {
514 return;
515 }
516
517 trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
518 xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
519 1, start_addr, end_addr);
520 }
521
/* Same as above but for port I/O ranges (is_mmio == 0). */
522 static inline void xen_map_io_section(domid_t dom,
523 ioservid_t ioservid,
524 MemoryRegionSection *section)
525 {
526 hwaddr start_addr = section->offset_within_address_space;
527 ram_addr_t size = int128_get64(section->size);
528 hwaddr end_addr = start_addr + size - 1;
529
530 if (use_default_ioreq_server) {
531 return;
532 }
533
534 trace_xen_map_portio_range(ioservid, start_addr, end_addr);
535 xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0,
536 start_addr, end_addr);
537 }
538
539 static inline void xen_unmap_io_section(domid_t dom,
540 ioservid_t ioservid,
541 MemoryRegionSection *section)
542 {
543 hwaddr start_addr = section->offset_within_address_space;
544 ram_addr_t size = int128_get64(section->size);
545 hwaddr end_addr = start_addr + size - 1;
546
547 if (use_default_ioreq_server) {
548 return;
549 }
550
551 trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
552 xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
553 0, start_addr, end_addr);
554 }
555
/* Route config-space accesses for one PCI device (segment 0) to us. */
556 static inline void xen_map_pcidev(domid_t dom,
557 ioservid_t ioservid,
558 PCIDevice *pci_dev)
559 {
560 if (use_default_ioreq_server) {
561 return;
562 }
563
564 trace_xen_map_pcidev(ioservid, pci_dev_bus_num(pci_dev),
565 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
566 xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0,
567 pci_dev_bus_num(pci_dev),
568 PCI_SLOT(pci_dev->devfn),
569 PCI_FUNC(pci_dev->devfn));
570 }
571
572 static inline void xen_unmap_pcidev(domid_t dom,
573 ioservid_t ioservid,
574 PCIDevice *pci_dev)
575 {
576 if (use_default_ioreq_server) {
577 return;
578 }
579
580 trace_xen_unmap_pcidev(ioservid, pci_dev_bus_num(pci_dev),
581 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
582 xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0,
583 pci_dev_bus_num(pci_dev),
584 PCI_SLOT(pci_dev->devfn),
585 PCI_FUNC(pci_dev->devfn));
586 }
587
/*
 * Try to create an explicit ioreq server.  On failure, fall back to
 * the default ioreq server: *ioservid is zeroed and all later calls
 * short-circuit via use_default_ioreq_server.
 */
588 static inline void xen_create_ioreq_server(domid_t dom,
589 ioservid_t *ioservid)
590 {
591 int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
592 HVM_IOREQSRV_BUFIOREQ_ATOMIC,
593 ioservid);
594
595 if (rc == 0) {
596 trace_xen_ioreq_server_create(*ioservid);
597 return;
598 }
599
600 *ioservid = 0;
601 use_default_ioreq_server = true;
602 trace_xen_default_ioreq_server();
603 }
604
605 static inline void xen_destroy_ioreq_server(domid_t dom,
606 ioservid_t ioservid)
607 {
608 if (use_default_ioreq_server) {
609 return;
610 }
611
612 trace_xen_ioreq_server_destroy(ioservid);
613 xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid);
614 }
615
/* Query pages/evtchn from our server, or the default one on fallback. */
616 static inline int xen_get_ioreq_server_info(domid_t dom,
617 ioservid_t ioservid,
618 xen_pfn_t *ioreq_pfn,
619 xen_pfn_t *bufioreq_pfn,
620 evtchn_port_t *bufioreq_evtchn)
621 {
622 if (use_default_ioreq_server) {
623 return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
624 bufioreq_pfn,
625 bufioreq_evtchn);
626 }
627
628 return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid,
629 ioreq_pfn, bufioreq_pfn,
630 bufioreq_evtchn);
631 }
632
633 static inline int xen_set_ioreq_server_state(domid_t dom,
634 ioservid_t ioservid,
635 bool enable)
636 {
637 if (use_default_ioreq_server) {
638 return 0;
639 }
640
641 trace_xen_ioreq_server_state(ioservid, enable);
642 return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid,
643 enable);
644 }
645
646 #endif
647
648 #ifdef CONFIG_XEN_PV_DOMAIN_BUILD
/*
 * Version shim for xc_domain_create(): Xen 4.7 added a trailing
 * xc_domain_configuration_t* argument, which we pass as NULL.  The
 * signature QEMU sees is identical either way.
 */
649 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40700
650 static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
651 xen_domain_handle_t handle, uint32_t flags,
652 uint32_t *pdomid)
653 {
654 return xc_domain_create(xc, ssidref, handle, flags, pdomid);
655 }
656 #else
657 static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
658 xen_domain_handle_t handle, uint32_t flags,
659 uint32_t *pdomid)
660 {
661 return xc_domain_create(xc, ssidref, handle, flags, pdomid, NULL);
662 }
663 #endif
664 #endif
665
666 /* Xen before 4.8 */
667
668 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800
669
670
/*
 * Stub for Xen before 4.8, which has no grant-copy operation: the
 * segment type is an opaque placeholder and the call always reports
 * -ENOSYS so callers fall back to map-and-memcpy.
 */
671 typedef void *xengnttab_grant_copy_segment_t;
672
673 static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
674 xengnttab_grant_copy_segment_t *segs)
675 {
676 return -ENOSYS;
677 }
678 #endif
679
680 #endif /* QEMU_HW_XEN_COMMON_H */