]> git.proxmox.com Git - mirror_qemu.git/blob - include/hw/xen/xen_common.h
Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging
[mirror_qemu.git] / include / hw / xen / xen_common.h
1 #ifndef QEMU_HW_XEN_COMMON_H
2 #define QEMU_HW_XEN_COMMON_H
3
4 /*
5 * If we have new enough libxenctrl then we do not want/need these compat
6 * interfaces, despite what the user supplied cflags might say. They
7 * must be undefined before including xenctrl.h
8 */
9 #undef XC_WANT_COMPAT_EVTCHN_API
10 #undef XC_WANT_COMPAT_GNTTAB_API
11 #undef XC_WANT_COMPAT_MAP_FOREIGN_API
12
13 #include <xenctrl.h>
14 #include <xenstore.h>
15 #include <xen/io/xenbus.h>
16
17 #include "hw/hw.h"
18 #include "hw/xen/xen.h"
19 #include "hw/pci/pci.h"
20 #include "qemu/queue.h"
21 #include "hw/xen/trace.h"
22
23 extern xc_interface *xen_xc;
24
25 /*
26 * We don't support Xen prior to 4.2.0.
27 */
28
29 /* Xen 4.2 through 4.6 */
30 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701
31
32 typedef xc_interface xenforeignmemory_handle;
33 typedef xc_evtchn xenevtchn_handle;
34 typedef xc_gnttab xengnttab_handle;
35
36 #define xenevtchn_open(l, f) xc_evtchn_open(l, f);
37 #define xenevtchn_close(h) xc_evtchn_close(h)
38 #define xenevtchn_fd(h) xc_evtchn_fd(h)
39 #define xenevtchn_pending(h) xc_evtchn_pending(h)
40 #define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
41 #define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
42 #define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
43 #define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
44
45 #define xengnttab_open(l, f) xc_gnttab_open(l, f)
46 #define xengnttab_close(h) xc_gnttab_close(h)
47 #define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
48 #define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
49 #define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
50 #define xengnttab_map_grant_refs(h, c, d, r, p) \
51 xc_gnttab_map_grant_refs(h, c, d, r, p)
52 #define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
53 xc_gnttab_map_domain_grant_refs(h, c, d, r, p)
54
55 #define xenforeignmemory_open(l, f) xen_xc
56 #define xenforeignmemory_close(h)
57
58 static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
59 int prot, size_t pages,
60 const xen_pfn_t arr[/*pages*/],
61 int err[/*pages*/])
62 {
63 if (err)
64 return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
65 else
66 return xc_map_foreign_pages(h, dom, prot, arr, pages);
67 }
68
69 #define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE)
70
71 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */
72
73 #include <xenevtchn.h>
74 #include <xengnttab.h>
75 #include <xenforeignmemory.h>
76
77 #endif
78
79 extern xenforeignmemory_handle *xen_fmem;
80
81 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000
82
/*
 * Xen < 4.10 has no xenforeignmemory_map2(); emulate it on top of
 * xenforeignmemory_map().  The older call cannot honour a caller-chosen
 * virtual address or mapping flags, hence the assert.
 */
#define XEN_COMPAT_PHYSMAP
static inline void *xenforeignmemory_map2(xenforeignmemory_handle *h,
                                          uint32_t dom, void *addr,
                                          int prot, int flags, size_t pages,
                                          const xen_pfn_t arr[/*pages*/],
                                          int err[/*pages*/])
{
    assert(addr == NULL && flags == 0);
    return xenforeignmemory_map(h, dom, prot, pages, arr, err);
}
93
94 #endif
95
96 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
97
/* Pre-4.9 Xen has no separate libxendevicemodel; alias it to libxenctrl. */
typedef xc_interface xendevicemodel_handle;

static inline xendevicemodel_handle *xendevicemodel_open(
    struct xentoollog_logger *logger, unsigned int open_flags)
{
    /* logger and open_flags are ignored; the global xc handle is shared. */
    return xen_xc;
}
105
106 #if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500
107
/*
 * Forward the xendevicemodel ioreq-server API (split out into
 * libxendevicemodel in Xen 4.9) to the equivalent xc_hvm_* calls
 * present in Xen 4.5 through 4.8.  Each shim passes its arguments
 * straight through and returns the underlying call's result.
 */
static inline int xendevicemodel_create_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
    ioservid_t *id)
{
    return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq,
                                      id);
}

static inline int xendevicemodel_get_ioreq_server_info(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
    evtchn_port_t *bufioreq_port)
{
    return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn,
                                        bufioreq_pfn, bufioreq_port);
}

static inline int xendevicemodel_map_io_range_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio,
                                               start, end);
}

static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio,
                                                   start, end);
}

static inline int xendevicemodel_map_pcidev_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment,
                                             bus, device, function);
}

static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment,
                                                 bus, device, function);
}

static inline int xendevicemodel_destroy_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
{
    return xc_hvm_destroy_ioreq_server(dmod, domid, id);
}

static inline int xendevicemodel_set_ioreq_server_state(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
{
    return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
}
168
169 #endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */
170
/*
 * Pre-4.9 compat shims for the remaining xendevicemodel HVM operations;
 * each forwards to the matching xc_hvm_* call in libxenctrl.
 */
static inline int xendevicemodel_set_pci_intx_level(
    xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
    uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
{
    return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device,
                                     intx, level);
}

static inline int xendevicemodel_set_isa_irq_level(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
    unsigned int level)
{
    return xc_hvm_set_isa_irq_level(dmod, domid, irq, level);
}

static inline int xendevicemodel_set_pci_link_route(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
{
    return xc_hvm_set_pci_link_route(dmod, domid, link, irq);
}

static inline int xendevicemodel_inject_msi(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
    uint32_t msi_data)
{
    return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data);
}

static inline int xendevicemodel_track_dirty_vram(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr, unsigned long *dirty_bitmap)
{
    return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr,
                                   dirty_bitmap);
}

static inline int xendevicemodel_modified_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr)
{
    return xc_hvm_modified_memory(dmod, domid, first_pfn, nr);
}

static inline int xendevicemodel_set_mem_type(
    xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
    uint64_t first_pfn, uint32_t nr)
{
    return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr);
}

/*
 * Restriction is not available before Xen 4.9: fail with ENOTTY so
 * xen_restrict() below can recognise "not implemented" and carry on.
 */
static inline int xendevicemodel_restrict(
    xendevicemodel_handle *dmod, domid_t domid)
{
    errno = ENOTTY;
    return -1;
}

static inline int xenforeignmemory_restrict(
    xenforeignmemory_handle *fmem, domid_t domid)
{
    errno = ENOTTY;
    return -1;
}
234
235 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */
236
237 #undef XC_WANT_COMPAT_DEVICEMODEL_API
238 #include <xendevicemodel.h>
239
240 #endif
241
242 extern xendevicemodel_handle *xen_dmod;
243
/*
 * Convenience wrappers around the xendevicemodel API using the global
 * xen_dmod handle; each returns the underlying call's result unchanged.
 */
static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
                                   uint64_t first_pfn, uint32_t nr)
{
    return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn,
                                       nr);
}

static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
                                         uint8_t bus, uint8_t device,
                                         uint8_t intx, unsigned int level)
{
    return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus,
                                             device, intx, level);
}

static inline int xen_set_pci_link_route(domid_t domid, uint8_t link,
                                         uint8_t irq)
{
    return xendevicemodel_set_pci_link_route(xen_dmod, domid, link, irq);
}

static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
                                 uint32_t msi_data)
{
    return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data);
}

static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
                                        unsigned int level)
{
    return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level);
}

static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
                                       uint32_t nr, unsigned long *bitmap)
{
    return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr,
                                           bitmap);
}

static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
                                      uint32_t nr)
{
    return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr);
}
289
290 static inline int xen_restrict(domid_t domid)
291 {
292 int rc;
293
294 /* Attempt to restrict devicemodel operations */
295 rc = xendevicemodel_restrict(xen_dmod, domid);
296 trace_xen_domid_restrict(rc ? errno : 0);
297
298 if (rc < 0) {
299 /*
300 * If errno is ENOTTY then restriction is not implemented so
301 * there's no point in trying to restrict other types of
302 * operation, but it should not be treated as a failure.
303 */
304 if (errno == ENOTTY) {
305 return 0;
306 }
307
308 return rc;
309 }
310
311 /* Restrict foreignmemory operations */
312 rc = xenforeignmemory_restrict(xen_fmem, domid);
313 trace_xen_domid_restrict(rc ? errno : 0);
314
315 return rc;
316 }
317
318 void destroy_hvm_domain(bool reboot);
319
320 /* shutdown/destroy current domain because of an error */
321 void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
322
#ifdef HVM_PARAM_VMPORT_REGS_PFN
/*
 * Fetch the guest pfn holding the vmport register page.  Returns the
 * xc_hvm_param_get() result; *vmport_regs_pfn is only written on success.
 */
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    int rc;
    uint64_t value;
    rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
    if (rc >= 0) {
        *vmport_regs_pfn = (xen_pfn_t) value;
    }
    return rc;
}
#else
/* Xen headers without vmport support: report "not implemented". */
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
#endif
342
/* Xen before 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600

/* Fallback definition of the Xen 4.6 ABI constant for older headers. */
#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
#endif

#endif
351
352 static inline int xen_get_default_ioreq_server_info(domid_t dom,
353 xen_pfn_t *ioreq_pfn,
354 xen_pfn_t *bufioreq_pfn,
355 evtchn_port_t
356 *bufioreq_evtchn)
357 {
358 unsigned long param;
359 int rc;
360
361 rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param);
362 if (rc < 0) {
363 fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
364 return -1;
365 }
366
367 *ioreq_pfn = param;
368
369 rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
370 if (rc < 0) {
371 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
372 return -1;
373 }
374
375 *bufioreq_pfn = param;
376
377 rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
378 &param);
379 if (rc < 0) {
380 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
381 return -1;
382 }
383
384 *bufioreq_evtchn = param;
385
386 return 0;
387 }
388
389 /* Xen before 4.5 */
390 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500
391
/* Fallback definitions of constants absent from pre-4.5 Xen headers. */
#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

#define IOREQ_TYPE_PCI_CONFIG 2

typedef uint16_t ioservid_t;

/*
 * Xen before 4.5 only has the single default ioreq server, so the
 * map/unmap and create/destroy operations below are no-ops and the
 * info query always refers to the default server.
 */
static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}

static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}

static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}

static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}

static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}

static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}

static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
}

static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
}

static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                             bufioreq_pfn,
                                             bufioreq_evtchn);
}

static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    /* The default ioreq server is always enabled. */
    return 0;
}
463
464 /* Xen 4.5 */
465 #else
466
/*
 * Set when explicit ioreq server creation failed and QEMU fell back to
 * the default server.  NOTE(review): a file-scope static in a header
 * gives each including translation unit its own copy of this flag —
 * this relies on the helpers below being used from a single TU; confirm.
 */
static bool use_default_ioreq_server;
468
469 static inline void xen_map_memory_section(domid_t dom,
470 ioservid_t ioservid,
471 MemoryRegionSection *section)
472 {
473 hwaddr start_addr = section->offset_within_address_space;
474 ram_addr_t size = int128_get64(section->size);
475 hwaddr end_addr = start_addr + size - 1;
476
477 if (use_default_ioreq_server) {
478 return;
479 }
480
481 trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
482 xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1,
483 start_addr, end_addr);
484 }
485
486 static inline void xen_unmap_memory_section(domid_t dom,
487 ioservid_t ioservid,
488 MemoryRegionSection *section)
489 {
490 hwaddr start_addr = section->offset_within_address_space;
491 ram_addr_t size = int128_get64(section->size);
492 hwaddr end_addr = start_addr + size - 1;
493
494 if (use_default_ioreq_server) {
495 return;
496 }
497
498 trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
499 xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
500 1, start_addr, end_addr);
501 }
502
503 static inline void xen_map_io_section(domid_t dom,
504 ioservid_t ioservid,
505 MemoryRegionSection *section)
506 {
507 hwaddr start_addr = section->offset_within_address_space;
508 ram_addr_t size = int128_get64(section->size);
509 hwaddr end_addr = start_addr + size - 1;
510
511 if (use_default_ioreq_server) {
512 return;
513 }
514
515 trace_xen_map_portio_range(ioservid, start_addr, end_addr);
516 xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0,
517 start_addr, end_addr);
518 }
519
520 static inline void xen_unmap_io_section(domid_t dom,
521 ioservid_t ioservid,
522 MemoryRegionSection *section)
523 {
524 hwaddr start_addr = section->offset_within_address_space;
525 ram_addr_t size = int128_get64(section->size);
526 hwaddr end_addr = start_addr + size - 1;
527
528 if (use_default_ioreq_server) {
529 return;
530 }
531
532 trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
533 xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
534 0, start_addr, end_addr);
535 }
536
/*
 * Route config-space accesses for @pci_dev (segment 0) to the domain's
 * ioreq server.  No-op when the default ioreq server is in use.
 */
static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_pcidev(ioservid, pci_dev_bus_num(pci_dev),
                         PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                              pci_dev_bus_num(pci_dev),
                                              PCI_SLOT(pci_dev->devfn),
                                              PCI_FUNC(pci_dev->devfn));
}
552
/*
 * Stop routing config-space accesses for @pci_dev (segment 0) to the
 * domain's ioreq server.  No-op when the default ioreq server is in use.
 */
static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_pcidev(ioservid, pci_dev_bus_num(pci_dev),
                           PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                  pci_dev_bus_num(pci_dev),
                                                  PCI_SLOT(pci_dev->devfn),
                                                  PCI_FUNC(pci_dev->devfn));
}
568
569 static inline void xen_create_ioreq_server(domid_t dom,
570 ioservid_t *ioservid)
571 {
572 int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
573 HVM_IOREQSRV_BUFIOREQ_ATOMIC,
574 ioservid);
575
576 if (rc == 0) {
577 trace_xen_ioreq_server_create(*ioservid);
578 return;
579 }
580
581 *ioservid = 0;
582 use_default_ioreq_server = true;
583 trace_xen_default_ioreq_server();
584 }
585
/*
 * Tear down the ioreq server created by xen_create_ioreq_server().
 * No-op when running against the default ioreq server.
 */
static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_ioreq_server_destroy(ioservid);
    xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid);
}
596
/*
 * Query the ioreq/buffered-ioreq pfns and the buffered event channel,
 * either from the explicit ioreq server or — when falling back — from
 * the default server's HVM params.
 */
static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    if (use_default_ioreq_server) {
        return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                                 bufioreq_pfn,
                                                 bufioreq_evtchn);
    }

    return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid,
                                                ioreq_pfn, bufioreq_pfn,
                                                bufioreq_evtchn);
}
613
/*
 * Enable or disable the ioreq server.  The default server cannot be
 * toggled, so that case succeeds trivially.
 */
static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    if (use_default_ioreq_server) {
        return 0;
    }

    trace_xen_ioreq_server_state(ioservid, enable);
    return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid,
                                                 enable);
}
626
627 #endif
628
629 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600
/* Before Xen 4.6 xc_domain_add_to_physmap() needs no return translation. */
static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
                                               unsigned int space,
                                               unsigned long idx,
                                               xen_pfn_t gpfn)
{
    return xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
}
637 #else
638 static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
639 unsigned int space,
640 unsigned long idx,
641 xen_pfn_t gpfn)
642 {
643 /* In Xen 4.6 rc is -1 and errno contains the error value. */
644 int rc = xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
645 if (rc == -1)
646 return errno;
647 return rc;
648 }
649 #endif
650
#ifdef CONFIG_XEN_PV_DOMAIN_BUILD
/*
 * Wrapper hiding the xc_domain_create() signature change: Xen >= 4.7
 * added a trailing xc_domain_configuration_t argument (passed as NULL).
 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40700
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid);
}
#else
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid, NULL);
}
#endif
#endif
668
669 /* Xen before 4.8 */
670
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800


/* Grant copy only exists from Xen 4.8; provide a failing placeholder. */
typedef void *xengnttab_grant_copy_segment_t;

/* Always returns -ENOSYS so callers can detect the feature is absent. */
static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
                                       xengnttab_grant_copy_segment_t *segs)
{
    return -ENOSYS;
}
#endif
682
683 #endif /* QEMU_HW_XEN_COMMON_H */