#ifndef QEMU_HW_XEN_COMMON_H
#define QEMU_HW_XEN_COMMON_H

/*
 * If we have a new enough libxenctrl then we do not want/need these compat
 * interfaces, despite what the user-supplied CFLAGS might say. They
 * must be undefined before including xenctrl.h.
 */
#undef XC_WANT_COMPAT_EVTCHN_API
#undef XC_WANT_COMPAT_GNTTAB_API
#undef XC_WANT_COMPAT_MAP_FOREIGN_API

#include <xenctrl.h>
#include <xenstore.h>
#include <xen/io/xenbus.h>

#include "hw/hw.h"
#include "hw/xen/xen.h"
#include "hw/pci/pci.h"
#include "qemu/queue.h"
#include "hw/xen/trace.h"

extern xc_interface *xen_xc;

/*
 * We don't support Xen prior to 4.2.0.
 */

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900

typedef xc_interface xendevicemodel_handle;

static inline xendevicemodel_handle *xendevicemodel_open(
    struct xentoollog_logger *logger, unsigned int open_flags)
{
    return xen_xc;
}
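
/*
 * Illustrative sketch (not from the original source): with this shim the
 * same open/use sequence works whether or not the real libxendevicemodel
 * is present; on pre-4.9 toolstacks the "handle" is just the global
 * xen_xc interface:
 *
 *     xendevicemodel_handle *dmod = xendevicemodel_open(NULL, 0);
 *     if (dmod == NULL) {
 *         perror("xendevicemodel_open");
 *     }
 */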

#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500

static inline int xendevicemodel_create_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
    ioservid_t *id)
{
    return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq,
                                      id);
}

static inline int xendevicemodel_get_ioreq_server_info(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
    evtchn_port_t *bufioreq_port)
{
    return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn,
                                        bufioreq_pfn, bufioreq_port);
}

static inline int xendevicemodel_map_io_range_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio,
                                               start, end);
}

static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio,
                                                   start, end);
}

static inline int xendevicemodel_map_pcidev_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment,
                                             bus, device, function);
}

static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment,
                                                 bus, device, function);
}

static inline int xendevicemodel_destroy_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
{
    return xc_hvm_destroy_ioreq_server(dmod, domid, id);
}

static inline int xendevicemodel_set_ioreq_server_state(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
{
    return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
}

#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */
static inline int xendevicemodel_set_pci_intx_level(
    xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
    uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
{
    return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device,
                                     intx, level);
}

static inline int xendevicemodel_set_isa_irq_level(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
    unsigned int level)
{
    return xc_hvm_set_isa_irq_level(dmod, domid, irq, level);
}

static inline int xendevicemodel_set_pci_link_route(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
{
    return xc_hvm_set_pci_link_route(dmod, domid, link, irq);
}

static inline int xendevicemodel_inject_msi(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
    uint32_t msi_data)
{
    return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data);
}

static inline int xendevicemodel_track_dirty_vram(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr, unsigned long *dirty_bitmap)
{
    return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr,
                                   dirty_bitmap);
}

static inline int xendevicemodel_modified_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr)
{
    return xc_hvm_modified_memory(dmod, domid, first_pfn, nr);
}

static inline int xendevicemodel_set_mem_type(
    xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
    uint64_t first_pfn, uint32_t nr)
{
    return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr);
}

/* Handle restriction needs libxendevicemodel (Xen 4.9+): report "unsupported". */
static inline int xendevicemodel_restrict(
    xendevicemodel_handle *dmod, domid_t domid)
{
    errno = ENOTTY;
    return -1;
}

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */

#undef XC_WANT_COMPAT_DEVICEMODEL_API
#include <xendevicemodel.h>

#endif

extern xendevicemodel_handle *xen_dmod;

static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
                                   uint64_t first_pfn, uint32_t nr)
{
    return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn,
                                       nr);
}

static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
                                         uint8_t bus, uint8_t device,
                                         uint8_t intx, unsigned int level)
{
    return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus,
                                             device, intx, level);
}

static inline int xen_set_pci_link_route(domid_t domid, uint8_t link,
                                         uint8_t irq)
{
    return xendevicemodel_set_pci_link_route(xen_dmod, domid, link, irq);
}

static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
                                 uint32_t msi_data)
{
    return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data);
}

static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
                                        unsigned int level)
{
    return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level);
}

static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
                                       uint32_t nr, unsigned long *bitmap)
{
    return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr,
                                           bitmap);
}

static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
                                      uint32_t nr)
{
    return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr);
}

static inline int xen_restrict(domid_t domid)
{
    int rc = xendevicemodel_restrict(xen_dmod, domid);

    trace_xen_domid_restrict(errno);

    /* Restriction not implemented by this toolstack: not an error. */
    if (rc < 0 && errno == ENOTTY) {
        return 0;
    }

    return rc;
}
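
/*
 * Usage sketch (an assumption, not original code): because the ENOTTY
 * case above is folded into success, callers can attempt restriction
 * unconditionally once every library handle has been opened, and only
 * genuine failures propagate:
 *
 *     if (xen_restrict(xen_domid) < 0) {
 *         fprintf(stderr, "failed to restrict Xen handles: %s\n",
 *                 strerror(errno));
 *         exit(1);
 *     }
 */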

/* Xen 4.2 through 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701

typedef xc_interface xenforeignmemory_handle;
typedef xc_evtchn xenevtchn_handle;
typedef xc_gnttab xengnttab_handle;

#define xenevtchn_open(l, f) xc_evtchn_open(l, f)
#define xenevtchn_close(h) xc_evtchn_close(h)
#define xenevtchn_fd(h) xc_evtchn_fd(h)
#define xenevtchn_pending(h) xc_evtchn_pending(h)
#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
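
/*
 * Illustrative round trip (a sketch; "remote_port" is a hypothetical port
 * obtained out of band): code written against the split 4.7+ libxenevtchn
 * API runs unchanged on older libxenctrl via the macros above:
 *
 *     xenevtchn_handle *xeh = xenevtchn_open(NULL, 0);
 *     int local = xenevtchn_bind_interdomain(xeh, domid, remote_port);
 *     if (local >= 0) {
 *         xenevtchn_notify(xeh, local);
 *         xenevtchn_unbind(xeh, local);
 *     }
 *     xenevtchn_close(xeh);
 */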

#define xengnttab_open(l, f) xc_gnttab_open(l, f)
#define xengnttab_close(h) xc_gnttab_close(h)
#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
#define xengnttab_map_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_grant_refs(h, c, d, r, p)
#define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_domain_grant_refs(h, c, d, r, p)

#define xenforeignmemory_open(l, f) xen_xc
#define xenforeignmemory_close(h)

static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
                                         int prot, size_t pages,
                                         const xen_pfn_t arr[/*pages*/],
                                         int err[/*pages*/])
{
    if (err) {
        return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
    } else {
        return xc_map_foreign_pages(h, dom, prot, arr, pages);
    }
}

#define xenforeignmemory_unmap(h, p, s) munmap(p, (s) * XC_PAGE_SIZE)
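
/*
 * Mapping sketch (assumption for illustration; "pfn" is a guest frame
 * number supplied by the caller): one read-only page is mapped through
 * the compat wrapper and released again, with the per-page error
 * reported via "err":
 *
 *     int err;
 *     void *p = xenforeignmemory_map(xen_fmem, domid, PROT_READ,
 *                                    1, &pfn, &err);
 *     if (p != NULL) {
 *         if (err == 0) {
 *             // ... read the page ...
 *         }
 *         xenforeignmemory_unmap(xen_fmem, p, 1);
 *     }
 */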

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */

#include <xenevtchn.h>
#include <xengnttab.h>
#include <xenforeignmemory.h>

#endif

extern xenforeignmemory_handle *xen_fmem;

void destroy_hvm_domain(bool reboot);

/* Shut down or destroy the current domain because of an error. */
void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);

#ifdef HVM_PARAM_VMPORT_REGS_PFN
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    int rc;
    uint64_t value;

    rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
    if (rc >= 0) {
        *vmport_regs_pfn = (xen_pfn_t) value;
    }
    return rc;
}
#else
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
#endif
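
/*
 * Probe sketch (an illustrative assumption, not original code): the two
 * definitions above let callers probe for VMware-port support uniformly;
 * on toolstacks without HVM_PARAM_VMPORT_REGS_PFN the stub simply
 * reports -ENOSYS:
 *
 *     xen_pfn_t vmport_pfn;
 *     if (xen_get_vmport_regs_pfn(xen_xc, xen_domid, &vmport_pfn) < 0) {
 *         // run without vmport emulation
 *     }
 */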

/* Xen before 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600

#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
#endif

#endif

static inline int xen_get_default_ioreq_server_info(domid_t dom,
                                                    xen_pfn_t *ioreq_pfn,
                                                    xen_pfn_t *bufioreq_pfn,
                                                    evtchn_port_t *bufioreq_evtchn)
{
    unsigned long param;
    int rc;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
        return -1;
    }

    *ioreq_pfn = param;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
        return -1;
    }

    *bufioreq_pfn = param;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
                          &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }

    *bufioreq_evtchn = param;

    return 0;
}

/* Xen before 4.5 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500

#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

#define IOREQ_TYPE_PCI_CONFIG 2

typedef uint16_t ioservid_t;

/*
 * Xen before 4.5 has no ioreq servers: every emulator shares the default
 * ioreq pages, so these operations are no-ops.
 */
static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}

static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}

static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}

static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}

static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}

static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}

static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
}

static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
}

static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                             bufioreq_pfn,
                                             bufioreq_evtchn);
}

static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    return 0;
}

/* Xen 4.5 */
#else

static bool use_default_ioreq_server;

static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1,
                                                start_addr, end_addr);
}

static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    1, start_addr, end_addr);
}

static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                start_addr, end_addr);
}

static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    0, start_addr, end_addr);
}

static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_pcidev(ioservid, pci_bus_num(pci_dev->bus),
                         PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                              pci_bus_num(pci_dev->bus),
                                              PCI_SLOT(pci_dev->devfn),
                                              PCI_FUNC(pci_dev->devfn));
}

static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_pcidev(ioservid, pci_bus_num(pci_dev->bus),
                           PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                  pci_bus_num(pci_dev->bus),
                                                  PCI_SLOT(pci_dev->devfn),
                                                  PCI_FUNC(pci_dev->devfn));
}

static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
    int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
                                                HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                                                ioservid);

    if (rc == 0) {
        trace_xen_ioreq_server_create(*ioservid);
        return;
    }

    *ioservid = 0;
    use_default_ioreq_server = true;
    trace_xen_default_ioreq_server();
}

static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_ioreq_server_destroy(ioservid);
    xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid);
}

static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    if (use_default_ioreq_server) {
        return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                                 bufioreq_pfn,
                                                 bufioreq_evtchn);
    }

    return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid,
                                                ioreq_pfn, bufioreq_pfn,
                                                bufioreq_evtchn);
}

static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    if (use_default_ioreq_server) {
        return 0;
    }

    trace_xen_ioreq_server_state(ioservid, enable);
    return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid,
                                                 enable);
}

#endif
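
/*
 * Bring-up sketch (not original code; error handling elided): a device
 * model creates its ioreq server, fetches the ioreq page frames and
 * buffered-ioreq event channel, and only then switches the server on:
 *
 *     ioservid_t ioservid;
 *     xen_pfn_t ioreq_pfn, bufioreq_pfn;
 *     evtchn_port_t bufioreq_evtchn;
 *
 *     xen_create_ioreq_server(xen_domid, &ioservid);
 *     if (xen_get_ioreq_server_info(xen_domid, ioservid, &ioreq_pfn,
 *                                   &bufioreq_pfn, &bufioreq_evtchn) == 0) {
 *         xen_set_ioreq_server_state(xen_domid, ioservid, true);
 *     }
 */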

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600
static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
                                               unsigned int space,
                                               unsigned long idx,
                                               xen_pfn_t gpfn)
{
    return xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
}
#else
static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
                                               unsigned int space,
                                               unsigned long idx,
                                               xen_pfn_t gpfn)
{
    /* In Xen 4.6 and later, rc is -1 and errno contains the error value. */
    int rc = xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
    if (rc == -1) {
        return errno;
    }
    return rc;
}
#endif
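
/*
 * Error-convention sketch (an assumption for illustration; "idx" and
 * "gpfn" come from the caller): both variants return 0 on success, so
 * callers just test for nonzero and, on Xen 4.6 and later, receive the
 * errno value directly:
 *
 *     int rc = xen_xc_domain_add_to_physmap(xen_xc, xen_domid,
 *                                           XENMAPSPACE_gmfn, idx, gpfn);
 *     if (rc) {
 *         // handle failure
 *     }
 */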

#ifdef CONFIG_XEN_PV_DOMAIN_BUILD
/* Xen 4.7 and later xc_domain_create() takes an extra configuration argument. */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40700
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid);
}
#else
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid, NULL);
}
#endif
#endif

/* Xen before 4.8 */

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800

typedef void *xengnttab_grant_copy_segment_t;

static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
                                       xengnttab_grant_copy_segment_t *segs)
{
    return -ENOSYS;
}
#endif
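
/*
 * Fallback sketch (assumption; "xgt", "segs" and "nr_segs" are the
 * caller's): grant-copy users can call unconditionally and fall back to
 * map/memcpy/unmap when the pre-4.8 stub reports -ENOSYS:
 *
 *     if (xengnttab_grant_copy(xgt, nr_segs, segs) < 0) {
 *         // fall back to xengnttab_map_grant_refs() + memcpy()
 *     }
 */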

#endif /* QEMU_HW_XEN_COMMON_H */