]> git.proxmox.com Git - mirror_qemu.git/blame - include/hw/xen/xen_common.h
xen: use 5 digit xen versions
[mirror_qemu.git] / include / hw / xen / xen_common.h
CommitLineData
d94f9486 1#ifndef QEMU_HW_XEN_COMMON_H
175de524 2#define QEMU_HW_XEN_COMMON_H
d94f9486 3
5eeb39c2
IC
4/*
5 * If we have new enough libxenctrl then we do not want/need these compat
6 * interfaces, despite what the user supplied cflags might say. They
7 * must be undefined before including xenctrl.h
8 */
9#undef XC_WANT_COMPAT_EVTCHN_API
10#undef XC_WANT_COMPAT_GNTTAB_API
11#undef XC_WANT_COMPAT_MAP_FOREIGN_API
12
d94f9486 13#include <xenctrl.h>
edfb07ed 14#include <xenstore.h>
d94f9486
AL
15#include <xen/io/xenbus.h>
16
83c9f4ca 17#include "hw/hw.h"
0d09e41a 18#include "hw/xen/xen.h"
3996e85c 19#include "hw/pci/pci.h"
1de7afc9 20#include "qemu/queue.h"
0ab8ed18 21#include "hw/xen/trace.h"
d94f9486 22
260cabed
PD
23extern xc_interface *xen_xc;
24
d94f9486 25/*
edfb07ed 26 * We don't support Xen prior to 4.2.0.
d94f9486 27 */
d5b93ddf 28
f1167ee6 29#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
d655f34e
PD
30
31typedef xc_interface xendevicemodel_handle;
32
33static inline xendevicemodel_handle *xendevicemodel_open(
34 struct xentoollog_logger *logger, unsigned int open_flags)
35{
36 return xen_xc;
37}
38
f1167ee6 39#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500
d655f34e
PD
40
41static inline int xendevicemodel_create_ioreq_server(
42 xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
43 ioservid_t *id)
44{
45 return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq,
46 id);
47}
48
49static inline int xendevicemodel_get_ioreq_server_info(
50 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
51 xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
52 evtchn_port_t *bufioreq_port)
53{
54 return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn,
55 bufioreq_pfn, bufioreq_port);
56}
57
58static inline int xendevicemodel_map_io_range_to_ioreq_server(
59 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
60 uint64_t start, uint64_t end)
61{
62 return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio,
63 start, end);
64}
65
66static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
67 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
68 uint64_t start, uint64_t end)
69{
70 return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio,
71 start, end);
72}
73
74static inline int xendevicemodel_map_pcidev_to_ioreq_server(
75 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
76 uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
77{
78 return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment,
79 bus, device, function);
80}
81
82static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
83 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
84 uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
85{
86 return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment,
87 bus, device, function);
88}
89
90static inline int xendevicemodel_destroy_ioreq_server(
91 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
92{
93 return xc_hvm_destroy_ioreq_server(dmod, domid, id);
94}
95
96static inline int xendevicemodel_set_ioreq_server_state(
97 xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
98{
99 return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
100}
101
f1167ee6 102#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */
d655f34e
PD
103
104static inline int xendevicemodel_set_pci_intx_level(
105 xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
106 uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
107{
108 return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device,
109 intx, level);
110}
111
112static inline int xendevicemodel_set_isa_irq_level(
113 xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
114 unsigned int level)
115{
116 return xc_hvm_set_isa_irq_level(dmod, domid, irq, level);
117}
118
119static inline int xendevicemodel_set_pci_link_route(
120 xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
121{
122 return xc_hvm_set_pci_link_route(dmod, domid, link, irq);
123}
124
125static inline int xendevicemodel_inject_msi(
126 xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
127 uint32_t msi_data)
128{
129 return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data);
130}
131
132static inline int xendevicemodel_track_dirty_vram(
133 xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
134 uint32_t nr, unsigned long *dirty_bitmap)
135{
136 return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr,
137 dirty_bitmap);
138}
139
140static inline int xendevicemodel_modified_memory(
141 xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
142 uint32_t nr)
143{
144 return xc_hvm_modified_memory(dmod, domid, first_pfn, nr);
145}
146
147static inline int xendevicemodel_set_mem_type(
148 xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
149 uint64_t first_pfn, uint32_t nr)
150{
151 return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr);
152}
153
f1167ee6 154#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */
d655f34e
PD
155
156#undef XC_WANT_COMPAT_DEVICEMODEL_API
157#include <xendevicemodel.h>
158
159#endif
160
161extern xendevicemodel_handle *xen_dmod;
162
8f25e754
PD
163static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
164 uint64_t first_pfn, uint32_t nr)
165{
d655f34e
PD
166 return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn,
167 nr);
8f25e754
PD
168}
169
170static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
171 uint8_t bus, uint8_t device,
172 uint8_t intx, unsigned int level)
173{
d655f34e
PD
174 return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus,
175 device, intx, level);
8f25e754
PD
176}
177
178static inline int xen_set_pci_link_route(domid_t domid, uint8_t link,
179 uint8_t irq)
180{
d655f34e 181 return xendevicemodel_set_pci_link_route(xen_dmod, domid, link, irq);
8f25e754
PD
182}
183
184static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
185 uint32_t msi_data)
186{
d655f34e 187 return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data);
8f25e754
PD
188}
189
190static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
191 unsigned int level)
192{
d655f34e 193 return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level);
8f25e754
PD
194}
195
196static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
197 uint32_t nr, unsigned long *bitmap)
198{
d655f34e
PD
199 return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr,
200 bitmap);
8f25e754
PD
201}
202
203static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
204 uint32_t nr)
205{
d655f34e 206 return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr);
8f25e754
PD
207}
208
cb8d4c8f 209/* Xen 4.2 through 4.6 */
f1167ee6 210#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701
d5b93ddf 211
81daba58 212typedef xc_interface xenforeignmemory_handle;
a2db2a1e 213typedef xc_evtchn xenevtchn_handle;
c1345a88 214typedef xc_gnttab xengnttab_handle;
d5b93ddf 215
a2db2a1e
IC
/*
 * Xen 4.2 through 4.6 compat: map the split-library names introduced in
 * Xen 4.7 (xenevtchn, xengnttab, xenforeignmemory) onto the older
 * monolithic libxenctrl entry points.
 *
 * NOTE: the original xenevtchn_open() macro expanded with a trailing
 * semicolon, which breaks any use in expression context (e.g.
 * "if ((h = xenevtchn_open(...)) == NULL)"); it is removed here.
 */
#define xenevtchn_open(l, f) xc_evtchn_open(l, f)
#define xenevtchn_close(h) xc_evtchn_close(h)
#define xenevtchn_fd(h) xc_evtchn_fd(h)
#define xenevtchn_pending(h) xc_evtchn_pending(h)
#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)

#define xengnttab_open(l, f) xc_gnttab_open(l, f)
#define xengnttab_close(h) xc_gnttab_close(h)
#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
#define xengnttab_map_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_grant_refs(h, c, d, r, p)
#define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_domain_grant_refs(h, c, d, r, p)

/* There is no separate foreign-memory handle: reuse the global xc one. */
#define xenforeignmemory_open(l, f) xen_xc
#define xenforeignmemory_close(h)
6aa0205e
IC
237
238static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
239 int prot, size_t pages,
240 const xen_pfn_t arr[/*pages*/],
241 int err[/*pages*/])
242{
243 if (err)
244 return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
245 else
246 return xc_map_foreign_pages(h, dom, prot, arr, pages);
247}
248
249#define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE)
e0cb42ae 250
f1167ee6 251#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */
5eeb39c2 252
5eeb39c2
IC
253#include <xenevtchn.h>
254#include <xengnttab.h>
255#include <xenforeignmemory.h>
256
d94f9486
AL
257#endif
258
260cabed
PD
259extern xenforeignmemory_handle *xen_fmem;
260
180640ea 261void destroy_hvm_domain(bool reboot);
9ce94e7c 262
eaab4d60
AK
263/* shutdown/destroy current domain because of an error */
264void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
265
37f9e258 266#ifdef HVM_PARAM_VMPORT_REGS_PFN
81daba58 267static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
d01a5a3f 268 xen_pfn_t *vmport_regs_pfn)
37f9e258 269{
d01a5a3f
SS
270 int rc;
271 uint64_t value;
272 rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
273 if (rc >= 0) {
274 *vmport_regs_pfn = (xen_pfn_t) value;
275 }
276 return rc;
37f9e258
DS
277}
278#else
81daba58 279static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
d01a5a3f 280 xen_pfn_t *vmport_regs_pfn)
37f9e258
DS
281{
282 return -ENOSYS;
283}
284#endif
285
d8b441a3 286/* Xen before 4.6 */
f1167ee6 287#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600
d8b441a3
JB
288
289#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
290#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
291#endif
292
293#endif
294
260cabed 295static inline int xen_get_default_ioreq_server_info(domid_t dom,
b7665c60
PD
296 xen_pfn_t *ioreq_pfn,
297 xen_pfn_t *bufioreq_pfn,
298 evtchn_port_t
299 *bufioreq_evtchn)
300{
301 unsigned long param;
302 int rc;
303
260cabed 304 rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param);
b7665c60
PD
305 if (rc < 0) {
306 fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
307 return -1;
308 }
309
310 *ioreq_pfn = param;
311
260cabed 312 rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
b7665c60
PD
313 if (rc < 0) {
314 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
315 return -1;
316 }
317
318 *bufioreq_pfn = param;
319
260cabed 320 rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
b7665c60
PD
321 &param);
322 if (rc < 0) {
323 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
324 return -1;
325 }
326
327 *bufioreq_evtchn = param;
328
329 return 0;
330}
331
3996e85c 332/* Xen before 4.5 */
f1167ee6 333#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500
3996e85c
PD
334
335#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
336#define HVM_PARAM_BUFIOREQ_EVTCHN 26
337#endif
338
339#define IOREQ_TYPE_PCI_CONFIG 2
340
d09952ee 341typedef uint16_t ioservid_t;
3996e85c 342
260cabed 343static inline void xen_map_memory_section(domid_t dom,
3996e85c
PD
344 ioservid_t ioservid,
345 MemoryRegionSection *section)
346{
347}
348
260cabed 349static inline void xen_unmap_memory_section(domid_t dom,
3996e85c
PD
350 ioservid_t ioservid,
351 MemoryRegionSection *section)
352{
353}
354
260cabed 355static inline void xen_map_io_section(domid_t dom,
3996e85c
PD
356 ioservid_t ioservid,
357 MemoryRegionSection *section)
358{
359}
360
260cabed 361static inline void xen_unmap_io_section(domid_t dom,
3996e85c
PD
362 ioservid_t ioservid,
363 MemoryRegionSection *section)
364{
365}
366
260cabed 367static inline void xen_map_pcidev(domid_t dom,
3996e85c
PD
368 ioservid_t ioservid,
369 PCIDevice *pci_dev)
370{
371}
372
260cabed 373static inline void xen_unmap_pcidev(domid_t dom,
3996e85c
PD
374 ioservid_t ioservid,
375 PCIDevice *pci_dev)
376{
377}
378
260cabed 379static inline void xen_create_ioreq_server(domid_t dom,
b7665c60 380 ioservid_t *ioservid)
3996e85c 381{
3996e85c
PD
382}
383
260cabed 384static inline void xen_destroy_ioreq_server(domid_t dom,
3996e85c
PD
385 ioservid_t ioservid)
386{
387}
388
260cabed 389static inline int xen_get_ioreq_server_info(domid_t dom,
3996e85c
PD
390 ioservid_t ioservid,
391 xen_pfn_t *ioreq_pfn,
392 xen_pfn_t *bufioreq_pfn,
393 evtchn_port_t *bufioreq_evtchn)
394{
260cabed
PD
395 return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
396 bufioreq_pfn,
b7665c60 397 bufioreq_evtchn);
3996e85c
PD
398}
399
260cabed 400static inline int xen_set_ioreq_server_state(domid_t dom,
3996e85c
PD
401 ioservid_t ioservid,
402 bool enable)
403{
404 return 0;
405}
406
407/* Xen 4.5 */
408#else
409
b7665c60
PD
410static bool use_default_ioreq_server;
411
260cabed 412static inline void xen_map_memory_section(domid_t dom,
3996e85c
PD
413 ioservid_t ioservid,
414 MemoryRegionSection *section)
415{
416 hwaddr start_addr = section->offset_within_address_space;
417 ram_addr_t size = int128_get64(section->size);
418 hwaddr end_addr = start_addr + size - 1;
419
b7665c60
PD
420 if (use_default_ioreq_server) {
421 return;
422 }
423
3996e85c 424 trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
d655f34e
PD
425 xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1,
426 start_addr, end_addr);
3996e85c
PD
427}
428
260cabed 429static inline void xen_unmap_memory_section(domid_t dom,
3996e85c
PD
430 ioservid_t ioservid,
431 MemoryRegionSection *section)
432{
433 hwaddr start_addr = section->offset_within_address_space;
434 ram_addr_t size = int128_get64(section->size);
435 hwaddr end_addr = start_addr + size - 1;
436
b7665c60
PD
437 if (use_default_ioreq_server) {
438 return;
439 }
440
3996e85c 441 trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
d655f34e
PD
442 xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
443 1, start_addr, end_addr);
3996e85c
PD
444}
445
260cabed 446static inline void xen_map_io_section(domid_t dom,
3996e85c
PD
447 ioservid_t ioservid,
448 MemoryRegionSection *section)
449{
450 hwaddr start_addr = section->offset_within_address_space;
451 ram_addr_t size = int128_get64(section->size);
452 hwaddr end_addr = start_addr + size - 1;
453
b7665c60
PD
454 if (use_default_ioreq_server) {
455 return;
456 }
457
3996e85c 458 trace_xen_map_portio_range(ioservid, start_addr, end_addr);
d655f34e
PD
459 xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0,
460 start_addr, end_addr);
3996e85c
PD
461}
462
260cabed 463static inline void xen_unmap_io_section(domid_t dom,
3996e85c
PD
464 ioservid_t ioservid,
465 MemoryRegionSection *section)
466{
467 hwaddr start_addr = section->offset_within_address_space;
468 ram_addr_t size = int128_get64(section->size);
469 hwaddr end_addr = start_addr + size - 1;
470
b7665c60
PD
471 if (use_default_ioreq_server) {
472 return;
473 }
474
3996e85c 475 trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
d655f34e
PD
476 xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
477 0, start_addr, end_addr);
3996e85c
PD
478}
479
260cabed 480static inline void xen_map_pcidev(domid_t dom,
3996e85c
PD
481 ioservid_t ioservid,
482 PCIDevice *pci_dev)
483{
b7665c60
PD
484 if (use_default_ioreq_server) {
485 return;
486 }
487
3996e85c
PD
488 trace_xen_map_pcidev(ioservid, pci_bus_num(pci_dev->bus),
489 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
d655f34e
PD
490 xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0,
491 pci_bus_num(pci_dev->bus),
492 PCI_SLOT(pci_dev->devfn),
493 PCI_FUNC(pci_dev->devfn));
3996e85c
PD
494}
495
260cabed 496static inline void xen_unmap_pcidev(domid_t dom,
3996e85c
PD
497 ioservid_t ioservid,
498 PCIDevice *pci_dev)
499{
b7665c60
PD
500 if (use_default_ioreq_server) {
501 return;
502 }
503
3996e85c
PD
504 trace_xen_unmap_pcidev(ioservid, pci_bus_num(pci_dev->bus),
505 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
d655f34e
PD
506 xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0,
507 pci_bus_num(pci_dev->bus),
508 PCI_SLOT(pci_dev->devfn),
509 PCI_FUNC(pci_dev->devfn));
3996e85c
PD
510}
511
260cabed 512static inline void xen_create_ioreq_server(domid_t dom,
b7665c60 513 ioservid_t *ioservid)
3996e85c 514{
d655f34e
PD
515 int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
516 HVM_IOREQSRV_BUFIOREQ_ATOMIC,
517 ioservid);
3996e85c
PD
518
519 if (rc == 0) {
520 trace_xen_ioreq_server_create(*ioservid);
b7665c60 521 return;
3996e85c
PD
522 }
523
b7665c60
PD
524 *ioservid = 0;
525 use_default_ioreq_server = true;
526 trace_xen_default_ioreq_server();
3996e85c
PD
527}
528
260cabed 529static inline void xen_destroy_ioreq_server(domid_t dom,
3996e85c
PD
530 ioservid_t ioservid)
531{
b7665c60
PD
532 if (use_default_ioreq_server) {
533 return;
534 }
535
3996e85c 536 trace_xen_ioreq_server_destroy(ioservid);
d655f34e 537 xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid);
3996e85c
PD
538}
539
260cabed 540static inline int xen_get_ioreq_server_info(domid_t dom,
3996e85c
PD
541 ioservid_t ioservid,
542 xen_pfn_t *ioreq_pfn,
543 xen_pfn_t *bufioreq_pfn,
544 evtchn_port_t *bufioreq_evtchn)
545{
b7665c60 546 if (use_default_ioreq_server) {
260cabed 547 return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
b7665c60
PD
548 bufioreq_pfn,
549 bufioreq_evtchn);
550 }
551
d655f34e
PD
552 return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid,
553 ioreq_pfn, bufioreq_pfn,
554 bufioreq_evtchn);
3996e85c
PD
555}
556
260cabed 557static inline int xen_set_ioreq_server_state(domid_t dom,
3996e85c
PD
558 ioservid_t ioservid,
559 bool enable)
560{
b7665c60
PD
561 if (use_default_ioreq_server) {
562 return 0;
563 }
564
3996e85c 565 trace_xen_ioreq_server_state(ioservid, enable);
d655f34e
PD
566 return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid,
567 enable);
3996e85c
PD
568}
569
570#endif
571
f1167ee6 572#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600
81daba58 573static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
20a544c7
KRW
574 unsigned int space,
575 unsigned long idx,
576 xen_pfn_t gpfn)
577{
578 return xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
579}
580#else
81daba58 581static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
20a544c7
KRW
582 unsigned int space,
583 unsigned long idx,
584 xen_pfn_t gpfn)
585{
586 /* In Xen 4.6 rc is -1 and errno contains the error value. */
587 int rc = xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
588 if (rc == -1)
589 return errno;
590 return rc;
591}
592#endif
593
#ifdef CONFIG_XEN_PV_DOMAIN_BUILD
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40700
/* Xen < 4.7: xc_domain_create() takes no configuration argument. */
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid);
}
#else
/* Xen >= 4.7 grew an extra configuration parameter; pass NULL for the
 * defaults. */
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid, NULL);
}
#endif
#endif
cdadde39 611
b6eb9b45
PS
612/* Xen before 4.8 */
613
f1167ee6 614#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800
b6eb9b45
PS
615
616
617typedef void *xengnttab_grant_copy_segment_t;
618
619static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
620 xengnttab_grant_copy_segment_t *segs)
621{
622 return -ENOSYS;
623}
624#endif
625
d94f9486 626#endif /* QEMU_HW_XEN_COMMON_H */