]>
Commit | Line | Data |
---|---|---|
d94f9486 AL |
1 | #ifndef QEMU_HW_XEN_COMMON_H |
2 | #define QEMU_HW_XEN_COMMON_H 1 | |
3 | ||
d5b93ddf AP |
4 | #include "config-host.h" |
5 | ||
d94f9486 AL |
6 | #include <stddef.h> |
7 | #include <inttypes.h> | |
8 | ||
5eeb39c2 IC |
9 | /* |
10 | * If we have new enough libxenctrl then we do not want/need these compat | |
11 | * interfaces, despite what the user supplied cflags might say. They | |
12 | * must be undefined before including xenctrl.h | |
13 | */ | |
14 | #undef XC_WANT_COMPAT_EVTCHN_API | |
15 | #undef XC_WANT_COMPAT_GNTTAB_API | |
16 | #undef XC_WANT_COMPAT_MAP_FOREIGN_API | |
17 | ||
d94f9486 | 18 | #include <xenctrl.h> |
e108a3c1 AP |
19 | #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 420 |
20 | # include <xs.h> | |
21 | #else | |
22 | # include <xenstore.h> | |
23 | #endif | |
d94f9486 AL |
24 | #include <xen/io/xenbus.h> |
25 | ||
83c9f4ca | 26 | #include "hw/hw.h" |
0d09e41a | 27 | #include "hw/xen/xen.h" |
3996e85c | 28 | #include "hw/pci/pci.h" |
1de7afc9 | 29 | #include "qemu/queue.h" |
3996e85c | 30 | #include "trace.h" |
d94f9486 AL |
31 | |
32 | /* | |
d5b93ddf | 33 | * We don't support Xen prior to 3.3.0. |
d94f9486 | 34 | */ |
d5b93ddf AP |
35 | |
36 | /* Xen before 4.0 */ | |
37 | #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 400 | |
38 | static inline void *xc_map_foreign_bulk(int xc_handle, uint32_t dom, int prot, | |
39 | xen_pfn_t *arr, int *err, | |
40 | unsigned int num) | |
41 | { | |
42 | return xc_map_foreign_batch(xc_handle, dom, prot, arr, num); | |
43 | } | |
d94f9486 | 44 | #endif |
d5b93ddf AP |
45 | |
46 | ||
47 | /* Xen before 4.1 */ | |
48 | #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 410 | |
49 | ||
50 | typedef int XenXC; | |
a2db2a1e | 51 | typedef int xenevtchn_handle; |
c1345a88 | 52 | typedef int xengnttab_handle; |
e0cb42ae | 53 | typedef int xenforeignmemory_handle; |
d5b93ddf AP |
54 | |
55 | # define XC_INTERFACE_FMT "%i" | |
56 | # define XC_HANDLER_INITIAL_VALUE -1 | |
57 | ||
a2db2a1e IC |
58 | static inline xenevtchn_handle *xenevtchn_open(void *logger, |
59 | unsigned int open_flags) | |
60 | { | |
61 | xenevtchn_handle *h = malloc(sizeof(*h)); | |
62 | if (!h) { | |
63 | return NULL; | |
64 | } | |
65 | *h = xc_evtchn_open(); | |
66 | if (*h == -1) { | |
67 | free(h); | |
68 | h = NULL; | |
69 | } | |
70 | return h; | |
71 | } | |
72 | static inline int xenevtchn_close(xenevtchn_handle *h) | |
d5b93ddf | 73 | { |
a2db2a1e IC |
74 | int rc = xc_evtchn_close(*h); |
75 | free(h); | |
76 | return rc; | |
d5b93ddf | 77 | } |
/*
 * Event-channel compat macros: the handle is a malloc'd int (see
 * xenevtchn_open() above), so each macro dereferences it before
 * forwarding to the pre-4.1 xc_evtchn_* call.
 */
#define xenevtchn_fd(h) xc_evtchn_fd(*h)
#define xenevtchn_pending(h) xc_evtchn_pending(*h)
#define xenevtchn_notify(h, p) xc_evtchn_notify(*h, p)
#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(*h, d, p)
#define xenevtchn_unmask(h, p) xc_evtchn_unmask(*h, p)
/*
 * Fix: xenevtchn_unbind was erroneously mapped to xc_evtchn_unmask,
 * which would leave the port bound (and merely unmask it) on every
 * unbind.  Forward to xc_evtchn_unbind instead.
 */
#define xenevtchn_unbind(h, p) xc_evtchn_unbind(*h, p)
d5b93ddf | 84 | |
c1345a88 IC |
85 | static inline xengnttab_handle *xengnttab_open(void *logger, |
86 | unsigned int open_flags) | |
d5b93ddf | 87 | { |
c1345a88 IC |
88 | xengnttab_handle *h = malloc(sizeof(*h)); |
89 | if (!h) { | |
90 | return NULL; | |
91 | } | |
92 | *h = xc_gnttab_open(); | |
93 | if (*h == -1) { | |
94 | free(h); | |
95 | h = NULL; | |
96 | } | |
97 | return h; | |
d5b93ddf | 98 | } |
c1345a88 IC |
99 | static inline int xengnttab_close(xengnttab_handle *h) |
100 | { | |
101 | int rc = xc_gnttab_close(*h); | |
102 | free(h); | |
103 | return rc; | |
104 | } | |
105 | #define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(*h, n) | |
106 | #define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(*h, d, r, p) | |
107 | #define xengnttab_map_grant_refs(h, c, d, r, p) \ | |
108 | xc_gnttab_map_grant_refs(*h, c, d, r, p) | |
109 | #define xengnttab_unmap(h, a, n) xc_gnttab_munmap(*h, a, n) | |
d5b93ddf AP |
110 | |
111 | static inline XenXC xen_xc_interface_open(void *logger, void *dombuild_logger, | |
112 | unsigned int open_flags) | |
113 | { | |
114 | return xc_interface_open(); | |
115 | } | |
116 | ||
e0cb42ae IC |
117 | /* See below for xenforeignmemory_* APIs */ |
118 | ||
432d268c JN |
119 | static inline int xc_domain_populate_physmap_exact |
120 | (XenXC xc_handle, uint32_t domid, unsigned long nr_extents, | |
121 | unsigned int extent_order, unsigned int mem_flags, xen_pfn_t *extent_start) | |
122 | { | |
123 | return xc_domain_memory_populate_physmap | |
124 | (xc_handle, domid, nr_extents, extent_order, mem_flags, extent_start); | |
125 | } | |
126 | ||
b87de24e AP |
127 | static inline int xc_domain_add_to_physmap(int xc_handle, uint32_t domid, |
128 | unsigned int space, unsigned long idx, | |
129 | xen_pfn_t gpfn) | |
130 | { | |
131 | struct xen_add_to_physmap xatp = { | |
132 | .domid = domid, | |
133 | .space = space, | |
134 | .idx = idx, | |
135 | .gpfn = gpfn, | |
136 | }; | |
137 | ||
138 | return xc_memory_op(xc_handle, XENMEM_add_to_physmap, &xatp); | |
139 | } | |
140 | ||
/*
 * Pre-4.2 xenstore only exposes the daemon_open/daemon_close entry
 * points; provide the modern xs_open/xs_close names on top of them.
 * flags is accepted for API parity and ignored.
 */
static inline struct xs_handle *xs_open(unsigned long flags)
{
    return xs_daemon_open();
}

static inline void xs_close(struct xs_handle *xsh)
{
    if (xsh) {
        xs_daemon_close(xsh);
    }
}
152 | ||
432d268c | 153 | |
5eeb39c2 IC |
154 | /* Xen 4.1 thru 4.6 */ |
155 | #elif CONFIG_XEN_CTRL_INTERFACE_VERSION < 471 | |
d5b93ddf AP |
156 | |
157 | typedef xc_interface *XenXC; | |
e0cb42ae | 158 | typedef xc_interface *xenforeignmemory_handle; |
a2db2a1e | 159 | typedef xc_evtchn xenevtchn_handle; |
c1345a88 | 160 | typedef xc_gnttab xengnttab_handle; |
d5b93ddf AP |
161 | |
162 | # define XC_INTERFACE_FMT "%p" | |
163 | # define XC_HANDLER_INITIAL_VALUE NULL | |
164 | ||
/*
 * Event-channel compat macros for 4.1-4.6: handles are already
 * pointers, so forward directly to the xc_evtchn_* names.
 *
 * Fix: drop the stray trailing semicolon from the xenevtchn_open()
 * expansion.  It made the macro expand to two "statements", breaking
 * any use inside an expression or condition (e.g.
 * "if (!(h = xenevtchn_open(...)))") and silently adding an empty
 * statement elsewhere.
 */
#define xenevtchn_open(l, f) xc_evtchn_open(l, f)
#define xenevtchn_close(h) xc_evtchn_close(h)
#define xenevtchn_fd(h) xc_evtchn_fd(h)
#define xenevtchn_pending(h) xc_evtchn_pending(h)
#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
d5b93ddf | 173 | |
/* Grant-table compat macros for 4.1-4.6: direct forwarding. */
#define xengnttab_open(l, f) xc_gnttab_open(l, f)
#define xengnttab_close(h) xc_gnttab_close(h)
#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
#define xengnttab_map_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_grant_refs(h, c, d, r, p)
#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
d5b93ddf AP |
181 | |
182 | static inline XenXC xen_xc_interface_open(void *logger, void *dombuild_logger, | |
183 | unsigned int open_flags) | |
184 | { | |
185 | return xc_interface_open(logger, dombuild_logger, open_flags); | |
186 | } | |
187 | ||
e0cb42ae IC |
188 | /* See below for xenforeignmemory_* APIs */ |
189 | ||
5eeb39c2 IC |
190 | #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 471 */ |
191 | ||
192 | typedef xc_interface *XenXC; | |
193 | ||
194 | # define XC_INTERFACE_FMT "%p" | |
195 | # define XC_HANDLER_INITIAL_VALUE NULL | |
196 | ||
197 | #include <xenevtchn.h> | |
198 | #include <xengnttab.h> | |
199 | #include <xenforeignmemory.h> | |
200 | ||
201 | static inline XenXC xen_xc_interface_open(void *logger, void *dombuild_logger, | |
202 | unsigned int open_flags) | |
203 | { | |
204 | return xc_interface_open(logger, dombuild_logger, open_flags); | |
205 | } | |
d94f9486 AL |
206 | #endif |
207 | ||
4c9f8d1b SS |
208 | /* Xen before 4.2 */ |
209 | #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 420 | |
210 | static inline int xen_xc_hvm_inject_msi(XenXC xen_xc, domid_t dom, | |
211 | uint64_t addr, uint32_t data) | |
212 | { | |
213 | return -ENOSYS; | |
214 | } | |
f3135204 OH |
215 | /* The followings are only to compile op_discard related code on older |
216 | * Xen releases. */ | |
217 | #define BLKIF_OP_DISCARD 5 | |
218 | struct blkif_request_discard { | |
219 | uint64_t nr_sectors; | |
220 | uint64_t sector_number; | |
221 | }; | |
4c9f8d1b SS |
222 | #else |
223 | static inline int xen_xc_hvm_inject_msi(XenXC xen_xc, domid_t dom, | |
224 | uint64_t addr, uint32_t data) | |
225 | { | |
226 | return xc_hvm_inject_msi(xen_xc, dom, addr, data); | |
227 | } | |
228 | #endif | |
229 | ||
180640ea | 230 | void destroy_hvm_domain(bool reboot); |
9ce94e7c | 231 | |
eaab4d60 AK |
232 | /* shutdown/destroy current domain because of an error */ |
233 | void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2); | |
234 | ||
37f9e258 DS |
235 | #ifdef HVM_PARAM_VMPORT_REGS_PFN |
236 | static inline int xen_get_vmport_regs_pfn(XenXC xc, domid_t dom, | |
d01a5a3f | 237 | xen_pfn_t *vmport_regs_pfn) |
37f9e258 | 238 | { |
d01a5a3f SS |
239 | int rc; |
240 | uint64_t value; | |
241 | rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value); | |
242 | if (rc >= 0) { | |
243 | *vmport_regs_pfn = (xen_pfn_t) value; | |
244 | } | |
245 | return rc; | |
37f9e258 DS |
246 | } |
247 | #else | |
248 | static inline int xen_get_vmport_regs_pfn(XenXC xc, domid_t dom, | |
d01a5a3f | 249 | xen_pfn_t *vmport_regs_pfn) |
37f9e258 DS |
250 | { |
251 | return -ENOSYS; | |
252 | } | |
253 | #endif | |
254 | ||
/* Xen before 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460

/* Older headers lack this constant; the value matches the 4.6 ABI. */
#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
#endif

#endif
263 | ||
3996e85c PD |
264 | /* Xen before 4.5 */ |
265 | #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 450 | |
266 | ||
267 | #ifndef HVM_PARAM_BUFIOREQ_EVTCHN | |
268 | #define HVM_PARAM_BUFIOREQ_EVTCHN 26 | |
269 | #endif | |
270 | ||
271 | #define IOREQ_TYPE_PCI_CONFIG 2 | |
272 | ||
d09952ee | 273 | typedef uint16_t ioservid_t; |
3996e85c PD |
274 | |
275 | static inline void xen_map_memory_section(XenXC xc, domid_t dom, | |
276 | ioservid_t ioservid, | |
277 | MemoryRegionSection *section) | |
278 | { | |
279 | } | |
280 | ||
281 | static inline void xen_unmap_memory_section(XenXC xc, domid_t dom, | |
282 | ioservid_t ioservid, | |
283 | MemoryRegionSection *section) | |
284 | { | |
285 | } | |
286 | ||
287 | static inline void xen_map_io_section(XenXC xc, domid_t dom, | |
288 | ioservid_t ioservid, | |
289 | MemoryRegionSection *section) | |
290 | { | |
291 | } | |
292 | ||
293 | static inline void xen_unmap_io_section(XenXC xc, domid_t dom, | |
294 | ioservid_t ioservid, | |
295 | MemoryRegionSection *section) | |
296 | { | |
297 | } | |
298 | ||
299 | static inline void xen_map_pcidev(XenXC xc, domid_t dom, | |
300 | ioservid_t ioservid, | |
301 | PCIDevice *pci_dev) | |
302 | { | |
303 | } | |
304 | ||
305 | static inline void xen_unmap_pcidev(XenXC xc, domid_t dom, | |
306 | ioservid_t ioservid, | |
307 | PCIDevice *pci_dev) | |
308 | { | |
309 | } | |
310 | ||
311 | static inline int xen_create_ioreq_server(XenXC xc, domid_t dom, | |
312 | ioservid_t *ioservid) | |
313 | { | |
314 | return 0; | |
315 | } | |
316 | ||
317 | static inline void xen_destroy_ioreq_server(XenXC xc, domid_t dom, | |
318 | ioservid_t ioservid) | |
319 | { | |
320 | } | |
321 | ||
322 | static inline int xen_get_ioreq_server_info(XenXC xc, domid_t dom, | |
323 | ioservid_t ioservid, | |
324 | xen_pfn_t *ioreq_pfn, | |
325 | xen_pfn_t *bufioreq_pfn, | |
326 | evtchn_port_t *bufioreq_evtchn) | |
327 | { | |
328 | unsigned long param; | |
329 | int rc; | |
330 | ||
331 | rc = xc_get_hvm_param(xc, dom, HVM_PARAM_IOREQ_PFN, ¶m); | |
332 | if (rc < 0) { | |
333 | fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n"); | |
334 | return -1; | |
335 | } | |
336 | ||
337 | *ioreq_pfn = param; | |
338 | ||
339 | rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_PFN, ¶m); | |
340 | if (rc < 0) { | |
341 | fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n"); | |
342 | return -1; | |
343 | } | |
344 | ||
345 | *bufioreq_pfn = param; | |
346 | ||
347 | rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN, | |
348 | ¶m); | |
349 | if (rc < 0) { | |
350 | fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n"); | |
351 | return -1; | |
352 | } | |
353 | ||
354 | *bufioreq_evtchn = param; | |
355 | ||
356 | return 0; | |
357 | } | |
358 | ||
359 | static inline int xen_set_ioreq_server_state(XenXC xc, domid_t dom, | |
360 | ioservid_t ioservid, | |
361 | bool enable) | |
362 | { | |
363 | return 0; | |
364 | } | |
365 | ||
366 | /* Xen 4.5 */ | |
367 | #else | |
368 | ||
369 | static inline void xen_map_memory_section(XenXC xc, domid_t dom, | |
370 | ioservid_t ioservid, | |
371 | MemoryRegionSection *section) | |
372 | { | |
373 | hwaddr start_addr = section->offset_within_address_space; | |
374 | ram_addr_t size = int128_get64(section->size); | |
375 | hwaddr end_addr = start_addr + size - 1; | |
376 | ||
377 | trace_xen_map_mmio_range(ioservid, start_addr, end_addr); | |
378 | xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 1, | |
379 | start_addr, end_addr); | |
380 | } | |
381 | ||
382 | static inline void xen_unmap_memory_section(XenXC xc, domid_t dom, | |
383 | ioservid_t ioservid, | |
384 | MemoryRegionSection *section) | |
385 | { | |
386 | hwaddr start_addr = section->offset_within_address_space; | |
387 | ram_addr_t size = int128_get64(section->size); | |
388 | hwaddr end_addr = start_addr + size - 1; | |
389 | ||
390 | trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr); | |
391 | xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 1, | |
392 | start_addr, end_addr); | |
393 | } | |
394 | ||
395 | static inline void xen_map_io_section(XenXC xc, domid_t dom, | |
396 | ioservid_t ioservid, | |
397 | MemoryRegionSection *section) | |
398 | { | |
399 | hwaddr start_addr = section->offset_within_address_space; | |
400 | ram_addr_t size = int128_get64(section->size); | |
401 | hwaddr end_addr = start_addr + size - 1; | |
402 | ||
403 | trace_xen_map_portio_range(ioservid, start_addr, end_addr); | |
404 | xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 0, | |
405 | start_addr, end_addr); | |
406 | } | |
407 | ||
408 | static inline void xen_unmap_io_section(XenXC xc, domid_t dom, | |
409 | ioservid_t ioservid, | |
410 | MemoryRegionSection *section) | |
411 | { | |
412 | hwaddr start_addr = section->offset_within_address_space; | |
413 | ram_addr_t size = int128_get64(section->size); | |
414 | hwaddr end_addr = start_addr + size - 1; | |
415 | ||
416 | trace_xen_unmap_portio_range(ioservid, start_addr, end_addr); | |
417 | xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 0, | |
418 | start_addr, end_addr); | |
419 | } | |
420 | ||
421 | static inline void xen_map_pcidev(XenXC xc, domid_t dom, | |
422 | ioservid_t ioservid, | |
423 | PCIDevice *pci_dev) | |
424 | { | |
425 | trace_xen_map_pcidev(ioservid, pci_bus_num(pci_dev->bus), | |
426 | PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn)); | |
427 | xc_hvm_map_pcidev_to_ioreq_server(xc, dom, ioservid, | |
428 | 0, pci_bus_num(pci_dev->bus), | |
429 | PCI_SLOT(pci_dev->devfn), | |
430 | PCI_FUNC(pci_dev->devfn)); | |
431 | } | |
432 | ||
433 | static inline void xen_unmap_pcidev(XenXC xc, domid_t dom, | |
434 | ioservid_t ioservid, | |
435 | PCIDevice *pci_dev) | |
436 | { | |
437 | trace_xen_unmap_pcidev(ioservid, pci_bus_num(pci_dev->bus), | |
438 | PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn)); | |
439 | xc_hvm_unmap_pcidev_from_ioreq_server(xc, dom, ioservid, | |
440 | 0, pci_bus_num(pci_dev->bus), | |
441 | PCI_SLOT(pci_dev->devfn), | |
442 | PCI_FUNC(pci_dev->devfn)); | |
443 | } | |
444 | ||
445 | static inline int xen_create_ioreq_server(XenXC xc, domid_t dom, | |
446 | ioservid_t *ioservid) | |
447 | { | |
d8b441a3 JB |
448 | int rc = xc_hvm_create_ioreq_server(xc, dom, HVM_IOREQSRV_BUFIOREQ_ATOMIC, |
449 | ioservid); | |
3996e85c PD |
450 | |
451 | if (rc == 0) { | |
452 | trace_xen_ioreq_server_create(*ioservid); | |
453 | } | |
454 | ||
455 | return rc; | |
456 | } | |
457 | ||
458 | static inline void xen_destroy_ioreq_server(XenXC xc, domid_t dom, | |
459 | ioservid_t ioservid) | |
460 | { | |
461 | trace_xen_ioreq_server_destroy(ioservid); | |
462 | xc_hvm_destroy_ioreq_server(xc, dom, ioservid); | |
463 | } | |
464 | ||
465 | static inline int xen_get_ioreq_server_info(XenXC xc, domid_t dom, | |
466 | ioservid_t ioservid, | |
467 | xen_pfn_t *ioreq_pfn, | |
468 | xen_pfn_t *bufioreq_pfn, | |
469 | evtchn_port_t *bufioreq_evtchn) | |
470 | { | |
471 | return xc_hvm_get_ioreq_server_info(xc, dom, ioservid, | |
472 | ioreq_pfn, bufioreq_pfn, | |
473 | bufioreq_evtchn); | |
474 | } | |
475 | ||
476 | static inline int xen_set_ioreq_server_state(XenXC xc, domid_t dom, | |
477 | ioservid_t ioservid, | |
478 | bool enable) | |
479 | { | |
480 | trace_xen_ioreq_server_state(ioservid, enable); | |
481 | return xc_hvm_set_ioreq_server_state(xc, dom, ioservid, enable); | |
482 | } | |
483 | ||
484 | #endif | |
485 | ||
20a544c7 KRW |
486 | #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460 |
487 | static inline int xen_xc_domain_add_to_physmap(XenXC xch, uint32_t domid, | |
488 | unsigned int space, | |
489 | unsigned long idx, | |
490 | xen_pfn_t gpfn) | |
491 | { | |
492 | return xc_domain_add_to_physmap(xch, domid, space, idx, gpfn); | |
493 | } | |
494 | #else | |
495 | static inline int xen_xc_domain_add_to_physmap(XenXC xch, uint32_t domid, | |
496 | unsigned int space, | |
497 | unsigned long idx, | |
498 | xen_pfn_t gpfn) | |
499 | { | |
500 | /* In Xen 4.6 rc is -1 and errno contains the error value. */ | |
501 | int rc = xc_domain_add_to_physmap(xch, domid, space, idx, gpfn); | |
502 | if (rc == -1) | |
503 | return errno; | |
504 | return rc; | |
505 | } | |
506 | #endif | |
507 | ||
#ifdef CONFIG_XEN_PV_DOMAIN_BUILD
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 470
/* xc_domain_create() grew an extra config argument in Xen 4.7. */
static inline int xen_domain_create(XenXC xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid);
}
#else
static inline int xen_domain_create(XenXC xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    /* NULL arch-domain config preserves the pre-4.7 default behaviour. */
    return xc_domain_create(xc, ssidref, handle, flags, pdomid, NULL);
}
#endif
#endif
e0cb42ae IC |
526 | #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 471 |
527 | ||
528 | #define xenforeignmemory_open(l, f) &xen_xc | |
529 | ||
530 | static inline void *xenforeignmemory_map(XenXC *h, uint32_t dom, | |
531 | int prot, size_t pages, | |
532 | const xen_pfn_t arr[/*pages*/], | |
533 | int err[/*pages*/]) | |
534 | { | |
535 | if (err) | |
536 | return xc_map_foreign_bulk(*h, dom, prot, arr, err, pages); | |
537 | else | |
538 | return xc_map_foreign_pages(*h, dom, prot, arr, pages); | |
539 | } | |
540 | ||
541 | #define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE) | |
542 | ||
543 | #endif | |
544 | ||
d94f9486 | 545 | #endif /* QEMU_HW_XEN_COMMON_H */ |