]> git.proxmox.com Git - mirror_qemu.git/blame - include/hw/xen/xen_common.h
xen: Switch uses of xc_map_foreign_{pages,bulk} to use libxenforeignmemory API.
[mirror_qemu.git] / include / hw / xen / xen_common.h
CommitLineData
d94f9486
AL
1#ifndef QEMU_HW_XEN_COMMON_H
2#define QEMU_HW_XEN_COMMON_H 1
3
d5b93ddf
AP
4#include "config-host.h"
5
d94f9486
AL
6#include <stddef.h>
7#include <inttypes.h>
8
9#include <xenctrl.h>
e108a3c1
AP
10#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 420
11# include <xs.h>
12#else
13# include <xenstore.h>
14#endif
d94f9486
AL
15#include <xen/io/xenbus.h>
16
83c9f4ca 17#include "hw/hw.h"
0d09e41a 18#include "hw/xen/xen.h"
3996e85c 19#include "hw/pci/pci.h"
1de7afc9 20#include "qemu/queue.h"
3996e85c 21#include "trace.h"
d94f9486
AL
22
23/*
d5b93ddf 24 * We don't support Xen prior to 3.3.0.
d94f9486 25 */
d5b93ddf
AP
26
27/* Xen before 4.0 */
28#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 400
/*
 * Compat wrapper for Xen < 4.0, which lacks xc_map_foreign_bulk().
 * NOTE(review): xc_map_foreign_batch() reports per-page failures by
 * mangling entries of @arr rather than filling @err, so @err is left
 * untouched here -- confirm callers tolerate that difference.
 */
static inline void *xc_map_foreign_bulk(int xc_handle, uint32_t dom, int prot,
                                        xen_pfn_t *arr, int *err,
                                        unsigned int num)
{
    return xc_map_foreign_batch(xc_handle, dom, prot, arr, num);
}
d94f9486 35#endif
d5b93ddf
AP
36
37
38/* Xen before 4.1 */
39#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 410
40
41typedef int XenXC;
a2db2a1e 42typedef int xenevtchn_handle;
c1345a88 43typedef int xengnttab_handle;
e0cb42ae 44typedef int xenforeignmemory_handle;
d5b93ddf
AP
45
46# define XC_INTERFACE_FMT "%i"
47# define XC_HANDLER_INITIAL_VALUE -1
48
a2db2a1e
IC
49static inline xenevtchn_handle *xenevtchn_open(void *logger,
50 unsigned int open_flags)
51{
52 xenevtchn_handle *h = malloc(sizeof(*h));
53 if (!h) {
54 return NULL;
55 }
56 *h = xc_evtchn_open();
57 if (*h == -1) {
58 free(h);
59 h = NULL;
60 }
61 return h;
62}
63static inline int xenevtchn_close(xenevtchn_handle *h)
d5b93ddf 64{
a2db2a1e
IC
65 int rc = xc_evtchn_close(*h);
66 free(h);
67 return rc;
d5b93ddf 68}
a2db2a1e
IC
/*
 * Forward the Xen 4.1+ xenevtchn_* names onto the old xc_evtchn_*
 * calls; the compat handle is a malloc'd int holding the event-channel
 * fd, hence the dereference in every expansion.
 */
#define xenevtchn_fd(h) xc_evtchn_fd(*h)
#define xenevtchn_pending(h) xc_evtchn_pending(*h)
#define xenevtchn_notify(h, p) xc_evtchn_notify(*h, p)
#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(*h, d, p)
#define xenevtchn_unmask(h, p) xc_evtchn_unmask(*h, p)
/* Bug fix: this previously expanded to xc_evtchn_unmask(), so ports
 * were never actually unbound on the pre-4.1 compat path. */
#define xenevtchn_unbind(h, p) xc_evtchn_unbind(*h, p)
d5b93ddf 75
c1345a88
IC
76static inline xengnttab_handle *xengnttab_open(void *logger,
77 unsigned int open_flags)
d5b93ddf 78{
c1345a88
IC
79 xengnttab_handle *h = malloc(sizeof(*h));
80 if (!h) {
81 return NULL;
82 }
83 *h = xc_gnttab_open();
84 if (*h == -1) {
85 free(h);
86 h = NULL;
87 }
88 return h;
d5b93ddf 89}
c1345a88
IC
90static inline int xengnttab_close(xengnttab_handle *h)
91{
92 int rc = xc_gnttab_close(*h);
93 free(h);
94 return rc;
95}
/* Map the Xen 4.1+ grant-table names onto the old fd-based calls;
 * the compat handle is a malloc'd int, hence the dereference. */
#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(*h, n)
#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(*h, d, r, p)
#define xengnttab_map_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_grant_refs(*h, c, d, r, p)
#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(*h, a, n)
d5b93ddf
AP
101
102static inline XenXC xen_xc_interface_open(void *logger, void *dombuild_logger,
103 unsigned int open_flags)
104{
105 return xc_interface_open();
106}
107
e0cb42ae
IC
108/* See below for xenforeignmemory_* APIs */
109
d5b93ddf
AP
/* Before Xen 4.1 the "interface" handle already is the raw fd. */
static inline int xc_fd(int xen_xc)
{
    return xen_xc;
}
114
115
432d268c
JN
116static inline int xc_domain_populate_physmap_exact
117 (XenXC xc_handle, uint32_t domid, unsigned long nr_extents,
118 unsigned int extent_order, unsigned int mem_flags, xen_pfn_t *extent_start)
119{
120 return xc_domain_memory_populate_physmap
121 (xc_handle, domid, nr_extents, extent_order, mem_flags, extent_start);
122}
123
b87de24e
AP
124static inline int xc_domain_add_to_physmap(int xc_handle, uint32_t domid,
125 unsigned int space, unsigned long idx,
126 xen_pfn_t gpfn)
127{
128 struct xen_add_to_physmap xatp = {
129 .domid = domid,
130 .space = space,
131 .idx = idx,
132 .gpfn = gpfn,
133 };
134
135 return xc_memory_op(xc_handle, XENMEM_add_to_physmap, &xatp);
136}
137
0f51726a
SS
/* Pre-4.2 xenstore only has xs_daemon_open(); @flags are ignored. */
static inline struct xs_handle *xs_open(unsigned long flags)
{
    return xs_daemon_open();
}
142
/* NULL-tolerant wrapper around the pre-4.2 xs_daemon_close(). */
static inline void xs_close(struct xs_handle *xsh)
{
    if (!xsh) {
        return;
    }
    xs_daemon_close(xsh);
}
149
432d268c 150
d5b93ddf
AP
151/* Xen 4.1 */
152#else
153
154typedef xc_interface *XenXC;
e0cb42ae 155typedef xc_interface *xenforeignmemory_handle;
a2db2a1e 156typedef xc_evtchn xenevtchn_handle;
c1345a88 157typedef xc_gnttab xengnttab_handle;
d5b93ddf
AP
158
159# define XC_INTERFACE_FMT "%p"
160# define XC_HANDLER_INITIAL_VALUE NULL
161
a2db2a1e
IC
/*
 * Xen 4.1+: the xenevtchn_* compat names map 1:1 onto xc_evtchn_*.
 * Bug fix: the xenevtchn_open() expansion carried a stray trailing
 * semicolon, which broke any use of the macro in expression context
 * (e.g. as an initializer or function argument).
 */
#define xenevtchn_open(l, f) xc_evtchn_open(l, f)
#define xenevtchn_close(h) xc_evtchn_close(h)
#define xenevtchn_fd(h) xc_evtchn_fd(h)
#define xenevtchn_pending(h) xc_evtchn_pending(h)
#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
d5b93ddf 170
c1345a88
IC
/* Xen 4.1+: the xengnttab_* compat names map 1:1 onto xc_gnttab_*. */
#define xengnttab_open(l, f) xc_gnttab_open(l, f)
#define xengnttab_close(h) xc_gnttab_close(h)
#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
#define xengnttab_map_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_grant_refs(h, c, d, r, p)
d5b93ddf
AP
178
/* Xen 4.1+ accepts the loggers and open flags directly. */
static inline XenXC xen_xc_interface_open(void *logger, void *dombuild_logger,
                                          unsigned int open_flags)
{
    return xc_interface_open(logger, dombuild_logger, open_flags);
}
184
e0cb42ae
IC
185/* See below for xenforeignmemory_* APIs */
186
d5b93ddf
AP
/* FIXME: there is no way to obtain the Xen fd from an xc_interface. */
static inline int xc_fd(xc_interface *xen_xc)
{
    return -1;
}
d94f9486
AL
192#endif
193
4c9f8d1b
SS
194/* Xen before 4.2 */
195#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 420
/* Xen before 4.2 cannot inject MSIs; report the op as unsupported. */
static inline int xen_xc_hvm_inject_msi(XenXC xen_xc, domid_t dom,
                                        uint64_t addr, uint32_t data)
{
    return -ENOSYS;
}
f3135204
OH
/* The following definitions exist only so that BLKIF_OP_DISCARD
 * handling compiles against pre-4.2 Xen headers. */
#define BLKIF_OP_DISCARD 5
struct blkif_request_discard {
    uint64_t nr_sectors;
    uint64_t sector_number;
};
4c9f8d1b
SS
208#else
/* Xen 4.2+: forward MSI injection to libxenctrl. */
static inline int xen_xc_hvm_inject_msi(XenXC xen_xc, domid_t dom,
                                        uint64_t addr, uint32_t data)
{
    return xc_hvm_inject_msi(xen_xc, dom, addr, data);
}
214#endif
215
180640ea 216void destroy_hvm_domain(bool reboot);
9ce94e7c 217
eaab4d60
AK
218/* shutdown/destroy current domain because of an error */
219void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
220
37f9e258
DS
221#ifdef HVM_PARAM_VMPORT_REGS_PFN
222static inline int xen_get_vmport_regs_pfn(XenXC xc, domid_t dom,
d01a5a3f 223 xen_pfn_t *vmport_regs_pfn)
37f9e258 224{
d01a5a3f
SS
225 int rc;
226 uint64_t value;
227 rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
228 if (rc >= 0) {
229 *vmport_regs_pfn = (xen_pfn_t) value;
230 }
231 return rc;
37f9e258
DS
232}
233#else
/* Headers lack HVM_PARAM_VMPORT_REGS_PFN: vmport is unsupported. */
static inline int xen_get_vmport_regs_pfn(XenXC xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
239#endif
240
d8b441a3
JB
241/* Xen before 4.6 */
242#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
243
244#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
245#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
246#endif
247
248#endif
249
3996e85c
PD
250/* Xen before 4.5 */
251#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 450
252
253#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
254#define HVM_PARAM_BUFIOREQ_EVTCHN 26
255#endif
256
257#define IOREQ_TYPE_PCI_CONFIG 2
258
d09952ee 259typedef uint16_t ioservid_t;
3996e85c
PD
260
/*
 * Before Xen 4.5 there are no secondary ioreq servers, so mapping and
 * unmapping memory sections, port-I/O sections and PCI devices onto
 * one are all no-ops; everything goes through the default server.
 */
static inline void xen_map_memory_section(XenXC xc, domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}

static inline void xen_unmap_memory_section(XenXC xc, domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}

static inline void xen_map_io_section(XenXC xc, domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}

static inline void xen_unmap_io_section(XenXC xc, domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}

static inline void xen_map_pcidev(XenXC xc, domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}

static inline void xen_unmap_pcidev(XenXC xc, domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}
296
/* Pre-4.5: pretend creation succeeds; the default ioreq server is used.
 * NOTE(review): *ioservid is left unset here -- callers must not rely
 * on its value on this path; confirm against the call sites. */
static inline int xen_create_ioreq_server(XenXC xc, domid_t dom,
                                          ioservid_t *ioservid)
{
    return 0;
}

/* Pre-4.5: nothing was created, so nothing to destroy. */
static inline void xen_destroy_ioreq_server(XenXC xc, domid_t dom,
                                            ioservid_t ioservid)
{
}
307
308static inline int xen_get_ioreq_server_info(XenXC xc, domid_t dom,
309 ioservid_t ioservid,
310 xen_pfn_t *ioreq_pfn,
311 xen_pfn_t *bufioreq_pfn,
312 evtchn_port_t *bufioreq_evtchn)
313{
314 unsigned long param;
315 int rc;
316
317 rc = xc_get_hvm_param(xc, dom, HVM_PARAM_IOREQ_PFN, &param);
318 if (rc < 0) {
319 fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
320 return -1;
321 }
322
323 *ioreq_pfn = param;
324
325 rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
326 if (rc < 0) {
327 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
328 return -1;
329 }
330
331 *bufioreq_pfn = param;
332
333 rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
334 &param);
335 if (rc < 0) {
336 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
337 return -1;
338 }
339
340 *bufioreq_evtchn = param;
341
342 return 0;
343}
344
/* Pre-4.5: the default ioreq server is always active; nothing to do. */
static inline int xen_set_ioreq_server_state(XenXC xc, domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    return 0;
}
351
352/* Xen 4.5 */
353#else
354
355static inline void xen_map_memory_section(XenXC xc, domid_t dom,
356 ioservid_t ioservid,
357 MemoryRegionSection *section)
358{
359 hwaddr start_addr = section->offset_within_address_space;
360 ram_addr_t size = int128_get64(section->size);
361 hwaddr end_addr = start_addr + size - 1;
362
363 trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
364 xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 1,
365 start_addr, end_addr);
366}
367
/* Remove the section's guest-physical range from the ioreq server's
 * MMIO ranges (the "1" selects MMIO rather than port I/O). */
static inline void xen_unmap_memory_section(XenXC xc, domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
    xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 1,
                                            start_addr, end_addr);
}
380
/* Register the section's range with the ioreq server as a port-I/O
 * range (the "0" selects port I/O rather than MMIO). */
static inline void xen_map_io_section(XenXC xc, domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    trace_xen_map_portio_range(ioservid, start_addr, end_addr);
    xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 0,
                                        start_addr, end_addr);
}
393
/* Remove the section's range from the ioreq server's port-I/O
 * ranges (the "0" selects port I/O rather than MMIO). */
static inline void xen_unmap_io_section(XenXC xc, domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
    xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 0,
                                            start_addr, end_addr);
}
406
/* Route config-space accesses for this device's bus/slot/function to
 * the ioreq server.  NOTE(review): the leading 0 appears to be the PCI
 * segment/domain number -- confirm against the libxenctrl prototype. */
static inline void xen_map_pcidev(XenXC xc, domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
    trace_xen_map_pcidev(ioservid, pci_bus_num(pci_dev->bus),
                         PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xc_hvm_map_pcidev_to_ioreq_server(xc, dom, ioservid,
                                      0, pci_bus_num(pci_dev->bus),
                                      PCI_SLOT(pci_dev->devfn),
                                      PCI_FUNC(pci_dev->devfn));
}
418
/* Stop routing config-space accesses for this device's
 * bus/slot/function to the ioreq server.  NOTE(review): the leading 0
 * appears to be the PCI segment/domain number -- confirm. */
static inline void xen_unmap_pcidev(XenXC xc, domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
    trace_xen_unmap_pcidev(ioservid, pci_bus_num(pci_dev->bus),
                           PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xc_hvm_unmap_pcidev_from_ioreq_server(xc, dom, ioservid,
                                          0, pci_bus_num(pci_dev->bus),
                                          PCI_SLOT(pci_dev->devfn),
                                          PCI_FUNC(pci_dev->devfn));
}
430
431static inline int xen_create_ioreq_server(XenXC xc, domid_t dom,
432 ioservid_t *ioservid)
433{
d8b441a3
JB
434 int rc = xc_hvm_create_ioreq_server(xc, dom, HVM_IOREQSRV_BUFIOREQ_ATOMIC,
435 ioservid);
3996e85c
PD
436
437 if (rc == 0) {
438 trace_xen_ioreq_server_create(*ioservid);
439 }
440
441 return rc;
442}
443
/* Tear down a secondary ioreq server previously created above. */
static inline void xen_destroy_ioreq_server(XenXC xc, domid_t dom,
                                            ioservid_t ioservid)
{
    trace_xen_ioreq_server_destroy(ioservid);
    xc_hvm_destroy_ioreq_server(xc, dom, ioservid);
}
450
/* Xen 4.5+ exposes the ioreq server info query directly. */
static inline int xen_get_ioreq_server_info(XenXC xc, domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xc_hvm_get_ioreq_server_info(xc, dom, ioservid,
                                        ioreq_pfn, bufioreq_pfn,
                                        bufioreq_evtchn);
}
461
/* Enable or disable a secondary ioreq server (Xen 4.5+). */
static inline int xen_set_ioreq_server_state(XenXC xc, domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    trace_xen_ioreq_server_state(ioservid, enable);
    return xc_hvm_set_ioreq_server_state(xc, dom, ioservid, enable);
}
469
470#endif
471
20a544c7
KRW
472#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
/* Before Xen 4.6 the raw libxenctrl return value is already usable. */
static inline int xen_xc_domain_add_to_physmap(XenXC xch, uint32_t domid,
                                               unsigned int space,
                                               unsigned long idx,
                                               xen_pfn_t gpfn)
{
    return xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
}
480#else
481static inline int xen_xc_domain_add_to_physmap(XenXC xch, uint32_t domid,
482 unsigned int space,
483 unsigned long idx,
484 xen_pfn_t gpfn)
485{
486 /* In Xen 4.6 rc is -1 and errno contains the error value. */
487 int rc = xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
488 if (rc == -1)
489 return errno;
490 return rc;
491}
492#endif
493
cdadde39
RPM
494#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 470
/* Before Xen 4.7 xc_domain_create() has no arch-config argument. */
static inline int xen_domain_create(XenXC xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid);
}
501#else
/* Xen 4.7+ adds an arch-config argument; NULL selects the default. */
static inline int xen_domain_create(XenXC xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid, NULL);
}
508#endif
509
e0cb42ae
IC
510#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 471
511
512#define xenforeignmemory_open(l, f) &xen_xc
513
514static inline void *xenforeignmemory_map(XenXC *h, uint32_t dom,
515 int prot, size_t pages,
516 const xen_pfn_t arr[/*pages*/],
517 int err[/*pages*/])
518{
519 if (err)
520 return xc_map_foreign_bulk(*h, dom, prot, arr, err, pages);
521 else
522 return xc_map_foreign_pages(*h, dom, prot, arr, pages);
523}
524
525#define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE)
526
527#endif
528
d94f9486 529#endif /* QEMU_HW_XEN_COMMON_H */