/*
 * QEMU Xen common header (include/hw/xen/xen_common.h), from the
 * mirror_qemu.git tree; commit: "xen: Switch to libxengnttab interface
 * for compat shims."
 */
1 #ifndef QEMU_HW_XEN_COMMON_H
2 #define QEMU_HW_XEN_COMMON_H 1
3
4 #include "config-host.h"
5
6 #include <stddef.h>
7 #include <inttypes.h>
8
9 #include <xenctrl.h>
10 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 420
11 # include <xs.h>
12 #else
13 # include <xenstore.h>
14 #endif
15 #include <xen/io/xenbus.h>
16
17 #include "hw/hw.h"
18 #include "hw/xen/xen.h"
19 #include "hw/pci/pci.h"
20 #include "qemu/queue.h"
21 #include "trace.h"
22
/*
 * We don't support Xen prior to 3.3.0.
 */

/* Xen before 4.0 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 400
/*
 * Compat shim: pre-4.0 Xen only provides xc_map_foreign_batch().
 * NOTE(review): the @err array is never written here;
 * xc_map_foreign_batch() reports per-page errors by rewriting @arr
 * instead -- confirm callers of the bulk variant tolerate this.
 */
static inline void *xc_map_foreign_bulk(int xc_handle, uint32_t dom, int prot,
                                        xen_pfn_t *arr, int *err,
                                        unsigned int num)
{
    return xc_map_foreign_batch(xc_handle, dom, prot, arr, num);
}
#endif
36
37
/* Xen before 4.1 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 410

/* Pre-4.1 libxenctrl exposes plain integer file descriptors rather than
 * opaque handle types, so alias all handle types to int. */
typedef int XenXC;
typedef int xenevtchn_handle;
typedef int xengnttab_handle;

/* printf format and "invalid" sentinel matching the int handles above */
# define XC_INTERFACE_FMT "%i"
# define XC_HANDLER_INITIAL_VALUE -1
47
48 static inline xenevtchn_handle *xenevtchn_open(void *logger,
49 unsigned int open_flags)
50 {
51 xenevtchn_handle *h = malloc(sizeof(*h));
52 if (!h) {
53 return NULL;
54 }
55 *h = xc_evtchn_open();
56 if (*h == -1) {
57 free(h);
58 h = NULL;
59 }
60 return h;
61 }
/* Close the underlying evtchn fd and free the wrapper allocated by
 * xenevtchn_open(); returns the xc_evtchn_close() result. */
static inline int xenevtchn_close(xenevtchn_handle *h)
{
    int rc = xc_evtchn_close(*h);
    free(h);
    return rc;
}
/* Forward the new-style xenevtchn_* names to the old int-fd xc_evtchn_*
 * calls, dereferencing the heap-allocated handle wrapper. */
#define xenevtchn_fd(h) xc_evtchn_fd(*h)
#define xenevtchn_pending(h) xc_evtchn_pending(*h)
#define xenevtchn_notify(h, p) xc_evtchn_notify(*h, p)
#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(*h, d, p)
#define xenevtchn_unmask(h, p) xc_evtchn_unmask(*h, p)
/* Bug fix: this previously forwarded to xc_evtchn_unmask(), so ports
 * were never actually unbound on pre-4.1 Xen. */
#define xenevtchn_unbind(h, p) xc_evtchn_unbind(*h, p)
74
75 static inline xengnttab_handle *xengnttab_open(void *logger,
76 unsigned int open_flags)
77 {
78 xengnttab_handle *h = malloc(sizeof(*h));
79 if (!h) {
80 return NULL;
81 }
82 *h = xc_gnttab_open();
83 if (*h == -1) {
84 free(h);
85 h = NULL;
86 }
87 return h;
88 }
/* Close the underlying gnttab fd and free the wrapper allocated by
 * xengnttab_open(); returns the xc_gnttab_close() result. */
static inline int xengnttab_close(xengnttab_handle *h)
{
    int rc = xc_gnttab_close(*h);
    free(h);
    return rc;
}
/* Forward the new-style xengnttab_* names to the old int-fd xc_gnttab_*
 * calls, dereferencing the heap-allocated handle wrapper. */
#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(*h, n)
#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(*h, d, r, p)
#define xengnttab_map_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_grant_refs(*h, c, d, r, p)
#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(*h, a, n)
100
/* Pre-4.1 xc_interface_open() takes no arguments; the logger and flags
 * parameters exist only for signature compatibility with the 4.1+ shim. */
static inline XenXC xen_xc_interface_open(void *logger, void *dombuild_logger,
                                          unsigned int open_flags)
{
    return xc_interface_open();
}

/* Pre-4.1 the XenXC handle IS the file descriptor. */
static inline int xc_fd(int xen_xc)
{
    return xen_xc;
}
111
112
/* Compat shim: pre-4.1 names this xc_domain_memory_populate_physmap();
 * simply forward all arguments. */
static inline int xc_domain_populate_physmap_exact
    (XenXC xc_handle, uint32_t domid, unsigned long nr_extents,
     unsigned int extent_order, unsigned int mem_flags, xen_pfn_t *extent_start)
{
    return xc_domain_memory_populate_physmap
        (xc_handle, domid, nr_extents, extent_order, mem_flags, extent_start);
}
120
/* Compat shim: pre-4.1 has no xc_domain_add_to_physmap(), so build the
 * XENMEM_add_to_physmap argument struct and issue the raw memory op. */
static inline int xc_domain_add_to_physmap(int xc_handle, uint32_t domid,
                                           unsigned int space, unsigned long idx,
                                           xen_pfn_t gpfn)
{
    struct xen_add_to_physmap xatp = {
        .domid = domid,
        .space = space,
        .idx = idx,
        .gpfn = gpfn,
    };

    return xc_memory_op(xc_handle, XENMEM_add_to_physmap, &xatp);
}
134
/* Compat shim: old xenstore library only has xs_daemon_open();
 * @flags is accepted for API compatibility and ignored. */
static inline struct xs_handle *xs_open(unsigned long flags)
{
    return xs_daemon_open();
}

/* Compat shim for xs_close(); guards NULL because the old
 * xs_daemon_close() is not documented to accept it. */
static inline void xs_close(struct xs_handle *xsh)
{
    if (xsh != NULL) {
        xs_daemon_close(xsh);
    }
}
146
147
/* Xen 4.1 */
#else

/* From 4.1 on libxenctrl uses opaque handle pointers. */
typedef xc_interface *XenXC;
typedef xc_evtchn xenevtchn_handle;
typedef xc_gnttab xengnttab_handle;

/* printf format and "invalid" sentinel matching the pointer handles above */
# define XC_INTERFACE_FMT "%p"
# define XC_HANDLER_INITIAL_VALUE NULL
157
158 #define xenevtchn_open(l, f) xc_evtchn_open(l, f);
159 #define xenevtchn_close(h) xc_evtchn_close(h)
160 #define xenevtchn_fd(h) xc_evtchn_fd(h)
161 #define xenevtchn_pending(h) xc_evtchn_pending(h)
162 #define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
163 #define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
164 #define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
165 #define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
166
167 #define xengnttab_open(l, f) xc_gnttab_open(l, f)
168 #define xengnttab_close(h) xc_gnttab_close(h)
169 #define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
170 #define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
171 #define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
172 #define xengnttab_map_grant_refs(h, c, d, r, p) \
173 xc_gnttab_map_grant_refs(h, c, d, r, p)
174
/* Open a libxenctrl interface handle (4.1+ three-argument form). */
static inline XenXC xen_xc_interface_open(void *logger, void *dombuild_logger,
                                          unsigned int open_flags)
{
    return xc_interface_open(logger, dombuild_logger, open_flags);
}

/* FIXME There is no way to obtain the xen fd from an opaque
 * xc_interface handle, so return -1 (presumably callers treat -1 as
 * "no fd" -- confirm). */
static inline int xc_fd(xc_interface *xen_xc)
{
    return -1;
}
#endif
187
/* Xen before 4.2 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 420
/* MSI injection is unavailable before Xen 4.2: always fail with -ENOSYS. */
static inline int xen_xc_hvm_inject_msi(XenXC xen_xc, domid_t dom,
                                        uint64_t addr, uint32_t data)
{
    return -ENOSYS;
}
/* The following definitions exist only so that the op_discard related
 * code still compiles on older Xen releases. */
#define BLKIF_OP_DISCARD 5
struct blkif_request_discard {
    uint64_t nr_sectors;
    uint64_t sector_number;
};
#else
/* Xen >= 4.2: forward to the real xc_hvm_inject_msi(). */
static inline int xen_xc_hvm_inject_msi(XenXC xen_xc, domid_t dom,
                                        uint64_t addr, uint32_t data)
{
    return xc_hvm_inject_msi(xen_xc, dom, addr, data);
}
#endif
209
/* Defined elsewhere; presumably destroys or (with @reboot) reboots the
 * current HVM domain -- see the implementation for details. */
void destroy_hvm_domain(bool reboot);

/* shutdown/destroy current domain because of an error */
void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
214
#ifdef HVM_PARAM_VMPORT_REGS_PFN
/* Read the HVM_PARAM_VMPORT_REGS_PFN param into *vmport_regs_pfn;
 * returns the xc_hvm_param_get() result (negative on failure, in which
 * case *vmport_regs_pfn is left untouched). */
static inline int xen_get_vmport_regs_pfn(XenXC xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    int rc;
    uint64_t value;
    rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
    if (rc >= 0) {
        *vmport_regs_pfn = (xen_pfn_t) value;
    }
    return rc;
}
#else
/* Headers without HVM_PARAM_VMPORT_REGS_PFN cannot support vmport regs:
 * always fail with -ENOSYS. */
static inline int xen_get_vmport_regs_pfn(XenXC xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
#endif
234
/* Xen before 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460

/* Fallback definition so pre-4.6 headers that lack it still compile. */
#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
#endif

#endif

/* Xen before 4.5 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 450

/* Fallback definition so pre-4.5 headers that lack it still compile. */
#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

#define IOREQ_TYPE_PCI_CONFIG 2

/* Pre-4.5 has no ioservid_t; provide a placeholder type. */
typedef uint16_t ioservid_t;
254
/*
 * Pre-4.5 Xen has no ioreq-server API, so all of the map/unmap helpers
 * below are no-ops; common code can call them unconditionally.
 */
static inline void xen_map_memory_section(XenXC xc, domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}

static inline void xen_unmap_memory_section(XenXC xc, domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}

static inline void xen_map_io_section(XenXC xc, domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}

static inline void xen_unmap_io_section(XenXC xc, domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}

static inline void xen_map_pcidev(XenXC xc, domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}

static inline void xen_unmap_pcidev(XenXC xc, domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}
290
/* Pre-4.5 stub: no ioreq server to create; reports success.
 * NOTE(review): *ioservid is left unwritten -- presumably the id is
 * never consumed on this path; confirm against callers. */
static inline int xen_create_ioreq_server(XenXC xc, domid_t dom,
                                          ioservid_t *ioservid)
{
    return 0;
}

/* Pre-4.5 stub: nothing to destroy. */
static inline void xen_destroy_ioreq_server(XenXC xc, domid_t dom,
                                            ioservid_t ioservid)
{
}
301
/*
 * Pre-4.5 equivalent of xc_hvm_get_ioreq_server_info(): fetch the
 * ioreq/bufioreq pfns and the bufioreq event channel from HVM params.
 * Returns 0 on success, -1 (with a message on stderr) on any failure;
 * output pointers written so far are left set on failure.
 */
static inline int xen_get_ioreq_server_info(XenXC xc, domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    unsigned long param;
    int rc;

    rc = xc_get_hvm_param(xc, dom, HVM_PARAM_IOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
        return -1;
    }

    *ioreq_pfn = param;

    rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
        return -1;
    }

    *bufioreq_pfn = param;

    rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
                          &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }

    *bufioreq_evtchn = param;

    return 0;
}
338
/* Pre-4.5 stub: there is no server state to toggle; reports success. */
static inline int xen_set_ioreq_server_state(XenXC xc, domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    return 0;
}
345
346 /* Xen 4.5 */
347 #else
348
349 static inline void xen_map_memory_section(XenXC xc, domid_t dom,
350 ioservid_t ioservid,
351 MemoryRegionSection *section)
352 {
353 hwaddr start_addr = section->offset_within_address_space;
354 ram_addr_t size = int128_get64(section->size);
355 hwaddr end_addr = start_addr + size - 1;
356
357 trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
358 xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 1,
359 start_addr, end_addr);
360 }
361
/* Remove the MMIO range covered by @section from the ioreq server
 * (the '1' argument selects MMIO). */
static inline void xen_unmap_memory_section(XenXC xc, domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
    xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 1,
                                            start_addr, end_addr);
}

/* Register the port-I/O range covered by @section with the ioreq server
 * (the '0' argument selects port I/O). */
static inline void xen_map_io_section(XenXC xc, domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    trace_xen_map_portio_range(ioservid, start_addr, end_addr);
    xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 0,
                                        start_addr, end_addr);
}

/* Remove the port-I/O range covered by @section from the ioreq server. */
static inline void xen_unmap_io_section(XenXC xc, domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
    xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 0,
                                            start_addr, end_addr);
}
400
/* Route config-space accesses for @pci_dev (by bus/slot/function) to the
 * ioreq server.  The literal 0 argument is presumably the PCI segment
 * (domain) -- TODO confirm against the libxenctrl prototype. */
static inline void xen_map_pcidev(XenXC xc, domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
    trace_xen_map_pcidev(ioservid, pci_bus_num(pci_dev->bus),
                         PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xc_hvm_map_pcidev_to_ioreq_server(xc, dom, ioservid,
                                      0, pci_bus_num(pci_dev->bus),
                                      PCI_SLOT(pci_dev->devfn),
                                      PCI_FUNC(pci_dev->devfn));
}

/* Stop routing config-space accesses for @pci_dev to the ioreq server. */
static inline void xen_unmap_pcidev(XenXC xc, domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
    trace_xen_unmap_pcidev(ioservid, pci_bus_num(pci_dev->bus),
                           PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xc_hvm_unmap_pcidev_from_ioreq_server(xc, dom, ioservid,
                                          0, pci_bus_num(pci_dev->bus),
                                          PCI_SLOT(pci_dev->devfn),
                                          PCI_FUNC(pci_dev->devfn));
}
424
/* Create an ioreq server for @dom with atomic buffered-ioreq handling;
 * on success (rc == 0) *ioservid holds the new server id. */
static inline int xen_create_ioreq_server(XenXC xc, domid_t dom,
                                          ioservid_t *ioservid)
{
    int rc = xc_hvm_create_ioreq_server(xc, dom, HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                                        ioservid);

    if (rc == 0) {
        trace_xen_ioreq_server_create(*ioservid);
    }

    return rc;
}

/* Destroy the given ioreq server; the xc call's result is discarded. */
static inline void xen_destroy_ioreq_server(XenXC xc, domid_t dom,
                                            ioservid_t ioservid)
{
    trace_xen_ioreq_server_destroy(ioservid);
    xc_hvm_destroy_ioreq_server(xc, dom, ioservid);
}
444
/* Xen >= 4.5: query the ioreq server directly for its pfns and
 * buffered-ioreq event channel. */
static inline int xen_get_ioreq_server_info(XenXC xc, domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xc_hvm_get_ioreq_server_info(xc, dom, ioservid,
                                        ioreq_pfn, bufioreq_pfn,
                                        bufioreq_evtchn);
}

/* Enable or disable the given ioreq server. */
static inline int xen_set_ioreq_server_state(XenXC xc, domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    trace_xen_ioreq_server_state(ioservid, enable);
    return xc_hvm_set_ioreq_server_state(xc, dom, ioservid, enable);
}

#endif
465
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
/* Before 4.6 xc_domain_add_to_physmap() already returns the error value
 * directly, so just forward the call. */
static inline int xen_xc_domain_add_to_physmap(XenXC xch, uint32_t domid,
                                               unsigned int space,
                                               unsigned long idx,
                                               xen_pfn_t gpfn)
{
    return xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
}
474 #else
475 static inline int xen_xc_domain_add_to_physmap(XenXC xch, uint32_t domid,
476 unsigned int space,
477 unsigned long idx,
478 xen_pfn_t gpfn)
479 {
480 /* In Xen 4.6 rc is -1 and errno contains the error value. */
481 int rc = xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
482 if (rc == -1)
483 return errno;
484 return rc;
485 }
486 #endif
487
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 470
/* Before 4.7 xc_domain_create() takes five arguments. */
static inline int xen_domain_create(XenXC xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid);
}
#else
/* From 4.7 on xc_domain_create() gained an extra (config) argument;
 * pass NULL to keep the pre-4.7 behaviour. */
static inline int xen_domain_create(XenXC xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid, NULL);
}
#endif
503
504 #endif /* QEMU_HW_XEN_COMMON_H */