1 #ifndef QEMU_HW_XEN_COMMON_H
2 #define QEMU_HW_XEN_COMMON_H
5 * If we have new enough libxenctrl then we do not want/need these compat
6 * interfaces, despite what the user supplied cflags might say. They
7 * must be undefined before including xenctrl.h
9 #undef XC_WANT_COMPAT_EVTCHN_API
10 #undef XC_WANT_COMPAT_GNTTAB_API
11 #undef XC_WANT_COMPAT_MAP_FOREIGN_API
15 #include <xen/io/xenbus.h>
18 #include "hw/xen/xen.h"
19 #include "hw/pci/pci.h"
20 #include "qemu/queue.h"
21 #include "hw/xen/trace.h"
/* Global libxenctrl interface handle; the compat shims below (e.g.
 * xenforeignmemory_open()) expand to this. Defined elsewhere in QEMU. */
extern xc_interface *xen_xc;
26 * We don't support Xen prior to 4.2.0.
29 /* Xen 4.2 through 4.6 */
30 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701
/* Pre-4.7.1 libxenctrl is a single monolithic library with one handle
 * type; alias the split-library handle names introduced in Xen 4.7 onto
 * the old xc_* types so the rest of QEMU can use the new names
 * unconditionally. */
typedef xc_interface xenforeignmemory_handle;
typedef xc_evtchn xenevtchn_handle;
typedef xc_gnttab xengnttab_handle;
/*
 * Forward the Xen >= 4.7 xenevtchn_open() name to the old libxenctrl
 * xc_evtchn_open() entry point.
 *
 * Fix: the original macro ended with a stray semicolon, which is wrong
 * for a function-like macro — it expands to a double semicolon in
 * statement context and breaks outright in expression context such as
 * "if ((h = xenevtchn_open(NULL, 0)) == NULL)".  (CERT PRE11-C.)
 */
#define xenevtchn_open(l, f) xc_evtchn_open(l, f)
/* Event-channel compat shims: forward the Xen >= 4.7 xenevtchn_* names
 * straight to the old libxenctrl xc_evtchn_* entry points (same
 * argument order, no argument rewriting). */
#define xenevtchn_close(h) xc_evtchn_close(h)
#define xenevtchn_fd(h) xc_evtchn_fd(h)
#define xenevtchn_pending(h) xc_evtchn_pending(h)
#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
/* Grant-table compat shims: forward the Xen >= 4.7 xengnttab_* names to
 * the old libxenctrl xc_gnttab_* entry points.  Note the one renamed
 * call: xengnttab_unmap() maps onto xc_gnttab_munmap(). */
#define xengnttab_open(l, f) xc_gnttab_open(l, f)
#define xengnttab_close(h) xc_gnttab_close(h)
#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
#define xengnttab_map_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_grant_refs(h, c, d, r, p)
#define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_domain_grant_refs(h, c, d, r, p)
/* Foreign-memory compat shims: old libxenctrl has no separate foreign
 * memory library, so "opening" a handle simply yields the global xen_xc
 * interface and "closing" one is a no-op (expands to nothing). */
#define xenforeignmemory_open(l, f) xen_xc
#define xenforeignmemory_close(h)
58 static inline void *xenforeignmemory_map(xc_interface
*h
, uint32_t dom
,
59 int prot
, size_t pages
,
60 const xen_pfn_t arr
[/*pages*/],
64 return xc_map_foreign_bulk(h
, dom
, prot
, arr
, err
, pages
);
66 return xc_map_foreign_pages(h
, dom
, prot
, arr
, pages
);
/*
 * Compat unmap: mappings made through old libxenctrl are plain mmap()ed
 * regions, so tear them down with munmap().  @s is a page count.
 *
 * Fix: parenthesize the macro parameters — the original expanded an
 * expression argument such as "n + 1" pages into "n + 1 * XC_PAGE_SIZE",
 * silently unmapping the wrong length.  (CERT PRE01-C.)  The handle @h
 * is unused, as in the original.
 */
#define xenforeignmemory_unmap(h, p, s) munmap((p), (s) * XC_PAGE_SIZE)
71 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */
73 #include <xenevtchn.h>
74 #include <xengnttab.h>
75 #include <xenforeignmemory.h>
/* Global foreign-memory handle for the split-library (>= 4.7.1) path;
 * used e.g. by xenforeignmemory_restrict() callers below.  Defined
 * elsewhere in QEMU. */
extern xenforeignmemory_handle *xen_fmem;
81 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000
83 #define XEN_COMPAT_PHYSMAP
84 static inline void *xenforeignmemory_map2(xenforeignmemory_handle
*h
,
85 uint32_t dom
, void *addr
,
86 int prot
, int flags
, size_t pages
,
87 const xen_pfn_t arr
[/*pages*/],
90 assert(addr
== NULL
&& flags
== 0);
91 return xenforeignmemory_map(h
, dom
, prot
, pages
, arr
, err
);
96 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
/* Before Xen 4.9 there is no separate devicemodel library; alias its
 * handle type onto the plain xc_interface so the xendevicemodel_*
 * compat wrappers below can pass it to xc_hvm_* calls directly. */
typedef xc_interface xendevicemodel_handle;
100 static inline xendevicemodel_handle
*xendevicemodel_open(
101 struct xentoollog_logger
*logger
, unsigned int open_flags
)
106 #if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500
108 static inline int xendevicemodel_create_ioreq_server(
109 xendevicemodel_handle
*dmod
, domid_t domid
, int handle_bufioreq
,
112 return xc_hvm_create_ioreq_server(dmod
, domid
, handle_bufioreq
,
116 static inline int xendevicemodel_get_ioreq_server_info(
117 xendevicemodel_handle
*dmod
, domid_t domid
, ioservid_t id
,
118 xen_pfn_t
*ioreq_pfn
, xen_pfn_t
*bufioreq_pfn
,
119 evtchn_port_t
*bufioreq_port
)
121 return xc_hvm_get_ioreq_server_info(dmod
, domid
, id
, ioreq_pfn
,
122 bufioreq_pfn
, bufioreq_port
);
125 static inline int xendevicemodel_map_io_range_to_ioreq_server(
126 xendevicemodel_handle
*dmod
, domid_t domid
, ioservid_t id
, int is_mmio
,
127 uint64_t start
, uint64_t end
)
129 return xc_hvm_map_io_range_to_ioreq_server(dmod
, domid
, id
, is_mmio
,
133 static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
134 xendevicemodel_handle
*dmod
, domid_t domid
, ioservid_t id
, int is_mmio
,
135 uint64_t start
, uint64_t end
)
137 return xc_hvm_unmap_io_range_from_ioreq_server(dmod
, domid
, id
, is_mmio
,
141 static inline int xendevicemodel_map_pcidev_to_ioreq_server(
142 xendevicemodel_handle
*dmod
, domid_t domid
, ioservid_t id
,
143 uint16_t segment
, uint8_t bus
, uint8_t device
, uint8_t function
)
145 return xc_hvm_map_pcidev_to_ioreq_server(dmod
, domid
, id
, segment
,
146 bus
, device
, function
);
149 static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
150 xendevicemodel_handle
*dmod
, domid_t domid
, ioservid_t id
,
151 uint16_t segment
, uint8_t bus
, uint8_t device
, uint8_t function
)
153 return xc_hvm_unmap_pcidev_from_ioreq_server(dmod
, domid
, id
, segment
,
154 bus
, device
, function
);
157 static inline int xendevicemodel_destroy_ioreq_server(
158 xendevicemodel_handle
*dmod
, domid_t domid
, ioservid_t id
)
160 return xc_hvm_destroy_ioreq_server(dmod
, domid
, id
);
163 static inline int xendevicemodel_set_ioreq_server_state(
164 xendevicemodel_handle
*dmod
, domid_t domid
, ioservid_t id
, int enabled
)
166 return xc_hvm_set_ioreq_server_state(dmod
, domid
, id
, enabled
);
169 #endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */
171 static inline int xendevicemodel_set_pci_intx_level(
172 xendevicemodel_handle
*dmod
, domid_t domid
, uint16_t segment
,
173 uint8_t bus
, uint8_t device
, uint8_t intx
, unsigned int level
)
175 return xc_hvm_set_pci_intx_level(dmod
, domid
, segment
, bus
, device
,
179 static inline int xendevicemodel_set_isa_irq_level(
180 xendevicemodel_handle
*dmod
, domid_t domid
, uint8_t irq
,
183 return xc_hvm_set_isa_irq_level(dmod
, domid
, irq
, level
);
186 static inline int xendevicemodel_set_pci_link_route(
187 xendevicemodel_handle
*dmod
, domid_t domid
, uint8_t link
, uint8_t irq
)
189 return xc_hvm_set_pci_link_route(dmod
, domid
, link
, irq
);
192 static inline int xendevicemodel_inject_msi(
193 xendevicemodel_handle
*dmod
, domid_t domid
, uint64_t msi_addr
,
196 return xc_hvm_inject_msi(dmod
, domid
, msi_addr
, msi_data
);
199 static inline int xendevicemodel_track_dirty_vram(
200 xendevicemodel_handle
*dmod
, domid_t domid
, uint64_t first_pfn
,
201 uint32_t nr
, unsigned long *dirty_bitmap
)
203 return xc_hvm_track_dirty_vram(dmod
, domid
, first_pfn
, nr
,
207 static inline int xendevicemodel_modified_memory(
208 xendevicemodel_handle
*dmod
, domid_t domid
, uint64_t first_pfn
,
211 return xc_hvm_modified_memory(dmod
, domid
, first_pfn
, nr
);
214 static inline int xendevicemodel_set_mem_type(
215 xendevicemodel_handle
*dmod
, domid_t domid
, hvmmem_type_t mem_type
,
216 uint64_t first_pfn
, uint32_t nr
)
218 return xc_hvm_set_mem_type(dmod
, domid
, mem_type
, first_pfn
, nr
);
221 static inline int xendevicemodel_restrict(
222 xendevicemodel_handle
*dmod
, domid_t domid
)
228 static inline int xenforeignmemory_restrict(
229 xenforeignmemory_handle
*fmem
, domid_t domid
)
235 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */
237 #undef XC_WANT_COMPAT_DEVICEMODEL_API
238 #include <xendevicemodel.h>
/* Global devicemodel handle (real libxendevicemodel on >= 4.9); every
 * xen_* wrapper below passes it as the first argument.  Defined
 * elsewhere in QEMU. */
extern xendevicemodel_handle *xen_dmod;
244 static inline int xen_set_mem_type(domid_t domid
, hvmmem_type_t type
,
245 uint64_t first_pfn
, uint32_t nr
)
247 return xendevicemodel_set_mem_type(xen_dmod
, domid
, type
, first_pfn
,
251 static inline int xen_set_pci_intx_level(domid_t domid
, uint16_t segment
,
252 uint8_t bus
, uint8_t device
,
253 uint8_t intx
, unsigned int level
)
255 return xendevicemodel_set_pci_intx_level(xen_dmod
, domid
, segment
, bus
,
256 device
, intx
, level
);
259 static inline int xen_set_pci_link_route(domid_t domid
, uint8_t link
,
262 return xendevicemodel_set_pci_link_route(xen_dmod
, domid
, link
, irq
);
265 static inline int xen_inject_msi(domid_t domid
, uint64_t msi_addr
,
268 return xendevicemodel_inject_msi(xen_dmod
, domid
, msi_addr
, msi_data
);
271 static inline int xen_set_isa_irq_level(domid_t domid
, uint8_t irq
,
274 return xendevicemodel_set_isa_irq_level(xen_dmod
, domid
, irq
, level
);
277 static inline int xen_track_dirty_vram(domid_t domid
, uint64_t first_pfn
,
278 uint32_t nr
, unsigned long *bitmap
)
280 return xendevicemodel_track_dirty_vram(xen_dmod
, domid
, first_pfn
, nr
,
284 static inline int xen_modified_memory(domid_t domid
, uint64_t first_pfn
,
287 return xendevicemodel_modified_memory(xen_dmod
, domid
, first_pfn
, nr
);
290 static inline int xen_restrict(domid_t domid
)
294 /* Attempt to restrict devicemodel operations */
295 rc
= xendevicemodel_restrict(xen_dmod
, domid
);
296 trace_xen_domid_restrict(rc
? errno
: 0);
300 * If errno is ENOTTY then restriction is not implemented so
301 * there's no point in trying to restrict other types of
302 * operation, but it should not be treated as a failure.
304 if (errno
== ENOTTY
) {
311 /* Restrict foreignmemory operations */
312 rc
= xenforeignmemory_restrict(xen_fmem
, domid
);
313 trace_xen_domid_restrict(rc
? errno
: 0);
/* Tear down the current HVM domain; @reboot presumably selects reboot
 * versus plain destruction — confirm against the definition in the Xen
 * machine code. */
void destroy_hvm_domain(bool reboot);

/* shutdown/destroy current domain because of an error */
void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
323 #ifdef HVM_PARAM_VMPORT_REGS_PFN
324 static inline int xen_get_vmport_regs_pfn(xc_interface
*xc
, domid_t dom
,
325 xen_pfn_t
*vmport_regs_pfn
)
329 rc
= xc_hvm_param_get(xc
, dom
, HVM_PARAM_VMPORT_REGS_PFN
, &value
);
331 *vmport_regs_pfn
= (xen_pfn_t
) value
;
336 static inline int xen_get_vmport_regs_pfn(xc_interface
*xc
, domid_t dom
,
337 xen_pfn_t
*vmport_regs_pfn
)
344 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600
346 #ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
347 #define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
352 static inline int xen_get_default_ioreq_server_info(domid_t dom
,
353 xen_pfn_t
*ioreq_pfn
,
354 xen_pfn_t
*bufioreq_pfn
,
361 rc
= xc_get_hvm_param(xen_xc
, dom
, HVM_PARAM_IOREQ_PFN
, ¶m
);
363 fprintf(stderr
, "failed to get HVM_PARAM_IOREQ_PFN\n");
369 rc
= xc_get_hvm_param(xen_xc
, dom
, HVM_PARAM_BUFIOREQ_PFN
, ¶m
);
371 fprintf(stderr
, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
375 *bufioreq_pfn
= param
;
377 rc
= xc_get_hvm_param(xen_xc
, dom
, HVM_PARAM_BUFIOREQ_EVTCHN
,
380 fprintf(stderr
, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
384 *bufioreq_evtchn
= param
;
390 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500
392 #ifndef HVM_PARAM_BUFIOREQ_EVTCHN
393 #define HVM_PARAM_BUFIOREQ_EVTCHN 26
396 #define IOREQ_TYPE_PCI_CONFIG 2
/* Xen < 4.5 has no ioreq-server API; provide the server-id type so the
 * stub helpers in this branch still compile. */
typedef uint16_t ioservid_t;
400 static inline void xen_map_memory_section(domid_t dom
,
402 MemoryRegionSection
*section
)
406 static inline void xen_unmap_memory_section(domid_t dom
,
408 MemoryRegionSection
*section
)
412 static inline void xen_map_io_section(domid_t dom
,
414 MemoryRegionSection
*section
)
418 static inline void xen_unmap_io_section(domid_t dom
,
420 MemoryRegionSection
*section
)
424 static inline void xen_map_pcidev(domid_t dom
,
430 static inline void xen_unmap_pcidev(domid_t dom
,
436 static inline void xen_create_ioreq_server(domid_t dom
,
437 ioservid_t
*ioservid
)
441 static inline void xen_destroy_ioreq_server(domid_t dom
,
446 static inline int xen_get_ioreq_server_info(domid_t dom
,
448 xen_pfn_t
*ioreq_pfn
,
449 xen_pfn_t
*bufioreq_pfn
,
450 evtchn_port_t
*bufioreq_evtchn
)
452 return xen_get_default_ioreq_server_info(dom
, ioreq_pfn
,
457 static inline int xen_set_ioreq_server_state(domid_t dom
,
/* Apparently set when QEMU falls back to Xen's default (implicit) ioreq
 * server (see xen_create_ioreq_server below); the map/unmap helpers
 * check it to skip explicit-server operations.
 * NOTE(review): "static" in a header gives every including translation
 * unit its own copy of this flag — confirm that is intentional. */
static bool use_default_ioreq_server;
469 static inline void xen_map_memory_section(domid_t dom
,
471 MemoryRegionSection
*section
)
473 hwaddr start_addr
= section
->offset_within_address_space
;
474 ram_addr_t size
= int128_get64(section
->size
);
475 hwaddr end_addr
= start_addr
+ size
- 1;
477 if (use_default_ioreq_server
) {
481 trace_xen_map_mmio_range(ioservid
, start_addr
, end_addr
);
482 xendevicemodel_map_io_range_to_ioreq_server(xen_dmod
, dom
, ioservid
, 1,
483 start_addr
, end_addr
);
486 static inline void xen_unmap_memory_section(domid_t dom
,
488 MemoryRegionSection
*section
)
490 hwaddr start_addr
= section
->offset_within_address_space
;
491 ram_addr_t size
= int128_get64(section
->size
);
492 hwaddr end_addr
= start_addr
+ size
- 1;
494 if (use_default_ioreq_server
) {
498 trace_xen_unmap_mmio_range(ioservid
, start_addr
, end_addr
);
499 xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod
, dom
, ioservid
,
500 1, start_addr
, end_addr
);
503 static inline void xen_map_io_section(domid_t dom
,
505 MemoryRegionSection
*section
)
507 hwaddr start_addr
= section
->offset_within_address_space
;
508 ram_addr_t size
= int128_get64(section
->size
);
509 hwaddr end_addr
= start_addr
+ size
- 1;
511 if (use_default_ioreq_server
) {
515 trace_xen_map_portio_range(ioservid
, start_addr
, end_addr
);
516 xendevicemodel_map_io_range_to_ioreq_server(xen_dmod
, dom
, ioservid
, 0,
517 start_addr
, end_addr
);
520 static inline void xen_unmap_io_section(domid_t dom
,
522 MemoryRegionSection
*section
)
524 hwaddr start_addr
= section
->offset_within_address_space
;
525 ram_addr_t size
= int128_get64(section
->size
);
526 hwaddr end_addr
= start_addr
+ size
- 1;
528 if (use_default_ioreq_server
) {
532 trace_xen_unmap_portio_range(ioservid
, start_addr
, end_addr
);
533 xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod
, dom
, ioservid
,
534 0, start_addr
, end_addr
);
537 static inline void xen_map_pcidev(domid_t dom
,
541 if (use_default_ioreq_server
) {
545 trace_xen_map_pcidev(ioservid
, pci_dev_bus_num(pci_dev
),
546 PCI_SLOT(pci_dev
->devfn
), PCI_FUNC(pci_dev
->devfn
));
547 xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod
, dom
, ioservid
, 0,
548 pci_dev_bus_num(pci_dev
),
549 PCI_SLOT(pci_dev
->devfn
),
550 PCI_FUNC(pci_dev
->devfn
));
553 static inline void xen_unmap_pcidev(domid_t dom
,
557 if (use_default_ioreq_server
) {
561 trace_xen_unmap_pcidev(ioservid
, pci_dev_bus_num(pci_dev
),
562 PCI_SLOT(pci_dev
->devfn
), PCI_FUNC(pci_dev
->devfn
));
563 xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod
, dom
, ioservid
, 0,
564 pci_dev_bus_num(pci_dev
),
565 PCI_SLOT(pci_dev
->devfn
),
566 PCI_FUNC(pci_dev
->devfn
));
569 static inline void xen_create_ioreq_server(domid_t dom
,
570 ioservid_t
*ioservid
)
572 int rc
= xendevicemodel_create_ioreq_server(xen_dmod
, dom
,
573 HVM_IOREQSRV_BUFIOREQ_ATOMIC
,
577 trace_xen_ioreq_server_create(*ioservid
);
582 use_default_ioreq_server
= true;
583 trace_xen_default_ioreq_server();
586 static inline void xen_destroy_ioreq_server(domid_t dom
,
589 if (use_default_ioreq_server
) {
593 trace_xen_ioreq_server_destroy(ioservid
);
594 xendevicemodel_destroy_ioreq_server(xen_dmod
, dom
, ioservid
);
597 static inline int xen_get_ioreq_server_info(domid_t dom
,
599 xen_pfn_t
*ioreq_pfn
,
600 xen_pfn_t
*bufioreq_pfn
,
601 evtchn_port_t
*bufioreq_evtchn
)
603 if (use_default_ioreq_server
) {
604 return xen_get_default_ioreq_server_info(dom
, ioreq_pfn
,
609 return xendevicemodel_get_ioreq_server_info(xen_dmod
, dom
, ioservid
,
610 ioreq_pfn
, bufioreq_pfn
,
614 static inline int xen_set_ioreq_server_state(domid_t dom
,
618 if (use_default_ioreq_server
) {
622 trace_xen_ioreq_server_state(ioservid
, enable
);
623 return xendevicemodel_set_ioreq_server_state(xen_dmod
, dom
, ioservid
,
629 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600
630 static inline int xen_xc_domain_add_to_physmap(xc_interface
*xch
, uint32_t domid
,
635 return xc_domain_add_to_physmap(xch
, domid
, space
, idx
, gpfn
);
638 static inline int xen_xc_domain_add_to_physmap(xc_interface
*xch
, uint32_t domid
,
643 /* In Xen 4.6 rc is -1 and errno contains the error value. */
644 int rc
= xc_domain_add_to_physmap(xch
, domid
, space
, idx
, gpfn
);
651 #ifdef CONFIG_XEN_PV_DOMAIN_BUILD
652 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40700
653 static inline int xen_domain_create(xc_interface
*xc
, uint32_t ssidref
,
654 xen_domain_handle_t handle
, uint32_t flags
,
657 return xc_domain_create(xc
, ssidref
, handle
, flags
, pdomid
);
660 static inline int xen_domain_create(xc_interface
*xc
, uint32_t ssidref
,
661 xen_domain_handle_t handle
, uint32_t flags
,
664 return xc_domain_create(xc
, ssidref
, handle
, flags
, pdomid
, NULL
);
671 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800
/* Xen < 4.8 has no grant-copy API; provide an opaque segment type so
 * the xengnttab_grant_copy() stub that follows still compiles. */
typedef void *xengnttab_grant_copy_segment_t;
676 static inline int xengnttab_grant_copy(xengnttab_handle
*xgt
, uint32_t count
,
677 xengnttab_grant_copy_segment_t
*segs
)
683 #endif /* QEMU_HW_XEN_COMMON_H */