1 #ifndef QEMU_HW_XEN_COMMON_H
2 #define QEMU_HW_XEN_COMMON_H
5 * If we have new enough libxenctrl then we do not want/need these compat
6 * interfaces, despite what the user supplied cflags might say. They
7 * must be undefined before including xenctrl.h
9 #undef XC_WANT_COMPAT_EVTCHN_API
10 #undef XC_WANT_COMPAT_GNTTAB_API
11 #undef XC_WANT_COMPAT_MAP_FOREIGN_API
15 #include "hw/xen/interface/io/xenbus.h"
17 #include "hw/xen/xen.h"
18 #include "hw/pci/pci.h"
19 #include "hw/xen/trace.h"
21 extern xc_interface
*xen_xc
;
24 * We don't support Xen prior to 4.2.0.
27 /* Xen 4.2 through 4.6 */
28 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701
30 typedef xc_interface xenforeignmemory_handle
;
31 typedef xc_evtchn xenevtchn_handle
;
32 typedef xc_gnttab xengnttab_handle
;
33 typedef evtchn_port_or_error_t xenevtchn_port_or_error_t
;
/*
 * Event-channel compat: map the new xenevtchn_* names onto the old
 * xc_evtchn_* entry points.
 *
 * Fix: the original xenevtchn_open() expansion ended with a stray
 * semicolon, which would break any use of the macro inside an
 * expression, e.g. "if ((h = xenevtchn_open(NULL, 0)) == NULL)".
 */
#define xenevtchn_open(l, f) xc_evtchn_open(l, f)
#define xenevtchn_close(h) xc_evtchn_close(h)
#define xenevtchn_fd(h) xc_evtchn_fd(h)
#define xenevtchn_pending(h) xc_evtchn_pending(h)
#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
/* Grant-table compat: new xengnttab_* names onto old xc_gnttab_* calls. */
#define xengnttab_open(l, f) xc_gnttab_open(l, f)
#define xengnttab_close(h) xc_gnttab_close(h)
#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
#define xengnttab_map_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_grant_refs(h, c, d, r, p)
#define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_domain_grant_refs(h, c, d, r, p)

/*
 * Foreign-memory compat: "opening" simply hands back the global xen_xc
 * handle, so "closing" expands to nothing.
 */
#define xenforeignmemory_open(l, f) xen_xc
#define xenforeignmemory_close(h)
57 static inline void *xenforeignmemory_map(xc_interface
*h
, uint32_t dom
,
58 int prot
, size_t pages
,
59 const xen_pfn_t arr
[/*pages*/],
63 return xc_map_foreign_bulk(h
, dom
, prot
, arr
, err
, pages
);
65 return xc_map_foreign_pages(h
, dom
, prot
, arr
, pages
);
68 #define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE)
70 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */
72 #include <xenevtchn.h>
73 #include <xengnttab.h>
74 #include <xenforeignmemory.h>
78 extern xenforeignmemory_handle
*xen_fmem
;
80 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
82 typedef xc_interface xendevicemodel_handle
;
84 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */
86 #undef XC_WANT_COMPAT_DEVICEMODEL_API
87 #include <xendevicemodel.h>
91 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100
93 static inline int xendevicemodel_relocate_memory(
94 xendevicemodel_handle
*dmod
, domid_t domid
, uint32_t size
, uint64_t src_gfn
,
100 for (i
= 0; i
< size
; i
++) {
101 unsigned long idx
= src_gfn
+ i
;
102 xen_pfn_t gpfn
= dst_gfn
+ i
;
104 rc
= xc_domain_add_to_physmap(xen_xc
, domid
, XENMAPSPACE_gmfn
, idx
,
114 static inline int xendevicemodel_pin_memory_cacheattr(
115 xendevicemodel_handle
*dmod
, domid_t domid
, uint64_t start
, uint64_t end
,
118 return xc_domain_pin_memory_cacheattr(xen_xc
, domid
, start
, end
, type
);
/* No mappable-resource support before Xen 4.11; placeholder types and
 * frame numbers so callers still compile. */
typedef void xenforeignmemory_resource_handle;

#define XENMEM_resource_ioreq_server 0

#define XENMEM_resource_ioreq_server_frame_bufioreq 0
#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))
128 static inline xenforeignmemory_resource_handle
*xenforeignmemory_map_resource(
129 xenforeignmemory_handle
*fmem
, domid_t domid
, unsigned int type
,
130 unsigned int id
, unsigned long frame
, unsigned long nr_frames
,
131 void **paddr
, int prot
, int flags
)
137 static inline int xenforeignmemory_unmap_resource(
138 xenforeignmemory_handle
*fmem
, xenforeignmemory_resource_handle
*fres
)
143 #endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100 */
145 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000
147 #define XEN_COMPAT_PHYSMAP
148 static inline void *xenforeignmemory_map2(xenforeignmemory_handle
*h
,
149 uint32_t dom
, void *addr
,
150 int prot
, int flags
, size_t pages
,
151 const xen_pfn_t arr
[/*pages*/],
154 assert(addr
== NULL
&& flags
== 0);
155 return xenforeignmemory_map(h
, dom
, prot
, pages
, arr
, err
);
158 static inline int xentoolcore_restrict_all(domid_t domid
)
164 static inline int xendevicemodel_shutdown(xendevicemodel_handle
*dmod
,
165 domid_t domid
, unsigned int reason
)
171 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 41000 */
173 #include <xentoolcore.h>
177 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900
179 static inline xendevicemodel_handle
*xendevicemodel_open(
180 struct xentoollog_logger
*logger
, unsigned int open_flags
)
185 #if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500
187 static inline int xendevicemodel_create_ioreq_server(
188 xendevicemodel_handle
*dmod
, domid_t domid
, int handle_bufioreq
,
191 return xc_hvm_create_ioreq_server(dmod
, domid
, handle_bufioreq
,
195 static inline int xendevicemodel_get_ioreq_server_info(
196 xendevicemodel_handle
*dmod
, domid_t domid
, ioservid_t id
,
197 xen_pfn_t
*ioreq_pfn
, xen_pfn_t
*bufioreq_pfn
,
198 evtchn_port_t
*bufioreq_port
)
200 return xc_hvm_get_ioreq_server_info(dmod
, domid
, id
, ioreq_pfn
,
201 bufioreq_pfn
, bufioreq_port
);
204 static inline int xendevicemodel_map_io_range_to_ioreq_server(
205 xendevicemodel_handle
*dmod
, domid_t domid
, ioservid_t id
, int is_mmio
,
206 uint64_t start
, uint64_t end
)
208 return xc_hvm_map_io_range_to_ioreq_server(dmod
, domid
, id
, is_mmio
,
212 static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
213 xendevicemodel_handle
*dmod
, domid_t domid
, ioservid_t id
, int is_mmio
,
214 uint64_t start
, uint64_t end
)
216 return xc_hvm_unmap_io_range_from_ioreq_server(dmod
, domid
, id
, is_mmio
,
220 static inline int xendevicemodel_map_pcidev_to_ioreq_server(
221 xendevicemodel_handle
*dmod
, domid_t domid
, ioservid_t id
,
222 uint16_t segment
, uint8_t bus
, uint8_t device
, uint8_t function
)
224 return xc_hvm_map_pcidev_to_ioreq_server(dmod
, domid
, id
, segment
,
225 bus
, device
, function
);
228 static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
229 xendevicemodel_handle
*dmod
, domid_t domid
, ioservid_t id
,
230 uint16_t segment
, uint8_t bus
, uint8_t device
, uint8_t function
)
232 return xc_hvm_unmap_pcidev_from_ioreq_server(dmod
, domid
, id
, segment
,
233 bus
, device
, function
);
236 static inline int xendevicemodel_destroy_ioreq_server(
237 xendevicemodel_handle
*dmod
, domid_t domid
, ioservid_t id
)
239 return xc_hvm_destroy_ioreq_server(dmod
, domid
, id
);
242 static inline int xendevicemodel_set_ioreq_server_state(
243 xendevicemodel_handle
*dmod
, domid_t domid
, ioservid_t id
, int enabled
)
245 return xc_hvm_set_ioreq_server_state(dmod
, domid
, id
, enabled
);
248 #endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */
250 static inline int xendevicemodel_set_pci_intx_level(
251 xendevicemodel_handle
*dmod
, domid_t domid
, uint16_t segment
,
252 uint8_t bus
, uint8_t device
, uint8_t intx
, unsigned int level
)
254 return xc_hvm_set_pci_intx_level(dmod
, domid
, segment
, bus
, device
,
258 static inline int xendevicemodel_set_isa_irq_level(
259 xendevicemodel_handle
*dmod
, domid_t domid
, uint8_t irq
,
262 return xc_hvm_set_isa_irq_level(dmod
, domid
, irq
, level
);
265 static inline int xendevicemodel_set_pci_link_route(
266 xendevicemodel_handle
*dmod
, domid_t domid
, uint8_t link
, uint8_t irq
)
268 return xc_hvm_set_pci_link_route(dmod
, domid
, link
, irq
);
271 static inline int xendevicemodel_inject_msi(
272 xendevicemodel_handle
*dmod
, domid_t domid
, uint64_t msi_addr
,
275 return xc_hvm_inject_msi(dmod
, domid
, msi_addr
, msi_data
);
278 static inline int xendevicemodel_track_dirty_vram(
279 xendevicemodel_handle
*dmod
, domid_t domid
, uint64_t first_pfn
,
280 uint32_t nr
, unsigned long *dirty_bitmap
)
282 return xc_hvm_track_dirty_vram(dmod
, domid
, first_pfn
, nr
,
286 static inline int xendevicemodel_modified_memory(
287 xendevicemodel_handle
*dmod
, domid_t domid
, uint64_t first_pfn
,
290 return xc_hvm_modified_memory(dmod
, domid
, first_pfn
, nr
);
293 static inline int xendevicemodel_set_mem_type(
294 xendevicemodel_handle
*dmod
, domid_t domid
, hvmmem_type_t mem_type
,
295 uint64_t first_pfn
, uint32_t nr
)
297 return xc_hvm_set_mem_type(dmod
, domid
, mem_type
, first_pfn
, nr
);
302 extern xendevicemodel_handle
*xen_dmod
;
304 static inline int xen_set_mem_type(domid_t domid
, hvmmem_type_t type
,
305 uint64_t first_pfn
, uint32_t nr
)
307 return xendevicemodel_set_mem_type(xen_dmod
, domid
, type
, first_pfn
,
311 static inline int xen_set_pci_intx_level(domid_t domid
, uint16_t segment
,
312 uint8_t bus
, uint8_t device
,
313 uint8_t intx
, unsigned int level
)
315 return xendevicemodel_set_pci_intx_level(xen_dmod
, domid
, segment
, bus
,
316 device
, intx
, level
);
319 static inline int xen_set_pci_link_route(domid_t domid
, uint8_t link
,
322 return xendevicemodel_set_pci_link_route(xen_dmod
, domid
, link
, irq
);
325 static inline int xen_inject_msi(domid_t domid
, uint64_t msi_addr
,
328 return xendevicemodel_inject_msi(xen_dmod
, domid
, msi_addr
, msi_data
);
331 static inline int xen_set_isa_irq_level(domid_t domid
, uint8_t irq
,
334 return xendevicemodel_set_isa_irq_level(xen_dmod
, domid
, irq
, level
);
337 static inline int xen_track_dirty_vram(domid_t domid
, uint64_t first_pfn
,
338 uint32_t nr
, unsigned long *bitmap
)
340 return xendevicemodel_track_dirty_vram(xen_dmod
, domid
, first_pfn
, nr
,
344 static inline int xen_modified_memory(domid_t domid
, uint64_t first_pfn
,
347 return xendevicemodel_modified_memory(xen_dmod
, domid
, first_pfn
, nr
);
350 static inline int xen_restrict(domid_t domid
)
353 rc
= xentoolcore_restrict_all(domid
);
354 trace_xen_domid_restrict(rc
? errno
: 0);
358 void destroy_hvm_domain(bool reboot
);
360 /* shutdown/destroy current domain because of an error */
361 void xen_shutdown_fatal_error(const char *fmt
, ...) GCC_FMT_ATTR(1, 2);
363 #ifdef HVM_PARAM_VMPORT_REGS_PFN
364 static inline int xen_get_vmport_regs_pfn(xc_interface
*xc
, domid_t dom
,
365 xen_pfn_t
*vmport_regs_pfn
)
369 rc
= xc_hvm_param_get(xc
, dom
, HVM_PARAM_VMPORT_REGS_PFN
, &value
);
371 *vmport_regs_pfn
= (xen_pfn_t
) value
;
376 static inline int xen_get_vmport_regs_pfn(xc_interface
*xc
, domid_t dom
,
377 xen_pfn_t
*vmport_regs_pfn
)
384 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600
/* Supply the atomic-bufioreq flag when the headers predate it.
 * NOTE(review): closing #endif restored; it is missing from this copy. */
#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
#endif
392 static inline int xen_get_default_ioreq_server_info(domid_t dom
,
393 xen_pfn_t
*ioreq_pfn
,
394 xen_pfn_t
*bufioreq_pfn
,
401 rc
= xc_get_hvm_param(xen_xc
, dom
, HVM_PARAM_IOREQ_PFN
, ¶m
);
403 fprintf(stderr
, "failed to get HVM_PARAM_IOREQ_PFN\n");
409 rc
= xc_get_hvm_param(xen_xc
, dom
, HVM_PARAM_BUFIOREQ_PFN
, ¶m
);
411 fprintf(stderr
, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
415 *bufioreq_pfn
= param
;
417 rc
= xc_get_hvm_param(xen_xc
, dom
, HVM_PARAM_BUFIOREQ_EVTCHN
,
420 fprintf(stderr
, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
424 *bufioreq_evtchn
= param
;
430 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500
/* Pre-4.5 headers lack these definitions; provide them locally.
 * NOTE(review): closing #endif restored; it is missing from this copy. */
#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

#define IOREQ_TYPE_PCI_CONFIG 2

typedef uint16_t ioservid_t;
440 static inline void xen_map_memory_section(domid_t dom
,
442 MemoryRegionSection
*section
)
446 static inline void xen_unmap_memory_section(domid_t dom
,
448 MemoryRegionSection
*section
)
452 static inline void xen_map_io_section(domid_t dom
,
454 MemoryRegionSection
*section
)
458 static inline void xen_unmap_io_section(domid_t dom
,
460 MemoryRegionSection
*section
)
464 static inline void xen_map_pcidev(domid_t dom
,
470 static inline void xen_unmap_pcidev(domid_t dom
,
476 static inline void xen_create_ioreq_server(domid_t dom
,
477 ioservid_t
*ioservid
)
481 static inline void xen_destroy_ioreq_server(domid_t dom
,
486 static inline int xen_get_ioreq_server_info(domid_t dom
,
488 xen_pfn_t
*ioreq_pfn
,
489 xen_pfn_t
*bufioreq_pfn
,
490 evtchn_port_t
*bufioreq_evtchn
)
492 return xen_get_default_ioreq_server_info(dom
, ioreq_pfn
,
497 static inline int xen_set_ioreq_server_state(domid_t dom
,
/* Set when ioreq-server creation fails and we fall back to the default
 * (shared) server, turning the per-server hooks below into no-ops. */
static bool use_default_ioreq_server;
509 static inline void xen_map_memory_section(domid_t dom
,
511 MemoryRegionSection
*section
)
513 hwaddr start_addr
= section
->offset_within_address_space
;
514 ram_addr_t size
= int128_get64(section
->size
);
515 hwaddr end_addr
= start_addr
+ size
- 1;
517 if (use_default_ioreq_server
) {
521 trace_xen_map_mmio_range(ioservid
, start_addr
, end_addr
);
522 xendevicemodel_map_io_range_to_ioreq_server(xen_dmod
, dom
, ioservid
, 1,
523 start_addr
, end_addr
);
526 static inline void xen_unmap_memory_section(domid_t dom
,
528 MemoryRegionSection
*section
)
530 hwaddr start_addr
= section
->offset_within_address_space
;
531 ram_addr_t size
= int128_get64(section
->size
);
532 hwaddr end_addr
= start_addr
+ size
- 1;
534 if (use_default_ioreq_server
) {
538 trace_xen_unmap_mmio_range(ioservid
, start_addr
, end_addr
);
539 xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod
, dom
, ioservid
,
540 1, start_addr
, end_addr
);
543 static inline void xen_map_io_section(domid_t dom
,
545 MemoryRegionSection
*section
)
547 hwaddr start_addr
= section
->offset_within_address_space
;
548 ram_addr_t size
= int128_get64(section
->size
);
549 hwaddr end_addr
= start_addr
+ size
- 1;
551 if (use_default_ioreq_server
) {
555 trace_xen_map_portio_range(ioservid
, start_addr
, end_addr
);
556 xendevicemodel_map_io_range_to_ioreq_server(xen_dmod
, dom
, ioservid
, 0,
557 start_addr
, end_addr
);
560 static inline void xen_unmap_io_section(domid_t dom
,
562 MemoryRegionSection
*section
)
564 hwaddr start_addr
= section
->offset_within_address_space
;
565 ram_addr_t size
= int128_get64(section
->size
);
566 hwaddr end_addr
= start_addr
+ size
- 1;
568 if (use_default_ioreq_server
) {
572 trace_xen_unmap_portio_range(ioservid
, start_addr
, end_addr
);
573 xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod
, dom
, ioservid
,
574 0, start_addr
, end_addr
);
577 static inline void xen_map_pcidev(domid_t dom
,
581 if (use_default_ioreq_server
) {
585 trace_xen_map_pcidev(ioservid
, pci_dev_bus_num(pci_dev
),
586 PCI_SLOT(pci_dev
->devfn
), PCI_FUNC(pci_dev
->devfn
));
587 xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod
, dom
, ioservid
, 0,
588 pci_dev_bus_num(pci_dev
),
589 PCI_SLOT(pci_dev
->devfn
),
590 PCI_FUNC(pci_dev
->devfn
));
593 static inline void xen_unmap_pcidev(domid_t dom
,
597 if (use_default_ioreq_server
) {
601 trace_xen_unmap_pcidev(ioservid
, pci_dev_bus_num(pci_dev
),
602 PCI_SLOT(pci_dev
->devfn
), PCI_FUNC(pci_dev
->devfn
));
603 xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod
, dom
, ioservid
, 0,
604 pci_dev_bus_num(pci_dev
),
605 PCI_SLOT(pci_dev
->devfn
),
606 PCI_FUNC(pci_dev
->devfn
));
609 static inline void xen_create_ioreq_server(domid_t dom
,
610 ioservid_t
*ioservid
)
612 int rc
= xendevicemodel_create_ioreq_server(xen_dmod
, dom
,
613 HVM_IOREQSRV_BUFIOREQ_ATOMIC
,
617 trace_xen_ioreq_server_create(*ioservid
);
622 use_default_ioreq_server
= true;
623 trace_xen_default_ioreq_server();
626 static inline void xen_destroy_ioreq_server(domid_t dom
,
629 if (use_default_ioreq_server
) {
633 trace_xen_ioreq_server_destroy(ioservid
);
634 xendevicemodel_destroy_ioreq_server(xen_dmod
, dom
, ioservid
);
637 static inline int xen_get_ioreq_server_info(domid_t dom
,
639 xen_pfn_t
*ioreq_pfn
,
640 xen_pfn_t
*bufioreq_pfn
,
641 evtchn_port_t
*bufioreq_evtchn
)
643 if (use_default_ioreq_server
) {
644 return xen_get_default_ioreq_server_info(dom
, ioreq_pfn
,
649 return xendevicemodel_get_ioreq_server_info(xen_dmod
, dom
, ioservid
,
650 ioreq_pfn
, bufioreq_pfn
,
654 static inline int xen_set_ioreq_server_state(domid_t dom
,
658 if (use_default_ioreq_server
) {
662 trace_xen_ioreq_server_state(ioservid
, enable
);
663 return xendevicemodel_set_ioreq_server_state(xen_dmod
, dom
, ioservid
,
671 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800
673 struct xengnttab_grant_copy_segment
{
674 union xengnttab_copy_ptr
{
687 typedef struct xengnttab_grant_copy_segment xengnttab_grant_copy_segment_t
;
689 static inline int xengnttab_grant_copy(xengnttab_handle
*xgt
, uint32_t count
,
690 xengnttab_grant_copy_segment_t
*segs
)
696 #endif /* QEMU_HW_XEN_COMMON_H */