]> git.proxmox.com Git - mirror_qemu.git/blame - include/hw/xen/xen_common.h
xen: rename xen_modified_memory() to xen_hvm_modified_memory()
[mirror_qemu.git] / include / hw / xen / xen_common.h
CommitLineData
d94f9486 1#ifndef QEMU_HW_XEN_COMMON_H
175de524 2#define QEMU_HW_XEN_COMMON_H
d94f9486 3
5eeb39c2
IC
4/*
5 * If we have new enough libxenctrl then we do not want/need these compat
6 * interfaces, despite what the user supplied cflags might say. They
7 * must be undefined before including xenctrl.h
8 */
9#undef XC_WANT_COMPAT_EVTCHN_API
10#undef XC_WANT_COMPAT_GNTTAB_API
11#undef XC_WANT_COMPAT_MAP_FOREIGN_API
12
d94f9486 13#include <xenctrl.h>
edfb07ed 14#include <xenstore.h>
d94f9486
AL
15#include <xen/io/xenbus.h>
16
83c9f4ca 17#include "hw/hw.h"
0d09e41a 18#include "hw/xen/xen.h"
3996e85c 19#include "hw/pci/pci.h"
1de7afc9 20#include "qemu/queue.h"
0ab8ed18 21#include "hw/xen/trace.h"
d94f9486 22
260cabed
PD
23extern xc_interface *xen_xc;
24
d94f9486 25/*
edfb07ed 26 * We don't support Xen prior to 4.2.0.
d94f9486 27 */
d5b93ddf 28
cb8d4c8f 29/* Xen 4.2 through 4.6 */
edfb07ed 30#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 471
d5b93ddf 31
81daba58 32typedef xc_interface xenforeignmemory_handle;
a2db2a1e 33typedef xc_evtchn xenevtchn_handle;
c1345a88 34typedef xc_gnttab xengnttab_handle;
d5b93ddf 35
a2db2a1e
IC
36#define xenevtchn_open(l, f) xc_evtchn_open(l, f);
37#define xenevtchn_close(h) xc_evtchn_close(h)
38#define xenevtchn_fd(h) xc_evtchn_fd(h)
39#define xenevtchn_pending(h) xc_evtchn_pending(h)
40#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
41#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
42#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
43#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
d5b93ddf 44
c1345a88
IC
45#define xengnttab_open(l, f) xc_gnttab_open(l, f)
46#define xengnttab_close(h) xc_gnttab_close(h)
47#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
48#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
49#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
50#define xengnttab_map_grant_refs(h, c, d, r, p) \
51 xc_gnttab_map_grant_refs(h, c, d, r, p)
816ac92e
JG
52#define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
53 xc_gnttab_map_domain_grant_refs(h, c, d, r, p)
d5b93ddf 54
6aa0205e
IC
55#define xenforeignmemory_open(l, f) xen_xc
56
57static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
58 int prot, size_t pages,
59 const xen_pfn_t arr[/*pages*/],
60 int err[/*pages*/])
61{
62 if (err)
63 return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
64 else
65 return xc_map_foreign_pages(h, dom, prot, arr, pages);
66}
67
68#define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE)
e0cb42ae 69
5eeb39c2
IC
70#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 471 */
71
5eeb39c2
IC
72#include <xenevtchn.h>
73#include <xengnttab.h>
74#include <xenforeignmemory.h>
75
d94f9486
AL
76#endif
77
260cabed
PD
78extern xenforeignmemory_handle *xen_fmem;
79
180640ea 80void destroy_hvm_domain(bool reboot);
9ce94e7c 81
eaab4d60
AK
82/* shutdown/destroy current domain because of an error */
83void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
84
37f9e258 85#ifdef HVM_PARAM_VMPORT_REGS_PFN
81daba58 86static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
d01a5a3f 87 xen_pfn_t *vmport_regs_pfn)
37f9e258 88{
d01a5a3f
SS
89 int rc;
90 uint64_t value;
91 rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
92 if (rc >= 0) {
93 *vmport_regs_pfn = (xen_pfn_t) value;
94 }
95 return rc;
37f9e258
DS
96}
97#else
81daba58 98static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
d01a5a3f 99 xen_pfn_t *vmport_regs_pfn)
37f9e258
DS
100{
101 return -ENOSYS;
102}
103#endif
104
d8b441a3
JB
105/* Xen before 4.6 */
106#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
107
108#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
109#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
110#endif
111
112#endif
113
260cabed 114static inline int xen_get_default_ioreq_server_info(domid_t dom,
b7665c60
PD
115 xen_pfn_t *ioreq_pfn,
116 xen_pfn_t *bufioreq_pfn,
117 evtchn_port_t
118 *bufioreq_evtchn)
119{
120 unsigned long param;
121 int rc;
122
260cabed 123 rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param);
b7665c60
PD
124 if (rc < 0) {
125 fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
126 return -1;
127 }
128
129 *ioreq_pfn = param;
130
260cabed 131 rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
b7665c60
PD
132 if (rc < 0) {
133 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
134 return -1;
135 }
136
137 *bufioreq_pfn = param;
138
260cabed 139 rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
b7665c60
PD
140 &param);
141 if (rc < 0) {
142 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
143 return -1;
144 }
145
146 *bufioreq_evtchn = param;
147
148 return 0;
149}
150
3996e85c
PD
151/* Xen before 4.5 */
152#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 450
153
154#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
155#define HVM_PARAM_BUFIOREQ_EVTCHN 26
156#endif
157
158#define IOREQ_TYPE_PCI_CONFIG 2
159
d09952ee 160typedef uint16_t ioservid_t;
3996e85c 161
/* Xen < 4.5 has no ioreq servers: registering an MMIO section is a no-op. */
static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}
167
/* Xen < 4.5 has no ioreq servers: unregistering an MMIO section is a no-op. */
static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}
173
/* Xen < 4.5 has no ioreq servers: registering a port I/O section is a no-op. */
static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}
179
/* Xen < 4.5 has no ioreq servers: unregistering a port I/O section is a no-op. */
static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}
185
/* Xen < 4.5 has no ioreq servers: registering a PCI device is a no-op. */
static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}
191
/* Xen < 4.5 has no ioreq servers: unregistering a PCI device is a no-op. */
static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}
197
/*
 * Xen < 4.5: there is nothing to create; only the implicit default
 * ioreq server exists.  *ioservid is deliberately left untouched here.
 */
static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
}
202
/* Xen < 4.5: nothing was created, so there is nothing to destroy. */
static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
}
207
/*
 * Xen < 4.5: only the default ioreq server exists, so any @ioservid
 * query resolves to the default server's parameters.
 */
static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                             bufioreq_pfn,
                                             bufioreq_evtchn);
}
218
/*
 * Xen < 4.5: the default ioreq server cannot be enabled or disabled;
 * report success unconditionally.
 */
static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    return 0;
}
225
226/* Xen 4.5 */
227#else
228
b7665c60
PD
229static bool use_default_ioreq_server;
230
260cabed 231static inline void xen_map_memory_section(domid_t dom,
3996e85c
PD
232 ioservid_t ioservid,
233 MemoryRegionSection *section)
234{
235 hwaddr start_addr = section->offset_within_address_space;
236 ram_addr_t size = int128_get64(section->size);
237 hwaddr end_addr = start_addr + size - 1;
238
b7665c60
PD
239 if (use_default_ioreq_server) {
240 return;
241 }
242
3996e85c 243 trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
260cabed 244 xc_hvm_map_io_range_to_ioreq_server(xen_xc, dom, ioservid, 1,
3996e85c
PD
245 start_addr, end_addr);
246}
247
260cabed 248static inline void xen_unmap_memory_section(domid_t dom,
3996e85c
PD
249 ioservid_t ioservid,
250 MemoryRegionSection *section)
251{
252 hwaddr start_addr = section->offset_within_address_space;
253 ram_addr_t size = int128_get64(section->size);
254 hwaddr end_addr = start_addr + size - 1;
255
b7665c60
PD
256 if (use_default_ioreq_server) {
257 return;
258 }
259
3996e85c 260 trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
260cabed
PD
261 xc_hvm_unmap_io_range_from_ioreq_server(xen_xc, dom, ioservid,
262 1, start_addr, end_addr);
3996e85c
PD
263}
264
260cabed 265static inline void xen_map_io_section(domid_t dom,
3996e85c
PD
266 ioservid_t ioservid,
267 MemoryRegionSection *section)
268{
269 hwaddr start_addr = section->offset_within_address_space;
270 ram_addr_t size = int128_get64(section->size);
271 hwaddr end_addr = start_addr + size - 1;
272
b7665c60
PD
273 if (use_default_ioreq_server) {
274 return;
275 }
276
3996e85c 277 trace_xen_map_portio_range(ioservid, start_addr, end_addr);
260cabed 278 xc_hvm_map_io_range_to_ioreq_server(xen_xc, dom, ioservid, 0,
3996e85c
PD
279 start_addr, end_addr);
280}
281
260cabed 282static inline void xen_unmap_io_section(domid_t dom,
3996e85c
PD
283 ioservid_t ioservid,
284 MemoryRegionSection *section)
285{
286 hwaddr start_addr = section->offset_within_address_space;
287 ram_addr_t size = int128_get64(section->size);
288 hwaddr end_addr = start_addr + size - 1;
289
b7665c60
PD
290 if (use_default_ioreq_server) {
291 return;
292 }
293
3996e85c 294 trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
260cabed
PD
295 xc_hvm_unmap_io_range_from_ioreq_server(xen_xc, dom, ioservid,
296 0, start_addr, end_addr);
3996e85c
PD
297}
298
260cabed 299static inline void xen_map_pcidev(domid_t dom,
3996e85c
PD
300 ioservid_t ioservid,
301 PCIDevice *pci_dev)
302{
b7665c60
PD
303 if (use_default_ioreq_server) {
304 return;
305 }
306
3996e85c
PD
307 trace_xen_map_pcidev(ioservid, pci_bus_num(pci_dev->bus),
308 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
260cabed
PD
309 xc_hvm_map_pcidev_to_ioreq_server(xen_xc, dom, ioservid, 0,
310 pci_bus_num(pci_dev->bus),
3996e85c
PD
311 PCI_SLOT(pci_dev->devfn),
312 PCI_FUNC(pci_dev->devfn));
313}
314
260cabed 315static inline void xen_unmap_pcidev(domid_t dom,
3996e85c
PD
316 ioservid_t ioservid,
317 PCIDevice *pci_dev)
318{
b7665c60
PD
319 if (use_default_ioreq_server) {
320 return;
321 }
322
3996e85c
PD
323 trace_xen_unmap_pcidev(ioservid, pci_bus_num(pci_dev->bus),
324 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
260cabed
PD
325 xc_hvm_unmap_pcidev_from_ioreq_server(xen_xc, dom, ioservid, 0,
326 pci_bus_num(pci_dev->bus),
3996e85c
PD
327 PCI_SLOT(pci_dev->devfn),
328 PCI_FUNC(pci_dev->devfn));
329}
330
260cabed 331static inline void xen_create_ioreq_server(domid_t dom,
b7665c60 332 ioservid_t *ioservid)
3996e85c 333{
260cabed
PD
334 int rc = xc_hvm_create_ioreq_server(xen_xc, dom,
335 HVM_IOREQSRV_BUFIOREQ_ATOMIC,
d8b441a3 336 ioservid);
3996e85c
PD
337
338 if (rc == 0) {
339 trace_xen_ioreq_server_create(*ioservid);
b7665c60 340 return;
3996e85c
PD
341 }
342
b7665c60
PD
343 *ioservid = 0;
344 use_default_ioreq_server = true;
345 trace_xen_default_ioreq_server();
3996e85c
PD
346}
347
260cabed 348static inline void xen_destroy_ioreq_server(domid_t dom,
3996e85c
PD
349 ioservid_t ioservid)
350{
b7665c60
PD
351 if (use_default_ioreq_server) {
352 return;
353 }
354
3996e85c 355 trace_xen_ioreq_server_destroy(ioservid);
260cabed 356 xc_hvm_destroy_ioreq_server(xen_xc, dom, ioservid);
3996e85c
PD
357}
358
260cabed 359static inline int xen_get_ioreq_server_info(domid_t dom,
3996e85c
PD
360 ioservid_t ioservid,
361 xen_pfn_t *ioreq_pfn,
362 xen_pfn_t *bufioreq_pfn,
363 evtchn_port_t *bufioreq_evtchn)
364{
b7665c60 365 if (use_default_ioreq_server) {
260cabed 366 return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
b7665c60
PD
367 bufioreq_pfn,
368 bufioreq_evtchn);
369 }
370
260cabed 371 return xc_hvm_get_ioreq_server_info(xen_xc, dom, ioservid,
3996e85c
PD
372 ioreq_pfn, bufioreq_pfn,
373 bufioreq_evtchn);
374}
375
260cabed 376static inline int xen_set_ioreq_server_state(domid_t dom,
3996e85c
PD
377 ioservid_t ioservid,
378 bool enable)
379{
b7665c60
PD
380 if (use_default_ioreq_server) {
381 return 0;
382 }
383
3996e85c 384 trace_xen_ioreq_server_state(ioservid, enable);
260cabed
PD
385 return xc_hvm_set_ioreq_server_state(xen_xc, dom, ioservid,
386 enable);
3996e85c
PD
387}
388
389#endif
390
20a544c7 391#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
81daba58 392static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
20a544c7
KRW
393 unsigned int space,
394 unsigned long idx,
395 xen_pfn_t gpfn)
396{
397 return xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
398}
399#else
81daba58 400static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
20a544c7
KRW
401 unsigned int space,
402 unsigned long idx,
403 xen_pfn_t gpfn)
404{
405 /* In Xen 4.6 rc is -1 and errno contains the error value. */
406 int rc = xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
407 if (rc == -1)
408 return errno;
409 return rc;
410}
411#endif
412
#ifdef CONFIG_XEN_PV_DOMAIN_BUILD
/*
 * Compat wrapper around xc_domain_create(): Xen 4.7 added a trailing
 * xen_arch_domainconfig argument, which we always leave defaulted (NULL).
 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 470
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid);
}
#else
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    /* Extra NULL: accept the default arch-specific domain config. */
    return xc_domain_create(xc, ssidref, handle, flags, pdomid, NULL);
}
#endif
#endif /* CONFIG_XEN_PV_DOMAIN_BUILD */
cdadde39 430
b6eb9b45
PS
/* Xen before 4.8 */

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 480


/* Placeholder type; the real definition only exists in xengnttab.h >= 4.8. */
typedef void *xengnttab_grant_copy_segment_t;

/*
 * Grant-copy is unavailable before Xen 4.8; always fails with -ENOSYS
 * so callers can detect the absence and fall back.
 */
static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
                                       xengnttab_grant_copy_segment_t *segs)
{
    return -ENOSYS;
}
#endif
444
d94f9486 445#endif /* QEMU_HW_XEN_COMMON_H */