]> git.proxmox.com Git - mirror_qemu.git/blame - include/hw/xen/xen_native.h
xen: fix condition for skipping virtio-mmio defines
[mirror_qemu.git] / include / hw / xen / xen_native.h
CommitLineData
e2abfe5e
DW
1#ifndef QEMU_HW_XEN_NATIVE_H
2#define QEMU_HW_XEN_NATIVE_H
3
4#ifdef __XEN_INTERFACE_VERSION__
5#error In Xen native files, include xen_native.h before other Xen headers
6#endif
d94f9486 7
5eeb39c2
IC
8/*
9 * If we have new enough libxenctrl then we do not want/need these compat
10 * interfaces, despite what the user supplied cflags might say. They
11 * must be undefined before including xenctrl.h
12 */
13#undef XC_WANT_COMPAT_EVTCHN_API
14#undef XC_WANT_COMPAT_GNTTAB_API
15#undef XC_WANT_COMPAT_MAP_FOREIGN_API
16
d94f9486 17#include <xenctrl.h>
edfb07ed 18#include <xenstore.h>
d94f9486 19
0d09e41a 20#include "hw/xen/xen.h"
edf5ca5d 21#include "hw/pci/pci_device.h"
0ab8ed18 22#include "hw/xen/trace.h"
d94f9486 23
260cabed
PD
24extern xc_interface *xen_xc;
25
d94f9486 26/*
2f20b173 27 * We don't support Xen prior to 4.7.1.
d94f9486 28 */
d5b93ddf 29
14d015b6
PD
30#include <xenforeignmemory.h>
31
14d015b6
PD
32extern xenforeignmemory_handle *xen_fmem;
33
/*
 * Before Xen 4.9 (interface version 40900) there is no separate
 * libxendevicemodel: the device model operations live in libxenctrl, so
 * alias the handle type to xc_interface.  From 4.9 on, pull in the real
 * library header (with the libxenctrl compat shims explicitly disabled).
 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900

typedef xc_interface xendevicemodel_handle;

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */

#undef XC_WANT_COMPAT_DEVICEMODEL_API
#include <xendevicemodel.h>

#endif
44
2cbf8903
RL
45#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100
46
47static inline int xendevicemodel_relocate_memory(
48 xendevicemodel_handle *dmod, domid_t domid, uint32_t size, uint64_t src_gfn,
49 uint64_t dst_gfn)
50{
51 uint32_t i;
52 int rc;
53
54 for (i = 0; i < size; i++) {
55 unsigned long idx = src_gfn + i;
56 xen_pfn_t gpfn = dst_gfn + i;
57
58 rc = xc_domain_add_to_physmap(xen_xc, domid, XENMAPSPACE_gmfn, idx,
59 gpfn);
60 if (rc) {
61 return rc;
62 }
63 }
64
65 return 0;
66}
67
68static inline int xendevicemodel_pin_memory_cacheattr(
69 xendevicemodel_handle *dmod, domid_t domid, uint64_t start, uint64_t end,
70 uint32_t type)
71{
72 return xc_domain_pin_memory_cacheattr(xen_xc, domid, start, end, type);
73}
74
/*
 * The foreignmemory resource-mapping API only exists from Xen 4.11;
 * provide the type, the XENMEM_resource_* constants it is used with, and
 * a stub that always fails with EOPNOTSUPP so callers fall back.
 */
typedef void xenforeignmemory_resource_handle;

#define XENMEM_resource_ioreq_server 0

#define XENMEM_resource_ioreq_server_frame_bufioreq 0
#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))

static inline xenforeignmemory_resource_handle *xenforeignmemory_map_resource(
    xenforeignmemory_handle *fmem, domid_t domid, unsigned int type,
    unsigned int id, unsigned long frame, unsigned long nr_frames,
    void **paddr, int prot, int flags)
{
    errno = EOPNOTSUPP;
    return NULL;
}
90
/*
 * No-op: with CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100 a resource can
 * never have been mapped in the first place (the map stub always fails),
 * so there is nothing to release.
 */
static inline int xenforeignmemory_unmap_resource(
    xenforeignmemory_handle *fmem, xenforeignmemory_resource_handle *fres)
{
    return 0;
}

#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100 */
98
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000

/*
 * NOTE(review): name suggests callers must maintain a compat view of the
 * guest physmap when building against Xen < 4.10 — confirm against the
 * users of XEN_COMPAT_PHYSMAP.
 */
#define XEN_COMPAT_PHYSMAP
/*
 * xenforeignmemory_map2() (Xen 4.10+) adds @addr and @flags on top of
 * xenforeignmemory_map(); only the degenerate addr == NULL, flags == 0
 * case can be forwarded to the older call, which the assert enforces.
 */
static inline void *xenforeignmemory_map2(xenforeignmemory_handle *h,
                                          uint32_t dom, void *addr,
                                          int prot, int flags, size_t pages,
                                          const xen_pfn_t arr[/*pages*/],
                                          int err[/*pages*/])
{
    assert(addr == NULL && flags == 0);
    return xenforeignmemory_map(h, dom, prot, pages, arr, err);
}
111
/*
 * xentoolcore_restrict_all() only exists from Xen 4.10; the compat stub
 * always fails with ENOTTY so callers know restriction is unavailable.
 */
static inline int xentoolcore_restrict_all(domid_t domid)
{
    errno = ENOTTY;
    return -1;
}
117
/*
 * xendevicemodel_shutdown() only exists from Xen 4.10; the compat stub
 * always fails with ENOTTY.
 */
static inline int xendevicemodel_shutdown(xendevicemodel_handle *dmod,
                                          domid_t domid, unsigned int reason)
{
    errno = ENOTTY;
    return -1;
}

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 41000 */

#include <xentoolcore.h>

#endif
130
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900

/*
 * Compatibility layer for Xen < 4.9, which has no libxendevicemodel:
 * xendevicemodel_handle aliases xc_interface (see the typedef above) and
 * every xendevicemodel_*() operation below forwards to the corresponding
 * xc_hvm_*() libxenctrl call with identical arguments.
 */

/* No separate device model library to open: hand back the global handle. */
static inline xendevicemodel_handle *xendevicemodel_open(
    struct xentoollog_logger *logger, unsigned int open_flags)
{
    return xen_xc;
}

static inline int xendevicemodel_create_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
    ioservid_t *id)
{
    return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq,
                                      id);
}

static inline int xendevicemodel_get_ioreq_server_info(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
    evtchn_port_t *bufioreq_port)
{
    return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn,
                                        bufioreq_pfn, bufioreq_port);
}

static inline int xendevicemodel_map_io_range_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio,
                                               start, end);
}

static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio,
                                                   start, end);
}

static inline int xendevicemodel_map_pcidev_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment,
                                             bus, device, function);
}

static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment,
                                                 bus, device, function);
}

static inline int xendevicemodel_destroy_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
{
    return xc_hvm_destroy_ioreq_server(dmod, domid, id);
}

static inline int xendevicemodel_set_ioreq_server_state(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
{
    return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
}

static inline int xendevicemodel_set_pci_intx_level(
    xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
    uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
{
    return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device,
                                     intx, level);
}

static inline int xendevicemodel_set_isa_irq_level(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
    unsigned int level)
{
    return xc_hvm_set_isa_irq_level(dmod, domid, irq, level);
}

static inline int xendevicemodel_set_pci_link_route(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
{
    return xc_hvm_set_pci_link_route(dmod, domid, link, irq);
}

static inline int xendevicemodel_inject_msi(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
    uint32_t msi_data)
{
    return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data);
}

static inline int xendevicemodel_track_dirty_vram(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr, unsigned long *dirty_bitmap)
{
    return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr,
                                   dirty_bitmap);
}

static inline int xendevicemodel_modified_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr)
{
    return xc_hvm_modified_memory(dmod, domid, first_pfn, nr);
}

static inline int xendevicemodel_set_mem_type(
    xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
    uint64_t first_pfn, uint32_t nr)
{
    return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr);
}

#endif
251
252extern xendevicemodel_handle *xen_dmod;
253
8f25e754
PD
254static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
255 uint64_t first_pfn, uint32_t nr)
256{
d655f34e
PD
257 return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn,
258 nr);
8f25e754
PD
259}
260
261static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
262 uint8_t bus, uint8_t device,
263 uint8_t intx, unsigned int level)
264{
d655f34e
PD
265 return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus,
266 device, intx, level);
8f25e754
PD
267}
268
8f25e754
PD
269static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
270 uint32_t msi_data)
271{
d655f34e 272 return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data);
8f25e754
PD
273}
274
275static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
276 unsigned int level)
277{
d655f34e 278 return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level);
8f25e754
PD
279}
280
281static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
282 uint32_t nr, unsigned long *bitmap)
283{
d655f34e
PD
284 return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr,
285 bitmap);
8f25e754
PD
286}
287
288static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
289 uint32_t nr)
290{
d655f34e 291 return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr);
8f25e754
PD
292}
293
1c599472
PD
294static inline int xen_restrict(domid_t domid)
295{
14d015b6 296 int rc;
0ef4d87d 297 rc = xentoolcore_restrict_all(domid);
14d015b6 298 trace_xen_domid_restrict(rc ? errno : 0);
14d015b6 299 return rc;
6aa0205e
IC
300}
301
180640ea 302void destroy_hvm_domain(bool reboot);
9ce94e7c 303
eaab4d60 304/* shutdown/destroy current domain because of an error */
9edc6313 305void xen_shutdown_fatal_error(const char *fmt, ...) G_GNUC_PRINTF(1, 2);
eaab4d60 306
/*
 * Read HVM_PARAM_VMPORT_REGS_PFN (the guest frame holding the vmport
 * register page) for @dom, storing it in *vmport_regs_pfn.  Returns the
 * xc_hvm_param_get() result (>= 0 on success); when the Xen headers do
 * not define the parameter at all, always fails with -ENOSYS.
 */
#ifdef HVM_PARAM_VMPORT_REGS_PFN
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    int rc;
    uint64_t value;
    rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
    if (rc >= 0) {
        *vmport_regs_pfn = (xen_pfn_t) value;
    }
    return rc;
}
#else
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
#endif
326
260cabed 327static inline int xen_get_default_ioreq_server_info(domid_t dom,
b7665c60
PD
328 xen_pfn_t *ioreq_pfn,
329 xen_pfn_t *bufioreq_pfn,
330 evtchn_port_t
331 *bufioreq_evtchn)
332{
333 unsigned long param;
334 int rc;
335
260cabed 336 rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param);
b7665c60
PD
337 if (rc < 0) {
338 fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
339 return -1;
340 }
341
342 *ioreq_pfn = param;
343
260cabed 344 rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
b7665c60
PD
345 if (rc < 0) {
346 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
347 return -1;
348 }
349
350 *bufioreq_pfn = param;
351
260cabed 352 rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
b7665c60
PD
353 &param);
354 if (rc < 0) {
355 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
356 return -1;
357 }
358
359 *bufioreq_evtchn = param;
360
361 return 0;
362}
363
/*
 * Set when xen_create_ioreq_server() fails and we fall back to the
 * default (implicit) ioreq server: the map/unmap helpers below then do
 * nothing and xen_get_ioreq_server_info() reads the HVM params instead.
 */
static bool use_default_ioreq_server;
365
260cabed 366static inline void xen_map_memory_section(domid_t dom,
3996e85c
PD
367 ioservid_t ioservid,
368 MemoryRegionSection *section)
369{
370 hwaddr start_addr = section->offset_within_address_space;
371 ram_addr_t size = int128_get64(section->size);
372 hwaddr end_addr = start_addr + size - 1;
373
b7665c60
PD
374 if (use_default_ioreq_server) {
375 return;
376 }
377
3996e85c 378 trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
d655f34e
PD
379 xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1,
380 start_addr, end_addr);
3996e85c
PD
381}
382
260cabed 383static inline void xen_unmap_memory_section(domid_t dom,
3996e85c
PD
384 ioservid_t ioservid,
385 MemoryRegionSection *section)
386{
387 hwaddr start_addr = section->offset_within_address_space;
388 ram_addr_t size = int128_get64(section->size);
389 hwaddr end_addr = start_addr + size - 1;
390
b7665c60
PD
391 if (use_default_ioreq_server) {
392 return;
393 }
394
3996e85c 395 trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
d655f34e
PD
396 xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
397 1, start_addr, end_addr);
3996e85c
PD
398}
399
260cabed 400static inline void xen_map_io_section(domid_t dom,
3996e85c
PD
401 ioservid_t ioservid,
402 MemoryRegionSection *section)
403{
404 hwaddr start_addr = section->offset_within_address_space;
405 ram_addr_t size = int128_get64(section->size);
406 hwaddr end_addr = start_addr + size - 1;
407
b7665c60
PD
408 if (use_default_ioreq_server) {
409 return;
410 }
411
3996e85c 412 trace_xen_map_portio_range(ioservid, start_addr, end_addr);
d655f34e
PD
413 xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0,
414 start_addr, end_addr);
3996e85c
PD
415}
416
260cabed 417static inline void xen_unmap_io_section(domid_t dom,
3996e85c
PD
418 ioservid_t ioservid,
419 MemoryRegionSection *section)
420{
421 hwaddr start_addr = section->offset_within_address_space;
422 ram_addr_t size = int128_get64(section->size);
423 hwaddr end_addr = start_addr + size - 1;
424
b7665c60
PD
425 if (use_default_ioreq_server) {
426 return;
427 }
428
3996e85c 429 trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
d655f34e
PD
430 xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
431 0, start_addr, end_addr);
3996e85c
PD
432}
433
260cabed 434static inline void xen_map_pcidev(domid_t dom,
3996e85c
PD
435 ioservid_t ioservid,
436 PCIDevice *pci_dev)
437{
b7665c60
PD
438 if (use_default_ioreq_server) {
439 return;
440 }
441
cdc57472 442 trace_xen_map_pcidev(ioservid, pci_dev_bus_num(pci_dev),
3996e85c 443 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
d655f34e 444 xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0,
cdc57472 445 pci_dev_bus_num(pci_dev),
d655f34e
PD
446 PCI_SLOT(pci_dev->devfn),
447 PCI_FUNC(pci_dev->devfn));
3996e85c
PD
448}
449
260cabed 450static inline void xen_unmap_pcidev(domid_t dom,
3996e85c
PD
451 ioservid_t ioservid,
452 PCIDevice *pci_dev)
453{
b7665c60
PD
454 if (use_default_ioreq_server) {
455 return;
456 }
457
cdc57472 458 trace_xen_unmap_pcidev(ioservid, pci_dev_bus_num(pci_dev),
3996e85c 459 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
d655f34e 460 xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0,
cdc57472 461 pci_dev_bus_num(pci_dev),
d655f34e
PD
462 PCI_SLOT(pci_dev->devfn),
463 PCI_FUNC(pci_dev->devfn));
3996e85c
PD
464}
465
420927c2
SS
466static inline int xen_create_ioreq_server(domid_t dom,
467 ioservid_t *ioservid)
3996e85c 468{
d655f34e
PD
469 int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
470 HVM_IOREQSRV_BUFIOREQ_ATOMIC,
471 ioservid);
3996e85c
PD
472
473 if (rc == 0) {
474 trace_xen_ioreq_server_create(*ioservid);
420927c2 475 return rc;
3996e85c
PD
476 }
477
b7665c60
PD
478 *ioservid = 0;
479 use_default_ioreq_server = true;
480 trace_xen_default_ioreq_server();
420927c2
SS
481
482 return rc;
3996e85c
PD
483}
484
260cabed 485static inline void xen_destroy_ioreq_server(domid_t dom,
3996e85c
PD
486 ioservid_t ioservid)
487{
b7665c60
PD
488 if (use_default_ioreq_server) {
489 return;
490 }
491
3996e85c 492 trace_xen_ioreq_server_destroy(ioservid);
d655f34e 493 xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid);
3996e85c
PD
494}
495
260cabed 496static inline int xen_get_ioreq_server_info(domid_t dom,
3996e85c
PD
497 ioservid_t ioservid,
498 xen_pfn_t *ioreq_pfn,
499 xen_pfn_t *bufioreq_pfn,
500 evtchn_port_t *bufioreq_evtchn)
501{
b7665c60 502 if (use_default_ioreq_server) {
260cabed 503 return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
b7665c60
PD
504 bufioreq_pfn,
505 bufioreq_evtchn);
506 }
507
d655f34e
PD
508 return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid,
509 ioreq_pfn, bufioreq_pfn,
510 bufioreq_evtchn);
3996e85c
PD
511}
512
260cabed 513static inline int xen_set_ioreq_server_state(domid_t dom,
3996e85c
PD
514 ioservid_t ioservid,
515 bool enable)
516{
b7665c60
PD
517 if (use_default_ioreq_server) {
518 return 0;
519 }
520
3996e85c 521 trace_xen_ioreq_server_state(ioservid, enable);
d655f34e
PD
522 return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid,
523 enable);
3996e85c
PD
524}
525
01bb72af 526#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41500
0c8ab1cd
OT
527static inline int xendevicemodel_set_irq_level(xendevicemodel_handle *dmod,
528 domid_t domid, uint32_t irq,
529 unsigned int level)
530{
01bb72af 531 return -1;
0c8ab1cd
OT
532}
533#endif
534
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41700
/*
 * Fallback definitions of the virtio-mmio guest layout constants, which
 * the Xen public headers only provide from 4.17 on.
 * NOTE(review): upstream later reworked the condition for skipping these
 * defines (the headers may not provide them on every architecture even
 * with 4.17+) — confirm this version-only check is still sufficient.
 */
#define GUEST_VIRTIO_MMIO_BASE xen_mk_ullong(0x02000000)
#define GUEST_VIRTIO_MMIO_SIZE xen_mk_ullong(0x00100000)
#define GUEST_VIRTIO_MMIO_SPI_FIRST 33
#define GUEST_VIRTIO_MMIO_SPI_LAST 43
#endif
541
#if defined(__i386__) || defined(__x86_64__)
/*
 * Guest physical RAM layout constants used on x86: two banks, a low bank
 * below 4GiB and a high bank starting at 8GiB (see per-line comments).
 * Presumably the Xen public headers define GUEST_RAM* only for Arm
 * guests, hence these local definitions — confirm against the headers.
 */
#define GUEST_RAM_BANKS 2
#define GUEST_RAM0_BASE 0x40000000ULL /* 3GB of low RAM @ 1GB */
#define GUEST_RAM0_SIZE 0xc0000000ULL
#define GUEST_RAM1_BASE 0x0200000000ULL /* 1016GB of RAM @ 8GB */
#define GUEST_RAM1_SIZE 0xfe00000000ULL
#endif
549
e2abfe5e 550#endif /* QEMU_HW_XEN_NATIVE_H */