]> git.proxmox.com Git - mirror_qemu.git/blob - include/hw/xen/xen_common.h
ec3ca566dbde6836da202dd32eeb4901d8242450
[mirror_qemu.git] / include / hw / xen / xen_common.h
1 #ifndef QEMU_HW_XEN_COMMON_H
2 #define QEMU_HW_XEN_COMMON_H 1
3
4 #include "config-host.h"
5
6 #include <stddef.h>
7 #include <inttypes.h>
8
9 /*
10 * If we have new enough libxenctrl then we do not want/need these compat
11 * interfaces, despite what the user supplied cflags might say. They
12 * must be undefined before including xenctrl.h
13 */
14 #undef XC_WANT_COMPAT_EVTCHN_API
15 #undef XC_WANT_COMPAT_GNTTAB_API
16 #undef XC_WANT_COMPAT_MAP_FOREIGN_API
17
18 #include <xenctrl.h>
19 #include <xenstore.h>
20 #include <xen/io/xenbus.h>
21
22 #include "hw/hw.h"
23 #include "hw/xen/xen.h"
24 #include "hw/pci/pci.h"
25 #include "qemu/queue.h"
26 #include "trace.h"
27
28 /*
29 * We don't support Xen prior to 4.2.0.
30 */
31
32 /* Xen 4.2 thru 4.6 */
33 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 471
34
35 typedef xc_interface *XenXC;
36 typedef xc_interface *xenforeignmemory_handle;
37 typedef xc_evtchn xenevtchn_handle;
38 typedef xc_gnttab xengnttab_handle;
39
40 # define XC_INTERFACE_FMT "%p"
41 # define XC_HANDLER_INITIAL_VALUE NULL
42
43 #define xenevtchn_open(l, f) xc_evtchn_open(l, f);
44 #define xenevtchn_close(h) xc_evtchn_close(h)
45 #define xenevtchn_fd(h) xc_evtchn_fd(h)
46 #define xenevtchn_pending(h) xc_evtchn_pending(h)
47 #define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
48 #define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
49 #define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
50 #define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
51
52 #define xengnttab_open(l, f) xc_gnttab_open(l, f)
53 #define xengnttab_close(h) xc_gnttab_close(h)
54 #define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
55 #define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
56 #define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
57 #define xengnttab_map_grant_refs(h, c, d, r, p) \
58 xc_gnttab_map_grant_refs(h, c, d, r, p)
59
/*
 * Open a libxenctrl interface handle (pre-4.7.1 signature).  Thin
 * wrapper so callers stay independent of xc_interface_open() details.
 */
static inline XenXC xen_xc_interface_open(void *logger, void *dombuild_logger,
                                          unsigned int open_flags)
{
    return xc_interface_open(logger, dombuild_logger, open_flags);
}
65
66 /* See below for xenforeignmemory_* APIs */
67
68 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 471 */
69
70 typedef xc_interface *XenXC;
71
72 # define XC_INTERFACE_FMT "%p"
73 # define XC_HANDLER_INITIAL_VALUE NULL
74
75 #include <xenevtchn.h>
76 #include <xengnttab.h>
77 #include <xenforeignmemory.h>
78
/*
 * Open a libxenctrl interface handle (Xen >= 4.7.1 build).  Identical
 * wrapper to the compat branch; kept so callers use one name.
 */
static inline XenXC xen_xc_interface_open(void *logger, void *dombuild_logger,
                                          unsigned int open_flags)
{
    return xc_interface_open(logger, dombuild_logger, open_flags);
}
84 #endif
85
86 void destroy_hvm_domain(bool reboot);
87
88 /* shutdown/destroy current domain because of an error */
89 void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
90
91 #ifdef HVM_PARAM_VMPORT_REGS_PFN
92 static inline int xen_get_vmport_regs_pfn(XenXC xc, domid_t dom,
93 xen_pfn_t *vmport_regs_pfn)
94 {
95 int rc;
96 uint64_t value;
97 rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
98 if (rc >= 0) {
99 *vmport_regs_pfn = (xen_pfn_t) value;
100 }
101 return rc;
102 }
103 #else
/*
 * Fallback when the Xen headers do not define HVM_PARAM_VMPORT_REGS_PFN:
 * the parameter cannot be queried, so report -ENOSYS unconditionally.
 */
static inline int xen_get_vmport_regs_pfn(XenXC xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
109 #endif
110
111 /* Xen before 4.6 */
112 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
113
114 #ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
115 #define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
116 #endif
117
118 #endif
119
120 /* Xen before 4.5 */
121 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 450
122
123 #ifndef HVM_PARAM_BUFIOREQ_EVTCHN
124 #define HVM_PARAM_BUFIOREQ_EVTCHN 26
125 #endif
126
127 #define IOREQ_TYPE_PCI_CONFIG 2
128
129 typedef uint16_t ioservid_t;
130
/*
 * Xen before 4.5 has no ioreq server API, so these hooks are deliberate
 * no-ops kept only so callers can use one code path on all versions.
 */
static inline void xen_map_memory_section(XenXC xc, domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}

static inline void xen_unmap_memory_section(XenXC xc, domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}

static inline void xen_map_io_section(XenXC xc, domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}

static inline void xen_unmap_io_section(XenXC xc, domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}

static inline void xen_map_pcidev(XenXC xc, domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}

static inline void xen_unmap_pcidev(XenXC xc, domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}

/* Nothing to create: report success and leave *ioservid untouched. */
static inline int xen_create_ioreq_server(XenXC xc, domid_t dom,
                                          ioservid_t *ioservid)
{
    return 0;
}

static inline void xen_destroy_ioreq_server(XenXC xc, domid_t dom,
                                            ioservid_t ioservid)
{
}
177
178 static inline int xen_get_ioreq_server_info(XenXC xc, domid_t dom,
179 ioservid_t ioservid,
180 xen_pfn_t *ioreq_pfn,
181 xen_pfn_t *bufioreq_pfn,
182 evtchn_port_t *bufioreq_evtchn)
183 {
184 unsigned long param;
185 int rc;
186
187 rc = xc_get_hvm_param(xc, dom, HVM_PARAM_IOREQ_PFN, &param);
188 if (rc < 0) {
189 fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
190 return -1;
191 }
192
193 *ioreq_pfn = param;
194
195 rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
196 if (rc < 0) {
197 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
198 return -1;
199 }
200
201 *bufioreq_pfn = param;
202
203 rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
204 &param);
205 if (rc < 0) {
206 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
207 return -1;
208 }
209
210 *bufioreq_evtchn = param;
211
212 return 0;
213 }
214
/* Pre-4.5 stub: there is no server state to toggle; always succeed. */
static inline int xen_set_ioreq_server_state(XenXC xc, domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    return 0;
}
221
222 /* Xen 4.5 */
223 #else
224
225 static inline void xen_map_memory_section(XenXC xc, domid_t dom,
226 ioservid_t ioservid,
227 MemoryRegionSection *section)
228 {
229 hwaddr start_addr = section->offset_within_address_space;
230 ram_addr_t size = int128_get64(section->size);
231 hwaddr end_addr = start_addr + size - 1;
232
233 trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
234 xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 1,
235 start_addr, end_addr);
236 }
237
238 static inline void xen_unmap_memory_section(XenXC xc, domid_t dom,
239 ioservid_t ioservid,
240 MemoryRegionSection *section)
241 {
242 hwaddr start_addr = section->offset_within_address_space;
243 ram_addr_t size = int128_get64(section->size);
244 hwaddr end_addr = start_addr + size - 1;
245
246 trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
247 xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 1,
248 start_addr, end_addr);
249 }
250
251 static inline void xen_map_io_section(XenXC xc, domid_t dom,
252 ioservid_t ioservid,
253 MemoryRegionSection *section)
254 {
255 hwaddr start_addr = section->offset_within_address_space;
256 ram_addr_t size = int128_get64(section->size);
257 hwaddr end_addr = start_addr + size - 1;
258
259 trace_xen_map_portio_range(ioservid, start_addr, end_addr);
260 xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 0,
261 start_addr, end_addr);
262 }
263
264 static inline void xen_unmap_io_section(XenXC xc, domid_t dom,
265 ioservid_t ioservid,
266 MemoryRegionSection *section)
267 {
268 hwaddr start_addr = section->offset_within_address_space;
269 ram_addr_t size = int128_get64(section->size);
270 hwaddr end_addr = start_addr + size - 1;
271
272 trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
273 xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 0,
274 start_addr, end_addr);
275 }
276
277 static inline void xen_map_pcidev(XenXC xc, domid_t dom,
278 ioservid_t ioservid,
279 PCIDevice *pci_dev)
280 {
281 trace_xen_map_pcidev(ioservid, pci_bus_num(pci_dev->bus),
282 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
283 xc_hvm_map_pcidev_to_ioreq_server(xc, dom, ioservid,
284 0, pci_bus_num(pci_dev->bus),
285 PCI_SLOT(pci_dev->devfn),
286 PCI_FUNC(pci_dev->devfn));
287 }
288
289 static inline void xen_unmap_pcidev(XenXC xc, domid_t dom,
290 ioservid_t ioservid,
291 PCIDevice *pci_dev)
292 {
293 trace_xen_unmap_pcidev(ioservid, pci_bus_num(pci_dev->bus),
294 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
295 xc_hvm_unmap_pcidev_from_ioreq_server(xc, dom, ioservid,
296 0, pci_bus_num(pci_dev->bus),
297 PCI_SLOT(pci_dev->devfn),
298 PCI_FUNC(pci_dev->devfn));
299 }
300
301 static inline int xen_create_ioreq_server(XenXC xc, domid_t dom,
302 ioservid_t *ioservid)
303 {
304 int rc = xc_hvm_create_ioreq_server(xc, dom, HVM_IOREQSRV_BUFIOREQ_ATOMIC,
305 ioservid);
306
307 if (rc == 0) {
308 trace_xen_ioreq_server_create(*ioservid);
309 }
310
311 return rc;
312 }
313
/* Destroy the domain's ioreq server; any failure is not reported. */
static inline void xen_destroy_ioreq_server(XenXC xc, domid_t dom,
                                            ioservid_t ioservid)
{
    trace_xen_ioreq_server_destroy(ioservid);
    xc_hvm_destroy_ioreq_server(xc, dom, ioservid);
}
320
/*
 * Xen >= 4.5: query the ioreq/buffered-ioreq pfns and the buffered
 * event channel directly from the ioreq server API.  Thin wrapper,
 * returns the libxenctrl result unchanged.
 */
static inline int xen_get_ioreq_server_info(XenXC xc, domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xc_hvm_get_ioreq_server_info(xc, dom, ioservid,
                                        ioreq_pfn, bufioreq_pfn,
                                        bufioreq_evtchn);
}
331
/* Enable or disable the ioreq server; returns the libxenctrl result. */
static inline int xen_set_ioreq_server_state(XenXC xc, domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    trace_xen_ioreq_server_state(ioservid, enable);
    return xc_hvm_set_ioreq_server_state(xc, dom, ioservid, enable);
}
339
340 #endif
341
342 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
/*
 * Xen before 4.6: xc_domain_add_to_physmap() already returns the error
 * value directly, so pass its result straight through.
 */
static inline int xen_xc_domain_add_to_physmap(XenXC xch, uint32_t domid,
                                               unsigned int space,
                                               unsigned long idx,
                                               xen_pfn_t gpfn)
{
    return xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
}
350 #else
351 static inline int xen_xc_domain_add_to_physmap(XenXC xch, uint32_t domid,
352 unsigned int space,
353 unsigned long idx,
354 xen_pfn_t gpfn)
355 {
356 /* In Xen 4.6 rc is -1 and errno contains the error value. */
357 int rc = xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
358 if (rc == -1)
359 return errno;
360 return rc;
361 }
362 #endif
363
364 #ifdef CONFIG_XEN_PV_DOMAIN_BUILD
365 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 470
/*
 * Create a PV domain (Xen before 4.7): pre-4.7 xc_domain_create()
 * takes exactly these five arguments.
 */
static inline int xen_domain_create(XenXC xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid);
}
372 #else
/*
 * Create a PV domain (Xen >= 4.7): the API grew a sixth argument; NULL
 * selects the default — presumably the domain config, confirm against
 * the matching xenctrl.h.
 */
static inline int xen_domain_create(XenXC xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid, NULL);
}
379 #endif
380 #endif
381
382 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 471
383
384 #define xenforeignmemory_open(l, f) &xen_xc
385
386 static inline void *xenforeignmemory_map(XenXC *h, uint32_t dom,
387 int prot, size_t pages,
388 const xen_pfn_t arr[/*pages*/],
389 int err[/*pages*/])
390 {
391 if (err)
392 return xc_map_foreign_bulk(*h, dom, prot, arr, err, pages);
393 else
394 return xc_map_foreign_pages(*h, dom, prot, arr, pages);
395 }
396
/*
 * Unmap @s pages previously mapped at @p (@h is unused in the compat
 * path).  Arguments are parenthesized so an expression such as
 * `n + 1` for @s multiplies correctly against XC_PAGE_SIZE.
 */
#define xenforeignmemory_unmap(h, p, s) munmap((p), (s) * XC_PAGE_SIZE)
398
399 #endif
400
401 #endif /* QEMU_HW_XEN_COMMON_H */