/*
 * include/hw/xen/xen_common.h
 * Xen version-compatibility helpers shared by QEMU's Xen backends
 * (evtchn/gnttab/foreignmemory shims and ioreq-server wrappers).
 */
1 #ifndef QEMU_HW_XEN_COMMON_H
2 #define QEMU_HW_XEN_COMMON_H
3
4 /*
5 * If we have new enough libxenctrl then we do not want/need these compat
6 * interfaces, despite what the user supplied cflags might say. They
7 * must be undefined before including xenctrl.h
8 */
9 #undef XC_WANT_COMPAT_EVTCHN_API
10 #undef XC_WANT_COMPAT_GNTTAB_API
11 #undef XC_WANT_COMPAT_MAP_FOREIGN_API
12
13 #include <xenctrl.h>
14 #include <xenstore.h>
15 #include <xen/io/xenbus.h>
16
17 #include "hw/hw.h"
18 #include "hw/xen/xen.h"
19 #include "hw/pci/pci.h"
20 #include "qemu/queue.h"
21 #include "trace.h"
22
23 /*
24 * We don't support Xen prior to 4.2.0.
25 */
26
27 /* Xen 4.2 through 4.6 */
28 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 471
29
30 typedef xc_interface xenforeignmemory_handle;
31 typedef xc_evtchn xenevtchn_handle;
32 typedef xc_gnttab xengnttab_handle;
33
34 #define xenevtchn_open(l, f) xc_evtchn_open(l, f);
35 #define xenevtchn_close(h) xc_evtchn_close(h)
36 #define xenevtchn_fd(h) xc_evtchn_fd(h)
37 #define xenevtchn_pending(h) xc_evtchn_pending(h)
38 #define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
39 #define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
40 #define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
41 #define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
42
43 #define xengnttab_open(l, f) xc_gnttab_open(l, f)
44 #define xengnttab_close(h) xc_gnttab_close(h)
45 #define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
46 #define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
47 #define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
48 #define xengnttab_map_grant_refs(h, c, d, r, p) \
49 xc_gnttab_map_grant_refs(h, c, d, r, p)
50 #define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
51 xc_gnttab_map_domain_grant_refs(h, c, d, r, p)
52
53 #define xenforeignmemory_open(l, f) xen_xc
54
55 static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
56 int prot, size_t pages,
57 const xen_pfn_t arr[/*pages*/],
58 int err[/*pages*/])
59 {
60 if (err)
61 return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
62 else
63 return xc_map_foreign_pages(h, dom, prot, arr, pages);
64 }
65
66 #define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE)
67
68 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 471 */
69
70 #include <xenevtchn.h>
71 #include <xengnttab.h>
72 #include <xenforeignmemory.h>
73
74 #endif
75
76 void destroy_hvm_domain(bool reboot);
77
78 /* shutdown/destroy current domain because of an error */
79 void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
80
81 #ifdef HVM_PARAM_VMPORT_REGS_PFN
82 static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
83 xen_pfn_t *vmport_regs_pfn)
84 {
85 int rc;
86 uint64_t value;
87 rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
88 if (rc >= 0) {
89 *vmport_regs_pfn = (xen_pfn_t) value;
90 }
91 return rc;
92 }
93 #else
/* Xen headers without HVM_PARAM_VMPORT_REGS_PFN: vmport is unsupported. */
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
99 #endif
100
101 /* Xen before 4.6 */
102 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
103
104 #ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
105 #define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
106 #endif
107
108 #endif
109
110 static inline int xen_get_default_ioreq_server_info(xc_interface *xc,
111 domid_t dom,
112 xen_pfn_t *ioreq_pfn,
113 xen_pfn_t *bufioreq_pfn,
114 evtchn_port_t
115 *bufioreq_evtchn)
116 {
117 unsigned long param;
118 int rc;
119
120 rc = xc_get_hvm_param(xc, dom, HVM_PARAM_IOREQ_PFN, &param);
121 if (rc < 0) {
122 fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
123 return -1;
124 }
125
126 *ioreq_pfn = param;
127
128 rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
129 if (rc < 0) {
130 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
131 return -1;
132 }
133
134 *bufioreq_pfn = param;
135
136 rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
137 &param);
138 if (rc < 0) {
139 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
140 return -1;
141 }
142
143 *bufioreq_evtchn = param;
144
145 return 0;
146 }
147
148 /* Xen before 4.5 */
149 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 450
150
151 #ifndef HVM_PARAM_BUFIOREQ_EVTCHN
152 #define HVM_PARAM_BUFIOREQ_EVTCHN 26
153 #endif
154
155 #define IOREQ_TYPE_PCI_CONFIG 2
156
157 typedef uint16_t ioservid_t;
158
/* Pre-Xen-4.5: no ioreq server API, so MMIO sections need no registration. */
static inline void xen_map_memory_section(xc_interface *xc, domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}
164
/* Pre-Xen-4.5: nothing was registered, so nothing to unregister. */
static inline void xen_unmap_memory_section(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}
170
/* Pre-Xen-4.5: no ioreq server API, so port-I/O sections need no mapping. */
static inline void xen_map_io_section(xc_interface *xc, domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}
176
/* Pre-Xen-4.5: nothing was mapped, so nothing to unmap. */
static inline void xen_unmap_io_section(xc_interface *xc, domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}
182
/* Pre-Xen-4.5: PCI config accesses are not routed per ioreq server. */
static inline void xen_map_pcidev(xc_interface *xc, domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}
188
/* Pre-Xen-4.5: nothing was mapped for this PCI device; no-op. */
static inline void xen_unmap_pcidev(xc_interface *xc, domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}
194
/*
 * Pre-Xen-4.5: only the default ioreq server exists; nothing to create.
 * NOTE(review): *ioservid is left untouched -- presumably callers ignore
 * it on this code path; verify at the call sites.
 */
static inline void xen_create_ioreq_server(xc_interface *xc, domid_t dom,
                                           ioservid_t *ioservid)
{
}
199
/* Pre-Xen-4.5: the default ioreq server cannot be destroyed; no-op. */
static inline void xen_destroy_ioreq_server(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid)
{
}
204
/*
 * Pre-Xen-4.5: only the default ioreq server exists, so @ioservid is
 * ignored and the default server's pages/event channel are reported.
 */
static inline int xen_get_ioreq_server_info(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xen_get_default_ioreq_server_info(xc, dom, ioreq_pfn, bufioreq_pfn,
                                             bufioreq_evtchn);
}
214
/* Pre-Xen-4.5: the default ioreq server is always active; report success. */
static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    return 0;
}
221
222 /* Xen 4.5 */
223 #else
224
/*
 * Set when creating a dedicated ioreq server fails and we fall back to the
 * domain's default one; the wrappers below then become no-ops/forwarders.
 * NOTE(review): 'static' in a header gives every translation unit its own
 * copy -- assumes a single TU drives the ioreq server; confirm.
 */
static bool use_default_ioreq_server;
226
227 static inline void xen_map_memory_section(xc_interface *xc, domid_t dom,
228 ioservid_t ioservid,
229 MemoryRegionSection *section)
230 {
231 hwaddr start_addr = section->offset_within_address_space;
232 ram_addr_t size = int128_get64(section->size);
233 hwaddr end_addr = start_addr + size - 1;
234
235 if (use_default_ioreq_server) {
236 return;
237 }
238
239 trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
240 xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 1,
241 start_addr, end_addr);
242 }
243
244 static inline void xen_unmap_memory_section(xc_interface *xc, domid_t dom,
245 ioservid_t ioservid,
246 MemoryRegionSection *section)
247 {
248 hwaddr start_addr = section->offset_within_address_space;
249 ram_addr_t size = int128_get64(section->size);
250 hwaddr end_addr = start_addr + size - 1;
251
252 if (use_default_ioreq_server) {
253 return;
254 }
255
256
257 trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
258 xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 1,
259 start_addr, end_addr);
260 }
261
262 static inline void xen_map_io_section(xc_interface *xc, domid_t dom,
263 ioservid_t ioservid,
264 MemoryRegionSection *section)
265 {
266 hwaddr start_addr = section->offset_within_address_space;
267 ram_addr_t size = int128_get64(section->size);
268 hwaddr end_addr = start_addr + size - 1;
269
270 if (use_default_ioreq_server) {
271 return;
272 }
273
274
275 trace_xen_map_portio_range(ioservid, start_addr, end_addr);
276 xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 0,
277 start_addr, end_addr);
278 }
279
280 static inline void xen_unmap_io_section(xc_interface *xc, domid_t dom,
281 ioservid_t ioservid,
282 MemoryRegionSection *section)
283 {
284 hwaddr start_addr = section->offset_within_address_space;
285 ram_addr_t size = int128_get64(section->size);
286 hwaddr end_addr = start_addr + size - 1;
287
288 if (use_default_ioreq_server) {
289 return;
290 }
291
292 trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
293 xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 0,
294 start_addr, end_addr);
295 }
296
297 static inline void xen_map_pcidev(xc_interface *xc, domid_t dom,
298 ioservid_t ioservid,
299 PCIDevice *pci_dev)
300 {
301 if (use_default_ioreq_server) {
302 return;
303 }
304
305 trace_xen_map_pcidev(ioservid, pci_bus_num(pci_dev->bus),
306 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
307 xc_hvm_map_pcidev_to_ioreq_server(xc, dom, ioservid,
308 0, pci_bus_num(pci_dev->bus),
309 PCI_SLOT(pci_dev->devfn),
310 PCI_FUNC(pci_dev->devfn));
311 }
312
313 static inline void xen_unmap_pcidev(xc_interface *xc, domid_t dom,
314 ioservid_t ioservid,
315 PCIDevice *pci_dev)
316 {
317 if (use_default_ioreq_server) {
318 return;
319 }
320
321 trace_xen_unmap_pcidev(ioservid, pci_bus_num(pci_dev->bus),
322 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
323 xc_hvm_unmap_pcidev_from_ioreq_server(xc, dom, ioservid,
324 0, pci_bus_num(pci_dev->bus),
325 PCI_SLOT(pci_dev->devfn),
326 PCI_FUNC(pci_dev->devfn));
327 }
328
329 static inline void xen_create_ioreq_server(xc_interface *xc, domid_t dom,
330 ioservid_t *ioservid)
331 {
332 int rc = xc_hvm_create_ioreq_server(xc, dom, HVM_IOREQSRV_BUFIOREQ_ATOMIC,
333 ioservid);
334
335 if (rc == 0) {
336 trace_xen_ioreq_server_create(*ioservid);
337 return;
338 }
339
340 *ioservid = 0;
341 use_default_ioreq_server = true;
342 trace_xen_default_ioreq_server();
343 }
344
/* Tear down our dedicated ioreq server (no-op if we fell back to default). */
static inline void xen_destroy_ioreq_server(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_ioreq_server_destroy(ioservid);
    xc_hvm_destroy_ioreq_server(xc, dom, ioservid);
}
355
/*
 * Report the ioreq/buffered-ioreq pages and the buffered-ioreq event
 * channel -- from our dedicated ioreq server, or via the legacy HVM
 * params if we fell back to the default server.
 */
static inline int xen_get_ioreq_server_info(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    if (use_default_ioreq_server) {
        return xen_get_default_ioreq_server_info(xc, dom, ioreq_pfn,
                                                 bufioreq_pfn,
                                                 bufioreq_evtchn);
    }

    return xc_hvm_get_ioreq_server_info(xc, dom, ioservid,
                                        ioreq_pfn, bufioreq_pfn,
                                        bufioreq_evtchn);
}
372
/* Enable/disable our ioreq server; the default server is always active. */
static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    if (use_default_ioreq_server) {
        return 0;
    }

    trace_xen_ioreq_server_state(ioservid, enable);
    return xc_hvm_set_ioreq_server_state(xc, dom, ioservid, enable);
}
384
385 #endif
386
387 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
/*
 * Pre-Xen-4.6 wrapper: presumably xc_domain_add_to_physmap() reports the
 * error value directly here (cf. the >= 4.6 variant below, which must
 * translate from -1/errno) -- TODO confirm against the Xen 4.2-4.5 libxc.
 */
static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
                                               unsigned int space,
                                               unsigned long idx,
                                               xen_pfn_t gpfn)
{
    return xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
}
395 #else
396 static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
397 unsigned int space,
398 unsigned long idx,
399 xen_pfn_t gpfn)
400 {
401 /* In Xen 4.6 rc is -1 and errno contains the error value. */
402 int rc = xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
403 if (rc == -1)
404 return errno;
405 return rc;
406 }
407 #endif
408
409 #ifdef CONFIG_XEN_PV_DOMAIN_BUILD
410 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 470
/* Pre-Xen-4.7 wrapper for the five-argument xc_domain_create(). */
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid);
}
417 #else
/*
 * Xen >= 4.7: xc_domain_create() grew an extra trailing argument;
 * pass NULL to keep the pre-4.7 default behaviour.
 */
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid, NULL);
}
424 #endif
425 #endif
426
427 /* Xen before 4.8 */
428
429 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 480
430
431
/*
 * Xen before 4.8 has no grant-copy interface: provide a placeholder
 * segment type and a stub that reports the operation as unsupported,
 * so callers can fall back to grant mapping.
 */
typedef void *xengnttab_grant_copy_segment_t;

static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
                                       xengnttab_grant_copy_segment_t *segs)
{
    return -ENOSYS;
}
439 #endif
440
441 #endif /* QEMU_HW_XEN_COMMON_H */