/*
 * include/hw/xen/xen_common.h (QEMU mirror snapshot)
 * State as of commit "xen: drop XenXC and associated interface wrappers".
 */
1 #ifndef QEMU_HW_XEN_COMMON_H
2 #define QEMU_HW_XEN_COMMON_H 1
3
4 #include "config-host.h"
5
6 #include <stddef.h>
7 #include <inttypes.h>
8
9 /*
10 * If we have new enough libxenctrl then we do not want/need these compat
11 * interfaces, despite what the user supplied cflags might say. They
12 * must be undefined before including xenctrl.h
13 */
14 #undef XC_WANT_COMPAT_EVTCHN_API
15 #undef XC_WANT_COMPAT_GNTTAB_API
16 #undef XC_WANT_COMPAT_MAP_FOREIGN_API
17
18 #include <xenctrl.h>
19 #include <xenstore.h>
20 #include <xen/io/xenbus.h>
21
22 #include "hw/hw.h"
23 #include "hw/xen/xen.h"
24 #include "hw/pci/pci.h"
25 #include "qemu/queue.h"
26 #include "trace.h"
27
28 /*
29 * We don't support Xen prior to 4.2.0.
30 */
31
32 /* Xen 4.2 thru 4.6 */
33 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 471
34
35 typedef xc_interface xenforeignmemory_handle;
36 typedef xc_evtchn xenevtchn_handle;
37 typedef xc_gnttab xengnttab_handle;
38
/*
 * Compat for Xen 4.2 .. 4.6: map the xenevtchn_* API onto the old
 * libxenctrl xc_evtchn_* calls.  The expansions must NOT end in a
 * semicolon, so that the macros behave like ordinary function calls
 * inside expressions and un-braced if/else bodies.  (The original
 * xenevtchn_open wrapper had a stray trailing ';' — fixed here.)
 */
#define xenevtchn_open(l, f) xc_evtchn_open(l, f)
#define xenevtchn_close(h) xc_evtchn_close(h)
#define xenevtchn_fd(h) xc_evtchn_fd(h)
#define xenevtchn_pending(h) xc_evtchn_pending(h)
#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
47
/*
 * Compat for Xen 4.2 .. 4.6: map the xengnttab_* API onto the old
 * libxenctrl xc_gnttab_* calls.  Note xengnttab_unmap maps onto
 * xc_gnttab_munmap (different name in the old API).
 */
#define xengnttab_open(l, f) xc_gnttab_open(l, f)
#define xengnttab_close(h) xc_gnttab_close(h)
#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
#define xengnttab_map_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_grant_refs(h, c, d, r, p)
55
56 /* See below for xenforeignmemory_* APIs */
57
58 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 471 */
59
60 #include <xenevtchn.h>
61 #include <xengnttab.h>
62 #include <xenforeignmemory.h>
63
64 #endif
65
/* Destroy — or, when @reboot is true, presumably reboot — the current
 * HVM domain (implementation lives elsewhere; confirm semantics there). */
void destroy_hvm_domain(bool reboot);

/* shutdown/destroy current domain because of an error */
void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
70
71 #ifdef HVM_PARAM_VMPORT_REGS_PFN
72 static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
73 xen_pfn_t *vmport_regs_pfn)
74 {
75 int rc;
76 uint64_t value;
77 rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
78 if (rc >= 0) {
79 *vmport_regs_pfn = (xen_pfn_t) value;
80 }
81 return rc;
82 }
83 #else
84 static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
85 xen_pfn_t *vmport_regs_pfn)
86 {
87 return -ENOSYS;
88 }
89 #endif
90
91 /* Xen before 4.6 */
92 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
93
94 #ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
95 #define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
96 #endif
97
98 #endif
99
/* Xen before 4.5 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 450

/* Not defined by pre-4.5 headers; value taken from newer Xen headers
 * (NOTE(review): confirm against xen/hvm/params.h). */
#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

/* ioreq type for PCI config cycles; value matches the newer ABI
 * (NOTE(review): confirm against xen/hvm/ioreq.h). */
#define IOREQ_TYPE_PCI_CONFIG 2

/* ioservid_t only exists from Xen 4.5; provide it so the stub
 * prototypes below stay identical across versions. */
typedef uint16_t ioservid_t;
110
/*
 * Stubs: ioreq servers (and the associated range/pcidev mapping
 * hypercalls) only exist from Xen 4.5 onwards, so on older Xen all of
 * these are deliberate no-ops with the same signatures as the real
 * implementations below.
 */
static inline void xen_map_memory_section(xc_interface *xc, domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}

static inline void xen_unmap_memory_section(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}

static inline void xen_map_io_section(xc_interface *xc, domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}

static inline void xen_unmap_io_section(xc_interface *xc, domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}

static inline void xen_map_pcidev(xc_interface *xc, domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}

static inline void xen_unmap_pcidev(xc_interface *xc, domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}

static inline int xen_create_ioreq_server(xc_interface *xc, domid_t dom,
                                          ioservid_t *ioservid)
{
    /* Report success; *ioservid is left unwritten on this path. */
    return 0;
}

static inline void xen_destroy_ioreq_server(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid)
{
}
157
158 static inline int xen_get_ioreq_server_info(xc_interface *xc, domid_t dom,
159 ioservid_t ioservid,
160 xen_pfn_t *ioreq_pfn,
161 xen_pfn_t *bufioreq_pfn,
162 evtchn_port_t *bufioreq_evtchn)
163 {
164 unsigned long param;
165 int rc;
166
167 rc = xc_get_hvm_param(xc, dom, HVM_PARAM_IOREQ_PFN, &param);
168 if (rc < 0) {
169 fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
170 return -1;
171 }
172
173 *ioreq_pfn = param;
174
175 rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
176 if (rc < 0) {
177 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
178 return -1;
179 }
180
181 *bufioreq_pfn = param;
182
183 rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
184 &param);
185 if (rc < 0) {
186 fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
187 return -1;
188 }
189
190 *bufioreq_evtchn = param;
191
192 return 0;
193 }
194
static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    /* No ioreq servers before Xen 4.5: nothing to enable or disable. */
    return 0;
}
201
202 /* Xen 4.5 */
203 #else
204
205 static inline void xen_map_memory_section(xc_interface *xc, domid_t dom,
206 ioservid_t ioservid,
207 MemoryRegionSection *section)
208 {
209 hwaddr start_addr = section->offset_within_address_space;
210 ram_addr_t size = int128_get64(section->size);
211 hwaddr end_addr = start_addr + size - 1;
212
213 trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
214 xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 1,
215 start_addr, end_addr);
216 }
217
218 static inline void xen_unmap_memory_section(xc_interface *xc, domid_t dom,
219 ioservid_t ioservid,
220 MemoryRegionSection *section)
221 {
222 hwaddr start_addr = section->offset_within_address_space;
223 ram_addr_t size = int128_get64(section->size);
224 hwaddr end_addr = start_addr + size - 1;
225
226 trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
227 xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 1,
228 start_addr, end_addr);
229 }
230
231 static inline void xen_map_io_section(xc_interface *xc, domid_t dom,
232 ioservid_t ioservid,
233 MemoryRegionSection *section)
234 {
235 hwaddr start_addr = section->offset_within_address_space;
236 ram_addr_t size = int128_get64(section->size);
237 hwaddr end_addr = start_addr + size - 1;
238
239 trace_xen_map_portio_range(ioservid, start_addr, end_addr);
240 xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 0,
241 start_addr, end_addr);
242 }
243
244 static inline void xen_unmap_io_section(xc_interface *xc, domid_t dom,
245 ioservid_t ioservid,
246 MemoryRegionSection *section)
247 {
248 hwaddr start_addr = section->offset_within_address_space;
249 ram_addr_t size = int128_get64(section->size);
250 hwaddr end_addr = start_addr + size - 1;
251
252 trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
253 xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 0,
254 start_addr, end_addr);
255 }
256
257 static inline void xen_map_pcidev(xc_interface *xc, domid_t dom,
258 ioservid_t ioservid,
259 PCIDevice *pci_dev)
260 {
261 trace_xen_map_pcidev(ioservid, pci_bus_num(pci_dev->bus),
262 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
263 xc_hvm_map_pcidev_to_ioreq_server(xc, dom, ioservid,
264 0, pci_bus_num(pci_dev->bus),
265 PCI_SLOT(pci_dev->devfn),
266 PCI_FUNC(pci_dev->devfn));
267 }
268
269 static inline void xen_unmap_pcidev(xc_interface *xc, domid_t dom,
270 ioservid_t ioservid,
271 PCIDevice *pci_dev)
272 {
273 trace_xen_unmap_pcidev(ioservid, pci_bus_num(pci_dev->bus),
274 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
275 xc_hvm_unmap_pcidev_from_ioreq_server(xc, dom, ioservid,
276 0, pci_bus_num(pci_dev->bus),
277 PCI_SLOT(pci_dev->devfn),
278 PCI_FUNC(pci_dev->devfn));
279 }
280
281 static inline int xen_create_ioreq_server(xc_interface *xc, domid_t dom,
282 ioservid_t *ioservid)
283 {
284 int rc = xc_hvm_create_ioreq_server(xc, dom, HVM_IOREQSRV_BUFIOREQ_ATOMIC,
285 ioservid);
286
287 if (rc == 0) {
288 trace_xen_ioreq_server_create(*ioservid);
289 }
290
291 return rc;
292 }
293
static inline void xen_destroy_ioreq_server(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid)
{
    /* Best-effort teardown: the hypercall's return value is ignored. */
    trace_xen_ioreq_server_destroy(ioservid);
    xc_hvm_destroy_ioreq_server(xc, dom, ioservid);
}
300
/* Thin forwarder: fetch @ioservid's ioreq pfn, buffered-ioreq pfn and
 * buffered-ioreq event channel straight from libxc. */
static inline int xen_get_ioreq_server_info(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xc_hvm_get_ioreq_server_info(xc, dom, ioservid,
                                        ioreq_pfn, bufioreq_pfn,
                                        bufioreq_evtchn);
}
311
/* Enable or disable @ioservid; propagates libxc's result code. */
static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    trace_xen_ioreq_server_state(ioservid, enable);
    return xc_hvm_set_ioreq_server_state(xc, dom, ioservid, enable);
}
319
320 #endif
321
322 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
323 static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
324 unsigned int space,
325 unsigned long idx,
326 xen_pfn_t gpfn)
327 {
328 return xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
329 }
330 #else
331 static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
332 unsigned int space,
333 unsigned long idx,
334 xen_pfn_t gpfn)
335 {
336 /* In Xen 4.6 rc is -1 and errno contains the error value. */
337 int rc = xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
338 if (rc == -1)
339 return errno;
340 return rc;
341 }
342 #endif
343
#ifdef CONFIG_XEN_PV_DOMAIN_BUILD
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 470
/* Xen < 4.7: xc_domain_create takes five arguments. */
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid);
}
#else
/* Xen >= 4.7 added a sixth (domain configuration) argument; NULL
 * presumably selects the default configuration — confirm against the
 * libxc headers for the targeted Xen version. */
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid, NULL);
}
#endif
#endif
361
362 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 471
363
364 #define xenforeignmemory_open(l, f) xen_xc
365
366 static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
367 int prot, size_t pages,
368 const xen_pfn_t arr[/*pages*/],
369 int err[/*pages*/])
370 {
371 if (err)
372 return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
373 else
374 return xc_map_foreign_pages(h, dom, prot, arr, pages);
375 }
376
377 #define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE)
378
379 #endif
380
381 #endif /* QEMU_HW_XEN_COMMON_H */