/*
 * include/hw/xen/xen_common.h — QEMU Xen toolstack compatibility header
 * (from the mirror_qemu.git tree, git.proxmox.com mirror)
 */
1 #ifndef QEMU_HW_XEN_COMMON_H
2 #define QEMU_HW_XEN_COMMON_H
3
4 /*
5 * If we have new enough libxenctrl then we do not want/need these compat
6 * interfaces, despite what the user supplied cflags might say. They
7 * must be undefined before including xenctrl.h
8 */
9 #undef XC_WANT_COMPAT_EVTCHN_API
10 #undef XC_WANT_COMPAT_GNTTAB_API
11 #undef XC_WANT_COMPAT_MAP_FOREIGN_API
12
13 #include <xenctrl.h>
14 #include <xenstore.h>
15 #include <xen/io/xenbus.h>
16
17 #include "hw/hw.h"
18 #include "hw/xen/xen.h"
19 #include "hw/pci/pci.h"
20 #include "qemu/queue.h"
21 #include "trace.h"
22
23 /*
24 * We don't support Xen prior to 4.2.0.
25 */
26
27 /* Xen 4.2 through 4.6 */
28 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 471
29
30 typedef xc_interface xenforeignmemory_handle;
31 typedef xc_evtchn xenevtchn_handle;
32 typedef xc_gnttab xengnttab_handle;
33
34 #define xenevtchn_open(l, f) xc_evtchn_open(l, f);
35 #define xenevtchn_close(h) xc_evtchn_close(h)
36 #define xenevtchn_fd(h) xc_evtchn_fd(h)
37 #define xenevtchn_pending(h) xc_evtchn_pending(h)
38 #define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
39 #define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
40 #define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
41 #define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)
42
43 #define xengnttab_open(l, f) xc_gnttab_open(l, f)
44 #define xengnttab_close(h) xc_gnttab_close(h)
45 #define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
46 #define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
47 #define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
48 #define xengnttab_map_grant_refs(h, c, d, r, p) \
49 xc_gnttab_map_grant_refs(h, c, d, r, p)
50 #define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
51 xc_gnttab_map_domain_grant_refs(h, c, d, r, p)
52
53 #define xenforeignmemory_open(l, f) xen_xc
54
55 static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
56 int prot, size_t pages,
57 const xen_pfn_t arr[/*pages*/],
58 int err[/*pages*/])
59 {
60 if (err)
61 return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
62 else
63 return xc_map_foreign_pages(h, dom, prot, arr, pages);
64 }
65
/* Unmap @s pages at @p.  Arguments are parenthesized so that an
 * expression argument (e.g. a + b pages) expands correctly. */
#define xenforeignmemory_unmap(h, p, s) munmap((p), (s) * XC_PAGE_SIZE)
67
68 #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 471 */
69
70 #include <xenevtchn.h>
71 #include <xengnttab.h>
72 #include <xenforeignmemory.h>
73
74 #endif
75
76 void destroy_hvm_domain(bool reboot);
77
78 /* shutdown/destroy current domain because of an error */
79 void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
80
81 #ifdef HVM_PARAM_VMPORT_REGS_PFN
82 static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
83 xen_pfn_t *vmport_regs_pfn)
84 {
85 int rc;
86 uint64_t value;
87 rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
88 if (rc >= 0) {
89 *vmport_regs_pfn = (xen_pfn_t) value;
90 }
91 return rc;
92 }
93 #else
/*
 * Fallback when the toolstack headers do not define
 * HVM_PARAM_VMPORT_REGS_PFN: report the feature as unimplemented.
 * All parameters are ignored and *vmport_regs_pfn is left untouched.
 */
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
99 #endif
100
101 /* Xen before 4.6 */
102 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
103
104 #ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
105 #define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
106 #endif
107
108 #endif
109
110 /* Xen before 4.5 */
111 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 450
112
113 #ifndef HVM_PARAM_BUFIOREQ_EVTCHN
114 #define HVM_PARAM_BUFIOREQ_EVTCHN 26
115 #endif
116
117 #define IOREQ_TYPE_PCI_CONFIG 2
118
119 typedef uint16_t ioservid_t;
120
/* No-op: Xen before 4.5 has no ioreq server interface, so there is no
 * per-server MMIO range to register. */
static inline void xen_map_memory_section(xc_interface *xc, domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}
126
/* No-op counterpart of xen_map_memory_section() for Xen < 4.5. */
static inline void xen_unmap_memory_section(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}
132
/* No-op: no ioreq server on Xen < 4.5, so port-I/O ranges are not
 * registered per server. */
static inline void xen_map_io_section(xc_interface *xc, domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}
138
/* No-op counterpart of xen_map_io_section() for Xen < 4.5. */
static inline void xen_unmap_io_section(xc_interface *xc, domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}
144
/* No-op: PCI config-space forwarding to an ioreq server does not exist
 * before Xen 4.5. */
static inline void xen_map_pcidev(xc_interface *xc, domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}
150
/* No-op counterpart of xen_map_pcidev() for Xen < 4.5. */
static inline void xen_unmap_pcidev(xc_interface *xc, domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}
156
/*
 * Stub "creation" for Xen < 4.5: always succeeds (returns 0).
 * NOTE(review): *ioservid is never written here — callers presumably do
 * not use the id on pre-4.5 Xen (the default ioreq pages are implied);
 * confirm no caller reads it before a real create.
 */
static inline int xen_create_ioreq_server(xc_interface *xc, domid_t dom,
                                          ioservid_t *ioservid)
{
    return 0;
}
162
/* No-op: nothing was created by xen_create_ioreq_server() on Xen < 4.5. */
static inline void xen_destroy_ioreq_server(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid)
{
}
167
/*
 * Pre-4.5 compat: there is no per-server info call, so read the legacy
 * per-domain HVM parameters that describe the (single, implicit) ioreq
 * machinery: the synchronous ioreq page PFN, the buffered ioreq page
 * PFN, and the buffered-ioreq event channel.  ioservid is ignored.
 *
 * Returns 0 on success; on any failure prints a message to stderr and
 * returns -1 (the libxc error code itself is not propagated).
 */
static inline int xen_get_ioreq_server_info(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    unsigned long param;
    int rc;

    rc = xc_get_hvm_param(xc, dom, HVM_PARAM_IOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
        return -1;
    }

    *ioreq_pfn = param;

    rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
        return -1;
    }

    *bufioreq_pfn = param;

    /* HVM_PARAM_BUFIOREQ_EVTCHN may come from the local fallback #define
     * above when the Xen headers predate it (Xen < 4.5). */
    rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
                          &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }

    *bufioreq_evtchn = param;

    return 0;
}
204
/* Stub for Xen < 4.5: there is no server state to toggle; report success. */
static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    return 0;
}
211
212 /* Xen 4.5 */
213 #else
214
215 static inline void xen_map_memory_section(xc_interface *xc, domid_t dom,
216 ioservid_t ioservid,
217 MemoryRegionSection *section)
218 {
219 hwaddr start_addr = section->offset_within_address_space;
220 ram_addr_t size = int128_get64(section->size);
221 hwaddr end_addr = start_addr + size - 1;
222
223 trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
224 xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 1,
225 start_addr, end_addr);
226 }
227
228 static inline void xen_unmap_memory_section(xc_interface *xc, domid_t dom,
229 ioservid_t ioservid,
230 MemoryRegionSection *section)
231 {
232 hwaddr start_addr = section->offset_within_address_space;
233 ram_addr_t size = int128_get64(section->size);
234 hwaddr end_addr = start_addr + size - 1;
235
236 trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
237 xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 1,
238 start_addr, end_addr);
239 }
240
241 static inline void xen_map_io_section(xc_interface *xc, domid_t dom,
242 ioservid_t ioservid,
243 MemoryRegionSection *section)
244 {
245 hwaddr start_addr = section->offset_within_address_space;
246 ram_addr_t size = int128_get64(section->size);
247 hwaddr end_addr = start_addr + size - 1;
248
249 trace_xen_map_portio_range(ioservid, start_addr, end_addr);
250 xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 0,
251 start_addr, end_addr);
252 }
253
254 static inline void xen_unmap_io_section(xc_interface *xc, domid_t dom,
255 ioservid_t ioservid,
256 MemoryRegionSection *section)
257 {
258 hwaddr start_addr = section->offset_within_address_space;
259 ram_addr_t size = int128_get64(section->size);
260 hwaddr end_addr = start_addr + size - 1;
261
262 trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
263 xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 0,
264 start_addr, end_addr);
265 }
266
267 static inline void xen_map_pcidev(xc_interface *xc, domid_t dom,
268 ioservid_t ioservid,
269 PCIDevice *pci_dev)
270 {
271 trace_xen_map_pcidev(ioservid, pci_bus_num(pci_dev->bus),
272 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
273 xc_hvm_map_pcidev_to_ioreq_server(xc, dom, ioservid,
274 0, pci_bus_num(pci_dev->bus),
275 PCI_SLOT(pci_dev->devfn),
276 PCI_FUNC(pci_dev->devfn));
277 }
278
279 static inline void xen_unmap_pcidev(xc_interface *xc, domid_t dom,
280 ioservid_t ioservid,
281 PCIDevice *pci_dev)
282 {
283 trace_xen_unmap_pcidev(ioservid, pci_bus_num(pci_dev->bus),
284 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
285 xc_hvm_unmap_pcidev_from_ioreq_server(xc, dom, ioservid,
286 0, pci_bus_num(pci_dev->bus),
287 PCI_SLOT(pci_dev->devfn),
288 PCI_FUNC(pci_dev->devfn));
289 }
290
291 static inline int xen_create_ioreq_server(xc_interface *xc, domid_t dom,
292 ioservid_t *ioservid)
293 {
294 int rc = xc_hvm_create_ioreq_server(xc, dom, HVM_IOREQSRV_BUFIOREQ_ATOMIC,
295 ioservid);
296
297 if (rc == 0) {
298 trace_xen_ioreq_server_create(*ioservid);
299 }
300
301 return rc;
302 }
303
/*
 * Destroy ioreq server @ioservid (traced first).  Any error from the
 * hypercall is ignored — this wrapper returns void.
 */
static inline void xen_destroy_ioreq_server(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid)
{
    trace_xen_ioreq_server_destroy(ioservid);
    xc_hvm_destroy_ioreq_server(xc, dom, ioservid);
}
310
/*
 * Thin pass-through to xc_hvm_get_ioreq_server_info(): fills in the
 * ioreq page PFN, buffered-ioreq page PFN and buffered-ioreq event
 * channel for server @ioservid.  Returns the libxc result unchanged.
 */
static inline int xen_get_ioreq_server_info(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xc_hvm_get_ioreq_server_info(xc, dom, ioservid,
                                        ioreq_pfn, bufioreq_pfn,
                                        bufioreq_evtchn);
}
321
/*
 * Enable or disable ioreq server @ioservid (traced first).  Returns
 * the libxc result unchanged.
 */
static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    trace_xen_ioreq_server_state(ioservid, enable);
    return xc_hvm_set_ioreq_server_state(xc, dom, ioservid, enable);
}
329
330 #endif
331
332 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
/*
 * Pre-4.6 pass-through: xc_domain_add_to_physmap()'s return value is
 * already in the convention callers expect, so forward it unchanged.
 */
static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
                                               unsigned int space,
                                               unsigned long idx,
                                               xen_pfn_t gpfn)
{
    return xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
}
340 #else
341 static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid,
342 unsigned int space,
343 unsigned long idx,
344 xen_pfn_t gpfn)
345 {
346 /* In Xen 4.6 rc is -1 and errno contains the error value. */
347 int rc = xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
348 if (rc == -1)
349 return errno;
350 return rc;
351 }
352 #endif
353
354 #ifdef CONFIG_XEN_PV_DOMAIN_BUILD
355 #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 470
/*
 * Pre-4.7 pass-through: xc_domain_create() takes no domain-config
 * argument at this interface version.  *pdomid receives the new
 * domain id; the libxc result is returned unchanged.
 */
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid);
}
362 #else
/*
 * Xen >= 4.7 wrapper: xc_domain_create() gained an extra trailing
 * argument at this interface version; NULL is passed here —
 * presumably selecting the default domain configuration (confirm
 * against the matching libxc headers).
 */
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid, NULL);
}
370 #endif
371
372 #endif /* QEMU_HW_XEN_COMMON_H */