#ifndef QEMU_HW_XEN_COMMON_H
#define QEMU_HW_XEN_COMMON_H 1

#include "config-host.h"

#include <stddef.h>
#include <inttypes.h>

#include <xenctrl.h>
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 420
# include <xs.h>
#else
# include <xenstore.h>
#endif
#include <xen/io/xenbus.h>

#include "hw/hw.h"
#include "hw/xen/xen.h"
#include "hw/pci/pci.h"
#include "qemu/queue.h"
#include "trace.h"

/*
 * We don't support Xen prior to 3.3.0.
 */
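
/*
 * CONFIG_XEN_CTRL_INTERFACE_VERSION comes from config-host.h (included
 * above) and encodes the libxenctrl version probed by configure as
 * major * 100 + minor * 10, e.g. 450 for Xen 4.5.  All the version
 * switches below compare against that value.
 */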

/* Xen before 4.0 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 400
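/*
 * xc_map_foreign_bulk() only appeared in Xen 4.0; fall back to
 * xc_map_foreign_batch().  Note that this fallback never fills in the
 * per-page 'err' array.
 */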
static inline void *xc_map_foreign_bulk(int xc_handle, uint32_t dom, int prot,
                                        xen_pfn_t *arr, int *err,
                                        unsigned int num)
{
    return xc_map_foreign_batch(xc_handle, dom, prot, arr, num);
}
#endif


/* Xen before 4.1 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 410

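/*
 * Before Xen 4.1 the control, event channel and grant table interfaces
 * were plain file descriptors, so the handle types below are ints, with
 * -1 as the invalid value and "%i" as the printf format.  From Xen 4.1
 * onwards (the #else branch) they are opaque pointers, with NULL as the
 * invalid value and "%p" as the format.
 */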
typedef int XenXC;
typedef int XenEvtchn;
typedef int XenGnttab;

# define XC_INTERFACE_FMT "%i"
# define XC_HANDLER_INITIAL_VALUE -1

static inline XenEvtchn xen_xc_evtchn_open(void *logger,
                                           unsigned int open_flags)
{
    return xc_evtchn_open();
}

static inline XenGnttab xen_xc_gnttab_open(void *logger,
                                           unsigned int open_flags)
{
    return xc_gnttab_open();
}

static inline XenXC xen_xc_interface_open(void *logger, void *dombuild_logger,
                                          unsigned int open_flags)
{
    return xc_interface_open();
}

static inline int xc_fd(int xen_xc)
{
    return xen_xc;
}


static inline int xc_domain_populate_physmap_exact
    (XenXC xc_handle, uint32_t domid, unsigned long nr_extents,
     unsigned int extent_order, unsigned int mem_flags, xen_pfn_t *extent_start)
{
    return xc_domain_memory_populate_physmap
        (xc_handle, domid, nr_extents, extent_order, mem_flags, extent_start);
}

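/*
 * Pre-4.1 libxenctrl has no xc_domain_add_to_physmap() wrapper, so issue
 * the XENMEM_add_to_physmap memory op directly.
 */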
static inline int xc_domain_add_to_physmap(int xc_handle, uint32_t domid,
                                           unsigned int space, unsigned long idx,
                                           xen_pfn_t gpfn)
{
    struct xen_add_to_physmap xatp = {
        .domid = domid,
        .space = space,
        .idx = idx,
        .gpfn = gpfn,
    };

    return xc_memory_op(xc_handle, XENMEM_add_to_physmap, &xatp);
}

static inline struct xs_handle *xs_open(unsigned long flags)
{
    return xs_daemon_open();
}

static inline void xs_close(struct xs_handle *xsh)
{
    if (xsh != NULL) {
        xs_daemon_close(xsh);
    }
}


/* Xen 4.1 */
#else

typedef xc_interface *XenXC;
typedef xc_evtchn *XenEvtchn;
typedef xc_gnttab *XenGnttab;

# define XC_INTERFACE_FMT "%p"
# define XC_HANDLER_INITIAL_VALUE NULL

static inline XenEvtchn xen_xc_evtchn_open(void *logger,
                                           unsigned int open_flags)
{
    return xc_evtchn_open(logger, open_flags);
}

static inline XenGnttab xen_xc_gnttab_open(void *logger,
                                           unsigned int open_flags)
{
    return xc_gnttab_open(logger, open_flags);
}

static inline XenXC xen_xc_interface_open(void *logger, void *dombuild_logger,
                                          unsigned int open_flags)
{
    return xc_interface_open(logger, dombuild_logger, open_flags);
}

/* FIXME There is no way to obtain the Xen fd from an xc_interface handle */
static inline int xc_fd(xc_interface *xen_xc)
{
    return -1;
}
#endif

/* Xen before 4.2 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 420
static inline int xen_xc_hvm_inject_msi(XenXC xen_xc, domid_t dom,
                                        uint64_t addr, uint32_t data)
{
    return -ENOSYS;
}
/* The following definitions exist only so that the BLKIF_OP_DISCARD code
 * still compiles against older Xen releases. */
#define BLKIF_OP_DISCARD 5
struct blkif_request_discard {
    uint64_t nr_sectors;
    uint64_t sector_number;
};
#else
static inline int xen_xc_hvm_inject_msi(XenXC xen_xc, domid_t dom,
                                        uint64_t addr, uint32_t data)
{
    return xc_hvm_inject_msi(xen_xc, dom, addr, data);
}
#endif

void destroy_hvm_domain(bool reboot);

/* shutdown/destroy current domain because of an error */
void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);

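/*
 * HVM_PARAM_VMPORT_REGS_PFN is only defined by Xen headers that support
 * VMware port (vmport) emulation; without it the helper below simply
 * reports -ENOSYS.
 */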
#ifdef HVM_PARAM_VMPORT_REGS_PFN
static inline int xen_get_vmport_regs_pfn(XenXC xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    int rc;
    uint64_t value;
    rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
    if (rc >= 0) {
        *vmport_regs_pfn = (xen_pfn_t) value;
    }
    return rc;
}
#else
static inline int xen_get_vmport_regs_pfn(XenXC xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
#endif

/* Xen before 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460

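/*
 * HVM_IOREQSRV_BUFIOREQ_ATOMIC was added with Xen 4.6; define it here so
 * that xen_create_ioreq_server() below still compiles against older
 * headers.  (The value asks Xen to update the buffered ioreq ring
 * atomically.)
 */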
#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
#endif

#endif

/* Xen before 4.5 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 450

#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

#define IOREQ_TYPE_PCI_CONFIG 2

typedef uint16_t ioservid_t;

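/*
 * IOREQ servers were introduced with Xen 4.5.  On older Xen there is only
 * the single default ioreq machinery, so the map/unmap helpers below are
 * no-ops and creating, destroying or enabling the "server" trivially
 * succeeds.
 */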
static inline void xen_map_memory_section(XenXC xc, domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}

static inline void xen_unmap_memory_section(XenXC xc, domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}

static inline void xen_map_io_section(XenXC xc, domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}

static inline void xen_unmap_io_section(XenXC xc, domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}

static inline void xen_map_pcidev(XenXC xc, domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}

static inline void xen_unmap_pcidev(XenXC xc, domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}

static inline int xen_create_ioreq_server(XenXC xc, domid_t dom,
                                          ioservid_t *ioservid)
{
    return 0;
}

static inline void xen_destroy_ioreq_server(XenXC xc, domid_t dom,
                                            ioservid_t ioservid)
{
}

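/*
 * Without ioreq servers, the ioreq page, buffered ioreq page and buffered
 * event channel are published through HVM params instead.
 */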
static inline int xen_get_ioreq_server_info(XenXC xc, domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    unsigned long param;
    int rc;

    rc = xc_get_hvm_param(xc, dom, HVM_PARAM_IOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
        return -1;
    }

    *ioreq_pfn = param;

    rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
        return -1;
    }

    *bufioreq_pfn = param;

    rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
                          &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }

    *bufioreq_evtchn = param;

    return 0;
}

static inline int xen_set_ioreq_server_state(XenXC xc, domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    return 0;
}

/* Xen 4.5 */
#else

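/*
 * With real ioreq servers, MMIO ranges, port I/O ranges and PCI devices
 * are registered with Xen explicitly.  The fourth argument of
 * xc_hvm_map_io_range_to_ioreq_server() selects the range type: 1 for
 * MMIO (xen_map_memory_section) and 0 for port I/O (xen_map_io_section).
 */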
static inline void xen_map_memory_section(XenXC xc, domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
    xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 1,
                                        start_addr, end_addr);
}

static inline void xen_unmap_memory_section(XenXC xc, domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
    xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 1,
                                            start_addr, end_addr);
}

static inline void xen_map_io_section(XenXC xc, domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    trace_xen_map_portio_range(ioservid, start_addr, end_addr);
    xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 0,
                                        start_addr, end_addr);
}

static inline void xen_unmap_io_section(XenXC xc, domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
    xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 0,
                                            start_addr, end_addr);
}

static inline void xen_map_pcidev(XenXC xc, domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
    trace_xen_map_pcidev(ioservid, pci_bus_num(pci_dev->bus),
                         PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xc_hvm_map_pcidev_to_ioreq_server(xc, dom, ioservid,
                                      0, pci_bus_num(pci_dev->bus),
                                      PCI_SLOT(pci_dev->devfn),
                                      PCI_FUNC(pci_dev->devfn));
}

static inline void xen_unmap_pcidev(XenXC xc, domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
    trace_xen_unmap_pcidev(ioservid, pci_bus_num(pci_dev->bus),
                           PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xc_hvm_unmap_pcidev_from_ioreq_server(xc, dom, ioservid,
                                          0, pci_bus_num(pci_dev->bus),
                                          PCI_SLOT(pci_dev->devfn),
                                          PCI_FUNC(pci_dev->devfn));
}

static inline int xen_create_ioreq_server(XenXC xc, domid_t dom,
                                          ioservid_t *ioservid)
{
    int rc = xc_hvm_create_ioreq_server(xc, dom, HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                                        ioservid);

    if (rc == 0) {
        trace_xen_ioreq_server_create(*ioservid);
    }

    return rc;
}

static inline void xen_destroy_ioreq_server(XenXC xc, domid_t dom,
                                            ioservid_t ioservid)
{
    trace_xen_ioreq_server_destroy(ioservid);
    xc_hvm_destroy_ioreq_server(xc, dom, ioservid);
}

static inline int xen_get_ioreq_server_info(XenXC xc, domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xc_hvm_get_ioreq_server_info(xc, dom, ioservid,
                                        ioreq_pfn, bufioreq_pfn,
                                        bufioreq_evtchn);
}

static inline int xen_set_ioreq_server_state(XenXC xc, domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    trace_xen_ioreq_server_state(ioservid, enable);
    return xc_hvm_set_ioreq_server_state(xc, dom, ioservid, enable);
}

#endif

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460
static inline int xen_xc_domain_add_to_physmap(XenXC xch, uint32_t domid,
                                               unsigned int space,
                                               unsigned long idx,
                                               xen_pfn_t gpfn)
{
    return xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
}
#else
static inline int xen_xc_domain_add_to_physmap(XenXC xch, uint32_t domid,
                                               unsigned int space,
                                               unsigned long idx,
                                               xen_pfn_t gpfn)
{
    /* In Xen 4.6 rc is -1 and errno contains the error value. */
    int rc = xc_domain_add_to_physmap(xch, domid, space, idx, gpfn);
    if (rc == -1) {
        return errno;
    }
    return rc;
}
#endif
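
/*
 * Illustrative usage sketch (not part of this header; the handle 'xc' and
 * domain id 'dom' are hypothetical placeholders for the caller's state).
 * Version-independent code can drive the helpers above roughly like this,
 * whether it runs against Xen 4.5+ or against the pre-4.5 stubs:
 *
 *     ioservid_t ioservid;
 *     xen_pfn_t ioreq_pfn, bufioreq_pfn;
 *     evtchn_port_t bufioreq_evtchn;
 *
 *     if (xen_create_ioreq_server(xc, dom, &ioservid) == 0 &&
 *         xen_get_ioreq_server_info(xc, dom, ioservid, &ioreq_pfn,
 *                                   &bufioreq_pfn, &bufioreq_evtchn) == 0) {
 *         xen_set_ioreq_server_state(xc, dom, ioservid, true);
 *     }
 *
 * On pre-4.5 Xen the same sequence compiles unchanged and simply reads the
 * legacy HVM params.
 */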

#endif /* QEMU_HW_XEN_COMMON_H */