Commit | Line | Data |
---|---|---|
d94f9486 | 1 | #ifndef QEMU_HW_XEN_COMMON_H |
175de524 | 2 | #define QEMU_HW_XEN_COMMON_H |
d94f9486 | 3 | |
5eeb39c2 IC |
4 | /* |
5 | * If we have new enough libxenctrl then we do not want/need these compat | |
6 | * interfaces, despite what the user supplied cflags might say. They | |
7 | * must be undefined before including xenctrl.h | |
8 | */ | |
9 | #undef XC_WANT_COMPAT_EVTCHN_API | |
10 | #undef XC_WANT_COMPAT_GNTTAB_API | |
11 | #undef XC_WANT_COMPAT_MAP_FOREIGN_API | |
12 | ||
d94f9486 | 13 | #include <xenctrl.h> |
edfb07ed | 14 | #include <xenstore.h> |
d94f9486 AL |
15 | #include <xen/io/xenbus.h> |
16 | ||
83c9f4ca | 17 | #include "hw/hw.h" |
0d09e41a | 18 | #include "hw/xen/xen.h" |
3996e85c | 19 | #include "hw/pci/pci.h" |
1de7afc9 | 20 | #include "qemu/queue.h" |
3996e85c | 21 | #include "trace.h" |
d94f9486 AL |
22 | |
23 | /* | |
edfb07ed | 24 | * We don't support Xen prior to 4.2.0. |
d94f9486 | 25 | */ |
d5b93ddf | 26 | |
cb8d4c8f | 27 | /* Xen 4.2 through 4.6 */ |
edfb07ed | 28 | #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 471 |
d5b93ddf | 29 | |
81daba58 | 30 | typedef xc_interface xenforeignmemory_handle; |
a2db2a1e | 31 | typedef xc_evtchn xenevtchn_handle; |
c1345a88 | 32 | typedef xc_gnttab xengnttab_handle; |
d5b93ddf | 33 | |
a2db2a1e IC |
34 | #define xenevtchn_open(l, f) xc_evtchn_open(l, f); |
35 | #define xenevtchn_close(h) xc_evtchn_close(h) | |
36 | #define xenevtchn_fd(h) xc_evtchn_fd(h) | |
37 | #define xenevtchn_pending(h) xc_evtchn_pending(h) | |
38 | #define xenevtchn_notify(h, p) xc_evtchn_notify(h, p) | |
39 | #define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p) | |
40 | #define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p) | |
41 | #define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p) | |
d5b93ddf | 42 | |
c1345a88 IC |
43 | #define xengnttab_open(l, f) xc_gnttab_open(l, f) |
44 | #define xengnttab_close(h) xc_gnttab_close(h) | |
45 | #define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n) | |
46 | #define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p) | |
47 | #define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n) | |
48 | #define xengnttab_map_grant_refs(h, c, d, r, p) \ | |
49 | xc_gnttab_map_grant_refs(h, c, d, r, p) | |
816ac92e JG |
50 | #define xengnttab_map_domain_grant_refs(h, c, d, r, p) \ |
51 | xc_gnttab_map_domain_grant_refs(h, c, d, r, p) | |
d5b93ddf | 52 | |
6aa0205e IC |
53 | #define xenforeignmemory_open(l, f) xen_xc |
54 | ||
55 | static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom, | |
56 | int prot, size_t pages, | |
57 | const xen_pfn_t arr[/*pages*/], | |
58 | int err[/*pages*/]) | |
59 | { | |
60 | if (err) | |
61 | return xc_map_foreign_bulk(h, dom, prot, arr, err, pages); | |
62 | else | |
63 | return xc_map_foreign_pages(h, dom, prot, arr, pages); | |
64 | } | |
65 | ||
66 | #define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE) | |
e0cb42ae | 67 | |
5eeb39c2 IC |
68 | #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 471 */ |
69 | ||
5eeb39c2 IC |
70 | #include <xenevtchn.h> |
71 | #include <xengnttab.h> | |
72 | #include <xenforeignmemory.h> | |
73 | ||
d94f9486 AL |
74 | #endif |
75 | ||
/* Shut down (reboot == true) or destroy the current HVM domain. */
void destroy_hvm_domain(bool reboot);

/* shutdown/destroy current domain because of an error */
void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
80 | ||
37f9e258 | 81 | #ifdef HVM_PARAM_VMPORT_REGS_PFN |
81daba58 | 82 | static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom, |
d01a5a3f | 83 | xen_pfn_t *vmport_regs_pfn) |
37f9e258 | 84 | { |
d01a5a3f SS |
85 | int rc; |
86 | uint64_t value; | |
87 | rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value); | |
88 | if (rc >= 0) { | |
89 | *vmport_regs_pfn = (xen_pfn_t) value; | |
90 | } | |
91 | return rc; | |
37f9e258 DS |
92 | } |
93 | #else | |
81daba58 | 94 | static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom, |
d01a5a3f | 95 | xen_pfn_t *vmport_regs_pfn) |
37f9e258 DS |
96 | { |
97 | return -ENOSYS; | |
98 | } | |
99 | #endif | |
100 | ||
/* Xen before 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460

/* Value introduced in Xen 4.6; provide it when building against
 * older headers. */
#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
#endif

#endif
109 | ||
b7665c60 PD |
110 | static inline int xen_get_default_ioreq_server_info(xc_interface *xc, |
111 | domid_t dom, | |
112 | xen_pfn_t *ioreq_pfn, | |
113 | xen_pfn_t *bufioreq_pfn, | |
114 | evtchn_port_t | |
115 | *bufioreq_evtchn) | |
116 | { | |
117 | unsigned long param; | |
118 | int rc; | |
119 | ||
120 | rc = xc_get_hvm_param(xc, dom, HVM_PARAM_IOREQ_PFN, ¶m); | |
121 | if (rc < 0) { | |
122 | fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n"); | |
123 | return -1; | |
124 | } | |
125 | ||
126 | *ioreq_pfn = param; | |
127 | ||
128 | rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_PFN, ¶m); | |
129 | if (rc < 0) { | |
130 | fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n"); | |
131 | return -1; | |
132 | } | |
133 | ||
134 | *bufioreq_pfn = param; | |
135 | ||
136 | rc = xc_get_hvm_param(xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN, | |
137 | ¶m); | |
138 | if (rc < 0) { | |
139 | fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n"); | |
140 | return -1; | |
141 | } | |
142 | ||
143 | *bufioreq_evtchn = param; | |
144 | ||
145 | return 0; | |
146 | } | |
147 | ||
/* Xen before 4.5 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 450

/* HVM param number introduced in Xen 4.5; define it for older headers. */
#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

#define IOREQ_TYPE_PCI_CONFIG 2

typedef uint16_t ioservid_t;

/*
 * Xen < 4.5 has no ioreq-server API; only the single default ioreq
 * server exists.  The map/unmap/create/destroy operations below are
 * therefore no-ops, and the info/state queries fall back to the
 * default server's HVM params.
 */
static inline void xen_map_memory_section(xc_interface *xc, domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}

static inline void xen_unmap_memory_section(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}

static inline void xen_map_io_section(xc_interface *xc, domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}

static inline void xen_unmap_io_section(xc_interface *xc, domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}

static inline void xen_map_pcidev(xc_interface *xc, domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}

static inline void xen_unmap_pcidev(xc_interface *xc, domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}

static inline void xen_create_ioreq_server(xc_interface *xc, domid_t dom,
                                           ioservid_t *ioservid)
{
}

static inline void xen_destroy_ioreq_server(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid)
{
}

/* Report the default ioreq server's pages/event channel. */
static inline int xen_get_ioreq_server_info(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xen_get_default_ioreq_server_info(xc, dom, ioreq_pfn, bufioreq_pfn,
                                             bufioreq_evtchn);
}

/* The default ioreq server is always enabled: succeed unconditionally. */
static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    return 0;
}
221 | ||
222 | /* Xen 4.5 */ | |
223 | #else | |
224 | ||
/* Set when creating a dedicated ioreq server failed and we fell back
 * to the default (catch-all) ioreq server; makes the map/unmap
 * helpers below no-ops. */
static bool use_default_ioreq_server;
226 | ||
81daba58 | 227 | static inline void xen_map_memory_section(xc_interface *xc, domid_t dom, |
3996e85c PD |
228 | ioservid_t ioservid, |
229 | MemoryRegionSection *section) | |
230 | { | |
231 | hwaddr start_addr = section->offset_within_address_space; | |
232 | ram_addr_t size = int128_get64(section->size); | |
233 | hwaddr end_addr = start_addr + size - 1; | |
234 | ||
b7665c60 PD |
235 | if (use_default_ioreq_server) { |
236 | return; | |
237 | } | |
238 | ||
3996e85c PD |
239 | trace_xen_map_mmio_range(ioservid, start_addr, end_addr); |
240 | xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 1, | |
241 | start_addr, end_addr); | |
242 | } | |
243 | ||
81daba58 | 244 | static inline void xen_unmap_memory_section(xc_interface *xc, domid_t dom, |
3996e85c PD |
245 | ioservid_t ioservid, |
246 | MemoryRegionSection *section) | |
247 | { | |
248 | hwaddr start_addr = section->offset_within_address_space; | |
249 | ram_addr_t size = int128_get64(section->size); | |
250 | hwaddr end_addr = start_addr + size - 1; | |
251 | ||
b7665c60 PD |
252 | if (use_default_ioreq_server) { |
253 | return; | |
254 | } | |
255 | ||
256 | ||
3996e85c PD |
257 | trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr); |
258 | xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 1, | |
259 | start_addr, end_addr); | |
260 | } | |
261 | ||
81daba58 | 262 | static inline void xen_map_io_section(xc_interface *xc, domid_t dom, |
3996e85c PD |
263 | ioservid_t ioservid, |
264 | MemoryRegionSection *section) | |
265 | { | |
266 | hwaddr start_addr = section->offset_within_address_space; | |
267 | ram_addr_t size = int128_get64(section->size); | |
268 | hwaddr end_addr = start_addr + size - 1; | |
269 | ||
b7665c60 PD |
270 | if (use_default_ioreq_server) { |
271 | return; | |
272 | } | |
273 | ||
274 | ||
3996e85c PD |
275 | trace_xen_map_portio_range(ioservid, start_addr, end_addr); |
276 | xc_hvm_map_io_range_to_ioreq_server(xc, dom, ioservid, 0, | |
277 | start_addr, end_addr); | |
278 | } | |
279 | ||
81daba58 | 280 | static inline void xen_unmap_io_section(xc_interface *xc, domid_t dom, |
3996e85c PD |
281 | ioservid_t ioservid, |
282 | MemoryRegionSection *section) | |
283 | { | |
284 | hwaddr start_addr = section->offset_within_address_space; | |
285 | ram_addr_t size = int128_get64(section->size); | |
286 | hwaddr end_addr = start_addr + size - 1; | |
287 | ||
b7665c60 PD |
288 | if (use_default_ioreq_server) { |
289 | return; | |
290 | } | |
291 | ||
3996e85c PD |
292 | trace_xen_unmap_portio_range(ioservid, start_addr, end_addr); |
293 | xc_hvm_unmap_io_range_from_ioreq_server(xc, dom, ioservid, 0, | |
294 | start_addr, end_addr); | |
295 | } | |
296 | ||
81daba58 | 297 | static inline void xen_map_pcidev(xc_interface *xc, domid_t dom, |
3996e85c PD |
298 | ioservid_t ioservid, |
299 | PCIDevice *pci_dev) | |
300 | { | |
b7665c60 PD |
301 | if (use_default_ioreq_server) { |
302 | return; | |
303 | } | |
304 | ||
3996e85c PD |
305 | trace_xen_map_pcidev(ioservid, pci_bus_num(pci_dev->bus), |
306 | PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn)); | |
307 | xc_hvm_map_pcidev_to_ioreq_server(xc, dom, ioservid, | |
308 | 0, pci_bus_num(pci_dev->bus), | |
309 | PCI_SLOT(pci_dev->devfn), | |
310 | PCI_FUNC(pci_dev->devfn)); | |
311 | } | |
312 | ||
81daba58 | 313 | static inline void xen_unmap_pcidev(xc_interface *xc, domid_t dom, |
3996e85c PD |
314 | ioservid_t ioservid, |
315 | PCIDevice *pci_dev) | |
316 | { | |
b7665c60 PD |
317 | if (use_default_ioreq_server) { |
318 | return; | |
319 | } | |
320 | ||
3996e85c PD |
321 | trace_xen_unmap_pcidev(ioservid, pci_bus_num(pci_dev->bus), |
322 | PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn)); | |
323 | xc_hvm_unmap_pcidev_from_ioreq_server(xc, dom, ioservid, | |
324 | 0, pci_bus_num(pci_dev->bus), | |
325 | PCI_SLOT(pci_dev->devfn), | |
326 | PCI_FUNC(pci_dev->devfn)); | |
327 | } | |
328 | ||
b7665c60 PD |
329 | static inline void xen_create_ioreq_server(xc_interface *xc, domid_t dom, |
330 | ioservid_t *ioservid) | |
3996e85c | 331 | { |
d8b441a3 JB |
332 | int rc = xc_hvm_create_ioreq_server(xc, dom, HVM_IOREQSRV_BUFIOREQ_ATOMIC, |
333 | ioservid); | |
3996e85c PD |
334 | |
335 | if (rc == 0) { | |
336 | trace_xen_ioreq_server_create(*ioservid); | |
b7665c60 | 337 | return; |
3996e85c PD |
338 | } |
339 | ||
b7665c60 PD |
340 | *ioservid = 0; |
341 | use_default_ioreq_server = true; | |
342 | trace_xen_default_ioreq_server(); | |
3996e85c PD |
343 | } |
344 | ||
/* Destroy the ioreq server created by xen_create_ioreq_server().
 * A no-op when we fell back to the default ioreq server, since
 * nothing was created in that case. */
static inline void xen_destroy_ioreq_server(xc_interface *xc, domid_t dom,
                                            ioservid_t ioservid)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_ioreq_server_destroy(ioservid);
    xc_hvm_destroy_ioreq_server(xc, dom, ioservid);
}
355 | ||
81daba58 | 356 | static inline int xen_get_ioreq_server_info(xc_interface *xc, domid_t dom, |
3996e85c PD |
357 | ioservid_t ioservid, |
358 | xen_pfn_t *ioreq_pfn, | |
359 | xen_pfn_t *bufioreq_pfn, | |
360 | evtchn_port_t *bufioreq_evtchn) | |
361 | { | |
b7665c60 PD |
362 | if (use_default_ioreq_server) { |
363 | return xen_get_default_ioreq_server_info(xc, dom, ioreq_pfn, | |
364 | bufioreq_pfn, | |
365 | bufioreq_evtchn); | |
366 | } | |
367 | ||
3996e85c PD |
368 | return xc_hvm_get_ioreq_server_info(xc, dom, ioservid, |
369 | ioreq_pfn, bufioreq_pfn, | |
370 | bufioreq_evtchn); | |
371 | } | |
372 | ||
/* Enable or disable the ioreq server.  The default ioreq server is
 * always active, so this is a successful no-op in that case. */
static inline int xen_set_ioreq_server_state(xc_interface *xc, domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    if (use_default_ioreq_server) {
        return 0;
    }

    trace_xen_ioreq_server_state(ioservid, enable);
    return xc_hvm_set_ioreq_server_state(xc, dom, ioservid, enable);
}
384 | ||
385 | #endif | |
386 | ||
20a544c7 | 387 | #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 460 |
81daba58 | 388 | static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid, |
20a544c7 KRW |
389 | unsigned int space, |
390 | unsigned long idx, | |
391 | xen_pfn_t gpfn) | |
392 | { | |
393 | return xc_domain_add_to_physmap(xch, domid, space, idx, gpfn); | |
394 | } | |
395 | #else | |
81daba58 | 396 | static inline int xen_xc_domain_add_to_physmap(xc_interface *xch, uint32_t domid, |
20a544c7 KRW |
397 | unsigned int space, |
398 | unsigned long idx, | |
399 | xen_pfn_t gpfn) | |
400 | { | |
401 | /* In Xen 4.6 rc is -1 and errno contains the error value. */ | |
402 | int rc = xc_domain_add_to_physmap(xch, domid, space, idx, gpfn); | |
403 | if (rc == -1) | |
404 | return errno; | |
405 | return rc; | |
406 | } | |
407 | #endif | |
408 | ||
#ifdef CONFIG_XEN_PV_DOMAIN_BUILD
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 470
/* Xen < 4.7: xc_domain_create() takes no configuration argument. */
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid);
}
#else
/* Xen >= 4.7 added an xc_domain_configuration_t argument; pass NULL
 * to keep the hypervisor defaults. */
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid, NULL);
}
#endif
#endif
cdadde39 | 426 | |
/* Xen before 4.8 */

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 480

/* Placeholder type: the real xengnttab_grant_copy_segment_t only
 * exists from Xen 4.8 onwards. */
typedef void *xengnttab_grant_copy_segment_t;

/* Grant copy is unavailable before Xen 4.8; callers must handle
 * -ENOSYS and fall back to mapping. */
static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
                                       xengnttab_grant_copy_segment_t *segs)
{
    return -ENOSYS;
}
#endif
440 | ||
d94f9486 | 441 | #endif /* QEMU_HW_XEN_COMMON_H */ |