/*
 * Xen compatibility shim header (QEMU hw/xen): maps modern libxen*
 * APIs onto older libxenctrl entry points for Xen releases 4.2 - 4.11.
 */
d94f9486 | 1 | #ifndef QEMU_HW_XEN_COMMON_H |
175de524 | 2 | #define QEMU_HW_XEN_COMMON_H |
d94f9486 | 3 | |
5eeb39c2 IC |
4 | /* |
5 | * If we have new enough libxenctrl then we do not want/need these compat | |
6 | * interfaces, despite what the user supplied cflags might say. They | |
7 | * must be undefined before including xenctrl.h | |
8 | */ | |
9 | #undef XC_WANT_COMPAT_EVTCHN_API | |
10 | #undef XC_WANT_COMPAT_GNTTAB_API | |
11 | #undef XC_WANT_COMPAT_MAP_FOREIGN_API | |
12 | ||
d94f9486 | 13 | #include <xenctrl.h> |
edfb07ed | 14 | #include <xenstore.h> |
a3434a2d | 15 | #include "hw/xen/interface/io/xenbus.h" |
d94f9486 | 16 | |
0d09e41a | 17 | #include "hw/xen/xen.h" |
3996e85c | 18 | #include "hw/pci/pci.h" |
0ab8ed18 | 19 | #include "hw/xen/trace.h" |
d94f9486 | 20 | |
260cabed PD |
21 | extern xc_interface *xen_xc; |
22 | ||
d94f9486 | 23 | /* |
edfb07ed | 24 | * We don't support Xen prior to 4.2.0. |
d94f9486 | 25 | */ |
d5b93ddf | 26 | |
14d015b6 PD |
27 | /* Xen 4.2 through 4.6 */ |
28 | #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701 | |
29 | ||
30 | typedef xc_interface xenforeignmemory_handle; | |
31 | typedef xc_evtchn xenevtchn_handle; | |
32 | typedef xc_gnttab xengnttab_handle; | |
a3d669c8 | 33 | typedef evtchn_port_or_error_t xenevtchn_port_or_error_t; |
14d015b6 PD |
34 | |
35 | #define xenevtchn_open(l, f) xc_evtchn_open(l, f); | |
36 | #define xenevtchn_close(h) xc_evtchn_close(h) | |
37 | #define xenevtchn_fd(h) xc_evtchn_fd(h) | |
38 | #define xenevtchn_pending(h) xc_evtchn_pending(h) | |
39 | #define xenevtchn_notify(h, p) xc_evtchn_notify(h, p) | |
40 | #define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p) | |
41 | #define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p) | |
42 | #define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p) | |
43 | ||
44 | #define xengnttab_open(l, f) xc_gnttab_open(l, f) | |
45 | #define xengnttab_close(h) xc_gnttab_close(h) | |
46 | #define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n) | |
47 | #define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p) | |
48 | #define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n) | |
49 | #define xengnttab_map_grant_refs(h, c, d, r, p) \ | |
50 | xc_gnttab_map_grant_refs(h, c, d, r, p) | |
51 | #define xengnttab_map_domain_grant_refs(h, c, d, r, p) \ | |
52 | xc_gnttab_map_domain_grant_refs(h, c, d, r, p) | |
53 | ||
54 | #define xenforeignmemory_open(l, f) xen_xc | |
55 | #define xenforeignmemory_close(h) | |
56 | ||
57 | static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom, | |
58 | int prot, size_t pages, | |
59 | const xen_pfn_t arr[/*pages*/], | |
60 | int err[/*pages*/]) | |
61 | { | |
62 | if (err) | |
63 | return xc_map_foreign_bulk(h, dom, prot, arr, err, pages); | |
64 | else | |
65 | return xc_map_foreign_pages(h, dom, prot, arr, pages); | |
66 | } | |
67 | ||
68 | #define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE) | |
69 | ||
70 | #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */ | |
71 | ||
72 | #include <xenevtchn.h> | |
73 | #include <xengnttab.h> | |
74 | #include <xenforeignmemory.h> | |
75 | ||
76 | #endif | |
77 | ||
78 | extern xenforeignmemory_handle *xen_fmem; | |
79 | ||
85f3c64d IJ |
80 | #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900 |
81 | ||
82 | typedef xc_interface xendevicemodel_handle; | |
83 | ||
84 | #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */ | |
85 | ||
86 | #undef XC_WANT_COMPAT_DEVICEMODEL_API | |
87 | #include <xendevicemodel.h> | |
88 | ||
89 | #endif | |
90 | ||
2cbf8903 RL |
91 | #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100 |
92 | ||
93 | static inline int xendevicemodel_relocate_memory( | |
94 | xendevicemodel_handle *dmod, domid_t domid, uint32_t size, uint64_t src_gfn, | |
95 | uint64_t dst_gfn) | |
96 | { | |
97 | uint32_t i; | |
98 | int rc; | |
99 | ||
100 | for (i = 0; i < size; i++) { | |
101 | unsigned long idx = src_gfn + i; | |
102 | xen_pfn_t gpfn = dst_gfn + i; | |
103 | ||
104 | rc = xc_domain_add_to_physmap(xen_xc, domid, XENMAPSPACE_gmfn, idx, | |
105 | gpfn); | |
106 | if (rc) { | |
107 | return rc; | |
108 | } | |
109 | } | |
110 | ||
111 | return 0; | |
112 | } | |
113 | ||
114 | static inline int xendevicemodel_pin_memory_cacheattr( | |
115 | xendevicemodel_handle *dmod, domid_t domid, uint64_t start, uint64_t end, | |
116 | uint32_t type) | |
117 | { | |
118 | return xc_domain_pin_memory_cacheattr(xen_xc, domid, start, end, type); | |
119 | } | |
120 | ||
d3c49ebb PD |
121 | typedef void xenforeignmemory_resource_handle; |
122 | ||
123 | #define XENMEM_resource_ioreq_server 0 | |
124 | ||
125 | #define XENMEM_resource_ioreq_server_frame_bufioreq 0 | |
126 | #define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n)) | |
127 | ||
128 | static inline xenforeignmemory_resource_handle *xenforeignmemory_map_resource( | |
129 | xenforeignmemory_handle *fmem, domid_t domid, unsigned int type, | |
130 | unsigned int id, unsigned long frame, unsigned long nr_frames, | |
131 | void **paddr, int prot, int flags) | |
132 | { | |
133 | errno = EOPNOTSUPP; | |
134 | return NULL; | |
135 | } | |
136 | ||
2cbf8903 RL |
137 | #endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100 */ |
138 | ||
5ba3d756 ID |
139 | #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000 |
140 | ||
331b5189 | 141 | #define XEN_COMPAT_PHYSMAP |
5ba3d756 ID |
142 | static inline void *xenforeignmemory_map2(xenforeignmemory_handle *h, |
143 | uint32_t dom, void *addr, | |
144 | int prot, int flags, size_t pages, | |
145 | const xen_pfn_t arr[/*pages*/], | |
146 | int err[/*pages*/]) | |
147 | { | |
148 | assert(addr == NULL && flags == 0); | |
149 | return xenforeignmemory_map(h, dom, prot, pages, arr, err); | |
150 | } | |
151 | ||
0ef4d87d IJ |
152 | static inline int xentoolcore_restrict_all(domid_t domid) |
153 | { | |
154 | errno = ENOTTY; | |
155 | return -1; | |
156 | } | |
157 | ||
6b47c2aa IJ |
158 | static inline int xendevicemodel_shutdown(xendevicemodel_handle *dmod, |
159 | domid_t domid, unsigned int reason) | |
160 | { | |
161 | errno = ENOTTY; | |
162 | return -1; | |
163 | } | |
164 | ||
0ef4d87d IJ |
165 | #else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 41000 */ |
166 | ||
167 | #include <xentoolcore.h> | |
168 | ||
5ba3d756 ID |
169 | #endif |
170 | ||
f1167ee6 | 171 | #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900 |
d655f34e | 172 | |
d655f34e PD |
173 | static inline xendevicemodel_handle *xendevicemodel_open( |
174 | struct xentoollog_logger *logger, unsigned int open_flags) | |
175 | { | |
176 | return xen_xc; | |
177 | } | |
178 | ||
f1167ee6 | 179 | #if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 |
d655f34e PD |
180 | |
181 | static inline int xendevicemodel_create_ioreq_server( | |
182 | xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq, | |
183 | ioservid_t *id) | |
184 | { | |
185 | return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq, | |
186 | id); | |
187 | } | |
188 | ||
189 | static inline int xendevicemodel_get_ioreq_server_info( | |
190 | xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, | |
191 | xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn, | |
192 | evtchn_port_t *bufioreq_port) | |
193 | { | |
194 | return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn, | |
195 | bufioreq_pfn, bufioreq_port); | |
196 | } | |
197 | ||
198 | static inline int xendevicemodel_map_io_range_to_ioreq_server( | |
199 | xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio, | |
200 | uint64_t start, uint64_t end) | |
201 | { | |
202 | return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio, | |
203 | start, end); | |
204 | } | |
205 | ||
206 | static inline int xendevicemodel_unmap_io_range_from_ioreq_server( | |
207 | xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio, | |
208 | uint64_t start, uint64_t end) | |
209 | { | |
210 | return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio, | |
211 | start, end); | |
212 | } | |
213 | ||
214 | static inline int xendevicemodel_map_pcidev_to_ioreq_server( | |
215 | xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, | |
216 | uint16_t segment, uint8_t bus, uint8_t device, uint8_t function) | |
217 | { | |
218 | return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment, | |
219 | bus, device, function); | |
220 | } | |
221 | ||
222 | static inline int xendevicemodel_unmap_pcidev_from_ioreq_server( | |
223 | xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, | |
224 | uint16_t segment, uint8_t bus, uint8_t device, uint8_t function) | |
225 | { | |
226 | return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment, | |
227 | bus, device, function); | |
228 | } | |
229 | ||
230 | static inline int xendevicemodel_destroy_ioreq_server( | |
231 | xendevicemodel_handle *dmod, domid_t domid, ioservid_t id) | |
232 | { | |
233 | return xc_hvm_destroy_ioreq_server(dmod, domid, id); | |
234 | } | |
235 | ||
236 | static inline int xendevicemodel_set_ioreq_server_state( | |
237 | xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled) | |
238 | { | |
239 | return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled); | |
240 | } | |
241 | ||
f1167ee6 | 242 | #endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */ |
d655f34e PD |
243 | |
244 | static inline int xendevicemodel_set_pci_intx_level( | |
245 | xendevicemodel_handle *dmod, domid_t domid, uint16_t segment, | |
246 | uint8_t bus, uint8_t device, uint8_t intx, unsigned int level) | |
247 | { | |
248 | return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device, | |
249 | intx, level); | |
250 | } | |
251 | ||
252 | static inline int xendevicemodel_set_isa_irq_level( | |
253 | xendevicemodel_handle *dmod, domid_t domid, uint8_t irq, | |
254 | unsigned int level) | |
255 | { | |
256 | return xc_hvm_set_isa_irq_level(dmod, domid, irq, level); | |
257 | } | |
258 | ||
259 | static inline int xendevicemodel_set_pci_link_route( | |
260 | xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq) | |
261 | { | |
262 | return xc_hvm_set_pci_link_route(dmod, domid, link, irq); | |
263 | } | |
264 | ||
265 | static inline int xendevicemodel_inject_msi( | |
266 | xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr, | |
267 | uint32_t msi_data) | |
268 | { | |
269 | return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data); | |
270 | } | |
271 | ||
272 | static inline int xendevicemodel_track_dirty_vram( | |
273 | xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn, | |
274 | uint32_t nr, unsigned long *dirty_bitmap) | |
275 | { | |
276 | return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr, | |
277 | dirty_bitmap); | |
278 | } | |
279 | ||
280 | static inline int xendevicemodel_modified_memory( | |
281 | xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn, | |
282 | uint32_t nr) | |
283 | { | |
284 | return xc_hvm_modified_memory(dmod, domid, first_pfn, nr); | |
285 | } | |
286 | ||
287 | static inline int xendevicemodel_set_mem_type( | |
288 | xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type, | |
289 | uint64_t first_pfn, uint32_t nr) | |
290 | { | |
291 | return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr); | |
292 | } | |
293 | ||
d655f34e PD |
294 | #endif |
295 | ||
296 | extern xendevicemodel_handle *xen_dmod; | |
297 | ||
8f25e754 PD |
298 | static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type, |
299 | uint64_t first_pfn, uint32_t nr) | |
300 | { | |
d655f34e PD |
301 | return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn, |
302 | nr); | |
8f25e754 PD |
303 | } |
304 | ||
305 | static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment, | |
306 | uint8_t bus, uint8_t device, | |
307 | uint8_t intx, unsigned int level) | |
308 | { | |
d655f34e PD |
309 | return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus, |
310 | device, intx, level); | |
8f25e754 PD |
311 | } |
312 | ||
313 | static inline int xen_set_pci_link_route(domid_t domid, uint8_t link, | |
314 | uint8_t irq) | |
315 | { | |
d655f34e | 316 | return xendevicemodel_set_pci_link_route(xen_dmod, domid, link, irq); |
8f25e754 PD |
317 | } |
318 | ||
319 | static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr, | |
320 | uint32_t msi_data) | |
321 | { | |
d655f34e | 322 | return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data); |
8f25e754 PD |
323 | } |
324 | ||
325 | static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq, | |
326 | unsigned int level) | |
327 | { | |
d655f34e | 328 | return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level); |
8f25e754 PD |
329 | } |
330 | ||
331 | static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn, | |
332 | uint32_t nr, unsigned long *bitmap) | |
333 | { | |
d655f34e PD |
334 | return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr, |
335 | bitmap); | |
8f25e754 PD |
336 | } |
337 | ||
338 | static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn, | |
339 | uint32_t nr) | |
340 | { | |
d655f34e | 341 | return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr); |
8f25e754 PD |
342 | } |
343 | ||
1c599472 PD |
344 | static inline int xen_restrict(domid_t domid) |
345 | { | |
14d015b6 | 346 | int rc; |
0ef4d87d | 347 | rc = xentoolcore_restrict_all(domid); |
14d015b6 | 348 | trace_xen_domid_restrict(rc ? errno : 0); |
14d015b6 | 349 | return rc; |
6aa0205e IC |
350 | } |
351 | ||
180640ea | 352 | void destroy_hvm_domain(bool reboot); |
9ce94e7c | 353 | |
eaab4d60 AK |
354 | /* shutdown/destroy current domain because of an error */ |
355 | void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2); | |
356 | ||
37f9e258 | 357 | #ifdef HVM_PARAM_VMPORT_REGS_PFN |
81daba58 | 358 | static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom, |
d01a5a3f | 359 | xen_pfn_t *vmport_regs_pfn) |
37f9e258 | 360 | { |
d01a5a3f SS |
361 | int rc; |
362 | uint64_t value; | |
363 | rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value); | |
364 | if (rc >= 0) { | |
365 | *vmport_regs_pfn = (xen_pfn_t) value; | |
366 | } | |
367 | return rc; | |
37f9e258 DS |
368 | } |
369 | #else | |
81daba58 | 370 | static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom, |
d01a5a3f | 371 | xen_pfn_t *vmport_regs_pfn) |
37f9e258 DS |
372 | { |
373 | return -ENOSYS; | |
374 | } | |
375 | #endif | |
376 | ||
/* Xen before 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600

/* Older headers may lack this constant; its ABI value is fixed at 2. */
#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
#endif

#endif
385 | ||
260cabed | 386 | static inline int xen_get_default_ioreq_server_info(domid_t dom, |
b7665c60 PD |
387 | xen_pfn_t *ioreq_pfn, |
388 | xen_pfn_t *bufioreq_pfn, | |
389 | evtchn_port_t | |
390 | *bufioreq_evtchn) | |
391 | { | |
392 | unsigned long param; | |
393 | int rc; | |
394 | ||
260cabed | 395 | rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, ¶m); |
b7665c60 PD |
396 | if (rc < 0) { |
397 | fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n"); | |
398 | return -1; | |
399 | } | |
400 | ||
401 | *ioreq_pfn = param; | |
402 | ||
260cabed | 403 | rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, ¶m); |
b7665c60 PD |
404 | if (rc < 0) { |
405 | fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n"); | |
406 | return -1; | |
407 | } | |
408 | ||
409 | *bufioreq_pfn = param; | |
410 | ||
260cabed | 411 | rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN, |
b7665c60 PD |
412 | ¶m); |
413 | if (rc < 0) { | |
414 | fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n"); | |
415 | return -1; | |
416 | } | |
417 | ||
418 | *bufioreq_evtchn = param; | |
419 | ||
420 | return 0; | |
421 | } | |
422 | ||
3996e85c | 423 | /* Xen before 4.5 */ |
f1167ee6 | 424 | #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500 |
3996e85c PD |
425 | |
426 | #ifndef HVM_PARAM_BUFIOREQ_EVTCHN | |
427 | #define HVM_PARAM_BUFIOREQ_EVTCHN 26 | |
428 | #endif | |
429 | ||
430 | #define IOREQ_TYPE_PCI_CONFIG 2 | |
431 | ||
d09952ee | 432 | typedef uint16_t ioservid_t; |
3996e85c | 433 | |
260cabed | 434 | static inline void xen_map_memory_section(domid_t dom, |
3996e85c PD |
435 | ioservid_t ioservid, |
436 | MemoryRegionSection *section) | |
437 | { | |
438 | } | |
439 | ||
260cabed | 440 | static inline void xen_unmap_memory_section(domid_t dom, |
3996e85c PD |
441 | ioservid_t ioservid, |
442 | MemoryRegionSection *section) | |
443 | { | |
444 | } | |
445 | ||
260cabed | 446 | static inline void xen_map_io_section(domid_t dom, |
3996e85c PD |
447 | ioservid_t ioservid, |
448 | MemoryRegionSection *section) | |
449 | { | |
450 | } | |
451 | ||
260cabed | 452 | static inline void xen_unmap_io_section(domid_t dom, |
3996e85c PD |
453 | ioservid_t ioservid, |
454 | MemoryRegionSection *section) | |
455 | { | |
456 | } | |
457 | ||
260cabed | 458 | static inline void xen_map_pcidev(domid_t dom, |
3996e85c PD |
459 | ioservid_t ioservid, |
460 | PCIDevice *pci_dev) | |
461 | { | |
462 | } | |
463 | ||
260cabed | 464 | static inline void xen_unmap_pcidev(domid_t dom, |
3996e85c PD |
465 | ioservid_t ioservid, |
466 | PCIDevice *pci_dev) | |
467 | { | |
468 | } | |
469 | ||
260cabed | 470 | static inline void xen_create_ioreq_server(domid_t dom, |
b7665c60 | 471 | ioservid_t *ioservid) |
3996e85c | 472 | { |
3996e85c PD |
473 | } |
474 | ||
260cabed | 475 | static inline void xen_destroy_ioreq_server(domid_t dom, |
3996e85c PD |
476 | ioservid_t ioservid) |
477 | { | |
478 | } | |
479 | ||
260cabed | 480 | static inline int xen_get_ioreq_server_info(domid_t dom, |
3996e85c PD |
481 | ioservid_t ioservid, |
482 | xen_pfn_t *ioreq_pfn, | |
483 | xen_pfn_t *bufioreq_pfn, | |
484 | evtchn_port_t *bufioreq_evtchn) | |
485 | { | |
260cabed PD |
486 | return xen_get_default_ioreq_server_info(dom, ioreq_pfn, |
487 | bufioreq_pfn, | |
b7665c60 | 488 | bufioreq_evtchn); |
3996e85c PD |
489 | } |
490 | ||
260cabed | 491 | static inline int xen_set_ioreq_server_state(domid_t dom, |
3996e85c PD |
492 | ioservid_t ioservid, |
493 | bool enable) | |
494 | { | |
495 | return 0; | |
496 | } | |
497 | ||
498 | /* Xen 4.5 */ | |
499 | #else | |
500 | ||
b7665c60 PD |
501 | static bool use_default_ioreq_server; |
502 | ||
260cabed | 503 | static inline void xen_map_memory_section(domid_t dom, |
3996e85c PD |
504 | ioservid_t ioservid, |
505 | MemoryRegionSection *section) | |
506 | { | |
507 | hwaddr start_addr = section->offset_within_address_space; | |
508 | ram_addr_t size = int128_get64(section->size); | |
509 | hwaddr end_addr = start_addr + size - 1; | |
510 | ||
b7665c60 PD |
511 | if (use_default_ioreq_server) { |
512 | return; | |
513 | } | |
514 | ||
3996e85c | 515 | trace_xen_map_mmio_range(ioservid, start_addr, end_addr); |
d655f34e PD |
516 | xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1, |
517 | start_addr, end_addr); | |
3996e85c PD |
518 | } |
519 | ||
260cabed | 520 | static inline void xen_unmap_memory_section(domid_t dom, |
3996e85c PD |
521 | ioservid_t ioservid, |
522 | MemoryRegionSection *section) | |
523 | { | |
524 | hwaddr start_addr = section->offset_within_address_space; | |
525 | ram_addr_t size = int128_get64(section->size); | |
526 | hwaddr end_addr = start_addr + size - 1; | |
527 | ||
b7665c60 PD |
528 | if (use_default_ioreq_server) { |
529 | return; | |
530 | } | |
531 | ||
3996e85c | 532 | trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr); |
d655f34e PD |
533 | xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid, |
534 | 1, start_addr, end_addr); | |
3996e85c PD |
535 | } |
536 | ||
260cabed | 537 | static inline void xen_map_io_section(domid_t dom, |
3996e85c PD |
538 | ioservid_t ioservid, |
539 | MemoryRegionSection *section) | |
540 | { | |
541 | hwaddr start_addr = section->offset_within_address_space; | |
542 | ram_addr_t size = int128_get64(section->size); | |
543 | hwaddr end_addr = start_addr + size - 1; | |
544 | ||
b7665c60 PD |
545 | if (use_default_ioreq_server) { |
546 | return; | |
547 | } | |
548 | ||
3996e85c | 549 | trace_xen_map_portio_range(ioservid, start_addr, end_addr); |
d655f34e PD |
550 | xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0, |
551 | start_addr, end_addr); | |
3996e85c PD |
552 | } |
553 | ||
260cabed | 554 | static inline void xen_unmap_io_section(domid_t dom, |
3996e85c PD |
555 | ioservid_t ioservid, |
556 | MemoryRegionSection *section) | |
557 | { | |
558 | hwaddr start_addr = section->offset_within_address_space; | |
559 | ram_addr_t size = int128_get64(section->size); | |
560 | hwaddr end_addr = start_addr + size - 1; | |
561 | ||
b7665c60 PD |
562 | if (use_default_ioreq_server) { |
563 | return; | |
564 | } | |
565 | ||
3996e85c | 566 | trace_xen_unmap_portio_range(ioservid, start_addr, end_addr); |
d655f34e PD |
567 | xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid, |
568 | 0, start_addr, end_addr); | |
3996e85c PD |
569 | } |
570 | ||
260cabed | 571 | static inline void xen_map_pcidev(domid_t dom, |
3996e85c PD |
572 | ioservid_t ioservid, |
573 | PCIDevice *pci_dev) | |
574 | { | |
b7665c60 PD |
575 | if (use_default_ioreq_server) { |
576 | return; | |
577 | } | |
578 | ||
cdc57472 | 579 | trace_xen_map_pcidev(ioservid, pci_dev_bus_num(pci_dev), |
3996e85c | 580 | PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn)); |
d655f34e | 581 | xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0, |
cdc57472 | 582 | pci_dev_bus_num(pci_dev), |
d655f34e PD |
583 | PCI_SLOT(pci_dev->devfn), |
584 | PCI_FUNC(pci_dev->devfn)); | |
3996e85c PD |
585 | } |
586 | ||
260cabed | 587 | static inline void xen_unmap_pcidev(domid_t dom, |
3996e85c PD |
588 | ioservid_t ioservid, |
589 | PCIDevice *pci_dev) | |
590 | { | |
b7665c60 PD |
591 | if (use_default_ioreq_server) { |
592 | return; | |
593 | } | |
594 | ||
cdc57472 | 595 | trace_xen_unmap_pcidev(ioservid, pci_dev_bus_num(pci_dev), |
3996e85c | 596 | PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn)); |
d655f34e | 597 | xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0, |
cdc57472 | 598 | pci_dev_bus_num(pci_dev), |
d655f34e PD |
599 | PCI_SLOT(pci_dev->devfn), |
600 | PCI_FUNC(pci_dev->devfn)); | |
3996e85c PD |
601 | } |
602 | ||
260cabed | 603 | static inline void xen_create_ioreq_server(domid_t dom, |
b7665c60 | 604 | ioservid_t *ioservid) |
3996e85c | 605 | { |
d655f34e PD |
606 | int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom, |
607 | HVM_IOREQSRV_BUFIOREQ_ATOMIC, | |
608 | ioservid); | |
3996e85c PD |
609 | |
610 | if (rc == 0) { | |
611 | trace_xen_ioreq_server_create(*ioservid); | |
b7665c60 | 612 | return; |
3996e85c PD |
613 | } |
614 | ||
b7665c60 PD |
615 | *ioservid = 0; |
616 | use_default_ioreq_server = true; | |
617 | trace_xen_default_ioreq_server(); | |
3996e85c PD |
618 | } |
619 | ||
260cabed | 620 | static inline void xen_destroy_ioreq_server(domid_t dom, |
3996e85c PD |
621 | ioservid_t ioservid) |
622 | { | |
b7665c60 PD |
623 | if (use_default_ioreq_server) { |
624 | return; | |
625 | } | |
626 | ||
3996e85c | 627 | trace_xen_ioreq_server_destroy(ioservid); |
d655f34e | 628 | xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid); |
3996e85c PD |
629 | } |
630 | ||
260cabed | 631 | static inline int xen_get_ioreq_server_info(domid_t dom, |
3996e85c PD |
632 | ioservid_t ioservid, |
633 | xen_pfn_t *ioreq_pfn, | |
634 | xen_pfn_t *bufioreq_pfn, | |
635 | evtchn_port_t *bufioreq_evtchn) | |
636 | { | |
b7665c60 | 637 | if (use_default_ioreq_server) { |
260cabed | 638 | return xen_get_default_ioreq_server_info(dom, ioreq_pfn, |
b7665c60 PD |
639 | bufioreq_pfn, |
640 | bufioreq_evtchn); | |
641 | } | |
642 | ||
d655f34e PD |
643 | return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid, |
644 | ioreq_pfn, bufioreq_pfn, | |
645 | bufioreq_evtchn); | |
3996e85c PD |
646 | } |
647 | ||
260cabed | 648 | static inline int xen_set_ioreq_server_state(domid_t dom, |
3996e85c PD |
649 | ioservid_t ioservid, |
650 | bool enable) | |
651 | { | |
b7665c60 PD |
652 | if (use_default_ioreq_server) { |
653 | return 0; | |
654 | } | |
655 | ||
3996e85c | 656 | trace_xen_ioreq_server_state(ioservid, enable); |
d655f34e PD |
657 | return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid, |
658 | enable); | |
3996e85c PD |
659 | } |
660 | ||
661 | #endif | |
662 | ||
b6eb9b45 PS |
663 | /* Xen before 4.8 */ |
664 | ||
f1167ee6 | 665 | #if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800 |
b6eb9b45 | 666 | |
5c0d914a PD |
667 | struct xengnttab_grant_copy_segment { |
668 | union xengnttab_copy_ptr { | |
669 | void *virt; | |
670 | struct { | |
671 | uint32_t ref; | |
672 | uint16_t offset; | |
673 | uint16_t domid; | |
674 | } foreign; | |
675 | } source, dest; | |
676 | uint16_t len; | |
677 | uint16_t flags; | |
678 | int16_t status; | |
679 | }; | |
680 | ||
681 | typedef struct xengnttab_grant_copy_segment xengnttab_grant_copy_segment_t; | |
b6eb9b45 PS |
682 | |
683 | static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count, | |
684 | xengnttab_grant_copy_segment_t *segs) | |
685 | { | |
686 | return -ENOSYS; | |
687 | } | |
688 | #endif | |
689 | ||
d94f9486 | 690 | #endif /* QEMU_HW_XEN_COMMON_H */ |