/*
 * Copyright (C) 2010       Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <sys/mman.h>

#include "hw/pci.h"
#include "hw/pc.h"
#include "hw/xen_common.h"
#include "hw/xen_backend.h"

#include "range.h"
#include "xen-mapcache.h"
#include "trace.h"

#include <xen/hvm/ioreq.h>
#include <xen/hvm/params.h>

//#define DEBUG_XEN

#ifdef DEBUG_XEN
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* Compatibility with older versions of Xen */
#if __XEN_LATEST_INTERFACE_VERSION__ < 0x0003020a
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_iodata[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_iodata[vcpu].vp_ioreq;
}
#  define FMT_ioreq_size PRIx64
#else
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_ioreq[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_ioreq[vcpu];
}
#  define FMT_ioreq_size "u"
#endif
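
/* Interval, in milliseconds, at which the buffered-I/O ring is drained
 * while the VM is running (see handle_buffered_io() below). */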
#define BUFFER_IO_MAX_DELAY  100

typedef struct XenPhysmap {
    target_phys_addr_t start_addr;
    ram_addr_t size;
    target_phys_addr_t phys_offset;

    QLIST_ENTRY(XenPhysmap) list;
} XenPhysmap;

typedef struct XenIOState {
    shared_iopage_t *shared_page;
    buffered_iopage_t *buffered_io_page;
    QEMUTimer *buffered_io_timer;
    /* the evtchn port for polling the notification */
    evtchn_port_t *ioreq_local_port;
    /* the evtchn fd for polling */
    XenEvtchn xce_handle;
    /* which vcpu we are serving */
    int send_vcpu;

    struct xs_handle *xenstore;
    CPUPhysMemoryClient client;
    QLIST_HEAD(, XenPhysmap) physmap;
    const XenPhysmap *log_for_dirtybit;

    Notifier exit;
} XenIOState;
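
/*
 * The PIRQ callbacks below pack a (slot, INTx pin) pair into a single
 * integer: xen_pci_slot_get_pirq() returns (slot << 2) | pin, and
 * xen_piix3_set_irq() unpacks it again as device = irq_num >> 2,
 * intx = irq_num & 3 before handing the level to Xen.  For example,
 * slot 3 (devfn >> 3 == 3), INTB (pin 1) becomes 13 == (3 << 2) | 1.
 */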

/* Xen specific function for piix pci */

int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
    return irq_num + ((pci_dev->devfn >> 3) << 2);
}

void xen_piix3_set_irq(void *opaque, int irq_num, int level)
{
    xc_hvm_set_pci_intx_level(xen_xc, xen_domid, 0, 0, irq_num >> 2,
                              irq_num & 3, level);
}

void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
{
    int i;

    /* Scan for updates to PCI link routes (0x60-0x63). */
    for (i = 0; i < len; i++) {
        uint8_t v = (val >> (8 * i)) & 0xff;
        if (v & 0x80) {
            v = 0;    /* a set high bit marks the link as disabled */
        }
        v &= 0xf;
        if (((address + i) >= 0x60) && ((address + i) <= 0x63)) {
            xc_hvm_set_pci_link_route(xen_xc, xen_domid, address + i - 0x60, v);
        }
    }
}

void xen_cmos_set_s3_resume(void *opaque, int irq, int level)
{
    pc_cmos_set_s3_resume(opaque, irq, level);
    if (level) {
        xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
    }
}

/* Xen Interrupt Controller */

static void xen_set_irq(void *opaque, int irq, int level)
{
    xc_hvm_set_isa_irq_level(xen_xc, xen_domid, irq, level);
}

qemu_irq *xen_interrupt_controller_init(void)
{
    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
}
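
/* Memory Ops */

/*
 * Note the split at 0xe0000000 below: guest RAM above that boundary is
 * registered starting at 4GB so that the hole just under 4GB stays free
 * for PCI devices and the BIOS, matching the usual PC layout.  The dirty
 * bitmap is initialized to all-dirty (0xff), mirroring what
 * qemu_ram_alloc() does for ordinary RAM blocks.
 */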

static void xen_ram_init(ram_addr_t ram_size)
{
    RAMBlock *new_block;
    ram_addr_t below_4g_mem_size, above_4g_mem_size = 0;

    new_block = qemu_mallocz(sizeof (*new_block));
    pstrcpy(new_block->idstr, sizeof (new_block->idstr), "xen.ram");
    new_block->host = NULL;
    new_block->offset = 0;
    new_block->length = ram_size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       new_block->length >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, new_block->length >> TARGET_PAGE_BITS);

    if (ram_size >= 0xe0000000) {
        above_4g_mem_size = ram_size - 0xe0000000;
        below_4g_mem_size = 0xe0000000;
    } else {
        below_4g_mem_size = ram_size;
    }

    cpu_register_physical_memory(0, below_4g_mem_size, new_block->offset);
#if TARGET_PHYS_ADDR_BITS > 32
    if (above_4g_mem_size > 0) {
        cpu_register_physical_memory(0x100000000ULL, above_4g_mem_size,
                                     new_block->offset + below_4g_mem_size);
    }
#endif
}

void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size)
{
    unsigned long nr_pfn;
    xen_pfn_t *pfn_list;
    int i;

    trace_xen_ram_alloc(ram_addr, size);

    nr_pfn = size >> TARGET_PAGE_BITS;
    pfn_list = qemu_malloc(sizeof (*pfn_list) * nr_pfn);

    for (i = 0; i < nr_pfn; i++) {
        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
    }

    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
        hw_error("xen: failed to populate ram at %lx", ram_addr);
    }

    qemu_free(pfn_list);
}
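
/*
 * The physmap list remembers guest-physical ranges that have been
 * relocated with xc_domain_add_to_physmap() (the video RAM, in
 * practice), so that later dirty-logging requests can be redirected to
 * the RAM offset that actually backs them.
 */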

static XenPhysmap *get_physmapping(XenIOState *state,
                                   target_phys_addr_t start_addr, ram_addr_t size)
{
    XenPhysmap *physmap = NULL;

    start_addr &= TARGET_PAGE_MASK;

    QLIST_FOREACH(physmap, &state->physmap, list) {
        if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
            return physmap;
        }
    }
    return NULL;
}

#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 340
static int xen_add_to_physmap(XenIOState *state,
                              target_phys_addr_t start_addr,
                              ram_addr_t size,
                              target_phys_addr_t phys_offset)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    target_phys_addr_t pfn, start_gpfn;

    if (get_physmapping(state, start_addr, size)) {
        return 0;
    }
    if (size <= 0) {
        return -1;
    }

    DPRINTF("mapping vram to %llx - %llx, from %llx\n",
            start_addr, start_addr + size, phys_offset);

    pfn = phys_offset >> TARGET_PAGE_BITS;
    start_gpfn = start_addr >> TARGET_PAGE_BITS;
    for (i = 0; i < size >> TARGET_PAGE_BITS; i++) {
        unsigned long idx = pfn + i;
        xen_pfn_t gpfn = start_gpfn + i;

        rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            DPRINTF("add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
            return -rc;
        }
    }

    physmap = qemu_malloc(sizeof (XenPhysmap));

    physmap->start_addr = start_addr;
    physmap->size = size;
    physmap->phys_offset = phys_offset;

    QLIST_INSERT_HEAD(&state->physmap, physmap, list);

    xc_domain_pin_memory_cacheattr(xen_xc, xen_domid,
                                   start_addr >> TARGET_PAGE_BITS,
                                   (start_addr + size) >> TARGET_PAGE_BITS,
                                   XEN_DOMCTL_MEM_CACHEATTR_WB);
    return 0;
}

static int xen_remove_from_physmap(XenIOState *state,
                                   target_phys_addr_t start_addr,
                                   ram_addr_t size)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    target_phys_addr_t phys_offset = 0;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        return -1;
    }

    phys_offset = physmap->phys_offset;
    size = physmap->size;

    DPRINTF("unmapping vram to %llx - %llx, from %llx\n",
            phys_offset, phys_offset + size, start_addr);

    size >>= TARGET_PAGE_BITS;
    start_addr >>= TARGET_PAGE_BITS;
    phys_offset >>= TARGET_PAGE_BITS;
    for (i = 0; i < size; i++) {
        unsigned long idx = start_addr + i;
        xen_pfn_t gpfn = phys_offset + i;

        rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            fprintf(stderr, "add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
            return -rc;
        }
    }

    QLIST_REMOVE(physmap, list);
    if (state->log_for_dirtybit == physmap) {
        state->log_for_dirtybit = NULL;
    }
    qemu_free(physmap);

    return 0;
}

#else
static int xen_add_to_physmap(XenIOState *state,
                              target_phys_addr_t start_addr,
                              ram_addr_t size,
                              target_phys_addr_t phys_offset)
{
    return -ENOSYS;
}

static int xen_remove_from_physmap(XenIOState *state,
                                   target_phys_addr_t start_addr,
                                   ram_addr_t size)
{
    return -ENOSYS;
}
#endif

static void xen_client_set_memory(struct CPUPhysMemoryClient *client,
                                  target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset,
                                  bool log_dirty)
{
    XenIOState *state = container_of(client, XenIOState, client);
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    hvmmem_type_t mem_type;

    if (!(start_addr != phys_offset
          && ( (log_dirty && flags < IO_MEM_UNASSIGNED)
               || (!log_dirty && flags == IO_MEM_UNASSIGNED)))) {
        return;
    }

    trace_xen_client_set_memory(start_addr, size, phys_offset, log_dirty);

    start_addr &= TARGET_PAGE_MASK;
    size = TARGET_PAGE_ALIGN(size);
    phys_offset &= TARGET_PAGE_MASK;

    switch (flags) {
    case IO_MEM_RAM:
        xen_add_to_physmap(state, start_addr, size, phys_offset);
        break;
    case IO_MEM_ROM:
        mem_type = HVMMEM_ram_ro;
        if (xc_hvm_set_mem_type(xen_xc, xen_domid, mem_type,
                                start_addr >> TARGET_PAGE_BITS,
                                size >> TARGET_PAGE_BITS)) {
            DPRINTF("xc_hvm_set_mem_type error, addr: "TARGET_FMT_plx"\n",
                    start_addr);
        }
        break;
    case IO_MEM_UNASSIGNED:
        if (xen_remove_from_physmap(state, start_addr, size) < 0) {
            DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
        }
        break;
    }
}
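
/*
 * Xen's HVM dirty-VRAM tracking can watch only one region at a time, so
 * log_for_dirtybit latches the physmap entry being tracked; a request
 * for any other region is refused until tracking stops.
 */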

static int xen_sync_dirty_bitmap(XenIOState *state,
                                 target_phys_addr_t start_addr,
                                 ram_addr_t size)
{
    target_phys_addr_t npages = size >> TARGET_PAGE_BITS;
    target_phys_addr_t vram_offset = 0;
    const int width = sizeof(unsigned long) * 8;
    unsigned long bitmap[(npages + width - 1) / width];
    int rc, i, j;
    const XenPhysmap *physmap = NULL;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        /* not handled */
        return -1;
    }

    if (state->log_for_dirtybit == NULL) {
        state->log_for_dirtybit = physmap;
    } else if (state->log_for_dirtybit != physmap) {
        return -1;
    }
    vram_offset = physmap->phys_offset;

    rc = xc_hvm_track_dirty_vram(xen_xc, xen_domid,
                                 start_addr >> TARGET_PAGE_BITS, npages,
                                 bitmap);
    if (rc) {
        return rc;
    }

    for (i = 0; i < ARRAY_SIZE(bitmap); i++) {
        unsigned long map = bitmap[i];
        while (map != 0) {
            j = ffsl(map) - 1;
            map &= ~(1ul << j);
            cpu_physical_memory_set_dirty(vram_offset + (i * width + j) * TARGET_PAGE_SIZE);
        }
    }

    return 0;
}

static int xen_log_start(CPUPhysMemoryClient *client, target_phys_addr_t phys_addr, ram_addr_t size)
{
    XenIOState *state = container_of(client, XenIOState, client);

    return xen_sync_dirty_bitmap(state, phys_addr, size);
}

static int xen_log_stop(CPUPhysMemoryClient *client, target_phys_addr_t phys_addr, ram_addr_t size)
{
    XenIOState *state = container_of(client, XenIOState, client);

    state->log_for_dirtybit = NULL;
    /* Disable dirty bit tracking */
    return xc_hvm_track_dirty_vram(xen_xc, xen_domid, 0, 0, NULL);
}

static int xen_client_sync_dirty_bitmap(struct CPUPhysMemoryClient *client,
                                        target_phys_addr_t start_addr,
                                        target_phys_addr_t end_addr)
{
    XenIOState *state = container_of(client, XenIOState, client);

    return xen_sync_dirty_bitmap(state, start_addr, end_addr - start_addr);
}

static int xen_client_migration_log(struct CPUPhysMemoryClient *client,
                                    int enable)
{
    return 0;
}

static CPUPhysMemoryClient xen_cpu_phys_memory_client = {
    .set_memory = xen_client_set_memory,
    .sync_dirty_bitmap = xen_client_sync_dirty_bitmap,
    .migration_log = xen_client_migration_log,
    .log_start = xen_log_start,
    .log_stop = xen_log_stop,
};

/* VCPU Operations, MMIO, IO ring ... */

static void xen_reset_vcpu(void *opaque)
{
    CPUState *env = opaque;

    env->halted = 1;
}

void xen_vcpu_init(void)
{
    CPUState *first_cpu;

    if ((first_cpu = qemu_get_cpu(0))) {
        qemu_register_reset(xen_reset_vcpu, first_cpu);
        xen_reset_vcpu(first_cpu);
    }
}

/* get the ioreq packets from shared memory */
static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
{
    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);

    if (req->state != STATE_IOREQ_READY) {
        DPRINTF("I/O request not ready: "
                "%x, ptr: %x, port: %"PRIx64", "
                "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}

/*
 * Use poll to get the port notification.  Returns the pending ioreq for
 * the vcpu whose event channel fired, or NULL if nothing was pending.
 */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
    int i;
    evtchn_port_t port;

    port = xc_evtchn_pending(state->xce_handle);
    if (port != -1) {
        for (i = 0; i < smp_cpus; i++) {
            if (state->ioreq_local_port[i] == port) {
                break;
            }
        }

        if (i == smp_cpus) {
            hw_error("Fatal error while trying to get io event!\n");
        }

        /* unmask the wanted port again */
        xc_evtchn_unmask(state->xce_handle, port);

        /* get the io packet from shared memory */
        state->send_vcpu = i;
        return cpu_get_ioreq_from_shared_memory(state, i);
    }

    /* read error or read nothing */
    return NULL;
}

static uint32_t do_inp(pio_addr_t addr, unsigned long size)
{
    switch (size) {
        case 1:
            return cpu_inb(addr);
        case 2:
            return cpu_inw(addr);
        case 4:
            return cpu_inl(addr);
        default:
            hw_error("inp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}

static void do_outp(pio_addr_t addr,
        unsigned long size, uint32_t val)
{
    switch (size) {
        case 1:
            return cpu_outb(addr, val);
        case 2:
            return cpu_outw(addr, val);
        case 4:
            return cpu_outl(addr, val);
        default:
            hw_error("outp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}

static void cpu_ioreq_pio(ioreq_t *req)
{
    int i, sign;

    sign = req->df ? -1 : 1;

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(req->addr, req->size);
        } else {
            uint32_t tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(req->addr, req->size);
                cpu_physical_memory_write(req->data + (sign * i * req->size),
                        (uint8_t *) &tmp, req->size);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            do_outp(req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                uint32_t tmp = 0;

                cpu_physical_memory_read(req->data + (sign * i * req->size),
                        (uint8_t*) &tmp, req->size);
                do_outp(req->addr, req->size, tmp);
            }
        }
    }
}

static void cpu_ioreq_move(ioreq_t *req)
{
    int i, sign;

    sign = req->df ? -1 : 1;

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_read(req->addr + (sign * i * req->size),
                        (uint8_t *) &req->data, req->size);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_write(req->addr + (sign * i * req->size),
                        (uint8_t *) &req->data, req->size);
            }
        }
    } else {
        uint64_t tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_read(req->addr + (sign * i * req->size),
                        (uint8_t*) &tmp, req->size);
                cpu_physical_memory_write(req->data + (sign * i * req->size),
                        (uint8_t*) &tmp, req->size);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_read(req->data + (sign * i * req->size),
                        (uint8_t*) &tmp, req->size);
                cpu_physical_memory_write(req->addr + (sign * i * req->size),
                        (uint8_t*) &tmp, req->size);
            }
        }
    }
}

static void handle_ioreq(ioreq_t *req)
{
    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
            (req->size < sizeof (target_ulong))) {
        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
    }

    switch (req->type) {
        case IOREQ_TYPE_PIO:
            cpu_ioreq_pio(req);
            break;
        case IOREQ_TYPE_COPY:
            cpu_ioreq_move(req);
            break;
        case IOREQ_TYPE_TIMEOFFSET:
            break;
        case IOREQ_TYPE_INVALIDATE:
            qemu_invalidate_map_cache();
            break;
        default:
            hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
}
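
/*
 * Buffered ioreqs (typically VGA writes) are batched in a ring page:
 * Xen is the producer, advancing write_pointer, and we are the
 * consumer, advancing read_pointer.  Each slot holds only 32 bits of
 * data, so an 8-byte access is split across two consecutive slots,
 * which is why a "qw" request consumes two entries below.
 */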

static void handle_buffered_iopage(XenIOState *state)
{
    buf_ioreq_t *buf_req = NULL;
    ioreq_t req;
    int qw;

    if (!state->buffered_io_page) {
        return;
    }

    while (state->buffered_io_page->read_pointer != state->buffered_io_page->write_pointer) {
        buf_req = &state->buffered_io_page->buf_ioreq[
            state->buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1UL << buf_req->size;
        req.count = 1;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.state = STATE_IOREQ_READY;
        req.dir = buf_req->dir;
        req.df = 1;
        req.type = buf_req->type;
        req.data_is_ptr = 0;
        qw = (req.size == 8);
        if (qw) {
            buf_req = &state->buffered_io_page->buf_ioreq[
                (state->buffered_io_page->read_pointer + 1) % IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
        }

        handle_ioreq(&req);

        xen_mb();
        state->buffered_io_page->read_pointer += qw ? 2 : 1;
    }
}

static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    handle_buffered_iopage(state);
    qemu_mod_timer(state->buffered_io_timer,
                   BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
}
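
/*
 * Main ioreq dispatch: a request moves through STATE_IOREQ_READY
 * (posted by Xen), STATE_IOREQ_INPROCESS (claimed by us) and
 * STATE_IORESP_READY (answered), with the final event-channel notify
 * telling Xen that the response is in place.
 */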

static void cpu_handle_ioreq(void *opaque)
{
    XenIOState *state = opaque;
    ioreq_t *req = cpu_get_ioreq(state);

    handle_buffered_iopage(state);
    if (req) {
        handle_ioreq(req);

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(stderr, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size);
            destroy_hvm_domain();
            return;
        }

        xen_wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (vm_running) {
            if (qemu_shutdown_requested_get()) {
                destroy_hvm_domain();
            }
            if (qemu_reset_requested_get()) {
                qemu_system_reset();
            }
        }

        req->state = STATE_IORESP_READY;
        xc_evtchn_notify(state->xce_handle, state->ioreq_local_port[state->send_vcpu]);
    }
}

static void xenstore_record_dm_state(XenIOState *s, const char *state)
{
    char path[50];

    snprintf(path, sizeof (path), "/local/domain/0/device-model/%u/state", xen_domid);
    if (!xs_write(s->xenstore, XBT_NULL, path, state, strlen(state))) {
        fprintf(stderr, "error recording dm state\n");
        exit(1);
    }
}

static void xen_main_loop_prepare(XenIOState *state)
{
    int evtchn_fd = -1;

    if (state->xce_handle != XC_HANDLER_INITIAL_VALUE) {
        evtchn_fd = xc_evtchn_fd(state->xce_handle);
    }

    state->buffered_io_timer = qemu_new_timer_ms(rt_clock, handle_buffered_io,
                                                 state);
    qemu_mod_timer(state->buffered_io_timer, qemu_get_clock_ms(rt_clock));

    if (evtchn_fd != -1) {
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
    }

    /* record state running */
    xenstore_record_dm_state(state, "running");
}

static void xen_vm_change_state_handler(void *opaque, int running, int reason)
{
    XenIOState *state = opaque;
    if (running) {
        xen_main_loop_prepare(state);
    }
}
*n
)
770 XenIOState
*state
= container_of(n
, XenIOState
, exit
);
772 xc_evtchn_close(state
->xce_handle
);
773 xs_daemon_close(state
->xenstore
);

int xen_init(void)
{
    xen_xc = xen_xc_interface_open(0, 0, 0);
    if (xen_xc == XC_HANDLER_INITIAL_VALUE) {
        xen_be_printf(NULL, 0, "can't open xen interface\n");
        return -1;
    }

    return 0;
}
int xen_hvm_init(void)
{
    int i, rc;
    unsigned long ioreq_pfn;
    XenIOState *state;

    state = qemu_mallocz(sizeof (XenIOState));

    state->xce_handle = xen_xc_evtchn_open(NULL, 0);
    if (state->xce_handle == XC_HANDLER_INITIAL_VALUE) {
        perror("xen: event channel open");
        return -errno;
    }

    state->xenstore = xs_daemon_open();
    if (state->xenstore == NULL) {
        perror("xen: xenstore open");
        return -errno;
    }

    state->exit.notify = xen_exit_notifier;
    qemu_add_exit_notifier(&state->exit);

    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
    DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
    state->shared_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                              PROT_READ|PROT_WRITE, ioreq_pfn);
    if (state->shared_page == NULL) {
        hw_error("map shared IO page returned error %d handle=" XC_INTERFACE_FMT,
                 errno, xen_xc);
    }

    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
    DPRINTF("buffered io page at pfn %lx\n", ioreq_pfn);
    state->buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                                   PROT_READ|PROT_WRITE, ioreq_pfn);
    if (state->buffered_io_page == NULL) {
        hw_error("map buffered IO page returned error %d", errno);
    }

    state->ioreq_local_port = qemu_mallocz(smp_cpus * sizeof (evtchn_port_t));

    /* FIXME: how about if we overflow the page here? */
    for (i = 0; i < smp_cpus; i++) {
        rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                        xen_vcpu_eport(state->shared_page, i));
        if (rc == -1) {
            fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
            return -1;
        }
        state->ioreq_local_port[i] = rc;
    }

    /* Init RAM management */
    qemu_map_cache_init();
    xen_ram_init(ram_size);

    qemu_add_vm_change_state_handler(xen_vm_change_state_handler, state);

    state->client = xen_cpu_phys_memory_client;
    QLIST_INIT(&state->physmap);
    cpu_register_phys_memory_client(&state->client);
    state->log_for_dirtybit = NULL;

    return 0;
}
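
/*
 * Ask Xen to power the domain off; used both on normal shutdown and
 * when an ioreq goes bad, since continuing would leave the guest's I/O
 * in an undefined state.
 */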
void destroy_hvm_domain(void)
{
    XenXC xc_handle;
    int sts;

    xc_handle = xen_xc_interface_open(0, 0, 0);
    if (xc_handle == XC_HANDLER_INITIAL_VALUE) {
        fprintf(stderr, "Cannot acquire xenctrl handle\n");
    } else {
        sts = xc_domain_shutdown(xc_handle, xen_domid, SHUTDOWN_poweroff);
        if (sts != 0) {
            fprintf(stderr, "? xc_domain_shutdown failed to issue poweroff, "
                    "sts %d, %s\n", sts, strerror(errno));
        } else {
            fprintf(stderr, "Issued domain %d poweroff\n", xen_domid);
        }
        xc_interface_close(xc_handle);
    }
}