/*
 * QEMU PowerPC sPAPR XIVE interrupt controller model
 *
 * Copyright (c) 2017-2019, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/spapr_xive.h"
#include "hw/ppc/xive.h"
#include "kvm_ppc.h"

#include <sys/ioctl.h>
/*
 * Helpers for CPU hotplug
 *
 * TODO: make a common KVMEnabledCPU layer for XICS and XIVE
 */
typedef struct KVMEnabledCPU {
    unsigned long vcpu_id;
    QLIST_ENTRY(KVMEnabledCPU) node;
} KVMEnabledCPU;

static QLIST_HEAD(, KVMEnabledCPU)
    kvm_enabled_cpus = QLIST_HEAD_INITIALIZER(&kvm_enabled_cpus);
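/*
 * The list records which vCPUs have already been connected to the KVM
 * device, so that a CPU that is hot unplugged and then replugged is
 * not connected twice.
 */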
static bool kvm_cpu_is_enabled(CPUState *cs)
{
    KVMEnabledCPU *enabled_cpu;
    unsigned long vcpu_id = kvm_arch_vcpu_id(cs);

    QLIST_FOREACH(enabled_cpu, &kvm_enabled_cpus, node) {
        if (enabled_cpu->vcpu_id == vcpu_id) {
            return true;
        }
    }
    return false;
}
static void kvm_cpu_enable(CPUState *cs)
{
    KVMEnabledCPU *enabled_cpu;
    unsigned long vcpu_id = kvm_arch_vcpu_id(cs);

    enabled_cpu = g_malloc(sizeof(*enabled_cpu));
    enabled_cpu->vcpu_id = vcpu_id;
    QLIST_INSERT_HEAD(&kvm_enabled_cpus, enabled_cpu, node);
}
static void kvm_cpu_disable_all(void)
{
    KVMEnabledCPU *enabled_cpu, *next;

    QLIST_FOREACH_SAFE(enabled_cpu, &kvm_enabled_cpus, node, next) {
        QLIST_REMOVE(enabled_cpu, node);
        g_free(enabled_cpu);
    }
}
/*
 * XIVE Thread Interrupt Management context (KVM)
 */
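/*
 * Only the first double word of the KVM_REG_PPC_VP_STATE register,
 * which maps word0 and word1 of the thread's OS ring, is exchanged
 * with KVM below; state[1] is currently left unused by QEMU.
 */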
int kvmppc_xive_cpu_set_state(XiveTCTX *tctx, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(tctx->xptr);
    uint64_t state[2] = { 0 };
    int ret;

    assert(xive->fd != -1);

    /* word0 and word1 of the OS ring. */
    state[0] = *((uint64_t *) &tctx->regs[TM_QW1_OS]);

    ret = kvm_set_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state);
    if (ret != 0) {
        error_setg_errno(errp, -ret,
                         "XIVE: could not restore KVM state of CPU %ld",
                         kvm_arch_vcpu_id(tctx->cs));
        return ret;
    }

    return 0;
}
int kvmppc_xive_cpu_get_state(XiveTCTX *tctx, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(tctx->xptr);
    uint64_t state[2] = { 0 };
    int ret;

    assert(xive->fd != -1);

    ret = kvm_get_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state);
    if (ret != 0) {
        error_setg_errno(errp, -ret,
                         "XIVE: could not capture KVM state of CPU %ld",
                         kvm_arch_vcpu_id(tctx->cs));
        return ret;
    }

    /* word0 and word1 of the OS ring. */
    *((uint64_t *) &tctx->regs[TM_QW1_OS]) = state[0];

    return 0;
}
typedef struct {
    XiveTCTX *tctx;
    Error **errp;
    int ret;
} XiveCpuGetState;

static void kvmppc_xive_cpu_do_synchronize_state(CPUState *cpu,
                                                 run_on_cpu_data arg)
{
    XiveCpuGetState *s = arg.host_ptr;

    s->ret = kvmppc_xive_cpu_get_state(s->tctx, s->errp);
}

int kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp)
{
    XiveCpuGetState s = {
        .tctx = tctx,
        .errp = errp,
    };

    /*
     * Kick the vCPU to make sure it is available for the KVM ioctl.
     */
    run_on_cpu(tctx->cs, kvmppc_xive_cpu_do_synchronize_state,
               RUN_ON_CPU_HOST_PTR(&s));

    return s.ret;
}
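/*
 * Connecting a vCPU presenter is a per-vCPU KVM capability: enabling
 * KVM_CAP_PPC_IRQ_XIVE associates the vCPU with the XIVE device fd
 * and the server number it is targeted with.
 */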
int kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(tctx->xptr);
    unsigned long vcpu_id;
    int ret;

    assert(xive->fd != -1);

    /* Check if CPU was hot unplugged and replugged. */
    if (kvm_cpu_is_enabled(tctx->cs)) {
        return 0;
    }

    vcpu_id = kvm_arch_vcpu_id(tctx->cs);

    ret = kvm_vcpu_enable_cap(tctx->cs, KVM_CAP_PPC_IRQ_XIVE, 0, xive->fd,
                              vcpu_id, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "XIVE: unable to connect CPU%ld to KVM device",
                         vcpu_id);
        if (ret == -ENOSPC) {
            error_append_hint(errp, "Try -smp maxcpus=N with N < %u\n",
                              MACHINE(qdev_get_machine())->smp.max_cpus);
        }
        return ret;
    }

    kvm_cpu_enable(tctx->cs);
    return 0;
}
/*
 * XIVE Interrupt Source (KVM)
 */
int kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas,
                                  Error **errp)
{
    uint32_t end_idx;
    uint32_t end_blk;
    uint8_t priority;
    uint32_t server;
    bool masked;
    uint32_t eisn;
    uint64_t kvm_src;

    assert(xive_eas_is_valid(eas));

    end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
    end_blk = xive_get_field64(EAS_END_BLOCK, eas->w);
    eisn = xive_get_field64(EAS_END_DATA, eas->w);
    masked = xive_eas_is_masked(eas);

    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);
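    /*
     * The source targeting is handed to KVM as a single 64-bit word
     * packing the priority, server, masked bit and EISN with the
     * KVM_XIVE_SOURCE_* shifts and masks.
     */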
    kvm_src = priority << KVM_XIVE_SOURCE_PRIORITY_SHIFT &
        KVM_XIVE_SOURCE_PRIORITY_MASK;
    kvm_src |= server << KVM_XIVE_SOURCE_SERVER_SHIFT &
        KVM_XIVE_SOURCE_SERVER_MASK;
    kvm_src |= ((uint64_t) masked << KVM_XIVE_SOURCE_MASKED_SHIFT) &
        KVM_XIVE_SOURCE_MASKED_MASK;
    kvm_src |= ((uint64_t) eisn << KVM_XIVE_SOURCE_EISN_SHIFT) &
        KVM_XIVE_SOURCE_EISN_MASK;

    return kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_CONFIG, lisn,
                             &kvm_src, true, errp);
}
void kvmppc_xive_sync_source(SpaprXive *xive, uint32_t lisn, Error **errp)
{
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_SYNC, lisn,
                      NULL, true, errp);
}
/*
 * At reset, the interrupt sources are simply created and MASKED. We
 * only need to inform the KVM XIVE device about their type: LSI or
 * MSI.
 */
int kvmppc_xive_source_reset_one(XiveSource *xsrc, int srcno, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
    uint64_t state = 0;

    assert(xive->fd != -1);

    if (xive_source_irq_is_lsi(xsrc, srcno)) {
        state |= KVM_XIVE_LEVEL_SENSITIVE;
        if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
            state |= KVM_XIVE_LEVEL_ASSERTED;
        }
    }

    return kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE, srcno, &state,
                             true, errp);
}
static int kvmppc_xive_source_reset(XiveSource *xsrc, Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        int ret;

        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        ret = kvmppc_xive_source_reset_one(xsrc, i, errp);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}
/*
 * This is used to perform the magic loads on the ESB pages, described
 * in xive.h.
 *
 * Memory barriers should not be needed for loads (no store for now).
 */
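/*
 * Loads from the management page have side effects selected by the
 * page offset (XIVE_ESB_GET, XIVE_ESB_SET_PQ_*, ...): they can fetch
 * or update the PQ state bits of the source.
 */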
static uint64_t xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset,
                            uint64_t data, bool write)
{
    uint64_t *addr = xsrc->esb_mmap + xive_source_esb_mgmt(xsrc, srcno) +
        offset;

    if (write) {
        *addr = cpu_to_be64(data);
        return -1;
    } else {
        /* Prevent the compiler from optimizing away the load */
        volatile uint64_t value = be64_to_cpu(*addr);
        return value;
    }
}
static uint8_t xive_esb_read(XiveSource *xsrc, int srcno, uint32_t offset)
{
    return xive_esb_rw(xsrc, srcno, offset, 0, 0) & 0x3;
}
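/*
 * A store of any value to the trigger page fires the interrupt, as if
 * the hardware source had been pulsed.
 */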
static void xive_esb_trigger(XiveSource *xsrc, int srcno)
{
    uint64_t *addr = xsrc->esb_mmap + xive_source_esb_page(xsrc, srcno);

    *addr = 0x0;
}
uint64_t kvmppc_xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset,
                            uint64_t data, bool write)
{
    if (write) {
        return xive_esb_rw(xsrc, srcno, offset, data, 1);
    }

    /*
     * Special Load EOI handling for LSI sources. Q bit is never set
     * and the interrupt should be re-triggered if the level is still
     * asserted.
     */
    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        offset == XIVE_ESB_LOAD_EOI) {
        xive_esb_read(xsrc, srcno, XIVE_ESB_SET_PQ_00);
        if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
            xive_esb_trigger(xsrc, srcno);
        }
        return 0;
    } else {
        return xive_esb_rw(xsrc, srcno, offset, 0, 0);
    }
}
static void kvmppc_xive_source_get_state(XiveSource *xsrc)
{
    SpaprXive *xive = SPAPR_XIVE(xsrc->xive);
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq;

        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        /* Perform a load without side effect to retrieve the PQ bits */
        pq = xive_esb_read(xsrc, i, XIVE_ESB_GET);

        /* and save PQ locally */
        xive_source_esb_set(xsrc, i, pq);
    }
}
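/*
 * MSI sources only trigger on a rising input (val != 0), while LSI
 * sources track the assertion level in xsrc->status so that an EOI
 * can re-trigger a still-asserted line.
 */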
void kvmppc_xive_source_set_irq(void *opaque, int srcno, int val)
{
    XiveSource *xsrc = opaque;

    if (!xive_source_irq_is_lsi(xsrc, srcno)) {
        if (!val) {
            return;
        }
    } else {
        if (val) {
            xsrc->status[srcno] |= XIVE_STATUS_ASSERTED;
        } else {
            xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED;
        }
    }

    xive_esb_trigger(xsrc, srcno);
}
/*
 * sPAPR XIVE interrupt controller (KVM)
 */
int kvmppc_xive_get_queue_config(SpaprXive *xive, uint8_t end_blk,
                                 uint32_t end_idx, XiveEND *end,
                                 Error **errp)
{
    struct kvm_ppc_xive_eq kvm_eq = { 0 };
    uint64_t kvm_eq_idx;
    uint8_t priority;
    uint32_t server;
    int ret;

    assert(xive_end_is_valid(end));

    /* Encode the tuple (server, prio) as a KVM EQ index */
    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);

    kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT &
        KVM_XIVE_EQ_PRIORITY_MASK;
    kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT &
        KVM_XIVE_EQ_SERVER_MASK;

    ret = kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx,
                            &kvm_eq, false, errp);
    if (ret < 0) {
        return ret;
    }

    /*
     * The EQ index and toggle bit are updated by HW. These are the
     * only fields from KVM we want to update QEMU with. The other END
     * fields should already be in the QEMU END table.
     */
    end->w1 = xive_set_field32(END_W1_GENERATION, 0ul, kvm_eq.qtoggle) |
        xive_set_field32(END_W1_PAGE_OFF, 0ul, kvm_eq.qindex);

    return 0;
}
int kvmppc_xive_set_queue_config(SpaprXive *xive, uint8_t end_blk,
                                 uint32_t end_idx, XiveEND *end,
                                 Error **errp)
{
    struct kvm_ppc_xive_eq kvm_eq = { 0 };
    uint64_t kvm_eq_idx;
    uint8_t priority;
    uint32_t server;

    /*
     * Build the KVM state from the local END structure.
     */

    kvm_eq.flags = 0;
    if (xive_get_field32(END_W0_UCOND_NOTIFY, end->w0)) {
        kvm_eq.flags |= KVM_XIVE_EQ_ALWAYS_NOTIFY;
    }

    /*
     * If the hcall is disabling the EQ, set the size and page address
     * to zero. When migrating, only valid ENDs are taken into
     * account.
     */
    if (xive_end_is_valid(end)) {
        kvm_eq.qshift = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
        kvm_eq.qaddr  = xive_end_qaddr(end);
        /*
         * The EQ toggle bit and index should only be relevant when
         * restoring the EQ state
         */
        kvm_eq.qtoggle = xive_get_field32(END_W1_GENERATION, end->w1);
        kvm_eq.qindex  = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    }

    /* Encode the tuple (server, prio) as a KVM EQ index */
    spapr_xive_end_to_target(end_blk, end_idx, &server, &priority);

    kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT &
        KVM_XIVE_EQ_PRIORITY_MASK;
    kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT &
        KVM_XIVE_EQ_SERVER_MASK;

    return kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx,
                             &kvm_eq, true, errp);
}
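/*
 * KVM_DEV_XIVE_RESET asks the host kernel to reset the device state:
 * the source targeting and the EQ configuration are cleared in one
 * control operation.
 */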
void kvmppc_xive_reset(SpaprXive *xive, Error **errp)
{
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL, KVM_DEV_XIVE_RESET,
                      NULL, true, errp);
}
static int kvmppc_xive_get_queues(SpaprXive *xive, Error **errp)
{
    int i;
    int ret;

    for (i = 0; i < xive->nr_ends; i++) {
        if (!xive_end_is_valid(&xive->endt[i])) {
            continue;
        }

        ret = kvmppc_xive_get_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
                                           &xive->endt[i], errp);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}
/*
 * The primary goal of the XIVE VM change handler is to mark the EQ
 * pages dirty when all XIVE event notifications have stopped.
 *
 * Whenever the VM is stopped, the VM change handler sets the source
 * PQs to PENDING to stop the flow of events and to possibly catch a
 * triggered interrupt occurring while the VM is stopped. The previous
 * state is saved in anticipation of a migration. The XIVE controller
 * is then synced through KVM to flush any in-flight event
 * notification and stabilize the EQs.
 *
 * At this stage, we can mark the EQ page dirty and let a migration
 * sequence transfer the EQ pages to the destination, which is done
 * just after the stop state.
 *
 * The previous configuration of the sources is restored when the VM
 * runs again. If an interrupt was queued while the VM was stopped,
 * simply generate a trigger.
 */
static void kvmppc_xive_change_state_handler(void *opaque, int running,
                                             RunState state)
{
    SpaprXive *xive = opaque;
    XiveSource *xsrc = &xive->source;
    Error *local_err = NULL;
    int i;

    /*
     * Restore the sources to their initial state. This is called when
     * the VM resumes after a stop or a migration.
     */
    if (running) {
        for (i = 0; i < xsrc->nr_irqs; i++) {
            uint8_t pq;
            uint8_t old_pq;

            if (!xive_eas_is_valid(&xive->eat[i])) {
                continue;
            }

            pq = xive_source_esb_get(xsrc, i);
            old_pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_00 + (pq << 8));

            /*
             * An interrupt was queued while the VM was stopped,
             * generate a trigger.
             */
            if (pq == XIVE_ESB_RESET && old_pq == XIVE_ESB_QUEUED) {
                xive_esb_trigger(xsrc, i);
            }
        }

        return;
    }

    /*
     * Mask the sources, to stop the flow of event notifications, and
     * save the PQs locally in the XiveSource object. The XiveSource
     * state will be collected later on by its vmstate handler if a
     * migration is in progress.
     */
    for (i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq;

        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        pq = xive_esb_read(xsrc, i, XIVE_ESB_GET);

        /*
         * PQ is set to PENDING to possibly catch a triggered
         * interrupt occurring while the VM is stopped (hotplug event
         * for instance).
         */
        if (pq != XIVE_ESB_OFF) {
            pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_10);
        }
        xive_source_esb_set(xsrc, i, pq);
    }

    /*
     * Sync the XIVE controller in KVM, to flush in-flight event
     * notification that should be enqueued in the EQs and mark the
     * XIVE EQ pages dirty to collect all updates.
     */
    kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
                      KVM_DEV_XIVE_EQ_SYNC, NULL, true, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }
}
void kvmppc_xive_synchronize_state(SpaprXive *xive, Error **errp)
{
    assert(xive->fd != -1);

    /*
     * When the VM is stopped, the sources are masked and the previous
     * state is saved in anticipation of a migration. We should not
     * synchronize the source state in that case else we will override
     * the saved state.
     */
    if (runstate_is_running()) {
        kvmppc_xive_source_get_state(&xive->source);
    }

    /* EAT: there is no extra state to query from KVM */

    /* ENDT */
    kvmppc_xive_get_queues(xive, errp);
}
/*
 * The SpaprXive 'pre_save' method is called by the vmstate handler of
 * the SpaprXive model, after the XIVE controller is synced in the VM
 * change handler.
 */
int kvmppc_xive_pre_save(SpaprXive *xive)
{
    Error *local_err = NULL;
    int ret;

    assert(xive->fd != -1);

    /* EAT: there is no extra state to query from KVM */

    /* ENDT */
    ret = kvmppc_xive_get_queues(xive, &local_err);
    if (ret < 0) {
        error_report_err(local_err);
        return ret;
    }

    return 0;
}
/*
 * The SpaprXive 'post_load' method is not called by a vmstate
 * handler. It is called at the sPAPR machine level at the end of the
 * migration sequence by the sPAPR IRQ backend 'post_load' method,
 * when all XIVE states have been transferred and loaded.
 */
int kvmppc_xive_post_load(SpaprXive *xive, int version_id)
{
    Error *local_err = NULL;
    CPUState *cs;
    int i;
    int ret;

    /* The KVM XIVE device should be in use */
    assert(xive->fd != -1);

    /* Restore the ENDT first. The targeting depends on it. */
    for (i = 0; i < xive->nr_ends; i++) {
        if (!xive_end_is_valid(&xive->endt[i])) {
            continue;
        }

        ret = kvmppc_xive_set_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i,
                                           &xive->endt[i], &local_err);
        if (ret < 0) {
            goto fail;
        }
    }

    /* Restore the EAT */
    for (i = 0; i < xive->nr_irqs; i++) {
        if (!xive_eas_is_valid(&xive->eat[i])) {
            continue;
        }

        /*
         * We can only restore the source config if the source has been
         * previously set in KVM. Since we don't do that for all interrupts
         * at reset time anymore, let's do it now.
         */
        ret = kvmppc_xive_source_reset_one(&xive->source, i, &local_err);
        if (ret < 0) {
            goto fail;
        }

        ret = kvmppc_xive_set_source_config(xive, i, &xive->eat[i], &local_err);
        if (ret < 0) {
            goto fail;
        }
    }

    /*
     * Restore the thread interrupt contexts of initial CPUs.
     *
     * The context of hotplugged CPUs is restored later, by the
     * 'post_load' handler of the XiveTCTX model because they are not
     * available at the time the SpaprXive 'post_load' method is
     * called. We can not restore the context of all CPUs in the
     * 'post_load' handler of XiveTCTX because the machine is not
     * necessarily connected to the KVM device at that time.
     */
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        ret = kvmppc_xive_cpu_set_state(spapr_cpu_state(cpu)->tctx, &local_err);
        if (ret < 0) {
            goto fail;
        }
    }

    /* The source states will be restored when the machine starts running */
    return 0;

fail:
    error_report_err(local_err);
    return ret;
}
/* Returns MAP_FAILED on error and sets errno */
static void *kvmppc_xive_mmap(SpaprXive *xive, int pgoff, size_t len,
                              Error **errp)
{
    void *addr;
    uint32_t page_shift = 16; /* TODO: fix page_shift */

    addr = mmap(NULL, len, PROT_WRITE | PROT_READ, MAP_SHARED, xive->fd,
                pgoff << page_shift);
    if (addr == MAP_FAILED) {
        error_setg_errno(errp, errno, "XIVE: unable to set memory mapping");
    }

    return addr;
}
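/*
 * The page offsets of the KVM device fd select which mapping is
 * returned: KVM_XIVE_ESB_PAGE_OFFSET for the source ESB pages and
 * KVM_XIVE_TIMA_PAGE_OFFSET for the thread interrupt management
 * area.
 */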
/*
 * All the XIVE memory regions are now backed by mappings from the KVM
 * XIVE device.
 */
int kvmppc_xive_connect(SpaprInterruptController *intc, uint32_t nr_servers,
                        Error **errp)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    XiveSource *xsrc = &xive->source;
    size_t esb_len = xive_source_esb_len(xsrc);
    size_t tima_len = 4ull << TM_SHIFT;
    CPUState *cs;
    int fd;
    void *addr;
    int ret;

    /*
     * The KVM XIVE device is already in use. This is the case when
     * rebooting under the XIVE-only interrupt mode.
     */
    if (xive->fd != -1) {
        return 0;
    }

    if (!kvmppc_has_cap_xive()) {
        error_setg(errp, "IRQ_XIVE capability must be present for KVM");
        return -1;
    }

    /* First, create the KVM XIVE device */
    fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_XIVE, false);
    if (fd < 0) {
        error_setg_errno(errp, -fd, "XIVE: error creating KVM device");
        return -1;
    }
    xive->fd = fd;

    /* Tell KVM about the number of vCPUs we may have */
    if (kvm_device_check_attr(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
                              KVM_DEV_XIVE_NR_SERVERS)) {
        ret = kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL,
                                KVM_DEV_XIVE_NR_SERVERS, &nr_servers, true,
                                errp);
        if (ret < 0) {
            goto fail;
        }
    }
    /*
     * 1. Source ESB pages - KVM mapping
     */
    addr = kvmppc_xive_mmap(xive, KVM_XIVE_ESB_PAGE_OFFSET, esb_len, errp);
    if (addr == MAP_FAILED) {
        goto fail;
    }
    xsrc->esb_mmap = addr;

    memory_region_init_ram_device_ptr(&xsrc->esb_mmio_kvm, OBJECT(xsrc),
                                      "xive.esb-kvm", esb_len, xsrc->esb_mmap);
    memory_region_add_subregion_overlap(&xsrc->esb_mmio, 0,
                                        &xsrc->esb_mmio_kvm, 1);
    /*
     * 2. END ESB pages (No KVM support yet)
     */

    /*
     * 3. TIMA pages - KVM mapping
     */
    addr = kvmppc_xive_mmap(xive, KVM_XIVE_TIMA_PAGE_OFFSET, tima_len, errp);
    if (addr == MAP_FAILED) {
        goto fail;
    }
    xive->tm_mmap = addr;

    memory_region_init_ram_device_ptr(&xive->tm_mmio_kvm, OBJECT(xive),
                                      "xive.tima", tima_len, xive->tm_mmap);
    memory_region_add_subregion_overlap(&xive->tm_mmio, 0,
                                        &xive->tm_mmio_kvm, 1);
    xive->change = qemu_add_vm_change_state_handler(
        kvmppc_xive_change_state_handler, xive);

    /* Connect the presenters to the initial VCPUs of the machine */
    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        ret = kvmppc_xive_cpu_connect(spapr_cpu_state(cpu)->tctx, errp);
        if (ret < 0) {
            goto fail;
        }
    }
    /* Update the KVM sources */
    ret = kvmppc_xive_source_reset(xsrc, errp);
    if (ret < 0) {
        goto fail;
    }
    kvm_kernel_irqchip = true;
    kvm_msi_via_irqfd_allowed = true;
    kvm_gsi_direct_mapping = true;
    return 0;

fail:
    kvmppc_xive_disconnect(intc);
    return -1;
}
void kvmppc_xive_disconnect(SpaprInterruptController *intc)
{
    SpaprXive *xive = SPAPR_XIVE(intc);
    XiveSource *xsrc;
    size_t esb_len;

    assert(xive->fd != -1);

    /* Clear the KVM mapping */
    xsrc = &xive->source;
    esb_len = xive_source_esb_len(xsrc);

    if (xsrc->esb_mmap) {
        memory_region_del_subregion(&xsrc->esb_mmio, &xsrc->esb_mmio_kvm);
        object_unparent(OBJECT(&xsrc->esb_mmio_kvm));
        munmap(xsrc->esb_mmap, esb_len);
        xsrc->esb_mmap = NULL;
    }

    if (xive->tm_mmap) {
        memory_region_del_subregion(&xive->tm_mmio, &xive->tm_mmio_kvm);
        object_unparent(OBJECT(&xive->tm_mmio_kvm));
        munmap(xive->tm_mmap, 4ull << TM_SHIFT);
        xive->tm_mmap = NULL;
    }

    /*
     * When the KVM device fd is closed, the KVM device is destroyed
     * and removed from the list of devices of the VM. The VCPU
     * presenters are also detached from the device.
     */
    close(xive->fd);
    xive->fd = -1;

    kvm_kernel_irqchip = false;
    kvm_msi_via_irqfd_allowed = false;
    kvm_gsi_direct_mapping = false;

    /* Clear the local list of presenters (hotplug) */
    kvm_cpu_disable_all();

    /* VM Change state handler is not needed anymore */
    if (xive->change) {
        qemu_del_vm_change_state_handler(xive->change);
        xive->change = NULL;
    }
}