/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) "xive-kvm: " fmt

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <asm/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <asm/time.h>
#include <asm/opal.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xive.h"

/*
 * Virtual mode variants of the hcalls for use on radix/radix
 * with AIL. They require the VCPU's VP to be "pushed"
 *
 * We still instantiate them here because we use some of the
 * generated utility functions as well in this file.
 */
#define XIVE_RUNTIME_CHECKS
#define X_PFX xive_vm_
#define X_STATIC static
#define X_STAT_PFX stat_vm_
#define __x_tima		xive_tima
#define __x_eoi_page(xd)	((void __iomem *)((xd)->eoi_mmio))
#define __x_trig_page(xd)	((void __iomem *)((xd)->trig_mmio))
#define __x_writeb	__raw_writeb
#define __x_readw	__raw_readw
#define __x_readq	__raw_readq
#define __x_writeq	__raw_writeq

#include "book3s_xive_template.c"

/*
 * We leave a gap of a couple of interrupts in the queue to
 * account for the IPI and additional safety guard.
 */
#define XIVE_Q_GAP	2

/*
 * This is a simple trigger for a generic XIVE IRQ. This must
 * only be called for interrupts that support a trigger page
 */
static bool xive_irq_trigger(struct xive_irq_data *xd)
{
	/* This should be only for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return false;

	/* Those interrupts should always have a trigger page */
	if (WARN_ON(!xd->trig_mmio))
		return false;

	out_be64(xd->trig_mmio, 0);

	return true;
}

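/*
 * Note on the trigger above: the value stored to the trigger page is
 * irrelevant, any MMIO store to it fires the source, hence the bare 0.
 */
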
static irqreturn_t xive_esc_irq(int irq, void *data)
{
	struct kvm_vcpu *vcpu = data;

	/* We use the existing H_PROD mechanism to wake up the target */
	vcpu->arch.prodded = 1;
	smp_mb();
	if (vcpu->arch.ceded)
		kvmppc_fast_vcpu_kick(vcpu);

	return IRQ_HANDLED;
}

static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q = &xc->queues[prio];
	char *name = NULL;
	int rc;

	/* Already there ? */
	if (xc->esc_virq[prio])
		return 0;

	/* Hook up the escalation interrupt */
	xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
	if (!xc->esc_virq[prio]) {
		pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		return -EIO;
	}

	/*
	 * Future improvement: start with them disabled
	 * and handle DD2 and later scheme of merged escalation
	 * interrupts
	 */
	name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
			 vcpu->kvm->arch.lpid, xc->server_num, prio);
	if (!name) {
		pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		rc = -ENOMEM;
		goto error;
	}
	rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
			 IRQF_NO_THREAD, name, vcpu);
	if (rc) {
		pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		goto error;
	}
	xc->esc_virq_names[prio] = name;
	return 0;
error:
	irq_dispose_mapping(xc->esc_virq[prio]);
	xc->esc_virq[prio] = 0;
	kfree(name);
	return rc;
}

static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = xc->xive;
	struct xive_q *q = &xc->queues[prio];
	__be32 *qpage;
	int rc;

	if (WARN_ON(q->qpage))
		return 0;

	/* Allocate the queue and retrieve info on current node for now */
	qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
	if (!qpage) {
		pr_err("Failed to allocate queue %d for VCPU %d\n",
		       prio, xc->server_num);
		return -ENOMEM;
	}
	memset(qpage, 0, 1 << xive->q_order);

	/*
	 * Reconfigure the queue. This will set q->qpage only once the
	 * queue is fully configured. This is a requirement for prio 0
	 * as we will stop doing EOIs for every IPI as soon as we observe
	 * qpage being non-NULL, and instead will only EOI when we receive
	 * corresponding queue 0 entries
	 */
	rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
					 xive->q_order, true);
	if (rc)
		pr_err("Failed to configure queue %d for VCPU %d\n",
		       prio, xc->server_num);
	return rc;
}

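/*
 * Note: xive->q_order is the queue size as a power-of-two number of
 * bytes (the host's default EQ shift), while xive->q_page_order is
 * that same size converted to a page allocation order suitable for
 * __get_free_pages(), as set up in kvmppc_xive_create() below.
 */
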
/* Called with kvm_lock held */
static int xive_check_provisioning(struct kvm *kvm, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvm_vcpu *vcpu;
	int i, rc;

	lockdep_assert_held(&kvm->lock);

	/* Already provisioned ? */
	if (xive->qmap & (1 << prio))
		return 0;

	pr_devel("Provisioning prio... %d\n", prio);

	/* Provision each VCPU and enable escalations */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_provision_queue(vcpu, prio);
		if (rc == 0)
			xive_attach_escalation(vcpu, prio);
		if (rc)
			return rc;
	}

	/* Order previous stores and mark it as provisioned */
	mb();
	xive->qmap |= (1 << prio);
	return 0;
}

static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	struct kvmppc_xive_vcpu *xc;
	struct xive_q *q;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, server);
	if (!vcpu) {
		pr_warn("%s: Can't find server %d\n", __func__, server);
		return;
	}
	xc = vcpu->arch.xive_vcpu;
	if (WARN_ON(!xc))
		return;

	q = &xc->queues[prio];
	atomic_inc(&q->pending_count);
}

static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q;
	u32 max;

	if (WARN_ON(!xc))
		return -ENXIO;
	if (!xc->valid)
		return -ENXIO;

	q = &xc->queues[prio];
	if (WARN_ON(!q->qpage))
		return -ENXIO;

	/* Calculate max number of interrupts in that queue. */
	max = (q->msk + 1) - XIVE_Q_GAP;
	return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
}

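/*
 * A worked example of the accounting above: for a 256-slot queue,
 * q->msk is 255, so max is 256 - XIVE_Q_GAP = 254, leaving two slots
 * spare for the IPI and the safety guard mentioned at the XIVE_Q_GAP
 * definition.
 */
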
static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	int i, rc;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, *server);
	if (!vcpu) {
		pr_devel("Can't find server %d\n", *server);
		return -EINVAL;
	}

	pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);

	/* Try pick it */
	rc = xive_try_pick_queue(vcpu, prio);
	if (rc == 0)
		return rc;

	pr_devel(" .. failed, looking up candidate...\n");

	/* Failed, pick another VCPU */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_try_pick_queue(vcpu, prio);
		if (rc == 0) {
			*server = vcpu->arch.xive_vcpu->server_num;
			pr_devel("  found on 0x%x/%d\n", *server, prio);
			return rc;
		}
	}
	pr_devel("  no available target !\n");

	/* No available target ! */
	return -EBUSY;
}

static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
			     struct kvmppc_xive_src_block *sb,
			     struct kvmppc_xive_irq_state *state)
{
	struct xive_irq_data *xd;
	u32 hw_num;
	u8 old_prio;
	u64 val;

	/*
	 * Take the lock, set masked, try again if racing
	 * with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		old_prio = state->guest_priority;
		state->guest_priority = MASKED;
		mb();
		if (!state->in_eoi)
			break;
		state->guest_priority = old_prio;
		arch_spin_unlock(&sb->lock);
	}

	/* No change ? Bail */
	if (old_prio == MASKED)
		return old_prio;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/*
	 * If the interrupt is marked as needing masking via
	 * firmware, we do it here. Firmware masking however
	 * is "lossy", it won't return the old p and q bits
	 * and won't set the interrupt to a state where it will
	 * record queued ones. If this is an issue we should do
	 * lazy masking instead.
	 *
	 * For now, we work around this in unmask by forcing
	 * an interrupt whenever we unmask a non-LSI via FW
	 * (if ever).
	 */
	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
		xive_native_configure_irq(hw_num,
					  xive->vp_base + state->act_server,
					  MASKED, state->number);
		/* set old_p so we can track if an H_EOI was done */
		state->old_p = true;
		state->old_q = false;
	} else {
		/* Set PQ to 10, return old P and old Q and remember them */
		val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
		state->old_p = !!(val & 2);
		state->old_q = !!(val & 1);

		/*
		 * Synchronize hardware to ensure the queues are updated
		 * when masking
		 */
		xive_native_sync_source(hw_num);
	}

	return old_prio;
}

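/*
 * Note: xive_lock_and_mask() returns the previous guest priority and,
 * like xive_lock_for_unmask() below, returns with sb->lock still held;
 * every caller is responsible for the matching arch_spin_unlock().
 */
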
static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
				 struct kvmppc_xive_irq_state *state)
{
	/*
	 * Take the lock, try again if racing with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		if (!state->in_eoi)
			break;
		arch_spin_unlock(&sb->lock);
	}
}

static void xive_finish_unmask(struct kvmppc_xive *xive,
			       struct kvmppc_xive_src_block *sb,
			       struct kvmppc_xive_irq_state *state,
			       u8 prio)
{
	struct xive_irq_data *xd;
	u32 hw_num;

	/* If we aren't changing a thing, move on */
	if (state->guest_priority != MASKED)
		goto bail;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/*
	 * See comment in xive_lock_and_mask() concerning masking
	 * via firmware.
	 */
	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
		xive_native_configure_irq(hw_num,
					  xive->vp_base + state->act_server,
					  state->act_priority, state->number);
		/* If an EOI is needed, do it here */
		if (!state->old_p)
			xive_vm_source_eoi(hw_num, xd);
		/* If this is not an LSI, force a trigger */
		if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
			xive_irq_trigger(xd);
		goto bail;
	}

	/* Old Q set, set PQ to 11 */
	if (state->old_q)
		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);

	/*
	 * If not old P, then perform an "effective" EOI,
	 * on the source. This will handle the cases where
	 * FW EOI is needed.
	 */
	if (!state->old_p)
		xive_vm_source_eoi(hw_num, xd);

	/* Synchronize ordering and mark unmasked */
	mb();
bail:
	state->guest_priority = prio;
}

/*
 * Target an interrupt to a given server/prio, this will fallback
 * to another server if necessary and perform the HW targeting
 * updates as needed
 *
 * NOTE: Must be called with the state lock held
 */
static int xive_target_interrupt(struct kvm *kvm,
				 struct kvmppc_xive_irq_state *state,
				 u32 server, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	u32 hw_num;
	int rc;

	/*
	 * This will return a tentative server and actual
	 * priority. The count for that new target will have
	 * already been incremented.
	 */
	rc = xive_select_target(kvm, &server, prio);

	/*
	 * We failed to find a target ? Not much we can do
	 * at least until we support the GIQ.
	 */
	if (rc)
		return rc;

	/*
	 * Increment the old queue pending count if there
	 * was one so that the old queue count gets adjusted later
	 * when observed to be empty.
	 */
	if (state->act_priority != MASKED)
		xive_inc_q_pending(kvm,
				   state->act_server,
				   state->act_priority);
	/*
	 * Update state and HW
	 */
	state->act_priority = prio;
	state->act_server = server;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, NULL);

	return xive_native_configure_irq(hw_num,
					 xive->vp_base + server,
					 prio, state->number);
}

/*
 * Targeting rules: In order to avoid losing track of
 * pending interrupts across mask and unmask, which would
 * allow queue overflows, we implement the following rules:
 *
 * - Unless it was never enabled (or we run out of capacity)
 *   an interrupt is always targeted at a valid server/queue
 *   pair even when "masked" by the guest. This pair tends to
 *   be the last one used but it can be changed under some
 *   circumstances. That allows us to separate targeting
 *   from masking, we only handle accounting during (re)targeting,
 *   this also allows us to let an interrupt drain into its target
 *   queue after masking, avoiding complex schemes to remove
 *   interrupts out of remote processor queues.
 *
 * - When masking, we set PQ to 10 and save the previous value
 *   of P and Q.
 *
 * - When unmasking, if saved Q was set, we set PQ to 11
 *   otherwise we leave PQ to the HW state which will be either
 *   10 if nothing happened or 11 if the interrupt fired while
 *   masked. Effectively we are OR'ing the previous Q into the
 *   HW Q.
 *
 *   Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
 *   which will unmask the interrupt and shoot a new one if Q was
 *   set.
 *
 *   Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
 *   effectively meaning an H_EOI from the guest is still expected
 *   for that interrupt).
 *
 * - If H_EOI occurs while masked, we clear the saved P.
 *
 * - When changing target, we account on the new target and
 *   increment a separate "pending" counter on the old one.
 *   This pending counter will be used to decrement the old
 *   target's count when its queue has been observed empty.
 */

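/*
 * A worked example of one mask/unmask cycle under the rules above,
 * for an MSI that fires once while masked:
 *
 *   start          PQ=00 (enabled, idle)
 *   mask           ESB SET_PQ_10 -> PQ=10, saved P=0 Q=0
 *   device fires   HW sets Q     -> PQ=11, nothing is delivered
 *   unmask         saved Q=0 so the HW PQ (11) is kept; saved P=0 so
 *                  an effective EOI resets the source and, because Q
 *                  was set, a replacement interrupt is triggered.
 */
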
int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
			 u32 priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u8 new_act_prio;
	int rc = 0;
	u16 idx;

	if (!xive)
		return -ENODEV;

	pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
		 irq, server, priority);

	/* First, check provisioning of queues */
	if (priority != MASKED)
		rc = xive_check_provisioning(xive->kvm,
			      xive_prio_from_guest(priority));
	if (rc) {
		pr_devel("  provisioning failure %d !\n", rc);
		return rc;
	}

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * We first handle masking/unmasking since the locking
	 * might need to be retried due to EOIs, we'll handle
	 * targeting changes later. These functions will return
	 * with the SB lock held.
	 *
	 * xive_lock_and_mask() will also set state->guest_priority
	 * but won't otherwise change other fields of the state.
	 *
	 * xive_lock_for_unmask will not actually unmask, this will
	 * be done later by xive_finish_unmask() once the targeting
	 * has been done, so we don't try to unmask an interrupt
	 * that hasn't yet been targeted.
	 */
	if (priority == MASKED)
		xive_lock_and_mask(xive, sb, state);
	else
		xive_lock_for_unmask(sb, state);

	/*
	 * Then we handle targeting.
	 *
	 * First calculate a new "actual priority"
	 */
	new_act_prio = state->act_priority;
	if (priority != MASKED)
		new_act_prio = xive_prio_from_guest(priority);

	pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
		 new_act_prio, state->act_server, state->act_priority);

	/*
	 * Then check if we actually need to change anything,
	 *
	 * The condition for re-targeting the interrupt is that
	 * we have a valid new priority (new_act_prio is not 0xff)
	 * and either the server or the priority changed.
	 *
	 * Note: If act_priority was ff and the new priority is
	 *       also ff, we don't do anything and leave the interrupt
	 *       untargeted. An attempt of doing an int_on on an
	 *       untargeted interrupt will fail. If that is a problem
	 *       we could initialize interrupts with valid default
	 *       priorities.
	 */
	if (new_act_prio != MASKED &&
	    (state->act_server != server ||
	     state->act_priority != new_act_prio))
		rc = xive_target_interrupt(kvm, state, server, new_act_prio);

	/*
	 * Perform the final unmasking of the interrupt source
	 * if necessary
	 */
	if (priority != MASKED)
		xive_finish_unmask(xive, sb, state, priority);

	/*
	 * Finally Update saved_priority to match. Only int_on/off
	 * set this field to a different value.
	 */
	state->saved_priority = priority;

	arch_spin_unlock(&sb->lock);
	return rc;
}

int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
			 u32 *priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];
	arch_spin_lock(&sb->lock);
	*server = state->act_server;
	*priority = state->guest_priority;
	arch_spin_unlock(&sb->lock);

	return 0;
}

int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_on(irq=0x%x)\n", irq);

	/*
	 * Check if interrupt was not targeted
	 */
	if (state->act_priority == MASKED) {
		pr_devel("int_on on untargeted interrupt\n");
		return -EINVAL;
	}

	/* If saved_priority is 0xff, do nothing */
	if (state->saved_priority == MASKED)
		return 0;

	/*
	 * Lock and unmask it.
	 */
	xive_lock_for_unmask(sb, state);
	xive_finish_unmask(xive, sb, state, state->saved_priority);
	arch_spin_unlock(&sb->lock);

	return 0;
}

int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_off(irq=0x%x)\n", irq);

	/*
	 * Lock and mask
	 */
	state->saved_priority = xive_lock_and_mask(xive, sb, state);
	arch_spin_unlock(&sb->lock);

	return 0;
}

static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return false;
	state = &sb->irq_state[idx];
	if (!state->valid)
		return false;

	/*
	 * Trigger the IPI. This assumes we never restore a pass-through
	 * interrupt which should be safe enough
	 */
	xive_irq_trigger(&state->ipi_data);

	return true;
}

u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	if (!xc)
		return 0;

	/* Return the per-cpu state for state saving/migration */
	return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
	       (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
	       (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
}

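/*
 * Per the KVM_REG_PPC_ICP_* definitions in the uapi headers, the
 * packed value places CPPR in bits 63:56, XISR in bits 55:32, MFRR in
 * bits 31:24 and the pending priority (PPRI, always 0xff here) in
 * bits 23:16.
 */
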
int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	u8 cppr, mfrr;
	u32 xisr;

	if (!xc || !xive)
		return -ENOENT;

	/* Grab individual state fields. We don't use pending_pri */
	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;

	pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
		 xc->server_num, cppr, mfrr, xisr);

	/*
	 * We can't update the state of a "pushed" VCPU, but that
	 * shouldn't happen.
	 */
	if (WARN_ON(vcpu->arch.xive_pushed))
		return -EIO;

	/* Update VCPU HW saved state */
	vcpu->arch.xive_saved_state.cppr = cppr;
	xc->hw_cppr = xc->cppr = cppr;

	/*
	 * Update MFRR state. If it's not 0xff, we mark the VCPU as
	 * having a pending MFRR change, which will re-evaluate the
	 * target. The VCPU will thus potentially get a spurious
	 * interrupt but that's not a big deal.
	 */
	xc->mfrr = mfrr;
	if (mfrr < cppr)
		xive_irq_trigger(&xc->vp_ipi_data);

	/*
	 * Now saved XIRR is "interesting". It means there's something in
	 * the legacy "1 element" queue... for an IPI we simply ignore it,
	 * as the MFRR restore will handle that. For anything else we need
	 * to force a resend of the source.
	 * However the source may not have been setup yet. If that's the
	 * case, we keep that info and increment a counter in the xive to
	 * tell subsequent xive_set_source() to go look.
	 */
	if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
		xc->delayed_irq = xisr;
		xive->delayed_irqs++;
		pr_devel("  xisr restore delayed\n");
	}

	return 0;
}

int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
			   struct irq_desc *host_desc)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
	unsigned int host_irq = irq_desc_get_irq(host_desc);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
	u16 idx;
	u8 prio;
	int rc;

	if (!xive)
		return -ENODEV;

	pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n", guest_irq, hw_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * Mark the passed-through interrupt as going to a VCPU,
	 * this will prevent further EOIs and similar operations
	 * from the XIVE code. It will also mask the interrupt
	 * to either PQ=10 or 11 state, the latter if the interrupt
	 * is pending. This will allow us to unmask or retrigger it
	 * after routing it to the guest with a simple EOI.
	 *
	 * The "state" argument is a "token", all it needs is to be
	 * non-NULL to switch to passed-through or NULL for the
	 * other way around. We may not yet have an actual VCPU
	 * target here and we don't really care.
	 */
	rc = irq_set_vcpu_affinity(host_irq, state);
	if (rc) {
		pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
		return rc;
	}

	/*
	 * Mask and read state of IPI. We need to know if its P bit
	 * is set as that means it's potentially already using a
	 * queue entry in the target
	 */
	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	/* Turn the IPI hard off */
	xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);

	/* Grab info about irq */
	state->pt_number = hw_irq;
	state->pt_data = irq_data_get_irq_handler_data(host_data);

	/*
	 * Configure the IRQ to match the existing configuration of
	 * the IPI if it was already targeted. Otherwise this will
	 * mask the interrupt in a lossy way (act_priority is 0xff)
	 * which is fine for a never started interrupt.
	 */
	xive_native_configure_irq(hw_irq,
				  xive->vp_base + state->act_server,
				  state->act_priority, state->number);

	/*
	 * We do an EOI to enable the interrupt (and retrigger if needed)
	 * if the guest has the interrupt unmasked and the P bit was *not*
	 * set in the IPI. If it was set, we know a slot may still be in
	 * use in the target queue thus we have to wait for a guest
	 * originated EOI
	 */
	if (prio != MASKED && !state->old_p)
		xive_vm_source_eoi(hw_irq, state->pt_data);

	/* Clear old_p/old_q as they are no longer relevant */
	state->old_p = state->old_q = false;

	/* Restore guest prio (unlocks EOI) */
	mb();
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);

int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
			   struct irq_desc *host_desc)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	unsigned int host_irq = irq_desc_get_irq(host_desc);
	u16 idx;
	u8 prio;
	int rc;

	if (!xive)
		return -ENODEV;

	pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * Mask and read state of IRQ. We need to know if its P bit
	 * is set as that means it's potentially already using a
	 * queue entry in the target
	 */
	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	/*
	 * If old_p is set, the interrupt is pending, we switch it to
	 * PQ=11. This will force a resend in the host so the interrupt
	 * isn't lost to whatever host driver may pick it up
	 */
	if (state->old_p)
		xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);

	/* Release the passed-through interrupt to the host */
	rc = irq_set_vcpu_affinity(host_irq, NULL);
	if (rc) {
		pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
		return rc;
	}

	/* Forget about the IRQ */
	state->pt_number = 0;
	state->pt_data = NULL;

	/* Reconfigure the IPI */
	xive_native_configure_irq(state->ipi_number,
				  xive->vp_base + state->act_server,
				  state->act_priority, state->number);

	/*
	 * If old_p is set (we have a queue entry potentially
	 * occupied) or the interrupt is masked, we set the IPI
	 * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
	 */
	if (prio == MASKED || state->old_p)
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
	else
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);

	/* Restore guest prio (unlocks EOI) */
	mb();
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);

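/*
 * Note: kvmppc_xive_set_mapped() and kvmppc_xive_clr_mapped() are
 * exported for the host side of device pass-through; presumably the
 * irq bypass hooks in book3s_hv.c call them when a host interrupt is
 * mapped to, or unmapped from, a guest irq.
 */
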
static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_xive *xive = kvm->arch.xive;
	int i, j;

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
			struct kvmppc_xive_irq_state *state = &sb->irq_state[j];

			if (!state->valid)
				continue;
			if (state->act_priority == MASKED)
				continue;
			if (state->act_server != xc->server_num)
				continue;

			/* Clean it up */
			arch_spin_lock(&sb->lock);
			state->act_priority = MASKED;
			xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
			xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
			if (state->pt_number) {
				xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
				xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
			}
			arch_spin_unlock(&sb->lock);
		}
	}
}

void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = xc->xive;
	int i;

	pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);

	/* Ensure no interrupt is still routed to that VP */
	xc->valid = false;
	kvmppc_xive_disable_vcpu_interrupts(vcpu);

	/* Mask the VP IPI */
	xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);

	/* Disable the VP */
	xive_native_disable_vp(xc->vp_id);

	/* Free the queues & associated interrupts */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];

		/* Free the escalation irq */
		if (xc->esc_virq[i]) {
			free_irq(xc->esc_virq[i], vcpu);
			irq_dispose_mapping(xc->esc_virq[i]);
			kfree(xc->esc_virq_names[i]);
		}
		/* Free the queue */
		xive_native_disable_queue(xc->vp_id, q, i);
		if (q->qpage) {
			free_pages((unsigned long)q->qpage,
				   xive->q_page_order);
			q->qpage = NULL;
		}
	}

	/* Free the IPI */
	if (xc->vp_ipi) {
		xive_cleanup_irq_data(&xc->vp_ipi_data);
		xive_native_free_irq(xc->vp_ipi);
	}
	/* Free the VP */
	kfree(xc);
}

int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
			     struct kvm_vcpu *vcpu, u32 cpu)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvmppc_xive_vcpu *xc;
	int i, r = -EBUSY;

	pr_devel("connect_vcpu(cpu=%d)\n", cpu);

	if (dev->ops != &kvm_xive_ops) {
		pr_devel("Wrong ops !\n");
		return -EPERM;
	}
	if (xive->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type)
		return -EBUSY;
	if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
		pr_devel("Duplicate !\n");
		return -EEXIST;
	}
	if (cpu >= KVM_MAX_VCPUS) {
		pr_devel("Out of bounds !\n");
		return -EINVAL;
	}
	xc = kzalloc(sizeof(*xc), GFP_KERNEL);
	if (!xc)
		return -ENOMEM;

	/* We need to synchronize with queue provisioning */
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.xive_vcpu = xc;
	xc->xive = xive;
	xc->vcpu = vcpu;
	xc->server_num = cpu;
	xc->vp_id = xive->vp_base + cpu;
	xc->mfrr = 0xff;
	xc->valid = true;

	r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
	if (r)
		goto bail;

	/* Configure VCPU fields for use by assembly push/pull */
	vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
	vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);

	/* Allocate IPI */
	xc->vp_ipi = xive_native_alloc_irq();
	if (!xc->vp_ipi) {
		r = -EIO;
		goto bail;
	}
	pr_devel(" IPI=0x%x\n", xc->vp_ipi);

	r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
	if (r)
		goto bail;

	/*
	 * Initialize queues. Initially we set them all for no queueing
	 * and we enable escalation for queue 0 only which we'll use for
	 * our mfrr change notifications. If the VCPU is hot-plugged, we
	 * do handle provisioning however.
	 */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];

		/* Is queue already enabled ? Provision it */
		if (xive->qmap & (1 << i)) {
			r = xive_provision_queue(vcpu, i);
			if (r == 0)
				xive_attach_escalation(vcpu, i);
			if (r)
				goto bail;
		} else {
			r = xive_native_configure_queue(xc->vp_id,
							q, i, NULL, 0, true);
			if (r) {
				pr_err("Failed to configure queue %d for VCPU %d\n",
				       i, cpu);
				goto bail;
			}
		}
	}

	/* If not done above, attach priority 0 escalation */
	r = xive_attach_escalation(vcpu, 0);
	if (r)
		goto bail;

	/* Enable the VP */
	r = xive_native_enable_vp(xc->vp_id);
	if (r)
		goto bail;

	/* Route the IPI */
	r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
	if (!r)
		xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);

bail:
	mutex_unlock(&vcpu->kvm->lock);
	if (r) {
		kvmppc_xive_cleanup_vcpu(vcpu);
		return r;
	}

	vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
	return 0;
}

/*
 * Scanning of queues before/after migration save
 */
static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return;

	state = &sb->irq_state[idx];

	/* Some sanity checking */
	if (!state->valid) {
		pr_err("invalid irq 0x%x in cpu queue!\n", irq);
		return;
	}

	/*
	 * If the interrupt is in a queue it should have P set.
	 * We warn so that gets reported. A backtrace isn't useful
	 * so no need to use a WARN_ON.
	 */
	if (!state->saved_p)
		pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);

	/* Set flag */
	state->in_queue = true;
}

static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
				   struct kvmppc_xive_src_block *sb,
				   u32 irq)
{
	struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

	if (!state->valid)
		return;

	/* Mask and save state, this will also sync HW queues */
	state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);

	/* Transfer P and Q */
	state->saved_p = state->old_p;
	state->saved_q = state->old_q;

	/* Unlock */
	arch_spin_unlock(&sb->lock);
}

static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
				     struct kvmppc_xive_src_block *sb,
				     u32 irq)
{
	struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

	if (!state->valid)
		return;

	/*
	 * Lock / exclude EOI (not technically necessary if the
	 * guest isn't running concurrently. If this becomes a
	 * performance issue we can probably remove the lock.
	 */
	xive_lock_for_unmask(sb, state);

	/* Restore mask/prio if it wasn't masked */
	if (state->saved_scan_prio != MASKED)
		xive_finish_unmask(xive, sb, state, state->saved_scan_prio);

	/* Unlock */
	arch_spin_unlock(&sb->lock);
}

static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
{
	u32 idx = q->idx;
	u32 toggle = q->toggle;
	u32 irq;

	do {
		irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
		if (irq > XICS_IPI)
			xive_pre_save_set_queued(xive, irq);
	} while (irq);
}

static void xive_pre_save_scan(struct kvmppc_xive *xive)
{
	struct kvm_vcpu *vcpu = NULL;
	int i, j;

	/*
	 * See comment in xive_get_source() about how this
	 * works. Collect a stable state for all interrupts
	 */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0;  j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			xive_pre_save_mask_irq(xive, sb, j);
	}

	/* Then scan the queues and update the "in_queue" flag */
	kvm_for_each_vcpu(i, vcpu, xive->kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
		if (!xc)
			continue;
		for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
			if (xc->queues[j].qpage)
				xive_pre_save_queue(xive, &xc->queues[j]);
		}
	}

	/* Finally restore interrupt states */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0;  j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			xive_pre_save_unmask_irq(xive, sb, j);
	}
}

static void xive_post_save_scan(struct kvmppc_xive *xive)
{
	int i, j;

	/* Clear all the in_queue flags */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0;  j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			sb->irq_state[j].in_queue = false;
	}

	/* Next get_source() will do a new scan */
	xive->saved_src_count = 0;
}

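/*
 * Note on the 64-bit source attribute value used by xive_get_source()
 * and xive_set_source() below: per the KVM_XICS_* definitions in the
 * uapi headers, the destination server sits in the low 32 bits, the
 * priority at KVM_XICS_PRIORITY_SHIFT (32), and the LEVEL_SENSITIVE,
 * MASKED, PENDING, PRESENTED and QUEUED flags as single bits above.
 */
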
/*
 * This returns the source configuration and state to user space.
 */
static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u64 val, prio;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -ENOENT;

	state = &sb->irq_state[idx];

	if (!state->valid)
		return -ENOENT;

	pr_devel("get_source(%ld)...\n", irq);

	/*
	 * So to properly save the state into something that looks like a
	 * XICS migration stream we cannot treat interrupts individually.
	 *
	 * We need, instead, mask them all (& save their previous PQ state)
	 * to get a stable state in the HW, then sync them to ensure that
	 * any interrupt that had already fired hits its queue, and finally
	 * scan all the queues to collect which interrupts are still present
	 * in the queues, so we can set the "pending" flag on them and
	 * they can be resent on restore.
	 *
	 * So we do it all when the "first" interrupt gets saved, all the
	 * state is collected at that point, the rest of xive_get_source()
	 * will merely collect and convert that state to the expected
	 * userspace bit mask.
	 */
	if (xive->saved_src_count == 0)
		xive_pre_save_scan(xive);
	xive->saved_src_count++;

	/* Convert saved state into something compatible with xics */
	val = state->act_server;
	prio = state->saved_scan_prio;

	if (prio == MASKED) {
		val |= KVM_XICS_MASKED;
		prio = state->saved_priority;
	}
	val |= prio << KVM_XICS_PRIORITY_SHIFT;
	if (state->lsi) {
		val |= KVM_XICS_LEVEL_SENSITIVE;
		if (state->saved_p)
			val |= KVM_XICS_PENDING;
	} else {
		if (state->saved_p)
			val |= KVM_XICS_PRESENTED;

		if (state->saved_q)
			val |= KVM_XICS_QUEUED;

		/*
		 * We mark it pending (which will attempt a re-delivery)
		 * if we are in a queue *or* we were masked and had
		 * Q set which is equivalent to the XICS "masked pending"
		 * state
		 */
		if (state->in_queue || (prio == MASKED && state->saved_q))
			val |= KVM_XICS_PENDING;
	}

	/*
	 * If that was the last interrupt saved, reset the
	 * in_queue flags
	 */
	if (xive->saved_src_count == xive->src_count)
		xive_post_save_scan(xive);

	/* Copy the result to userspace */
	if (put_user(val, ubufp))
		return -EFAULT;

	return 0;
}

static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *xive,
							   int irq)
{
	struct kvm *kvm = xive->kvm;
	struct kvmppc_xive_src_block *sb;
	int i, bid;

	bid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

	/* block already exists - somebody else got here first */
	if (xive->src_blocks[bid])
		goto out;

	/* Create the ICS */
	sb = kzalloc(sizeof(*sb), GFP_KERNEL);
	if (!sb)
		goto out;

	sb->id = bid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
		sb->irq_state[i].guest_priority = MASKED;
		sb->irq_state[i].saved_priority = MASKED;
		sb->irq_state[i].act_priority = MASKED;
	}
	smp_wmb();
	xive->src_blocks[bid] = sb;

	if (bid > xive->max_sbid)
		xive->max_sbid = bid;

out:
	mutex_unlock(&kvm->lock);
	return xive->src_blocks[bid];
}

static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu = NULL;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

		if (!xc)
			continue;

		if (xc->delayed_irq == irq) {
			xc->delayed_irq = 0;
			xive->delayed_irqs--;
			return true;
		}
	}
	return false;
}

static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 act_prio, guest_prio;
	u32 server;
	int rc = 0;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	pr_devel("set_source(irq=0x%lx)\n", irq);

	/* Find the source */
	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb) {
		pr_devel("No source, creating source block...\n");
		sb = xive_create_src_block(xive, irq);
		if (!sb) {
			pr_devel("Failed to create block...\n");
			return -ENOMEM;
		}
	}
	state = &sb->irq_state[idx];

	/* Read user passed data */
	if (get_user(val, ubufp)) {
		pr_devel("fault getting user info !\n");
		return -EFAULT;
	}

	server = val & KVM_XICS_DESTINATION_MASK;
	guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;

	pr_devel("  val=0x%016llx (server=0x%x, guest_prio=%d)\n",
		 val, server, guest_prio);
	/*
	 * If the source doesn't already have an IPI, allocate
	 * one and get the corresponding data
	 */
	if (!state->ipi_number) {
		state->ipi_number = xive_native_alloc_irq();
		if (state->ipi_number == 0) {
			pr_devel("Failed to allocate IPI !\n");
			return -ENOMEM;
		}
		xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
		pr_devel(" src_ipi=0x%x\n", state->ipi_number);
	}

	/*
	 * We use lock_and_mask() to set us in the right masked
	 * state. We will override that state from the saved state
	 * further down, but this will handle the cases of interrupts
	 * that need FW masking. We set the initial guest_priority to
	 * 0 before calling it to ensure it actually performs the masking.
	 */
	state->guest_priority = 0;
	xive_lock_and_mask(xive, sb, state);

	/*
	 * Now, we select a target if we have one. If we don't we
	 * leave the interrupt untargeted. It means that an interrupt
	 * can become "untargeted" across migration if it was masked
	 * by set_xive() but there is little we can do about it.
	 */

	/* First convert prio and mark interrupt as untargeted */
	act_prio = xive_prio_from_guest(guest_prio);
	state->act_priority = MASKED;

	/*
	 * We need to drop the lock due to the mutex below. Hopefully
	 * nothing is touching that interrupt yet since it hasn't been
	 * advertised to a running guest yet
	 */
	arch_spin_unlock(&sb->lock);

	/* If we have a priority target the interrupt */
	if (act_prio != MASKED) {
		/* First, check provisioning of queues */
		mutex_lock(&xive->kvm->lock);
		rc = xive_check_provisioning(xive->kvm, act_prio);
		mutex_unlock(&xive->kvm->lock);

		/* Target interrupt */
		if (rc == 0)
			rc = xive_target_interrupt(xive->kvm, state,
						   server, act_prio);
		/*
		 * If provisioning or targeting failed, leave it
		 * alone and masked. It will remain disabled until
		 * the guest re-targets it.
		 */
	}

	/*
	 * Find out if this was a delayed irq stashed in an ICP,
	 * in which case, treat it as pending
	 */
	if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
		val |= KVM_XICS_PENDING;
		pr_devel("  Found delayed ! forcing PENDING !\n");
	}

	/* Cleanup the SW state */
	state->old_p = false;
	state->old_q = false;
	state->lsi = false;
	state->asserted = false;

	/* Restore LSI state */
	if (val & KVM_XICS_LEVEL_SENSITIVE) {
		state->lsi = true;
		if (val & KVM_XICS_PENDING)
			state->asserted = true;
		pr_devel("  LSI ! Asserted=%d\n", state->asserted);
	}

	/*
	 * Restore P and Q. If the interrupt was pending, we
	 * force Q and !P, which will trigger a resend.
	 *
	 * That means that a guest that had both an interrupt
	 * pending (queued) and Q set will restore with only
	 * one instance of that interrupt instead of 2, but that
	 * is perfectly fine as coalescing interrupts that haven't
	 * been presented yet is always allowed.
	 */
	if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
		state->old_p = true;
	if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
		state->old_q = true;

	pr_devel("  P=%d, Q=%d\n", state->old_p, state->old_q);

	/*
	 * If the interrupt was unmasked, update guest priority and
	 * perform the appropriate state transition and do a
	 * re-trigger if necessary.
	 */
	if (val & KVM_XICS_MASKED) {
		pr_devel("  masked, saving prio\n");
		state->guest_priority = MASKED;
		state->saved_priority = guest_prio;
	} else {
		pr_devel("  unmasked, restoring to prio %d\n", guest_prio);
		xive_finish_unmask(xive, sb, state, guest_prio);
		state->saved_priority = guest_prio;
	}

	/* Increment the number of valid sources and mark this one valid */
	if (!state->valid)
		xive->src_count++;
	state->valid = true;

	return 0;
}

int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
			bool line_status)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;

	/* Perform locklessly .... (we need to do some RCUisms here...) */
	state = &sb->irq_state[idx];
	if (!state->valid)
		return -EINVAL;

	/* We don't allow a trigger on a passed-through interrupt */
	if (state->pt_number)
		return -EINVAL;

	if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
		state->asserted = 1;
	else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
		state->asserted = 0;
		return 0;
	}

	/* Trigger the IPI */
	xive_irq_trigger(&state->ipi_data);

	return 0;
}

static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_set_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_get_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	/* We honor the same limits as XICS, at least for now */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}

static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
{
	xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
	xive_native_configure_irq(hw_num, 0, MASKED, 0);
	xive_cleanup_irq_data(xd);
}

static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[i];

		if (!state->valid)
			continue;

		kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
		xive_native_free_irq(state->ipi_number);

		/* Pass-through, cleanup too */
		if (state->pt_number)
			kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);

		state->valid = false;
	}
}

static void kvmppc_xive_free(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvm *kvm = xive->kvm;
	int i;

	debugfs_remove(xive->dentry);

	if (kvm)
		kvm->arch.xive = NULL;

	/* Mask and free interrupts */
	for (i = 0; i <= xive->max_sbid; i++) {
		if (xive->src_blocks[i])
			kvmppc_xive_free_sources(xive->src_blocks[i]);
		kfree(xive->src_blocks[i]);
		xive->src_blocks[i] = NULL;
	}

	if (xive->vp_base != XIVE_INVALID_VP)
		xive_native_free_vp_block(xive->vp_base);

	kfree(xive);
	kfree(dev);
}

static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xive *xive;
	struct kvm *kvm = dev->kvm;
	int ret = 0;

	pr_devel("Creating xive for partition\n");

	xive = kzalloc(sizeof(*xive), GFP_KERNEL);
	if (!xive)
		return -ENOMEM;

	dev->private = xive;
	xive->dev = dev;
	xive->kvm = kvm;

	/* Already there ? */
	if (kvm->arch.xive)
		ret = -EEXIST;
	else
		kvm->arch.xive = xive;

	/* We use the default queue size set by the host */
	xive->q_order = xive_native_default_eq_shift();
	if (xive->q_order < PAGE_SHIFT)
		xive->q_page_order = 0;
	else
		xive->q_page_order = xive->q_order - PAGE_SHIFT;

	/* Allocate a bunch of VPs */
	xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
	pr_devel("VP_Base=%x\n", xive->vp_base);

	if (xive->vp_base == XIVE_INVALID_VP)
		ret = -ENOMEM;

	if (ret) {
		kfree(xive);
		return ret;
	}

	return 0;
}

static int xive_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xive *xive = m->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	u64 t_rm_h_xirr = 0;
	u64 t_rm_h_ipoll = 0;
	u64 t_rm_h_cppr = 0;
	u64 t_rm_h_eoi = 0;
	u64 t_rm_h_ipi = 0;
	u64 t_vm_h_xirr = 0;
	u64 t_vm_h_ipoll = 0;
	u64 t_vm_h_cppr = 0;
	u64 t_vm_h_eoi = 0;
	u64 t_vm_h_ipi = 0;
	unsigned int i;

	if (!kvm)
		return 0;

	seq_printf(m, "=========\nVCPU state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

		if (!xc)
			continue;

		seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x"
			   " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
			   xc->server_num, xc->cppr, xc->hw_cppr,
			   xc->mfrr, xc->pending,
			   xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);

		t_rm_h_xirr += xc->stat_rm_h_xirr;
		t_rm_h_ipoll += xc->stat_rm_h_ipoll;
		t_rm_h_cppr += xc->stat_rm_h_cppr;
		t_rm_h_eoi += xc->stat_rm_h_eoi;
		t_rm_h_ipi += xc->stat_rm_h_ipi;
		t_vm_h_xirr += xc->stat_vm_h_xirr;
		t_vm_h_ipoll += xc->stat_vm_h_ipoll;
		t_vm_h_cppr += xc->stat_vm_h_cppr;
		t_vm_h_eoi += xc->stat_vm_h_eoi;
		t_vm_h_ipi += xc->stat_vm_h_ipi;
	}

	seq_printf(m, "Hcalls totals\n");
	seq_printf(m, " H_XIRR  R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
	seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
	seq_printf(m, " H_CPPR  R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
	seq_printf(m, " H_EOI   R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
	seq_printf(m, " H_IPI   R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);

	return 0;
}

static int xive_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, xive_debug_show, inode->i_private);
}

static const struct file_operations xive_debug_fops = {
	.open = xive_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void xive_debugfs_init(struct kvmppc_xive *xive)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
					   xive, &xive_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

static void kvmppc_xive_init(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;

	/* Register some debug interfaces */
	xive_debugfs_init(xive);
}

struct kvm_device_ops kvm_xive_ops = {
	.name = "kvm-xive",
	.create = kvmppc_xive_create,
	.init = kvmppc_xive_init,
	.destroy = kvmppc_xive_free,
	.set_attr = xive_set_attr,
	.get_attr = xive_get_attr,
	.has_attr = xive_has_attr,
};

void kvmppc_xive_init_module(void)
{
	__xive_vm_h_xirr = xive_vm_h_xirr;
	__xive_vm_h_ipoll = xive_vm_h_ipoll;
	__xive_vm_h_ipi = xive_vm_h_ipi;
	__xive_vm_h_cppr = xive_vm_h_cppr;
	__xive_vm_h_eoi = xive_vm_h_eoi;
}

void kvmppc_xive_exit_module(void)
{
	__xive_vm_h_xirr = NULL;
	__xive_vm_h_ipoll = NULL;
	__xive_vm_h_ipi = NULL;
	__xive_vm_h_cppr = NULL;
	__xive_vm_h_eoi = NULL;
}

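/*
 * Note: the __xive_vm_h_* hooks patched in and out above are,
 * presumably, consumed by the real-mode XICS-on-XIVE hcall glue, which
 * falls back to these virtual-mode handlers when an hcall cannot be
 * completed in real mode.
 */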