/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/anon_inodes.h>
#include <linux/spinlock.h>

#include <linux/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/time.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xics.h"
#if 1
#define XICS_DBG(fmt...) do { } while (0)
#else
#define XICS_DBG(fmt...) trace_printk(fmt)
#endif

#define ENABLE_REALMODE	true
#define DEBUG_REALMODE	false
/*
 * LOCKING
 * =======
 *
 * Each ICS has a spin lock protecting the information about the IRQ
 * sources and avoiding simultaneous deliveries of the same interrupt.
 *
 * ICP operations are done via a single compare & swap transaction
 * (most ICP state fits in the union kvmppc_icp_state)
 */

/*
 * TODO
 * ====
 *
 * - To speed up resends, keep a bitmap of "resend" set bits in the
 *   ICS
 *
 * - Speed up server# -> ICP lookup (array ? hash table ?)
 *
 * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
 *   locks array to improve scalability
 */
/* -- ICS routines -- */

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq, bool check_resend);
/*
 * Return value ideally indicates how the interrupt was handled, but no
 * callers look at it (given that we don't implement KVM_IRQ_LINE_STATUS),
 * so just return 0.
 */
static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u16 src;
	u32 pq_old, pq_new;

	XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
		return -EINVAL;
	}
	state = &ics->irq_state[src];

	if (level == KVM_INTERRUPT_SET_LEVEL || level == KVM_INTERRUPT_SET)
		level = 1;
	else if (level == KVM_INTERRUPT_UNSET)
		level = 0;
	/*
	 * Take other values the same as 1, consistent with original code.
	 */

	if (!state->lsi && level == 0) /* noop for MSI */
		return 0;

	do {
		pq_old = state->pq_state;
		if (state->lsi) {
			if (level) {
				if (pq_old & PQ_PRESENTED)
					/* Setting already set LSI ... */
					return 0;

				pq_new = PQ_PRESENTED;
			} else
				pq_new = 0;
		} else
			pq_new = ((pq_old << 1) & 3) | PQ_PRESENTED;
	} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

	/* Test P=1, Q=0, this is the only case where we present */
	if (pq_new == PQ_PRESENTED)
		icp_deliver_irq(xics, NULL, irq, false);

	/* Record which CPU this arrived on for passed-through interrupts */
	if (state->host_irq)
		state->intr_cpu = raw_smp_processor_id();

	return 0;
}
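
/*
 * Worked example of the MSI update loop above (illustrative sketch,
 * assuming PQ_PRESENTED = 1 and PQ_QUEUED = 2 as in book3s_xics.h):
 *
 *	pq_old = 00 -> pq_new = ((00 << 1) & 3) | P = 01  (P only: present)
 *	pq_old = 01 -> pq_new = ((01 << 1) & 3) | P = 11  (P and Q: coalesce)
 *	pq_old = 11 -> pq_new = ((11 << 1) & 3) | P = 11  (stays coalesced)
 *
 * Only the 00 -> 01 transition makes pq_new == PQ_PRESENTED, so a source
 * is presented to the ICP at most once until EOI shifts Q back into P.
 */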
static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
			     struct kvmppc_icp *icp)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];
		if (state->resend) {
			XICS_DBG("resend %#x prio %#x\n", state->number,
				 state->priority);
			icp_deliver_irq(xics, icp, state->number, true);
		}
	}
}
static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
		       struct ics_irq_state *state,
		       u32 server, u32 priority, u32 saved_priority)
{
	bool deliver;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	state->server = server;
	state->priority = priority;
	state->saved_priority = saved_priority;
	deliver = false;
	if ((state->masked_pending || state->resend) && priority != MASKED) {
		state->masked_pending = 0;
		state->resend = 0;
		deliver = true;
	}

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return deliver;
}
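
/*
 * Usage sketch (mirrors the callers below): write_xive() returns true when
 * a previously masked or rejected interrupt became deliverable, so callers
 * pair it with a delivery attempt, e.g.
 *
 *	if (write_xive(xics, ics, state, server, prio, prio))
 *		icp_deliver_irq(xics, icp, irq, false);
 */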
int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, server);
	if (!icp)
		return -EINVAL;

	XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
		 irq, server, priority,
		 state->masked_pending, state->resend);

	if (write_xive(xics, ics, state, server, priority, priority))
		icp_deliver_irq(xics, icp, irq, false);

	return 0;
}
int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	unsigned long flags;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	*server = state->server;
	*priority = state->priority;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return 0;
}
int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, state->server);
	if (!icp)
		return -EINVAL;

	if (write_xive(xics, ics, state, state->server, state->saved_priority,
		       state->saved_priority))
		icp_deliver_irq(xics, icp, irq, false);

	return 0;
}
int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	write_xive(xics, ics, state, state->server, MASKED, state->priority);

	return 0;
}
/* -- ICP routines, including hcalls -- */

static inline bool icp_try_update(struct kvmppc_icp *icp,
				  union kvmppc_icp_state old,
				  union kvmppc_icp_state new,
				  bool change_self)
{
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	XICS_DBG("UPD [%04lx] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 icp->server_num,
		 old.cppr, old.mfrr, old.pending_pri, old.xisr,
		 old.need_resend, old.out_ee);
	XICS_DBG("UPD        - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 new.cppr, new.mfrr, new.pending_pri, new.xisr,
		 new.need_resend, new.out_ee);

	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee) {
		kvmppc_book3s_queue_irqprio(icp->vcpu,
					    BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
		if (!change_self)
			kvmppc_fast_vcpu_kick(icp->vcpu);
	}
 bail:
	return success;
}
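
/*
 * All ICP state machine transitions below are built on this same lock-free
 * pattern (a sketch of the loop shape, not an additional API):
 *
 *	union kvmppc_icp_state old_state, new_state;
 *
 *	do {
 *		old_state = new_state = READ_ONCE(icp->state);
 *		... compute new_state from old_state ...
 *	} while (!icp_try_update(icp, old_state, new_state, change_self));
 *
 * Because the whole ICP state fits in state.raw, the cmpxchg64 makes each
 * step a single atomic transaction; a racing update just retries the loop.
 */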
static void icp_check_resend(struct kvmppc_xics *xics,
			     struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_check_resend(xics, ics, icp);
	}
}
static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
			       u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	XICS_DBG("try deliver %#x(P:%#x) to server %#lx\n", irq, priority,
		 icp->server_num);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_try_update(icp, old_state, new_state, false));

	return success;
}
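
/*
 * Numeric example (illustration only): with cppr = 0xff, mfrr = 0xff and
 * pending_pri = 0x05 / xisr = 0x1001, delivering irq 0x1002 at priority
 * 0x03 succeeds (0xff > 3, 0xff > 3, 5 > 3) and returns 0x1001 in *reject;
 * the caller must then re-deliver 0x1001 or it would be lost.
 */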
static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq, bool check_resend)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;
	unsigned long flags;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */
 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
				new_irq, state->server);
			goto out;
		}
	}

	if (check_resend)
		if (!state->resend)
			goto out;

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate normal deliveries and resends, this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		XICS_DBG("irq %#x masked pending\n", new_irq);
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ics spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			new_irq = reject;
			check_resend = 0;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt we need to set the
		 * resend map bit and mark the ICS state as needing a resend
		 */
		state->resend = 1;

		/*
		 * Make sure when checking resend, we don't miss the resend
		 * if resend_map bit is seen and cleared.
		 */
		smp_wmb();
		set_bit(ics->icsid, icp->resend_map);

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between icp_try_to_deliver() atomic update and now, then
		 * we know it might have missed the resend_map bit. So we
		 * retry
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			state->resend = 0;
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			check_resend = 0;
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);
}
static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			  u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or non-existent
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			WARN_ON(new_state.xisr != XICS_IPI &&
				new_state.xisr != 0);
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here too
	 */
	if (resend)
		icp_check_resend(xics, icp);
}
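
/*
 * Down_CPPR example (illustration only): lowering the CPPR from 0x00 to
 * 0xff with mfrr = 0x04 and nothing pending (xisr = 0, pending_pri = 0xff)
 * takes the "Cut down" branch: 0x04 < 0xff and 0x04 <= 0xff, so
 * pending_pri becomes 0x04 and xisr becomes XICS_IPI, i.e. a queued IPI
 * surfaces as soon as the new priority allows it.
 */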
static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	/* First, remove EE from the processor */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);

	return xirr;
}
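
/*
 * Packing example (illustration only): with cppr = 0xff and
 * xisr = 0x001001, the hcall returns xirr = 0xff001001 -- the source
 * number in the low 24 bits and the old CPPR in the top byte, which the
 * guest later hands back through H_EOI.
 */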
static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
				 unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp;
	u32 reject;
	bool resend;
	bool local;

	XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
		 vcpu->vcpu_id, server, mfrr);

	icp = vcpu->arch.icp;
	local = icp->server_num == server;
	if (!local) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be rejected as there can be no XISR to
	 * reject.  If the MFRR is being made less favored then
	 * there might be a previously-rejected interrupt needing
	 * to be resent.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP State: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, local));

	/* Handle reject */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject, false);

	/* Handle resend */
	if (resend)
		icp_check_resend(xics, icp);

	return H_SUCCESS;
}
static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	union kvmppc_icp_state state;
	struct kvmppc_icp *icp;

	icp = vcpu->arch.icp;
	if (icp->server_num != server) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}
	state = READ_ONCE(icp->state);
	kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
	kvmppc_set_gpr(vcpu, 5, state.mfrr);

	return H_SUCCESS;
}
static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
	if (cppr > icp->state.cppr)
		icp_down_cppr(xics, icp, cppr);
	else if (cppr == icp->state.cppr)
		return;

	/*
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_deliver_irq).
	 */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject, false);
}
static int ics_eoi(struct kvm_vcpu *vcpu, u32 irq)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	u32 pq_old, pq_new;

	/*
	 * ICS EOI handling: For LSI, if P bit is still set, we need to
	 * resend it.
	 *
	 * For MSI, we move Q bit into P (and clear Q). If it is set,
	 * resend it.
	 */

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_eoi: IRQ 0x%06x not found !\n", irq);
		return H_PARAMETER;
	}
	state = &ics->irq_state[src];

	if (state->lsi)
		pq_new = state->pq_state;
	else
		do {
			pq_old = state->pq_state;
			pq_new = pq_old >> 1;
		} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

	if (pq_new & PQ_PRESENTED)
		icp_deliver_irq(xics, icp, irq, false);

	kvm_notify_acked_irq(vcpu->kvm, 0, irq);

	return H_SUCCESS;
}
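
/*
 * EOI example for an MSI (illustration only): if a second interrupt
 * arrived while the first was in flight, pq_state is 11 (P and Q set).
 * The shift above yields 01, which still has PQ_PRESENTED set, so the
 * queued occurrence is re-presented; a lone pending interrupt (01)
 * simply drops to 00 and nothing is resent.
 */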
static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 irq = xirr & 0x00ffffff;

	XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt, this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
	icp_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		return H_SUCCESS;

	return ics_eoi(vcpu, irq);
}
int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;

	XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
		 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);

	if (icp->rm_action & XICS_RM_KICK_VCPU) {
		icp->n_rm_kick_vcpu++;
		kvmppc_fast_vcpu_kick(icp->rm_kick_target);
	}
	if (icp->rm_action & XICS_RM_CHECK_RESEND) {
		icp->n_rm_check_resend++;
		icp_check_resend(xics, icp->rm_resend_icp);
	}
	if (icp->rm_action & XICS_RM_NOTIFY_EOI) {
		icp->n_rm_notify_eoi++;
		kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);
	}

	icp->rm_action = 0;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_rm_complete);
int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	unsigned long res;
	int rc = H_SUCCESS;

	/* Check if we have an ICP */
	if (!xics || !vcpu->arch.icp)
		return H_HARDWARE;

	/* These requests don't have real-mode implementations at present */
	switch (req) {
	case H_XIRR_X:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		kvmppc_set_gpr(vcpu, 5, get_tb());
		return rc;
	case H_IPOLL:
		rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
		return rc;
	}

	/* Check for real mode returning too hard */
	if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
		return kvmppc_xics_rm_complete(vcpu, req);

	switch (req) {
	case H_XIRR:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		break;
	case H_CPPR:
		kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_EOI:
		rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_IPI:
		rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
				  kvmppc_get_gpr(vcpu, 5));
		break;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);
/* -- Initialisation code etc. -- */

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
static void xics_debugfs_irqmap(struct seq_file *m,
				struct kvmppc_passthru_irqmap *pimap)
{
	int i;

	if (!pimap)
		return;
	seq_printf(m, "========\nPIRQ mappings: %d maps\n===========\n",
			pimap->n_mapped);
	for (i = 0; i < pimap->n_mapped; i++) {
		seq_printf(m, "r_hwirq=%x, v_hwirq=%x\n",
			pimap->mapped[i].r_hwirq, pimap->mapped[i].v_hwirq);
	}
}
#endif
static int xics_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xics *xics = m->private;
	struct kvm *kvm = xics->kvm;
	struct kvm_vcpu *vcpu;
	int icsid, i;
	unsigned long flags;
	unsigned long t_rm_kick_vcpu, t_rm_check_resend;
	unsigned long t_rm_notify_eoi;
	unsigned long t_reject, t_check_resend;

	if (!kvm)
		return 0;

	t_rm_kick_vcpu = 0;
	t_rm_notify_eoi = 0;
	t_rm_check_resend = 0;
	t_check_resend = 0;
	t_reject = 0;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	xics_debugfs_irqmap(m, kvm->arch.pimap);
#endif

	seq_printf(m, "=========\nICP state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_icp *icp = vcpu->arch.icp;
		union kvmppc_icp_state state;

		if (!icp)
			continue;

		state.raw = READ_ONCE(icp->state.raw);
		seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
			   icp->server_num, state.xisr,
			   state.pending_pri, state.cppr, state.mfrr,
			   state.out_ee, state.need_resend);
		t_rm_kick_vcpu += icp->n_rm_kick_vcpu;
		t_rm_notify_eoi += icp->n_rm_notify_eoi;
		t_rm_check_resend += icp->n_rm_check_resend;
		t_check_resend += icp->n_check_resend;
		t_reject += icp->n_reject;
	}

	seq_printf(m, "ICP Guest->Host totals: kick_vcpu=%lu check_resend=%lu notify_eoi=%lu\n",
			t_rm_kick_vcpu, t_rm_check_resend,
			t_rm_notify_eoi);
	seq_printf(m, "ICP Real Mode totals: check_resend=%lu resend=%lu\n",
			t_check_resend, t_reject);

	for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!ics)
			continue;

		seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
			   icsid);

		local_irq_save(flags);
		arch_spin_lock(&ics->lock);

		for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
			struct ics_irq_state *irq = &ics->irq_state[i];

			seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x pq_state %d resend %d masked pending %d\n",
				   irq->number, irq->server, irq->priority,
				   irq->saved_priority, irq->pq_state,
				   irq->resend, irq->masked_pending);

		}
		arch_spin_unlock(&ics->lock);
		local_irq_restore(flags);
	}
	return 0;
}
static int xics_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, xics_debug_show, inode->i_private);
}

static const struct file_operations xics_debug_fops = {
	.open = xics_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static void xics_debugfs_init(struct kvmppc_xics *xics)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xics->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
					   xics, &xics_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}
static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
					struct kvmppc_xics *xics, int irq)
{
	struct kvmppc_ics *ics;
	int i, icsid;

	icsid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

	/* ICS already exists - somebody else got here first */
	if (xics->ics[icsid])
		goto out;

	/* Create the ICS */
	ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
	if (!ics)
		goto out;

	ics->icsid = icsid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
		ics->irq_state[i].priority = MASKED;
		ics->irq_state[i].saved_priority = MASKED;
	}
	smp_wmb();
	xics->ics[icsid] = ics;

	if (icsid > xics->max_icsid)
		xics->max_icsid = icsid;

 out:
	mutex_unlock(&kvm->lock);
	return xics->ics[icsid];
}
int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
{
	struct kvmppc_icp *icp;

	if (!vcpu->kvm->arch.xics)
		return -ENODEV;

	if (kvmppc_xics_find_server(vcpu->kvm, server_num))
		return -EEXIST;

	icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
	if (!icp)
		return -ENOMEM;

	icp->vcpu = vcpu;
	icp->server_num = server_num;
	icp->state.mfrr = MASKED;
	icp->state.pending_pri = MASKED;
	vcpu->arch.icp = icp;

	XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);

	return 0;
}
u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	union kvmppc_icp_state state;

	if (!icp)
		return 0;
	state = icp->state;
	return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
		((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
		((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
		((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
}
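
/*
 * Layout example (illustrative, assuming the *_SHIFT values from the uapi
 * kvm.h: CPPR 56, XISR 32, MFRR 24, PPRI 16): an ICP with cppr = 0x00,
 * xisr = 0x1001, mfrr = 0xff and pending_pri = 0x05 is returned as
 * 0x00001001ff050000, the value userspace sees via the one-reg interface.
 */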
int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_ics *ics;
	u8 cppr, mfrr, pending_pri;
	u32 xisr;
	u16 src;
	bool resend;

	if (!icp || !xics)
		return -ENOENT;

	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
	pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;

	/* Require the new state to be internally consistent */
	if (xisr == 0) {
		if (pending_pri != 0xff)
			return -EINVAL;
	} else if (xisr == XICS_IPI) {
		if (pending_pri != mfrr || pending_pri >= cppr)
			return -EINVAL;
	} else {
		if (pending_pri >= mfrr || pending_pri >= cppr)
			return -EINVAL;
		ics = kvmppc_xics_find_ics(xics, xisr, &src);
		if (!ics)
			return -EINVAL;
	}

	new_state.raw = 0;
	new_state.cppr = cppr;
	new_state.xisr = xisr;
	new_state.mfrr = mfrr;
	new_state.pending_pri = pending_pri;

	/*
	 * Deassert the CPU interrupt request.
	 * icp_try_update will reassert it if necessary.
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	/*
	 * Note that if we displace an interrupt from old_state.xisr,
	 * we don't mark it as rejected.  We expect userspace to set
	 * the state of the interrupt sources to be consistent with
	 * the ICP states (either before or afterwards, which doesn't
	 * matter).  We do handle resends due to CPPR becoming less
	 * favoured because that is necessary to end up with a
	 * consistent state in the situation where userspace restores
	 * the ICS states before the ICP states.
	 */
	do {
		old_state = READ_ONCE(icp->state);

		if (new_state.mfrr <= old_state.mfrr) {
			resend = false;
			new_state.need_resend = old_state.need_resend;
		} else {
			resend = old_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, false));

	if (resend)
		icp_check_resend(xics, icp);

	return 0;
}
static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	int ret;
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val, prio;
	unsigned long flags;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return -ENOENT;

	irqp = &ics->irq_state[idx];
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	ret = -ENOENT;
	if (irqp->exists) {
		val = irqp->server;
		prio = irqp->priority;
		if (prio == MASKED) {
			val |= KVM_XICS_MASKED;
			prio = irqp->saved_priority;
		}
		val |= prio << KVM_XICS_PRIORITY_SHIFT;
		if (irqp->lsi) {
			val |= KVM_XICS_LEVEL_SENSITIVE;
			if (irqp->pq_state & PQ_PRESENTED)
				val |= KVM_XICS_PENDING;
		} else if (irqp->masked_pending || irqp->resend)
			val |= KVM_XICS_PENDING;

		if (irqp->pq_state & PQ_PRESENTED)
			val |= KVM_XICS_PRESENTED;

		if (irqp->pq_state & PQ_QUEUED)
			val |= KVM_XICS_QUEUED;

		ret = 0;
	}
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (!ret && put_user(val, ubufp))
		ret = -EFAULT;

	return ret;
}
static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 prio;
	u32 server;
	unsigned long flags;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics) {
		ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
		if (!ics)
			return -ENOMEM;
	}
	irqp = &ics->irq_state[idx];
	if (get_user(val, ubufp))
		return -EFAULT;

	server = val & KVM_XICS_DESTINATION_MASK;
	prio = val >> KVM_XICS_PRIORITY_SHIFT;
	if (prio != MASKED &&
	    kvmppc_xics_find_server(xics->kvm, server) == NULL)
		return -EINVAL;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	irqp->server = server;
	irqp->saved_priority = prio;
	if (val & KVM_XICS_MASKED)
		prio = MASKED;
	irqp->priority = prio;
	irqp->resend = 0;
	irqp->masked_pending = 0;
	irqp->lsi = 0;
	irqp->pq_state = 0;
	if (val & KVM_XICS_LEVEL_SENSITIVE)
		irqp->lsi = 1;
	/* If PENDING, set P in case P is not saved because of old code */
	if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING)
		irqp->pq_state |= PQ_PRESENTED;
	if (val & KVM_XICS_QUEUED)
		irqp->pq_state |= PQ_QUEUED;
	irqp->exists = 1;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (val & KVM_XICS_PENDING)
		icp_deliver_irq(xics, NULL, irqp->number, false);

	return 0;
}
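
/*
 * Encoding example (illustration only): a source routed to server 0x1 at
 * priority 0x05, currently masked with an interrupt pending, is saved as
 *
 *	val = 0x1 | (0x05 << KVM_XICS_PRIORITY_SHIFT) |
 *	      KVM_XICS_MASKED | KVM_XICS_PENDING;
 *
 * On restore, the code above keeps the live priority MASKED, remembers
 * 0x05 in saved_priority, sets PQ_PRESENTED, and the final delivery
 * attempt latches masked_pending until the source is unmasked.
 */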
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status)
{
	struct kvmppc_xics *xics = kvm->arch.xics;

	if (!xics)
		return -ENODEV;
	return ics_deliver_irq(xics, irq, level);
}

int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
			   level, line_status);
}
static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_set_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_get_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}
static void kvmppc_xics_free(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = dev->private;
	int i;
	struct kvm *kvm = xics->kvm;

	debugfs_remove(xics->dentry);

	if (kvm)
		kvm->arch.xics = NULL;

	for (i = 0; i <= xics->max_icsid; i++)
		kfree(xics->ics[i]);
	kfree(xics);
	kfree(dev);
}
*dev
, u32 type
)
1381 struct kvmppc_xics
*xics
;
1382 struct kvm
*kvm
= dev
->kvm
;
1385 xics
= kzalloc(sizeof(*xics
), GFP_KERNEL
);
1389 dev
->private = xics
;
1393 /* Already there ? */
1397 kvm
->arch
.xics
= xics
;
1404 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1405 if (cpu_has_feature(CPU_FTR_ARCH_206
)) {
1406 /* Enable real mode support */
1407 xics
->real_mode
= ENABLE_REALMODE
;
1408 xics
->real_mode_dbg
= DEBUG_REALMODE
;
1410 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
static void kvmppc_xics_init(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = (struct kvmppc_xics *)dev->private;

	xics_debugfs_init(xics);
}

struct kvm_device_ops kvm_xics_ops = {
	.name = "kvm-xics",
	.create = kvmppc_xics_create,
	.init = kvmppc_xics_init,
	.destroy = kvmppc_xics_free,
	.set_attr = xics_set_attr,
	.get_attr = xics_get_attr,
	.has_attr = xics_has_attr,
};
int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 xcpu)
{
	struct kvmppc_xics *xics = dev->private;
	int r = -EBUSY;

	if (dev->ops != &kvm_xics_ops)
		return -EPERM;
	if (xics->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type)
		return -EBUSY;

	r = kvmppc_xics_create_icp(vcpu, xcpu);
	if (!r)
		vcpu->arch.irq_type = KVMPPC_IRQ_XICS;

	return r;
}
void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.icp)
		return;
	kfree(vcpu->arch.icp);
	vcpu->arch.icp = NULL;
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
}
static int xics_set_irq(struct kvm_kernel_irq_routing_entry *e,
			struct kvm *kvm, int irq_source_id, int level,
			bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
}

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
	entries->gsi = gsi;
	entries->type = KVM_IRQ_ROUTING_IRQCHIP;
	entries->set = xics_set_irq;
	entries->irqchip.irqchip = 0;
	entries->irqchip.pin = gsi;
	return 1;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}
void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
			    unsigned long host_irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	u16 idx;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return;

	ics->irq_state[idx].host_irq = host_irq;
	ics->irq_state[idx].intr_cpu = -1;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_set_mapped);

void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq,
			    unsigned long host_irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	u16 idx;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return;

	ics->irq_state[idx].host_irq = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_clr_mapped);