/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/anon_inodes.h>
#include <linux/spinlock.h>

#include <linux/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/time.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xics.h"
#if 1
#define XICS_DBG(fmt...) do { } while (0)
#else
#define XICS_DBG(fmt...) trace_printk(fmt)
#endif
#define ENABLE_REALMODE	true
#define DEBUG_REALMODE	false
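/*
 * Terminology: in the XICS architecture the ICS (Interrupt Source
 * Controller) holds per-source state (server, priority, resend), while
 * each vCPU has an ICP (Interrupt Presentation Controller) holding the
 * CPPR (current processor priority), XISR (pending source) and MFRR
 * (IPI request register). Priorities are inverted: 0 is the most
 * favored value and 0xff the least favored.
 */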
/*
 * LOCKING
 * =======
 *
 * Each ICS has a spin lock protecting the information about the IRQ
 * sources and avoiding simultaneous deliveries of the same interrupt.
 *
 * ICP operations are done via a single compare & swap transaction
 * (most ICP state fits in the union kvmppc_icp_state)
 */

/*
 * TODO
 * ====
 *
 * - To speed up resends, keep a bitmap of "resend" set bits in the
 *   ICSes
 *
 * - Speed up server# -> ICP lookup (array ? hash table ?)
 *
 * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
 *   locks array to improve scalability
 */
/* -- ICS routines -- */

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq);
/*
 * Return value ideally indicates how the interrupt was handled, but no
 * callers look at it (given that we don't implement KVM_IRQ_LINE_STATUS),
 * so just return 0.
 */
static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u16 src;

	XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
		return -EINVAL;
	}
	state = &ics->irq_state[src];

	/*
	 * We set state->asserted locklessly. This should be fine as
	 * we are the only setter, thus concurrent access is undefined
	 * to begin with.
	 */
	if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
		state->asserted = 1;
	else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
		state->asserted = 0;
		return 0;
	}

	/* Record which CPU this arrived on for passed-through interrupts */
	state->intr_cpu = raw_smp_processor_id();

	/* Attempt delivery */
	icp_deliver_irq(xics, NULL, irq);

	return 0;
}
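/*
 * Note on level semantics above: only LSIs (level-sensitive sources)
 * track the line via "asserted", so kvmppc_h_eoi() can re-deliver a
 * source whose line is still raised. Message-style (edge) sources never
 * set "asserted"; they are latched as one-shot deliveries.
 */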
static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
			     struct kvmppc_icp *icp)
{
	int i;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];

		if (!state->resend)
			continue;

		XICS_DBG("resend %#x prio %#x\n", state->number,
			 state->priority);

		arch_spin_unlock(&ics->lock);
		local_irq_restore(flags);
		icp_deliver_irq(xics, icp, state->number);
		local_irq_save(flags);
		arch_spin_lock(&ics->lock);
	}

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);
}
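/*
 * The ICS lock is deliberately dropped around each icp_deliver_irq()
 * call above: delivery takes the same ICS lock itself, and
 * arch_spin_lock() is not re-entrant, so holding it across the call
 * would self-deadlock.
 */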
static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
		       struct ics_irq_state *state,
		       u32 server, u32 priority, u32 saved_priority)
{
	bool deliver;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	state->server = server;
	state->priority = priority;
	state->saved_priority = saved_priority;
	deliver = false;
	if ((state->masked_pending || state->resend) && priority != MASKED) {
		state->masked_pending = 0;
		deliver = true;
	}

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return deliver;
}
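/*
 * write_xive() returns true when the priority update unmasks a source
 * with a delivery pending (masked_pending) or owed (resend); callers
 * then follow up with icp_deliver_irq() outside the ICS lock.
 */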
int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, server);
	if (!icp)
		return -EINVAL;

	XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
		 irq, server, priority,
		 state->masked_pending, state->resend);

	if (write_xive(xics, ics, state, server, priority, priority))
		icp_deliver_irq(xics, icp, irq);

	return 0;
}
int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	unsigned long flags;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	*server = state->server;
	*priority = state->priority;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return 0;
}
int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, state->server);
	if (!icp)
		return -EINVAL;

	if (write_xive(xics, ics, state, state->server, state->saved_priority,
		       state->saved_priority))
		icp_deliver_irq(xics, icp, irq);

	return 0;
}
int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	write_xive(xics, ics, state, state->server, MASKED, state->priority);

	return 0;
}
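/*
 * Masking is just "priority = MASKED" (0xff, the least favored value)
 * with the old priority parked in saved_priority so kvmppc_xics_int_on()
 * can restore it. A source that fires while masked only sets
 * masked_pending and is delivered on unmask rather than being dropped.
 */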
/* -- ICP routines, including hcalls -- */

static inline bool icp_try_update(struct kvmppc_icp *icp,
				  union kvmppc_icp_state old,
				  union kvmppc_icp_state new,
				  bool change_self)
{
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	XICS_DBG("UPD [%04lx] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 icp->server_num,
		 old.cppr, old.mfrr, old.pending_pri, old.xisr,
		 old.need_resend, old.out_ee);
	XICS_DBG("UPD - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 new.cppr, new.mfrr, new.pending_pri, new.xisr,
		 new.need_resend, new.out_ee);

	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_XPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee) {
		kvmppc_book3s_queue_irqprio(icp->vcpu,
					    BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
		if (!change_self)
			kvmppc_fast_vcpu_kick(icp->vcpu);
	}
 bail:
	return success;
}
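/*
 * This lock-free scheme works because the entire working ICP state
 * (cppr, mfrr, pending_pri, xisr, need_resend, out_ee) is packed into
 * the single 64-bit word union kvmppc_icp_state.raw: one cmpxchg64
 * either publishes a complete consistent snapshot or fails, in which
 * case the caller's do/while loop re-reads the state and retries.
 */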
static void icp_check_resend(struct kvmppc_xics *xics,
			     struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_check_resend(xics, ics, icp);
	}
}
static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
			       u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	XICS_DBG("try deliver %#x(P:%#x) to server %#lx\n", irq, priority,
		 icp->server_num);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_try_update(icp, old_state, new_state, false));

	return success;
}
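/*
 * Reminder: XICS priorities are inverted, so the "> priority" tests
 * above mean "strictly less favored". Delivery therefore succeeds only
 * if the new interrupt is more favored than the current CPPR, than any
 * IPI request (MFRR) and than the interrupt already pending, if any.
 */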
static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;
	unsigned long flags;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */

 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
				new_irq, state->server);
			goto out;
		}
	}

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate normal deliveries and resends, this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		XICS_DBG("irq %#x masked pending\n", new_irq);
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ICS spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			new_irq = reject;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt we need to set the
		 * resend map bit and mark the ICS state as needing a resend
		 */
		set_bit(ics->icsid, icp->resend_map);
		state->resend = 1;

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between icp_try_to_deliver() atomic update and now, then
		 * we know it might have missed the resend_map bit. So we
		 * retry
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);
}
static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			  u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or non existent
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			WARN_ON(new_state.xisr != XICS_IPI &&
				new_state.xisr != 0);
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here too
	 */
	if (resend)
		icp_check_resend(xics, icp);
}
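/*
 * icp_down_cppr() is the shared tail of H_CPPR (guest lowers its
 * priority) and H_EOI (which restores the pre-interrupt CPPR): lowering
 * CPPR can only expose an IPI or previously deferred sources, never
 * force a rejection, which is why only resends are handled here.
 */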
static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	/* First, remove EE from the processor */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);

	return xirr;
}
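/*
 * XIRR layout: the pre-accept CPPR lives in bits 24-31 and the 24-bit
 * source number (XISR) in bits 0-23. The guest hands this same value
 * back via H_EOI, which is how kvmppc_h_eoi() below recovers the CPPR
 * to restore.
 */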
static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
				 unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp;
	u32 reject;
	bool resend;
	bool local;

	XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
		 vcpu->vcpu_id, server, mfrr);

	icp = vcpu->arch.icp;
	local = icp->server_num == server;
	if (!local) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be rejected as there can be no XISR to
	 * reject. If the MFRR is being made less favored then
	 * there might be a previously-rejected interrupt needing
	 * to be resent.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP State: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, local));

	/* Handle reject */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject);

	/* Handle resend */
	if (resend)
		icp_check_resend(xics, icp);

	return H_SUCCESS;
}
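/*
 * An IPI in XICS is simply "MFRR made more favored than CPPR": it is
 * presented as the reserved source number XICS_IPI at priority MFRR,
 * and the target acknowledges it by resetting its own MFRR to 0xff
 * (least favored) with another H_IPI to itself.
 */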
static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	union kvmppc_icp_state state;
	struct kvmppc_icp *icp;

	icp = vcpu->arch.icp;
	if (icp->server_num != server) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}
	state = READ_ONCE(icp->state);
	kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
	kvmppc_set_gpr(vcpu, 5, state.mfrr);
	return H_SUCCESS;
}
static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
	if (cppr > icp->state.cppr)
		icp_down_cppr(xics, icp, cppr);
	else if (cppr == icp->state.cppr)
		return;

	/*
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_deliver_irq).
	 */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject);
}
static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u32 irq = xirr & 0x00ffffff;
	u16 src;

	XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt, this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
	icp_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		return H_SUCCESS;

	/*
	 * EOI handling: If the interrupt is still asserted, we need to
	 * resend it. We can take a lockless "peek" at the ICS state here.
	 *
	 * "Message" interrupts will never have "asserted" set
	 */
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("h_eoi: IRQ 0x%06x not found !\n", irq);
		return H_PARAMETER;
	}
	state = &ics->irq_state[src];

	/* Still asserted, resend it */
	if (state->asserted)
		icp_deliver_irq(xics, icp, irq);

	kvm_notify_acked_irq(vcpu->kvm, 0, irq);

	return H_SUCCESS;
}
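/*
 * H_EOI thus does two jobs in one hypercall: Down_CPPR using the CPPR
 * byte saved in the XIRR at accept time, then source-side EOI, where a
 * still-asserted LSI is immediately re-delivered.
 */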
int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;

	XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
		 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);

	if (icp->rm_action & XICS_RM_KICK_VCPU) {
		icp->n_rm_kick_vcpu++;
		kvmppc_fast_vcpu_kick(icp->rm_kick_target);
	}
	if (icp->rm_action & XICS_RM_CHECK_RESEND) {
		icp->n_rm_check_resend++;
		icp_check_resend(xics, icp->rm_resend_icp);
	}
	if (icp->rm_action & XICS_RM_REJECT) {
		icp->n_rm_reject++;
		icp_deliver_irq(xics, icp, icp->rm_reject);
	}
	if (icp->rm_action & XICS_RM_NOTIFY_EOI) {
		icp->n_rm_notify_eoi++;
		kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);
	}

	icp->rm_action = 0;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_rm_complete);
int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	unsigned long res;
	int rc = H_SUCCESS;

	/* Check if we have an ICP */
	if (!xics || !vcpu->arch.icp)
		return H_HARDWARE;

	/* These requests don't have real-mode implementations at present */
	switch (req) {
	case H_XIRR_X:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		kvmppc_set_gpr(vcpu, 5, get_tb());
		return rc;
	case H_IPOLL:
		rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
		return rc;
	}

	/* Check for real mode returning too hard */
	if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
		return kvmppc_xics_rm_complete(vcpu, req);

	switch (req) {
	case H_XIRR:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		break;
	case H_CPPR:
		kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_EOI:
		rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_IPI:
		rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
				  kvmppc_get_gpr(vcpu, 5));
		break;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);
/* -- Initialisation code etc. -- */

static void xics_debugfs_irqmap(struct seq_file *m,
				struct kvmppc_passthru_irqmap *pimap)
{
	int i;

	if (!pimap)
		return;
	seq_printf(m, "========\nPIRQ mappings: %d maps\n===========\n",
		   pimap->n_mapped);
	for (i = 0; i < pimap->n_mapped; i++) {
		seq_printf(m, "r_hwirq=%x, v_hwirq=%x\n",
			   pimap->mapped[i].r_hwirq, pimap->mapped[i].v_hwirq);
	}
}
static int xics_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xics *xics = m->private;
	struct kvm *kvm = xics->kvm;
	struct kvm_vcpu *vcpu;
	int icsid, i;
	unsigned long flags;
	unsigned long t_rm_kick_vcpu, t_rm_check_resend;
	unsigned long t_rm_reject, t_rm_notify_eoi;
	unsigned long t_reject, t_check_resend;

	if (!kvm)
		return 0;

	t_rm_kick_vcpu = 0;
	t_rm_notify_eoi = 0;
	t_rm_check_resend = 0;
	t_rm_reject = 0;
	t_check_resend = 0;
	t_reject = 0;

	xics_debugfs_irqmap(m, kvm->arch.pimap);

	seq_printf(m, "=========\nICP state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_icp *icp = vcpu->arch.icp;
		union kvmppc_icp_state state;

		if (!icp)
			continue;

		state.raw = READ_ONCE(icp->state.raw);
		seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
			   icp->server_num, state.xisr,
			   state.pending_pri, state.cppr, state.mfrr,
			   state.out_ee, state.need_resend);
		t_rm_kick_vcpu += icp->n_rm_kick_vcpu;
		t_rm_notify_eoi += icp->n_rm_notify_eoi;
		t_rm_check_resend += icp->n_rm_check_resend;
		t_rm_reject += icp->n_rm_reject;
		t_check_resend += icp->n_check_resend;
		t_reject += icp->n_reject;
	}

	seq_printf(m, "ICP Guest->Host totals: kick_vcpu=%lu check_resend=%lu reject=%lu notify_eoi=%lu\n",
		   t_rm_kick_vcpu, t_rm_check_resend,
		   t_rm_reject, t_rm_notify_eoi);
	seq_printf(m, "ICP Real Mode totals: check_resend=%lu resend=%lu\n",
		   t_check_resend, t_reject);
	for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!ics)
			continue;

		seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
			   icsid);

		local_irq_save(flags);
		arch_spin_lock(&ics->lock);

		for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
			struct ics_irq_state *irq = &ics->irq_state[i];

			seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x asserted %d resend %d masked pending %d\n",
				   irq->number, irq->server, irq->priority,
				   irq->saved_priority, irq->asserted,
				   irq->resend, irq->masked_pending);

		}
		arch_spin_unlock(&ics->lock);
		local_irq_restore(flags);
	}
	return 0;
}
static int xics_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, xics_debug_show, inode->i_private);
}

static const struct file_operations xics_debug_fops = {
	.open = xics_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static void xics_debugfs_init(struct kvmppc_xics *xics)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xics->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
					   xics, &xics_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}
static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
					struct kvmppc_xics *xics, int irq)
{
	struct kvmppc_ics *ics;
	int i, icsid;

	icsid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

	/* ICS already exists - somebody else got here first */
	if (xics->ics[icsid])
		goto out;

	/* Create the ICS */
	ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
	if (!ics)
		goto out;

	ics->icsid = icsid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
		ics->irq_state[i].priority = MASKED;
		ics->irq_state[i].saved_priority = MASKED;
	}
	smp_wmb();
	xics->ics[icsid] = ics;

	if (icsid > xics->max_icsid)
		xics->max_icsid = icsid;

 out:
	mutex_unlock(&kvm->lock);
	return xics->ics[icsid];
}
int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
{
	struct kvmppc_icp *icp;

	if (!vcpu->kvm->arch.xics)
		return -ENODEV;

	if (kvmppc_xics_find_server(vcpu->kvm, server_num))
		return -EEXIST;

	icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
	if (!icp)
		return -ENOMEM;

	icp->vcpu = vcpu;
	icp->server_num = server_num;
	icp->state.mfrr = MASKED;
	icp->state.pending_pri = MASKED;
	vcpu->arch.icp = icp;

	XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);

	return 0;
}
u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	union kvmppc_icp_state state;

	if (!icp)
		return 0;
	state = icp->state;
	return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
		((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
		((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
		((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
}
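/*
 * The packed u64 above is the KVM_REG_PPC_ICP_STATE one-reg encoding
 * userspace uses to save and restore a vCPU's presentation state across
 * migration; kvmppc_xics_set_icp() below is its inverse.
 */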
int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_ics *ics;
	u8 cppr, mfrr, pending_pri;
	u32 xisr;
	u16 src;
	bool resend;

	if (!icp || !xics)
		return -ENOENT;

	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
	pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;

	/* Require the new state to be internally consistent */
	if (xisr == 0) {
		if (pending_pri != 0xff)
			return -EINVAL;
	} else if (xisr == XICS_IPI) {
		if (pending_pri != mfrr || pending_pri >= cppr)
			return -EINVAL;
	} else {
		if (pending_pri >= mfrr || pending_pri >= cppr)
			return -EINVAL;
		ics = kvmppc_xics_find_ics(xics, xisr, &src);
		if (!ics)
			return -EINVAL;
	}

	new_state.raw = 0;
	new_state.cppr = cppr;
	new_state.xisr = xisr;
	new_state.mfrr = mfrr;
	new_state.pending_pri = pending_pri;

	/*
	 * Deassert the CPU interrupt request.
	 * icp_try_update will reassert it if necessary.
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	/*
	 * Note that if we displace an interrupt from old_state.xisr,
	 * we don't mark it as rejected. We expect userspace to set
	 * the state of the interrupt sources to be consistent with
	 * the ICP states (either before or afterwards, which doesn't
	 * matter). We do handle resends due to CPPR becoming less
	 * favoured because that is necessary to end up with a
	 * consistent state in the situation where userspace restores
	 * the ICS states before the ICP states.
	 */
	do {
		old_state = READ_ONCE(icp->state);

		if (new_state.mfrr <= old_state.mfrr) {
			resend = false;
			new_state.need_resend = old_state.need_resend;
		} else {
			resend = old_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, false));

	if (resend)
		icp_check_resend(xics, icp);

	return 0;
}
static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	int ret;
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val, prio;
	unsigned long flags;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return -ENOENT;

	irqp = &ics->irq_state[idx];
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	ret = -ENOENT;
	if (irqp->exists) {
		val = irqp->server;
		prio = irqp->priority;
		if (prio == MASKED) {
			val |= KVM_XICS_MASKED;
			prio = irqp->saved_priority;
		}
		val |= prio << KVM_XICS_PRIORITY_SHIFT;
		if (irqp->lsi) {
			val |= KVM_XICS_LEVEL_SENSITIVE;
			if (irqp->asserted)
				val |= KVM_XICS_PENDING;
		} else if (irqp->masked_pending || irqp->resend)
			val |= KVM_XICS_PENDING;
		ret = 0;
	}
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (!ret && put_user(val, ubufp))
		ret = -EFAULT;

	return ret;
}
static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 prio;
	u32 server;
	unsigned long flags;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics) {
		ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
		if (!ics)
			return -ENOMEM;
	}
	irqp = &ics->irq_state[idx];
	if (get_user(val, ubufp))
		return -EFAULT;

	server = val & KVM_XICS_DESTINATION_MASK;
	prio = val >> KVM_XICS_PRIORITY_SHIFT;
	if (prio != MASKED &&
	    kvmppc_xics_find_server(xics->kvm, server) == NULL)
		return -EINVAL;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	irqp->server = server;
	irqp->saved_priority = prio;
	if (val & KVM_XICS_MASKED)
		prio = MASKED;
	irqp->priority = prio;
	irqp->resend = 0;
	irqp->masked_pending = 0;
	irqp->lsi = 0;
	irqp->asserted = 0;
	if (val & KVM_XICS_LEVEL_SENSITIVE) {
		irqp->lsi = 1;
		if (val & KVM_XICS_PENDING)
			irqp->asserted = 1;
	}
	irqp->exists = 1;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (val & KVM_XICS_PENDING)
		icp_deliver_irq(xics, NULL, irqp->number);

	return 0;
}
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status)
{
	struct kvmppc_xics *xics = kvm->arch.xics;

	if (!xics)
		return -ENODEV;
	return ics_deliver_irq(xics, irq, level);
}

int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
			   level, line_status);
}
static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_set_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_get_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}
static void kvmppc_xics_free(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = dev->private;
	int i;
	struct kvm *kvm = xics->kvm;

	debugfs_remove(xics->dentry);

	if (kvm)
		kvm->arch.xics = NULL;

	for (i = 0; i <= xics->max_icsid; i++)
		kfree(xics->ics[i]);
	kfree(xics);
	kfree(dev);
}
static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xics *xics;
	struct kvm *kvm = dev->kvm;
	int ret = 0;

	xics = kzalloc(sizeof(*xics), GFP_KERNEL);
	if (!xics)
		return -ENOMEM;

	dev->private = xics;
	xics->dev = dev;
	xics->kvm = kvm;

	/* Already there ? */
	if (kvm->arch.xics)
		ret = -EEXIST;
	else
		kvm->arch.xics = xics;

	if (ret) {
		kfree(xics);
		return ret;
	}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		/* Enable real mode support */
		xics->real_mode = ENABLE_REALMODE;
		xics->real_mode_dbg = DEBUG_REALMODE;
	}
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

	return 0;
}
static void kvmppc_xics_init(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = (struct kvmppc_xics *)dev->private;

	xics_debugfs_init(xics);
}

struct kvm_device_ops kvm_xics_ops = {
	.name = "kvm-xics",
	.create = kvmppc_xics_create,
	.init = kvmppc_xics_init,
	.destroy = kvmppc_xics_free,
	.set_attr = xics_set_attr,
	.get_attr = xics_get_attr,
	.has_attr = xics_has_attr,
};
int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 xcpu)
{
	struct kvmppc_xics *xics = dev->private;
	int r = -EBUSY;

	if (dev->ops != &kvm_xics_ops)
		return -EPERM;
	if (xics->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type)
		return -EBUSY;

	r = kvmppc_xics_create_icp(vcpu, xcpu);
	if (!r)
		vcpu->arch.irq_type = KVMPPC_IRQ_XICS;

	return r;
}

void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.icp)
		return;
	kfree(vcpu->arch.icp);
	vcpu->arch.icp = NULL;
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
}
static int xics_set_irq(struct kvm_kernel_irq_routing_entry *e,
			struct kvm *kvm, int irq_source_id, int level,
			bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
}

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
	entries->gsi = gsi;
	entries->type = KVM_IRQ_ROUTING_IRQCHIP;
	entries->set = xics_set_irq;
	entries->irqchip.irqchip = 0;
	entries->irqchip.pin = gsi;
	return 1;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}
void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
			    unsigned long host_irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	u16 idx;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return;

	ics->irq_state[idx].host_irq = host_irq;
	ics->irq_state[idx].intr_cpu = -1;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_set_mapped);

void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq,
			    unsigned long host_irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	u16 idx;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return;

	ics->irq_state[idx].host_irq = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_clr_mapped);