/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/anon_inodes.h>
#include <linux/spinlock.h>

#include <linux/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/time.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xics.h"
#if 1
#define XICS_DBG(fmt...) do { } while (0)
#else
#define XICS_DBG(fmt...) trace_printk(fmt)
#endif

#define ENABLE_REALMODE	true
#define DEBUG_REALMODE	false
/*
 * LOCKING
 * =======
 *
 * Each ICS has a spin lock protecting the information about the IRQ
 * sources and avoiding simultaneous deliveries of the same interrupt.
 *
 * ICP operations are done via a single compare & swap transaction
 * (most ICP state fits in the union kvmppc_icp_state)
 */

/*
 * TODO
 * ====
 *
 * - To speed up resends, keep a bitmap of "resend" set bits in the
 *   ICS
 *
 * - Speed up server# -> ICP lookup (array ? hash table ?)
 *
 * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
 *   locks array to improve scalability
 */
/* -- ICS routines -- */
static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq);
/*
 * Return value ideally indicates how the interrupt was handled, but no
 * callers look at it (given that we don't implement KVM_IRQ_LINE_STATUS),
 * so just return 0.
 */
static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u16 src;

	XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
		return -EINVAL;
	}
	state = &ics->irq_state[src];
	if (!state->exists)
		return -EINVAL;

	/*
	 * We set state->asserted locklessly. This should be fine as
	 * we are the only setter, thus concurrent access is undefined
	 * to begin with.
	 */
	if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
		state->asserted = 1;
	else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
		state->asserted = 0;
		return 0;
	}

	/* Record which CPU this arrived on for passed-through interrupts */
	if (state->host_irq)
		state->intr_cpu = raw_smp_processor_id();

	/* Attempt delivery */
	icp_deliver_irq(xics, NULL, irq);

	return 0;
}
static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
			     struct kvmppc_icp *icp)
{
	int i;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];

		if (!state->resend)
			continue;

		XICS_DBG("resend %#x prio %#x\n", state->number,
			 state->priority);

		arch_spin_unlock(&ics->lock);
		local_irq_restore(flags);
		icp_deliver_irq(xics, icp, state->number);
		local_irq_save(flags);
		arch_spin_lock(&ics->lock);
	}

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);
}
static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
		       struct ics_irq_state *state,
		       u32 server, u32 priority, u32 saved_priority)
{
	bool deliver;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	state->server = server;
	state->priority = priority;
	state->saved_priority = saved_priority;
	deliver = false;
	if ((state->masked_pending || state->resend) && priority != MASKED) {
		state->masked_pending = 0;
		deliver = true;
	}

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return deliver;
}
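/*
 * Editor's note: write_xive() returns true when the caller should
 * attempt a delivery, i.e. when a masked-pending or resend-flagged
 * source has just been given a non-MASKED priority. kvmppc_xics_set_xive()
 * and kvmppc_xics_int_on() below follow a true return with
 * icp_deliver_irq(); kvmppc_xics_int_off() ignores the result since it
 * only ever masks.
 */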
int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, server);
	if (!icp)
		return -EINVAL;

	XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
		 irq, server, priority,
		 state->masked_pending, state->resend);

	if (write_xive(xics, ics, state, server, priority, priority))
		icp_deliver_irq(xics, icp, irq);

	return 0;
}
int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	unsigned long flags;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	*server = state->server;
	*priority = state->priority;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return 0;
}
int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, state->server);
	if (!icp)
		return -EINVAL;

	if (write_xive(xics, ics, state, state->server, state->saved_priority,
		       state->saved_priority))
		icp_deliver_irq(xics, icp, irq);

	return 0;
}
int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	write_xive(xics, ics, state, state->server, MASKED, state->priority);

	return 0;
}
/* -- ICP routines, including hcalls -- */
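/*
 * Editor's sketch of the data structure the routines below rely on.
 * This layout is an assumption for illustration only; the authoritative
 * definition lives in book3s_xics.h. The point is that the whole ICP
 * state (CPPR, MFRR, pending priority, XISR, plus the need_resend and
 * out_ee flags) fits in a single 64-bit word, so icp_try_update() can
 * commit a new state with one cmpxchg64():
 *
 *	union kvmppc_icp_state {
 *		unsigned long raw;
 *		struct {
 *			u8  out_ee:1;
 *			u8  need_resend:1;
 *			u8  cppr;
 *			u8  mfrr;
 *			u8  pending_pri;
 *			u32 xisr:24;
 *		};
 *	};
 */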
static inline bool icp_try_update(struct kvmppc_icp *icp,
				  union kvmppc_icp_state old,
				  union kvmppc_icp_state new,
				  bool change_self)
{
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	XICS_DBG("UPD [%04lx] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 icp->server_num,
		 old.cppr, old.mfrr, old.pending_pri, old.xisr,
		 old.need_resend, old.out_ee);
	XICS_DBG("UPD        - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 new.cppr, new.mfrr, new.pending_pri, new.xisr,
		 new.need_resend, new.out_ee);
	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee) {
		kvmppc_book3s_queue_irqprio(icp->vcpu,
					    BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
		if (!change_self)
			kvmppc_fast_vcpu_kick(icp->vcpu);
	}
 bail:
	return success;
}
static void icp_check_resend(struct kvmppc_xics *xics,
			     struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_check_resend(xics, ics, icp);
	}
}
static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
			       u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	XICS_DBG("try deliver %#x(P:%#x) to server %#lx\n", irq, priority,
		 icp->server_num);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_try_update(icp, old_state, new_state, false));

	return success;
}
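/*
 * Worked example (editor's note, derived from the checks above):
 * priorities are "more favored" when numerically smaller. With
 * CPPR=0xff (accept anything), MFRR=0xff (no IPI pending) and
 * pending_pri=0xff (nothing latched), a source at priority 0x05 passes
 * all three comparisons and is latched into XISR with pending_pri 0x05.
 * A second source at priority 0x08 then fails the pending_pri test and
 * only sets need_resend, to be retried on the next CPPR change.
 */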
static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;
	unsigned long flags;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */
 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
				new_irq, state->server);
			goto out;
		}
	}

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate normal deliveries and resends, this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		XICS_DBG("irq %#x masked pending\n", new_irq);
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ics spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			new_irq = reject;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt we need to set the
		 * resend map bit and mark the ICS state as needing a resend
		 */
		set_bit(ics->icsid, icp->resend_map);
		state->resend = 1;

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between icp_try_to_deliver() atomic update and now, then
		 * we know it might have missed the resend_map bit. So we
		 * retry
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			state->resend = 0;
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);
}
static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			  u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or non existent
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			WARN_ON(new_state.xisr != XICS_IPI &&
				new_state.xisr != 0);
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here too
	 */
	if (resend)
		icp_check_resend(xics, icp);
}
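/*
 * Worked example (editor's note, derived from the loop above): suppose
 * a vcpu lowers its priority from CPPR=0x00 to 0xff while MFRR=0x05 and
 * XISR is empty. MFRR is now more favored than both the new CPPR and
 * pending_pri, so the transaction latches an IPI: pending_pri becomes
 * 0x05 and XISR becomes XICS_IPI. Any latched need_resend is then
 * funneled through icp_check_resend().
 */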
static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	/* First, remove EE from the processor */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);

	return xirr;
}
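/*
 * Editor's note on the XIRR layout used above: the old CPPR occupies
 * bits 24-31 and the 24-bit source number bits 0-23. Accepting source
 * 0x001001 while CPPR was 0x05 thus returns 0x05001001, and
 * kvmppc_h_eoi() below recovers the two halves as (xirr >> 24) and
 * (xirr & 0x00ffffff).
 */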
static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
				 unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp;
	u32 reject;
	bool resend;
	bool local;

	XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
		 vcpu->vcpu_id, server, mfrr);

	icp = vcpu->arch.icp;
	local = icp->server_num == server;
	if (!local) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be rejected as there can be no XISR to
	 * reject. If the MFRR is being made less favored then
	 * there might be a previously-rejected interrupt needing
	 * to be resent.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP State: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, local));

	/* Handle reject */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject);

	/* Handle resend */
	if (resend)
		icp_check_resend(xics, icp);

	return H_SUCCESS;
}
static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	union kvmppc_icp_state state;
	struct kvmppc_icp *icp;

	icp = vcpu->arch.icp;
	if (icp->server_num != server) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}
	state = READ_ONCE(icp->state);
	kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
	kvmppc_set_gpr(vcpu, 5, state.mfrr);

	return H_SUCCESS;
}
static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
	if (cppr > icp->state.cppr) {
		icp_down_cppr(xics, icp, cppr);
		return;
	} else if (cppr == icp->state.cppr)
		return;

	/*
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_deliver_irq).
	 */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject);
}
static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u32 irq = xirr & 0x00ffffff;
	u16 src;

	XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt, this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
	icp_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		return H_SUCCESS;
	/*
	 * EOI handling: If the interrupt is still asserted, we need to
	 * resend it. We can take a lockless "peek" at the ICS state here.
	 *
	 * "Message" interrupts will never have "asserted" set
	 */
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("h_eoi: IRQ 0x%06x not found !\n", irq);
		return H_PARAMETER;
	}
	state = &ics->irq_state[src];

	/* Still asserted, resend it */
	if (state->asserted)
		icp_deliver_irq(xics, icp, irq);

	kvm_notify_acked_irq(vcpu->kvm, 0, irq);

	return H_SUCCESS;
}
int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;

	XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
		 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);

	if (icp->rm_action & XICS_RM_KICK_VCPU) {
		icp->n_rm_kick_vcpu++;
		kvmppc_fast_vcpu_kick(icp->rm_kick_target);
	}
	if (icp->rm_action & XICS_RM_CHECK_RESEND) {
		icp->n_rm_check_resend++;
		icp_check_resend(xics, icp->rm_resend_icp);
	}
	if (icp->rm_action & XICS_RM_NOTIFY_EOI) {
		icp->n_rm_notify_eoi++;
		kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);
	}

	icp->rm_action = 0;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_rm_complete);
int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	unsigned long res;
	int rc = H_SUCCESS;

	/* Check if we have an ICP */
	if (!xics || !vcpu->arch.icp)
		return H_HARDWARE;

	/* These requests don't have real-mode implementations at present */
	switch (req) {
	case H_XIRR_X: {
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		kvmppc_set_gpr(vcpu, 5, get_tb());
		return rc;
	}
	case H_IPOLL:
		rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
		return rc;
	}

	/* Check for real mode returning too hard */
	if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
		return kvmppc_xics_rm_complete(vcpu, req);

	switch (req) {
	case H_XIRR:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		break;
	case H_CPPR:
		kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_EOI:
		rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_IPI:
		rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
				  kvmppc_get_gpr(vcpu, 5));
		break;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);
/* -- Initialisation code etc. -- */
static void xics_debugfs_irqmap(struct seq_file *m,
				struct kvmppc_passthru_irqmap *pimap)
{
	int i;

	if (!pimap)
		return;
	seq_printf(m, "========\nPIRQ mappings: %d maps\n===========\n",
			pimap->n_mapped);
	for (i = 0; i < pimap->n_mapped; i++) {
		seq_printf(m, "r_hwirq=%x, v_hwirq=%x\n",
			pimap->mapped[i].r_hwirq, pimap->mapped[i].v_hwirq);
	}
}
static int xics_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xics *xics = m->private;
	struct kvm *kvm = xics->kvm;
	struct kvm_vcpu *vcpu;
	int icsid, i;
	unsigned long flags;
	unsigned long t_rm_kick_vcpu, t_rm_check_resend;
	unsigned long t_rm_notify_eoi;
	unsigned long t_reject, t_check_resend;

	if (!kvm)
		return 0;

	t_rm_kick_vcpu = 0;
	t_rm_notify_eoi = 0;
	t_rm_check_resend = 0;
	t_check_resend = 0;
	t_reject = 0;

	xics_debugfs_irqmap(m, kvm->arch.pimap);

	seq_printf(m, "=========\nICP state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_icp *icp = vcpu->arch.icp;
		union kvmppc_icp_state state;

		if (!icp)
			continue;

		state.raw = READ_ONCE(icp->state.raw);
		seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
			   icp->server_num, state.xisr,
			   state.pending_pri, state.cppr, state.mfrr,
			   state.out_ee, state.need_resend);
		t_rm_kick_vcpu += icp->n_rm_kick_vcpu;
		t_rm_notify_eoi += icp->n_rm_notify_eoi;
		t_rm_check_resend += icp->n_rm_check_resend;
		t_check_resend += icp->n_check_resend;
		t_reject += icp->n_reject;
	}

	seq_printf(m, "ICP Guest->Host totals: kick_vcpu=%lu check_resend=%lu notify_eoi=%lu\n",
			t_rm_kick_vcpu, t_rm_check_resend,
			t_rm_notify_eoi);
	seq_printf(m, "ICP Real Mode totals: check_resend=%lu resend=%lu\n",
			t_check_resend, t_reject);
	for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!ics)
			continue;

		seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
			   icsid);

		local_irq_save(flags);
		arch_spin_lock(&ics->lock);

		for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
			struct ics_irq_state *irq = &ics->irq_state[i];

			seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x asserted %d resend %d masked pending %d\n",
				   irq->number, irq->server, irq->priority,
				   irq->saved_priority, irq->asserted,
				   irq->resend, irq->masked_pending);

		}
		arch_spin_unlock(&ics->lock);
		local_irq_restore(flags);
	}
	return 0;
}
static int xics_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, xics_debug_show, inode->i_private);
}

static const struct file_operations xics_debug_fops = {
	.open = xics_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static void xics_debugfs_init(struct kvmppc_xics *xics)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xics->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
					   xics, &xics_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}
static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
					struct kvmppc_xics *xics, int irq)
{
	struct kvmppc_ics *ics;
	int i, icsid;

	icsid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

	/* ICS already exists - somebody else got here first */
	if (xics->ics[icsid])
		goto out;

	/* Create the ICS */
	ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
	if (!ics)
		goto out;

	ics->icsid = icsid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
		ics->irq_state[i].priority = MASKED;
		ics->irq_state[i].saved_priority = MASKED;
	}
	xics->ics[icsid] = ics;

	if (icsid > xics->max_icsid)
		xics->max_icsid = icsid;

 out:
	mutex_unlock(&kvm->lock);
	return xics->ics[icsid];
}
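/*
 * Worked example (editor's note): a global interrupt number splits into
 * an ICS index and a per-ICS source via KVMPPC_XICS_ICS_SHIFT above,
 * and irq_state[i].number reassembles the full number. Assuming a shift
 * of 10 (i.e. KVMPPC_XICS_IRQ_PER_ICS == 1024; the real values live in
 * book3s_xics.h), irq 0x1005 would map to ICS 0x4, source 0x5.
 */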
int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
{
	struct kvmppc_icp *icp;

	if (!vcpu->kvm->arch.xics)
		return -ENODEV;

	if (kvmppc_xics_find_server(vcpu->kvm, server_num))
		return -EEXIST;

	icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
	if (!icp)
		return -ENOMEM;

	icp->vcpu = vcpu;
	icp->server_num = server_num;
	icp->state.mfrr = MASKED;
	icp->state.pending_pri = MASKED;
	vcpu->arch.icp = icp;

	XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);

	return 0;
}
u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	union kvmppc_icp_state state;

	if (!icp)
		return 0;
	state = icp->state;
	return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
		((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
		((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
		((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
}
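/*
 * Editor's note: the value built above is the one-reg ICP state format;
 * kvmppc_xics_set_icp() below parses it back with the same *_SHIFT
 * constants, so userspace can save and restore an ICP as a single u64.
 */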
int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_ics *ics;
	u8 cppr, mfrr, pending_pri;
	u32 xisr;
	u16 src;
	bool resend;

	if (!icp || !xics)
		return -ENOENT;

	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
	pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;

	/* Require the new state to be internally consistent */
	if (xisr == 0) {
		if (pending_pri != 0xff)
			return -EINVAL;
	} else if (xisr == XICS_IPI) {
		if (pending_pri != mfrr || pending_pri >= cppr)
			return -EINVAL;
	} else {
		if (pending_pri >= mfrr || pending_pri >= cppr)
			return -EINVAL;
		ics = kvmppc_xics_find_ics(xics, xisr, &src);
		if (!ics)
			return -EINVAL;
	}

	new_state.raw = 0;
	new_state.cppr = cppr;
	new_state.xisr = xisr;
	new_state.mfrr = mfrr;
	new_state.pending_pri = pending_pri;

	/*
	 * Deassert the CPU interrupt request.
	 * icp_try_update will reassert it if necessary.
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu,
				      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);

	/*
	 * Note that if we displace an interrupt from old_state.xisr,
	 * we don't mark it as rejected. We expect userspace to set
	 * the state of the interrupt sources to be consistent with
	 * the ICP states (either before or afterwards, which doesn't
	 * matter). We do handle resends due to CPPR becoming less
	 * favoured because that is necessary to end up with a
	 * consistent state in the situation where userspace restores
	 * the ICS states before the ICP states.
	 */
	do {
		old_state = READ_ONCE(icp->state);

		if (new_state.mfrr <= old_state.mfrr) {
			resend = false;
			new_state.need_resend = old_state.need_resend;
		} else {
			resend = old_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, false));

	if (resend)
		icp_check_resend(xics, icp);

	return 0;
}
static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	int ret;
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val, prio;
	unsigned long flags;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return -ENOENT;

	irqp = &ics->irq_state[idx];
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	ret = -ENOENT;
	if (irqp->exists) {
		val = irqp->server;
		prio = irqp->priority;
		if (prio == MASKED) {
			val |= KVM_XICS_MASKED;
			prio = irqp->saved_priority;
		}
		val |= prio << KVM_XICS_PRIORITY_SHIFT;
		if (irqp->lsi) {
			val |= KVM_XICS_LEVEL_SENSITIVE;
			if (irqp->asserted)
				val |= KVM_XICS_PENDING;
		} else if (irqp->masked_pending || irqp->resend)
			val |= KVM_XICS_PENDING;
		ret = 0;
	}
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (!ret && put_user(val, ubufp))
		ret = -EFAULT;

	return ret;
}
static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 prio;
	u32 server;
	unsigned long flags;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics) {
		ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
		if (!ics)
			return -ENOMEM;
	}
	irqp = &ics->irq_state[idx];
	if (get_user(val, ubufp))
		return -EFAULT;

	server = val & KVM_XICS_DESTINATION_MASK;
	prio = val >> KVM_XICS_PRIORITY_SHIFT;
	if (prio != MASKED &&
	    kvmppc_xics_find_server(xics->kvm, server) == NULL)
		return -EINVAL;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	irqp->server = server;
	irqp->saved_priority = prio;
	if (val & KVM_XICS_MASKED)
		prio = MASKED;
	irqp->priority = prio;
	irqp->resend = 0;
	irqp->masked_pending = 0;
	irqp->lsi = 0;
	irqp->asserted = 0;
	if (val & KVM_XICS_LEVEL_SENSITIVE) {
		irqp->lsi = 1;
		if (val & KVM_XICS_PENDING)
			irqp->asserted = 1;
	}
	irqp->exists = 1;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (val & KVM_XICS_PENDING)
		icp_deliver_irq(xics, NULL, irqp->number);

	return 0;
}
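/*
 * Editor's note on the u64 consumed above (and produced by
 * xics_get_source()): the destination server lives in the low bits
 * (KVM_XICS_DESTINATION_MASK), the priority sits at
 * KVM_XICS_PRIORITY_SHIFT, and KVM_XICS_MASKED, KVM_XICS_LEVEL_SENSITIVE
 * and KVM_XICS_PENDING are flag bits. This is the encoding used by the
 * KVM_DEV_XICS_GRP_SOURCES device attribute group handled below.
 */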
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status)
{
	struct kvmppc_xics *xics = kvm->arch.xics;

	if (!xics)
		return -ENODEV;
	return ics_deliver_irq(xics, irq, level);
}

int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
			   level, line_status);
}
static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_set_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_get_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}
static void kvmppc_xics_free(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = dev->private;
	int i;
	struct kvm *kvm = xics->kvm;

	debugfs_remove(xics->dentry);

	if (kvm)
		kvm->arch.xics = NULL;

	for (i = 0; i <= xics->max_icsid; i++)
		kfree(xics->ics[i]);
	kfree(xics);
	kfree(dev);
}
static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xics *xics;
	struct kvm *kvm = dev->kvm;
	int ret = 0;

	xics = kzalloc(sizeof(*xics), GFP_KERNEL);
	if (!xics)
		return -ENOMEM;

	dev->private = xics;

	xics->dev = dev;
	xics->kvm = kvm;

	/* Already there ? */
	if (kvm->arch.xics)
		ret = -EEXIST;
	else
		kvm->arch.xics = xics;

	if (ret) {
		kfree(xics);
		return ret;
	}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		/* Enable real mode support */
		xics->real_mode = ENABLE_REALMODE;
		xics->real_mode_dbg = DEBUG_REALMODE;
	}
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

	return 0;
}
static void kvmppc_xics_init(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = (struct kvmppc_xics *)dev->private;

	xics_debugfs_init(xics);
}

struct kvm_device_ops kvm_xics_ops = {
	.name = "kvm-xics",
	.create = kvmppc_xics_create,
	.init = kvmppc_xics_init,
	.destroy = kvmppc_xics_free,
	.set_attr = xics_set_attr,
	.get_attr = xics_get_attr,
	.has_attr = xics_has_attr,
};
int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 xcpu)
{
	struct kvmppc_xics *xics = dev->private;
	int r = -EBUSY;

	if (dev->ops != &kvm_xics_ops)
		return -EPERM;
	if (xics->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type)
		return -EBUSY;

	r = kvmppc_xics_create_icp(vcpu, xcpu);
	if (!r)
		vcpu->arch.irq_type = KVMPPC_IRQ_XICS;

	return r;
}
void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.icp)
		return;
	kfree(vcpu->arch.icp);
	vcpu->arch.icp = NULL;
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
}
static int xics_set_irq(struct kvm_kernel_irq_routing_entry *e,
			struct kvm *kvm, int irq_source_id, int level,
			bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
}

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
	entries->gsi = gsi;
	entries->type = KVM_IRQ_ROUTING_IRQCHIP;
	entries->set = xics_set_irq;
	entries->irqchip.irqchip = 0;
	entries->irqchip.pin = gsi;
	return 1;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}
void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
			    unsigned long host_irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	u16 idx;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return;

	ics->irq_state[idx].host_irq = host_irq;
	ics->irq_state[idx].intr_cpu = -1;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_set_mapped);

void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq,
			    unsigned long host_irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	u16 idx;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return;

	ics->irq_state[idx].host_irq = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_clr_mapped);