/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/kernel_stat.h>

#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/synch.h>
#include <asm/cputhreads.h>
#include <asm/pgtable.h>
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <asm/smp.h>

#include "book3s_xics.h"

int h_ipi_redirect = 1;
EXPORT_SYMBOL(h_ipi_redirect);
int kvm_irq_bypass = 1;
EXPORT_SYMBOL(kvm_irq_bypass);

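/*
 * Tunables, both enabled by default: h_ipi_redirect allows the real-mode
 * H_IPI path to redirect a VCPU kick to another host core (see
 * icp_rm_set_vcpu_irq() below); kvm_irq_bypass is exported for the
 * passthrough setup code and, presumably, gates mapping device interrupts
 * directly to a guest.
 */
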
static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq);
static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu);

/* -- ICS routines -- */
static void ics_rm_check_resend(struct kvmppc_xics *xics,
				struct kvmppc_ics *ics, struct kvmppc_icp *icp)
{
	int i;

	arch_spin_lock(&ics->lock);

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];

		if (!state->resend)
			continue;

		/* Drop the ICS lock across the delivery; it is retaken below */
		arch_spin_unlock(&ics->lock);
		icp_rm_deliver_irq(xics, icp, state->number);
		arch_spin_lock(&ics->lock);
	}

	arch_spin_unlock(&ics->lock);
}

/* -- ICP routines -- */

#ifdef CONFIG_SMP
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
{
	int hcpu;

	hcpu = hcore << threads_shift;
	kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
	smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
	if (paca[hcpu].kvm_hstate.xics_phys)
		icp_native_cause_ipi_rm(hcpu);
	else
		opal_rm_int_set_mfrr(get_hard_smp_processor_id(hcpu),
				     IPI_PRIORITY);
}
#else
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { }
#endif

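/*
 * The hcpu computed in icp_send_hcore_msg() is the first hardware thread
 * of the target core: with 8 threads per core (threads_shift == 3, as on
 * POWER8), hcore 5 maps to hcpu 40. The wakeup is then raised either
 * through the native real-mode ICP MMIO path or, failing that, via an
 * OPAL call.
 */
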
/*
 * We start the search from our current CPU Id in the core map
 * and go in a circle until we get back to our ID looking for a
 * core that is running in host context and that hasn't already
 * been targeted for another rm_host_ops.
 *
 * In the future, could consider using a fairer algorithm (one
 * that distributes the IPIs better)
 *
 * Returns -1, if no CPU could be found in the host
 * Else, returns a CPU Id which has been reserved for use
 */
static inline int grab_next_hostcore(int start,
		struct kvmppc_host_rm_core *rm_core, int max, int action)
{
	bool success;
	int core;
	union kvmppc_rm_state old, new;

	for (core = start + 1; core < max; core++) {
		old = new = READ_ONCE(rm_core[core].rm_state);

		/* Skip cores not in host context or already targeted */
		if (!old.in_host || old.rm_action)
			continue;

		/* Try to grab this host core if not taken already. */
		new.rm_action = action;

		success = cmpxchg64(&rm_core[core].rm_state.raw,
						old.raw, new.raw) == old.raw;
		if (success) {
			/*
			 * Make sure that the store to the rm_action is made
			 * visible before we return to caller (and the
			 * subsequent store to rm_data) to synchronize with
			 * the IPI handler.
			 */
			smp_wmb();
			return core;
		}
	}

	return -1;
}

static inline int find_available_hostcore(int action)
{
	int core;
	int my_core = smp_processor_id() >> threads_shift;
	struct kvmppc_host_rm_core *rm_core = kvmppc_host_rm_ops_hv->rm_core;

	core = grab_next_hostcore(my_core, rm_core, cpu_nr_cores(), action);
	if (core == -1)
		core = grab_next_hostcore(core, rm_core, my_core, action);

	return core;
}

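/*
 * Worked example of the circular search: with 8 cores and my_core == 5,
 * the first grab_next_hostcore() call scans cores 6..7; if none is free,
 * the second call (start == -1) scans cores 0..4, completing the circle
 * without re-testing our own core.
 */
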
static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *this_vcpu)
{
	struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
	int cpu;
	int hcore;

	/* Mark the target VCPU as having an interrupt pending */
	vcpu->stat.queue_intr++;
	set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);

	/* Kick self ? Just set MER and return */
	if (vcpu == this_vcpu) {
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_MER);
		return;
	}

	/*
	 * Check if the core is loaded,
	 * if not, find an available host core to post to wake the VCPU,
	 * if we can't find one, set up state to eventually return too hard.
	 */
	cpu = vcpu->arch.thread_cpu;
	if (cpu < 0 || cpu >= nr_cpu_ids) {
		hcore = -1;
		if (kvmppc_host_rm_ops_hv && h_ipi_redirect)
			hcore = find_available_hostcore(XICS_RM_KICK_VCPU);
		if (hcore != -1) {
			icp_send_hcore_msg(hcore, vcpu);
		} else {
			this_icp->rm_action |= XICS_RM_KICK_VCPU;
			this_icp->rm_kick_target = vcpu;
		}
		return;
	}

	smp_mb();
	kvmhv_rm_send_ipi(cpu);
}

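/*
 * Note on the self-kick path above: LPCR_MER (Mediated External Request)
 * causes an external interrupt to be presented to the guest as soon as it
 * enables MSR[EE], so no IPI is needed when the target is the current VCPU.
 */
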
static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
{
	/* Note: Only called on self ! */
	clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
		  &vcpu->arch.pending_exceptions);
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER);
}

static inline bool icp_rm_try_update(struct kvmppc_icp *icp,
				     union kvmppc_icp_state old,
				     union kvmppc_icp_state new)
{
	struct kvm_vcpu *this_vcpu = local_paca->kvm_hstate.kvm_vcpu;
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee)
		icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu);

	/* Expose the state change for debug purposes */
	this_vcpu->arch.icp->rm_dbgstate = new;
	this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu;

bail:
	return success;
}

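/*
 * icp_rm_try_update() is the commit step of the lock-free transactions
 * used throughout this file. The canonical caller pattern is:
 *
 *	do {
 *		old_state = new_state = READ_ONCE(icp->state);
 *		... compute new_state from old_state ...
 *	} while (!icp_rm_try_update(icp, old_state, new_state));
 *
 * If another CPU modified icp->state between the READ_ONCE and the
 * cmpxchg64, the commit fails and the whole transaction is recomputed
 * from the freshly observed state.
 */
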
static inline int check_too_hard(struct kvmppc_xics *xics,
				 struct kvmppc_icp *icp)
{
	return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
}

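/*
 * An H_TOO_HARD result here does not indicate failure: it tells the
 * real-mode entry code to exit to the host, where the virtual-mode XICS
 * code completes whatever was deferred in icp->rm_action (VCPU kicks,
 * resend checks, EOI notification) before the guest is resumed.
 */
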
static void icp_rm_check_resend(struct kvmppc_xics *xics,
				struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_rm_check_resend(xics, ics, icp);
	}
}

static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
				  u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_rm_try_update(icp, old_state, new_state));

	return success;
}

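/*
 * Reminder on XICS priority encoding: numerically smaller values are more
 * favored, 0 being the most favored and 0xff the least (0xff also serves
 * as the MASKED priority). Hence the "cppr > priority" style comparisons
 * above mean "the new interrupt is more favored than the current state".
 */
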
static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */
again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		/* Unsafe increment, but this does not need to be accurate */
		xics->err_noics++;
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			/* Unsafe increment again */
			xics->err_noicp++;
			goto out;
		}
	}

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate normal deliveries and resends, this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ics spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_rm_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			new_irq = reject;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt we need to set the
		 * resend map bit and mark the ICS state as needing a resend
		 */
		set_bit(ics->icsid, icp->resend_map);
		state->resend = 1;

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between icp_rm_try_to_deliver() atomic update and now, then
		 * we know it might have missed the resend_map bit. So we
		 * retry
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			arch_spin_unlock(&ics->lock);
			goto again;
		}
	}
out:
	arch_spin_unlock(&ics->lock);
}

static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			     u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or non-existent
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here as well.
	 */
	if (resend) {
		icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}
}

unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/* First clear the interrupt */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Return the result in GPR4 */
	vcpu->arch.gpr[4] = xirr;

	return check_too_hard(xics, icp);
}

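/*
 * The XIRR value returned above packs two fields into one word: the top
 * byte is the CPPR at the time the interrupt was accepted and the low
 * 24 bits are the interrupt source number (XISR). For example, accepting
 * source 0x1003 at CPPR 0x05 yields xirr == 0x05001003, and the matching
 * H_EOI is expected to pass the same value back (its top byte restores
 * the CPPR in kvmppc_rm_h_eoi() below).
 */
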
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp;
	u32 reject;
	bool resend;
	bool local;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	local = this_icp->server_num == server;
	if (local)
		icp = this_icp;
	else
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
	if (!icp)
		return H_PARAMETER;

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be done as there can be no XISR to
	 * reject.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP State: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Handle reject in real mode */
	if (reject && reject != XICS_IPI) {
		this_icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject);
	}

	/* Handle resends in real mode */
	if (resend) {
		this_icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}

	return check_too_hard(xics, this_icp);
}

int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
	if (cppr > icp->state.cppr) {
		icp_rm_down_cppr(xics, icp, cppr);
		goto bail;
	} else if (cppr == icp->state.cppr)
		return H_SUCCESS;

	/*
	 * ICP State: Up_CPPR
	 *
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_rm_deliver_irq).
	 */
	if (reject && reject != XICS_IPI) {
		icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject);
	}
bail:
	return check_too_hard(xics, icp);
}

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u32 irq = xirr & 0x00ffffff;
	u16 src;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt, this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
	icp_rm_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		goto bail;
	/*
	 * EOI handling: If the interrupt is still asserted, we need to
	 * resend it. We can take a lockless "peek" at the ICS state here.
	 *
	 * "Message" interrupts will never have "asserted" set
	 */
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		goto bail;
	state = &ics->irq_state[src];

	/* Still asserted, resend it */
	if (state->asserted) {
		icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, irq);
	}

	if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
		icp->rm_action |= XICS_RM_NOTIFY_EOI;
		icp->rm_eoied_irq = irq;
	}

	if (state->host_irq) {
		++vcpu->stat.pthru_all;
		if (state->intr_cpu != -1) {
			int pcpu = raw_smp_processor_id();

			pcpu = cpu_first_thread_sibling(pcpu);
			++vcpu->stat.pthru_host;
			if (state->intr_cpu != pcpu) {
				++vcpu->stat.pthru_bad_aff;
				xics_opal_rm_set_server(state->host_irq, pcpu);
			}
			state->intr_cpu = -1;
		}
	}
bail:
	return check_too_hard(xics, icp);
}

unsigned long eoi_rc;	/* last non-zero rc from pnv_opal_pci_msi_eoi(), for debug */

static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again)
{
	unsigned long xics_phys;
	int64_t rc;

	rc = pnv_opal_pci_msi_eoi(c, hwirq);

	if (rc)
		eoi_rc = rc;

	iosync();

	/* EOI it */
	xics_phys = local_paca->kvm_hstate.xics_phys;
	if (xics_phys) {
		_stwcix(xics_phys + XICS_XIRR, xirr);
	} else {
		rc = opal_rm_int_eoi(be32_to_cpu(xirr));
		/* A positive rc means another interrupt is pending */
		*again = rc > 0;
	}
}

static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu)
{
	unsigned int mangle_cpu = get_hard_smp_processor_id(server_cpu) << 2;

	return opal_rm_set_xive(hw_irq, mangle_cpu, DEFAULT_PRIORITY);
}

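/*
 * The "<< 2" above produces the mangled server number that the OPAL XIVE
 * calls expect: the server field is wider than a hardware thread id and,
 * as in the host's ics-opal backend, the low two bits select the
 * interrupt-presentation link, which is left at 0 here.
 */
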
/*
 * Increment a per-CPU 32-bit unsigned integer variable.
 * Safe to call in real-mode. Handles vmalloc'ed addresses
 *
 * ToDo: Make this work for any integral type
 */

static inline void this_cpu_inc_rm(unsigned int __percpu *addr)
{
	unsigned long l;
	unsigned int *raddr;
	int cpu = smp_processor_id();

	raddr = per_cpu_ptr(addr, cpu);
	l = (unsigned long)raddr;

	if (REGION_ID(l) == VMALLOC_REGION_ID) {
		l = vmalloc_to_phys(raddr);
		raddr = (unsigned int *)l;
	}
	++*raddr;
}

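/*
 * The VMALLOC check above is what makes the increment real-mode safe:
 * with translation off, only linear-mapping addresses can be dereferenced
 * directly, so a vmalloc'ed per-CPU pointer must first be converted to
 * its physical address via vmalloc_to_phys().
 */
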
/*
 * We don't try to update the flags in the irq_desc 'istate' field in
 * here as would happen in the normal IRQ handling path for several reasons:
 *  - state flags represent internal IRQ state and are not expected to be
 *    updated outside the IRQ subsystem
 *  - more importantly, these are useful for edge triggered interrupts,
 *    IRQ probing, etc., but we are only handling MSI/MSI-X interrupts here
 *    and these states shouldn't apply to us.
 *
 * However, we do update irq_stats - we somewhat duplicate the code in
 * kstat_incr_irqs_this_cpu() for this since this function is defined
 * in irq/internal.h which we don't want to include here.
 * The only difference is that desc->kstat_irqs is an allocated per CPU
 * variable and could have been vmalloc'ed, so we can't directly
 * call __this_cpu_inc() on it. The kstat structure is a static
 * per CPU variable and it should be accessible by real-mode KVM.
 */
static void kvmppc_rm_handle_irq_desc(struct irq_desc *desc)
{
	this_cpu_inc_rm(desc->kstat_irqs);
	__this_cpu_inc(kstat.irqs_sum);
}

long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
				 __be32 xirr,
				 struct kvmppc_irq_map *irq_map,
				 struct kvmppc_passthru_irqmap *pimap,
				 bool *again)
{
	struct kvmppc_xics *xics;
	struct kvmppc_icp *icp;
	u32 irq;

	irq = irq_map->v_hwirq;
	xics = vcpu->kvm->arch.xics;
	icp = vcpu->arch.icp;

	kvmppc_rm_handle_irq_desc(irq_map->desc);
	icp_rm_deliver_irq(xics, icp, irq);

	/* EOI the interrupt */
	icp_eoi(irq_desc_get_chip(irq_map->desc), irq_map->r_hwirq, xirr,
		again);

	if (check_too_hard(xics, icp) == H_TOO_HARD)
		return 2;
	else
		return -2;
}

/*  --- Non-real mode XICS-related built-in routines ---  */

/*
 * Host Operations poked by RM KVM
 */
static void rm_host_ipi_action(int action, void *data)
{
	switch (action) {
	case XICS_RM_KICK_VCPU:
		kvmppc_host_rm_ops_hv->vcpu_kick(data);
		break;
	default:
		WARN(1, "Unexpected rm_action=%d data=%p\n", action, data);
		break;
	}
}

void kvmppc_xics_ipi_action(void)
{
	int core;
	unsigned int cpu = smp_processor_id();
	struct kvmppc_host_rm_core *rm_corep;

	core = cpu >> threads_shift;
	rm_corep = &kvmppc_host_rm_ops_hv->rm_core[core];

	if (rm_corep->rm_data) {
		rm_host_ipi_action(rm_corep->rm_state.rm_action,
							rm_corep->rm_data);
		/* Order these stores against the real mode KVM */
		rm_corep->rm_data = NULL;
		smp_wmb();
		rm_corep->rm_state.rm_action = 0;
	}
}