/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/asm-prototypes.h>
#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>
#define KVM_CMA_CHUNK_ORDER	18
#include "book3s_xics.h"
#include "book3s_xive.h"
/*
 * The XIVE module will populate these when it loads
 */
unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
		       unsigned long mfrr);
int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);
/*
 * Hash page table alignment on newer cpus (CPU_FTR_ARCH_206)
 * should be power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;
static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
			 false);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);
void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);
/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need CMA reservation only when we are in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
				       KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false,
				       "kvm_cma", &kvm_cma);
	}
}
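/*
 * Worked example for the reservation above (illustrative numbers): with the
 * default kvm_cma_resv_ratio of 5, a host whose memblocks cover 64 GiB ends
 * up with selected_size = 64 GiB * 5 / 100, i.e. roughly 3.2 GiB of CMA,
 * aligned to HPT_ALIGN_PAGES (256k) and tracked in KVM_CMA_CHUNK_ORDER
 * (256k) granules when kvm_alloc_hpt_cma() carves pieces out of it.
 */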
/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}
/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;
void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}
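/*
 * Illustrative sketch (not from this file): code that must not run while HV
 * guests exist, such as the secondary-thread online path mentioned above,
 * is expected to check this counter and back off, e.g.:
 *
 *	if (kvm_hv_mode_active())
 *		return -EBUSY;
 */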
extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);
int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	int r;

	/* Only need to do the expensive mfmsr() on radix */
	if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
		r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
	else
		r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
	if (r)
		return H_SUCCESS;

	return H_HARDWARE;
}
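/*
 * Note on the convention above: the random value is written straight into
 * the vcpu's GPR4, which is where the guest expects the H_RANDOM result,
 * while the hcall status (H_SUCCESS or H_HARDWARE) is the return value of
 * this function.
 */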
/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	void __iomem *xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* For a nested hypervisor, use the XICS via hcall */
	if (kvmhv_on_pseries()) {
		unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

		plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
				IPI_PRIORITY);
		return;
	}

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* We should never reach this */
	if (WARN_ON_ONCE(xics_on_xive()))
		return;

	/* Else poke the target with an IPI */
	xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
	if (xics_phys)
		__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
	else
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}
/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}
void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i, t;
	int cpu0;

	/* Set our bit in the threads-exiting-guest map in the 0xff00
	   bits of vcore->entry_exit_map */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->vc[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}

	/*
	 * On POWER9 when running a HPT guest on a radix host (sip != NULL),
	 * we have to interrupt inactive CPU threads to get them to
	 * restore the host LPCR value.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (cmpxchg(&sip->do_restore, 0, 1) == 0) {
			vc = local_paca->kvm_hstate.kvm_vcore;
			cpu0 = vc->pcpu + ptid - local_paca->kvm_hstate.tid;
			for (t = 1; t < threads_per_core; ++t) {
				if (sip->napped[t])
					kvmhv_rm_send_ipi(cpu0 + t);
			}
		}
	}
}
struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);
#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock.  That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}
/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA, it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}
#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif
/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 Passthrough that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
	long ret = 0;
	long rc;
	bool again;

	if (xive_enabled())
		return 1;

	do {
		again = false;
		rc = kvmppc_read_one_intr(&again);
		if (rc && (ret == 0 || rc > ret))
			ret = rc;
	} while (again);

	return ret;
}
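/*
 * Reading note: the loop above keeps the numerically largest non-zero result
 * returned by kvmppc_read_one_intr(), so a passthrough completion (2) wins
 * over a plain host interrupt (1), and it keeps re-reading while "again"
 * indicates the ICP may have another interrupt queued.
 */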
static long kvmppc_read_one_intr(bool *again)
{
	void __iomem *xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;
	int64_t rc;

	/* see if a host IPI is pending */
	host_ipi = local_paca->kvm_hstate.host_ipi;
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	if (kvmhv_on_pseries()) {
		unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

		rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF);
		xirr = cpu_to_be32(retbuf[0]);
	} else {
		xics_phys = local_paca->kvm_hstate.xics_phys;
		rc = 0;
		if (!xics_phys)
			rc = opal_int_get_xirr(&xirr, false);
		else
			xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
	}
	if (rc < 0)
		return 1;

	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;
	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR has completed
	 */
	smp_mb();

	/* if nothing pending in the ICP */
	if (!xisr)
		return 0;

	/* We found something in the ICP...
	 *
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		rc = 0;
		if (kvmhv_on_pseries()) {
			unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

			plpar_hcall_raw(H_IPI, retbuf,
					hard_smp_processor_id(), 0xff);
			plpar_hcall_raw(H_EOI, retbuf, h_xirr);
		} else if (xics_phys) {
			__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
			__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
		} else {
			opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
			rc = opal_int_eoi(h_xirr);
		}
		/* If rc > 0, there is another interrupt pending */
		*again = rc > 0;

		/*
		 * Need to ensure side effects of above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime. If it's clear, we bounce the interrupt to the
		 * guest.
		 */
		host_ipi = local_paca->kvm_hstate.host_ipi;
		if (unlikely(host_ipi != 0)) {
			/* We raced with the host,
			 * we need to resend that IPI, bummer
			 */
			if (kvmhv_on_pseries()) {
				unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

				plpar_hcall_raw(H_IPI, retbuf,
						hard_smp_processor_id(),
						IPI_PRIORITY);
			} else if (xics_phys)
				__raw_rm_writeb(IPI_PRIORITY,
						xics_phys + XICS_MFRR);
			else
				opal_int_set_mfrr(hard_smp_processor_id(),
						  IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr, again);
}
#ifdef CONFIG_KVM_XICS
static inline bool is_rm(void)
{
	return !(mfmsr() & MSR_DR);
}
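/*
 * The kvmppc_rm_h_*() wrappers below all follow the same dispatch pattern:
 * punt to the virtual-mode handler (H_TOO_HARD) if in-kernel XICS/XIVE
 * emulation is not enabled for this vcpu, use the built-in real-mode XIVE
 * handlers when called in real mode on a XIVE host, fall back to the
 * module-provided __xive_vm_h_*() hooks otherwise, and use the XICS
 * real-mode handlers on non-XIVE hosts.
 */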
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}
unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	vcpu->arch.regs.gpr[5] = get_tb();
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}
unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_ipoll(vcpu, server);
		if (unlikely(!__xive_vm_h_ipoll))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipoll(vcpu, server);
	} else
		return H_TOO_HARD;
}
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_ipi(vcpu, server, mfrr);
		if (unlikely(!__xive_vm_h_ipi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipi(vcpu, server, mfrr);
	} else
		return xics_rm_h_ipi(vcpu, server, mfrr);
}
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_cppr(vcpu, cppr);
		if (unlikely(!__xive_vm_h_cppr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_cppr(vcpu, cppr);
	} else
		return xics_rm_h_cppr(vcpu, cppr);
}
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_eoi(vcpu, xirr);
		if (unlikely(!__xive_vm_h_eoi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_eoi(vcpu, xirr);
	} else
		return xics_rm_h_eoi(vcpu, xirr);
}
#endif /* CONFIG_KVM_XICS */
void kvmppc_bad_interrupt(struct pt_regs *regs)
{
	/*
	 * 100 could happen at any time, 200 can happen due to invalid real
	 * address access for example (or any time due to a hardware problem).
	 */
	if (TRAP(regs) == 0x100) {
		get_paca()->in_nmi++;
		system_reset_exception(regs);
		get_paca()->in_nmi--;
	} else if (TRAP(regs) == 0x200) {
		machine_check_exception(regs);
	} else {
		die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
	}
	panic("Bad KVM trap");
}
/*
 * Functions used to switch LPCR HR and UPRT bits on all threads
 * when entering and exiting HPT guests on a radix host.
 */

#define PHASE_REALMODE		1	/* in real mode */
#define PHASE_SET_LPCR		2	/* have set LPCR */
#define PHASE_OUT_OF_GUEST	4	/* have finished executing in guest */
#define PHASE_RESET_LPCR	8	/* have reset LPCR to host value */

#define ALL(p)		(((p) << 24) | ((p) << 16) | ((p) << 8) | (p))
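/*
 * Worked example: ALL(PHASE_SET_LPCR) == 0x02020202, i.e. the phase bit
 * replicated into one byte per thread.  wait_for_sync() below can then
 * compare lpcr_sync.allphases (which aggregates the per-thread phase bytes)
 * against ALL(phase) in a single test to see whether every thread of the
 * core has reached that phase.
 */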
static void wait_for_sync(struct kvm_split_mode *sip, int phase)
{
	int thr = local_paca->kvm_hstate.tid;

	sip->lpcr_sync.phase[thr] |= phase;
	phase = ALL(phase);
	while ((sip->lpcr_sync.allphases & phase) != phase) {
		HMT_low();
		barrier();
	}
	HMT_medium();
}
void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip)
{
	unsigned long rb, set;

	/* wait for every other thread to get to real mode */
	wait_for_sync(sip, PHASE_REALMODE);

	/* Set LPCR and LPIDR */
	mtspr(SPRN_LPCR, sip->lpcr_req);
	mtspr(SPRN_LPID, sip->lpidr_req);
	isync();

	/* Invalidate the TLB on thread 0 */
	if (local_paca->kvm_hstate.tid == 0) {
		sip->do_set = 0;
		asm volatile("ptesync" : : : "memory");
		for (set = 0; set < POWER9_TLB_SETS_RADIX; ++set) {
			rb = TLBIEL_INVAL_SET_LPID +
				(set << TLBIEL_INVAL_SET_SHIFT);
			asm volatile(PPC_TLBIEL(%0, %1, 0, 0, 0) : :
				     "r" (rb), "r" (0));
		}
		asm volatile("ptesync" : : : "memory");
	}

	/* indicate that we have done so and wait for others */
	wait_for_sync(sip, PHASE_SET_LPCR);
	/* order read of sip->lpcr_sync.allphases vs. sip->do_set */
	smp_rmb();
}
/*
 * Called when a thread that has been in the guest needs
 * to reload the host LPCR value - but only on POWER9 when
 * running a HPT guest on a radix host.
 */
void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
{
	/* we're out of the guest... */
	wait_for_sync(sip, PHASE_OUT_OF_GUEST);

	mtspr(SPRN_LPID, 0);
	mtspr(SPRN_LPCR, sip->host_lpcr);
	isync();

	if (local_paca->kvm_hstate.tid == 0) {
		sip->do_restore = 0;
		smp_wmb();	/* order store of do_restore vs. phase */
	}

	wait_for_sync(sip, PHASE_RESET_LPCR);
	smp_mb();
	local_paca->kvm_hstate.kvm_split_mode = NULL;
}
/*
 * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
 * Can we inject a Decrementer or an External interrupt?
 */
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
{
	int ext;
	unsigned long vec = 0;
	unsigned long lpcr;

	/* Insert EXTERNAL bit into LPCR at the MER bit position */
	ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= ext << LPCR_MER_SH;
	mtspr(SPRN_LPCR, lpcr);
	isync();

	if (vcpu->arch.shregs.msr & MSR_EE) {
		if (ext) {
			vec = BOOK3S_INTERRUPT_EXTERNAL;
		} else {
			long int dec = mfspr(SPRN_DEC);

			if (!(lpcr & LPCR_LD))
				dec = (int) dec;
			if (dec < 0)
				vec = BOOK3S_INTERRUPT_DECREMENTER;
		}
	}
	if (vec) {
		unsigned long msr, old_msr = vcpu->arch.shregs.msr;

		kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
		kvmppc_set_srr1(vcpu, old_msr);
		kvmppc_set_pc(vcpu, vec);
		msr = vcpu->arch.intr_msr;
		if (MSR_TM_ACTIVE(old_msr))
			msr |= MSR_TS_S;
		vcpu->arch.shregs.msr = msr;
	}

	if (vcpu->arch.doorbell_request) {
		mtspr(SPRN_DPDES, 1);
		vcpu->arch.vcore->dpdes = 1;
		smp_wmb();
		vcpu->arch.doorbell_request = 0;
	}
}