1 /*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
12 *
13 * Derived from book3s_rmhandlers.S and other files, which are:
14 *
15 * Copyright SUSE Linux Products GmbH 2009
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20 #include <asm/ppc_asm.h>
21 #include <asm/kvm_asm.h>
22 #include <asm/reg.h>
23 #include <asm/mmu.h>
24 #include <asm/page.h>
25 #include <asm/ptrace.h>
26 #include <asm/hvcall.h>
27 #include <asm/asm-offsets.h>
28 #include <asm/exception-64s.h>
29 #include <asm/kvm_book3s_asm.h>
30 #include <asm/book3s/64/mmu-hash.h>
31 #include <asm/tm.h>
32 #include <asm/opal.h>
33 #include <asm/xive-regs.h>
34 #include <asm/thread_info.h>
35
36 /* Sign-extend HDEC if not on POWER9 */
37 #define EXTEND_HDEC(reg) \
38 BEGIN_FTR_SECTION; \
39 extsw reg, reg; \
40 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
41
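	/* Offset of the TM-checkpointed image of guest GPR 'reg'; VCPU_GPR_TM is
	 * assumed to be the base of that save area in the vcpu struct. */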
42 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
43
44 /* Values in HSTATE_NAPPING(r13) */
45 #define NAPPING_CEDE 1
46 #define NAPPING_NOVCPU 2
47
48 /* Stack frame offsets for kvmppc_hv_entry */
49 #define SFS 160
50 #define STACK_SLOT_TRAP (SFS-4)
51 #define STACK_SLOT_TID (SFS-16)
52 #define STACK_SLOT_PSSCR (SFS-24)
53 #define STACK_SLOT_PID (SFS-32)
54 #define STACK_SLOT_IAMR (SFS-40)
55 #define STACK_SLOT_CIABR (SFS-48)
56 #define STACK_SLOT_DAWR (SFS-56)
57 #define STACK_SLOT_DAWRX (SFS-64)
58 #define STACK_SLOT_HFSCR (SFS-72)
59
60 /*
61 * Call kvmppc_hv_entry in real mode.
62 * Must be called with interrupts hard-disabled.
63 *
64 * Input Registers:
65 *
66 * LR = return address to continue at after eventually re-enabling MMU
67 */
68 _GLOBAL_TOC(kvmppc_hv_entry_trampoline)
69 mflr r0
70 std r0, PPC_LR_STKOFF(r1)
71 stdu r1, -112(r1)
72 mfmsr r10
73 std r10, HSTATE_HOST_MSR(r13)
74 LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
75 li r0,MSR_RI
76 andc r0,r10,r0
77 li r6,MSR_IR | MSR_DR
78 andc r6,r10,r6
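	/* r0 = host MSR with RI clear for the transition; r6 = host MSR with
	 * IR/DR clear, so the rfid below enters kvmppc_call_hv_entry in real mode */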
79 mtmsrd r0,1 /* clear RI in MSR */
80 mtsrr0 r5
81 mtsrr1 r6
82 RFI_TO_KERNEL
83
84 kvmppc_call_hv_entry:
85 BEGIN_FTR_SECTION
86 /* On P9, do LPCR setting, if necessary */
87 ld r3, HSTATE_SPLIT_MODE(r13)
88 cmpdi r3, 0
89 beq 46f
90 lwz r4, KVM_SPLIT_DO_SET(r3)
91 cmpwi r4, 0
92 beq 46f
93 bl kvmhv_p9_set_lpcr
94 nop
95 46:
96 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
97
98 ld r4, HSTATE_KVM_VCPU(r13)
99 bl kvmppc_hv_entry
100
101 /* Back from guest - restore host state and return to caller */
102
103 BEGIN_FTR_SECTION
104 /* Restore host DABR and DABRX */
105 ld r5,HSTATE_DABR(r13)
106 li r6,7
107 mtspr SPRN_DABR,r5
108 mtspr SPRN_DABRX,r6
109 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
110
111 /* Restore SPRG3 */
112 ld r3,PACA_SPRG_VDSO(r13)
113 mtspr SPRN_SPRG_VDSO_WRITE,r3
114
115 /* Reload the host's PMU registers */
116 ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
117 lbz r4, LPPACA_PMCINUSE(r3)
118 cmpwi r4, 0
119 beq 23f /* skip if not */
120 BEGIN_FTR_SECTION
121 ld r3, HSTATE_MMCR0(r13)
122 andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
123 cmpwi r4, MMCR0_PMAO
124 beql kvmppc_fix_pmao
125 END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
126 lwz r3, HSTATE_PMC1(r13)
127 lwz r4, HSTATE_PMC2(r13)
128 lwz r5, HSTATE_PMC3(r13)
129 lwz r6, HSTATE_PMC4(r13)
130 lwz r8, HSTATE_PMC5(r13)
131 lwz r9, HSTATE_PMC6(r13)
132 mtspr SPRN_PMC1, r3
133 mtspr SPRN_PMC2, r4
134 mtspr SPRN_PMC3, r5
135 mtspr SPRN_PMC4, r6
136 mtspr SPRN_PMC5, r8
137 mtspr SPRN_PMC6, r9
138 ld r3, HSTATE_MMCR0(r13)
139 ld r4, HSTATE_MMCR1(r13)
140 ld r5, HSTATE_MMCRA(r13)
141 ld r6, HSTATE_SIAR(r13)
142 ld r7, HSTATE_SDAR(r13)
143 mtspr SPRN_MMCR1, r4
144 mtspr SPRN_MMCRA, r5
145 mtspr SPRN_SIAR, r6
146 mtspr SPRN_SDAR, r7
147 BEGIN_FTR_SECTION
148 ld r8, HSTATE_MMCR2(r13)
149 ld r9, HSTATE_SIER(r13)
150 mtspr SPRN_MMCR2, r8
151 mtspr SPRN_SIER, r9
152 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
153 mtspr SPRN_MMCR0, r3
154 isync
155 23:
156
157 /*
158 * Reload DEC. HDEC interrupts were disabled when
159 * we reloaded the host's LPCR value.
160 */
161 ld r3, HSTATE_DECEXP(r13)
162 mftb r4
163 subf r4, r4, r3
164 mtspr SPRN_DEC, r4
165
166 	/* hwthread_req may have been set by the cede or no-vcpu paths, so clear it */
167 li r0, 0
168 stb r0, HSTATE_HWTHREAD_REQ(r13)
169
170 /*
171 * For external interrupts we need to call the Linux
172 * handler to process the interrupt. We do that by jumping
173 * to absolute address 0x500 for external interrupts.
174 * The [h]rfid at the end of the handler will return to
175 * the book3s_hv_interrupts.S code. For other interrupts
176 * we do the rfid to get back to the book3s_hv_interrupts.S
177 * code here.
178 */
179 ld r8, 112+PPC_LR_STKOFF(r1)
180 addi r1, r1, 112
181 ld r7, HSTATE_HOST_MSR(r13)
182
183 /* Return the trap number on this thread as the return value */
184 mr r3, r12
185
186 /*
187 * If we came back from the guest via a relocation-on interrupt,
188 * we will be in virtual mode at this point, which makes it a
189 * little easier to get back to the caller.
190 */
191 mfmsr r0
192 andi. r0, r0, MSR_IR /* in real mode? */
193 bne .Lvirt_return
194
195 /* RFI into the highmem handler */
196 mfmsr r6
197 li r0, MSR_RI
198 andc r6, r6, r0
199 mtmsrd r6, 1 /* Clear RI in MSR */
200 mtsrr0 r8
201 mtsrr1 r7
202 RFI_TO_KERNEL
203
204 /* Virtual-mode return */
205 .Lvirt_return:
206 mtlr r8
207 blr
208
209 kvmppc_primary_no_guest:
210 /* We handle this much like a ceded vcpu */
211 /* put the HDEC into the DEC, since HDEC interrupts don't wake us */
212 /* HDEC may be larger than DEC for arch >= v3.00, but since the */
213 /* HDEC value came from DEC in the first place, it will fit */
214 mfspr r3, SPRN_HDEC
215 mtspr SPRN_DEC, r3
216 /*
217 * Make sure the primary has finished the MMU switch.
218 * We should never get here on a secondary thread, but
219 * check it for robustness' sake.
220 */
221 ld r5, HSTATE_KVM_VCORE(r13)
222 65: lbz r0, VCORE_IN_GUEST(r5)
223 cmpwi r0, 0
224 beq 65b
225 /* Set LPCR. */
226 ld r8,VCORE_LPCR(r5)
227 mtspr SPRN_LPCR,r8
228 isync
229 /* set our bit in napping_threads */
230 ld r5, HSTATE_KVM_VCORE(r13)
231 lbz r7, HSTATE_PTID(r13)
232 li r0, 1
233 sld r0, r0, r7
234 addi r6, r5, VCORE_NAPPING_THREADS
235 1: lwarx r3, 0, r6
236 or r3, r3, r0
237 stwcx. r3, 0, r6
238 bne 1b
239 /* order napping_threads update vs testing entry_exit_map */
240 isync
241 li r12, 0
242 lwz r7, VCORE_ENTRY_EXIT(r5)
243 cmpwi r7, 0x100
244 bge kvm_novcpu_exit /* another thread already exiting */
245 li r3, NAPPING_NOVCPU
246 stb r3, HSTATE_NAPPING(r13)
247
248 li r3, 0 /* Don't wake on privileged (OS) doorbell */
249 b kvm_do_nap
250
251 /*
252 * kvm_novcpu_wakeup
253 * Entered from kvm_start_guest if kvm_hstate.napping is set
254 * to NAPPING_NOVCPU
255 * r2 = kernel TOC
256 * r13 = paca
257 */
258 kvm_novcpu_wakeup:
259 ld r1, HSTATE_HOST_R1(r13)
260 ld r5, HSTATE_KVM_VCORE(r13)
261 li r0, 0
262 stb r0, HSTATE_NAPPING(r13)
263
264 /* check the wake reason */
265 bl kvmppc_check_wake_reason
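	/* r3 appears to encode the wake reason: r3 >= 0 is an interrupt vector
	 * that forces an exit to the host, r3 < 0 means the event was handled
	 * in real mode (cf. the bge tests below) */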
266
267 /*
268 * Restore volatile registers since we could have called
269 * a C routine in kvmppc_check_wake_reason.
270 * r5 = VCORE
271 */
272 ld r5, HSTATE_KVM_VCORE(r13)
273
274 /* see if any other thread is already exiting */
275 lwz r0, VCORE_ENTRY_EXIT(r5)
276 cmpwi r0, 0x100
277 bge kvm_novcpu_exit
278
279 /* clear our bit in napping_threads */
280 lbz r7, HSTATE_PTID(r13)
281 li r0, 1
282 sld r0, r0, r7
283 addi r6, r5, VCORE_NAPPING_THREADS
284 4: lwarx r7, 0, r6
285 andc r7, r7, r0
286 stwcx. r7, 0, r6
287 bne 4b
288
289 /* See if the wake reason means we need to exit */
290 cmpdi r3, 0
291 bge kvm_novcpu_exit
292
293 /* See if our timeslice has expired (HDEC is negative) */
294 mfspr r0, SPRN_HDEC
295 EXTEND_HDEC(r0)
296 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
297 cmpdi r0, 0
298 blt kvm_novcpu_exit
299
300 /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
301 ld r4, HSTATE_KVM_VCPU(r13)
302 cmpdi r4, 0
303 beq kvmppc_primary_no_guest
304
305 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
306 addi r3, r4, VCPU_TB_RMENTRY
307 bl kvmhv_start_timing
308 #endif
309 b kvmppc_got_guest
310
311 kvm_novcpu_exit:
312 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
313 ld r4, HSTATE_KVM_VCPU(r13)
314 cmpdi r4, 0
315 beq 13f
316 addi r3, r4, VCPU_TB_RMEXIT
317 bl kvmhv_accumulate_time
318 #endif
319 13: mr r3, r12
320 stw r12, STACK_SLOT_TRAP(r1)
321 bl kvmhv_commence_exit
322 nop
323 lwz r12, STACK_SLOT_TRAP(r1)
324 b kvmhv_switch_to_host
325
326 /*
327 * We come in here when wakened from nap mode.
328 * Relocation is off and most register values are lost.
329 * r13 points to the PACA.
330 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
331 */
332 .globl kvm_start_guest
333 kvm_start_guest:
334 /* Set runlatch bit the minute you wake up from nap */
335 mfspr r0, SPRN_CTRLF
336 ori r0, r0, 1
337 mtspr SPRN_CTRLT, r0
338
339 /*
340 * Could avoid this and pass it through in r3. For now,
341 * code expects it to be in SRR1.
342 */
343 mtspr SPRN_SRR1,r3
344
345 ld r2,PACATOC(r13)
346
347 li r0,KVM_HWTHREAD_IN_KVM
348 stb r0,HSTATE_HWTHREAD_STATE(r13)
349
350 /* NV GPR values from power7_idle() will no longer be valid */
351 li r0,1
352 stb r0,PACA_NAPSTATELOST(r13)
353
354 /* were we napping due to cede? */
355 lbz r0,HSTATE_NAPPING(r13)
356 cmpwi r0,NAPPING_CEDE
357 beq kvm_end_cede
358 cmpwi r0,NAPPING_NOVCPU
359 beq kvm_novcpu_wakeup
360
361 ld r1,PACAEMERGSP(r13)
362 subi r1,r1,STACK_FRAME_OVERHEAD
363
364 /*
365 * We weren't napping due to cede, so this must be a secondary
366 * thread being woken up to run a guest, or being woken up due
367 * to a stray IPI. (Or due to some machine check or hypervisor
368 * maintenance interrupt while the core is in KVM.)
369 */
370
371 /* Check the wake reason in SRR1 to see why we got here */
372 bl kvmppc_check_wake_reason
373 /*
374 * kvmppc_check_wake_reason could invoke a C routine, but we
375 * have no volatile registers to restore when we return.
376 */
377
378 cmpdi r3, 0
379 bge kvm_no_guest
380
381 /* get vcore pointer, NULL if we have nothing to run */
382 ld r5,HSTATE_KVM_VCORE(r13)
383 cmpdi r5,0
384 /* if we have no vcore to run, go back to sleep */
385 beq kvm_no_guest
386
387 kvm_secondary_got_guest:
388
389 /* Set HSTATE_DSCR(r13) to something sensible */
390 ld r6, PACA_DSCR_DEFAULT(r13)
391 std r6, HSTATE_DSCR(r13)
392
393 /* On thread 0 of a subcore, set HDEC to max */
394 lbz r4, HSTATE_PTID(r13)
395 cmpwi r4, 0
396 bne 63f
397 LOAD_REG_ADDR(r6, decrementer_max)
398 ld r6, 0(r6)
399 mtspr SPRN_HDEC, r6
400 /* and set per-LPAR registers, if doing dynamic micro-threading */
401 ld r6, HSTATE_SPLIT_MODE(r13)
402 cmpdi r6, 0
403 beq 63f
404 BEGIN_FTR_SECTION
405 ld r0, KVM_SPLIT_RPR(r6)
406 mtspr SPRN_RPR, r0
407 ld r0, KVM_SPLIT_PMMAR(r6)
408 mtspr SPRN_PMMAR, r0
409 ld r0, KVM_SPLIT_LDBAR(r6)
410 mtspr SPRN_LDBAR, r0
411 isync
412 FTR_SECTION_ELSE
413 /* On P9 we use the split_info for coordinating LPCR changes */
414 lwz r4, KVM_SPLIT_DO_SET(r6)
415 cmpwi r4, 0
416 beq 63f
417 mr r3, r6
418 bl kvmhv_p9_set_lpcr
419 nop
420 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
421 63:
422 /* Order load of vcpu after load of vcore */
423 lwsync
424 ld r4, HSTATE_KVM_VCPU(r13)
425 bl kvmppc_hv_entry
426
427 /* Back from the guest, go back to nap */
428 /* Clear our vcpu and vcore pointers so we don't come back in early */
429 li r0, 0
430 std r0, HSTATE_KVM_VCPU(r13)
431 /*
432 * Once we clear HSTATE_KVM_VCORE(r13), the code in
433 * kvmppc_run_core() is going to assume that all our vcpu
434 * state is visible in memory. This lwsync makes sure
435 * that that is true.
436 */
437 lwsync
438 std r0, HSTATE_KVM_VCORE(r13)
439
440 /*
441  * All secondaries exiting the guest will fall through this path.
442  * Before proceeding, check for an HMI interrupt and invoke the
443  * OPAL HMI handler. By now we are sure that the primary thread
444  * on this core/subcore has already done the partition switch and
445  * TB resync, so it is safe to call the OPAL HMI handler.
446 */
447 cmpwi r12, BOOK3S_INTERRUPT_HMI
448 bne kvm_no_guest
449
450 li r3,0 /* NULL argument */
451 bl hmi_exception_realmode
452 /*
453 * At this point we have finished executing in the guest.
454 * We need to wait for hwthread_req to become zero, since
455 * we may not turn on the MMU while hwthread_req is non-zero.
456  * While waiting we also need to check whether we have been given a vcpu to run.
457 */
458 kvm_no_guest:
459 lbz r3, HSTATE_HWTHREAD_REQ(r13)
460 cmpwi r3, 0
461 bne 53f
462 HMT_MEDIUM
463 li r0, KVM_HWTHREAD_IN_KERNEL
464 stb r0, HSTATE_HWTHREAD_STATE(r13)
465 /* need to recheck hwthread_req after a barrier, to avoid race */
466 sync
467 lbz r3, HSTATE_HWTHREAD_REQ(r13)
468 cmpwi r3, 0
469 bne 54f
470 /*
471 * We jump to pnv_wakeup_loss, which will return to the caller
472 * of power7_nap in the powernv cpu offline loop. The value we
473 * put in r3 becomes the return value for power7_nap. pnv_wakeup_loss
474 * requires SRR1 in r12.
475 */
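	/* Keep PECE0 (wake on external interrupt) set and clear PECE1 (wake on
	 * decrementer): the rlwimi inserts r3 under the PECE0|PECE1 mask */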
476 li r3, LPCR_PECE0
477 mfspr r4, SPRN_LPCR
478 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
479 mtspr SPRN_LPCR, r4
480 li r3, 0
481 mfspr r12,SPRN_SRR1
482 b pnv_wakeup_loss
483
484 53: HMT_LOW
485 ld r5, HSTATE_KVM_VCORE(r13)
486 cmpdi r5, 0
487 bne 60f
488 ld r3, HSTATE_SPLIT_MODE(r13)
489 cmpdi r3, 0
490 beq kvm_no_guest
491 lwz r0, KVM_SPLIT_DO_SET(r3)
492 cmpwi r0, 0
493 bne kvmhv_do_set
494 lwz r0, KVM_SPLIT_DO_RESTORE(r3)
495 cmpwi r0, 0
496 bne kvmhv_do_restore
497 lbz r0, KVM_SPLIT_DO_NAP(r3)
498 cmpwi r0, 0
499 beq kvm_no_guest
500 HMT_MEDIUM
501 b kvm_unsplit_nap
502 60: HMT_MEDIUM
503 b kvm_secondary_got_guest
504
505 54: li r0, KVM_HWTHREAD_IN_KVM
506 stb r0, HSTATE_HWTHREAD_STATE(r13)
507 b kvm_no_guest
508
509 kvmhv_do_set:
510 /* Set LPCR, LPIDR etc. on P9 */
511 HMT_MEDIUM
512 bl kvmhv_p9_set_lpcr
513 nop
514 b kvm_no_guest
515
516 kvmhv_do_restore:
517 HMT_MEDIUM
518 bl kvmhv_p9_restore_lpcr
519 nop
520 b kvm_no_guest
521
522 /*
523 * Here the primary thread is trying to return the core to
524 * whole-core mode, so we need to nap.
525 */
526 kvm_unsplit_nap:
527 /*
528  * When secondaries are napping in kvm_unsplit_nap() with
529  * hwthread_req = 1, an HMI is ignored even though the subcores
530  * have already exited the guest. Hence the HMI keeps waking the
531  * secondaries up from nap in a loop, and they always go back to
532  * nap since no vcore is assigned to them. This makes it impossible
533  * for the primary thread to get hold of the secondary threads,
534  * resulting in a soft lockup in the KVM path.
535 *
536 * Let us check if HMI is pending and handle it before we go to nap.
537 */
538 cmpwi r12, BOOK3S_INTERRUPT_HMI
539 bne 55f
540 li r3, 0 /* NULL argument */
541 bl hmi_exception_realmode
542 55:
543 /*
544 * Ensure that secondary doesn't nap when it has
545 * its vcore pointer set.
546 */
547 sync /* matches smp_mb() before setting split_info.do_nap */
548 ld r0, HSTATE_KVM_VCORE(r13)
549 cmpdi r0, 0
550 bne kvm_no_guest
551 /* clear any pending message */
552 BEGIN_FTR_SECTION
553 lis r6, (PPC_DBELL_SERVER << (63-36))@h
554 PPC_MSGCLR(6)
555 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
556 /* Set kvm_split_mode.napped[tid] = 1 */
557 ld r3, HSTATE_SPLIT_MODE(r13)
558 li r0, 1
559 lbz r4, HSTATE_TID(r13)
560 addi r4, r4, KVM_SPLIT_NAPPED
561 stbx r0, r3, r4
562 /* Check the do_nap flag again after setting napped[] */
563 sync
564 lbz r0, KVM_SPLIT_DO_NAP(r3)
565 cmpwi r0, 0
566 beq 57f
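	/* Wake only on hypervisor doorbells (PECEDH) and external interrupts
	 * (PECE0); the rlwimi clears PECEDP and PECE1 within the same mask */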
567 li r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
568 mfspr r5, SPRN_LPCR
569 rlwimi r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
570 b kvm_nap_sequence
571
572 57: li r0, 0
573 stbx r0, r3, r4
574 b kvm_no_guest
575
576 /******************************************************************************
577 * *
578 * Entry code *
579 * *
580 *****************************************************************************/
581
582 .global kvmppc_hv_entry
583 kvmppc_hv_entry:
584
585 /* Required state:
586 *
587 * R4 = vcpu pointer (or NULL)
588 * MSR = ~IR|DR
589 * R13 = PACA
590 * R1 = host R1
591 * R2 = TOC
592 * all other volatile GPRS = free
593 * Does not preserve non-volatile GPRs or CR fields
594 */
595 mflr r0
596 std r0, PPC_LR_STKOFF(r1)
597 stdu r1, -SFS(r1)
598
599 /* Save R1 in the PACA */
600 std r1, HSTATE_HOST_R1(r13)
601
602 li r6, KVM_GUEST_MODE_HOST_HV
603 stb r6, HSTATE_IN_GUEST(r13)
604
605 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
606 /* Store initial timestamp */
607 cmpdi r4, 0
608 beq 1f
609 addi r3, r4, VCPU_TB_RMENTRY
610 bl kvmhv_start_timing
611 1:
612 #endif
613
614 /* Use cr7 as an indication of radix mode */
615 ld r5, HSTATE_KVM_VCORE(r13)
616 ld r9, VCORE_KVM(r5) /* pointer to struct kvm */
617 lbz r0, KVM_RADIX(r9)
618 cmpwi cr7, r0, 0
619
620 /* Clear out SLB if hash */
621 bne cr7, 2f
622 li r6,0
623 slbmte r6,r6
624 slbia
625 ptesync
626 2:
627 /*
628 * POWER7/POWER8 host -> guest partition switch code.
629 * We don't have to lock against concurrent tlbies,
630 * but we do have to coordinate across hardware threads.
631 */
632 /* Set bit in entry map iff exit map is zero. */
633 li r7, 1
634 lbz r6, HSTATE_PTID(r13)
635 sld r7, r7, r6
636 addi r8, r5, VCORE_ENTRY_EXIT
637 21: lwarx r3, 0, r8
638 cmpwi r3, 0x100 /* any threads starting to exit? */
639 bge secondary_too_late /* if so we're too late to the party */
640 or r3, r3, r7
641 stwcx. r3, 0, r8
642 bne 21b
643
644 /* Primary thread switches to guest partition. */
645 cmpwi r6,0
646 bne 10f
647 lwz r7,KVM_LPID(r9)
648 BEGIN_FTR_SECTION
649 ld r6,KVM_SDR1(r9)
650 li r0,LPID_RSVD /* switch to reserved LPID */
651 mtspr SPRN_LPID,r0
652 ptesync
653 mtspr SPRN_SDR1,r6 /* switch to partition page table */
654 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
655 mtspr SPRN_LPID,r7
656 isync
657
658 /* See if we need to flush the TLB */
659 lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */
660 BEGIN_FTR_SECTION
661 /*
662 * On POWER9, individual threads can come in here, but the
663 * TLB is shared between the 4 threads in a core, hence
664 * invalidating on one thread invalidates for all.
665 * Thus we make all 4 threads use the same bit here.
666 */
667 clrrdi r6,r6,2
668 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
669 clrldi r7,r6,64-6 /* extract bit number (6 bits) */
670 srdi r6,r6,6 /* doubleword number */
671 sldi r6,r6,3 /* address offset */
672 add r6,r6,r9
673 addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */
674 li r8,1
675 sld r8,r8,r7
676 ld r7,0(r6)
677 and. r7,r7,r8
678 beq 22f
679 /* Flush the TLB of any entries for this LPID */
680 lwz r0,KVM_TLB_SETS(r9)
681 mtctr r0
682 li r7,0x800 /* IS field = 0b10 */
683 ptesync
684 li r0,0 /* RS for P9 version of tlbiel */
685 bne cr7, 29f
686 28: tlbiel r7 /* On P9, rs=0, RIC=0, PRS=0, R=0 */
687 addi r7,r7,0x1000
688 bdnz 28b
689 b 30f
690 29: PPC_TLBIEL(7,0,2,1,1) /* for radix, RIC=2, PRS=1, R=1 */
691 addi r7,r7,0x1000
692 bdnz 29b
693 30: ptesync
694 23: ldarx r7,0,r6 /* clear the bit after TLB flushed */
695 andc r7,r7,r8
696 stdcx. r7,0,r6
697 bne 23b
698
699 /* Add timebase offset onto timebase */
700 22: ld r8,VCORE_TB_OFFSET(r5)
701 cmpdi r8,0
702 beq 37f
703 mftb r6 /* current host timebase */
704 add r8,r8,r6
705 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
706 mftb r7 /* check if lower 24 bits overflowed */
707 clrldi r6,r6,40
708 clrldi r7,r7,40
709 cmpld r7,r6
710 bge 37f
711 addis r8,r8,0x100 /* if so, increment upper 40 bits */
712 mtspr SPRN_TBU40,r8
713
714 /* Load guest PCR value to select appropriate compat mode */
715 37: ld r7, VCORE_PCR(r5)
716 cmpdi r7, 0
717 beq 38f
718 mtspr SPRN_PCR, r7
719 38:
720
721 BEGIN_FTR_SECTION
722 /* DPDES and VTB are shared between threads */
723 ld r8, VCORE_DPDES(r5)
724 ld r7, VCORE_VTB(r5)
725 mtspr SPRN_DPDES, r8
726 mtspr SPRN_VTB, r7
727 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
728
729 /* Mark the subcore state as inside guest */
730 bl kvmppc_subcore_enter_guest
731 nop
732 ld r5, HSTATE_KVM_VCORE(r13)
733 ld r4, HSTATE_KVM_VCPU(r13)
734 li r0,1
735 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
736
737 /* Do we have a guest vcpu to run? */
738 10: cmpdi r4, 0
739 beq kvmppc_primary_no_guest
740 kvmppc_got_guest:
741
742 /* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
743 lwz r5,VCPU_SLB_MAX(r4)
744 cmpwi r5,0
745 beq 9f
746 mtctr r5
747 addi r6,r4,VCPU_SLB
748 1: ld r8,VCPU_SLB_E(r6)
749 ld r9,VCPU_SLB_V(r6)
750 slbmte r9,r8
751 addi r6,r6,VCPU_SLB_SIZE
752 bdnz 1b
753 9:
754 /* Increment yield count if they have a VPA */
755 ld r3, VCPU_VPA(r4)
756 cmpdi r3, 0
757 beq 25f
758 li r6, LPPACA_YIELDCOUNT
759 LWZX_BE r5, r3, r6
760 addi r5, r5, 1
761 STWX_BE r5, r3, r6
762 li r6, 1
763 stb r6, VCPU_VPA_DIRTY(r4)
764 25:
765
766 /* Save purr/spurr */
767 mfspr r5,SPRN_PURR
768 mfspr r6,SPRN_SPURR
769 std r5,HSTATE_PURR(r13)
770 std r6,HSTATE_SPURR(r13)
771 ld r7,VCPU_PURR(r4)
772 ld r8,VCPU_SPURR(r4)
773 mtspr SPRN_PURR,r7
774 mtspr SPRN_SPURR,r8
775
776 /* Save host values of some registers */
777 BEGIN_FTR_SECTION
778 mfspr r5, SPRN_TIDR
779 mfspr r6, SPRN_PSSCR
780 mfspr r7, SPRN_PID
781 mfspr r8, SPRN_IAMR
782 std r5, STACK_SLOT_TID(r1)
783 std r6, STACK_SLOT_PSSCR(r1)
784 std r7, STACK_SLOT_PID(r1)
785 std r8, STACK_SLOT_IAMR(r1)
786 mfspr r5, SPRN_HFSCR
787 std r5, STACK_SLOT_HFSCR(r1)
788 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
789 BEGIN_FTR_SECTION
790 mfspr r5, SPRN_CIABR
791 mfspr r6, SPRN_DAWR
792 mfspr r7, SPRN_DAWRX
793 std r5, STACK_SLOT_CIABR(r1)
794 std r6, STACK_SLOT_DAWR(r1)
795 std r7, STACK_SLOT_DAWRX(r1)
796 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
797
798 BEGIN_FTR_SECTION
799 /* Set partition DABR */
800 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
801 lwz r5,VCPU_DABRX(r4)
802 ld r6,VCPU_DABR(r4)
803 mtspr SPRN_DABRX,r5
804 mtspr SPRN_DABR,r6
805 isync
806 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
807
808 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
809 BEGIN_FTR_SECTION
810 /*
811 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
812 */
813 bl kvmppc_restore_tm
814 END_FTR_SECTION_IFSET(CPU_FTR_TM)
815 #endif
816
817 /* Load guest PMU registers */
818 /* R4 is live here (vcpu pointer) */
819 li r3, 1
820 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
821 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
822 isync
823 BEGIN_FTR_SECTION
824 ld r3, VCPU_MMCR(r4)
825 andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
826 cmpwi r5, MMCR0_PMAO
827 beql kvmppc_fix_pmao
828 END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
829 lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
830 lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
831 lwz r6, VCPU_PMC + 8(r4)
832 lwz r7, VCPU_PMC + 12(r4)
833 lwz r8, VCPU_PMC + 16(r4)
834 lwz r9, VCPU_PMC + 20(r4)
835 mtspr SPRN_PMC1, r3
836 mtspr SPRN_PMC2, r5
837 mtspr SPRN_PMC3, r6
838 mtspr SPRN_PMC4, r7
839 mtspr SPRN_PMC5, r8
840 mtspr SPRN_PMC6, r9
841 ld r3, VCPU_MMCR(r4)
842 ld r5, VCPU_MMCR + 8(r4)
843 ld r6, VCPU_MMCR + 16(r4)
844 ld r7, VCPU_SIAR(r4)
845 ld r8, VCPU_SDAR(r4)
846 mtspr SPRN_MMCR1, r5
847 mtspr SPRN_MMCRA, r6
848 mtspr SPRN_SIAR, r7
849 mtspr SPRN_SDAR, r8
850 BEGIN_FTR_SECTION
851 ld r5, VCPU_MMCR + 24(r4)
852 ld r6, VCPU_SIER(r4)
853 mtspr SPRN_MMCR2, r5
854 mtspr SPRN_SIER, r6
855 BEGIN_FTR_SECTION_NESTED(96)
856 lwz r7, VCPU_PMC + 24(r4)
857 lwz r8, VCPU_PMC + 28(r4)
858 ld r9, VCPU_MMCR + 32(r4)
859 mtspr SPRN_SPMC1, r7
860 mtspr SPRN_SPMC2, r8
861 mtspr SPRN_MMCRS, r9
862 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
863 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
864 mtspr SPRN_MMCR0, r3
865 isync
866
867 /* Load up FP, VMX and VSX registers */
868 bl kvmppc_load_fp
869
870 ld r14, VCPU_GPR(R14)(r4)
871 ld r15, VCPU_GPR(R15)(r4)
872 ld r16, VCPU_GPR(R16)(r4)
873 ld r17, VCPU_GPR(R17)(r4)
874 ld r18, VCPU_GPR(R18)(r4)
875 ld r19, VCPU_GPR(R19)(r4)
876 ld r20, VCPU_GPR(R20)(r4)
877 ld r21, VCPU_GPR(R21)(r4)
878 ld r22, VCPU_GPR(R22)(r4)
879 ld r23, VCPU_GPR(R23)(r4)
880 ld r24, VCPU_GPR(R24)(r4)
881 ld r25, VCPU_GPR(R25)(r4)
882 ld r26, VCPU_GPR(R26)(r4)
883 ld r27, VCPU_GPR(R27)(r4)
884 ld r28, VCPU_GPR(R28)(r4)
885 ld r29, VCPU_GPR(R29)(r4)
886 ld r30, VCPU_GPR(R30)(r4)
887 ld r31, VCPU_GPR(R31)(r4)
888
889 /* Switch DSCR to guest value */
890 ld r5, VCPU_DSCR(r4)
891 mtspr SPRN_DSCR, r5
892
893 BEGIN_FTR_SECTION
894 /* Skip next section on POWER7 */
895 b 8f
896 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
897 /* Load up POWER8-specific registers */
898 ld r5, VCPU_IAMR(r4)
899 lwz r6, VCPU_PSPB(r4)
900 ld r7, VCPU_FSCR(r4)
901 mtspr SPRN_IAMR, r5
902 mtspr SPRN_PSPB, r6
903 mtspr SPRN_FSCR, r7
904 ld r5, VCPU_DAWR(r4)
905 ld r6, VCPU_DAWRX(r4)
906 ld r7, VCPU_CIABR(r4)
907 ld r8, VCPU_TAR(r4)
908 mtspr SPRN_DAWR, r5
909 mtspr SPRN_DAWRX, r6
910 mtspr SPRN_CIABR, r7
911 mtspr SPRN_TAR, r8
912 ld r5, VCPU_IC(r4)
913 ld r8, VCPU_EBBHR(r4)
914 mtspr SPRN_IC, r5
915 mtspr SPRN_EBBHR, r8
916 ld r5, VCPU_EBBRR(r4)
917 ld r6, VCPU_BESCR(r4)
918 lwz r7, VCPU_GUEST_PID(r4)
919 ld r8, VCPU_WORT(r4)
920 mtspr SPRN_EBBRR, r5
921 mtspr SPRN_BESCR, r6
922 mtspr SPRN_PID, r7
923 mtspr SPRN_WORT, r8
924 BEGIN_FTR_SECTION
925 PPC_INVALIDATE_ERAT
926 END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
927 BEGIN_FTR_SECTION
928 /* POWER8-only registers */
929 ld r5, VCPU_TCSCR(r4)
930 ld r6, VCPU_ACOP(r4)
931 ld r7, VCPU_CSIGR(r4)
932 ld r8, VCPU_TACR(r4)
933 mtspr SPRN_TCSCR, r5
934 mtspr SPRN_ACOP, r6
935 mtspr SPRN_CSIGR, r7
936 mtspr SPRN_TACR, r8
937 FTR_SECTION_ELSE
938 /* POWER9-only registers */
939 ld r5, VCPU_TID(r4)
940 ld r6, VCPU_PSSCR(r4)
941 oris r6, r6, PSSCR_EC@h /* This makes stop trap to HV */
942 ld r7, VCPU_HFSCR(r4)
943 mtspr SPRN_TIDR, r5
944 mtspr SPRN_PSSCR, r6
945 mtspr SPRN_HFSCR, r7
946 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
947 8:
948
949 /*
950 * Set the decrementer to the guest decrementer.
951 */
952 ld r8,VCPU_DEC_EXPIRES(r4)
953 /* r8 is a host timebase value here, convert to guest TB */
954 ld r5,HSTATE_KVM_VCORE(r13)
955 ld r6,VCORE_TB_OFFSET(r5)
956 add r8,r8,r6
957 mftb r7
958 subf r3,r7,r8
959 mtspr SPRN_DEC,r3
960 std r3,VCPU_DEC(r4)
961
962 ld r5, VCPU_SPRG0(r4)
963 ld r6, VCPU_SPRG1(r4)
964 ld r7, VCPU_SPRG2(r4)
965 ld r8, VCPU_SPRG3(r4)
966 mtspr SPRN_SPRG0, r5
967 mtspr SPRN_SPRG1, r6
968 mtspr SPRN_SPRG2, r7
969 mtspr SPRN_SPRG3, r8
970
971 /* Load up DAR and DSISR */
972 ld r5, VCPU_DAR(r4)
973 lwz r6, VCPU_DSISR(r4)
974 mtspr SPRN_DAR, r5
975 mtspr SPRN_DSISR, r6
976
977 /* Restore AMR and UAMOR, set AMOR to all 1s */
978 ld r5,VCPU_AMR(r4)
979 ld r6,VCPU_UAMOR(r4)
980 li r7,-1
981 mtspr SPRN_AMR,r5
982 mtspr SPRN_UAMOR,r6
983 mtspr SPRN_AMOR,r7
984
985 /* Restore state of CTRL run bit; assume 1 on entry */
986 lwz r5,VCPU_CTRL(r4)
987 andi. r5,r5,1
988 bne 4f
989 mfspr r6,SPRN_CTRLF
990 clrrdi r6,r6,1
991 mtspr SPRN_CTRLT,r6
992 4:
993 /* Secondary threads wait for primary to have done partition switch */
994 ld r5, HSTATE_KVM_VCORE(r13)
995 lbz r6, HSTATE_PTID(r13)
996 cmpwi r6, 0
997 beq 21f
998 lbz r0, VCORE_IN_GUEST(r5)
999 cmpwi r0, 0
1000 bne 21f
1001 HMT_LOW
1002 20: lwz r3, VCORE_ENTRY_EXIT(r5)
1003 cmpwi r3, 0x100
1004 bge no_switch_exit
1005 lbz r0, VCORE_IN_GUEST(r5)
1006 cmpwi r0, 0
1007 beq 20b
1008 HMT_MEDIUM
1009 21:
1010 /* Set LPCR. */
1011 ld r8,VCORE_LPCR(r5)
1012 mtspr SPRN_LPCR,r8
1013 isync
1014
1015 /* Check if HDEC expires soon */
1016 mfspr r3, SPRN_HDEC
1017 EXTEND_HDEC(r3)
1018 cmpdi r3, 512 /* 1 microsecond */
1019 blt hdec_soon
1020
1021 #ifdef CONFIG_KVM_XICS
1022 /* We are entering the guest on that thread, push VCPU to XIVE */
1023 ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
1024 cmpldi cr0, r10, 0
1025 beq no_xive
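	/* Push the vcpu's cached OS context (word 0/1, then the CAM word) into
	 * the XIVE thread interrupt management area; cache-inhibited stores
	 * are used because we are in real mode here */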
1026 ld r11, VCPU_XIVE_SAVED_STATE(r4)
1027 li r9, TM_QW1_OS
1028 eieio
1029 stdcix r11,r9,r10
1030 lwz r11, VCPU_XIVE_CAM_WORD(r4)
1031 li r9, TM_QW1_OS + TM_WORD2
1032 stwcix r11,r9,r10
1033 li r9, 1
1034 stw r9, VCPU_XIVE_PUSHED(r4)
1035 eieio
1036 no_xive:
1037 #endif /* CONFIG_KVM_XICS */
1038
1039 deliver_guest_interrupt:
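	/* r4 = vcpu. Reload guest CTR and XER, then decide below whether an
	 * external or decrementer interrupt should be injected before entry */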
1040 ld r6, VCPU_CTR(r4)
1041 ld r7, VCPU_XER(r4)
1042
1043 mtctr r6
1044 mtxer r7
1045
1046 kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */
1047 ld r10, VCPU_PC(r4)
1048 ld r11, VCPU_MSR(r4)
1049 ld r6, VCPU_SRR0(r4)
1050 ld r7, VCPU_SRR1(r4)
1051 mtspr SPRN_SRR0, r6
1052 mtspr SPRN_SRR1, r7
1053
1054 /* r11 = vcpu->arch.msr & ~MSR_HV */
1055 rldicl r11, r11, 63 - MSR_HV_LG, 1
1056 rotldi r11, r11, 1 + MSR_HV_LG
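	/* the rldicl rotates the HV bit up to the MSB and clears it via the
	 * mask; the rotldi completes a full 64-bit rotation, putting the
	 * remaining bits back in place */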
1057 ori r11, r11, MSR_ME
1058
1059 /* Check if we can deliver an external or decrementer interrupt now */
1060 ld r0, VCPU_PENDING_EXC(r4)
1061 rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
1062 cmpdi cr1, r0, 0
1063 andi. r8, r11, MSR_EE
1064 mfspr r8, SPRN_LPCR
1065 /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
1066 rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
1067 mtspr SPRN_LPCR, r8
1068 isync
1069 beq 5f
1070 li r0, BOOK3S_INTERRUPT_EXTERNAL
1071 bne cr1, 12f
1072 mfspr r0, SPRN_DEC
1073 BEGIN_FTR_SECTION
1074 /* On POWER9 check whether the guest has large decrementer enabled */
1075 andis. r8, r8, LPCR_LD@h
1076 bne 15f
1077 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1078 extsw r0, r0
1079 15: cmpdi r0, 0
1080 li r0, BOOK3S_INTERRUPT_DECREMENTER
1081 bge 5f
1082
1083 12: mtspr SPRN_SRR0, r10
1084 mr r10,r0
1085 mtspr SPRN_SRR1, r11
1086 mr r9, r4
1087 bl kvmppc_msr_interrupt
1088 5:
1089 BEGIN_FTR_SECTION
1090 b fast_guest_return
1091 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
1092 /* On POWER9, check for pending doorbell requests */
1093 lbz r0, VCPU_DBELL_REQ(r4)
1094 cmpwi r0, 0
1095 beq fast_guest_return
1096 ld r5, HSTATE_KVM_VCORE(r13)
1097 /* Set DPDES register so the CPU will take a doorbell interrupt */
1098 li r0, 1
1099 mtspr SPRN_DPDES, r0
1100 std r0, VCORE_DPDES(r5)
1101 /* Make sure other cpus see vcore->dpdes set before dbell req clear */
1102 lwsync
1103 /* Clear the pending doorbell request */
1104 li r0, 0
1105 stb r0, VCPU_DBELL_REQ(r4)
1106
1107 /*
1108 * Required state:
1109 * R4 = vcpu
1110 * R10: value for HSRR0
1111 * R11: value for HSRR1
1112 * R13 = PACA
1113 */
1114 fast_guest_return:
1115 li r0,0
1116 stb r0,VCPU_CEDED(r4) /* cancel cede */
1117 mtspr SPRN_HSRR0,r10
1118 mtspr SPRN_HSRR1,r11
1119
1120 /* Activate guest mode, so faults get handled by KVM */
1121 li r9, KVM_GUEST_MODE_GUEST_HV
1122 stb r9, HSTATE_IN_GUEST(r13)
1123
1124 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1125 /* Accumulate timing */
1126 addi r3, r4, VCPU_TB_GUEST
1127 bl kvmhv_accumulate_time
1128 #endif
1129
1130 /* Enter guest */
1131
1132 BEGIN_FTR_SECTION
1133 ld r5, VCPU_CFAR(r4)
1134 mtspr SPRN_CFAR, r5
1135 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1136 BEGIN_FTR_SECTION
1137 ld r0, VCPU_PPR(r4)
1138 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1139
1140 ld r5, VCPU_LR(r4)
1141 lwz r6, VCPU_CR(r4)
1142 mtlr r5
1143 mtcr r6
1144
1145 ld r1, VCPU_GPR(R1)(r4)
1146 ld r2, VCPU_GPR(R2)(r4)
1147 ld r3, VCPU_GPR(R3)(r4)
1148 ld r5, VCPU_GPR(R5)(r4)
1149 ld r6, VCPU_GPR(R6)(r4)
1150 ld r7, VCPU_GPR(R7)(r4)
1151 ld r8, VCPU_GPR(R8)(r4)
1152 ld r9, VCPU_GPR(R9)(r4)
1153 ld r10, VCPU_GPR(R10)(r4)
1154 ld r11, VCPU_GPR(R11)(r4)
1155 ld r12, VCPU_GPR(R12)(r4)
1156 ld r13, VCPU_GPR(R13)(r4)
1157
1158 BEGIN_FTR_SECTION
1159 mtspr SPRN_PPR, r0
1160 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1161
1162 /* Move canary into DSISR to check for later */
1163 BEGIN_FTR_SECTION
1164 li r0, 0x7fff
1165 mtspr SPRN_HDSISR, r0
1166 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1167
1168 ld r0, VCPU_GPR(R0)(r4)
1169 ld r4, VCPU_GPR(R4)(r4)
1170 HRFI_TO_GUEST
1171 b .
1172
1173 secondary_too_late:
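	/* This thread lost the race with the exit sequence: record a trap
	 * value of 0 and rejoin the host-switch path without entering the guest */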
1174 li r12, 0
1175 cmpdi r4, 0
1176 beq 11f
1177 stw r12, VCPU_TRAP(r4)
1178 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1179 addi r3, r4, VCPU_TB_RMEXIT
1180 bl kvmhv_accumulate_time
1181 #endif
1182 11: b kvmhv_switch_to_host
1183
1184 no_switch_exit:
1185 HMT_MEDIUM
1186 li r12, 0
1187 b 12f
1188 hdec_soon:
1189 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
1190 12: stw r12, VCPU_TRAP(r4)
1191 mr r9, r4
1192 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1193 addi r3, r4, VCPU_TB_RMEXIT
1194 bl kvmhv_accumulate_time
1195 #endif
1196 b guest_exit_cont
1197
1198 /******************************************************************************
1199 * *
1200 * Exit code *
1201 * *
1202 *****************************************************************************/
1203
1204 /*
1205 * We come here from the first-level interrupt handlers.
1206 */
1207 .globl kvmppc_interrupt_hv
1208 kvmppc_interrupt_hv:
1209 /*
1210 * Register contents:
1211 * R12 = (guest CR << 32) | interrupt vector
1212 * R13 = PACA
1213 * guest R12 saved in shadow VCPU SCRATCH0
1214 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
1215 * guest R13 saved in SPRN_SCRATCH0
1216 */
1217 std r9, HSTATE_SCRATCH2(r13)
1218 lbz r9, HSTATE_IN_GUEST(r13)
1219 cmpwi r9, KVM_GUEST_MODE_HOST_HV
1220 beq kvmppc_bad_host_intr
1221 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1222 cmpwi r9, KVM_GUEST_MODE_GUEST
1223 ld r9, HSTATE_SCRATCH2(r13)
1224 beq kvmppc_interrupt_pr
1225 #endif
1226 /* We're now back in the host but in guest MMU context */
1227 li r9, KVM_GUEST_MODE_HOST_HV
1228 stb r9, HSTATE_IN_GUEST(r13)
1229
1230 ld r9, HSTATE_KVM_VCPU(r13)
1231
1232 /* Save registers */
1233
1234 std r0, VCPU_GPR(R0)(r9)
1235 std r1, VCPU_GPR(R1)(r9)
1236 std r2, VCPU_GPR(R2)(r9)
1237 std r3, VCPU_GPR(R3)(r9)
1238 std r4, VCPU_GPR(R4)(r9)
1239 std r5, VCPU_GPR(R5)(r9)
1240 std r6, VCPU_GPR(R6)(r9)
1241 std r7, VCPU_GPR(R7)(r9)
1242 std r8, VCPU_GPR(R8)(r9)
1243 ld r0, HSTATE_SCRATCH2(r13)
1244 std r0, VCPU_GPR(R9)(r9)
1245 std r10, VCPU_GPR(R10)(r9)
1246 std r11, VCPU_GPR(R11)(r9)
1247 ld r3, HSTATE_SCRATCH0(r13)
1248 std r3, VCPU_GPR(R12)(r9)
1249 /* CR is in the high half of r12 */
1250 srdi r4, r12, 32
1251 stw r4, VCPU_CR(r9)
1252 BEGIN_FTR_SECTION
1253 ld r3, HSTATE_CFAR(r13)
1254 std r3, VCPU_CFAR(r9)
1255 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1256 BEGIN_FTR_SECTION
1257 ld r4, HSTATE_PPR(r13)
1258 std r4, VCPU_PPR(r9)
1259 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1260
1261 /* Restore R1/R2 so we can handle faults */
1262 ld r1, HSTATE_HOST_R1(r13)
1263 ld r2, PACATOC(r13)
1264
1265 mfspr r10, SPRN_SRR0
1266 mfspr r11, SPRN_SRR1
1267 std r10, VCPU_SRR0(r9)
1268 std r11, VCPU_SRR1(r9)
1269 /* trap is in the low half of r12, clear CR from the high half */
1270 clrldi r12, r12, 32
1271 andi. r0, r12, 2 /* need to read HSRR0/1? */
1272 beq 1f
1273 mfspr r10, SPRN_HSRR0
1274 mfspr r11, SPRN_HSRR1
1275 clrrdi r12, r12, 2
1276 1: std r10, VCPU_PC(r9)
1277 std r11, VCPU_MSR(r9)
1278
1279 GET_SCRATCH0(r3)
1280 mflr r4
1281 std r3, VCPU_GPR(R13)(r9)
1282 std r4, VCPU_LR(r9)
1283
1284 stw r12,VCPU_TRAP(r9)
1285
1286 /*
1287 * Now that we have saved away SRR0/1 and HSRR0/1,
1288 * interrupts are recoverable in principle, so set MSR_RI.
1289 * This becomes important for relocation-on interrupts from
1290 * the guest, which we can get in radix mode on POWER9.
1291 */
1292 li r0, MSR_RI
1293 mtmsrd r0, 1
1294
1295 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1296 addi r3, r9, VCPU_TB_RMINTR
1297 mr r4, r9
1298 bl kvmhv_accumulate_time
1299 ld r5, VCPU_GPR(R5)(r9)
1300 ld r6, VCPU_GPR(R6)(r9)
1301 ld r7, VCPU_GPR(R7)(r9)
1302 ld r8, VCPU_GPR(R8)(r9)
1303 #endif
1304
1305 /* Save HEIR (HV emulation assist reg) in emul_inst
1306 if this is an HEI (HV emulation interrupt, e40) */
1307 li r3,KVM_INST_FETCH_FAILED
1308 stw r3,VCPU_LAST_INST(r9)
1309 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
1310 bne 11f
1311 mfspr r3,SPRN_HEIR
1312 11: stw r3,VCPU_HEIR(r9)
1313
1314 /* these are volatile across C function calls */
1315 #ifdef CONFIG_RELOCATABLE
1316 ld r3, HSTATE_SCRATCH1(r13)
1317 mtctr r3
1318 #else
1319 mfctr r3
1320 #endif
1321 mfxer r4
1322 std r3, VCPU_CTR(r9)
1323 std r4, VCPU_XER(r9)
1324
1325 /* If this is a page table miss then see if it's theirs or ours */
1326 cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1327 beq kvmppc_hdsi
1328 cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1329 beq kvmppc_hisi
1330
1331 /* See if this is a leftover HDEC interrupt */
1332 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
1333 bne 2f
1334 mfspr r3,SPRN_HDEC
1335 EXTEND_HDEC(r3)
1336 cmpdi r3,0
1337 mr r4,r9
1338 bge fast_guest_return
1339 2:
1340 /* See if this is an hcall we can handle in real mode */
1341 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
1342 beq hcall_try_real_mode
1343
1344 /* Hypervisor doorbell - exit only if host IPI flag set */
1345 cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
1346 bne 3f
1347 BEGIN_FTR_SECTION
1348 PPC_MSGSYNC
1349 lwsync
1350 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1351 lbz r0, HSTATE_HOST_IPI(r13)
1352 cmpwi r0, 0
1353 beq 4f
1354 b guest_exit_cont
1355 3:
1356 /* If it's a hypervisor facility unavailable interrupt, save HFSCR */
1357 cmpwi r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
1358 bne 14f
1359 mfspr r3, SPRN_HFSCR
1360 std r3, VCPU_HFSCR(r9)
1361 b guest_exit_cont
1362 14:
1363 /* External interrupt ? */
1364 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
1365 bne+ guest_exit_cont
1366
1367 /* External interrupt, first check for host_ipi. If this is
1368 * set, we know the host wants us out so let's do it now
1369 */
1370 bl kvmppc_read_intr
1371
1372 /*
1373 * Restore the active volatile registers after returning from
1374 * a C function.
1375 */
1376 ld r9, HSTATE_KVM_VCPU(r13)
1377 li r12, BOOK3S_INTERRUPT_EXTERNAL
1378
1379 /*
1380 * kvmppc_read_intr return codes:
1381 *
1382 * Exit to host (r3 > 0)
1383 * 1 An interrupt is pending that needs to be handled by the host
1384 * Exit guest and return to host by branching to guest_exit_cont
1385 *
1386 * 2 Passthrough that needs completion in the host
1387 * Exit guest and return to host by branching to guest_exit_cont
1388 * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
1389 * to indicate to the host to complete handling the interrupt
1390 *
1391  * Before returning to guest, we check if any CPU is heading out
1392  * to the host and if so, we head out also. If no CPUs are heading
1393  * out, we handle the return values <= 0 below.
1394 *
1395 * Return to guest (r3 <= 0)
1396 * 0 No external interrupt is pending
1397 * -1 A guest wakeup IPI (which has now been cleared)
1398 * In either case, we return to guest to deliver any pending
1399 * guest interrupts.
1400 *
1401 * -2 A PCI passthrough external interrupt was handled
1402 * (interrupt was delivered directly to guest)
1403 * Return to guest to deliver any pending guest interrupts.
1404 */
1405
1406 cmpdi r3, 1
1407 ble 1f
1408
1409 /* Return code = 2 */
1410 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
1411 stw r12, VCPU_TRAP(r9)
1412 b guest_exit_cont
1413
1414 1: /* Return code <= 1 */
1415 cmpdi r3, 0
1416 bgt guest_exit_cont
1417
1418 /* Return code <= 0 */
1419 4: ld r5, HSTATE_KVM_VCORE(r13)
1420 lwz r0, VCORE_ENTRY_EXIT(r5)
1421 cmpwi r0, 0x100
1422 mr r4, r9
1423 blt deliver_guest_interrupt
1424
1425 guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
1426 #ifdef CONFIG_KVM_XICS
1427 /* We are exiting, pull the VP from the XIVE */
1428 lwz r0, VCPU_XIVE_PUSHED(r9)
1429 cmpwi cr0, r0, 0
1430 beq 1f
1431 li r7, TM_SPC_PULL_OS_CTX
1432 li r6, TM_QW1_OS
1433 mfmsr r0
1434 andi. r0, r0, MSR_IR /* in real mode? */
1435 beq 2f
1436 ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
1437 cmpldi cr0, r10, 0
1438 beq 1f
1439 /* First load to pull the context, we ignore the value */
1440 eieio
1441 lwzx r11, r7, r10
1442 /* Second load to recover the context state (Words 0 and 1) */
1443 ldx r11, r6, r10
1444 b 3f
1445 2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
1446 cmpldi cr0, r10, 0
1447 beq 1f
1448 /* First load to pull the context, we ignore the value */
1449 eieio
1450 lwzcix r11, r7, r10
1451 /* Second load to recover the context state (Words 0 and 1) */
1452 ldcix r11, r6, r10
1453 3: std r11, VCPU_XIVE_SAVED_STATE(r9)
1454 /* Fixup some of the state for the next load */
1455 li r10, 0
1456 li r0, 0xff
1457 stw r10, VCPU_XIVE_PUSHED(r9)
1458 stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
1459 stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
1460 eieio
1461 1:
1462 #endif /* CONFIG_KVM_XICS */
1463 /* Save more register state */
1464 mfdar r6
1465 mfdsisr r7
1466 std r6, VCPU_DAR(r9)
1467 stw r7, VCPU_DSISR(r9)
1468 /* don't overwrite fault_dar/fault_dsisr if HDSI */
1469 cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
1470 beq mc_cont
1471 std r6, VCPU_FAULT_DAR(r9)
1472 stw r7, VCPU_FAULT_DSISR(r9)
1473
1474 /* See if it is a machine check */
1475 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
1476 beq machine_check_realmode
1477 mc_cont:
1478 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1479 addi r3, r9, VCPU_TB_RMEXIT
1480 mr r4, r9
1481 bl kvmhv_accumulate_time
1482 #endif
1483
1484 mr r3, r12
1485 /* Increment exit count, poke other threads to exit */
1486 bl kvmhv_commence_exit
1487 nop
1488 ld r9, HSTATE_KVM_VCPU(r13)
1489 lwz r12, VCPU_TRAP(r9)
1490
1491 /* Stop others sending VCPU interrupts to this physical CPU */
1492 li r0, -1
1493 stw r0, VCPU_CPU(r9)
1494 stw r0, VCPU_THREAD_CPU(r9)
1495
1496 /* Save guest CTRL register, set runlatch to 1 */
1497 mfspr r6,SPRN_CTRLF
1498 stw r6,VCPU_CTRL(r9)
1499 andi. r0,r6,1
1500 bne 4f
1501 ori r6,r6,1
1502 mtspr SPRN_CTRLT,r6
1503 4:
1504 /* Check if we are running hash or radix and store it in cr2 */
1505 ld r5, VCPU_KVM(r9)
1506 lbz r0, KVM_RADIX(r5)
1507 cmpwi cr2,r0,0
1508
1509 /* Read the guest SLB and save it away */
1510 li r5, 0
1511 bne cr2, 3f /* for radix, save 0 entries */
1512 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
1513 mtctr r0
1514 li r6,0
1515 addi r7,r9,VCPU_SLB
1516 1: slbmfee r8,r6
1517 andis. r0,r8,SLB_ESID_V@h
1518 beq 2f
1519 add r8,r8,r6 /* put index in */
1520 slbmfev r3,r6
1521 std r8,VCPU_SLB_E(r7)
1522 std r3,VCPU_SLB_V(r7)
1523 addi r7,r7,VCPU_SLB_SIZE
1524 addi r5,r5,1
1525 2: addi r6,r6,1
1526 bdnz 1b
1527 3: stw r5,VCPU_SLB_MAX(r9)
1528
1529 /*
1530 * Save the guest PURR/SPURR
1531 */
1532 mfspr r5,SPRN_PURR
1533 mfspr r6,SPRN_SPURR
1534 ld r7,VCPU_PURR(r9)
1535 ld r8,VCPU_SPURR(r9)
1536 std r5,VCPU_PURR(r9)
1537 std r6,VCPU_SPURR(r9)
1538 subf r5,r7,r5
1539 subf r6,r8,r6
1540
1541 /*
1542 * Restore host PURR/SPURR and add guest times
1543 * so that the time in the guest gets accounted.
1544 */
1545 ld r3,HSTATE_PURR(r13)
1546 ld r4,HSTATE_SPURR(r13)
1547 add r3,r3,r5
1548 add r4,r4,r6
1549 mtspr SPRN_PURR,r3
1550 mtspr SPRN_SPURR,r4
1551
1552 /* Save DEC */
1553 ld r3, HSTATE_KVM_VCORE(r13)
1554 mfspr r5,SPRN_DEC
1555 mftb r6
1556 /* On P9, if the guest has large decr enabled, don't sign extend */
1557 BEGIN_FTR_SECTION
1558 ld r4, VCORE_LPCR(r3)
1559 andis. r4, r4, LPCR_LD@h
1560 bne 16f
1561 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1562 extsw r5,r5
1563 16: add r5,r5,r6
1564 /* r5 is a guest timebase value here, convert to host TB */
1565 ld r4,VCORE_TB_OFFSET(r3)
1566 subf r5,r4,r5
1567 std r5,VCPU_DEC_EXPIRES(r9)
1568
1569 BEGIN_FTR_SECTION
1570 b 8f
1571 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
1572 /* Save POWER8-specific registers */
1573 mfspr r5, SPRN_IAMR
1574 mfspr r6, SPRN_PSPB
1575 mfspr r7, SPRN_FSCR
1576 std r5, VCPU_IAMR(r9)
1577 stw r6, VCPU_PSPB(r9)
1578 std r7, VCPU_FSCR(r9)
1579 mfspr r5, SPRN_IC
1580 mfspr r7, SPRN_TAR
1581 std r5, VCPU_IC(r9)
1582 std r7, VCPU_TAR(r9)
1583 mfspr r8, SPRN_EBBHR
1584 std r8, VCPU_EBBHR(r9)
1585 mfspr r5, SPRN_EBBRR
1586 mfspr r6, SPRN_BESCR
1587 mfspr r7, SPRN_PID
1588 mfspr r8, SPRN_WORT
1589 std r5, VCPU_EBBRR(r9)
1590 std r6, VCPU_BESCR(r9)
1591 stw r7, VCPU_GUEST_PID(r9)
1592 std r8, VCPU_WORT(r9)
1593 BEGIN_FTR_SECTION
1594 mfspr r5, SPRN_TCSCR
1595 mfspr r6, SPRN_ACOP
1596 mfspr r7, SPRN_CSIGR
1597 mfspr r8, SPRN_TACR
1598 std r5, VCPU_TCSCR(r9)
1599 std r6, VCPU_ACOP(r9)
1600 std r7, VCPU_CSIGR(r9)
1601 std r8, VCPU_TACR(r9)
1602 FTR_SECTION_ELSE
1603 mfspr r5, SPRN_TIDR
1604 mfspr r6, SPRN_PSSCR
1605 std r5, VCPU_TID(r9)
1606 rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */
1607 rotldi r6, r6, 60
1608 std r6, VCPU_PSSCR(r9)
1609 /* Restore host HFSCR value */
1610 ld r7, STACK_SLOT_HFSCR(r1)
1611 mtspr SPRN_HFSCR, r7
1612 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
1613 /*
1614 * Restore various registers to 0, where non-zero values
1615 * set by the guest could disrupt the host.
1616 */
1617 li r0, 0
1618 mtspr SPRN_PSPB, r0
1619 mtspr SPRN_WORT, r0
1620 BEGIN_FTR_SECTION
1621 mtspr SPRN_IAMR, r0
1622 mtspr SPRN_TCSCR, r0
1623 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
1624 li r0, 1
1625 sldi r0, r0, 31
1626 mtspr SPRN_MMCRS, r0
1627 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
1628 8:
1629
1630 /* Save and reset AMR and UAMOR before turning on the MMU */
1631 mfspr r5,SPRN_AMR
1632 mfspr r6,SPRN_UAMOR
1633 std r5,VCPU_AMR(r9)
1634 std r6,VCPU_UAMOR(r9)
1635 li r6,0
1636 mtspr SPRN_AMR,r6
1637 mtspr SPRN_UAMOR, r6
1638
1639 /* Switch DSCR back to host value */
1640 mfspr r8, SPRN_DSCR
1641 ld r7, HSTATE_DSCR(r13)
1642 std r8, VCPU_DSCR(r9)
1643 mtspr SPRN_DSCR, r7
1644
1645 /* Save non-volatile GPRs */
1646 std r14, VCPU_GPR(R14)(r9)
1647 std r15, VCPU_GPR(R15)(r9)
1648 std r16, VCPU_GPR(R16)(r9)
1649 std r17, VCPU_GPR(R17)(r9)
1650 std r18, VCPU_GPR(R18)(r9)
1651 std r19, VCPU_GPR(R19)(r9)
1652 std r20, VCPU_GPR(R20)(r9)
1653 std r21, VCPU_GPR(R21)(r9)
1654 std r22, VCPU_GPR(R22)(r9)
1655 std r23, VCPU_GPR(R23)(r9)
1656 std r24, VCPU_GPR(R24)(r9)
1657 std r25, VCPU_GPR(R25)(r9)
1658 std r26, VCPU_GPR(R26)(r9)
1659 std r27, VCPU_GPR(R27)(r9)
1660 std r28, VCPU_GPR(R28)(r9)
1661 std r29, VCPU_GPR(R29)(r9)
1662 std r30, VCPU_GPR(R30)(r9)
1663 std r31, VCPU_GPR(R31)(r9)
1664
1665 /* Save SPRGs */
1666 mfspr r3, SPRN_SPRG0
1667 mfspr r4, SPRN_SPRG1
1668 mfspr r5, SPRN_SPRG2
1669 mfspr r6, SPRN_SPRG3
1670 std r3, VCPU_SPRG0(r9)
1671 std r4, VCPU_SPRG1(r9)
1672 std r5, VCPU_SPRG2(r9)
1673 std r6, VCPU_SPRG3(r9)
1674
1675 /* save FP state */
1676 mr r3, r9
1677 bl kvmppc_save_fp
1678
1679 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1680 BEGIN_FTR_SECTION
1681 /*
1682 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
1683 */
1684 bl kvmppc_save_tm
1685 END_FTR_SECTION_IFSET(CPU_FTR_TM)
1686 #endif
1687
1688 /* Increment yield count if they have a VPA */
1689 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1690 cmpdi r8, 0
1691 beq 25f
1692 li r4, LPPACA_YIELDCOUNT
1693 LWZX_BE r3, r8, r4
1694 addi r3, r3, 1
1695 STWX_BE r3, r8, r4
1696 li r3, 1
1697 stb r3, VCPU_VPA_DIRTY(r9)
1698 25:
1699 /* Save PMU registers if requested */
1700 /* r8 and cr0.eq are live here */
1701 BEGIN_FTR_SECTION
1702 /*
1703 * POWER8 seems to have a hardware bug where setting
1704 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
1705 * when some counters are already negative doesn't seem
1706 * to cause a performance monitor alert (and hence interrupt).
1707 * The effect of this is that when saving the PMU state,
1708 * if there is no PMU alert pending when we read MMCR0
1709 * before freezing the counters, but one becomes pending
1710 * before we read the counters, we lose it.
1711 * To work around this, we need a way to freeze the counters
1712 * before reading MMCR0. Normally, freezing the counters
1713 * is done by writing MMCR0 (to set MMCR0[FC]) which
1714  * unavoidably writes MMCR0[PMAO] as well. On POWER8,
1715 * we can also freeze the counters using MMCR2, by writing
1716 * 1s to all the counter freeze condition bits (there are
1717 * 9 bits each for 6 counters).
1718 */
1719 li r3, -1 /* set all freeze bits */
1720 clrrdi r3, r3, 10
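	/* -1 with the low 10 bits cleared sets the 54 freeze-condition bits
	 * (9 bits for each of the 6 counters) described above */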
1721 mfspr r10, SPRN_MMCR2
1722 mtspr SPRN_MMCR2, r3
1723 isync
1724 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1725 li r3, 1
1726 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
1727 mfspr r4, SPRN_MMCR0 /* save MMCR0 */
1728 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
1729 mfspr r6, SPRN_MMCRA
1730 /* Clear MMCRA in order to disable SDAR updates */
1731 li r7, 0
1732 mtspr SPRN_MMCRA, r7
1733 isync
1734 beq 21f /* if no VPA, save PMU stuff anyway */
1735 lbz r7, LPPACA_PMCINUSE(r8)
1736 cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
1737 bne 21f
1738 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
1739 b 22f
1740 21: mfspr r5, SPRN_MMCR1
1741 mfspr r7, SPRN_SIAR
1742 mfspr r8, SPRN_SDAR
1743 std r4, VCPU_MMCR(r9)
1744 std r5, VCPU_MMCR + 8(r9)
1745 std r6, VCPU_MMCR + 16(r9)
1746 BEGIN_FTR_SECTION
1747 std r10, VCPU_MMCR + 24(r9)
1748 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1749 std r7, VCPU_SIAR(r9)
1750 std r8, VCPU_SDAR(r9)
1751 mfspr r3, SPRN_PMC1
1752 mfspr r4, SPRN_PMC2
1753 mfspr r5, SPRN_PMC3
1754 mfspr r6, SPRN_PMC4
1755 mfspr r7, SPRN_PMC5
1756 mfspr r8, SPRN_PMC6
1757 stw r3, VCPU_PMC(r9)
1758 stw r4, VCPU_PMC + 4(r9)
1759 stw r5, VCPU_PMC + 8(r9)
1760 stw r6, VCPU_PMC + 12(r9)
1761 stw r7, VCPU_PMC + 16(r9)
1762 stw r8, VCPU_PMC + 20(r9)
1763 BEGIN_FTR_SECTION
1764 mfspr r5, SPRN_SIER
1765 std r5, VCPU_SIER(r9)
1766 BEGIN_FTR_SECTION_NESTED(96)
1767 mfspr r6, SPRN_SPMC1
1768 mfspr r7, SPRN_SPMC2
1769 mfspr r8, SPRN_MMCRS
1770 stw r6, VCPU_PMC + 24(r9)
1771 stw r7, VCPU_PMC + 28(r9)
1772 std r8, VCPU_MMCR + 32(r9)
1773 lis r4, 0x8000
1774 mtspr SPRN_MMCRS, r4
1775 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
1776 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1777 22:
1778
1779 /* Restore host values of some registers */
1780 BEGIN_FTR_SECTION
1781 ld r5, STACK_SLOT_CIABR(r1)
1782 ld r6, STACK_SLOT_DAWR(r1)
1783 ld r7, STACK_SLOT_DAWRX(r1)
1784 mtspr SPRN_CIABR, r5
1785 mtspr SPRN_DAWR, r6
1786 mtspr SPRN_DAWRX, r7
1787 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1788 BEGIN_FTR_SECTION
1789 ld r5, STACK_SLOT_TID(r1)
1790 ld r6, STACK_SLOT_PSSCR(r1)
1791 ld r7, STACK_SLOT_PID(r1)
1792 ld r8, STACK_SLOT_IAMR(r1)
1793 mtspr SPRN_TIDR, r5
1794 mtspr SPRN_PSSCR, r6
1795 mtspr SPRN_PID, r7
1796 mtspr SPRN_IAMR, r8
1797 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1798
1799 #ifdef CONFIG_PPC_RADIX_MMU
1800 /*
1801 * Are we running hash or radix ?
1802 */
1803 ld r5, VCPU_KVM(r9)
1804 lbz r0, KVM_RADIX(r5)
1805 cmpwi cr2, r0, 0
1806 beq cr2, 3f
1807
1808 /* Radix: Handle the case where the guest used an illegal PID */
1809 LOAD_REG_ADDR(r4, mmu_base_pid)
1810 lwz r3, VCPU_GUEST_PID(r9)
1811 lwz r5, 0(r4)
1812 cmpw cr0,r3,r5
1813 blt 2f
1814
1815 /*
1816 * Illegal PID, the HW might have prefetched and cached in the TLB
1817 * some translations for the LPID 0 / guest PID combination which
1818 * Linux doesn't know about, so we need to flush that PID out of
1819 * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
1820 * the right context.
1821 */
1822 li r0,0
1823 mtspr SPRN_LPID,r0
1824 isync
1825
1826 /* Then do a congruence class local flush */
1827 ld r6,VCPU_KVM(r9)
1828 lwz r0,KVM_TLB_SETS(r6)
1829 mtctr r0
1830 li r7,0x400 /* IS field = 0b01 */
1831 ptesync
1832 sldi r0,r3,32 /* RS has PID */
1833 1: PPC_TLBIEL(7,0,2,1,1) /* RIC=2, PRS=1, R=1 */
1834 addi r7,r7,0x1000
1835 bdnz 1b
1836 ptesync
1837
1838 2: /* Flush the ERAT on radix P9 DD1 guest exit */
1839 BEGIN_FTR_SECTION
1840 PPC_INVALIDATE_ERAT
1841 END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
1842 b 4f
1843 #endif /* CONFIG_PPC_RADIX_MMU */
1844
1845 /* Hash: clear out SLB */
1846 3: li r5,0
1847 slbmte r5,r5
1848 slbia
1849 ptesync
1850 4:
1851 /*
1852 * POWER7/POWER8 guest -> host partition switch code.
1853 * We don't have to lock against tlbies but we do
1854 * have to coordinate the hardware threads.
1855 */
1856 kvmhv_switch_to_host:
1857 /* Secondary threads wait for primary to do partition switch */
1858 ld r5,HSTATE_KVM_VCORE(r13)
1859 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1860 lbz r3,HSTATE_PTID(r13)
1861 cmpwi r3,0
1862 beq 15f
1863 HMT_LOW
1864 13: lbz r3,VCORE_IN_GUEST(r5)
1865 cmpwi r3,0
1866 bne 13b
1867 HMT_MEDIUM
1868 b 16f
1869
1870 /* Primary thread waits for all the secondaries to exit guest */
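	/* VCORE_ENTRY_EXIT is assumed to hold the map of threads that have
	 * entered in its low byte and the map of threads that have exited in
	 * bits 8-15; spin until the two maps match */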
1871 15: lwz r3,VCORE_ENTRY_EXIT(r5)
1872 rlwinm r0,r3,32-8,0xff
1873 clrldi r3,r3,56
1874 cmpw r3,r0
1875 bne 15b
1876 isync
1877
1878 /* Did we actually switch to the guest at all? */
1879 lbz r6, VCORE_IN_GUEST(r5)
1880 cmpwi r6, 0
1881 beq 19f
1882
1883 /* Primary thread switches back to host partition */
1884 lwz r7,KVM_HOST_LPID(r4)
1885 BEGIN_FTR_SECTION
1886 ld r6,KVM_HOST_SDR1(r4)
1887 li r8,LPID_RSVD /* switch to reserved LPID */
1888 mtspr SPRN_LPID,r8
1889 ptesync
1890 mtspr SPRN_SDR1,r6 /* switch to host page table */
1891 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
1892 mtspr SPRN_LPID,r7
1893 isync
1894
1895 BEGIN_FTR_SECTION
1896 /* DPDES and VTB are shared between threads */
1897 mfspr r7, SPRN_DPDES
1898 mfspr r8, SPRN_VTB
1899 std r7, VCORE_DPDES(r5)
1900 std r8, VCORE_VTB(r5)
1901 /* clear DPDES so we don't get guest doorbells in the host */
1902 li r8, 0
1903 mtspr SPRN_DPDES, r8
1904 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1905
1906 /* If HMI, call kvmppc_realmode_hmi_handler() */
1907 cmpwi r12, BOOK3S_INTERRUPT_HMI
1908 bne 27f
1909 bl kvmppc_realmode_hmi_handler
1910 nop
1911 cmpdi r3, 0
1912 li r12, BOOK3S_INTERRUPT_HMI
1913 /*
1914 * At this point kvmppc_realmode_hmi_handler may have resync-ed
1915 * the TB, and if it has, we must not subtract the guest timebase
1916 * offset from the timebase. So, skip it.
1917 *
1918 * Also, do not call kvmppc_subcore_exit_guest() because it has
1919 * been invoked as part of kvmppc_realmode_hmi_handler().
1920 */
1921 beq 30f
1922
1923 27:
1924 /* Subtract timebase offset from timebase */
1925 ld r8,VCORE_TB_OFFSET(r5)
1926 cmpdi r8,0
1927 beq 17f
1928 mftb r6 /* current guest timebase */
1929 subf r8,r8,r6
1930 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
1931 mftb r7 /* check if lower 24 bits overflowed */
1932 clrldi r6,r6,40
1933 clrldi r7,r7,40
1934 cmpld r7,r6
1935 bge 17f
1936 addis r8,r8,0x100 /* if so, increment upper 40 bits */
1937 mtspr SPRN_TBU40,r8
1938
1939 17: bl kvmppc_subcore_exit_guest
1940 nop
1941 30: ld r5,HSTATE_KVM_VCORE(r13)
1942 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1943
1944 /* Reset PCR */
1945 ld r0, VCORE_PCR(r5)
1946 cmpdi r0, 0
1947 beq 18f
1948 li r0, 0
1949 mtspr SPRN_PCR, r0
1950 18:
1951 /* Signal secondary CPUs to continue */
1952 stb r0,VCORE_IN_GUEST(r5)
1953 19: lis r8,0x7fff /* MAX_INT@h */
1954 mtspr SPRN_HDEC,r8
1955
1956 16:
1957 BEGIN_FTR_SECTION
1958 /* On POWER9 with HPT-on-radix we need to wait for all other threads */
1959 ld r3, HSTATE_SPLIT_MODE(r13)
1960 cmpdi r3, 0
1961 beq 47f
1962 lwz r8, KVM_SPLIT_DO_RESTORE(r3)
1963 cmpwi r8, 0
1964 beq 47f
1965 stw r12, STACK_SLOT_TRAP(r1)
1966 bl kvmhv_p9_restore_lpcr
1967 nop
1968 lwz r12, STACK_SLOT_TRAP(r1)
1969 b 48f
1970 47:
1971 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1972 ld r8,KVM_HOST_LPCR(r4)
1973 mtspr SPRN_LPCR,r8
1974 isync
1975 48:
1976 /* load host SLB entries */
1977 BEGIN_MMU_FTR_SECTION
1978 b 0f
1979 END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
1980 ld r8,PACA_SLBSHADOWPTR(r13)
1981
1982 .rept SLB_NUM_BOLTED
1983 li r3, SLBSHADOW_SAVEAREA
1984 LDX_BE r5, r8, r3
1985 addi r3, r3, 8
1986 LDX_BE r6, r8, r3
1987 andis. r7,r5,SLB_ESID_V@h
1988 beq 1f
1989 slbmte r6,r5
1990 1: addi r8,r8,16
1991 .endr
1992 0:
1993 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1994 /* Finish timing, if we have a vcpu */
1995 ld r4, HSTATE_KVM_VCPU(r13)
1996 cmpdi r4, 0
1997 li r3, 0
1998 beq 2f
1999 bl kvmhv_accumulate_time
2000 2:
2001 #endif
2002 /* Unset guest mode */
2003 li r0, KVM_GUEST_MODE_NONE
2004 stb r0, HSTATE_IN_GUEST(r13)
2005
2006 ld r0, SFS+PPC_LR_STKOFF(r1)
2007 addi r1, r1, SFS
2008 mtlr r0
2009 blr
2010
2011 /*
2012 * Check whether an HDSI is an HPTE not found fault or something else.
2013 * If it is an HPTE not found fault that is due to the guest accessing
2014 * a page that they have mapped but which we have paged out, then
2015 * we continue on with the guest exit path. In all other cases,
2016 * reflect the HDSI to the guest as a DSI.
2017 */
2018 kvmppc_hdsi:
2019 ld r3, VCPU_KVM(r9)
2020 lbz r0, KVM_RADIX(r3)
2021 mfspr r4, SPRN_HDAR
2022 mfspr r6, SPRN_HDSISR
2023 BEGIN_FTR_SECTION
2024 /* Look for DSISR canary. If we find it, retry instruction */
2025 cmpdi r6, 0x7fff
2026 beq 6f
2027 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2028 cmpwi r0, 0
2029 bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */
2030 /* HPTE not found fault or protection fault? */
2031 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
2032 beq 1f /* if not, send it to the guest */
2033 andi. r0, r11, MSR_DR /* data relocation enabled? */
2034 beq 3f
2035 BEGIN_FTR_SECTION
2036 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
2037 b 4f
2038 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2039 clrrdi r0, r4, 28
2040 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
2041 li r0, BOOK3S_INTERRUPT_DATA_SEGMENT
2042 bne 7f /* if no SLB entry found */
2043 4: std r4, VCPU_FAULT_DAR(r9)
2044 stw r6, VCPU_FAULT_DSISR(r9)
2045
2046 /* Search the hash table. */
2047 mr r3, r9 /* vcpu pointer */
2048 li r7, 1 /* data fault */
2049 bl kvmppc_hpte_hv_fault
2050 ld r9, HSTATE_KVM_VCPU(r13)
2051 ld r10, VCPU_PC(r9)
2052 ld r11, VCPU_MSR(r9)
2053 li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
2054 cmpdi r3, 0 /* retry the instruction */
2055 beq 6f
2056 cmpdi r3, -1 /* handle in kernel mode */
2057 beq guest_exit_cont
2058 cmpdi r3, -2 /* MMIO emulation; need instr word */
2059 beq 2f
2060
2061 /* Synthesize a DSI (or DSegI) for the guest */
2062 ld r4, VCPU_FAULT_DAR(r9)
2063 mr r6, r3
2064 1: li r0, BOOK3S_INTERRUPT_DATA_STORAGE
2065 mtspr SPRN_DSISR, r6
2066 7: mtspr SPRN_DAR, r4
2067 mtspr SPRN_SRR0, r10
2068 mtspr SPRN_SRR1, r11
2069 mr r10, r0
2070 bl kvmppc_msr_interrupt
2071 fast_interrupt_c_return:
2072 6: ld r7, VCPU_CTR(r9)
2073 ld r8, VCPU_XER(r9)
2074 mtctr r7
2075 mtxer r8
2076 mr r4, r9
2077 b fast_guest_return
2078
2079 3: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */
2080 ld r5, KVM_VRMA_SLB_V(r5)
2081 b 4b
2082
2083 /* If this is for emulated MMIO, load the instruction word */
2084 2: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */
2085
2086 /* Set guest mode to 'jump over instruction' so if lwz faults
2087 * we'll just continue at the next IP. */
2088 li r0, KVM_GUEST_MODE_SKIP
2089 stb r0, HSTATE_IN_GUEST(r13)
2090
2091 /* Do the access with MSR:DR enabled */
2092 mfmsr r3
2093 ori r4, r3, MSR_DR /* Enable paging for data */
2094 mtmsrd r4
2095 lwz r8, 0(r10)
2096 mtmsrd r3
2097
2098 /* Store the result */
2099 stw r8, VCPU_LAST_INST(r9)
2100
2101 /* Unset guest mode. */
2102 li r0, KVM_GUEST_MODE_HOST_HV
2103 stb r0, HSTATE_IN_GUEST(r13)
2104 b guest_exit_cont
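/*
 * What just happened, in hedged pseudo-C: fetch the faulting guest
 * instruction with data relocation on, tolerating a fault on the
 * load (KVM_GUEST_MODE_SKIP makes the interrupt path skip over it):
 *
 *	u32 inst = KVM_INST_FETCH_FAILED;
 *	local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_SKIP;
 *	mtmsrd(msr | MSR_DR);		// data relocation on
 *	inst = *(u32 *)guest_pc;	// may fault; inst then keeps
 *	mtmsrd(msr);			// the "failed" marker
 *	vcpu->arch.last_inst = inst;
 *	local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_HOST_HV;
 */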
2105
2106 .Lradix_hdsi:
2107 std r4, VCPU_FAULT_DAR(r9)
2108 stw r6, VCPU_FAULT_DSISR(r9)
2109 .Lradix_hisi:
2110 mfspr r5, SPRN_ASDR
2111 std r5, VCPU_FAULT_GPA(r9)
2112 b guest_exit_cont
2113
2114 /*
2115 * Similarly for an HISI, reflect it to the guest as an ISI unless
2116 * it is an HPTE not found fault for a page that we have paged out.
2117 */
2118 kvmppc_hisi:
2119 ld r3, VCPU_KVM(r9)
2120 lbz r0, KVM_RADIX(r3)
2121 cmpwi r0, 0
2122 bne .Lradix_hisi /* for radix, just save ASDR */
2123 andis. r0, r11, SRR1_ISI_NOPT@h
2124 beq 1f
2125 andi. r0, r11, MSR_IR /* instruction relocation enabled? */
2126 beq 3f
2127 BEGIN_FTR_SECTION
2128 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
2129 b 4f
2130 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2131 clrrdi r0, r10, 28
2132 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
2133 li r0, BOOK3S_INTERRUPT_INST_SEGMENT
2134 bne 7f /* if no SLB entry found */
2135 4:
2136 /* Search the hash table. */
2137 mr r3, r9 /* vcpu pointer */
2138 mr r4, r10
2139 mr r6, r11
2140 li r7, 0 /* instruction fault */
2141 bl kvmppc_hpte_hv_fault
2142 ld r9, HSTATE_KVM_VCPU(r13)
2143 ld r10, VCPU_PC(r9)
2144 ld r11, VCPU_MSR(r9)
2145 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
2146 cmpdi r3, 0 /* retry the instruction */
2147 beq fast_interrupt_c_return
2148 cmpdi r3, -1 /* handle in kernel mode */
2149 beq guest_exit_cont
2150
2151 /* Synthesize an ISI (or ISegI) for the guest */
2152 mr r11, r3
2153 1: li r0, BOOK3S_INTERRUPT_INST_STORAGE
2154 7: mtspr SPRN_SRR0, r10
2155 mtspr SPRN_SRR1, r11
2156 mr r10, r0
2157 bl kvmppc_msr_interrupt
2158 b fast_interrupt_c_return
2159
2160 3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
2161 ld r5, KVM_VRMA_SLB_V(r6)
2162 b 4b
2163
2164 /*
2165 * Try to handle an hcall in real mode.
2166 * Returns to the guest if we handle it, or continues on up to
2167 * the kernel if we can't (i.e. if we don't have a handler for
2168 * it, or if the handler returns H_TOO_HARD).
2169 *
2170 * r5 - r8 contain hcall args,
2171 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
2172 */
2173 hcall_try_real_mode:
2174 ld r3,VCPU_GPR(R3)(r9)
2175 andi. r0,r11,MSR_PR
2176 /* sc 1 from userspace - reflect to guest syscall */
2177 bne sc_1_fast_return
2178 clrrdi r3,r3,2
2179 cmpldi r3,hcall_real_table_end - hcall_real_table
2180 bge guest_exit_cont
2181 /* See if this hcall is enabled for in-kernel handling */
2182 ld r4, VCPU_KVM(r9)
2183 srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */
2184 sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */
2185 add r4, r4, r0
2186 ld r0, KVM_ENABLED_HCALLS(r4)
2187 rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */
2188 srd r0, r0, r4
2189 andi. r0, r0, 1
2190 beq guest_exit_cont
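/*
 * The enabled-hcall test above in hedged C (the srdi/sldi/rlwinm
 * arithmetic indexes a bitmap of u64 words by hcall number / 4):
 *
 *	static bool hcall_enabled(struct kvm *kvm, unsigned long nr)
 *	{
 *		unsigned long i = nr / 4;	// hcall numbers are multiples of 4
 *		return (kvm->arch.enabled_hcalls[i / 64] >> (i % 64)) & 1;
 *	}
 */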
2191 /* Get pointer to handler, if any, and call it */
2192 LOAD_REG_ADDR(r4, hcall_real_table)
2193 lwax r3,r3,r4
2194 cmpwi r3,0
2195 beq guest_exit_cont
2196 add r12,r3,r4
2197 mtctr r12
2198 mr r3,r9 /* get vcpu pointer */
2199 ld r4,VCPU_GPR(R4)(r9)
2200 bctrl
2201 cmpdi r3,H_TOO_HARD
2202 beq hcall_real_fallback
2203 ld r4,HSTATE_KVM_VCPU(r13)
2204 std r3,VCPU_GPR(R3)(r4)
2205 ld r10,VCPU_PC(r4)
2206 ld r11,VCPU_MSR(r4)
2207 b fast_guest_return
2208
2209 sc_1_fast_return:
2210 mtspr SPRN_SRR0,r10
2211 mtspr SPRN_SRR1,r11
2212 li r10, BOOK3S_INTERRUPT_SYSCALL
2213 bl kvmppc_msr_interrupt
2214 mr r4,r9
2215 b fast_guest_return
2216
2217 /* We've attempted a real mode hcall, but it's been punted back
2218 * to userspace. We need to restore some clobbered volatiles
2219 * before resuming the pass-it-to-qemu path. */
2220 hcall_real_fallback:
2221 li r12,BOOK3S_INTERRUPT_SYSCALL
2222 ld r9, HSTATE_KVM_VCPU(r13)
2223
2224 b guest_exit_cont
2225
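/*
 * Each .long entry below corresponds to hcall number 4*i and holds
 * the handler's offset from hcall_real_table (0 = not handled in
 * real mode). Dispatch, as a hedged C sketch:
 *
 *	s32 off = ((s32 *)hcall_real_table)[nr / 4];
 *	if (off)
 *		handler = (void *)((char *)hcall_real_table + off);
 */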
2226 .globl hcall_real_table
2227 hcall_real_table:
2228 .long 0 /* 0 - unused */
2229 .long DOTSYM(kvmppc_h_remove) - hcall_real_table
2230 .long DOTSYM(kvmppc_h_enter) - hcall_real_table
2231 .long DOTSYM(kvmppc_h_read) - hcall_real_table
2232 .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
2233 .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
2234 .long DOTSYM(kvmppc_h_protect) - hcall_real_table
2235 .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table
2236 .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
2237 .long 0 /* 0x24 - H_SET_SPRG0 */
2238 .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
2239 .long 0 /* 0x2c */
2240 .long 0 /* 0x30 */
2241 .long 0 /* 0x34 */
2242 .long 0 /* 0x38 */
2243 .long 0 /* 0x3c */
2244 .long 0 /* 0x40 */
2245 .long 0 /* 0x44 */
2246 .long 0 /* 0x48 */
2247 .long 0 /* 0x4c */
2248 .long 0 /* 0x50 */
2249 .long 0 /* 0x54 */
2250 .long 0 /* 0x58 */
2251 .long 0 /* 0x5c */
2252 .long 0 /* 0x60 */
2253 #ifdef CONFIG_KVM_XICS
2254 .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
2255 .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
2256 .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
2257 .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
2258 .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
2259 #else
2260 .long 0 /* 0x64 - H_EOI */
2261 .long 0 /* 0x68 - H_CPPR */
2262 .long 0 /* 0x6c - H_IPI */
2263 .long 0 /* 0x70 - H_IPOLL */
2264 .long 0 /* 0x74 - H_XIRR */
2265 #endif
2266 .long 0 /* 0x78 */
2267 .long 0 /* 0x7c */
2268 .long 0 /* 0x80 */
2269 .long 0 /* 0x84 */
2270 .long 0 /* 0x88 */
2271 .long 0 /* 0x8c */
2272 .long 0 /* 0x90 */
2273 .long 0 /* 0x94 */
2274 .long 0 /* 0x98 */
2275 .long 0 /* 0x9c */
2276 .long 0 /* 0xa0 */
2277 .long 0 /* 0xa4 */
2278 .long 0 /* 0xa8 */
2279 .long 0 /* 0xac */
2280 .long 0 /* 0xb0 */
2281 .long 0 /* 0xb4 */
2282 .long 0 /* 0xb8 */
2283 .long 0 /* 0xbc */
2284 .long 0 /* 0xc0 */
2285 .long 0 /* 0xc4 */
2286 .long 0 /* 0xc8 */
2287 .long 0 /* 0xcc */
2288 .long 0 /* 0xd0 */
2289 .long 0 /* 0xd4 */
2290 .long 0 /* 0xd8 */
2291 .long 0 /* 0xdc */
2292 .long DOTSYM(kvmppc_h_cede) - hcall_real_table
2293 .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
2294 .long 0 /* 0xe8 */
2295 .long 0 /* 0xec */
2296 .long 0 /* 0xf0 */
2297 .long 0 /* 0xf4 */
2298 .long 0 /* 0xf8 */
2299 .long 0 /* 0xfc */
2300 .long 0 /* 0x100 */
2301 .long 0 /* 0x104 */
2302 .long 0 /* 0x108 */
2303 .long 0 /* 0x10c */
2304 .long 0 /* 0x110 */
2305 .long 0 /* 0x114 */
2306 .long 0 /* 0x118 */
2307 .long 0 /* 0x11c */
2308 .long 0 /* 0x120 */
2309 .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
2310 .long 0 /* 0x128 */
2311 .long 0 /* 0x12c */
2312 .long 0 /* 0x130 */
2313 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
2314 .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
2315 .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
2316 .long 0 /* 0x140 */
2317 .long 0 /* 0x144 */
2318 .long 0 /* 0x148 */
2319 .long 0 /* 0x14c */
2320 .long 0 /* 0x150 */
2321 .long 0 /* 0x154 */
2322 .long 0 /* 0x158 */
2323 .long 0 /* 0x15c */
2324 .long 0 /* 0x160 */
2325 .long 0 /* 0x164 */
2326 .long 0 /* 0x168 */
2327 .long 0 /* 0x16c */
2328 .long 0 /* 0x170 */
2329 .long 0 /* 0x174 */
2330 .long 0 /* 0x178 */
2331 .long 0 /* 0x17c */
2332 .long 0 /* 0x180 */
2333 .long 0 /* 0x184 */
2334 .long 0 /* 0x188 */
2335 .long 0 /* 0x18c */
2336 .long 0 /* 0x190 */
2337 .long 0 /* 0x194 */
2338 .long 0 /* 0x198 */
2339 .long 0 /* 0x19c */
2340 .long 0 /* 0x1a0 */
2341 .long 0 /* 0x1a4 */
2342 .long 0 /* 0x1a8 */
2343 .long 0 /* 0x1ac */
2344 .long 0 /* 0x1b0 */
2345 .long 0 /* 0x1b4 */
2346 .long 0 /* 0x1b8 */
2347 .long 0 /* 0x1bc */
2348 .long 0 /* 0x1c0 */
2349 .long 0 /* 0x1c4 */
2350 .long 0 /* 0x1c8 */
2351 .long 0 /* 0x1cc */
2352 .long 0 /* 0x1d0 */
2353 .long 0 /* 0x1d4 */
2354 .long 0 /* 0x1d8 */
2355 .long 0 /* 0x1dc */
2356 .long 0 /* 0x1e0 */
2357 .long 0 /* 0x1e4 */
2358 .long 0 /* 0x1e8 */
2359 .long 0 /* 0x1ec */
2360 .long 0 /* 0x1f0 */
2361 .long 0 /* 0x1f4 */
2362 .long 0 /* 0x1f8 */
2363 .long 0 /* 0x1fc */
2364 .long 0 /* 0x200 */
2365 .long 0 /* 0x204 */
2366 .long 0 /* 0x208 */
2367 .long 0 /* 0x20c */
2368 .long 0 /* 0x210 */
2369 .long 0 /* 0x214 */
2370 .long 0 /* 0x218 */
2371 .long 0 /* 0x21c */
2372 .long 0 /* 0x220 */
2373 .long 0 /* 0x224 */
2374 .long 0 /* 0x228 */
2375 .long 0 /* 0x22c */
2376 .long 0 /* 0x230 */
2377 .long 0 /* 0x234 */
2378 .long 0 /* 0x238 */
2379 .long 0 /* 0x23c */
2380 .long 0 /* 0x240 */
2381 .long 0 /* 0x244 */
2382 .long 0 /* 0x248 */
2383 .long 0 /* 0x24c */
2384 .long 0 /* 0x250 */
2385 .long 0 /* 0x254 */
2386 .long 0 /* 0x258 */
2387 .long 0 /* 0x25c */
2388 .long 0 /* 0x260 */
2389 .long 0 /* 0x264 */
2390 .long 0 /* 0x268 */
2391 .long 0 /* 0x26c */
2392 .long 0 /* 0x270 */
2393 .long 0 /* 0x274 */
2394 .long 0 /* 0x278 */
2395 .long 0 /* 0x27c */
2396 .long 0 /* 0x280 */
2397 .long 0 /* 0x284 */
2398 .long 0 /* 0x288 */
2399 .long 0 /* 0x28c */
2400 .long 0 /* 0x290 */
2401 .long 0 /* 0x294 */
2402 .long 0 /* 0x298 */
2403 .long 0 /* 0x29c */
2404 .long 0 /* 0x2a0 */
2405 .long 0 /* 0x2a4 */
2406 .long 0 /* 0x2a8 */
2407 .long 0 /* 0x2ac */
2408 .long 0 /* 0x2b0 */
2409 .long 0 /* 0x2b4 */
2410 .long 0 /* 0x2b8 */
2411 .long 0 /* 0x2bc */
2412 .long 0 /* 0x2c0 */
2413 .long 0 /* 0x2c4 */
2414 .long 0 /* 0x2c8 */
2415 .long 0 /* 0x2cc */
2416 .long 0 /* 0x2d0 */
2417 .long 0 /* 0x2d4 */
2418 .long 0 /* 0x2d8 */
2419 .long 0 /* 0x2dc */
2420 .long 0 /* 0x2e0 */
2421 .long 0 /* 0x2e4 */
2422 .long 0 /* 0x2e8 */
2423 .long 0 /* 0x2ec */
2424 .long 0 /* 0x2f0 */
2425 .long 0 /* 0x2f4 */
2426 .long 0 /* 0x2f8 */
2427 #ifdef CONFIG_KVM_XICS
2428 .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
2429 #else
2430 .long 0 /* 0x2fc - H_XIRR_X*/
2431 #endif
2432 .long DOTSYM(kvmppc_h_random) - hcall_real_table
2433 .globl hcall_real_table_end
2434 hcall_real_table_end:
2435
2436 _GLOBAL(kvmppc_h_set_xdabr)
2437 andi. r0, r5, DABRX_USER | DABRX_KERNEL
2438 beq 6f
2439 li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
2440 andc. r0, r5, r0
2441 beq 3f
2442 6: li r3, H_PARAMETER
2443 blr
2444
2445 _GLOBAL(kvmppc_h_set_dabr)
2446 li r5, DABRX_USER | DABRX_KERNEL
2447 3:
2448 BEGIN_FTR_SECTION
2449 b 2f
2450 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2451 std r4,VCPU_DABR(r3)
2452 stw r5, VCPU_DABRX(r3)
2453 mtspr SPRN_DABRX, r5
2454 /* Work around P7 bug where DABR can get corrupted on mtspr */
2455 1: mtspr SPRN_DABR,r4
2456 mfspr r5, SPRN_DABR
2457 cmpd r4, r5
2458 bne 1b
2459 isync
2460 li r3,0
2461 blr
2462
2463 /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2464 2: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
2465 rlwimi r5, r4, 2, DAWRX_WT
2466 clrrdi r4, r4, 3
2467 std r4, VCPU_DAWR(r3)
2468 std r5, VCPU_DAWRX(r3)
2469 mtspr SPRN_DAWR, r4
2470 mtspr SPRN_DAWRX, r5
2471 li r3, 0
2472 blr
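/*
 * The rlwimi pair above in hedged C (rotl32() stands in for the
 * rlwimi rotate and is not a real kernel helper):
 *
 *	dawrx  = dabrx;
 *	dawrx |= rotl32(dabr, 5) & (DAWRX_DR | DAWRX_DW);
 *	dawrx |= rotl32(dabr, 2) & DAWRX_WT;
 *	dawr   = dabr & ~7UL;	// clear BT/DW/DR to leave the address
 */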
2473
2474 _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
2475 ori r11,r11,MSR_EE
2476 std r11,VCPU_MSR(r3)
2477 li r0,1
2478 stb r0,VCPU_CEDED(r3)
2479 sync /* order setting ceded vs. testing prodded */
2480 lbz r5,VCPU_PRODDED(r3)
2481 cmpwi r5,0
2482 bne kvm_cede_prodded
2483 li r12,0 /* set trap to 0 to say hcall is handled */
2484 stw r12,VCPU_TRAP(r3)
2485 li r0,H_SUCCESS
2486 std r0,VCPU_GPR(R3)(r3)
2487
2488 /*
2489 * Set our bit in the bitmask of napping threads unless all the
2490 * other threads are already napping, in which case we send this
2491 * up to the host.
2492 */
2493 ld r5,HSTATE_KVM_VCORE(r13)
2494 lbz r6,HSTATE_PTID(r13)
2495 lwz r8,VCORE_ENTRY_EXIT(r5)
2496 clrldi r8,r8,56
2497 li r0,1
2498 sld r0,r0,r6
2499 addi r6,r5,VCORE_NAPPING_THREADS
2500 31: lwarx r4,0,r6
2501 or r4,r4,r0
2502 cmpw r4,r8
2503 beq kvm_cede_exit
2504 stwcx. r4,0,r6
2505 bne 31b
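/*
 * The lwarx/stwcx. loop above, sketched in hedged C (entry_map is
 * the low byte of entry_exit, i.e. the set of threads that entered
 * the guest):
 *
 *	u32 entry_map = vc->entry_exit & 0xff;
 *	do {
 *		old = vc->napping_threads;
 *		new = old | (1 << ptid);
 *		if (new == entry_map)		// every thread napping:
 *			goto kvm_cede_exit;	// hand the cede to the host
 *	} while (cmpxchg(&vc->napping_threads, old, new) != old);
 */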
2506 /* order napping_threads update vs testing entry_exit_map */
2507 isync
2508 li r0,NAPPING_CEDE
2509 stb r0,HSTATE_NAPPING(r13)
2510 lwz r7,VCORE_ENTRY_EXIT(r5)
2511 cmpwi r7,0x100
2512 bge 33f /* another thread already exiting */
2513
2514 /*
2515 * Although not specifically required by the architecture, POWER7
2516 * preserves the following registers in nap mode, even if an SMT mode
2517 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
2518 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
2519 */
2520 /* Save non-volatile GPRs */
2521 std r14, VCPU_GPR(R14)(r3)
2522 std r15, VCPU_GPR(R15)(r3)
2523 std r16, VCPU_GPR(R16)(r3)
2524 std r17, VCPU_GPR(R17)(r3)
2525 std r18, VCPU_GPR(R18)(r3)
2526 std r19, VCPU_GPR(R19)(r3)
2527 std r20, VCPU_GPR(R20)(r3)
2528 std r21, VCPU_GPR(R21)(r3)
2529 std r22, VCPU_GPR(R22)(r3)
2530 std r23, VCPU_GPR(R23)(r3)
2531 std r24, VCPU_GPR(R24)(r3)
2532 std r25, VCPU_GPR(R25)(r3)
2533 std r26, VCPU_GPR(R26)(r3)
2534 std r27, VCPU_GPR(R27)(r3)
2535 std r28, VCPU_GPR(R28)(r3)
2536 std r29, VCPU_GPR(R29)(r3)
2537 std r30, VCPU_GPR(R30)(r3)
2538 std r31, VCPU_GPR(R31)(r3)
2539
2540 /* save FP state */
2541 bl kvmppc_save_fp
2542
2543 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2544 BEGIN_FTR_SECTION
2545 /*
2546 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
2547 */
2548 ld r9, HSTATE_KVM_VCPU(r13)
2549 bl kvmppc_save_tm
2550 END_FTR_SECTION_IFSET(CPU_FTR_TM)
2551 #endif
2552
2553 /*
2554 * Set DEC to the smaller of DEC and HDEC, so that we wake
2555 * no later than the end of our timeslice (HDEC interrupts
2556 * don't wake us from nap).
2557 */
2558 mfspr r3, SPRN_DEC
2559 mfspr r4, SPRN_HDEC
2560 mftb r5
2561 BEGIN_FTR_SECTION
2562 /* On P9 check whether the guest has large decrementer mode enabled */
2563 ld r6, HSTATE_KVM_VCORE(r13)
2564 ld r6, VCORE_LPCR(r6)
2565 andis. r6, r6, LPCR_LD@h
2566 bne 68f
2567 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2568 extsw r3, r3
2569 68: EXTEND_HDEC(r4)
2570 cmpd r3, r4
2571 ble 67f
2572 mtspr SPRN_DEC, r4
2573 67:
2574 /* save expiry time of guest decrementer */
2575 add r3, r3, r5
2576 ld r4, HSTATE_KVM_VCPU(r13)
2577 ld r5, HSTATE_KVM_VCORE(r13)
2578 ld r6, VCORE_TB_OFFSET(r5)
2579 subf r3, r6, r3 /* convert to host TB value */
2580 std r3, VCPU_DEC_EXPIRES(r4)
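/*
 * Hedged C equivalent of the decrementer handling above:
 *
 *	mtspr(SPRN_DEC, min(dec, hdec));	// HDEC can't wake us from nap
 *	vcpu->arch.dec_expires = tb + dec - vc->tb_offset;	// host TB units
 */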
2581
2582 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2583 ld r4, HSTATE_KVM_VCPU(r13)
2584 addi r3, r4, VCPU_TB_CEDE
2585 bl kvmhv_accumulate_time
2586 #endif
2587
2588 lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */
2589
2590 /*
2591 * Take a nap until a decrementer, external or doorbell interrupt
2592 * occurs, with PECE1 and PECE0 set in LPCR.
2593 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
2594 * Also clear the runlatch bit before napping.
2595 */
2596 kvm_do_nap:
2597 mfspr r0, SPRN_CTRLF
2598 clrrdi r0, r0, 1
2599 mtspr SPRN_CTRLT, r0
2600
2601 li r0,1
2602 stb r0,HSTATE_HWTHREAD_REQ(r13)
2603 mfspr r5,SPRN_LPCR
2604 ori r5,r5,LPCR_PECE0 | LPCR_PECE1
2605 BEGIN_FTR_SECTION
2606 ori r5, r5, LPCR_PECEDH
2607 rlwimi r5, r3, 0, LPCR_PECEDP
2608 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2609
2610 kvm_nap_sequence: /* desired LPCR value in r5 */
2611 BEGIN_FTR_SECTION
2612 /*
2613 * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset)
2614 * enable state loss = 1 (allow SMT mode switch)
2615 * requested level = 0 (just stop dispatching)
2616 */
2617 lis r3, (PSSCR_EC | PSSCR_ESL)@h
2618 mtspr SPRN_PSSCR, r3
2619 /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
2620 li r4, LPCR_PECE_HVEE@higher
2621 sldi r4, r4, 32
2622 or r5, r5, r4
2623 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2624 mtspr SPRN_LPCR,r5
2625 isync
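/*
 * The store/ptesync/reload/compare sequence below appears intended
 * to force the HSTATE_SCRATCH0 store to be performed and visible
 * before the thread naps; cmpd r0,r0 always compares equal, so the
 * branch never loops - it only creates a dependency on the reload.
 */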
2626 li r0, 0
2627 std r0, HSTATE_SCRATCH0(r13)
2628 ptesync
2629 ld r0, HSTATE_SCRATCH0(r13)
2630 1: cmpd r0, r0
2631 bne 1b
2632 BEGIN_FTR_SECTION
2633 nap
2634 FTR_SECTION_ELSE
2635 PPC_STOP
2636 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
2637 b .
2638
2639 33: mr r4, r3
2640 li r3, 0
2641 li r12, 0
2642 b 34f
2643
2644 kvm_end_cede:
2645 /* get vcpu pointer */
2646 ld r4, HSTATE_KVM_VCPU(r13)
2647
2648 /* Woken by external or decrementer interrupt */
2649 ld r1, HSTATE_HOST_R1(r13)
2650
2651 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2652 addi r3, r4, VCPU_TB_RMINTR
2653 bl kvmhv_accumulate_time
2654 #endif
2655
2656 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2657 BEGIN_FTR_SECTION
2658 /*
2659 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
2660 */
2661 bl kvmppc_restore_tm
2662 END_FTR_SECTION_IFSET(CPU_FTR_TM)
2663 #endif
2664
2665 /* load up FP state */
2666 bl kvmppc_load_fp
2667
2668 /* Restore guest decrementer */
2669 ld r3, VCPU_DEC_EXPIRES(r4)
2670 ld r5, HSTATE_KVM_VCORE(r13)
2671 ld r6, VCORE_TB_OFFSET(r5)
2672 add r3, r3, r6 /* convert host TB to guest TB value */
2673 mftb r7
2674 subf r3, r7, r3
2675 mtspr SPRN_DEC, r3
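/*
 * Mirror of the save on the cede path, in hedged C: convert the
 * saved host-TB expiry back into a guest DEC value.
 *
 *	mtspr(SPRN_DEC, vcpu->arch.dec_expires + vc->tb_offset - mftb());
 */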
2676
2677 /* Load NV GPRS */
2678 ld r14, VCPU_GPR(R14)(r4)
2679 ld r15, VCPU_GPR(R15)(r4)
2680 ld r16, VCPU_GPR(R16)(r4)
2681 ld r17, VCPU_GPR(R17)(r4)
2682 ld r18, VCPU_GPR(R18)(r4)
2683 ld r19, VCPU_GPR(R19)(r4)
2684 ld r20, VCPU_GPR(R20)(r4)
2685 ld r21, VCPU_GPR(R21)(r4)
2686 ld r22, VCPU_GPR(R22)(r4)
2687 ld r23, VCPU_GPR(R23)(r4)
2688 ld r24, VCPU_GPR(R24)(r4)
2689 ld r25, VCPU_GPR(R25)(r4)
2690 ld r26, VCPU_GPR(R26)(r4)
2691 ld r27, VCPU_GPR(R27)(r4)
2692 ld r28, VCPU_GPR(R28)(r4)
2693 ld r29, VCPU_GPR(R29)(r4)
2694 ld r30, VCPU_GPR(R30)(r4)
2695 ld r31, VCPU_GPR(R31)(r4)
2696
2697 /* Check the wake reason in SRR1 to see why we got here */
2698 bl kvmppc_check_wake_reason
2699
2700 /*
2701 * Restore volatile registers, since we could have called a
2702 * C routine in kvmppc_check_wake_reason.
2703 * r4 = vcpu pointer
2704 * r3 tells us whether we need to return to the host or not.
2705 * WARNING: r3 is checked further down, so do not
2706 * modify it until that check is done.
2707 */
2708 ld r4, HSTATE_KVM_VCPU(r13)
2709
2710 /* clear our bit in vcore->napping_threads */
2711 34: ld r5,HSTATE_KVM_VCORE(r13)
2712 lbz r7,HSTATE_PTID(r13)
2713 li r0,1
2714 sld r0,r0,r7
2715 addi r6,r5,VCORE_NAPPING_THREADS
2716 32: lwarx r7,0,r6
2717 andc r7,r7,r0
2718 stwcx. r7,0,r6
2719 bne 32b
2720 li r0,0
2721 stb r0,HSTATE_NAPPING(r13)
2722
2723 /* See if the wake reason saved in r3 means we need to exit */
2724 stw r12, VCPU_TRAP(r4)
2725 mr r9, r4
2726 cmpdi r3, 0
2727 bgt guest_exit_cont
2728
2729 /* see if any other thread is already exiting */
2730 lwz r0,VCORE_ENTRY_EXIT(r5)
2731 cmpwi r0,0x100
2732 bge guest_exit_cont
2733
2734 b kvmppc_cede_reentry /* if not go back to guest */
2735
2736 /* the cede-when-already-prodded case */
2737 kvm_cede_prodded:
2738 li r0,0
2739 stb r0,VCPU_PRODDED(r3)
2740 sync /* order testing prodded vs. clearing ceded */
2741 stb r0,VCPU_CEDED(r3)
2742 li r3,H_SUCCESS
2743 blr
2744
2745 /* we've ceded but we want to give control to the host */
2746 kvm_cede_exit:
2747 ld r9, HSTATE_KVM_VCPU(r13)
2748 b guest_exit_cont
2749
2750 /* Try to handle a machine check in real mode */
2751 machine_check_realmode:
2752 mr r3, r9 /* get vcpu pointer */
2753 bl kvmppc_realmode_machine_check
2754 nop
2755 ld r9, HSTATE_KVM_VCPU(r13)
2756 li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
2757 /*
2758 * For a guest that is FWNMI-capable, deliver all MCE errors
2759 * (handled or unhandled) by exiting the guest with the
2760 * KVM_EXIT_NMI exit reason. This approach injects machine
2761 * check errors into the guest address space with additional
2762 * information in the form of an RTAS event, enabling the
2763 * guest kernel to handle such errors suitably.
2764 *
2765 * For a guest that is not FWNMI-capable (old QEMU), fall back
2766 * to the old behaviour for backward compatibility:
2767 * - deliver unhandled/fatal (e.g. UE) MCE errors to the guest
2768 * through a machine check interrupt (set HSRR0 to 0x200);
2769 * - for handled (non-fatal) errors, just go back to guest
2770 * execution with the current HSRR0;
2771 * - if we receive a machine check with MSR[RI]=0, deliver it
2772 * to the guest as a machine check, causing the guest to crash.
2773 */
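/*
 * The checks below, as a hedged pseudo-C decision tree:
 *
 *	if (guest_msr & MSR_HV)
 *		exit to host;			// MC taken in HV mode
 *	else if (kvm->arch.fwnmi_enabled)
 *		exit to host as KVM_EXIT_NMI;
 *	else if (!(guest_msr & MSR_RI) || !handled)
 *		deliver a machine check to the guest;
 *	else
 *		resume guest execution;
 */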
2774 ld r11, VCPU_MSR(r9)
2775 rldicl. r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
2776 bne mc_cont /* if so, exit to host */
2777 /* Check if guest is capable of handling NMI exit */
2778 ld r10, VCPU_KVM(r9)
2779 lbz r10, KVM_FWNMI(r10)
2780 cmpdi r10, 1 /* FWNMI capable? */
2781 beq mc_cont /* if so, exit with KVM_EXIT_NMI. */
2782
2783 /* if not, fall through for backward compatibility. */
2784 andi. r10, r11, MSR_RI /* check for unrecoverable exception */
2785 beq 1f /* Deliver a machine check to guest */
2786 ld r10, VCPU_PC(r9)
2787 cmpdi r3, 0 /* Did we handle MCE ? */
2788 bne 2f /* Continue guest execution. */
2789 /* If not, deliver a machine check. SRR0/1 are already set */
2790 1: li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
2791 bl kvmppc_msr_interrupt
2792 2: b fast_interrupt_c_return
2793
2794 /*
2795 * Check the reason we woke from nap, and take appropriate action.
2796 * Returns (in r3):
2797 * 0 if nothing needs to be done
2798 * 1 if something happened that needs to be handled by the host
2799 * -1 if there was a guest wakeup (IPI or msgsnd)
2800 * -2 if we handled a PCI passthrough interrupt (returned by
2801 * kvmppc_read_intr only)
2802 *
2803 * Also sets r12 to the interrupt vector for any interrupt that needs
2804 * to be handled now by the host (0x500 for external interrupt), or zero.
2805 * Modifies all volatile registers (since it may call a C function).
2806 * This routine calls kvmppc_read_intr, a C function, if an external
2807 * interrupt is pending.
2808 */
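/*
 * Sketched in hedged C (wake_reason stands for the SRR1 field
 * extracted below; the case values mirror the cmpwi tests):
 *
 *	switch (wake_reason) {
 *	case 6:  return 0;			// decrementer
 *	case 5:  return 0;			// privileged doorbell (P8)
 *	case 3:  msgclr();			// hypervisor doorbell (P8)
 *		 return host_ipi ? 1 : -1;
 *	case 8:  r12 = BOOK3S_INTERRUPT_EXTERNAL;
 *		 return kvmppc_read_intr();	// 2 => r12 = HV_RM_HARD
 *	case 10: r12 = BOOK3S_INTERRUPT_HMI;
 *		 return 1;
 *	default: return 1;
 *	}
 */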
2809 kvmppc_check_wake_reason:
2810 mfspr r6, SPRN_SRR1
2811 BEGIN_FTR_SECTION
2812 rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */
2813 FTR_SECTION_ELSE
2814 rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */
2815 ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
2816 cmpwi r6, 8 /* was it an external interrupt? */
2817 beq 7f /* if so, see what it was */
2818 li r3, 0
2819 li r12, 0
2820 cmpwi r6, 6 /* was it the decrementer? */
2821 beq 0f
2822 BEGIN_FTR_SECTION
2823 cmpwi r6, 5 /* privileged doorbell? */
2824 beq 0f
2825 cmpwi r6, 3 /* hypervisor doorbell? */
2826 beq 3f
2827 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2828 cmpwi r6, 0xa /* Hypervisor maintenance? */
2829 beq 4f
2830 li r3, 1 /* anything else, return 1 */
2831 0: blr
2832
2833 /* hypervisor doorbell */
2834 3: li r12, BOOK3S_INTERRUPT_H_DOORBELL
2835
2836 /*
2837 * Clear the doorbell as we will invoke the handler
2838 * explicitly in the guest exit path.
2839 */
2840 lis r6, (PPC_DBELL_SERVER << (63-36))@h
2841 PPC_MSGCLR(6)
2842 /* see if it's a host IPI */
2843 li r3, 1
2844 BEGIN_FTR_SECTION
2845 PPC_MSGSYNC
2846 lwsync
2847 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2848 lbz r0, HSTATE_HOST_IPI(r13)
2849 cmpwi r0, 0
2850 bnelr
2851 /* if not, return -1 */
2852 li r3, -1
2853 blr
2854
2855 /* Woken up due to Hypervisor maintenance interrupt */
2856 4: li r12, BOOK3S_INTERRUPT_HMI
2857 li r3, 1
2858 blr
2859
2860 /* external interrupt - create a stack frame so we can call C */
2861 7: mflr r0
2862 std r0, PPC_LR_STKOFF(r1)
2863 stdu r1, -PPC_MIN_STKFRM(r1)
2864 bl kvmppc_read_intr
2865 nop
2866 li r12, BOOK3S_INTERRUPT_EXTERNAL
2867 cmpdi r3, 1
2868 ble 1f
2869
2870 /*
2871 * A return code of 2 means a PCI passthrough interrupt, but
2872 * we need to return to the host to complete handling the
2873 * interrupt. The guest exit code expects the trap reason
2874 * in r12.
2875 */
2876 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
2877 1:
2878 ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
2879 addi r1, r1, PPC_MIN_STKFRM
2880 mtlr r0
2881 blr
2882
2883 /*
2884 * Save away FP, VMX and VSX registers.
2885 * r3 = vcpu pointer
2886 * N.B. r30 and r31 are volatile across this function,
2887 * thus it is not callable from C.
2888 */
2889 kvmppc_save_fp:
2890 mflr r30
2891 mr r31,r3
2892 mfmsr r5
2893 ori r8,r5,MSR_FP
2894 #ifdef CONFIG_ALTIVEC
2895 BEGIN_FTR_SECTION
2896 oris r8,r8,MSR_VEC@h
2897 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2898 #endif
2899 #ifdef CONFIG_VSX
2900 BEGIN_FTR_SECTION
2901 oris r8,r8,MSR_VSX@h
2902 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2903 #endif
2904 mtmsrd r8
2905 addi r3,r3,VCPU_FPRS
2906 bl store_fp_state
2907 #ifdef CONFIG_ALTIVEC
2908 BEGIN_FTR_SECTION
2909 addi r3,r31,VCPU_VRS
2910 bl store_vr_state
2911 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2912 #endif
2913 mfspr r6,SPRN_VRSAVE
2914 stw r6,VCPU_VRSAVE(r31)
2915 mtlr r30
2916 blr
2917
2918 /*
2919 * Load up FP, VMX and VSX registers
2920 * r4 = vcpu pointer
2921 * N.B. r30 and r31 are volatile across this function,
2922 * thus it is not callable from C.
2923 */
2924 kvmppc_load_fp:
2925 mflr r30
2926 mr r31,r4
2927 mfmsr r9
2928 ori r8,r9,MSR_FP
2929 #ifdef CONFIG_ALTIVEC
2930 BEGIN_FTR_SECTION
2931 oris r8,r8,MSR_VEC@h
2932 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2933 #endif
2934 #ifdef CONFIG_VSX
2935 BEGIN_FTR_SECTION
2936 oris r8,r8,MSR_VSX@h
2937 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2938 #endif
2939 mtmsrd r8
2940 addi r3,r4,VCPU_FPRS
2941 bl load_fp_state
2942 #ifdef CONFIG_ALTIVEC
2943 BEGIN_FTR_SECTION
2944 addi r3,r31,VCPU_VRS
2945 bl load_vr_state
2946 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2947 #endif
2948 lwz r7,VCPU_VRSAVE(r31)
2949 mtspr SPRN_VRSAVE,r7
2950 mtlr r30
2951 mr r4,r31
2952 blr
2953
2954 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2955 /*
2956 * Save transactional state and TM-related registers.
2957 * Called with r9 pointing to the vcpu struct.
2958 * This can modify all checkpointed registers, but
2959 * restores r1, r2 and r9 (vcpu pointer) before exit.
2960 */
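/*
 * Hedged pseudo-C outline of the routine below (the PACA/scratch
 * register juggling around TRECLAIM is elided):
 *
 *	mtmsrd(mfmsr() | MSR_TM);
 *	if (MSR_TS(guest_msr)) {		// transaction active
 *		treclaim(TM_CAUSE_KVM_RESCHED);	// GPRs now checkpointed
 *		save checkpointed GPRs, FP/VMX/VSX,
 *		     LR/CR/CTR/AMR/TAR/XER, PPR and DSCR;
 *	}
 *	save TFHAR/TFIAR/TEXASR;	// always, and after the treclaim
 */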
2961 kvmppc_save_tm:
2962 mflr r0
2963 std r0, PPC_LR_STKOFF(r1)
2964
2965 /* Turn on TM. */
2966 mfmsr r8
2967 li r0, 1
2968 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
2969 mtmsrd r8
2970
2971 ld r5, VCPU_MSR(r9)
2972 rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
2973 beq 1f /* TM not active in guest. */
2974
2975 std r1, HSTATE_HOST_R1(r13)
2976 li r3, TM_CAUSE_KVM_RESCHED
2977
2978 /* Clear the MSR RI since r1, r13 are all going to be foobar. */
2979 li r5, 0
2980 mtmsrd r5, 1
2981
2982 /* All GPRs are volatile at this point. */
2983 TRECLAIM(R3)
2984
2985 /* Temporarily store r13 and r9 so we have some regs to play with */
2986 SET_SCRATCH0(r13)
2987 GET_PACA(r13)
2988 std r9, PACATMSCRATCH(r13)
2989 ld r9, HSTATE_KVM_VCPU(r13)
2990
2991 /* Get a few more GPRs free. */
2992 std r29, VCPU_GPRS_TM(29)(r9)
2993 std r30, VCPU_GPRS_TM(30)(r9)
2994 std r31, VCPU_GPRS_TM(31)(r9)
2995
2996 /* Save away PPR and DSCR soon so we don't run with user values. */
2997 mfspr r31, SPRN_PPR
2998 HMT_MEDIUM
2999 mfspr r30, SPRN_DSCR
3000 ld r29, HSTATE_DSCR(r13)
3001 mtspr SPRN_DSCR, r29
3002
3003 /* Save all but r9, r13 & r29-r31 */
3004 reg = 0
3005 .rept 29
3006 .if (reg != 9) && (reg != 13)
3007 std reg, VCPU_GPRS_TM(reg)(r9)
3008 .endif
3009 reg = reg + 1
3010 .endr
3011 /* ... now save r13 */
3012 GET_SCRATCH0(r4)
3013 std r4, VCPU_GPRS_TM(13)(r9)
3014 /* ... and save r9 */
3015 ld r4, PACATMSCRATCH(r13)
3016 std r4, VCPU_GPRS_TM(9)(r9)
3017
3018 /* Reload stack pointer and TOC. */
3019 ld r1, HSTATE_HOST_R1(r13)
3020 ld r2, PACATOC(r13)
3021
3022 /* Set MSR RI now we have r1 and r13 back. */
3023 li r5, MSR_RI
3024 mtmsrd r5, 1
3025
3026 /* Save away checkpointed SPRs. */
3027 std r31, VCPU_PPR_TM(r9)
3028 std r30, VCPU_DSCR_TM(r9)
3029 mflr r5
3030 mfcr r6
3031 mfctr r7
3032 mfspr r8, SPRN_AMR
3033 mfspr r10, SPRN_TAR
3034 mfxer r11
3035 std r5, VCPU_LR_TM(r9)
3036 stw r6, VCPU_CR_TM(r9)
3037 std r7, VCPU_CTR_TM(r9)
3038 std r8, VCPU_AMR_TM(r9)
3039 std r10, VCPU_TAR_TM(r9)
3040 std r11, VCPU_XER_TM(r9)
3041
3042 /* Restore r12 as trap number. */
3043 lwz r12, VCPU_TRAP(r9)
3044
3045 /* Save FP/VSX. */
3046 addi r3, r9, VCPU_FPRS_TM
3047 bl store_fp_state
3048 addi r3, r9, VCPU_VRS_TM
3049 bl store_vr_state
3050 mfspr r6, SPRN_VRSAVE
3051 stw r6, VCPU_VRSAVE_TM(r9)
3052 1:
3053 /*
3054 * We need to save these SPRs after the treclaim so that the software
3055 * error code is recorded correctly in the TEXASR. Also the user may
3056 * change these outside of a transaction, so they must always be
3057 * context switched.
3058 */
3059 mfspr r5, SPRN_TFHAR
3060 mfspr r6, SPRN_TFIAR
3061 mfspr r7, SPRN_TEXASR
3062 std r5, VCPU_TFHAR(r9)
3063 std r6, VCPU_TFIAR(r9)
3064 std r7, VCPU_TEXASR(r9)
3065
3066 ld r0, PPC_LR_STKOFF(r1)
3067 mtlr r0
3068 blr
3069
3070 /*
3071 * Restore transactional state and TM-related registers.
3072 * Called with r4 pointing to the vcpu struct.
3073 * This potentially modifies all checkpointed registers.
3074 * It restores r1, r2, r4 from the PACA.
3075 */
3076 kvmppc_restore_tm:
3077 mflr r0
3078 std r0, PPC_LR_STKOFF(r1)
3079
3080 /* Turn on TM/FP/VSX/VMX so we can restore them. */
3081 mfmsr r5
3082 li r6, MSR_TM >> 32
3083 sldi r6, r6, 32
3084 or r5, r5, r6
3085 ori r5, r5, MSR_FP
3086 oris r5, r5, (MSR_VEC | MSR_VSX)@h
3087 mtmsrd r5
3088
3089 /*
3090 * The user may change these outside of a transaction, so they must
3091 * always be context switched.
3092 */
3093 ld r5, VCPU_TFHAR(r4)
3094 ld r6, VCPU_TFIAR(r4)
3095 ld r7, VCPU_TEXASR(r4)
3096 mtspr SPRN_TFHAR, r5
3097 mtspr SPRN_TFIAR, r6
3098 mtspr SPRN_TEXASR, r7
3099
3100 ld r5, VCPU_MSR(r4)
3101 rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
3102 beqlr /* TM not active in guest */
3103 std r1, HSTATE_HOST_R1(r13)
3104
3105 /* Make sure the failure summary is set; otherwise we'll take a
3106 * program check when we trechkpt. It might not have been set by
3107 * a kvmppc_set_one_reg() call, but we shouldn't let that crash
3108 * the host.
3109 */
3110 oris r7, r7, (TEXASR_FS)@h
3111 mtspr SPRN_TEXASR, r7
3112
3113 /*
3114 * We need to load up the checkpointed state for the guest.
3115 * We need to do this early as it will blow away any GPRs, VSRs and
3116 * some SPRs.
3117 */
3118
3119 mr r31, r4
3120 addi r3, r31, VCPU_FPRS_TM
3121 bl load_fp_state
3122 addi r3, r31, VCPU_VRS_TM
3123 bl load_vr_state
3124 mr r4, r31
3125 lwz r7, VCPU_VRSAVE_TM(r4)
3126 mtspr SPRN_VRSAVE, r7
3127
3128 ld r5, VCPU_LR_TM(r4)
3129 lwz r6, VCPU_CR_TM(r4)
3130 ld r7, VCPU_CTR_TM(r4)
3131 ld r8, VCPU_AMR_TM(r4)
3132 ld r9, VCPU_TAR_TM(r4)
3133 ld r10, VCPU_XER_TM(r4)
3134 mtlr r5
3135 mtcr r6
3136 mtctr r7
3137 mtspr SPRN_AMR, r8
3138 mtspr SPRN_TAR, r9
3139 mtxer r10
3140
3141 /*
3142 * Load up PPR and DSCR values but don't put them in the actual SPRs
3143 * till the last moment to avoid running with userspace PPR and DSCR for
3144 * too long.
3145 */
3146 ld r29, VCPU_DSCR_TM(r4)
3147 ld r30, VCPU_PPR_TM(r4)
3148
3149 std r2, PACATMSCRATCH(r13) /* Save TOC */
3150
3151 /* Clear the MSR RI since r1, r13 are all going to be foobar. */
3152 li r5, 0
3153 mtmsrd r5, 1
3154
3155 /* Load GPRs r0-r28 */
3156 reg = 0
3157 .rept 29
3158 ld reg, VCPU_GPRS_TM(reg)(r31)
3159 reg = reg + 1
3160 .endr
3161
3162 mtspr SPRN_DSCR, r29
3163 mtspr SPRN_PPR, r30
3164
3165 /* Load final GPRs */
3166 ld r29, VCPU_GPRS_TM(29)(r31)
3167 ld r30, VCPU_GPRS_TM(30)(r31)
3168 ld r31, VCPU_GPRS_TM(31)(r31)
3169
3170 /* TM checkpointed state is now set up. All GPRs are now volatile. */
3171 TRECHKPT
3172
3173 /* Now let's get back the state we need. */
3174 HMT_MEDIUM
3175 GET_PACA(r13)
3176 ld r29, HSTATE_DSCR(r13)
3177 mtspr SPRN_DSCR, r29
3178 ld r4, HSTATE_KVM_VCPU(r13)
3179 ld r1, HSTATE_HOST_R1(r13)
3180 ld r2, PACATMSCRATCH(r13)
3181
3182 /* Set the MSR RI since we have our registers back. */
3183 li r5, MSR_RI
3184 mtmsrd r5, 1
3185
3186 ld r0, PPC_LR_STKOFF(r1)
3187 mtlr r0
3188 blr
3189 #endif
3190
3191 /*
3192 * We come here if we get any exception or interrupt while we are
3193 * executing host real mode code while in guest MMU context.
3194 * r12 is (CR << 32) | vector
3195 * r13 points to our PACA
3196 * r12 is saved in HSTATE_SCRATCH0(r13)
3197 * ctr is saved in HSTATE_SCRATCH1(r13) if RELOCATABLE
3198 * r9 is saved in HSTATE_SCRATCH2(r13)
3199 * r13 is saved in HSPRG1
3200 * cfar is saved in HSTATE_CFAR(r13)
3201 * ppr is saved in HSTATE_PPR(r13)
3202 */
3203 kvmppc_bad_host_intr:
3204 /*
3205 * Switch to the emergency stack, but start half-way down in
3206 * case we were already on it.
3207 */
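/*
 * I.e., approximately (hedged sketch):
 *	r1 = paca->emergency_sp - THREAD_SIZE/2 - INT_FRAME_SIZE;
 */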
3208 mr r9, r1
3209 std r1, PACAR1(r13)
3210 ld r1, PACAEMERGSP(r13)
3211 subi r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE
3212 std r9, 0(r1)
3213 std r0, GPR0(r1)
3214 std r9, GPR1(r1)
3215 std r2, GPR2(r1)
3216 SAVE_4GPRS(3, r1)
3217 SAVE_2GPRS(7, r1)
3218 srdi r0, r12, 32
3219 clrldi r12, r12, 32
3220 std r0, _CCR(r1)
3221 std r12, _TRAP(r1)
3222 andi. r0, r12, 2
3223 beq 1f
3224 mfspr r3, SPRN_HSRR0
3225 mfspr r4, SPRN_HSRR1
3226 mfspr r5, SPRN_HDAR
3227 mfspr r6, SPRN_HDSISR
3228 b 2f
3229 1: mfspr r3, SPRN_SRR0
3230 mfspr r4, SPRN_SRR1
3231 mfspr r5, SPRN_DAR
3232 mfspr r6, SPRN_DSISR
3233 2: std r3, _NIP(r1)
3234 std r4, _MSR(r1)
3235 std r5, _DAR(r1)
3236 std r6, _DSISR(r1)
3237 ld r9, HSTATE_SCRATCH2(r13)
3238 ld r12, HSTATE_SCRATCH0(r13)
3239 GET_SCRATCH0(r0)
3240 SAVE_4GPRS(9, r1)
3241 std r0, GPR13(r1)
3242 SAVE_NVGPRS(r1)
3243 ld r5, HSTATE_CFAR(r13)
3244 std r5, ORIG_GPR3(r1)
3245 mflr r3
3246 #ifdef CONFIG_RELOCATABLE
3247 ld r4, HSTATE_SCRATCH1(r13)
3248 #else
3249 mfctr r4
3250 #endif
3251 mfxer r5
3252 lbz r6, PACAIRQSOFTMASK(r13)
3253 std r3, _LINK(r1)
3254 std r4, _CTR(r1)
3255 std r5, _XER(r1)
3256 std r6, SOFTE(r1)
3257 ld r2, PACATOC(r13)
3258 LOAD_REG_IMMEDIATE(3, 0x7265677368657265) /* spells "regshere" */
3259 std r3, STACK_FRAME_OVERHEAD-16(r1)
3260
3261 /*
3262 * On POWER9 do a minimal restore of the MMU and call C code,
3263 * which will print a message and panic.
3264 * XXX On POWER7 and POWER8, we just spin here since we don't
3265 * know what the other threads are doing (and we don't want to
3266 * coordinate with them) - but at least we now have register state
3267 * in memory that we might be able to look at from another CPU.
3268 */
3269 BEGIN_FTR_SECTION
3270 b .
3271 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
3272 ld r9, HSTATE_KVM_VCPU(r13)
3273 ld r10, VCPU_KVM(r9)
3274
3275 li r0, 0
3276 mtspr SPRN_AMR, r0
3277 mtspr SPRN_IAMR, r0
3278 mtspr SPRN_CIABR, r0
3279 mtspr SPRN_DAWRX, r0
3280
3281 /* Flush the ERAT on radix P9 DD1 guest exit */
3282 BEGIN_FTR_SECTION
3283 PPC_INVALIDATE_ERAT
3284 END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
3285
3286 BEGIN_MMU_FTR_SECTION
3287 b 4f
3288 END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
3289
3290 slbmte r0, r0
3291 slbia
3292 ptesync
3293 ld r8, PACA_SLBSHADOWPTR(r13)
3294 .rept SLB_NUM_BOLTED
3295 li r3, SLBSHADOW_SAVEAREA
3296 LDX_BE r5, r8, r3
3297 addi r3, r3, 8
3298 LDX_BE r6, r8, r3
3299 andis. r7, r5, SLB_ESID_V@h
3300 beq 3f
3301 slbmte r6, r5
3302 3: addi r8, r8, 16
3303 .endr
3304
3305 4: lwz r7, KVM_HOST_LPID(r10)
3306 mtspr SPRN_LPID, r7
3307 mtspr SPRN_PID, r0
3308 ld r8, KVM_HOST_LPCR(r10)
3309 mtspr SPRN_LPCR, r8
3310 isync
3311 li r0, KVM_GUEST_MODE_NONE
3312 stb r0, HSTATE_IN_GUEST(r13)
3313
3314 /*
3315 * Turn on the MMU and jump to C code
3316 */
3317 bcl 20, 31, .+4
3318 5: mflr r3
3319 addi r3, r3, 9f - 5b
3320 ld r4, PACAKMSR(r13)
3321 mtspr SPRN_SRR0, r3
3322 mtspr SPRN_SRR1, r4
3323 RFI_TO_KERNEL
3324 9: addi r3, r1, STACK_FRAME_OVERHEAD
3325 bl kvmppc_bad_interrupt
3326 b 9b
3327
3328 /*
3329 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
3330 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
3331 * r11 has the guest MSR value (in/out)
3332 * r9 has a vcpu pointer (in)
3333 * r0 is used as a scratch register
3334 */
3335 kvmppc_msr_interrupt:
3336 rldicl r0, r11, 64 - MSR_TS_S_LG, 62
3337 cmpwi r0, 2 /* Check if we are in transactional state.. */
3338 ld r11, VCPU_INTR_MSR(r9)
3339 bne 1f
3340 /* ... if transactional, change to suspended */
3341 li r0, 1
3342 1: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
3343 blr
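/*
 * Equivalent hedged C (MSR_TS()/MSR_TS_SET() are illustrative
 * accessors for the two MSR[TS] bits, not real kernel macros):
 *
 *	ts = MSR_TS(guest_msr);		// 0 = none, 1 = suspended, 2 = transactional
 *	new_msr = vcpu->arch.intr_msr;
 *	MSR_TS_SET(new_msr, ts == 2 ? 1 : ts);	// transactional -> suspended
 */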
3344
3345 /*
3346 * This works around a hardware bug on POWER8E processors, where
3347 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
3348 * performance monitor interrupt. Instead, when we need to have
3349 * an interrupt pending, we have to arrange for a counter to overflow.
3350 */
3351 kvmppc_fix_pmao:
3352 li r3, 0
3353 mtspr SPRN_MMCR2, r3
3354 lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h
3355 ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
3356 mtspr SPRN_MMCR0, r3
3357 lis r3, 0x7fff
3358 ori r3, r3, 0xffff
3359 mtspr SPRN_PMC6, r3
3360 isync
3361 blr
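/*
 * In hedged C: program PMC6 one count below overflow so the next
 * cycle generates the pending PMI that MMCR0[PMAO] fails to on P8E:
 *
 *	mtspr(SPRN_MMCR2, 0);
 *	mtspr(SPRN_MMCR0, MMCR0_PMXE | MMCR0_FCECE | MMCR0_PMCjCE | MMCR0_C56RUN);
 *	mtspr(SPRN_PMC6, 0x7fffffff);
 */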
3362
3363 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
3364 /*
3365 * Start timing an activity
3366 * r3 = pointer to time accumulation struct, r4 = vcpu
3367 */
3368 kvmhv_start_timing:
3369 ld r5, HSTATE_KVM_VCORE(r13)
3370 lbz r6, VCORE_IN_GUEST(r5)
3371 cmpwi r6, 0
3372 beq 5f /* if in guest, need to */
3373 ld r6, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
3374 5: mftb r5
3375 subf r5, r6, r5
3376 std r3, VCPU_CUR_ACTIVITY(r4)
3377 std r5, VCPU_ACTIVITY_START(r4)
3378 blr
3379
3380 /*
3381 * Accumulate time to one activity and start another.
3382 * r3 = pointer to new time accumulation struct, r4 = vcpu
3383 */
3384 kvmhv_accumulate_time:
3385 ld r5, HSTATE_KVM_VCORE(r13)
3386 lbz r8, VCORE_IN_GUEST(r5)
3387 cmpwi r8, 0
3388 beq 4f /* if in guest, need to */
3389 ld r8, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
3390 4: ld r5, VCPU_CUR_ACTIVITY(r4)
3391 ld r6, VCPU_ACTIVITY_START(r4)
3392 std r3, VCPU_CUR_ACTIVITY(r4)
3393 mftb r7
3394 subf r7, r8, r7
3395 std r7, VCPU_ACTIVITY_START(r4)
3396 cmpdi r5, 0
3397 beqlr
3398 subf r3, r6, r7
3399 ld r8, TAS_SEQCOUNT(r5)
3400 cmpdi r8, 0
3401 addi r8, r8, 1
3402 std r8, TAS_SEQCOUNT(r5)
3403 lwsync
3404 ld r7, TAS_TOTAL(r5)
3405 add r7, r7, r3
3406 std r7, TAS_TOTAL(r5)
3407 ld r6, TAS_MIN(r5)
3408 ld r7, TAS_MAX(r5)
3409 beq 3f
3410 cmpd r3, r6
3411 bge 1f
3412 3: std r3, TAS_MIN(r5)
3413 1: cmpd r3, r7
3414 ble 2f
3415 std r3, TAS_MAX(r5)
3416 2: lwsync
3417 addi r8, r8, 1
3418 std r8, TAS_SEQCOUNT(r5)
3419 blr
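/*
 * The TAS_SEQCOUNT bumps above form a small seqlock so readers
 * never see a torn total/min/max; sketched in hedged C (lwsync
 * shown as smp_wmb(); a zero seqcount marks the first sample,
 * which initializes min unconditionally):
 *
 *	acc->seqcount++;		// odd: update in progress
 *	smp_wmb();
 *	acc->tb_total += delta;
 *	if (first_sample || delta < acc->tb_min)
 *		acc->tb_min = delta;
 *	if (delta > acc->tb_max)
 *		acc->tb_max = delta;
 *	smp_wmb();
 *	acc->seqcount++;		// even: stable again
 */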
3420 #endif