// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corporation, 2018
 * Authors Suraj Jitindar Singh <sjitindarsingh@gmail.com>
 *	   Paul Mackerras <paulus@ozlabs.org>
 *
 * Description: KVM functions specific to running nested KVM-HV guests
 * on Book3S processors (specifically POWER9 and later).
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/llist.h>
#include <linux/pgtable.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/reg.h>
#include <asm/plpar_wrappers.h>

static struct patb_entry *pseries_partition_tb;

static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp);
static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free);

void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	hr->pcr = vc->pcr | PCR_MASK;
	hr->dpdes = vc->dpdes;
	hr->hfscr = vcpu->arch.hfscr;
	hr->tb_offset = vc->tb_offset;
	hr->dawr0 = vcpu->arch.dawr0;
	hr->dawrx0 = vcpu->arch.dawrx0;
	hr->ciabr = vcpu->arch.ciabr;
	hr->purr = vcpu->arch.purr;
	hr->spurr = vcpu->arch.spurr;
	hr->ic = vcpu->arch.ic;
	hr->vtb = vc->vtb;
	hr->srr0 = vcpu->arch.shregs.srr0;
	hr->srr1 = vcpu->arch.shregs.srr1;
	hr->sprg[0] = vcpu->arch.shregs.sprg0;
	hr->sprg[1] = vcpu->arch.shregs.sprg1;
	hr->sprg[2] = vcpu->arch.shregs.sprg2;
	hr->sprg[3] = vcpu->arch.shregs.sprg3;
	hr->pidr = vcpu->arch.pid;
	hr->cfar = vcpu->arch.cfar;
	hr->ppr = vcpu->arch.ppr;
	hr->dawr1 = vcpu->arch.dawr1;
	hr->dawrx1 = vcpu->arch.dawrx1;
}

/* Use noinline_for_stack due to https://bugs.llvm.org/show_bug.cgi?id=49610 */
static noinline_for_stack void byteswap_pt_regs(struct pt_regs *regs)
{
	unsigned long *addr = (unsigned long *) regs;

	for (; addr < ((unsigned long *) (regs + 1)); addr++)
		*addr = swab64(*addr);
}

static void byteswap_hv_regs(struct hv_guest_state *hr)
{
	hr->version = swab64(hr->version);
	hr->lpid = swab32(hr->lpid);
	hr->vcpu_token = swab32(hr->vcpu_token);
	hr->lpcr = swab64(hr->lpcr);
	hr->pcr = swab64(hr->pcr) | PCR_MASK;
	hr->amor = swab64(hr->amor);
	hr->dpdes = swab64(hr->dpdes);
	hr->hfscr = swab64(hr->hfscr);
	hr->tb_offset = swab64(hr->tb_offset);
	hr->dawr0 = swab64(hr->dawr0);
	hr->dawrx0 = swab64(hr->dawrx0);
	hr->ciabr = swab64(hr->ciabr);
	hr->hdec_expiry = swab64(hr->hdec_expiry);
	hr->purr = swab64(hr->purr);
	hr->spurr = swab64(hr->spurr);
	hr->ic = swab64(hr->ic);
	hr->vtb = swab64(hr->vtb);
	hr->hdar = swab64(hr->hdar);
	hr->hdsisr = swab64(hr->hdsisr);
	hr->heir = swab64(hr->heir);
	hr->asdr = swab64(hr->asdr);
	hr->srr0 = swab64(hr->srr0);
	hr->srr1 = swab64(hr->srr1);
	hr->sprg[0] = swab64(hr->sprg[0]);
	hr->sprg[1] = swab64(hr->sprg[1]);
	hr->sprg[2] = swab64(hr->sprg[2]);
	hr->sprg[3] = swab64(hr->sprg[3]);
	hr->pidr = swab64(hr->pidr);
	hr->cfar = swab64(hr->cfar);
	hr->ppr = swab64(hr->ppr);
	hr->dawr1 = swab64(hr->dawr1);
	hr->dawrx1 = swab64(hr->dawrx1);
}
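
/*
 * Note: the byteswap helpers above exist because the L1 hypervisor may run
 * with the opposite endianness to the L0 host; kvmppc_need_byteswap() is
 * what decides whether the hv_guest_state and pt_regs images passed in by
 * L1 must be converted before use and before being written back.
 */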

static void save_hv_return_state(struct kvm_vcpu *vcpu,
				 struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	hr->dpdes = vc->dpdes;
	hr->purr = vcpu->arch.purr;
	hr->spurr = vcpu->arch.spurr;
	hr->ic = vcpu->arch.ic;
	hr->vtb = vc->vtb;
	hr->srr0 = vcpu->arch.shregs.srr0;
	hr->srr1 = vcpu->arch.shregs.srr1;
	hr->sprg[0] = vcpu->arch.shregs.sprg0;
	hr->sprg[1] = vcpu->arch.shregs.sprg1;
	hr->sprg[2] = vcpu->arch.shregs.sprg2;
	hr->sprg[3] = vcpu->arch.shregs.sprg3;
	hr->pidr = vcpu->arch.pid;
	hr->cfar = vcpu->arch.cfar;
	hr->ppr = vcpu->arch.ppr;
	switch (vcpu->arch.trap) {
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		hr->hdar = vcpu->arch.fault_dar;
		hr->hdsisr = vcpu->arch.fault_dsisr;
		hr->asdr = vcpu->arch.fault_gpa;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		hr->asdr = vcpu->arch.fault_gpa;
		break;
	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
		hr->hfscr = ((~HFSCR_INTR_CAUSE & hr->hfscr) |
			     (HFSCR_INTR_CAUSE & vcpu->arch.hfscr));
		break;
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		hr->heir = vcpu->arch.emul_inst;
		break;
	}
}

static void restore_hv_regs(struct kvm_vcpu *vcpu, const struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	vc->pcr = hr->pcr | PCR_MASK;
	vc->dpdes = hr->dpdes;
	vcpu->arch.hfscr = hr->hfscr;
	vcpu->arch.dawr0 = hr->dawr0;
	vcpu->arch.dawrx0 = hr->dawrx0;
	vcpu->arch.ciabr = hr->ciabr;
	vcpu->arch.purr = hr->purr;
	vcpu->arch.spurr = hr->spurr;
	vcpu->arch.ic = hr->ic;
	vc->vtb = hr->vtb;
	vcpu->arch.shregs.srr0 = hr->srr0;
	vcpu->arch.shregs.srr1 = hr->srr1;
	vcpu->arch.shregs.sprg0 = hr->sprg[0];
	vcpu->arch.shregs.sprg1 = hr->sprg[1];
	vcpu->arch.shregs.sprg2 = hr->sprg[2];
	vcpu->arch.shregs.sprg3 = hr->sprg[3];
	vcpu->arch.pid = hr->pidr;
	vcpu->arch.cfar = hr->cfar;
	vcpu->arch.ppr = hr->ppr;
	vcpu->arch.dawr1 = hr->dawr1;
	vcpu->arch.dawrx1 = hr->dawrx1;
}

void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
				   struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	vc->dpdes = hr->dpdes;
	vcpu->arch.hfscr = hr->hfscr;
	vcpu->arch.purr = hr->purr;
	vcpu->arch.spurr = hr->spurr;
	vcpu->arch.ic = hr->ic;
	vc->vtb = hr->vtb;
	vcpu->arch.fault_dar = hr->hdar;
	vcpu->arch.fault_dsisr = hr->hdsisr;
	vcpu->arch.fault_gpa = hr->asdr;
	vcpu->arch.emul_inst = hr->heir;
	vcpu->arch.shregs.srr0 = hr->srr0;
	vcpu->arch.shregs.srr1 = hr->srr1;
	vcpu->arch.shregs.sprg0 = hr->sprg[0];
	vcpu->arch.shregs.sprg1 = hr->sprg[1];
	vcpu->arch.shregs.sprg2 = hr->sprg[2];
	vcpu->arch.shregs.sprg3 = hr->sprg[3];
	vcpu->arch.pid = hr->pidr;
	vcpu->arch.cfar = hr->cfar;
	vcpu->arch.ppr = hr->ppr;
}

static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr)
{
	/* No need to reflect the page fault to L1, we've handled it */
	vcpu->arch.trap = 0;

	/*
	 * Since the L2 gprs have already been written back into L1 memory when
	 * we complete the mmio, store the L1 memory location of the L2 gpr
	 * being loaded into by the mmio so that the loaded value can be
	 * written there in kvmppc_complete_mmio_load()
	 */
	if (((vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) == KVM_MMIO_REG_GPR)
	    && (vcpu->mmio_is_write == 0)) {
		vcpu->arch.nested_io_gpr = (gpa_t) regs_ptr +
					   offsetof(struct pt_regs,
						    gpr[vcpu->arch.io_gpr]);
		vcpu->arch.io_gpr = KVM_MMIO_REG_NESTED_GPR;
	}
}

static int kvmhv_read_guest_state_and_regs(struct kvm_vcpu *vcpu,
					   struct hv_guest_state *l2_hv,
					   struct pt_regs *l2_regs,
					   u64 hv_ptr, u64 regs_ptr)
{
	int size;

	if (kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv->version,
				sizeof(l2_hv->version)))
		return -1;

	if (kvmppc_need_byteswap(vcpu))
		l2_hv->version = swab64(l2_hv->version);

	size = hv_guest_state_size(l2_hv->version);
	if (size < 0)
		return -1;

	return kvm_vcpu_read_guest(vcpu, hv_ptr, l2_hv, size) ||
		kvm_vcpu_read_guest(vcpu, regs_ptr, l2_regs,
				    sizeof(struct pt_regs));
}

static int kvmhv_write_guest_state_and_regs(struct kvm_vcpu *vcpu,
					    struct hv_guest_state *l2_hv,
					    struct pt_regs *l2_regs,
					    u64 hv_ptr, u64 regs_ptr)
{
	int size;

	size = hv_guest_state_size(l2_hv->version);
	if (size < 0)
		return -1;

	return kvm_vcpu_write_guest(vcpu, hv_ptr, l2_hv, size) ||
		kvm_vcpu_write_guest(vcpu, regs_ptr, l2_regs,
				     sizeof(struct pt_regs));
}

static void load_l2_hv_regs(struct kvm_vcpu *vcpu,
			    const struct hv_guest_state *l2_hv,
			    const struct hv_guest_state *l1_hv, u64 *lpcr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 mask;

	restore_hv_regs(vcpu, l2_hv);

	/*
	 * Don't let L1 change LPCR bits for the L2 except these:
	 */
	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD |
		LPCR_LPES | LPCR_MER;

	/*
	 * Additional filtering is required depending on hardware
	 * and configuration.
	 */
	*lpcr = kvmppc_filter_lpcr_hv(vcpu->kvm,
				      (vc->lpcr & ~mask) | (*lpcr & mask));

	/*
	 * Don't let L1 enable features for L2 which we don't allow for L1,
	 * but preserve the interrupt cause field.
	 */
	vcpu->arch.hfscr = l2_hv->hfscr & (HFSCR_INTR_CAUSE | vcpu->arch.hfscr_permitted);

	/* Don't let data address watchpoint match in hypervisor state */
	vcpu->arch.dawrx0 = l2_hv->dawrx0 & ~DAWRX_HYP;
	vcpu->arch.dawrx1 = l2_hv->dawrx1 & ~DAWRX_HYP;

	/* Don't let completed instruction address breakpt match in HV state */
	if ((l2_hv->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
		vcpu->arch.ciabr = l2_hv->ciabr & ~CIABR_PRIV;
}
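
/*
 * Design note: only the LPCR bits in 'mask' above are taken from the L2
 * state supplied by L1; all other bits come from L1's own vcore LPCR, and
 * the merged value is still passed through kvmppc_filter_lpcr_hv() so the
 * host can veto features the hardware or configuration does not support.
 */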

long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
{
	long int err, r;
	struct kvm_nested_guest *l2;
	struct pt_regs l2_regs, saved_l1_regs;
	struct hv_guest_state l2_hv = {0}, saved_l1_hv;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 hv_ptr, regs_ptr;
	u64 hdec_exp, lpcr;
	s64 delta_purr, delta_spurr, delta_ic, delta_vtb;

	if (vcpu->kvm->arch.l1_ptcr == 0)
		return H_NOT_AVAILABLE;

	if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
		return H_BAD_MODE;

	/* copy parameters in */
	hv_ptr = kvmppc_get_gpr(vcpu, 4);
	regs_ptr = kvmppc_get_gpr(vcpu, 5);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	err = kvmhv_read_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
					      hv_ptr, regs_ptr);
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	if (err)
		return H_PARAMETER;

	if (kvmppc_need_byteswap(vcpu))
		byteswap_hv_regs(&l2_hv);
	if (l2_hv.version > HV_GUEST_STATE_VERSION)
		return H_P2;

	if (kvmppc_need_byteswap(vcpu))
		byteswap_pt_regs(&l2_regs);
	if (l2_hv.vcpu_token >= NR_CPUS)
		return H_PARAMETER;

	/*
	 * L1 must have set up a suspended state to enter the L2 in a
	 * transactional state, and only in that case. These have to be
	 * filtered out here to prevent causing a TM Bad Thing in the
	 * host HRFID. We could synthesize a TM Bad Thing back to the L1
	 * here but there doesn't seem like much point.
	 */
	if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) {
		if (!MSR_TM_ACTIVE(l2_regs.msr))
			return H_BAD_MODE;
	} else {
		if (l2_regs.msr & MSR_TS_MASK)
			return H_BAD_MODE;
		if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_TS_MASK))
			return H_BAD_MODE;
	}

	/* translate lpid */
	l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
	if (!l2)
		return H_PARAMETER;
	if (!l2->l1_gr_to_hr) {
		mutex_lock(&l2->tlb_lock);
		kvmhv_update_ptbl_cache(l2);
		mutex_unlock(&l2->tlb_lock);
	}

	/* save l1 values of things */
	vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
	saved_l1_regs = vcpu->arch.regs;
	kvmhv_save_hv_regs(vcpu, &saved_l1_hv);

	/* convert TB values/offsets to host (L0) values */
	hdec_exp = l2_hv.hdec_expiry - vc->tb_offset;
	vc->tb_offset += l2_hv.tb_offset;

	/* set L1 state to L2 state */
	vcpu->arch.nested = l2;
	vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
	vcpu->arch.nested_hfscr = l2_hv.hfscr;
	vcpu->arch.regs = l2_regs;

	/* Guest must always run with ME enabled, HV disabled. */
	vcpu->arch.shregs.msr = (vcpu->arch.regs.msr | MSR_ME) & ~MSR_HV;

	lpcr = l2_hv.lpcr;
	load_l2_hv_regs(vcpu, &l2_hv, &saved_l1_hv, &lpcr);

	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;
	do {
		if (mftb() >= hdec_exp) {
			vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
			r = RESUME_HOST;
			break;
		}
		r = kvmhv_run_single_vcpu(vcpu, hdec_exp, lpcr);
	} while (is_kvmppc_resume_guest(r));

	/* save L2 state for return */
	l2_regs = vcpu->arch.regs;
	l2_regs.msr = vcpu->arch.shregs.msr;
	delta_purr = vcpu->arch.purr - l2_hv.purr;
	delta_spurr = vcpu->arch.spurr - l2_hv.spurr;
	delta_ic = vcpu->arch.ic - l2_hv.ic;
	delta_vtb = vc->vtb - l2_hv.vtb;
	save_hv_return_state(vcpu, &l2_hv);

	/* restore L1 state */
	vcpu->arch.nested = NULL;
	vcpu->arch.regs = saved_l1_regs;
	vcpu->arch.shregs.msr = saved_l1_regs.msr & ~MSR_TS_MASK;
	/* set L1 MSR TS field according to L2 transaction state */
	if (l2_regs.msr & MSR_TS_MASK)
		vcpu->arch.shregs.msr |= MSR_TS_S;
	vc->tb_offset = saved_l1_hv.tb_offset;
	restore_hv_regs(vcpu, &saved_l1_hv);
	vcpu->arch.purr += delta_purr;
	vcpu->arch.spurr += delta_spurr;
	vcpu->arch.ic += delta_ic;
	vc->vtb += delta_vtb;

	kvmhv_put_nested(l2);

	/* copy l2_hv_state and regs back to guest */
	if (kvmppc_need_byteswap(vcpu)) {
		byteswap_hv_regs(&l2_hv);
		byteswap_pt_regs(&l2_regs);
	}
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	err = kvmhv_write_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
					       hv_ptr, regs_ptr);
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	if (err)
		return H_AUTHORITY;

	if (r == -EINTR)
		return H_INTERRUPT;

	if (vcpu->mmio_needed) {
		kvmhv_nested_mmio_needed(vcpu, regs_ptr);
		return H_TOO_HARD;
	}

	return vcpu->arch.trap;
}
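
/*
 * Summary of the flow implemented above:
 *   1. copy the hv_guest_state and pt_regs images from L1 memory (r4/r5);
 *   2. sanity-check the version, vcpu token and TM state;
 *   3. save the L1 register state and switch the vcpu to the L2 state
 *      (including the timebase offset and the filtered LPCR);
 *   4. run the L2 vcpu until an exit that has to be handled by L1;
 *   5. restore the L1 state, accumulate the PURR/SPURR/IC/VTB deltas, and
 *      copy the updated L2 state back to L1 memory.
 * The return value is the trap number for L1 to handle, or an hcall error
 * such as H_PARAMETER / H_AUTHORITY when the state copies fail.
 */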

long kvmhv_nested_init(void)
{
	long int ptb_order;
	unsigned long ptcr;
	long rc;

	if (!kvmhv_on_pseries())
		return 0;
	if (!radix_enabled())
		return -ENODEV;

	/* find log base 2 of KVMPPC_NR_LPIDS, rounding up */
	ptb_order = __ilog2(KVMPPC_NR_LPIDS - 1) + 1;
	if (ptb_order < 8)
		ptb_order = 8;
	pseries_partition_tb = kmalloc(sizeof(struct patb_entry) << ptb_order,
				       GFP_KERNEL);
	if (!pseries_partition_tb) {
		pr_err("kvm-hv: failed to allocate nested partition table\n");
		return -ENOMEM;
	}

	ptcr = __pa(pseries_partition_tb) | (ptb_order - 8);
	rc = plpar_hcall_norets(H_SET_PARTITION_TABLE, ptcr);
	if (rc != H_SUCCESS) {
		pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n",
		       rc);
		kfree(pseries_partition_tb);
		pseries_partition_tb = NULL;
		return -ENODEV;
	}

	return 0;
}
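
/*
 * Worked example of the PTCR encoding above (a sketch based on the code
 * rather than the architecture document): each patb_entry is 16 bytes, so a
 * table of 2^ptb_order entries is 2^(ptb_order + 4) bytes. The PTCR size
 * field holds log2(table size) - 12, which is ptb_order + 4 - 12 =
 * ptb_order - 8, matching the "| (ptb_order - 8)" used when registering
 * the table with the parent hypervisor.
 */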

void kvmhv_nested_exit(void)
{
	/*
	 * N.B. the kvmhv_on_pseries() test is there because it enables
	 * the compiler to remove the call to plpar_hcall_norets()
	 * when CONFIG_PPC_PSERIES=n.
	 */
	if (kvmhv_on_pseries() && pseries_partition_tb) {
		plpar_hcall_norets(H_SET_PARTITION_TABLE, 0);
		kfree(pseries_partition_tb);
		pseries_partition_tb = NULL;
	}
}

static void kvmhv_flush_lpid(unsigned int lpid)
{
	long rc;

	if (!kvmhv_on_pseries()) {
		radix__flush_all_lpid(lpid);
		return;
	}

	if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE))
		rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
					lpid, TLBIEL_INVAL_SET_LPID);
	else
		rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
					    H_RPTI_TYPE_NESTED |
					    H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
					    H_RPTI_TYPE_PAT,
					    H_RPTI_PAGE_ALL, 0, -1UL);
	if (rc)
		pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
}

void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1)
{
	if (!kvmhv_on_pseries()) {
		mmu_partition_table_set_entry(lpid, dw0, dw1, true);
		return;
	}

	pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
	/* L0 will do the necessary barriers */
	kvmhv_flush_lpid(lpid);
}

static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
{
	unsigned long dw0;

	dw0 = PATB_HR | radix__get_tree_size() |
		__pa(gp->shadow_pgtable) | RADIX_PGD_INDEX_SIZE;
	kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table);
}

void kvmhv_vm_nested_init(struct kvm *kvm)
{
	kvm->arch.max_nested_lpid = -1;
}

/*
 * Handle the H_SET_PARTITION_TABLE hcall.
 * r4 = guest real address of partition table + log_2(size) - 12
 * (formatted as for the PTCR).
 */
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long ptcr = kvmppc_get_gpr(vcpu, 4);
	int srcu_idx;
	long ret = H_SUCCESS;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	/*
	 * Limit the partition table to 4096 entries (because that's what
	 * hardware supports), and check the base address.
	 */
	if ((ptcr & PRTS_MASK) > 12 - 8 ||
	    !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT))
		ret = H_PARAMETER;
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	if (ret == H_SUCCESS)
		kvm->arch.l1_ptcr = ptcr;

	return ret;
}
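
/*
 * The "(ptcr & PRTS_MASK) > 12 - 8" check above is the 4096-entry limit
 * from the comment: a PRTS value of s means a table of 2^(s + 12) bytes,
 * i.e. 2^(s + 8) 16-byte entries, so s <= 4 corresponds to at most
 * 2^12 = 4096 entries.
 */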

/*
 * Handle the H_COPY_TOFROM_GUEST hcall.
 * r4 = L1 lpid of nested guest
 * r5 = pid
 * r6 = eaddr to access
 * r7 = to buffer (L1 gpa)
 * r8 = from buffer (L1 gpa)
 * r9 = n bytes to copy
 */
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
{
	struct kvm_nested_guest *gp;
	int l1_lpid = kvmppc_get_gpr(vcpu, 4);
	int pid = kvmppc_get_gpr(vcpu, 5);
	gva_t eaddr = kvmppc_get_gpr(vcpu, 6);
	gpa_t gp_to = (gpa_t) kvmppc_get_gpr(vcpu, 7);
	gpa_t gp_from = (gpa_t) kvmppc_get_gpr(vcpu, 8);
	void *buf;
	unsigned long n = kvmppc_get_gpr(vcpu, 9);
	bool is_load = !!gp_to;
	long rc;

	if (gp_to && gp_from) /* One must be NULL to determine the direction */
		return H_PARAMETER;

	if (eaddr & (0xFFFUL << 52))
		return H_PARAMETER;

	buf = kzalloc(n, GFP_KERNEL | __GFP_NOWARN);
	if (!buf)
		return H_NO_MEM;

	gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false);
	if (!gp) {
		rc = H_PARAMETER;
		goto out_free;
	}

	mutex_lock(&gp->tlb_lock);

	if (is_load) {
		/* Load from the nested guest into our buffer */
		rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
						     eaddr, buf, NULL, n);
		if (rc)
			goto not_found;

		/* Write what was loaded into our buffer back to the L1 guest */
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n);
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		if (rc)
			goto not_found;
	} else {
		/* Load the data to be stored from the L1 guest into our buf */
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n);
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		if (rc)
			goto not_found;

		/* Store from our buffer into the nested guest */
		rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
						     eaddr, NULL, buf, n);
		if (rc)
			goto not_found;
	}

out_unlock:
	mutex_unlock(&gp->tlb_lock);
	kvmhv_put_nested(gp);
out_free:
	kfree(buf);
	return rc;
not_found:
	rc = H_NOT_FOUND;
	goto out_unlock;
}

/*
 * Reload the partition table entry for a guest.
 * Caller must hold gp->tlb_lock.
 */
static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
{
	int ret;
	struct patb_entry ptbl_entry;
	unsigned long ptbl_addr;
	struct kvm *kvm = gp->l1_host;

	ret = -EFAULT;
	ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
	if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8))) {
		int srcu_idx = srcu_read_lock(&kvm->srcu);
		ret = kvm_read_guest(kvm, ptbl_addr,
				     &ptbl_entry, sizeof(ptbl_entry));
		srcu_read_unlock(&kvm->srcu, srcu_idx);
	}
	if (ret) {
		gp->l1_gr_to_hr = 0;
		gp->process_table = 0;
	} else {
		gp->l1_gr_to_hr = be64_to_cpu(ptbl_entry.patb0);
		gp->process_table = be64_to_cpu(ptbl_entry.patb1);
	}
	kvmhv_set_nested_ptbl(gp);
}
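
/*
 * Note: the "gp->l1_lpid << 4" above indexes the L1 partition table in
 * 16-byte entries (two doublewords per entry), and the bounds check against
 * (l1_ptcr & PRTS_MASK) + 8 mirrors the entry-count arithmetic used in
 * kvmhv_set_partition_table().
 */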

static struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
{
	struct kvm_nested_guest *gp;
	long shadow_lpid;

	gp = kzalloc(sizeof(*gp), GFP_KERNEL);
	if (!gp)
		return NULL;
	gp->l1_host = kvm;
	gp->l1_lpid = lpid;
	mutex_init(&gp->tlb_lock);
	gp->shadow_pgtable = pgd_alloc(kvm->mm);
	if (!gp->shadow_pgtable)
		goto out_free;
	shadow_lpid = kvmppc_alloc_lpid();
	if (shadow_lpid < 0)
		goto out_free2;
	gp->shadow_lpid = shadow_lpid;
	gp->radix = 1;

	memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu));

	return gp;

 out_free2:
	pgd_free(kvm->mm, gp->shadow_pgtable);
 out_free:
	kfree(gp);
	return NULL;
}

/*
 * Free up any resources allocated for a nested guest.
 */
static void kvmhv_release_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;

	if (gp->shadow_pgtable) {
		/*
		 * No vcpu is using this struct and no call to
		 * kvmhv_get_nested can find this struct,
		 * so we don't need to hold kvm->mmu_lock.
		 */
		kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
					  gp->shadow_lpid);
		pgd_free(kvm->mm, gp->shadow_pgtable);
	}
	kvmhv_set_ptbl_entry(gp->shadow_lpid, 0, 0);
	kvmppc_free_lpid(gp->shadow_lpid);
	kfree(gp);
}

static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;
	int lpid = gp->l1_lpid;
	long ref;

	spin_lock(&kvm->mmu_lock);
	if (gp == kvm->arch.nested_guests[lpid]) {
		kvm->arch.nested_guests[lpid] = NULL;
		if (lpid == kvm->arch.max_nested_lpid) {
			while (--lpid >= 0 && !kvm->arch.nested_guests[lpid])
				;
			kvm->arch.max_nested_lpid = lpid;
		}
		--gp->refcnt;
	}
	ref = gp->refcnt;
	spin_unlock(&kvm->mmu_lock);
	if (ref == 0)
		kvmhv_release_nested(gp);
}

/*
 * Free up all nested resources allocated for this guest.
 * This is called with no vcpus of the guest running, when
 * switching the guest to HPT mode or when destroying the
 * guest.
 */
void kvmhv_release_all_nested(struct kvm *kvm)
{
	int i;
	struct kvm_nested_guest *gp;
	struct kvm_nested_guest *freelist = NULL;
	struct kvm_memory_slot *memslot;
	int srcu_idx;

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
		gp = kvm->arch.nested_guests[i];
		if (!gp)
			continue;
		kvm->arch.nested_guests[i] = NULL;
		if (--gp->refcnt == 0) {
			gp->next = freelist;
			freelist = gp;
		}
	}
	kvm->arch.max_nested_lpid = -1;
	spin_unlock(&kvm->mmu_lock);
	while ((gp = freelist) != NULL) {
		freelist = gp->next;
		kvmhv_release_nested(gp);
	}

	srcu_idx = srcu_read_lock(&kvm->srcu);
	kvm_for_each_memslot(memslot, kvm_memslots(kvm))
		kvmhv_free_memslot_nest_rmap(memslot);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

/* caller must hold gp->tlb_lock */
static void kvmhv_flush_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;

	spin_lock(&kvm->mmu_lock);
	kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid);
	spin_unlock(&kvm->mmu_lock);
	kvmhv_flush_lpid(gp->shadow_lpid);
	kvmhv_update_ptbl_cache(gp);
	if (gp->l1_gr_to_hr == 0)
		kvmhv_remove_nested(gp);
}

struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
					  bool create)
{
	struct kvm_nested_guest *gp, *newgp;

	if (l1_lpid >= KVM_MAX_NESTED_GUESTS ||
	    l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
		return NULL;

	spin_lock(&kvm->mmu_lock);
	gp = kvm->arch.nested_guests[l1_lpid];
	if (gp)
		++gp->refcnt;
	spin_unlock(&kvm->mmu_lock);

	if (gp || !create)
		return gp;

	newgp = kvmhv_alloc_nested(kvm, l1_lpid);
	if (!newgp)
		return NULL;
	spin_lock(&kvm->mmu_lock);
	if (kvm->arch.nested_guests[l1_lpid]) {
		/* someone else beat us to it */
		gp = kvm->arch.nested_guests[l1_lpid];
	} else {
		kvm->arch.nested_guests[l1_lpid] = newgp;
		++newgp->refcnt;
		gp = newgp;
		newgp = NULL;
		if (l1_lpid > kvm->arch.max_nested_lpid)
			kvm->arch.max_nested_lpid = l1_lpid;
	}
	++gp->refcnt;
	spin_unlock(&kvm->mmu_lock);

	if (newgp)
		kvmhv_release_nested(newgp);

	return gp;
}
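
/*
 * Usage note: kvmhv_get_nested() returns with the nested guest's refcount
 * elevated; every successful caller must balance it with kvmhv_put_nested(),
 * as the callers elsewhere in this file do.
 */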

void kvmhv_put_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;
	long ref;

	spin_lock(&kvm->mmu_lock);
	ref = --gp->refcnt;
	spin_unlock(&kvm->mmu_lock);
	if (ref == 0)
		kvmhv_release_nested(gp);
}

static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
{
	if (lpid > kvm->arch.max_nested_lpid)
		return NULL;
	return kvm->arch.nested_guests[lpid];
}

pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
				 unsigned long ea, unsigned *hshift)
{
	struct kvm_nested_guest *gp;
	pte_t *pte;

	gp = kvmhv_find_nested(kvm, lpid);
	if (!gp)
		return NULL;

	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
		"%s called with kvm mmu_lock not held \n", __func__);
	pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift);

	return pte;
}

static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2)
{
	return !((rmap_1 ^ rmap_2) & (RMAP_NESTED_LPID_MASK |
				      RMAP_NESTED_GPA_MASK));
}

void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
			    struct rmap_nested **n_rmap)
{
	struct llist_node *entry = ((struct llist_head *) rmapp)->first;
	struct rmap_nested *cursor;
	u64 rmap, new_rmap = (*n_rmap)->rmap;

	/* Are there any existing entries? */
	if (!(*rmapp)) {
		/* No -> use the rmap as a single entry */
		*rmapp = new_rmap | RMAP_NESTED_IS_SINGLE_ENTRY;
		return;
	}

	/* Do any entries match what we're trying to insert? */
	for_each_nest_rmap_safe(cursor, entry, &rmap) {
		if (kvmhv_n_rmap_is_equal(rmap, new_rmap))
			return;
	}

	/* Do we need to create a list or just add the new entry? */
	rmap = *rmapp;
	if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
		*rmapp = 0UL;
	llist_add(&((*n_rmap)->list), (struct llist_head *) rmapp);
	if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
		(*n_rmap)->list.next = (struct llist_node *) rmap;

	/* Set NULL so not freed by caller */
	*n_rmap = NULL;
}
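
/*
 * Design note: a memslot rmap word either holds a single nest-rmap value
 * directly (tagged with RMAP_NESTED_IS_SINGLE_ENTRY) or the head of an
 * llist of rmap_nested entries. The insert path above converts the
 * single-entry form into a list by pushing the new entry and chaining the
 * old value behind it, and clears *n_rmap so the caller knows ownership of
 * the allocation has been transferred.
 */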

static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
				      unsigned long clr, unsigned long set,
				      unsigned long hpa, unsigned long mask)
{
	unsigned long gpa;
	unsigned int shift, lpid;
	pte_t *ptep;

	gpa = n_rmap & RMAP_NESTED_GPA_MASK;
	lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;

	/* Find the pte */
	ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
	/*
	 * If the pte is present and the pfn is still the same, update the pte.
	 * If the pfn has changed then this is a stale rmap entry, the nested
	 * gpa actually points somewhere else now, and there is nothing to do.
	 * XXX A future optimisation would be to remove the rmap entry here.
	 */
	if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa)) {
		__radix_pte_update(ptep, clr, set);
		kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
	}
}

/*
 * For a given list of rmap entries, update the rc bits in all ptes in shadow
 * page tables for nested guests which are referenced by the rmap list.
 */
void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
				    unsigned long clr, unsigned long set,
				    unsigned long hpa, unsigned long nbytes)
{
	struct llist_node *entry = ((struct llist_head *) rmapp)->first;
	struct rmap_nested *cursor;
	unsigned long rmap, mask;

	if ((clr | set) & ~(_PAGE_DIRTY | _PAGE_ACCESSED))
		return;

	mask = PTE_RPN_MASK & ~(nbytes - 1);
	hpa &= mask;

	for_each_nest_rmap_safe(cursor, entry, &rmap)
		kvmhv_update_nest_rmap_rc(kvm, rmap, clr, set, hpa, mask);
}

static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
				   unsigned long hpa, unsigned long mask)
{
	struct kvm_nested_guest *gp;
	unsigned long gpa;
	unsigned int shift, lpid;
	pte_t *ptep;

	gpa = n_rmap & RMAP_NESTED_GPA_MASK;
	lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
	gp = kvmhv_find_nested(kvm, lpid);
	if (!gp)
		return;

	/* Find and invalidate the pte */
	ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
	/* Don't spuriously invalidate ptes if the pfn has changed */
	if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa))
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
}

static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp,
					unsigned long hpa, unsigned long mask)
{
	struct llist_node *entry = llist_del_all((struct llist_head *) rmapp);
	struct rmap_nested *cursor;
	unsigned long rmap;

	for_each_nest_rmap_safe(cursor, entry, &rmap) {
		kvmhv_remove_nest_rmap(kvm, rmap, hpa, mask);
		kfree(cursor);
	}
}

/* called with kvm->mmu_lock held */
void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
				  const struct kvm_memory_slot *memslot,
				  unsigned long gpa, unsigned long hpa,
				  unsigned long nbytes)
{
	unsigned long gfn, end_gfn;
	unsigned long addr_mask;

	if (!memslot)
		return;
	gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;
	end_gfn = gfn + (nbytes >> PAGE_SHIFT);

	addr_mask = PTE_RPN_MASK & ~(nbytes - 1);
	hpa &= addr_mask;

	for (; gfn < end_gfn; gfn++) {
		unsigned long *rmap = &memslot->arch.rmap[gfn];
		kvmhv_remove_nest_rmap_list(kvm, rmap, hpa, addr_mask);
	}
}

static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free)
{
	unsigned long page;

	for (page = 0; page < free->npages; page++) {
		unsigned long rmap, *rmapp = &free->arch.rmap[page];
		struct rmap_nested *cursor;
		struct llist_node *entry;

		entry = llist_del_all((struct llist_head *) rmapp);
		for_each_nest_rmap_safe(cursor, entry, &rmap)
			kfree(cursor);
	}
}

static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu,
					struct kvm_nested_guest *gp,
					long gpa, int *shift_ret)
{
	struct kvm *kvm = vcpu->kvm;
	bool ret = false;
	pte_t *ptep;
	int shift;

	spin_lock(&kvm->mmu_lock);
	ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift);
	if (!shift)
		shift = PAGE_SHIFT;
	if (ptep && pte_present(*ptep)) {
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
		ret = true;
	}
	spin_unlock(&kvm->mmu_lock);

	if (shift_ret)
		*shift_ret = shift;
	return ret;
}

static inline int get_ric(unsigned int instr)
{
	return (instr >> 18) & 0x3;
}

static inline int get_prs(unsigned int instr)
{
	return (instr >> 17) & 0x1;
}

static inline int get_r(unsigned int instr)
{
	return (instr >> 16) & 0x1;
}

static inline int get_lpid(unsigned long r_val)
{
	return r_val & 0xffffffff;
}

static inline int get_is(unsigned long r_val)
{
	return (r_val >> 10) & 0x3;
}

static inline int get_ap(unsigned long r_val)
{
	return (r_val >> 5) & 0x7;
}

static inline long get_epn(unsigned long r_val)
{
	return r_val >> 12;
}
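
/*
 * These helpers pull the tlbie fields out of the instruction image and the
 * rS/rB values supplied with H_TLB_INVALIDATE: RIC, PRS and R come from the
 * instruction, the LPID from rS, and IS, AP and EPN from rB, matching how
 * kvmhv_emulate_priv_tlbie() below uses them.
 */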

static int kvmhv_emulate_tlbie_tlb_addr(struct kvm_vcpu *vcpu, int lpid,
					int ap, long epn)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	long npages;
	int shift, shadow_shift;
	unsigned long addr;

	shift = ap_to_shift(ap);
	addr = epn << 12;
	if (shift < 0)
		/* Invalid ap encoding */
		return -EINVAL;

	addr &= ~((1UL << shift) - 1);
	npages = 1UL << (shift - PAGE_SHIFT);

	gp = kvmhv_get_nested(kvm, lpid, false);
	if (!gp) /* No such guest -> nothing to do */
		return 0;
	mutex_lock(&gp->tlb_lock);

	/* There may be more than one host page backing this single guest pte */
	do {
		kvmhv_invalidate_shadow_pte(vcpu, gp, addr, &shadow_shift);

		npages -= 1UL << (shadow_shift - PAGE_SHIFT);
		addr += 1UL << shadow_shift;
	} while (npages > 0);

	mutex_unlock(&gp->tlb_lock);
	kvmhv_put_nested(gp);
	return 0;
}

static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu,
				     struct kvm_nested_guest *gp, int ric)
{
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&gp->tlb_lock);
	switch (ric) {
	case 0:
		/* Invalidate TLB */
		spin_lock(&kvm->mmu_lock);
		kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
					  gp->shadow_lpid);
		kvmhv_flush_lpid(gp->shadow_lpid);
		spin_unlock(&kvm->mmu_lock);
		break;
	case 1:
		/*
		 * Invalidate PWC
		 * We don't cache this -> nothing to do
		 */
		break;
	case 2:
		/* Invalidate TLB, PWC and caching of partition table entries */
		kvmhv_flush_nested(gp);
		break;
	default:
		break;
	}
	mutex_unlock(&gp->tlb_lock);
}

static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	int i;

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
		gp = kvm->arch.nested_guests[i];
		if (gp) {
			spin_unlock(&kvm->mmu_lock);
			kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
			spin_lock(&kvm->mmu_lock);
		}
	}
	spin_unlock(&kvm->mmu_lock);
}

static int kvmhv_emulate_priv_tlbie(struct kvm_vcpu *vcpu, unsigned int instr,
				    unsigned long rsval, unsigned long rbval)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	int r, ric, prs, is, ap;
	int lpid;
	long epn;
	int ret = 0;

	ric = get_ric(instr);
	prs = get_prs(instr);
	r = get_r(instr);
	lpid = get_lpid(rsval);
	is = get_is(rbval);

	/*
	 * These cases are invalid and are not handled:
	 * r   != 1 -> Only radix supported
	 * prs == 1 -> Not HV privileged
	 * ric == 3 -> No cluster bombs for radix
	 * is  == 1 -> Partition scoped translations not associated with pid
	 * (!is) && (ric == 1 || ric == 2) -> Not supported by ISA
	 */
	if ((!r) || (prs) || (ric == 3) || (is == 1) ||
	    ((!is) && (ric == 1 || ric == 2)))
		return -EINVAL;

	switch (is) {
	case 0:
		/*
		 * We know ric is either 0 or 3
		 * Invalidate TLB for a given target address
		 */
		epn = get_epn(rbval);
		ap = get_ap(rbval);
		ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap, epn);
		break;
	case 2:
		/* Invalidate matching LPID */
		gp = kvmhv_get_nested(kvm, lpid, false);
		if (gp) {
			kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
			kvmhv_put_nested(gp);
		}
		break;
	case 3:
		/* Invalidate ALL LPIDs */
		kvmhv_emulate_tlbie_all_lpid(vcpu, ric);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/*
 * This handles the H_TLB_INVALIDATE hcall.
 * Parameters are (r4) tlbie instruction code, (r5) rS contents,
 * (r6) rB contents.
 */
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu)
{
	int ret;

	ret = kvmhv_emulate_priv_tlbie(vcpu, kvmppc_get_gpr(vcpu, 4),
			kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6));
	if (ret)
		return H_PARAMETER;
	return H_SUCCESS;
}
*vcpu
,
1246 unsigned long lpid
, unsigned long ric
)
1248 struct kvm
*kvm
= vcpu
->kvm
;
1249 struct kvm_nested_guest
*gp
;
1251 gp
= kvmhv_get_nested(kvm
, lpid
, false);
1253 kvmhv_emulate_tlbie_lpid(vcpu
, gp
, ric
);
1254 kvmhv_put_nested(gp
);

/*
 * Number of pages above which we invalidate the entire LPID rather than
 * flush individual pages.
 */
static unsigned long tlb_range_flush_page_ceiling __read_mostly = 33;
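
/*
 * The ceiling is a heuristic: once a range would take more than this many
 * per-page invalidations for a given page size,
 * do_tlb_invalidate_nested_tlb() below falls back to flushing the whole
 * LPID instead, which is cheaper than walking the range.
 */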

static long do_tlb_invalidate_nested_tlb(struct kvm_vcpu *vcpu,
					 unsigned long lpid,
					 unsigned long pg_sizes,
					 unsigned long start,
					 unsigned long end)
{
	int ret = H_P4;
	unsigned long addr, nr_pages;
	struct mmu_psize_def *def;
	unsigned long psize, ap, page_size;
	bool flush_lpid;

	for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
		def = &mmu_psize_defs[psize];
		if (!(pg_sizes & def->h_rpt_pgsize))
			continue;

		nr_pages = (end - start) >> def->shift;
		flush_lpid = nr_pages > tlb_range_flush_page_ceiling;
		if (flush_lpid)
			return do_tlb_invalidate_nested_all(vcpu, lpid,
							RIC_FLUSH_TLB);
		addr = start;
		ap = mmu_get_ap(psize);
		page_size = 1UL << def->shift;
		do {
			ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap,
						   get_epn(addr));
			if (ret)
				return H_P4;
			addr += page_size;
		} while (addr < end);
	}
	return ret;
}

/*
 * Performs partition-scoped invalidations for nested guests
 * as part of H_RPT_INVALIDATE hcall.
 */
long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
			     unsigned long type, unsigned long pg_sizes,
			     unsigned long start, unsigned long end)
{
	/*
	 * If L2 lpid isn't valid, we need to return H_PARAMETER.
	 *
	 * However, nested KVM issues a L2 lpid flush call when creating
	 * partition table entries for L2. This happens even before the
	 * corresponding shadow lpid is created in HV which happens in
	 * H_ENTER_NESTED call. Since we can't differentiate this case from
	 * the invalid case, we ignore such flush requests and return success.
	 */
	if (!kvmhv_find_nested(vcpu->kvm, lpid))
		return H_SUCCESS;

	/*
	 * A flush all request can be handled by a full lpid flush only.
	 */
	if ((type & H_RPTI_TYPE_NESTED_ALL) == H_RPTI_TYPE_NESTED_ALL)
		return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_ALL);

	/*
	 * We don't need to handle a PWC flush like process table here,
	 * because intermediate partition scoped table in nested guest doesn't
	 * really have PWC. Only level we have PWC is in L0 and for nested
	 * invalidate at L0 we always do kvm_flush_lpid() which does
	 * radix__flush_all_lpid(). For range invalidate at any level, we
	 * are not removing the higher level page tables and hence there is
	 * no PWC invalidate needed.
	 *
	 * if (type & H_RPTI_TYPE_PWC) {
	 *	ret = do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_PWC);
	 *	if (ret)
	 *		return H_P4;
	 * }
	 */

	if (start == 0 && end == -1)
		return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_TLB);

	if (type & H_RPTI_TYPE_TLB)
		return do_tlb_invalidate_nested_tlb(vcpu, lpid, pg_sizes,
						    start, end);
	return H_SUCCESS;
}
1353 static int kvmhv_translate_addr_nested(struct kvm_vcpu
*vcpu
,
1354 struct kvm_nested_guest
*gp
,
1355 unsigned long n_gpa
, unsigned long dsisr
,
1356 struct kvmppc_pte
*gpte_p
)
1358 u64 fault_addr
, flags
= dsisr
& DSISR_ISSTORE
;
1361 ret
= kvmppc_mmu_walk_radix_tree(vcpu
, n_gpa
, gpte_p
, gp
->l1_gr_to_hr
,
1365 /* We didn't find a pte */
1366 if (ret
== -EINVAL
) {
1367 /* Unsupported mmu config */
1368 flags
|= DSISR_UNSUPP_MMU
;
1369 } else if (ret
== -ENOENT
) {
1370 /* No translation found */
1371 flags
|= DSISR_NOHPTE
;
1372 } else if (ret
== -EFAULT
) {
1373 /* Couldn't access L1 real address */
1374 flags
|= DSISR_PRTABLE_FAULT
;
1375 vcpu
->arch
.fault_gpa
= fault_addr
;
1382 /* We found a pte -> check permissions */
1383 if (dsisr
& DSISR_ISSTORE
) {
1385 if (!gpte_p
->may_write
) {
1386 flags
|= DSISR_PROTFAULT
;
1389 } else if (vcpu
->arch
.trap
== BOOK3S_INTERRUPT_H_INST_STORAGE
) {
1390 /* Can we execute? */
1391 if (!gpte_p
->may_execute
) {
1392 flags
|= SRR1_ISI_N_G_OR_CIP
;
1397 if (!gpte_p
->may_read
&& !gpte_p
->may_write
) {
1398 flags
|= DSISR_PROTFAULT
;
1407 vcpu
->arch
.fault_dsisr
= flags
;
1408 if (vcpu
->arch
.trap
== BOOK3S_INTERRUPT_H_INST_STORAGE
) {
1409 vcpu
->arch
.shregs
.msr
&= SRR1_MSR_BITS
;
1410 vcpu
->arch
.shregs
.msr
|= flags
;

static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
				       struct kvm_nested_guest *gp,
				       unsigned long n_gpa,
				       struct kvmppc_pte gpte,
				       unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	u64 pgflags;
	long ret;

	/* Are the rc bits set in the L1 partition scoped pte? */
	pgflags = _PAGE_ACCESSED;
	if (writing)
		pgflags |= _PAGE_DIRTY;
	if (pgflags & ~gpte.rc)
		return RESUME_HOST;

	spin_lock(&kvm->mmu_lock);
	/* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */
	ret = kvmppc_hv_handle_set_rc(kvm, false, writing,
				      gpte.raddr, kvm->arch.lpid);
	if (!ret) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Set the rc bit in the pte of the shadow_pgtable for the nest guest */
	ret = kvmppc_hv_handle_set_rc(kvm, true, writing,
				      n_gpa, gp->l1_lpid);
	if (!ret)
		ret = -EINVAL;
	else
		ret = 0;

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	return ret;
}

static inline int kvmppc_radix_level_to_shift(int level)
{
	switch (level) {
	case 2:
		return PUD_SHIFT;
	case 1:
		return PMD_SHIFT;
	default:
		return PAGE_SHIFT;
	}
}

static inline int kvmppc_radix_shift_to_level(int shift)
{
	if (shift == PUD_SHIFT)
		return 2;
	if (shift == PMD_SHIFT)
		return 1;
	if (shift == PAGE_SHIFT)
		return 0;
	WARN_ON_ONCE(1);
	return 0;
}
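
/*
 * These two helpers map between the radix tree level used by
 * kvmppc_create_pte() (0 = PTE, 1 = PMD, 2 = PUD) and the corresponding
 * page shift; the actual page sizes depend on the kernel's radix
 * page-size configuration.
 */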

/* called with gp->tlb_lock held */
static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
					  struct kvm_nested_guest *gp)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_memory_slot *memslot;
	struct rmap_nested *n_rmap;
	struct kvmppc_pte gpte;
	pte_t pte, *pte_p;
	unsigned long mmu_seq;
	unsigned long dsisr = vcpu->arch.fault_dsisr;
	unsigned long ea = vcpu->arch.fault_dar;
	unsigned long *rmapp;
	unsigned long n_gpa, gpa, gfn, perm = 0UL;
	unsigned int shift, l1_shift, level;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	bool kvm_ro = false;
	long int ret;

	if (!gp->l1_gr_to_hr) {
		kvmhv_update_ptbl_cache(gp);
		if (!gp->l1_gr_to_hr)
			return RESUME_HOST;
	}

	/* Convert the nested guest real address into a L1 guest real address */

	n_gpa = vcpu->arch.fault_gpa & ~0xF000000000000FFFULL;
	if (!(dsisr & DSISR_PRTABLE_FAULT))
		n_gpa |= ea & 0xFFF;
	ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte);

	/*
	 * If the hardware found a translation but we don't now have a usable
	 * translation in the l1 partition-scoped tree, remove the shadow pte
	 * and let the guest retry.
	 */
	if (ret == RESUME_HOST &&
	    (dsisr & (DSISR_PROTFAULT | DSISR_BADACCESS | DSISR_NOEXEC_OR_G |
		      DSISR_BAD_COPYPASTE)))
		goto inval;
	if (ret)
		return ret;

	/* Failed to set the reference/change bits */
	if (dsisr & DSISR_SET_RC) {
		ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr);
		if (ret == RESUME_HOST)
			return ret;
		if (ret)
			goto inval;
		dsisr &= ~DSISR_SET_RC;
		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
			       DSISR_PROTFAULT)))
			return RESUME_GUEST;
	}

	/*
	 * We took an HISI or HDSI while we were running a nested guest which
	 * means we have no partition scoped translation for that. This means
	 * we need to insert a pte for the mapping into our shadow_pgtable.
	 */

	l1_shift = gpte.page_shift;
	if (l1_shift < PAGE_SHIFT) {
		/* We don't support l1 using a page size smaller than our own */
		pr_err("KVM: L1 guest page shift (%d) less than our own (%d)\n",
			l1_shift, PAGE_SHIFT);
		return -EINVAL;
	}
	gpa = gpte.raddr;
	gfn = gpa >> PAGE_SHIFT;

	/* 1. Get the corresponding host memslot */

	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) {
			/* unusual error -> reflect to the guest as a DSI */
			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
			return RESUME_GUEST;
		}

		/* passthrough of emulated MMIO case */
		return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
	}
	if (memslot->flags & KVM_MEM_READONLY) {
		if (writing) {
			/* Give the guest a DSI */
			kvmppc_core_queue_data_storage(vcpu, ea,
					DSISR_ISSTORE | DSISR_PROTFAULT);
			return RESUME_GUEST;
		}
		kvm_ro = true;
	}

	/* 2. Find the host pte for this L1 guest real address */

	/* Used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* See if can find translation in our partition scoped tables for L1 */
	pte = __pte(0);
	spin_lock(&kvm->mmu_lock);
	pte_p = find_kvm_secondary_pte(kvm, gpa, &shift);
	if (!shift)
		shift = PAGE_SHIFT;
	if (pte_p)
		pte = *pte_p;
	spin_unlock(&kvm->mmu_lock);

	if (!pte_present(pte) || (writing && !(pte_val(pte) & _PAGE_WRITE))) {
		/* No suitable pte found -> try to insert a mapping */
		ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot,
					writing, kvm_ro, &pte, &level);
		if (ret == -EAGAIN)
			return RESUME_GUEST;
		else if (ret)
			return ret;
		shift = kvmppc_radix_level_to_shift(level);
	}
	/* Align gfn to the start of the page */
	gfn = (gpa & ~((1UL << shift) - 1)) >> PAGE_SHIFT;

	/* 3. Compute the pte we need to insert for nest_gpa -> host r_addr */

	/* The permissions is the combination of the host and l1 guest ptes */
	perm |= gpte.may_read ? 0UL : _PAGE_READ;
	perm |= gpte.may_write ? 0UL : _PAGE_WRITE;
	perm |= gpte.may_execute ? 0UL : _PAGE_EXEC;
	/* Only set accessed/dirty (rc) bits if set in host and l1 guest ptes */
	perm |= (gpte.rc & _PAGE_ACCESSED) ? 0UL : _PAGE_ACCESSED;
	perm |= ((gpte.rc & _PAGE_DIRTY) && writing) ? 0UL : _PAGE_DIRTY;
	pte = __pte(pte_val(pte) & ~perm);

	/* What size pte can we insert? */
	if (shift > l1_shift) {
		u64 mask;
		unsigned int actual_shift = PAGE_SHIFT;
		if (PMD_SHIFT < l1_shift)
			actual_shift = PMD_SHIFT;
		mask = (1UL << shift) - (1UL << actual_shift);
		pte = __pte(pte_val(pte) | (gpa & mask));
		shift = actual_shift;
	}
	level = kvmppc_radix_shift_to_level(shift);
	n_gpa &= ~((1UL << shift) - 1);

	/* 4. Insert the pte into our shadow_pgtable */

	n_rmap = kzalloc(sizeof(*n_rmap), GFP_KERNEL);
	if (!n_rmap)
		return RESUME_GUEST; /* Let the guest try again */
	n_rmap->rmap = (n_gpa & RMAP_NESTED_GPA_MASK) |
		(((unsigned long) gp->l1_lpid) << RMAP_NESTED_LPID_SHIFT);
	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
	ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
				mmu_seq, gp->shadow_lpid, rmapp, &n_rmap);
	kfree(n_rmap);
	if (ret == -EAGAIN)
		ret = RESUME_GUEST;	/* Let the guest try again */

	return ret;

 inval:
	kvmhv_invalidate_shadow_pte(vcpu, gp, n_gpa, NULL);
	return RESUME_GUEST;
}
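
/*
 * Summary of the shadow fault path above: translate the L2 real address
 * through L1's partition-scoped table, reflect genuine permission problems
 * back to L1, handle reference/change updates, and otherwise combine the L1
 * pte with the host pte and insert the result into the shadow page table,
 * recording an rmap entry so the mapping can be torn down when either the
 * host or the L1 mapping changes.
 */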

long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
{
	struct kvm_nested_guest *gp = vcpu->arch.nested;
	long int ret;

	mutex_lock(&gp->tlb_lock);
	ret = __kvmhv_nested_page_fault(vcpu, gp);
	mutex_unlock(&gp->tlb_lock);
	return ret;
}

int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
{
	int ret = -1;

	spin_lock(&kvm->mmu_lock);
	while (++lpid <= kvm->arch.max_nested_lpid) {
		if (kvm->arch.nested_guests[lpid]) {
			ret = lpid;
			break;
		}
	}
	spin_unlock(&kvm->mmu_lock);
	return ret;
}