/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>

#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)

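/*
 * The host maps a shared "magic page" at guest effective address -4096.
 * The loads and stores patched below use RA=0 with a sign-extended 16-bit
 * displacement, which reaches that page directly, so magic_var() only has
 * to supply the low address bits of a struct kvm_vcpu_arch_shared field.
 */
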
#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03ffffff
#define KVM_INST_B_MAX		0x01ffffff
#define KVM_INST_LI		0x38000000

#define KVM_MASK_RT		0x03e00000
#define KVM_RT_30		0x03c00000
#define KVM_MASK_RB		0x0000f800
#define KVM_INST_MFMSR		0x7c0000a6

#define SPR_FROM		0
#define SPR_TO			0x100

#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
				    (((sprn) & 0x1f) << 16) | \
				    (((sprn) & 0x3e0) << 6) | \
				    (moveto))

#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)

#define KVM_INST_TLBSYNC	0x7c00046c
#define KVM_INST_MTMSRD_L0	0x7c000164
#define KVM_INST_MTMSRD_L1	0x7c010164
#define KVM_INST_MTMSR		0x7c000124
#define KVM_INST_WRTEE		0x7c000106
#define KVM_INST_WRTEEI_0	0x7c000146
#define KVM_INST_WRTEEI_1	0x7c008146
#define KVM_INST_MTSRIN		0x7c0001e4

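/*
 * Patching runs once at boot. Out-of-line trampolines are carved out of
 * kvm_tmp; any failure merely clears kvm_patching_worked and the guest
 * keeps running unpatched, just slower.
 */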
static bool kvm_patching_worked = true;
static char kvm_tmp[1024 * 1024];
static int kvm_tmp_index;

static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}

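/*
 * Helpers that patch in loads/stores of magic page fields. 64-bit kernels
 * use ld/std for the 64-bit fields; 32-bit kernels access the low word at
 * addr + 4 instead (the shared struct is laid out big-endian).
 */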
static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}

static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

static void kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}

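/*
 * An unconditional "b" carries a 26-bit signed displacement, so a
 * trampoline is only reachable within +/- 32 MB of the patch site;
 * KVM_INST_B_MAX is the largest encodable forward distance.
 */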
static void kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
	/* On relocatable kernels interrupt handlers and our code
	   can be in different regions, so we don't patch them */
	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}

static u32 *kvm_alloc(int len)
{
	u32 *p;

	if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
				kvm_tmp_index, len);
		kvm_patching_worked = false;
		return NULL;
	}

	p = (void*)&kvm_tmp[kvm_tmp_index];
	kvm_tmp_index += len;

	return p;
}

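/*
 * The kvm_emulate_* symbols below are assembly templates. Each template
 * is copied into kvm_tmp and customized: the *_offs values are word
 * offsets of the instructions that need fixing up (the branch back to the
 * patch site, the register operand, and the slot holding a copy of the
 * original instruction).
 */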
extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];

static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

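/*
 * Note the r30/r31 special cases above: the templates stash r30/r31 in
 * the magic page's scratch2/scratch1 slots before clobbering them, so
 * when the original instruction's operand is r30 or r31 the value has to
 * be reloaded from the scratch slot rather than read from the register.
 */
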
extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];

static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];

static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

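/*
 * "wrteei 0" gets its own, shorter template below. "wrteei 1" reuses the
 * wrtee template: kvm_patch_ins_wrtee() is called with imm_one set, which
 * replaces the operand load with "li r30, MSR_EE".
 */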
extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];

static void kvm_patch_ins_wrteei_0(u32 *inst)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#ifdef CONFIG_PPC_BOOK3S_32

extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];

static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif /* CONFIG_PPC_BOOK3S_32 */

static void kvm_map_magic_page(void *data)
{
	u32 *features = data;

	ulong in[8];
	ulong out[8];

	in[0] = KVM_MAGIC_PAGE;
	in[1] = KVM_MAGIC_PAGE;

	kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE);

	*features = out[0];
}

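/*
 * Inspect a single kernel instruction. Privileged accesses to state that
 * is mirrored in the magic page become plain loads/stores patched in
 * place; the more involved instructions are redirected to an out-of-line
 * trampoline with a "b".
 */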
static void kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG0):
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG1):
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG2):
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG3):
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR0):
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR1):
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_DEAR):
#else
	case KVM_INST_MFSPR(SPRN_DAR):
#endif
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_DSISR):
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;

#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MFSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
		break;
#endif

	case KVM_INST_MFSPR(SPRN_PIR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
		break;

	/* Stores */
	case KVM_INST_MTSPR(SPRN_SPRG0):
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG1):
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG2):
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG3):
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR0):
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR1):
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_DEAR):
#else
	case KVM_INST_MTSPR(SPRN_DAR):
#endif
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_DSISR):
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;

#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MTSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MTSPR(SPRN_SPRG4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG5):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
		break;
#endif

	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	case KVM_INST_MTMSR:
	case KVM_INST_MTMSRD_L0:
		kvm_patch_ins_mtmsr(inst, inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEE:
		kvm_patch_ins_wrtee(inst, inst_rt, 0);
		break;
#endif
	}

	switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
	case KVM_INST_MTSRIN:
		if (features & KVM_MAGIC_FEAT_SR) {
			u32 inst_rb = _inst & KVM_MASK_RB;
			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
		}
		break;
#endif
	}

	switch (_inst) {
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEEI_0:
		kvm_patch_ins_wrteei_0(inst);
		break;

	case KVM_INST_WRTEEI_1:
		kvm_patch_ins_wrtee(inst, 0, 1);
		break;
#endif
	}
}

static void kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 tmp;
	u32 features;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, &features, 1);

	/* Quick self-test to see if the mapping works */
	if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void*)_stext;
	end = (void*)_etext;

	/*
	 * Being interrupted in the middle of patching would
	 * be bad for SPRG4-7, which KVM can't keep in sync
	 * with emulated accesses because reads don't trap.
	 */
	local_irq_disable();

	for (p = start; p < end; p++)
		kvm_check_ins(p, features);

	local_irq_enable();

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
			 kvm_patching_worked ? "worked" : "failed");
}

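/*
 * Hypercall calling convention, as used below: the call number goes in
 * r11 and up to eight arguments in r3-r10; r3 returns the status and
 * r4-r11 carry up to eight return values.
 */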
unsigned long kvm_hypercall(unsigned long *in,
			    unsigned long *out,
			    unsigned long nr)
{
	unsigned long register r0 asm("r0");
	unsigned long register r3 asm("r3") = in[0];
	unsigned long register r4 asm("r4") = in[1];
	unsigned long register r5 asm("r5") = in[2];
	unsigned long register r6 asm("r6") = in[3];
	unsigned long register r7 asm("r7") = in[4];
	unsigned long register r8 asm("r8") = in[5];
	unsigned long register r9 asm("r9") = in[6];
	unsigned long register r10 asm("r10") = in[7];
	unsigned long register r11 asm("r11") = nr;
	unsigned long register r12 asm("r12");

	asm volatile("bl	kvm_hypercall_start"
		     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
		       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
		       "=r"(r12)
		     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
		       "r"(r9), "r"(r10), "r"(r11)
		     : "memory", "cc", "xer", "ctr", "lr");

	out[0] = r4;
	out[1] = r5;
	out[2] = r6;
	out[3] = r7;
	out[4] = r8;
	out[5] = r9;
	out[6] = r10;
	out[7] = r11;

	return r3;
}
EXPORT_SYMBOL_GPL(kvm_hypercall);

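/*
 * The host publishes its hypercall sequence in the "hcall-instructions"
 * property of the /hypervisor device tree node; copy it over the
 * kvm_hypercall_start stub.
 */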
static int kvm_para_setup(void)
{
	extern u32 kvm_hypercall_start;
	struct device_node *hyper_node;
	u32 *insts;
	int len, i;

	hyper_node = of_find_node_by_path("/hypervisor");
	if (!hyper_node)
		return -1;

	insts = (u32*)of_get_property(hyper_node, "hcall-instructions", &len);
	if (!insts || (len % 4))
		return -1;

	for (i = 0; i < (len / 4); i++)
		kvm_patch_ins(&(&kvm_hypercall_start)[i], insts[i]);

	return 0;
}

static __init void kvm_free_tmp(void)
{
	unsigned long start, end;

	start = (ulong)&kvm_tmp[kvm_tmp_index + (PAGE_SIZE - 1)] & PAGE_MASK;
	end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK;

	/* Free the tmp space we don't need */
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
	}
}

static int __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		goto free_tmp;

	if (kvm_para_setup())
		goto free_tmp;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
	/* Enable napping */
	powersave_nap = 1;
#endif

free_tmp:
	kvm_free_tmp();

	return 0;
}

postcore_initcall(kvm_guest_init);