]>
Commit | Line | Data |
---|---|---|
2a342ed5 AG |
1 | /* |
2 | * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved. | |
3 | * | |
4 | * Authors: | |
5 | * Alexander Graf <agraf@suse.de> | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or modify | |
8 | * it under the terms of the GNU General Public License, version 2, as | |
9 | * published by the Free Software Foundation. | |
10 | * | |
11 | * This program is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 | * GNU General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * along with this program; if not, write to the Free Software | |
18 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | |
19 | */ | |
20 | ||
21 | #include <linux/kvm_host.h> | |
22 | #include <linux/init.h> | |
23 | #include <linux/kvm_para.h> | |
24 | #include <linux/slab.h> | |
25 | #include <linux/of.h> | |
26 | ||
27 | #include <asm/reg.h> | |
28 | #include <asm/kvm_ppc.h> | |
29 | #include <asm/sections.h> | |
30 | #include <asm/cacheflush.h> | |
31 | #include <asm/disassemble.h> | |
32 | ||
/*
 * The magic page is a page shared between guest and host, mapped at the
 * very top of the guest effective address space (-4096) so that a single
 * negative-offset load/store from register 0 can reach it.
 *
 * magic_var(x) yields the guest effective address of field 'x' of the
 * struct kvm_vcpu_arch_shared that lives inside the magic page.
 *
 * NOTE: the expansion is fully parenthesized so the macro stays safe in
 * any expression context (e.g. magic_var(x) * 2), per standard macro
 * hygiene; all current callers pass it as a function argument, so this
 * does not change existing behavior.
 */
#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x) (KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x))

/* Raw PowerPC opcodes with the register/offset fields zeroed; the RT
 * field and displacement are OR'ed in at patch time. */
#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03ffffff
#define KVM_INST_B_MAX		0x01ffffff

/* Mask of the RT (target/source register) field of an instruction */
#define KVM_MASK_RT		0x03e00000
/* mfmsr/mfspr encodings with RT = r0; compared against the candidate
 * instruction after its RT field has been masked off. */
#define KVM_INST_MFMSR		0x7c0000a6
#define KVM_INST_MFSPR_SPRG0	0x7c1042a6
#define KVM_INST_MFSPR_SPRG1	0x7c1142a6
#define KVM_INST_MFSPR_SPRG2	0x7c1242a6
#define KVM_INST_MFSPR_SPRG3	0x7c1342a6
#define KVM_INST_MFSPR_SRR0	0x7c1a02a6
#define KVM_INST_MFSPR_SRR1	0x7c1b02a6
#define KVM_INST_MFSPR_DAR	0x7c1302a6
#define KVM_INST_MFSPR_DSISR	0x7c1202a6

#define KVM_INST_MTSPR_SPRG0	0x7c1043a6
#define KVM_INST_MTSPR_SPRG1	0x7c1143a6
#define KVM_INST_MTSPR_SPRG2	0x7c1243a6
#define KVM_INST_MTSPR_SPRG3	0x7c1343a6
#define KVM_INST_MTSPR_SRR0	0x7c1a03a6
#define KVM_INST_MTSPR_SRR1	0x7c1b03a6
#define KVM_INST_MTSPR_DAR	0x7c1303a6
#define KVM_INST_MTSPR_DSISR	0x7c1203a6

/* Flipped to false as soon as any patching step fails its self-test;
 * only reported in the final printk. */
static bool kvm_patching_worked = true;
/*
 * Overwrite one guest instruction word in place, then flush the icache
 * for that word so the CPU fetches the new instruction rather than a
 * stale cached copy.
 */
static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}
72 | ||
d1293c92 AG |
73 | static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt) |
74 | { | |
75 | #ifdef CONFIG_64BIT | |
76 | kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc)); | |
77 | #else | |
78 | kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc)); | |
79 | #endif | |
80 | } | |
81 | ||
82 | static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt) | |
83 | { | |
84 | kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff)); | |
85 | } | |
86 | ||
87 | static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt) | |
88 | { | |
89 | #ifdef CONFIG_64BIT | |
90 | kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc)); | |
91 | #else | |
92 | kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc)); | |
93 | #endif | |
94 | } | |
95 | ||
96 | static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt) | |
97 | { | |
98 | kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc)); | |
99 | } | |
100 | ||
/*
 * Ask the host, via hypercall, to map the magic page at effective
 * address KVM_MAGIC_PAGE for the calling CPU.  Invoked on every CPU
 * through on_each_cpu(); the 'data' argument is unused and only
 * present to satisfy the on_each_cpu() callback signature.
 */
static void kvm_map_magic_page(void *data)
{
	kvm_hypercall2(KVM_HC_PPC_MAP_MAGIC_PAGE,
		       KVM_MAGIC_PAGE,	/* Physical Address */
		       KVM_MAGIC_PAGE);	/* Effective Address */
}
107 | ||
/*
 * Examine one instruction word of kernel text.  If it is a privileged
 * mfmsr/mfspr/mtspr access that the magic page mirrors, patch it in
 * place into an ordinary load/store from/to the shared page, avoiding
 * a trap into the host on every execution.
 *
 * Matching is done with the RT (register) field masked off, so any
 * destination/source register is handled; the original RT bits are
 * OR'ed back into the replacement instruction by the patch helpers.
 */
static void kvm_check_ins(u32 *inst)
{
	u32 _inst = *inst;
	/* opcode with the RT field cleared, for comparison */
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	/* just the RT field, re-inserted into the patched instruction */
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR_SPRG0:
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR_SPRG1:
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR_SPRG2:
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR_SPRG3:
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR_SRR0:
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR_SRR1:
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
	case KVM_INST_MFSPR_DAR:
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR_DSISR:
		/* dsisr is a 32-bit field, so a word load suffices */
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;

	/* Stores */
	case KVM_INST_MTSPR_SPRG0:
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR_SPRG1:
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR_SPRG2:
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR_SPRG3:
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR_SRR0:
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR_SRR1:
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
	case KVM_INST_MTSPR_DAR:
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR_DSISR:
		/* 32-bit field; word store */
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;
	}

	/* Placeholder for exact-match (RT-dependent) candidates; none yet */
	switch (_inst) {
	}
}
174 | ||
/*
 * Enable magic-page based paravirtualization: map the shared page on
 * every CPU, verify the mapping actually works with a test read, and
 * then scan the whole kernel text for privileged instructions that can
 * be patched into plain loads/stores against the magic page.
 */
static void kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 tmp;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, NULL, 1);

	/* Quick self-test to see if the mapping works */
	if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) {
		/* read faulted -- host didn't map the page; give up */
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void*)_stext;
	end = (void*)_etext;

	/* one instruction word at a time over the kernel text section */
	for (p = start; p < end; p++)
		kvm_check_ins(p);

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
	       kvm_patching_worked ? "worked" : "failed");
}
200 | ||
/*
 * Issue hypercall number 'nr' with eight input arguments and collect
 * eight outputs.
 *
 * ABI (as used by this file): in[0..7] go to r3..r10, the hypercall
 * number to r11; the hypercall returns its status in r3 and outputs in
 * r4..r11.  kvm_hypercall_start holds the actual hypercall instruction
 * sequence, patched in from the device tree by kvm_para_setup().
 *
 * NOTE(review): r0 and r12 carry no inputs; listing them as asm outputs
 * appears intended to mark them clobbered by the called sequence --
 * confirm against the hypervisor calling convention.
 *
 * @in:  array of 8 argument words (must be valid)
 * @out: array of 8 result words, overwritten on return
 * @nr:  hypercall number
 *
 * Returns the hypercall status from r3.
 */
unsigned long kvm_hypercall(unsigned long *in,
			    unsigned long *out,
			    unsigned long nr)
{
	unsigned long register r0 asm("r0");
	unsigned long register r3 asm("r3") = in[0];
	unsigned long register r4 asm("r4") = in[1];
	unsigned long register r5 asm("r5") = in[2];
	unsigned long register r6 asm("r6") = in[3];
	unsigned long register r7 asm("r7") = in[4];
	unsigned long register r8 asm("r8") = in[5];
	unsigned long register r9 asm("r9") = in[6];
	unsigned long register r10 asm("r10") = in[7];
	unsigned long register r11 asm("r11") = nr;
	unsigned long register r12 asm("r12");

	asm volatile("bl	kvm_hypercall_start"
		     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
		       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
		       "=r"(r12)
		     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
		       "r"(r9), "r"(r10), "r"(r11)
		     : "memory", "cc", "xer", "ctr", "lr");

	out[0] = r4;
	out[1] = r5;
	out[2] = r6;
	out[3] = r7;
	out[4] = r8;
	out[5] = r9;
	out[6] = r10;
	out[7] = r11;

	return r3;
}
EXPORT_SYMBOL_GPL(kvm_hypercall);
73a18109 AG |
237 | |
238 | static int kvm_para_setup(void) | |
239 | { | |
240 | extern u32 kvm_hypercall_start; | |
241 | struct device_node *hyper_node; | |
242 | u32 *insts; | |
243 | int len, i; | |
244 | ||
245 | hyper_node = of_find_node_by_path("/hypervisor"); | |
246 | if (!hyper_node) | |
247 | return -1; | |
248 | ||
249 | insts = (u32*)of_get_property(hyper_node, "hcall-instructions", &len); | |
250 | if (len % 4) | |
251 | return -1; | |
252 | if (len > (4 * 4)) | |
253 | return -1; | |
254 | ||
255 | for (i = 0; i < (len / 4); i++) | |
256 | kvm_patch_ins(&(&kvm_hypercall_start)[i], insts[i]); | |
257 | ||
258 | return 0; | |
259 | } | |
260 | ||
261 | static int __init kvm_guest_init(void) | |
262 | { | |
263 | if (!kvm_para_available()) | |
264 | return 0; | |
265 | ||
266 | if (kvm_para_setup()) | |
267 | return 0; | |
268 | ||
269 | if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE)) | |
270 | kvm_use_magic_page(); | |
271 | ||
272 | return 0; | |
273 | } | |
274 | ||
275 | postcore_initcall(kvm_guest_init); |