/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */

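/*
 * For reference, mmu.c is expected to instantiate this template twice,
 * roughly as follows (a sketch of the include site, not a verbatim copy
 * of mmu.c):
 *
 *      #define PTTYPE 64
 *      #include "paging_tmpl.h"
 *      #undef PTTYPE
 *
 *      #define PTTYPE 32
 *      #include "paging_tmpl.h"
 *      #undef PTTYPE
 */
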
#if PTTYPE == 64
        #define pt_element_t u64
        #define guest_walker guest_walker64
        #define FNAME(name) paging##64_##name
        #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
        #define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
        #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
        #define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
        #ifdef CONFIG_X86_64
        #define PT_MAX_FULL_LEVELS 4
        #else
        #define PT_MAX_FULL_LEVELS 2
        #endif
#elif PTTYPE == 32
        #define pt_element_t u32
        #define guest_walker guest_walker32
        #define FNAME(name) paging##32_##name
        #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
        #define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
        #define PT_INDEX(addr, level) PT32_INDEX(addr, level)
        #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
        #define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
        #define PT_MAX_FULL_LEVELS 2
#else
        #error Invalid PTTYPE value
#endif
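
/*
 * Note that SHADOW_PT_INDEX maps to PT64_INDEX in both variants: the shadow
 * page tables maintained here are kept in the 64-bit pte format regardless
 * of guest mode, so only the guest-side accessors change with PTTYPE.
 */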

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
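/*
 * level:        level at which the guest walk ended (1 == pte, 2 == pde, ...)
 * table_gfn[]:  guest frame number of the guest page table visited at each
 *               level, indexed by level - 1; used later to locate shadow pages
 * table:        kmap of the guest page table currently being examined
 * ptep:         pointer into @table at the final guest entry
 * inherited_ar: user/write access rights accumulated by ANDing the entries
 *               walked above the final one
 */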
struct guest_walker {
        int level;
        gfn_t table_gfn[PT_MAX_FULL_LEVELS];
        pt_element_t *table;
        pt_element_t *ptep;
        pt_element_t inherited_ar;
};

/*
 * Fetch a guest pte for a guest virtual address
 */
static void FNAME(walk_addr)(struct guest_walker *walker,
                             struct kvm_vcpu *vcpu, gva_t addr)
{
        hpa_t hpa;
        struct kvm_memory_slot *slot;
        pt_element_t *ptep;
        pt_element_t root;
        gfn_t table_gfn;

        pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
        walker->level = vcpu->mmu.root_level;
        walker->table = NULL;
        root = vcpu->cr3;
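        /*
         * A PAE guest (64-bit pte format without long mode) does not start
         * the walk from cr3 directly: bits 31:30 of the address select one
         * of the four pdptrs cached in the vcpu, and the walk proper begins
         * one level down.
         */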
#if PTTYPE == 64
        if (!is_long_mode(vcpu)) {
                walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
                root = *walker->ptep;
                if (!(root & PT_PRESENT_MASK))
                        return;
                --walker->level;
        }
#endif
        table_gfn = (root & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
        walker->table_gfn[walker->level - 1] = table_gfn;
        pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
                 walker->level - 1, table_gfn);
        slot = gfn_to_memslot(vcpu->kvm, table_gfn);
        hpa = safe_gpa_to_hpa(vcpu, root & PT64_BASE_ADDR_MASK);
        walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0);

        ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
               (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);

        walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;

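        /*
         * Walk down the guest page tables one level per iteration.  The
         * walk stops at a not-present entry, at the final (pte) level, or
         * at a directory entry that maps a large page (PS bit set, with
         * PSE enabled for 32-bit non-PAE guests).
         */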
        for (;;) {
                int index = PT_INDEX(addr, walker->level);
                hpa_t paddr;

                ptep = &walker->table[index];
                ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
                       ((unsigned long)ptep & PAGE_MASK));

                if (is_present_pte(*ptep) && !(*ptep & PT_ACCESSED_MASK))
                        *ptep |= PT_ACCESSED_MASK;

                if (!is_present_pte(*ptep) ||
                    walker->level == PT_PAGE_TABLE_LEVEL ||
                    (walker->level == PT_DIRECTORY_LEVEL &&
                     (*ptep & PT_PAGE_SIZE_MASK) &&
                     (PTTYPE == 64 || is_pse(vcpu))))
                        break;

                if (walker->level != 3 || is_long_mode(vcpu))
                        walker->inherited_ar &= walker->table[index];
                table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
                paddr = safe_gpa_to_hpa(vcpu, *ptep & PT_BASE_ADDR_MASK);
                kunmap_atomic(walker->table, KM_USER0);
                walker->table = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT),
                                            KM_USER0);
                --walker->level;
                walker->table_gfn[walker->level - 1] = table_gfn;
                pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
                         walker->level - 1, table_gfn);
        }
        walker->ptep = ptep;
        pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)*ptep);
}

static void FNAME(release_walker)(struct guest_walker *walker)
{
        if (walker->table)
                kunmap_atomic(walker->table, KM_USER0);
}

static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
                           u64 *shadow_pte, u64 access_bits)
{
        ASSERT(*shadow_pte == 0);
        access_bits &= guest_pte;
        *shadow_pte = (guest_pte & PT_PTE_COPY_MASK);
        set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK,
                       guest_pte & PT_DIRTY_MASK, access_bits);
}

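/*
 * set_pde instantiates a single 4K shadow pte for the index'th page covered
 * by a guest large-page pde: the guest physical address is the pde's base
 * plus index * PAGE_SIZE, with the PSE36 high address bits folded in for
 * 32-bit guests that support it.
 */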
static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
                           u64 *shadow_pte, u64 access_bits,
                           int index)
{
        gpa_t gaddr;

        ASSERT(*shadow_pte == 0);
        access_bits &= guest_pde;
        gaddr = (guest_pde & PT_DIR_BASE_ADDR_MASK) + PAGE_SIZE * index;
        if (PTTYPE == 32 && is_cpuid_PSE36())
                gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
                        (32 - PT32_DIR_PSE36_SHIFT);
        *shadow_pte = guest_pde & PT_PTE_COPY_MASK;
        set_pte_common(vcpu, shadow_pte, gaddr,
                       guest_pde & PT_DIRTY_MASK, access_bits);
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                         struct guest_walker *walker)
{
        hpa_t shadow_addr;
        int level;
        u64 *prev_shadow_ent = NULL;
        pt_element_t *guest_ent = walker->ptep;

        if (!is_present_pte(*guest_ent))
                return NULL;

        shadow_addr = vcpu->mmu.root_hpa;
        level = vcpu->mmu.shadow_root_level;
        if (level == PT32E_ROOT_LEVEL) {
                shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
                shadow_addr &= PT64_BASE_ADDR_MASK;
                --level;
        }

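        /*
         * Walk the shadow page table in parallel with the guest walk
         * described by walker.  Present shadow entries are simply followed;
         * a missing entry at the final level is filled in from the guest
         * pte/pde, and a missing intermediate entry gets a shadow page from
         * kvm_mmu_get_page().  A shadow page is "metaphysical" when it has
         * no corresponding guest page table, i.e. when a guest large page
         * is shadowed by a table of 4K ptes.
         */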
        for (; ; level--) {
                u32 index = SHADOW_PT_INDEX(addr, level);
                u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index;
                struct kvm_mmu_page *shadow_page;
                u64 shadow_pte;
                int metaphysical;
                gfn_t table_gfn;

                if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
                        if (level == PT_PAGE_TABLE_LEVEL)
                                return shadow_ent;
                        shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
                        prev_shadow_ent = shadow_ent;
                        continue;
                }

                if (level == PT_PAGE_TABLE_LEVEL) {

                        if (walker->level == PT_DIRECTORY_LEVEL) {
                                if (prev_shadow_ent)
                                        *prev_shadow_ent |= PT_SHADOW_PS_MARK;
                                FNAME(set_pde)(vcpu, *guest_ent, shadow_ent,
                                               walker->inherited_ar,
                                               PT_INDEX(addr, PT_PAGE_TABLE_LEVEL));
                        } else {
                                ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
                                FNAME(set_pte)(vcpu, *guest_ent, shadow_ent,
                                               walker->inherited_ar);
                        }
                        return shadow_ent;
                }

                if (level - 1 == PT_PAGE_TABLE_LEVEL
                    && walker->level == PT_DIRECTORY_LEVEL) {
                        metaphysical = 1;
                        table_gfn = (*guest_ent & PT_BASE_ADDR_MASK)
                                >> PAGE_SHIFT;
                } else {
                        metaphysical = 0;
                        table_gfn = walker->table_gfn[level - 2];
                }
                shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level - 1,
                                               metaphysical, shadow_ent);
                if (!shadow_page)
                        return ERR_PTR(-ENOMEM);
                shadow_addr = shadow_page->page_hpa;
                shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
                        | PT_WRITABLE_MASK | PT_USER_MASK;
                *shadow_ent = shadow_pte;
                prev_shadow_ent = shadow_ent;
        }
}

/*
 * The guest faulted for write.  We need to
 *
 * - check write permissions
 * - update the guest pte dirty bit
 * - update our own dirty page tracking structures
 */
static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
                               u64 *shadow_ent,
                               struct guest_walker *walker,
                               gva_t addr,
                               int user,
                               int *write_pt)
{
        pt_element_t *guest_ent;
        int writable_shadow;
        gfn_t gfn;

        if (is_writeble_pte(*shadow_ent))
                return 0;

        writable_shadow = *shadow_ent & PT_SHADOW_WRITABLE_MASK;
        if (user) {
                /*
                 * User mode access.  Fail if it's a kernel page or a read-only
                 * page.
                 */
                if (!(*shadow_ent & PT_SHADOW_USER_MASK) || !writable_shadow)
                        return 0;
                ASSERT(*shadow_ent & PT_USER_MASK);
        } else
                /*
                 * Kernel mode access.  Fail if it's a read-only page and
                 * supervisor write protection is enabled.
                 */
                if (!writable_shadow) {
                        if (is_write_protection(vcpu))
                                return 0;
                        *shadow_ent &= ~PT_USER_MASK;
                }

        guest_ent = walker->ptep;

        if (!is_present_pte(*guest_ent)) {
                *shadow_ent = 0;
                return 0;
        }

        gfn = (*guest_ent & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
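        /*
         * If the target frame is itself shadowed as a guest page table, the
         * shadow pte is deliberately left read-only so that writes to it
         * keep trapping; write_pt tells the caller to emulate the access
         * instead of making the page writable.
         */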
        if (kvm_mmu_lookup_page(vcpu, gfn)) {
                pgprintk("%s: found shadow page for %lx, marking ro\n",
                         __FUNCTION__, gfn);
                *write_pt = 1;
                return 0;
        }
        mark_page_dirty(vcpu->kvm, gfn);
        *shadow_ent |= PT_WRITABLE_MASK;
        *guest_ent |= PT_DIRTY_MASK;
        rmap_add(vcpu->kvm, shadow_ent);

        return 1;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 * - there is no shadow pte for the guest pte
 * - write access through a shadow pte marked read only so that we can set
 *   the dirty bit
 * - write access to a shadow pte marked read only so we can update the page
 *   dirty bitmap, when userspace requests it
 * - mmio access; in this case we will never install a present shadow pte
 * - normal guest page fault due to the guest pte marked not present, not
 *   writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
                             u32 error_code)
{
        int write_fault = error_code & PFERR_WRITE_MASK;
        int pte_present = error_code & PFERR_PRESENT_MASK;
        int user_fault = error_code & PFERR_USER_MASK;
        struct guest_walker walker;
        u64 *shadow_pte;
        int fixed;
        int write_pt = 0;

        pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
        /*
         * Look up the shadow pte for the faulting address.
         */
        for (;;) {
                FNAME(walk_addr)(&walker, vcpu, addr);
                shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
                if (IS_ERR(shadow_pte)) {  /* must be -ENOMEM */
                        printk("%s: oom\n", __FUNCTION__);
                        nonpaging_flush(vcpu);
                        FNAME(release_walker)(&walker);
                        continue;
                }
                break;
        }

        /*
         * The page is not mapped by the guest.  Let the guest handle it.
         */
        if (!shadow_pte) {
                pgprintk("%s: not mapped\n", __FUNCTION__);
                inject_page_fault(vcpu, addr, error_code);
                FNAME(release_walker)(&walker);
                return 0;
        }

        pgprintk("%s: shadow pte %p %llx\n", __FUNCTION__,
                 shadow_pte, *shadow_pte);

        /*
         * Update the shadow pte.
         */
        if (write_fault)
                fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr,
                                            user_fault, &write_pt);
        else
                fixed = fix_read_pf(shadow_pte);

        pgprintk("%s: updated shadow pte %p %llx\n", __FUNCTION__,
                 shadow_pte, *shadow_pte);

        FNAME(release_walker)(&walker);

        /*
         * mmio: emulate if accessible, otherwise it's a guest fault.
         */
        if (is_io_pte(*shadow_pte)) {
                if (may_access(*shadow_pte, write_fault, user_fault))
                        return 1;
                pgprintk("%s: io work, no access\n", __FUNCTION__);
                inject_page_fault(vcpu, addr,
                                  error_code | PFERR_PRESENT_MASK);
                return 0;
        }

        /*
         * pte not present, guest page fault.
         */
        if (pte_present && !fixed && !write_pt) {
                inject_page_fault(vcpu, addr, error_code);
                return 0;
        }

        ++kvm_stat.pf_fixed;

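        /*
         * write_pt is set when the faulting write hit a page that is in use
         * as a guest page table; returning 1 in that case asks the caller to
         * emulate the instruction rather than simply retry it.
         */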
        return write_pt;
}

static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
        struct guest_walker walker;
        pt_element_t guest_pte;
        gpa_t gpa;

        FNAME(walk_addr)(&walker, vcpu, vaddr);
        guest_pte = *walker.ptep;
        FNAME(release_walker)(&walker);

        if (!is_present_pte(guest_pte))
                return UNMAPPED_GVA;

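        /*
         * A walk that ended at the directory level means a large guest page:
         * the gpa is the pde's large-page base plus the bits of the virtual
         * address below the directory (PT_LEVEL_MASK(PT_PAGE_TABLE_LEVEL) |
         * ~PAGE_MASK), with the PSE36 bits supplying address bits above 4GB
         * for 32-bit guests.
         */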
        if (walker.level == PT_DIRECTORY_LEVEL) {
                ASSERT((guest_pte & PT_PAGE_SIZE_MASK));
                ASSERT(PTTYPE == 64 || is_pse(vcpu));

                gpa = (guest_pte & PT_DIR_BASE_ADDR_MASK) | (vaddr &
                        (PT_LEVEL_MASK(PT_PAGE_TABLE_LEVEL) | ~PAGE_MASK));

                if (PTTYPE == 32 && is_cpuid_PSE36())
                        gpa |= (guest_pte & PT32_DIR_PSE36_MASK) <<
                                (32 - PT32_DIR_PSE36_SHIFT);
        } else {
                gpa = (guest_pte & PT_BASE_ADDR_MASK);
                gpa |= (vaddr & ~PAGE_MASK);
        }

        return gpa;
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_PTE_COPY_MASK
#undef PT_NON_PTE_COPY_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_MAX_FULL_LEVELS