/*P:700 The pagetable code, on the other hand, still shows the scars of
 * previous encounters. It's functional, and as neat as it can be in the
 * circumstances, but be wary, for these things are subtle and break easily.
 * The Guest provides a virtual to physical mapping, but we can neither trust
 * it nor use it: we verify and convert it here to point the hardware to the
 * actual Guest pages when running the Guest. :*/

/* Copyright (C) Rusty Russell IBM Corporation 2006.
 * GPL v2 and any later version */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/percpu.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include "lg.h"

/*M:008 We hold reference to pages, which prevents them from being swapped.
 * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
 * to swap out. If we had this, and a shrinker callback to trim PTE pages, we
 * could probably consider launching Guests as non-root. :*/

/*H:300
 * The Page Table Code
 *
 * We use two-level page tables for the Guest. If you're not entirely
 * comfortable with virtual addresses, physical addresses and page tables then
 * I recommend you review arch/x86/lguest/boot.c's "Page Table Handling" (with
 * diagrams!).
 *
 * The Guest keeps page tables, but we maintain the actual ones here: these are
 * called "shadow" page tables. Which is a very Guest-centric name: these are
 * the real page tables the CPU uses, although we keep them up to date to
 * reflect the Guest's. (See what I mean about weird naming? Since when do
 * shadows reflect anything?)
 *
 * Anyway, this is the most complicated part of the Host code. There are seven
 * parts to this:
 * (i) Looking up a page table entry when the Guest faults,
 * (ii) Making sure the Guest stack is mapped,
 * (iii) Setting up a page table entry when the Guest tells us one has changed,
 * (iv) Switching page tables,
 * (v) Flushing (throwing away) page tables,
 * (vi) Mapping the Switcher when the Guest is about to run,
 * (vii) Setting up the page tables initially.
 :*/
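
/* A rough mental model of the two-level layout this file uses: a 32-bit
 * virtual address splits into
 *
 *	bits 31..22: index into the PGD (1024 entries),
 *	bits 21..12: index into that entry's PTE page (1024 entries),
 *	bits 11..0:  byte offset within the 4096-byte page.
 *
 * So, for example, vaddr 0xC0101234 uses PGD entry 0x300, PTE 0x101 and
 * offset 0x234. The helper functions below do exactly this arithmetic, once
 * for the shadow page tables and once for the Guest's own. */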
48 | ||
49 | ||
50 | /* 1024 entries in a page table page maps 1024 pages: 4MB. The Switcher is | |
51 | * conveniently placed at the top 4MB, so it uses a separate, complete PTE | |
52 | * page. */ | |
53 | #define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1) | |
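/* In other words: with 1024 PGD entries each covering 4MB (1024 PTEs * 4096
 * bytes), entry 1023 covers addresses 0xFFC00000 and up, which is exactly
 * where the Switcher lives. */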
54 | ||
55 | /* We actually need a separate PTE page for each CPU. Remember that after the | |
56 | * Switcher code itself comes two pages for each CPU, and we don't want this | |
57 | * CPU's guest to see the pages of any other CPU. */ | |
58 | static DEFINE_PER_CPU(pte_t *, switcher_pte_pages); | |
59 | #define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu) | |
60 | ||
61 | /*H:320 The page table code is curly enough to need helper functions to keep it | |
62 | * clear and clean. | |
63 | * | |
64 | * There are two functions which return pointers to the shadow (aka "real") | |
65 | * page tables. | |
66 | * | |
67 | * spgd_addr() takes the virtual address and returns a pointer to the top-level | |
68 | * page directory entry (PGD) for that address. Since we keep track of several | |
69 | * page tables, the "i" argument tells us which one we're interested in (it's | |
70 | * usually the current one). */ | |
71 | static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr) | |
72 | { | |
73 | unsigned int index = pgd_index(vaddr); | |
74 | ||
75 | /* We kill any Guest trying to touch the Switcher addresses. */ | |
76 | if (index >= SWITCHER_PGD_INDEX) { | |
77 | kill_guest(cpu, "attempt to access switcher pages"); | |
78 | index = 0; | |
79 | } | |
	/* Return a pointer to the index'th pgd entry for the i'th page table. */
	return &cpu->lg->pgdirs[i].pgdir[index];
}

/* This routine then takes the page directory entry returned above, which
 * contains the address of the page table entry (PTE) page. It then returns a
 * pointer to the PTE entry for the given address. */
static pte_t *spte_addr(pgd_t spgd, unsigned long vaddr)
{
	pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
	/* You should never call this if the PGD entry wasn't valid */
	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
	return &page[(vaddr >> PAGE_SHIFT) % PTRS_PER_PTE];
}

/* These two functions are just like the above two, except they access the
 * Guest page tables. Hence they return a Guest address. */
static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
{
	unsigned int index = vaddr >> (PGDIR_SHIFT);
	return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
}

static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
{
	unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
	return gpage + ((vaddr>>PAGE_SHIFT) % PTRS_PER_PTE) * sizeof(pte_t);
}
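
/* To make that concrete (a made-up example, not a real layout): if the Guest
 * PGD entry says its PTE page lives at guest-physical 0x12000, then for
 * vaddr 0xC0101234 gpte_addr() returns 0x12000 + 0x101 * sizeof(pte_t) =
 * 0x12404, which demand_page() below hands to lgread() to fetch the Guest's
 * PTE. */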
109 | ||
110 | /*H:350 This routine takes a page number given by the Guest and converts it to | |
111 | * an actual, physical page number. It can fail for several reasons: the | |
112 | * virtual address might not be mapped by the Launcher, the write flag is set | |
113 | * and the page is read-only, or the write flag was set and the page was | |
114 | * shared so had to be copied, but we ran out of memory. | |
115 | * | |
116 | * This holds a reference to the page, so release_pte() is careful to | |
117 | * put that back. */ | |
118 | static unsigned long get_pfn(unsigned long virtpfn, int write) | |
119 | { | |
120 | struct page *page; | |
121 | /* This value indicates failure. */ | |
122 | unsigned long ret = -1UL; | |
123 | ||
	/* get_user_pages() is a complex interface: it gets the "struct
	 * vm_area_struct" and "struct page" associated with a range of pages.
	 * It also needs the task's mmap_sem held, and is not very quick.
	 * It returns the number of pages it got. */
	down_read(&current->mm->mmap_sem);
	if (get_user_pages(current, current->mm, virtpfn << PAGE_SHIFT,
			   1, write, 1, &page, NULL) == 1)
		ret = page_to_pfn(page);
	up_read(&current->mm->mmap_sem);
	return ret;
}
135 | ||
136 | /*H:340 Converting a Guest page table entry to a shadow (ie. real) page table | |
137 | * entry can be a little tricky. The flags are (almost) the same, but the | |
138 | * Guest PTE contains a virtual page number: the CPU needs the real page | |
139 | * number. */ | |
140 | static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write) | |
141 | { | |
142 | unsigned long pfn, base, flags; | |
143 | ||
144 | /* The Guest sets the global flag, because it thinks that it is using | |
145 | * PGE. We only told it to use PGE so it would tell us whether it was | |
146 | * flushing a kernel mapping or a userspace mapping. We don't actually | |
147 | * use the global bit, so throw it away. */ | |
148 | flags = (pte_flags(gpte) & ~_PAGE_GLOBAL); | |
149 | ||
150 | /* The Guest's pages are offset inside the Launcher. */ | |
151 | base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE; | |
152 | ||
153 | /* We need a temporary "unsigned long" variable to hold the answer from | |
154 | * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't | |
155 | * fit in spte.pfn. get_pfn() finds the real physical number of the | |
156 | * page, given the virtual number. */ | |
157 | pfn = get_pfn(base + pte_pfn(gpte), write); | |
158 | if (pfn == -1UL) { | |
159 | kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte)); | |
160 | /* When we destroy the Guest, we'll go through the shadow page | |
161 | * tables and release_pte() them. Make sure we don't think | |
162 | * this one is valid! */ | |
163 | flags = 0; | |
164 | } | |
165 | /* Now we assemble our shadow PTE from the page number and flags. */ | |
166 | return pfn_pte(pfn, __pgprot(flags)); | |
167 | } | |
168 | ||
169 | /*H:460 And to complete the chain, release_pte() looks like this: */ | |
170 | static void release_pte(pte_t pte) | |
171 | { | |
172 | /* Remember that get_user_pages() took a reference to the page, in | |
173 | * get_pfn()? We have to put it back now. */ | |
174 | if (pte_flags(pte) & _PAGE_PRESENT) | |
175 | put_page(pfn_to_page(pte_pfn(pte))); | |
176 | } | |
177 | /*:*/ | |
178 | ||
179 | static void check_gpte(struct lg_cpu *cpu, pte_t gpte) | |
180 | { | |
181 | if ((pte_flags(gpte) & _PAGE_PSE) || | |
182 | pte_pfn(gpte) >= cpu->lg->pfn_limit) | |
183 | kill_guest(cpu, "bad page table entry"); | |
184 | } | |
185 | ||
186 | static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd) | |
187 | { | |
188 | if ((pgd_flags(gpgd) & ~_PAGE_TABLE) || | |
189 | (pgd_pfn(gpgd) >= cpu->lg->pfn_limit)) | |
190 | kill_guest(cpu, "bad page directory entry"); | |
191 | } | |
192 | ||
193 | /*H:330 | |
194 | * (i) Looking up a page table entry when the Guest faults. | |
195 | * | |
196 | * We saw this call in run_guest(): when we see a page fault in the Guest, we | |
197 | * come here. That's because we only set up the shadow page tables lazily as | |
198 | * they're needed, so we get page faults all the time and quietly fix them up | |
199 | * and return to the Guest without it knowing. | |
200 | * | |
201 | * If we fixed up the fault (ie. we mapped the address), this routine returns | |
202 | * true. Otherwise, it was a real fault and we need to tell the Guest. */ | |
203 | int demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) | |
204 | { | |
205 | pgd_t gpgd; | |
206 | pgd_t *spgd; | |
207 | unsigned long gpte_ptr; | |
208 | pte_t gpte; | |
209 | pte_t *spte; | |
210 | ||
211 | /* First step: get the top-level Guest page table entry. */ | |
212 | gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t); | |
213 | /* Toplevel not present? We can't map it in. */ | |
214 | if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) | |
215 | return 0; | |
216 | ||
217 | /* Now look at the matching shadow entry. */ | |
218 | spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr); | |
219 | if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) { | |
220 | /* No shadow entry: allocate a new shadow PTE page. */ | |
221 | unsigned long ptepage = get_zeroed_page(GFP_KERNEL); | |
222 | /* This is not really the Guest's fault, but killing it is | |
223 | * simple for this corner case. */ | |
224 | if (!ptepage) { | |
225 | kill_guest(cpu, "out of memory allocating pte page"); | |
226 | return 0; | |
227 | } | |
228 | /* We check that the Guest pgd is OK. */ | |
229 | check_gpgd(cpu, gpgd); | |
230 | /* And we copy the flags to the shadow PGD entry. The page | |
231 | * number in the shadow PGD is the page we just allocated. */ | |
232 | *spgd = __pgd(__pa(ptepage) | pgd_flags(gpgd)); | |
233 | } | |
234 | ||
235 | /* OK, now we look at the lower level in the Guest page table: keep its | |
236 | * address, because we might update it later. */ | |
237 | gpte_ptr = gpte_addr(gpgd, vaddr); | |
238 | gpte = lgread(cpu, gpte_ptr, pte_t); | |
239 | ||
240 | /* If this page isn't in the Guest page tables, we can't page it in. */ | |
241 | if (!(pte_flags(gpte) & _PAGE_PRESENT)) | |
242 | return 0; | |
243 | ||
	/* Check they're not trying to write to a page the Guest wants
	 * read-only: the write flag is bit 1 (value 2) of the fault error
	 * code. */
	if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
		return 0;

	/* User access to a kernel-only page? (the user flag is bit 2,
	 * value 4) */
	if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
		return 0;
252 | ||
253 | /* Check that the Guest PTE flags are OK, and the page number is below | |
254 | * the pfn_limit (ie. not mapping the Launcher binary). */ | |
255 | check_gpte(cpu, gpte); | |
256 | ||
257 | /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */ | |
258 | gpte = pte_mkyoung(gpte); | |
259 | if (errcode & 2) | |
260 | gpte = pte_mkdirty(gpte); | |
261 | ||
262 | /* Get the pointer to the shadow PTE entry we're going to set. */ | |
263 | spte = spte_addr(*spgd, vaddr); | |
264 | /* If there was a valid shadow PTE entry here before, we release it. | |
265 | * This can happen with a write to a previously read-only entry. */ | |
266 | release_pte(*spte); | |
267 | ||
268 | /* If this is a write, we insist that the Guest page is writable (the | |
269 | * final arg to gpte_to_spte()). */ | |
270 | if (pte_dirty(gpte)) | |
271 | *spte = gpte_to_spte(cpu, gpte, 1); | |
272 | else | |
273 | /* If this is a read, don't set the "writable" bit in the page | |
274 | * table entry, even if the Guest says it's writable. That way | |
275 | * we will come back here when a write does actually occur, so | |
276 | * we can update the Guest's _PAGE_DIRTY flag. */ | |
277 | *spte = gpte_to_spte(cpu, pte_wrprotect(gpte), 0); | |
278 | ||
279 | /* Finally, we write the Guest PTE entry back: we've set the | |
280 | * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */ | |
281 | lgwrite(cpu, gpte_ptr, pte_t, gpte); | |
282 | ||
283 | /* The fault is fixed, the page table is populated, the mapping | |
284 | * manipulated, the result returned and the code complete. A small | |
285 | * delay and a trace of alliteration are the only indications the Guest | |
286 | * has that a page fault occurred at all. */ | |
287 | return 1; | |
288 | } | |
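
/* For the record, the other half of this conversation lives in the Host's
 * trap handling: when the Guest takes a page fault, that code does (roughly,
 * a paraphrase rather than a quote):
 *
 *	if (!demand_page(cpu, fault_address, errcode))
 *		reflect the fault into the Guest as trap 14;
 *
 * where fault_address is the %cr2 value saved when the fault happened. */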
289 | ||
290 | /*H:360 | |
291 | * (ii) Making sure the Guest stack is mapped. | |
292 | * | |
293 | * Remember that direct traps into the Guest need a mapped Guest kernel stack. | |
294 | * pin_stack_pages() calls us here: we could simply call demand_page(), but as | |
295 | * we've seen that logic is quite long, and usually the stack pages are already | |
296 | * mapped, so it's overkill. | |
297 | * | |
298 | * This is a quick version which answers the question: is this virtual address | |
299 | * mapped by the shadow page tables, and is it writable? */ | |
300 | static int page_writable(struct lg_cpu *cpu, unsigned long vaddr) | |
301 | { | |
302 | pgd_t *spgd; | |
303 | unsigned long flags; | |
304 | ||
305 | /* Look at the current top level entry: is it present? */ | |
306 | spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr); | |
307 | if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) | |
308 | return 0; | |
309 | ||
310 | /* Check the flags on the pte entry itself: it must be present and | |
311 | * writable. */ | |
312 | flags = pte_flags(*(spte_addr(*spgd, vaddr))); | |
313 | ||
314 | return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW); | |
315 | } | |
316 | ||
317 | /* So, when pin_stack_pages() asks us to pin a page, we check if it's already | |
318 | * in the page tables, and if not, we call demand_page() with error code 2 | |
319 | * (meaning "write"). */ | |
320 | void pin_page(struct lg_cpu *cpu, unsigned long vaddr) | |
321 | { | |
322 | if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2)) | |
323 | kill_guest(cpu, "bad stack page %#lx", vaddr); | |
324 | } | |
325 | ||
326 | /*H:450 If we chase down the release_pgd() code, it looks like this: */ | |
327 | static void release_pgd(struct lguest *lg, pgd_t *spgd) | |
328 | { | |
329 | /* If the entry's not present, there's nothing to release. */ | |
330 | if (pgd_flags(*spgd) & _PAGE_PRESENT) { | |
331 | unsigned int i; | |
332 | /* Converting the pfn to find the actual PTE page is easy: turn | |
333 | * the page number into a physical address, then convert to a | |
334 | * virtual address (easy for kernel pages like this one). */ | |
335 | pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT); | |
336 | /* For each entry in the page, we might need to release it. */ | |
337 | for (i = 0; i < PTRS_PER_PTE; i++) | |
338 | release_pte(ptepage[i]); | |
339 | /* Now we can free the page of PTEs */ | |
340 | free_page((long)ptepage); | |
341 | /* And zero out the PGD entry so we never release it twice. */ | |
342 | *spgd = __pgd(0); | |
343 | } | |
344 | } | |
345 | ||
346 | /*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings() | |
347 | * hypercall and once in new_pgdir() when we re-used a top-level pgdir page. | |
348 | * It simply releases every PTE page from 0 up to the Guest's kernel address. */ | |
349 | static void flush_user_mappings(struct lguest *lg, int idx) | |
350 | { | |
351 | unsigned int i; | |
352 | /* Release every pgd entry up to the kernel's address. */ | |
353 | for (i = 0; i < pgd_index(lg->kernel_address); i++) | |
354 | release_pgd(lg, lg->pgdirs[idx].pgdir + i); | |
355 | } | |
356 | ||
357 | /*H:440 (v) Flushing (throwing away) page tables, | |
358 | * | |
359 | * The Guest has a hypercall to throw away the page tables: it's used when a | |
360 | * large number of mappings have been changed. */ | |
361 | void guest_pagetable_flush_user(struct lg_cpu *cpu) | |
362 | { | |
363 | /* Drop the userspace part of the current page table. */ | |
364 | flush_user_mappings(cpu->lg, cpu->cpu_pgd); | |
365 | } | |
366 | /*:*/ | |
367 | ||
368 | /* We walk down the guest page tables to get a guest-physical address */ | |
369 | unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr) | |
370 | { | |
371 | pgd_t gpgd; | |
372 | pte_t gpte; | |
373 | ||
374 | /* First step: get the top-level Guest page table entry. */ | |
375 | gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t); | |
376 | /* Toplevel not present? We can't map it in. */ | |
377 | if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) | |
378 | kill_guest(cpu, "Bad address %#lx", vaddr); | |
379 | ||
380 | gpte = lgread(cpu, gpte_addr(gpgd, vaddr), pte_t); | |
381 | if (!(pte_flags(gpte) & _PAGE_PRESENT)) | |
382 | kill_guest(cpu, "Bad address %#lx", vaddr); | |
383 | ||
384 | return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK); | |
385 | } | |
386 | ||
387 | /* We keep several page tables. This is a simple routine to find the page | |
388 | * table (if any) corresponding to this top-level address the Guest has given | |
389 | * us. */ | |
390 | static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable) | |
391 | { | |
392 | unsigned int i; | |
393 | for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) | |
394 | if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable) | |
395 | break; | |
396 | return i; | |
397 | } | |
398 | ||
399 | /*H:435 And this is us, creating the new page directory. If we really do | |
400 | * allocate a new one (and so the kernel parts are not there), we set | |
401 | * blank_pgdir. */ | |
402 | static unsigned int new_pgdir(struct lg_cpu *cpu, | |
403 | unsigned long gpgdir, | |
404 | int *blank_pgdir) | |
405 | { | |
406 | unsigned int next; | |
407 | ||
408 | /* We pick one entry at random to throw out. Choosing the Least | |
409 | * Recently Used might be better, but this is easy. */ | |
410 | next = random32() % ARRAY_SIZE(cpu->lg->pgdirs); | |
411 | /* If it's never been allocated at all before, try now. */ | |
412 | if (!cpu->lg->pgdirs[next].pgdir) { | |
413 | cpu->lg->pgdirs[next].pgdir = | |
414 | (pgd_t *)get_zeroed_page(GFP_KERNEL); | |
415 | /* If the allocation fails, just keep using the one we have */ | |
416 | if (!cpu->lg->pgdirs[next].pgdir) | |
417 | next = cpu->cpu_pgd; | |
418 | else | |
419 | /* This is a blank page, so there are no kernel | |
420 | * mappings: caller must map the stack! */ | |
421 | *blank_pgdir = 1; | |
422 | } | |
423 | /* Record which Guest toplevel this shadows. */ | |
424 | cpu->lg->pgdirs[next].gpgdir = gpgdir; | |
425 | /* Release all the non-kernel mappings. */ | |
426 | flush_user_mappings(cpu->lg, next); | |
427 | ||
428 | return next; | |
429 | } | |
430 | ||
431 | /*H:430 (iv) Switching page tables | |
432 | * | |
 * Now we've seen all the page table setting and manipulation, let's see what
 * happens when the Guest changes page tables (ie. changes the top-level
 * pgdir). This occurs on almost every context switch. */
void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
{
	int newpgdir, repin = 0;

	/* Look to see if we have this one already. */
	newpgdir = find_pgdir(cpu->lg, pgtable);
	/* If not, we allocate or mug an existing one: if it's a fresh one,
	 * repin gets set to 1. */
	if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
		newpgdir = new_pgdir(cpu, pgtable, &repin);
	/* Change the current pgd index to the new one. */
	cpu->cpu_pgd = newpgdir;
	/* If it was completely blank, we map in the Guest kernel stack */
	if (repin)
		pin_stack_pages(cpu);
}

/*H:470 Finally, a routine which throws away everything: all PGD entries in all
 * the shadow page tables, including the Guest's kernel mappings. This is used
 * when we destroy the Guest. */
static void release_all_pagetables(struct lguest *lg)
{
	unsigned int i, j;

	/* Every shadow pagetable this Guest has */
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		if (lg->pgdirs[i].pgdir)
			/* Every PGD entry except the Switcher at the top */
			for (j = 0; j < SWITCHER_PGD_INDEX; j++)
				release_pgd(lg, lg->pgdirs[i].pgdir + j);
}

/* We also throw away everything when a Guest tells us it's changed a kernel
 * mapping. Since kernel mappings are in every page table, it's easiest to
 * throw them all away. This traps the Guest in amber for a while as
 * everything faults back in, but it's rare. */
void guest_pagetable_clear_all(struct lg_cpu *cpu)
{
	release_all_pagetables(cpu->lg);
	/* We need the Guest kernel stack mapped again. */
	pin_stack_pages(cpu);
}
/*:*/
/*M:009 Since we throw away all mappings when a kernel mapping changes, our
 * performance sucks for guests using highmem. In fact, a guest with
 * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
 * usually slower than a Guest with less memory.
 *
 * This, of course, cannot be fixed. It would take some kind of... well, I
 * don't know, but the term "puissant code-fu" comes to mind. :*/

/*H:420 This is the routine which actually sets the page table entry for the
 * "idx"'th shadow page table.
 *
 * Normally, we can just throw out the old entry and replace it with 0: if they
 * use it demand_page() will put the new entry in. We need to do this anyway:
 * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
 * is read from, and _PAGE_DIRTY when it's written to.
 *
 * But Avi Kivity pointed out that most Operating Systems (Linux included) set
 * these bits on PTEs immediately anyway. This is done to save the CPU from
 * having to update them, but it helps us the same way: if they set
 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
 */
static void do_set_pte(struct lg_cpu *cpu, int idx,
		       unsigned long vaddr, pte_t gpte)
{
	/* Look up the matching shadow page directory entry. */
	pgd_t *spgd = spgd_addr(cpu, idx, vaddr);

	/* If the top level isn't present, there's no entry to update. */
	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
		/* Otherwise, we start by releasing the existing entry. */
		pte_t *spte = spte_addr(*spgd, vaddr);
		release_pte(*spte);

		/* If they're setting this entry as dirty or accessed, we might
		 * as well put that entry they've given us in now. This shaves
		 * 10% off a copy-on-write micro-benchmark. */
		if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
			check_gpte(cpu, gpte);
			*spte = gpte_to_spte(cpu, gpte,
					     pte_flags(gpte) & _PAGE_DIRTY);
		} else
			/* Otherwise kill it and we can demand_page() it in
			 * later. */
			*spte = __pte(0);
	}
}

/*H:410 Updating a PTE entry is a little trickier.
 *
 * We keep track of several different page tables (the Guest uses one for each
 * process, so it makes sense to cache at least a few). Each of these have
 * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
 * all processes. So when the page table above that address changes, we update
 * all the page tables, not just the current one. This is rare.
 *
 * The benefit is that when we have to track a new page table, we can keep
 * all the kernel mappings. This speeds up context switch immensely. */
void guest_set_pte(struct lg_cpu *cpu,
		   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
{
	/* Kernel mappings must be changed on all top levels. Slow, but
	 * doesn't happen often. */
	if (vaddr >= cpu->lg->kernel_address) {
		unsigned int i;
		for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
			if (cpu->lg->pgdirs[i].pgdir)
				do_set_pte(cpu, i, vaddr, gpte);
	} else {
		/* Is this page table one we have a shadow for? */
		int pgdir = find_pgdir(cpu->lg, gpgdir);
		if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
			/* If so, do the update. */
			do_set_pte(cpu, pgdir, vaddr, gpte);
	}
}

/*H:400
 * (iii) Setting up a page table entry when the Guest tells us one has changed.
 *
 * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
 * with the other side of page tables while we're here: what happens when the
 * Guest asks for a page table to be updated?
 *
 * We already saw that demand_page() will fill in the shadow page tables when
 * needed, so we can simply remove shadow page table entries whenever the Guest
 * tells us they've changed. When the Guest tries to use the new entry it will
 * fault and demand_page() will fix it up.
 *
 * So with that in mind here's our code to update a (top-level) PGD entry:
 */
void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx)
{
	int pgdir;

	/* The kernel seems to try to initialize this early on: we ignore its
	 * attempts to map over the Switcher. */
	if (idx >= SWITCHER_PGD_INDEX)
		return;

	/* If they're talking about a page table we have a shadow for... */
	pgdir = find_pgdir(lg, gpgdir);
	if (pgdir < ARRAY_SIZE(lg->pgdirs))
		/* ... throw it away. */
		release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
}

/*H:500 (vii) Setting up the page tables initially.
 *
 * When a Guest is first created, the Launcher tells us where the toplevel of
 * its first page table is. We set some things up here: */
int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
{
	/* We start on the first shadow page table, and give it a blank PGD
	 * page. */
	lg->pgdirs[0].gpgdir = pgtable;
	lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
	if (!lg->pgdirs[0].pgdir)
		return -ENOMEM;
	lg->cpus[0].cpu_pgd = 0;
	return 0;
}

/* When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
void page_table_guest_data_init(struct lg_cpu *cpu)
{
	/* We get the kernel address: above this is all kernel memory. */
	if (get_user(cpu->lg->kernel_address,
		     &cpu->lg->lguest_data->kernel_address)
	    /* We tell the Guest that it can't use the top 4MB of virtual
	     * addresses used by the Switcher. */
	    || put_user(4U*1024*1024, &cpu->lg->lguest_data->reserve_mem)
	    || put_user(cpu->lg->pgdirs[0].gpgdir, &cpu->lg->lguest_data->pgdir))
		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);

	/* In flush_user_mappings() we loop from 0 to
	 * "pgd_index(lg->kernel_address)". This assumes it won't hit the
	 * Switcher mappings, so check that now. */
	if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX)
		kill_guest(cpu, "bad kernel address %#lx",
			   cpu->lg->kernel_address);
}

/* When a Guest dies, our cleanup is fairly simple. */
void free_guest_pagetable(struct lguest *lg)
{
	unsigned int i;

	/* Throw away all page table pages. */
	release_all_pagetables(lg);
	/* Now free the top levels: free_page() can handle 0 just fine. */
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		free_page((long)lg->pgdirs[i].pgdir);
}

/*H:480 (vi) Mapping the Switcher when the Guest is about to run.
 *
 * The Switcher and the two pages for this CPU need to be visible in the
 * Guest (and not the pages for other CPUs). We have the appropriate PTE pages
 * for each CPU already set up, we just need to hook them in now we know which
 * Guest is about to run on this CPU. */
void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
{
	pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
	pgd_t switcher_pgd;
	pte_t regs_pte;
	unsigned long pfn;

	/* Make the last PGD entry for this Guest point to the Switcher's PTE
	 * page for this CPU (with appropriate flags). */
	switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL);

	cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;

	/* We also change the Switcher PTE page. When we're running the Guest,
	 * we want the Guest's "regs" page to appear where the first Switcher
	 * page for this CPU is. This is an optimization: when the Switcher
	 * saves the Guest registers, it saves them into the first page of this
	 * CPU's "struct lguest_pages": if we make sure the Guest's register
	 * page is already mapped there, we don't have to copy them out
	 * again. */
	pfn = __pa(cpu->regs_page) >> PAGE_SHIFT;
	regs_pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL));
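	/* This PTE page covers the top 4MB, so the slot for any address up
	 * there is just (address / PAGE_SIZE) % PTRS_PER_PTE; "pages" is the
	 * Switcher-mapped address of this CPU's "struct lguest_pages". */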
	switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTRS_PER_PTE] = regs_pte;
}
/*:*/

static void free_switcher_pte_pages(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		free_page((long)switcher_pte_page(i));
}

/*H:520 Setting up the Switcher PTE page for a given CPU is fairly easy, given
 * the CPU number and the "struct page"s for the Switcher code itself.
 *
 * Currently the Switcher is less than a page long, so "pages" is always 1. */
static __init void populate_switcher_pte_page(unsigned int cpu,
					      struct page *switcher_page[],
					      unsigned int pages)
{
	unsigned int i;
	pte_t *pte = switcher_pte_page(cpu);

	/* The first entries are easy: they map the Switcher code. */
	for (i = 0; i < pages; i++) {
		pte[i] = mk_pte(switcher_page[i],
				__pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
	}

	/* The only other thing we map is this CPU's pair of pages. */
	i = pages + cpu*2;
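	/* With "pages" == 1, that means entry 0 is the Switcher code, CPU 0
	 * uses entries 1 and 2, CPU 1 uses entries 3 and 4, and so on. */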
693 | ||
694 | /* First page (Guest registers) is writable from the Guest */ | |
695 | pte[i] = pfn_pte(page_to_pfn(switcher_page[i]), | |
696 | __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)); | |
697 | ||
698 | /* The second page contains the "struct lguest_ro_state", and is | |
699 | * read-only. */ | |
700 | pte[i+1] = pfn_pte(page_to_pfn(switcher_page[i+1]), | |
701 | __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)); | |
702 | } | |
703 | ||
704 | /* We've made it through the page table code. Perhaps our tired brains are | |
705 | * still processing the details, or perhaps we're simply glad it's over. | |
706 | * | |
707 | * If nothing else, note that all this complexity in juggling shadow page | |
708 | * tables in sync with the Guest's page tables is for one reason: for most | |
709 | * Guests this page table dance determines how bad performance will be. This | |
710 | * is why Xen uses exotic direct Guest pagetable manipulation, and why both | |
711 | * Intel and AMD have implemented shadow page table support directly into | |
712 | * hardware. | |
713 | * | |
714 | * There is just one file remaining in the Host. */ | |
715 | ||
716 | /*H:510 At boot or module load time, init_pagetables() allocates and populates | |
717 | * the Switcher PTE page for each CPU. */ | |
718 | __init int init_pagetables(struct page **switcher_page, unsigned int pages) | |
719 | { | |
720 | unsigned int i; | |
721 | ||
722 | for_each_possible_cpu(i) { | |
723 | switcher_pte_page(i) = (pte_t *)get_zeroed_page(GFP_KERNEL); | |
724 | if (!switcher_pte_page(i)) { | |
725 | free_switcher_pte_pages(); | |
726 | return -ENOMEM; | |
727 | } | |
728 | populate_switcher_pte_page(i, switcher_page, pages); | |
729 | } | |
730 | return 0; | |
731 | } | |
732 | /*:*/ | |
733 | ||
734 | /* Cleaning up simply involves freeing the PTE page for each CPU. */ | |
735 | void free_pagetables(void) | |
736 | { | |
737 | free_switcher_pte_pages(); | |
738 | } |