1 /*P:010
2 * A hypervisor allows multiple Operating Systems to run on a single machine.
3 * To quote David Wheeler: "Any problem in computer science can be solved with
4 * another layer of indirection."
5 *
6 * We keep things simple in two ways. First, we start with a normal Linux
7 * kernel and insert a module (lg.ko) which allows us to run other Linux
8 * kernels the same way we'd run processes. We call the first kernel the Host,
9 * and the others the Guests. The program which sets up and configures Guests
10 * (such as the example in Documentation/lguest/lguest.c) is called the
11 * Launcher.
12 *
13 * Secondly, we only run specially modified Guests, not normal kernels: setting
14 * CONFIG_LGUEST_GUEST to "y" compiles this file into the kernel so it knows
15 * how to be a Guest at boot time. This means that you can use the same kernel
16 * you boot normally (ie. as a Host) as a Guest.
17 *
18 * These Guests know that they cannot do privileged operations, such as disable
19 * interrupts, and that they have to ask the Host to do such things explicitly.
20 * This file consists of all the replacements for such low-level native
21 * hardware operations: these special Guest versions call the Host.
22 *
23 * So how does the kernel know it's a Guest? We'll see that later, but let's
24 * just say that we end up here where we replace the native functions in the
25 * various "paravirt" structures with our Guest versions, then boot like normal. :*/
26
27 /*
28 * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation.
29 *
30 * This program is free software; you can redistribute it and/or modify
31 * it under the terms of the GNU General Public License as published by
32 * the Free Software Foundation; either version 2 of the License, or
33 * (at your option) any later version.
34 *
35 * This program is distributed in the hope that it will be useful, but
36 * WITHOUT ANY WARRANTY; without even the implied warranty of
37 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
38 * NON INFRINGEMENT. See the GNU General Public License for more
39 * details.
40 *
41 * You should have received a copy of the GNU General Public License
42 * along with this program; if not, write to the Free Software
43 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
44 */
45 #include <linux/kernel.h>
46 #include <linux/start_kernel.h>
47 #include <linux/string.h>
48 #include <linux/console.h>
49 #include <linux/screen_info.h>
50 #include <linux/irq.h>
51 #include <linux/interrupt.h>
52 #include <linux/clocksource.h>
53 #include <linux/clockchips.h>
54 #include <linux/lguest.h>
55 #include <linux/lguest_launcher.h>
56 #include <linux/virtio_console.h>
57 #include <linux/pm.h>
58 #include <asm/apic.h>
59 #include <asm/lguest.h>
60 #include <asm/paravirt.h>
61 #include <asm/param.h>
62 #include <asm/page.h>
63 #include <asm/pgtable.h>
64 #include <asm/desc.h>
65 #include <asm/setup.h>
66 #include <asm/e820.h>
67 #include <asm/mce.h>
68 #include <asm/io.h>
69 #include <asm/i387.h>
70 #include <asm/reboot.h> /* for struct machine_ops */
71
72 /*G:010 Welcome to the Guest!
73 *
74 * The Guest in our tale is a simple creature: identical to the Host but
75 * behaving in simplified but equivalent ways. In particular, the Guest is the
76 * same kernel as the Host (or at least, built from the same source code). :*/
77
78 struct lguest_data lguest_data = {
79 .hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF },
80 .noirq_start = (u32)lguest_noirq_start,
81 .noirq_end = (u32)lguest_noirq_end,
82 .kernel_address = PAGE_OFFSET,
83 .blocked_interrupts = { 1 }, /* Block timer interrupts */
84 .syscall_vec = SYSCALL_VECTOR,
85 };
86
87 /*G:037 async_hcall() is pretty simple: I'm quite proud of it really. We have a
88 * ring buffer of stored hypercalls which the Host will run through next time we
89 * do a normal hypercall. Each entry in the ring has 4 slots for the hypercall
90 * arguments, and a "hcall_status" word which is 0 if the call is ready to go,
91 * and 255 once the Host has finished with it.
92 *
93 * If we come around to a slot which hasn't been finished, then the table is
94 * full and we just make the hypercall directly. This has the nice side
95 * effect of causing the Host to run all the stored calls in the ring buffer
96 * which empties it for next time! */
97 static void async_hcall(unsigned long call, unsigned long arg1,
98 unsigned long arg2, unsigned long arg3)
99 {
100 /* Note: This code assumes we're uniprocessor. */
101 static unsigned int next_call;
102 unsigned long flags;
103
104 /* Disable interrupts if not already disabled: we don't want an
105 * interrupt handler making a hypercall while we're already doing
106 * one! */
107 local_irq_save(flags);
108 if (lguest_data.hcall_status[next_call] != 0xFF) {
109 /* Table full, so do normal hcall which will flush table. */
110 hcall(call, arg1, arg2, arg3);
111 } else {
112 lguest_data.hcalls[next_call].arg0 = call;
113 lguest_data.hcalls[next_call].arg1 = arg1;
114 lguest_data.hcalls[next_call].arg2 = arg2;
115 lguest_data.hcalls[next_call].arg3 = arg3;
116 /* Arguments must all be written before we mark it to go */
117 wmb();
118 lguest_data.hcall_status[next_call] = 0;
119 if (++next_call == LHCALL_RING_SIZE)
120 next_call = 0;
121 }
122 local_irq_restore(flags);
123 }
124
125 /*G:035 Notice the lazy_hcall() above, rather than hcall(). This is our first
126 * real optimization trick!
127 *
128 * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
129 * them as a batch when lazy_mode is eventually turned off. Because hypercalls
130 * are reasonably expensive, batching them up makes sense. For example, a
131 * large munmap might update dozens of page table entries: that code calls
132 * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
133 * lguest_leave_lazy_mmu_mode().
134 *
135 * So, when we're in lazy mode, we call async_hcall() to store the call for
136 * future processing: */
137 static void lazy_hcall(unsigned long call,
138 unsigned long arg1,
139 unsigned long arg2,
140 unsigned long arg3)
141 {
142 if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
143 hcall(call, arg1, arg2, arg3);
144 else
145 async_hcall(call, arg1, arg2, arg3);
146 }
147
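/* To see how the batching described above pays off, here is a rough sketch (an
 * illustrative aside, not part of the original file) of what a caller in the
 * generic mm code looks like: every set_pte_at() below ends up in
 * lguest_set_pte_at() and hence lazy_hcall(), and the final leave reaches the
 * lguest_leave_lazy_mmu_mode() defined just below, flushing the whole ring
 * with one do-nothing hypercall. */
#if 0	/* illustration only: not compiled */
static void example_batched_pte_updates(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, int count)
{
	int i;

	arch_enter_lazy_mmu_mode();	/* hypercalls are queued from here on */
	for (i = 0; i < count; i++)
		set_pte_at(mm, addr + i * PAGE_SIZE, ptep + i, pte);
	arch_leave_lazy_mmu_mode();	/* one hypercall flushes the stored calls */
}
#endif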
148 /* When lazy mode is turned off, we issue the do-nothing hypercall to flush any
149 * stored calls, then reset the per-cpu lazy mode variable. */
150 static void lguest_leave_lazy_mmu_mode(void)
151 {
152 hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
153 paravirt_leave_lazy_mmu();
154 }
155
156 static void lguest_leave_lazy_cpu_mode(void)
157 {
158 hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
159 paravirt_leave_lazy_cpu();
160 }
161
162 /*G:033
163 * After that diversion we return to our first native-instruction
164 * replacements: four functions for interrupt control.
165 *
166 * The simplest way of implementing these would be to have "turn interrupts
167 * off" and "turn interrupts on" hypercalls. Unfortunately, this is too slow:
168 * these are by far the most commonly called functions of those we override.
169 *
170 * So instead we keep an "irq_enabled" field inside our "struct lguest_data",
171 * which the Guest can update with a single instruction. The Host knows to
172 * check there before it tries to deliver an interrupt.
173 */
174
175 /* save_flags() is expected to return the processor state (ie. "flags"). The
176 * flags word contains all kinds of stuff, but in practice Linux only cares
177 * about the interrupt flag. Our "save_flags()" just returns that. */
178 static unsigned long save_fl(void)
179 {
180 return lguest_data.irq_enabled;
181 }
182 PV_CALLEE_SAVE_REGS_THUNK(save_fl);
183
184 /* restore_flags() just sets the flags back to the value given. */
185 static void restore_fl(unsigned long flags)
186 {
187 lguest_data.irq_enabled = flags;
188 }
189 PV_CALLEE_SAVE_REGS_THUNK(restore_fl);
190
191 /* Interrupts go off... */
192 static void irq_disable(void)
193 {
194 lguest_data.irq_enabled = 0;
195 }
196 PV_CALLEE_SAVE_REGS_THUNK(irq_disable);
197
198 /* Interrupts go on... */
199 static void irq_enable(void)
200 {
201 lguest_data.irq_enabled = X86_EFLAGS_IF;
202 }
203 PV_CALLEE_SAVE_REGS_THUNK(irq_enable);
204
205 /*:*/
206 /*M:003 Note that we don't check for outstanding interrupts when we re-enable
207 * them (or when we unmask an interrupt). This seems to work for the moment,
208 * since interrupts are rare and we'll just get the interrupt on the next timer
209 * tick, but now that we can run with CONFIG_NO_HZ, we should revisit this. One way
210 * would be to put the "irq_enabled" field in a page by itself, and have the
211 * Host write-protect it when an interrupt comes in while irqs are disabled.
212 * There will then be a page fault as soon as interrupts are re-enabled.
213 *
214 * A better method is to implement soft interrupt disable generally for x86:
215 * instead of disabling interrupts, we set a flag. If an interrupt does come
216 * in, we then disable them for real. This is uncommon, so we could simply use
217 * a hypercall for interrupt control and not worry about efficiency. :*/
218
219 /*G:034
220 * The Interrupt Descriptor Table (IDT).
221 *
222 * The IDT tells the processor what to do when an interrupt comes in. Each
223 * entry in the table is a 64-bit descriptor: this holds the privilege level,
224 * address of the handler, and... well, who cares? The Guest just asks the
225 * Host to make the change anyway, because the Host controls the real IDT.
226 */
227 static void lguest_write_idt_entry(gate_desc *dt,
228 int entrynum, const gate_desc *g)
229 {
230 /* The gate_desc structure is 8 bytes long: we hand it to the Host in
231 * two 32-bit chunks. The whole 32-bit kernel used to hand descriptors
232 * around like this; typesafety wasn't a big concern in Linux's early
233 * years. */
234 u32 *desc = (u32 *)g;
235 /* Keep the local copy up to date. */
236 native_write_idt_entry(dt, entrynum, g);
237 /* Tell Host about this new entry. */
238 hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]);
239 }
240
241 /* Changing to a different IDT is very rare: we keep the IDT up-to-date every
242 * time it is written, so we can simply loop through all entries and tell the
243 * Host about them. */
244 static void lguest_load_idt(const struct desc_ptr *desc)
245 {
246 unsigned int i;
247 struct desc_struct *idt = (void *)desc->address;
248
249 for (i = 0; i < (desc->size+1)/8; i++)
250 hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b);
251 }
252
253 /*
254 * The Global Descriptor Table.
255 *
256 * The Intel architecture defines another table, called the Global Descriptor
257 * Table (GDT). You tell the CPU where it is (and its size) using the "lgdt"
258 * instruction, and then several other instructions refer to entries in the
259 * table. There are three entries which the Switcher needs, so the Host simply
260 * controls the entire thing and the Guest asks it to make changes using the
261 * LOAD_GDT hypercall.
262 *
263 * This is the opposite of the IDT code where we have a LOAD_IDT_ENTRY
264 * hypercall and use that repeatedly to load a new IDT. I don't think it
265 * really matters, but wouldn't it be nice if they were the same? Wouldn't
266 * it be even better if you were the one to send the patch to fix it?
267 */
268 static void lguest_load_gdt(const struct desc_ptr *desc)
269 {
270 BUG_ON((desc->size+1)/8 != GDT_ENTRIES);
271 hcall(LHCALL_LOAD_GDT, __pa(desc->address), GDT_ENTRIES, 0);
272 }
273
274 /* For a single GDT entry which changes, we do the lazy thing: alter our GDT,
275 * then tell the Host to reload the entire thing. This operation is so rare
276 * that this naive implementation is reasonable. */
277 static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
278 const void *desc, int type)
279 {
280 native_write_gdt_entry(dt, entrynum, desc, type);
281 hcall(LHCALL_LOAD_GDT, __pa(dt), GDT_ENTRIES, 0);
282 }
283
284 /* OK, I lied. There are three "thread local storage" GDT entries which change
285 * on every context switch (these three entries are how glibc implements
286 * __thread variables). So we have a hypercall specifically for this case. */
287 static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
288 {
289 /* There's one problem which normal hardware doesn't have: the Host
290 * can't handle us removing entries we're currently using. So we clear
291 * the GS register here: if it's needed it'll be reloaded anyway. */
292 lazy_load_gs(0);
293 lazy_hcall(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu, 0);
294 }
295
296 /*G:038 That's enough excitement for now, back to ploughing through each of
297 * the different pv_ops structures (we're about 1/3 of the way through).
298 *
299 * This is the Local Descriptor Table, another weird Intel thingy. Linux only
300 * uses this for some strange applications like Wine. We don't do anything
301 * here, so they'll get an informative and friendly Segmentation Fault. */
302 static void lguest_set_ldt(const void *addr, unsigned entries)
303 {
304 }
305
306 /* This loads a GDT entry into the "Task Register": that entry points to a
307 * structure called the Task State Segment. Some comments scattered through the
308 * kernel code indicate that this was used for task switching in ages past, along
309 * with blood sacrifice and astrology.
310 *
311 * Now there's nothing interesting in here that we don't get told elsewhere.
312 * But the native version uses the "ltr" instruction, which makes the Host
313 * complain to the Guest about a Segmentation Fault and it'll oops. So we
314 * override the native version with a do-nothing version. */
315 static void lguest_load_tr_desc(void)
316 {
317 }
318
319 /* The "cpuid" instruction is a way of querying both the CPU identity
320 * (manufacturer, model, etc) and its features. It was introduced before the
321 * Pentium in 1993 and keeps getting extended by Intel, AMD and others.
322 * As you might imagine, after a decade and a half of this treatment, it is now a
323 * giant ball of hair. Its entry in the current Intel manual runs to 28 pages.
324 *
325 * This instruction even has its own Wikipedia entry. The Wikipedia entry
326 * has been translated into 4 languages. I am not making this up!
327 *
328 * We could get funky here and identify ourselves as "GenuineLguest", but
329 * instead we just use the real "cpuid" instruction. Then I pretty much turned
330 * off feature bits until the Guest booted. (Don't say that: you'll damage
331 * lguest sales!) Shut up, inner voice! (Hey, just pointing out that this is
332 * hardly future-proof.) No one's listening! They don't like you anyway,
333 * parenthetic weirdo!
334 *
335 * Replacing the cpuid so we can turn features off is great for the kernel, but
336 * anyone (including userspace) can just use the raw "cpuid" instruction and
337 * the Host won't even notice since it isn't privileged. So we try not to get
338 * too worked up about it. */
339 static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
340 unsigned int *cx, unsigned int *dx)
341 {
342 int function = *ax;
343
344 native_cpuid(ax, bx, cx, dx);
345 switch (function) {
346 case 1: /* Basic feature request. */
347 /* We only allow the kernel to see SSE3, CMPXCHG16B and SSSE3 */
348 *cx &= 0x00002201;
349 /* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, TSC, FPU. */
350 *dx &= 0x07808111;
351 /* The Host can do a nice optimization if it knows that the
352 * kernel mappings (addresses above 0xC0000000 or whatever
353 * PAGE_OFFSET is set to) haven't changed. But Linux calls
354 * flush_tlb_user() for both user and kernel mappings unless
355 * the Page Global Enable (PGE) feature bit is set. */
356 *dx |= 0x00002000;
357 /* We also lie, and say we're family id 5. 6 or greater
358 * leads to a rdmsr in early_init_intel which we can't handle.
359 * Family ID is returned as bits 8-11 in ax.
360 *ax &= 0xFFFFF0FF;
361 *ax |= 0x00000500;
362 break;
363 case 0x80000000:
364 /* Futureproof this a little: if they ask how much extended
365 * processor information there is, limit it to known fields. */
366 if (*ax > 0x80000008)
367 *ax = 0x80000008;
368 break;
369 }
370 }
371
372 /* Intel has four control registers, imaginatively named cr0, cr2, cr3 and cr4.
373 * I assume there's a cr1, but it hasn't bothered us yet, so we'll not bother
374 * it. The Host needs to know when the Guest wants to change them, so we have
375 * a whole series of functions like read_cr0() and write_cr0().
376 *
377 * We start with cr0. cr0 allows you to turn on and off all kinds of basic
378 * features, but Linux only really cares about one: the horrifically-named Task
379 * Switched (TS) bit at bit 3 (ie. 8)
380 *
381 * What does the TS bit do? Well, it causes the CPU to trap (interrupt 7) if
382 * the floating point unit is used. Which allows us to restore FPU state
383 * lazily after a task switch, and Linux uses that gratefully, but wouldn't a
384 * name like "FPUTRAP bit" be a little less cryptic?
385 *
386 * We store cr0 locally because the Host never changes it. The Guest sometimes
387 * wants to read it and we'd prefer not to bother the Host unnecessarily. */
388 static unsigned long current_cr0;
389 static void lguest_write_cr0(unsigned long val)
390 {
391 lazy_hcall(LHCALL_TS, val & X86_CR0_TS, 0, 0);
392 current_cr0 = val;
393 }
394
395 static unsigned long lguest_read_cr0(void)
396 {
397 return current_cr0;
398 }
399
400 /* Intel provided a special instruction to clear the TS bit for people too cool
401 * to use write_cr0() to do it. This "clts" instruction is faster, because all
402 * the vowels have been optimized out. */
403 static void lguest_clts(void)
404 {
405 lazy_hcall(LHCALL_TS, 0, 0, 0);
406 current_cr0 &= ~X86_CR0_TS;
407 }
408
409 /* cr2 is the virtual address of the last page fault, which the Guest only ever
410 * reads. The Host kindly writes this into our "struct lguest_data", so we
411 * just read it out of there. */
412 static unsigned long lguest_read_cr2(void)
413 {
414 return lguest_data.cr2;
415 }
416
417 /* See lguest_set_pte() below. */
418 static bool cr3_changed = false;
419
420 /* cr3 is the current toplevel pagetable page: the principle is the same as
421 * cr0. Keep a local copy, and tell the Host when it changes. The only
422 * difference is that our local copy is in lguest_data because the Host needs
423 * to set it upon our initial hypercall. */
424 static void lguest_write_cr3(unsigned long cr3)
425 {
426 lguest_data.pgdir = cr3;
427 lazy_hcall(LHCALL_NEW_PGTABLE, cr3, 0, 0);
428 cr3_changed = true;
429 }
430
431 static unsigned long lguest_read_cr3(void)
432 {
433 return lguest_data.pgdir;
434 }
435
436 /* cr4 is used to enable and disable PGE, but we don't care. */
437 static unsigned long lguest_read_cr4(void)
438 {
439 return 0;
440 }
441
442 static void lguest_write_cr4(unsigned long val)
443 {
444 }
445
446 /*
447 * Page Table Handling.
448 *
449 * Now would be a good time to take a rest and grab a coffee or similarly
450 * relaxing stimulant. The easy parts are behind us, and the trek gradually
451 * winds uphill from here.
452 *
453 * Quick refresher: memory is divided into "pages" of 4096 bytes each. The CPU
454 * maps virtual addresses to physical addresses using "page tables". We could
455 * use one huge index of 1 million entries: each address is 4 bytes, so that's
456 * 1024 pages just to hold the page tables. But since most virtual addresses
457 * are unused, we use a two level index which saves space. The cr3 register
458 * contains the physical address of the top level "page directory" page, which
459 * contains physical addresses of up to 1024 second-level pages. Each of these
460 * second level pages contains up to 1024 physical addresses of actual pages,
461 * or Page Table Entries (PTEs).
462 *
463 * Here's a diagram, where arrows indicate physical addresses:
464 *
465 *   cr3 ---> +------------+
466 *            |   ----------------->+-------------+
467 *            |            |        |   PADDR1    |
468 *            | Top-level  |        |   PADDR2    |
469 *            | (PMD) page |        |             |
470 *            |            |        | Lower-level |
471 *            |            |        | (PTE) page  |
472 *            |            |        |             |
473 *               ....                    ....
474 *
475 * So to convert a virtual address to a physical address, we look up the top
476 * level, which points us to the second level, which gives us the physical
477 * address of that page. If the top level entry was not present, or the second
478 * level entry was not present, then the virtual address is invalid (we
479 * say "the page was not mapped").
480 *
481 * Put another way, a 32-bit virtual address is divided up like so:
482 *
483 * 1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
484 * |<---- 10 bits ---->|<---- 10 bits ---->|<------ 12 bits ------>|
485 *    Index into top     Index into second      Offset within page
486 *  page directory page     pagetable page
487 *
488 * The kernel spends a lot of time changing both the top-level page directory
489 * and lower-level pagetable pages. The Guest doesn't know physical addresses,
490 * so while it maintains these page tables exactly like normal, it also needs
491 * to keep the Host informed whenever it makes a change: the Host will create
492 * the real page tables based on the Guests'.
493 */
494
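/* As an illustrative aside (not part of the original lguest code), here is
 * roughly how those 10/10/12 bits are pulled out of a 32-bit virtual address
 * on a two-level x86 pagetable. The helper is a hypothetical sketch; the
 * kernel's real pgd_index()/pte_index() macros do the same job. */
#if 0	/* illustration only: not compiled */
static void example_split_vaddr(unsigned long vaddr)
{
	unsigned long pgd_idx = vaddr >> 22;		/* top 10 bits */
	unsigned long pte_idx = (vaddr >> 12) & 0x3FF;	/* middle 10 bits */
	unsigned long offset  = vaddr & 0xFFF;		/* bottom 12 bits */

	printk(KERN_DEBUG "vaddr %#lx -> pgd %lu, pte %lu, offset %#lx\n",
	       vaddr, pgd_idx, pte_idx, offset);
}
#endif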
495 /* The Guest calls this to set a second-level entry (pte), ie. to map a page
496 * into a process' address space. We set the entry then tell the Host the
497 * toplevel and address this corresponds to. The Guest uses one pagetable per
498 * process, so we need to tell the Host which one we're changing (mm->pgd). */
499 static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
500 pte_t *ptep, pte_t pteval)
501 {
502 *ptep = pteval;
503 lazy_hcall(LHCALL_SET_PTE, __pa(mm->pgd), addr, pteval.pte_low);
504 }
505
506 /* The Guest calls this to set a top-level entry. Again, we set the entry then
507 * tell the Host which top-level page we changed, and the index of the entry we
508 * changed. */
509 static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
510 {
511 *pmdp = pmdval;
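	/* The first hypercall argument is the page holding the pmd entry; the
	 * second is the index of that entry within the page (each entry is
	 * 4 bytes wide, hence the division by 4). */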
512 lazy_hcall(LHCALL_SET_PMD, __pa(pmdp)&PAGE_MASK,
513 (__pa(pmdp)&(PAGE_SIZE-1))/4, 0);
514 }
515
516 /* There are a couple of legacy places where the kernel sets a PTE, but we
517 * don't know the top level any more. This is useless for us, since we don't
518 * know which pagetable is changing or what address, so we just tell the Host
519 * to forget all of them. Fortunately, this is very rare.
520 *
521 * ... except in early boot when the kernel sets up the initial pagetables,
522 * which makes booting astonishingly slow: 1.83 seconds! So we don't even tell
523 * the Host anything changed until we've done the first page table switch,
524 * which brings boot back to 0.25 seconds. */
525 static void lguest_set_pte(pte_t *ptep, pte_t pteval)
526 {
527 *ptep = pteval;
528 if (cr3_changed)
529 lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
530 }
531
532 /* Unfortunately for Lguest, the pv_mmu_ops for page tables were based on
533 * native page table operations. On native hardware you can set a new page
534 * table entry whenever you want, but if you want to remove one you have to do
535 * a TLB flush (a TLB is a little cache of page table entries kept by the CPU).
536 *
537 * So the lguest_set_pte_at() and lguest_set_pmd() functions above are only
538 * called when a valid entry is written, not when it's removed (ie. marked not
539 * present). Instead, this is where we come when the Guest wants to remove a
540 * page table entry: we tell the Host to set that entry to 0 (ie. the present
541 * bit is zero). */
542 static void lguest_flush_tlb_single(unsigned long addr)
543 {
544 /* Simply set it to zero: if it was not, it will fault back in. */
545 lazy_hcall(LHCALL_SET_PTE, lguest_data.pgdir, addr, 0);
546 }
547
548 /* This is what happens after the Guest has removed a large number of entries.
549 * This tells the Host that any of the page table entries for userspace might
550 * have changed, ie. virtual addresses below PAGE_OFFSET. */
551 static void lguest_flush_tlb_user(void)
552 {
553 lazy_hcall(LHCALL_FLUSH_TLB, 0, 0, 0);
554 }
555
556 /* This is called when the kernel page tables have changed. That's not very
557 * common (unless the Guest is using highmem, which makes the Guest extremely
558 * slow), so it's worth separating this from the user flushing above. */
559 static void lguest_flush_tlb_kernel(void)
560 {
561 lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
562 }
563
564 /*
565 * The Unadvanced Programmable Interrupt Controller.
566 *
567 * This is an attempt to implement the simplest possible interrupt controller.
568 * I spent some time looking through routines like set_irq_chip_and_handler,
569 * set_irq_chip_and_handler_name, set_irq_chip_data and set_phasers_to_stun and
570 * I *think* this is as simple as it gets.
571 *
572 * We can tell the Host which interrupts we want blocked via the
573 * lguest_data.blocked_interrupts bitmap, so disabling (aka "masking") them is as
574 * simple as setting a bit. We don't actually "ack" interrupts as such, we
575 * just mask and unmask them. I wonder if we should be cleverer?
576 */
577 static void disable_lguest_irq(unsigned int irq)
578 {
579 set_bit(irq, lguest_data.blocked_interrupts);
580 }
581
582 static void enable_lguest_irq(unsigned int irq)
583 {
584 clear_bit(irq, lguest_data.blocked_interrupts);
585 }
586
587 /* This structure describes the lguest IRQ controller. */
588 static struct irq_chip lguest_irq_controller = {
589 .name = "lguest",
590 .mask = disable_lguest_irq,
591 .mask_ack = disable_lguest_irq,
592 .unmask = enable_lguest_irq,
593 };
594
595 /* This sets up the Interrupt Descriptor Table (IDT) entry for each hardware
596 * interrupt (except 128, which is used for system calls), and then tells the
597 * Linux infrastructure that each interrupt is controlled by our level-based
598 * lguest interrupt controller. */
599 static void __init lguest_init_IRQ(void)
600 {
601 unsigned int i;
602
603 for (i = 0; i < LGUEST_IRQS; i++) {
604 int vector = FIRST_EXTERNAL_VECTOR + i;
605 /* Some systems map "vectors" to interrupts weirdly. Lguest has
606 * a straightforward 1 to 1 mapping, so force that here. */
607 __get_cpu_var(vector_irq)[vector] = i;
608 if (vector != SYSCALL_VECTOR)
609 set_intr_gate(vector, interrupt[i]);
610 }
611 /* This call is required to set up for 4k stacks, where we have
612 * separate stacks for hard and soft interrupts. */
613 irq_ctx_init(smp_processor_id());
614 }
615
616 void lguest_setup_irq(unsigned int irq)
617 {
618 irq_to_desc_alloc_cpu(irq, 0);
619 set_irq_chip_and_handler_name(irq, &lguest_irq_controller,
620 handle_level_irq, "level");
621 }
622
623 /*
624 * Time.
625 *
626 * It would be far better for everyone if the Guest had its own clock, but
627 * until then the Host gives us the time on every interrupt.
628 */
629 static unsigned long lguest_get_wallclock(void)
630 {
631 return lguest_data.time.tv_sec;
632 }
633
634 /* The TSC is an Intel thing called the Time Stamp Counter. The Host tells us
635 * what speed it runs at, or 0 if it's unusable as a reliable clock source.
636 * This matches what we want here: if we return 0 from this function, the x86
637 * TSC clock will give up and not register itself. */
638 static unsigned long lguest_tsc_khz(void)
639 {
640 return lguest_data.tsc_khz;
641 }
642
643 /* If we can't use the TSC, the kernel falls back to our lower-priority
644 * "lguest_clock", where we read the time value given to us by the Host. */
645 static cycle_t lguest_clock_read(void)
646 {
647 unsigned long sec, nsec;
648
649 /* Since the time is in two parts (seconds and nanoseconds), we risk
650 * reading it just as it's changing from 99 & 0.999999999 to 100 and 0,
651 * and getting 99 and 0. As Linux tends to come apart under the stress
652 * of time travel, we must be careful: */
653 do {
654 /* First we read the seconds part. */
655 sec = lguest_data.time.tv_sec;
656 /* This read memory barrier tells the compiler and the CPU that
657 * this can't be reordered: we have to complete the above
658 * before going on. */
659 rmb();
660 /* Now we read the nanoseconds part. */
661 nsec = lguest_data.time.tv_nsec;
662 /* Make sure we've done that. */
663 rmb();
664 /* Now if the seconds part has changed, try again. */
665 } while (unlikely(lguest_data.time.tv_sec != sec));
666
667 /* Our lguest clock is in real nanoseconds. */
668 return sec*1000000000ULL + nsec;
669 }
670
671 /* This is the fallback clocksource: lower priority than the TSC clocksource. */
672 static struct clocksource lguest_clock = {
673 .name = "lguest",
674 .rating = 200,
675 .read = lguest_clock_read,
676 .mask = CLOCKSOURCE_MASK(64),
677 .mult = 1 << 22,
678 .shift = 22,
679 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
680 };
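/* A note on those mult/shift values (an illustrative aside, not part of the
 * original file): the clocksource core converts a raw reading to nanoseconds
 * as roughly (cycles * mult) >> shift. Since lguest_clock_read() already
 * returns nanoseconds, mult = 1 << 22 and shift = 22 make that conversion the
 * identity, as the hypothetical check below shows. */
#if 0	/* illustration only: not compiled */
static u64 example_lguest_cyc2ns(cycle_t cycles)
{
	/* (cycles << 22) >> 22 == cycles (ignoring overflow), so our
	 * "cycles", which are really nanoseconds, pass through unchanged. */
	return ((u64)cycles * lguest_clock.mult) >> lguest_clock.shift;
}
#endif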
681
682 /* We also need a "struct clock_event_device": Linux asks us to set it to go
683 * off some time in the future. Actually, James Morris figured all this out, I
684 * just applied the patch. */
685 static int lguest_clockevent_set_next_event(unsigned long delta,
686 struct clock_event_device *evt)
687 {
688 /* FIXME: I don't think this can ever happen, but James tells me he had
689 * to put this code in. Maybe we should remove it now. Anyone? */
690 if (delta < LG_CLOCK_MIN_DELTA) {
691 if (printk_ratelimit())
692 printk(KERN_DEBUG "%s: small delta %lu ns\n",
693 __func__, delta);
694 return -ETIME;
695 }
696
697 /* Please wake us this far in the future. */
698 hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0);
699 return 0;
700 }
701
702 static void lguest_clockevent_set_mode(enum clock_event_mode mode,
703 struct clock_event_device *evt)
704 {
705 switch (mode) {
706 case CLOCK_EVT_MODE_UNUSED:
707 case CLOCK_EVT_MODE_SHUTDOWN:
708 /* A 0 argument shuts the clock down. */
709 hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0);
710 break;
711 case CLOCK_EVT_MODE_ONESHOT:
712 /* This is what we expect. */
713 break;
714 case CLOCK_EVT_MODE_PERIODIC:
715 BUG();
716 case CLOCK_EVT_MODE_RESUME:
717 break;
718 }
719 }
720
721 /* This describes our primitive timer chip. */
722 static struct clock_event_device lguest_clockevent = {
723 .name = "lguest",
724 .features = CLOCK_EVT_FEAT_ONESHOT,
725 .set_next_event = lguest_clockevent_set_next_event,
726 .set_mode = lguest_clockevent_set_mode,
727 .rating = INT_MAX,
728 .mult = 1,
729 .shift = 0,
730 .min_delta_ns = LG_CLOCK_MIN_DELTA,
731 .max_delta_ns = LG_CLOCK_MAX_DELTA,
732 };
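/* Note the mult = 1, shift = 0 pair: the clockevent core converts a requested
 * delay to device ticks as (delta_ns * mult) >> shift, so with these values
 * lguest_clockevent_set_next_event() is handed the delay in nanoseconds
 * unchanged, matching the nanosecond units of LG_CLOCK_MIN_DELTA and
 * LG_CLOCK_MAX_DELTA above. */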
733
734 /* This is the Guest timer interrupt handler (hardware interrupt 0). We just
735 * call the clockevent infrastructure and it does whatever needs doing. */
736 static void lguest_time_irq(unsigned int irq, struct irq_desc *desc)
737 {
738 unsigned long flags;
739
740 /* Don't interrupt us while this is running. */
741 local_irq_save(flags);
742 lguest_clockevent.event_handler(&lguest_clockevent);
743 local_irq_restore(flags);
744 }
745
746 /* At some point in the boot process, we get asked to set up our timing
747 * infrastructure. The kernel doesn't expect timer interrupts before this, but
748 * we cleverly initialized the "blocked_interrupts" field of "struct
749 * lguest_data" so that timer interrupts were blocked until now. */
750 static void lguest_time_init(void)
751 {
752 /* Set up the timer interrupt (0) to go to our simple timer routine */
753 set_irq_handler(0, lguest_time_irq);
754
755 clocksource_register(&lguest_clock);
756
757 /* We can't set cpumask in the initializer: damn C limitations! Set it
758 * here and register our timer device. */
759 lguest_clockevent.cpumask = cpumask_of(0);
760 clockevents_register_device(&lguest_clockevent);
761
762 /* Finally, we unblock the timer interrupt. */
763 enable_lguest_irq(0);
764 }
765
766 /*
767 * Miscellaneous bits and pieces.
768 *
769 * Here is an oddball collection of functions which the Guest needs for things
770 * to work. They're pretty simple.
771 */
772
773 /* The Guest needs to tell the Host what stack it expects traps to use. For
774 * native hardware, this is part of the Task State Segment mentioned above in
775 * lguest_load_tr_desc(), but to help hypervisors there's this special call.
776 *
777 * We tell the Host the segment we want to use (__KERNEL_DS is the kernel data
778 * segment), the privilege level (we're privilege level 1, the Host is 0 and
779 * will not tolerate us trying to use that), the stack pointer, and the number
780 * of pages in the stack. */
781 static void lguest_load_sp0(struct tss_struct *tss,
782 struct thread_struct *thread)
783 {
784 lazy_hcall(LHCALL_SET_STACK, __KERNEL_DS|0x1, thread->sp0,
785 THREAD_SIZE/PAGE_SIZE);
786 }
787
788 /* Let's just say, I wouldn't do debugging under a Guest. */
789 static void lguest_set_debugreg(int regno, unsigned long value)
790 {
791 /* FIXME: Implement */
792 }
793
794 /* There are times when the kernel wants to make sure that no memory writes are
795 * caught in the cache (that they've all reached real hardware devices). This
796 * doesn't matter for the Guest which has virtual hardware.
797 *
798 * On the Pentium 4 and above, cpuid() indicates that the Cache Line Flush
799 * (clflush) instruction is available and the kernel uses that. Otherwise, it
800 * uses the older "Write Back and Invalidate Cache" (wbinvd) instruction.
801 * Unlike clflush, wbinvd can only be run at privilege level 0. So we can
802 * ignore clflush, but replace wbinvd.
803 */
804 static void lguest_wbinvd(void)
805 {
806 }
807
808 /* If the Guest expects to have an Advanced Programmable Interrupt Controller,
809 * we play dumb by ignoring writes and returning 0 for reads. So it's neither
810 * Programmable nor Controlling anything, and I don't think 8 lines of
811 * code qualifies for Advanced. It will also never interrupt anything. It
812 * does, however, allow us to get through the Linux boot code. */
813 #ifdef CONFIG_X86_LOCAL_APIC
814 static void lguest_apic_write(u32 reg, u32 v)
815 {
816 }
817
818 static u32 lguest_apic_read(u32 reg)
819 {
820 return 0;
821 }
822
823 static u64 lguest_apic_icr_read(void)
824 {
825 return 0;
826 }
827
828 static void lguest_apic_icr_write(u32 low, u32 id)
829 {
830 /* Warn to see if there are any stray references */
831 WARN_ON(1);
832 }
833
834 static void lguest_apic_wait_icr_idle(void)
835 {
836 return;
837 }
838
839 static u32 lguest_apic_safe_wait_icr_idle(void)
840 {
841 return 0;
842 }
843
844 static void set_lguest_basic_apic_ops(void)
845 {
846 apic->read = lguest_apic_read;
847 apic->write = lguest_apic_write;
848 apic->icr_read = lguest_apic_icr_read;
849 apic->icr_write = lguest_apic_icr_write;
850 apic->wait_icr_idle = lguest_apic_wait_icr_idle;
851 apic->safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle;
852 };
853 #endif
854
855 /* STOP! Until an interrupt comes in. */
856 static void lguest_safe_halt(void)
857 {
858 hcall(LHCALL_HALT, 0, 0, 0);
859 }
860
861 /* The SHUTDOWN hypercall takes a string to describe what's happening, and
862 * an argument which says whether to restart (reboot) the Guest or not.
863 *
864 * Note that the Host always prefers that the Guest speak in physical addresses
865 * rather than virtual addresses, so we use __pa() here. */
866 static void lguest_power_off(void)
867 {
868 hcall(LHCALL_SHUTDOWN, __pa("Power down"), LGUEST_SHUTDOWN_POWEROFF, 0);
869 }
870
871 /*
872 * Panicking.
873 *
874 * Don't. But if you did, this is what happens.
875 */
876 static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
877 {
878 hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0);
879 /* The hcall won't return, but to keep gcc happy, we're "done". */
880 return NOTIFY_DONE;
881 }
882
883 static struct notifier_block paniced = {
884 .notifier_call = lguest_panic
885 };
886
887 /* Setting up memory is fairly easy. */
888 static __init char *lguest_memory_setup(void)
889 {
890 /* We do this here and not earlier because lockcheck used to barf if we
891 * did it before start_kernel(). I think we fixed that, so it'd be
892 * nice to move it back to lguest_init. Patch welcome... */
893 atomic_notifier_chain_register(&panic_notifier_list, &paniced);
894
895 /* The Linux bootloader header contains an "e820" memory map: the
896 * Launcher populated the first entry with our memory limit. */
897 e820_add_region(boot_params.e820_map[0].addr,
898 boot_params.e820_map[0].size,
899 boot_params.e820_map[0].type);
900
901 /* This string is for the boot messages. */
902 return "LGUEST";
903 }
904
905 /* We will eventually use the virtio console device to produce console output,
906 * but before that is set up we use LHCALL_NOTIFY on normal memory to produce
907 * early output. */
908 static __init int early_put_chars(u32 vtermno, const char *buf, int count)
909 {
910 char scratch[17];
911 unsigned int len = count;
912
913 /* We use a nul-terminated string, so we have to make a copy. Icky,
914 * huh? */
915 if (len > sizeof(scratch) - 1)
916 len = sizeof(scratch) - 1;
917 scratch[len] = '\0';
918 memcpy(scratch, buf, len);
919 hcall(LHCALL_NOTIFY, __pa(scratch), 0, 0);
920
921 /* This routine returns the number of bytes actually written. */
922 return len;
923 }
924
925 /* Rebooting also tells the Host we're finished, but the RESTART flag tells the
926 * Launcher to reboot us. */
927 static void lguest_restart(char *reason)
928 {
929 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0);
930 }
931
932 /*G:050
933 * Patching (Powerfully Placating Performance Pedants)
934 *
935 * We have already seen that pv_ops structures let us replace simple native
936 * instructions with calls to the appropriate back end all throughout the
937 * kernel. This allows the same kernel to run as a Guest and as a native
938 * kernel, but it's slow because of all the indirect branches.
939 *
940 * Remember that David Wheeler quote about "Any problem in computer science can
941 * be solved with another layer of indirection"? The rest of that quote is
942 * "... But that usually will create another problem." This is the first of
943 * those problems.
944 *
945 * Our current solution is to allow the paravirt back end to optionally patch
946 * over the indirect calls to replace them with something more efficient. We
947 * patch the four most commonly called functions: disable interrupts, enable
948 * interrupts, restore interrupts and save interrupts. We usually have 6 or 10
949 * bytes to patch into: the Guest versions of these operations are small enough
950 * that we can fit comfortably.
951 *
952 * First we need assembly templates of each of the patchable Guest operations,
953 * and these are in i386_head.S. */
954
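/* To make that concrete (an illustrative aside, not part of the original
 * file): the assembler template copied over an irq_disable call site is, in
 * effect, the single store below, which is short enough to fit in the space
 * the indirect call used to occupy: */
#if 0	/* illustration only: not compiled */
static inline void example_patched_irq_disable(void)
{
	lguest_data.irq_enabled = 0;	/* what lgstart_cli..lgend_cli boils down to */
}
#endif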
955 /*G:060 We construct a table from the assembler templates: */
956 static const struct lguest_insns
957 {
958 const char *start, *end;
959 } lguest_insns[] = {
960 [PARAVIRT_PATCH(pv_irq_ops.irq_disable)] = { lgstart_cli, lgend_cli },
961 [PARAVIRT_PATCH(pv_irq_ops.irq_enable)] = { lgstart_sti, lgend_sti },
962 [PARAVIRT_PATCH(pv_irq_ops.restore_fl)] = { lgstart_popf, lgend_popf },
963 [PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf },
964 };
965
966 /* Now our patch routine is fairly simple (based on the native one in
967 * paravirt.c). If we have a replacement, we copy it in and return how much of
968 * the available space we used. */
969 static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf,
970 unsigned long addr, unsigned len)
971 {
972 unsigned int insn_len;
973
974 /* Don't do anything special if we don't have a replacement */
975 if (type >= ARRAY_SIZE(lguest_insns) || !lguest_insns[type].start)
976 return paravirt_patch_default(type, clobber, ibuf, addr, len);
977
978 insn_len = lguest_insns[type].end - lguest_insns[type].start;
979
980 /* Similarly if we can't fit the replacement (shouldn't happen, but let's
981 * be thorough). */
982 if (len < insn_len)
983 return paravirt_patch_default(type, clobber, ibuf, addr, len);
984
985 /* Copy in our instructions. */
986 memcpy(ibuf, lguest_insns[type].start, insn_len);
987 return insn_len;
988 }
989
990 /*G:030 Once we get to lguest_init(), we know we're a Guest. The various
991 * pv_ops structures in the kernel provide points for (almost) every routine we
992 * have to override to avoid privileged instructions. */
993 __init void lguest_init(void)
994 {
995 /* We're under lguest, paravirt is enabled, and we're running at
996 * privilege level 1, not 0 as normal. */
997 pv_info.name = "lguest";
998 pv_info.paravirt_enabled = 1;
999 pv_info.kernel_rpl = 1;
1000
1001 /* We set up all the lguest overrides for sensitive operations. These
1002 * are detailed with the operations themselves. */
1003
1004 /* interrupt-related operations */
1005 pv_irq_ops.init_IRQ = lguest_init_IRQ;
1006 pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl);
1007 pv_irq_ops.restore_fl = PV_CALLEE_SAVE(restore_fl);
1008 pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable);
1009 pv_irq_ops.irq_enable = PV_CALLEE_SAVE(irq_enable);
1010 pv_irq_ops.safe_halt = lguest_safe_halt;
1011
1012 /* init-time operations */
1013 pv_init_ops.memory_setup = lguest_memory_setup;
1014 pv_init_ops.patch = lguest_patch;
1015
1016 /* Intercepts of various cpu instructions */
1017 pv_cpu_ops.load_gdt = lguest_load_gdt;
1018 pv_cpu_ops.cpuid = lguest_cpuid;
1019 pv_cpu_ops.load_idt = lguest_load_idt;
1020 pv_cpu_ops.iret = lguest_iret;
1021 pv_cpu_ops.load_sp0 = lguest_load_sp0;
1022 pv_cpu_ops.load_tr_desc = lguest_load_tr_desc;
1023 pv_cpu_ops.set_ldt = lguest_set_ldt;
1024 pv_cpu_ops.load_tls = lguest_load_tls;
1025 pv_cpu_ops.set_debugreg = lguest_set_debugreg;
1026 pv_cpu_ops.clts = lguest_clts;
1027 pv_cpu_ops.read_cr0 = lguest_read_cr0;
1028 pv_cpu_ops.write_cr0 = lguest_write_cr0;
1029 pv_cpu_ops.read_cr4 = lguest_read_cr4;
1030 pv_cpu_ops.write_cr4 = lguest_write_cr4;
1031 pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
1032 pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
1033 pv_cpu_ops.wbinvd = lguest_wbinvd;
1034 pv_cpu_ops.lazy_mode.enter = paravirt_enter_lazy_cpu;
1035 pv_cpu_ops.lazy_mode.leave = lguest_leave_lazy_cpu_mode;
1036
1037 /* pagetable management */
1038 pv_mmu_ops.write_cr3 = lguest_write_cr3;
1039 pv_mmu_ops.flush_tlb_user = lguest_flush_tlb_user;
1040 pv_mmu_ops.flush_tlb_single = lguest_flush_tlb_single;
1041 pv_mmu_ops.flush_tlb_kernel = lguest_flush_tlb_kernel;
1042 pv_mmu_ops.set_pte = lguest_set_pte;
1043 pv_mmu_ops.set_pte_at = lguest_set_pte_at;
1044 pv_mmu_ops.set_pmd = lguest_set_pmd;
1045 pv_mmu_ops.read_cr2 = lguest_read_cr2;
1046 pv_mmu_ops.read_cr3 = lguest_read_cr3;
1047 pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
1048 pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
1049
1050 #ifdef CONFIG_X86_LOCAL_APIC
1051 /* apic read/write intercepts */
1052 set_lguest_basic_apic_ops();
1053 #endif
1054
1055 /* time operations */
1056 pv_time_ops.get_wallclock = lguest_get_wallclock;
1057 pv_time_ops.time_init = lguest_time_init;
1058 pv_time_ops.get_tsc_khz = lguest_tsc_khz;
1059
1060 /* Now is a good time to look at the implementations of these functions
1061 * before returning to the rest of lguest_init(). */
1062
1063 /*G:070 Now we've seen all the paravirt_ops, we return to
1064 * lguest_init() where the rest of the fairly chaotic boot setup
1065 * occurs. */
1066
1067 /* The native boot code sets up initial page tables immediately after
1068 * the kernel itself, and sets init_pg_tables_end so they're not
1069 * clobbered. The Launcher places our initial pagetables somewhere at
1070 * the top of our physical memory, so we don't need extra space: set
1071 * init_pg_tables_end to the end of the kernel. */
1072 init_pg_tables_start = __pa(pg0);
1073 init_pg_tables_end = __pa(pg0);
1074
1075 /* As described in head_32.S, we map the first 128M of memory. */
1076 max_pfn_mapped = (128*1024*1024) >> PAGE_SHIFT;
1077
1078 /* Load the %fs segment register (the per-cpu segment register) with
1079 * the normal data segment to get through booting. */
1080 asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_DS) : "memory");
1081
1082 /* The Host<->Guest Switcher lives at the top of our address space, and
1083 * the Host told us how big it is when we made LGUEST_INIT hypercall:
1084 * it put the answer in lguest_data.reserve_mem */
1085 reserve_top_address(lguest_data.reserve_mem);
1086
1087 /* If we don't initialize the lock dependency checker now, it crashes
1088 * paravirt_disable_iospace. */
1089 lockdep_init();
1090
1091 /* The IDE code spends about 3 seconds probing for disks: if we reserve
1092 * all the I/O ports up front it can't get them and so doesn't probe.
1093 * Other device drivers are similar (but less severe). This cuts the
1094 * kernel boot time on my machine from 4.1 seconds to 0.45 seconds. */
1095 paravirt_disable_iospace();
1096
1097 /* This is messy CPU setup stuff which the native boot code does before
1098 * start_kernel, so we have to do it, too: */
1099 cpu_detect(&new_cpu_data);
1100 /* head.S usually sets up the first capability word, so do it here. */
1101 new_cpu_data.x86_capability[0] = cpuid_edx(1);
1102
1103 /* Math is always hard! */
1104 new_cpu_data.hard_math = 1;
1105
1106 /* We don't have features. We have puppies! Puppies! */
1107 #ifdef CONFIG_X86_MCE
1108 mce_disabled = 1;
1109 #endif
1110 #ifdef CONFIG_ACPI
1111 acpi_disabled = 1;
1112 acpi_ht = 0;
1113 #endif
1114
1115 /* We set the preferred console to "hvc". This is the "hypervisor
1116 * virtual console" driver written by the PowerPC people, which we also
1117 * adapted for lguest's use. */
1118 add_preferred_console("hvc", 0, NULL);
1119
1120 /* Register our very early console. */
1121 virtio_cons_early_init(early_put_chars);
1122
1123 /* Last of all, we set the power management poweroff hook to point to
1124 * the Guest routine to power off, and the reboot hook to our restart
1125 * routine. */
1126 pm_power_off = lguest_power_off;
1127 machine_ops.restart = lguest_restart;
1128
1129 /* Now we're set up, we call i386_start_kernel() in head32.c and proceed
1130 * to boot as normal. It never returns. */
1131 i386_start_kernel();
1132 }
1133 /*
1134 * This marks the end of stage II of our journey, The Guest.
1135 *
1136 * It is now time for us to explore the layer of virtual drivers and complete
1137 * our understanding of the Guest in "make Drivers".
1138 */