#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)
#define CLBR_EDI  (1 << 3)

#ifdef CONFIG_X86_32
/* CLBR_ANY should match all regs platform has.  For i386, that's just it */
#define CLBR_ANY  ((1 << 4) - 1)

#define CLBR_ARG_REGS	(CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG	(CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH	(0)
#else
#define CLBR_RAX  CLBR_EAX
#define CLBR_RCX  CLBR_ECX
#define CLBR_RDX  CLBR_EDX
#define CLBR_RDI  CLBR_EDI
#define CLBR_RSI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)

#define CLBR_ANY  ((1 << 9) - 1)

#define CLBR_ARG_REGS	(CLBR_RDI | CLBR_RSI | CLBR_RDX | \
			 CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG	(CLBR_RAX)
#define CLBR_SCRATCH	(CLBR_R10 | CLBR_R11)

#include <asm/desc_defs.h>
#endif /* X86_64 */

#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)

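/*
 * Worked out from the definitions above, for reference:
 *
 *   32-bit: CLBR_ARG_REGS = eax|edx|ecx, CLBR_RET_REG = eax|edx
 *           => CLBR_CALLEE_SAVE == CLBR_ECX
 *   64-bit: CLBR_ARG_REGS = rdi|rsi|rdx|rcx|r8|r9, CLBR_SCRATCH = r10|r11,
 *           CLBR_RET_REG = rax
 *           => CLBR_CALLEE_SAVE == rcx|rdx|rsi|rdi|r8|r9|r10|r11
 *
 * which matches the registers pushed by PV_SAVE_ALL_CALLER_REGS further
 * down in this file.
 */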
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/kmap_types.h>
#include <asm/desc_defs.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;

/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK below.
 */
struct paravirt_callee_save {
	void *func;
};

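/*
 * Fields of this type (pv_irq_ops.save_fl, pv_mmu_ops.pte_val, ...) are
 * normally filled in with PV_CALLEE_SAVE(fn), which points them at a
 * generated thunk around fn, or with __PV_IS_CALLEE_SAVE(fn) when fn
 * already follows the restricted convention; both helpers are defined
 * near the end of this header.  Sketch only, with invented backend names:
 *
 *	static pteval_t my_hv_pte_val(pte_t pte)
 *	{
 *		return pte.pte & my_hv_visible_mask;
 *	}
 *	PV_CALLEE_SAVE_REGS_THUNK(my_hv_pte_val);
 *
 *	pv_mmu_ops.pte_val = PV_CALLEE_SAVE(my_hv_pte_val);
 *
 * "my_hv_pte_val" and "my_hv_visible_mask" are purely illustrative.
 */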
/* general info */
struct pv_info {
	unsigned int kernel_rpl;
	int shared_kernel_pmd;
	int paravirt_enabled;
	const char *name;
};

struct pv_init_ops {
	/*
	 * Patch may replace one of the defined code sequences with
	 * arbitrary code, subject to the same register constraints.
	 * This generally means the code is not free to clobber any
	 * registers other than EAX.  The patch function should return
	 * the number of bytes of code generated, as we nop pad the
	 * rest in generic code.
	 */
	unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
			  unsigned long addr, unsigned len);

	/* Basic arch-specific setup */
	void (*arch_setup)(void);
	char *(*memory_setup)(void);
	void (*post_allocator_init)(void);

	/* Print a banner to identify the environment */
	void (*banner)(void);
};

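/*
 * Sketch of the patch() contract above (hypothetical backend; the real
 * implementations live in the individual hypervisor ports).  A backend
 * with no inline replacement for a given op can simply fall back to
 * paravirt_patch_default(), declared later in this header:
 *
 *	static unsigned my_hv_patch(u8 type, u16 clobbers, void *ibuf,
 *				    unsigned long addr, unsigned len)
 *	{
 *		// nothing special: keep the indirect call, or let the
 *		// default logic turn it into a direct call as appropriate
 *		return paravirt_patch_default(type, clobbers, ibuf, addr, len);
 *	}
 *
 * Whatever is returned must be <= len; generic code pads the remainder
 * with nops.
 */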
struct pv_lazy_ops {
	/* Set deferred update mode, used for batching operations. */
	void (*enter)(void);
	void (*leave)(void);
};

struct pv_time_ops {
	void (*time_init)(void);

	/* Get and set time of day */
	unsigned long (*get_wallclock)(void);
	int (*set_wallclock)(unsigned long);

	unsigned long long (*sched_clock)(void);
	unsigned long (*get_tsc_khz)(void);
};

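/*
 * A backend fills this in with its own clock sources; hypothetical
 * example (function names invented for illustration):
 *
 *	pv_time_ops.sched_clock   = my_hv_sched_clock;    // ns since boot
 *	pv_time_ops.get_tsc_khz   = my_hv_tsc_khz;        // for calibration
 *	pv_time_ops.get_wallclock = my_hv_get_wallclock;
 *
 * paravirt_sched_clock() and calibrate_tsc() further down dispatch
 * through these pointers.
 */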
93b1eab3 115struct pv_cpu_ops {
294688c0 116 /* hooks for various privileged instructions */
1a1eecd1
AK
117 unsigned long (*get_debugreg)(int regno);
118 void (*set_debugreg)(int regno, unsigned long value);
d3561b7f 119
1a1eecd1 120 void (*clts)(void);
d3561b7f 121
1a1eecd1
AK
122 unsigned long (*read_cr0)(void);
123 void (*write_cr0)(unsigned long);
d3561b7f 124
1a1eecd1
AK
125 unsigned long (*read_cr4_safe)(void);
126 unsigned long (*read_cr4)(void);
127 void (*write_cr4)(unsigned long);
d3561b7f 128
4c9890c2
GOC
129#ifdef CONFIG_X86_64
130 unsigned long (*read_cr8)(void);
131 void (*write_cr8)(unsigned long);
132#endif
133
294688c0 134 /* Segment descriptor handling */
1a1eecd1 135 void (*load_tr_desc)(void);
6b68f01b
GOC
136 void (*load_gdt)(const struct desc_ptr *);
137 void (*load_idt)(const struct desc_ptr *);
138 void (*store_gdt)(struct desc_ptr *);
139 void (*store_idt)(struct desc_ptr *);
1a1eecd1
AK
140 void (*set_ldt)(const void *desc, unsigned entries);
141 unsigned long (*store_tr)(void);
142 void (*load_tls)(struct thread_struct *t, unsigned int cpu);
9f9d489a
JF
143#ifdef CONFIG_X86_64
144 void (*load_gs_index)(unsigned int idx);
145#endif
75b8bb3e
GOC
146 void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
147 const void *desc);
90a0a06a 148 void (*write_gdt_entry)(struct desc_struct *,
014b15be 149 int entrynum, const void *desc, int size);
8d947344
GOC
150 void (*write_idt_entry)(gate_desc *,
151 int entrynum, const gate_desc *gate);
38ffbe66
JF
152 void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
153 void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
154
faca6227 155 void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
d3561b7f 156
1a1eecd1 157 void (*set_iopl_mask)(unsigned mask);
93b1eab3
JF
158
159 void (*wbinvd)(void);
1a1eecd1 160 void (*io_delay)(void);
d3561b7f 161
93b1eab3
JF
162 /* cpuid emulation, mostly so that caps bits can be disabled */
163 void (*cpuid)(unsigned int *eax, unsigned int *ebx,
164 unsigned int *ecx, unsigned int *edx);
165
166 /* MSR, PMC and TSR operations.
167 err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
b05f78f5 168 u64 (*read_msr_amd)(unsigned int msr, int *err);
93b1eab3 169 u64 (*read_msr)(unsigned int msr, int *err);
c9dcda5c 170 int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
93b1eab3
JF
171
172 u64 (*read_tsc)(void);
b8d1fae7 173 u64 (*read_pmc)(int counter);
e5aaac44 174 unsigned long long (*read_tscp)(unsigned int *aux);
93b1eab3 175
2be29982
JF
176 /*
177 * Atomically enable interrupts and return to userspace. This
178 * is only ever used to return to 32-bit processes; in a
179 * 64-bit kernel, it's used for 32-on-64 compat processes, but
180 * never native 64-bit processes. (Jump, not call.)
181 */
d75cd22f 182 void (*irq_enable_sysexit)(void);
2be29982
JF
183
184 /*
185 * Switch to usermode gs and return to 64-bit usermode using
186 * sysret. Only used in 64-bit kernels to return to 64-bit
187 * processes. Usermode register state, including %rsp, must
188 * already be restored.
189 */
190 void (*usergs_sysret64)(void);
191
192 /*
193 * Switch to usermode gs and return to 32-bit usermode using
194 * sysret. Used to return to 32-on-64 compat processes.
195 * Other usermode register state, including %esp, must already
196 * be restored.
197 */
198 void (*usergs_sysret32)(void);
199
200 /* Normal iret. Jump to this with the standard iret stack
201 frame set up. */
93b1eab3 202 void (*iret)(void);
8965c1c0 203
e801f864
GOC
204 void (*swapgs)(void);
205
8965c1c0 206 struct pv_lazy_ops lazy_mode;
93b1eab3
JF
207};
208
struct pv_irq_ops {
	void (*init_IRQ)(void);

	/*
	 * Get/set interrupt state.  save_fl and restore_fl are only
	 * expected to use X86_EFLAGS_IF; all other bits
	 * returned from save_fl are undefined, and may be ignored by
	 * restore_fl.
	 *
	 * NOTE: Callers of these functions expect the callee to preserve
	 * more registers than the standard C calling convention.
	 */
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save restore_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;

	void (*safe_halt)(void);
	void (*halt)(void);

#ifdef CONFIG_X86_64
	void (*adjust_exception_frame)(void);
#endif
};

93b1eab3 234struct pv_apic_ops {
13623d79 235#ifdef CONFIG_X86_LOCAL_APIC
bbab4f3b
ZA
236 void (*setup_boot_clock)(void);
237 void (*setup_secondary_clock)(void);
294688c0
JF
238
239 void (*startup_ipi_hook)(int phys_apicid,
240 unsigned long start_eip,
241 unsigned long start_esp);
13623d79 242#endif
93b1eab3
JF
243};
244
245struct pv_mmu_ops {
246 /*
247 * Called before/after init_mm pagetable setup. setup_start
248 * may reset %cr3, and may pre-install parts of the pagetable;
249 * pagetable setup is expected to preserve any existing
250 * mapping.
251 */
252 void (*pagetable_setup_start)(pgd_t *pgd_base);
253 void (*pagetable_setup_done)(pgd_t *pgd_base);
254
255 unsigned long (*read_cr2)(void);
256 void (*write_cr2)(unsigned long);
257
258 unsigned long (*read_cr3)(void);
259 void (*write_cr3)(unsigned long);
260
261 /*
262 * Hooks for intercepting the creation/use/destruction of an
263 * mm_struct.
264 */
265 void (*activate_mm)(struct mm_struct *prev,
266 struct mm_struct *next);
267 void (*dup_mmap)(struct mm_struct *oldmm,
268 struct mm_struct *mm);
269 void (*exit_mmap)(struct mm_struct *mm);
270
13623d79 271
294688c0 272 /* TLB operations */
1a1eecd1
AK
273 void (*flush_tlb_user)(void);
274 void (*flush_tlb_kernel)(void);
f8822f42 275 void (*flush_tlb_single)(unsigned long addr);
4595f962
RR
276 void (*flush_tlb_others)(const struct cpumask *cpus,
277 struct mm_struct *mm,
d4c10477 278 unsigned long va);
1a1eecd1 279
eba0045f
JF
280 /* Hooks for allocating and freeing a pagetable top-level */
281 int (*pgd_alloc)(struct mm_struct *mm);
282 void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
283
284 /*
285 * Hooks for allocating/releasing pagetable pages when they're
286 * attached to a pagetable
287 */
f8639939
EH
288 void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
289 void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
290 void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
291 void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
292 void (*release_pte)(unsigned long pfn);
293 void (*release_pmd)(unsigned long pfn);
294 void (*release_pud)(unsigned long pfn);
1a1eecd1 295
294688c0 296 /* Pagetable manipulation functions */
1a1eecd1 297 void (*set_pte)(pte_t *ptep, pte_t pteval);
294688c0
JF
298 void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
299 pte_t *ptep, pte_t pteval);
1a1eecd1 300 void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
49cd740b
JP
301 void (*pte_update)(struct mm_struct *mm, unsigned long addr,
302 pte_t *ptep);
294688c0
JF
303 void (*pte_update_defer)(struct mm_struct *mm,
304 unsigned long addr, pte_t *ptep);
3dc494e8 305
08b882c6
JF
306 pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
307 pte_t *ptep);
308 void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
309 pte_t *ptep, pte_t pte);
310
da5de7c2
JF
311 struct paravirt_callee_save pte_val;
312 struct paravirt_callee_save make_pte;
5b8dd1e9 313
da5de7c2
JF
314 struct paravirt_callee_save pgd_val;
315 struct paravirt_callee_save make_pgd;
5b8dd1e9
JF
316
317#if PAGETABLE_LEVELS >= 3
da181a8b 318#ifdef CONFIG_X86_PAE
1a1eecd1 319 void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
49cd740b
JP
320 void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
321 pte_t *ptep);
1a1eecd1 322 void (*pmd_clear)(pmd_t *pmdp);
3dc494e8 323
5b8dd1e9 324#endif /* CONFIG_X86_PAE */
3dc494e8 325
5b8dd1e9 326 void (*set_pud)(pud_t *pudp, pud_t pudval);
3dc494e8 327
da5de7c2
JF
328 struct paravirt_callee_save pmd_val;
329 struct paravirt_callee_save make_pmd;
5b8dd1e9
JF
330
331#if PAGETABLE_LEVELS == 4
da5de7c2
JF
332 struct paravirt_callee_save pud_val;
333 struct paravirt_callee_save make_pud;
9042219c
EH
334
335 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
5b8dd1e9
JF
336#endif /* PAGETABLE_LEVELS == 4 */
337#endif /* PAGETABLE_LEVELS >= 3 */
da181a8b 338
93b1eab3
JF
339#ifdef CONFIG_HIGHPTE
340 void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
341#endif
8965c1c0
JF
342
343 struct pv_lazy_ops lazy_mode;
aeaaa59c
JF
344
345 /* dom0 ops */
346
347 /* Sometimes the physical address is a pfn, and sometimes its
348 an mfn. We can tell which is which from the index. */
349 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
350 unsigned long phys, pgprot_t flags);
93b1eab3 351};
9226d125 352
74d4affd
JF
353struct raw_spinlock;
354struct pv_lock_ops {
355 int (*spin_is_locked)(struct raw_spinlock *lock);
356 int (*spin_is_contended)(struct raw_spinlock *lock);
357 void (*spin_lock)(struct raw_spinlock *lock);
63d3a75d 358 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
74d4affd
JF
359 int (*spin_trylock)(struct raw_spinlock *lock);
360 void (*spin_unlock)(struct raw_spinlock *lock);
361};
362
/* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch. */
struct paravirt_patch_template {
	struct pv_init_ops pv_init_ops;
	struct pv_time_ops pv_time_ops;
	struct pv_cpu_ops pv_cpu_ops;
	struct pv_irq_ops pv_irq_ops;
	struct pv_apic_ops pv_apic_ops;
	struct pv_mmu_ops pv_mmu_ops;
	struct pv_lock_ops pv_lock_ops;
};

extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_apic_ops pv_apic_ops;
extern struct pv_mmu_ops pv_mmu_ops;
extern struct pv_lock_ops pv_lock_ops;

#define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))

#define paravirt_type(op)				\
	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
	[paravirt_opptr] "i" (&(op))
#define paravirt_clobber(clobber)		\
	[paravirt_clobber] "i" (clobber)

/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)	\
	"771:\n\t" insn_string "\n" "772:\n"		\
	".pushsection .parainstructions,\"a\"\n"	\
	_ASM_ALIGN "\n"					\
	_ASM_PTR " 771b\n"				\
	"  .byte " type "\n"				\
	"  .byte 772b-771b\n"				\
	"  .short " clobber "\n"			\
	".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)					\
	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

/* Simple instruction patching code. */
#define DEF_NATIVE(ops, name, code)					\
	extern const char start_##ops##_##name[], end_##ops##_##name[];	\
	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")

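/*
 * For example, the native patch tables (paravirt_patch_32.c /
 * paravirt_patch_64.c) use this to capture the raw instruction
 * sequences that can replace a paravirt call site, roughly:
 *
 *	DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 *	DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 *	DEF_NATIVE(pv_cpu_ops, clts, "clts");
 *
 * Each DEF_NATIVE emits the code between start_<ops>_<name> and
 * end_<ops>_<name>, which paravirt_patch_insns() can then copy over a
 * call site when running on bare hardware.
 */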
unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
			     const void *target, u16 tgt_clobbers,
			     unsigned long addr, u16 site_clobbers,
			     unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
			    unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
				unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
			      const char *start, const char *end);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL "call *%c[paravirt_opptr];"

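/*
 * Worked example of the type-number encoding, using the definitions
 * above: PARAVIRT_PATCH(pv_irq_ops.irq_disable) is
 * offsetof(struct paravirt_patch_template, pv_irq_ops.irq_disable)
 * divided by sizeof(void *), i.e. the index of that function pointer
 * when the whole template is viewed as an array of pointers.  The
 * patcher can turn such an index back into an offset
 * (index * sizeof(void *)) to locate the op a given site refers to.
 */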
/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, which
 * unfortunately are quite a few more (r8 - r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/i386/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * 64-bit arguments are passed as a pair of adjacent 32-bit arguments;
 * i386 passes such 64-bit arguments in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
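/*
 * To make the pieces above concrete: a wrapper further down such as
 *
 *	static inline unsigned long read_cr2(void)
 *	{
 *		return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
 *	}
 *
 * boils down to an asm volatile() containing "call *%c[paravirt_opptr]"
 * (PARAVIRT_CALL), bracketed by the 771:/772: labels whose address,
 * length, type number and clobber mask are recorded in
 * .parainstructions by _paravirt_alt().  At boot, apply_paravirt() may
 * rewrite that indirect call into a direct call or, on native hardware,
 * into the bare instruction sequence captured by DEF_NATIVE.
 * (Illustrative summary only; the exact expansion is given by the
 * macros that follow.)
 */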
a4746364 510#ifdef CONFIG_X86_32
791bad9d
JF
511#define PVOP_VCALL_ARGS \
512 unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
a4746364 513#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
791bad9d
JF
514
515#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
516#define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x))
517#define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x))
518
a4746364
GOC
519#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \
520 "=c" (__ecx)
521#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS
791bad9d 522
0eb592db 523#define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx)
791bad9d
JF
524#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
525
a4746364
GOC
526#define EXTRA_CLOBBERS
527#define VEXTRA_CLOBBERS
791bad9d
JF
528#else /* CONFIG_X86_64 */
529#define PVOP_VCALL_ARGS \
530 unsigned long __edi = __edi, __esi = __esi, \
531 __edx = __edx, __ecx = __ecx
a4746364 532#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax
791bad9d
JF
533
534#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
535#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
536#define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x))
537#define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x))
538
a4746364
GOC
539#define PVOP_VCALL_CLOBBERS "=D" (__edi), \
540 "=S" (__esi), "=d" (__edx), \
541 "=c" (__ecx)
a4746364
GOC
542#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
543
791bad9d
JF
544#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
545#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
546
a4746364
GOC
547#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
548#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11"
791bad9d 549#endif /* CONFIG_X86_32 */
a4746364 550
97349135
JF
551#ifdef CONFIG_PARAVIRT_DEBUG
552#define PVOP_TEST_NULL(op) BUG_ON(op == NULL)
553#else
554#define PVOP_TEST_NULL(op) ((void)op)
555#endif
556
791bad9d
JF
557#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \
558 pre, post, ...) \
f8822f42 559 ({ \
1a45b7aa 560 rettype __ret; \
791bad9d 561 PVOP_CALL_ARGS; \
97349135 562 PVOP_TEST_NULL(op); \
a4746364
GOC
563 /* This is 32-bit specific, but is okay in 64-bit */ \
564 /* since this condition will never hold */ \
1a45b7aa
JF
565 if (sizeof(rettype) > sizeof(unsigned long)) { \
566 asm volatile(pre \
567 paravirt_alt(PARAVIRT_CALL) \
568 post \
791bad9d 569 : call_clbr \
1a45b7aa 570 : paravirt_type(op), \
791bad9d 571 paravirt_clobber(clbr), \
1a45b7aa 572 ##__VA_ARGS__ \
791bad9d 573 : "memory", "cc" extra_clbr); \
1a45b7aa 574 __ret = (rettype)((((u64)__edx) << 32) | __eax); \
f8822f42 575 } else { \
1a45b7aa 576 asm volatile(pre \
f8822f42 577 paravirt_alt(PARAVIRT_CALL) \
1a45b7aa 578 post \
791bad9d 579 : call_clbr \
1a45b7aa 580 : paravirt_type(op), \
791bad9d 581 paravirt_clobber(clbr), \
1a45b7aa 582 ##__VA_ARGS__ \
791bad9d 583 : "memory", "cc" extra_clbr); \
1a45b7aa 584 __ret = (rettype)__eax; \
f8822f42
JF
585 } \
586 __ret; \
587 })
791bad9d
JF
588
589#define __PVOP_CALL(rettype, op, pre, post, ...) \
590 ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \
591 EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
592
593#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \
594 ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
595 PVOP_CALLEE_CLOBBERS, , \
596 pre, post, ##__VA_ARGS__)
597
598
599#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
f8822f42 600 ({ \
a4746364 601 PVOP_VCALL_ARGS; \
97349135 602 PVOP_TEST_NULL(op); \
1a45b7aa 603 asm volatile(pre \
f8822f42 604 paravirt_alt(PARAVIRT_CALL) \
1a45b7aa 605 post \
791bad9d 606 : call_clbr \
1a45b7aa 607 : paravirt_type(op), \
791bad9d 608 paravirt_clobber(clbr), \
1a45b7aa 609 ##__VA_ARGS__ \
791bad9d 610 : "memory", "cc" extra_clbr); \
f8822f42
JF
611 })
612
791bad9d
JF
613#define __PVOP_VCALL(op, pre, post, ...) \
614 ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \
615 VEXTRA_CLOBBERS, \
616 pre, post, ##__VA_ARGS__)
617
#define __PVOP_VCALLEESAVE(op, pre, post, ...)				\
	____PVOP_VCALL(op.func, CLBR_RET_REG,				\
		      PVOP_VCALLEE_CLOBBERS, ,				\
		      pre, post, ##__VA_ARGS__)


1a45b7aa
JF
625#define PVOP_CALL0(rettype, op) \
626 __PVOP_CALL(rettype, op, "", "")
627#define PVOP_VCALL0(op) \
628 __PVOP_VCALL(op, "", "")
629
791bad9d
JF
630#define PVOP_CALLEE0(rettype, op) \
631 __PVOP_CALLEESAVE(rettype, op, "", "")
632#define PVOP_VCALLEE0(op) \
633 __PVOP_VCALLEESAVE(op, "", "")
634
635
1a45b7aa 636#define PVOP_CALL1(rettype, op, arg1) \
791bad9d 637 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
1a45b7aa 638#define PVOP_VCALL1(op, arg1) \
791bad9d
JF
639 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
640
641#define PVOP_CALLEE1(rettype, op, arg1) \
642 __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
643#define PVOP_VCALLEE1(op, arg1) \
644 __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
645
1a45b7aa
JF
646
647#define PVOP_CALL2(rettype, op, arg1, arg2) \
791bad9d
JF
648 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
649 PVOP_CALL_ARG2(arg2))
1a45b7aa 650#define PVOP_VCALL2(op, arg1, arg2) \
791bad9d
JF
651 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
652 PVOP_CALL_ARG2(arg2))
653
654#define PVOP_CALLEE2(rettype, op, arg1, arg2) \
655 __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
656 PVOP_CALL_ARG2(arg2))
657#define PVOP_VCALLEE2(op, arg1, arg2) \
658 __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \
659 PVOP_CALL_ARG2(arg2))
660
1a45b7aa
JF
661
662#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
791bad9d
JF
663 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
664 PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
1a45b7aa 665#define PVOP_VCALL3(op, arg1, arg2, arg3) \
791bad9d
JF
666 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
667 PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
1a45b7aa 668
a4746364
GOC
669/* This is the only difference in x86_64. We can make it much simpler */
670#ifdef CONFIG_X86_32
1a45b7aa
JF
671#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
672 __PVOP_CALL(rettype, op, \
673 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
791bad9d
JF
674 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
675 PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
1a45b7aa
JF
676#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
677 __PVOP_VCALL(op, \
678 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
679 "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
680 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
a4746364
GOC
681#else
682#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
791bad9d
JF
683 __PVOP_CALL(rettype, op, "", "", \
684 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
685 PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
a4746364 686#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
791bad9d
JF
687 __PVOP_VCALL(op, "", "", \
688 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
689 PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
a4746364 690#endif
1a45b7aa 691
f8822f42
JF
692static inline int paravirt_enabled(void)
693{
93b1eab3 694 return pv_info.paravirt_enabled;
f8822f42 695}
d3561b7f 696
faca6227 697static inline void load_sp0(struct tss_struct *tss,
d3561b7f
RR
698 struct thread_struct *thread)
699{
faca6227 700 PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
d3561b7f
RR
701}
702
93b1eab3 703#define ARCH_SETUP pv_init_ops.arch_setup();
d3561b7f
RR
704static inline unsigned long get_wallclock(void)
705{
93b1eab3 706 return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
d3561b7f
RR
707}
708
709static inline int set_wallclock(unsigned long nowtime)
710{
93b1eab3 711 return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
d3561b7f
RR
712}
713
e30fab3a 714static inline void (*choose_time_init(void))(void)
d3561b7f 715{
93b1eab3 716 return pv_time_ops.time_init;
d3561b7f
RR
717}
718
719/* The paravirtualized CPUID instruction. */
720static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
721 unsigned int *ecx, unsigned int *edx)
722{
93b1eab3 723 PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
d3561b7f
RR
724}
725
726/*
727 * These special macros can be used to get or set a debugging register
728 */
f8822f42
JF
729static inline unsigned long paravirt_get_debugreg(int reg)
730{
93b1eab3 731 return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
f8822f42
JF
732}
733#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
734static inline void set_debugreg(unsigned long val, int reg)
735{
93b1eab3 736 PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
f8822f42 737}
d3561b7f 738
f8822f42
JF
739static inline void clts(void)
740{
93b1eab3 741 PVOP_VCALL0(pv_cpu_ops.clts);
f8822f42 742}
d3561b7f 743
f8822f42
JF
744static inline unsigned long read_cr0(void)
745{
93b1eab3 746 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
f8822f42 747}
d3561b7f 748
f8822f42
JF
749static inline void write_cr0(unsigned long x)
750{
93b1eab3 751 PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
f8822f42
JF
752}
753
754static inline unsigned long read_cr2(void)
755{
93b1eab3 756 return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
f8822f42
JF
757}
758
759static inline void write_cr2(unsigned long x)
760{
93b1eab3 761 PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
f8822f42
JF
762}
763
764static inline unsigned long read_cr3(void)
765{
93b1eab3 766 return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
f8822f42 767}
d3561b7f 768
f8822f42
JF
769static inline void write_cr3(unsigned long x)
770{
93b1eab3 771 PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
f8822f42 772}
d3561b7f 773
f8822f42
JF
774static inline unsigned long read_cr4(void)
775{
93b1eab3 776 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
f8822f42
JF
777}
778static inline unsigned long read_cr4_safe(void)
779{
93b1eab3 780 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
f8822f42 781}
d3561b7f 782
f8822f42
JF
783static inline void write_cr4(unsigned long x)
784{
93b1eab3 785 PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
f8822f42 786}
3dc494e8 787
94ea03cd 788#ifdef CONFIG_X86_64
4c9890c2
GOC
789static inline unsigned long read_cr8(void)
790{
791 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
792}
793
794static inline void write_cr8(unsigned long x)
795{
796 PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
797}
94ea03cd 798#endif
4c9890c2 799
d3561b7f
RR
800static inline void raw_safe_halt(void)
801{
93b1eab3 802 PVOP_VCALL0(pv_irq_ops.safe_halt);
d3561b7f
RR
803}
804
static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.halt);
}
809
810static inline void wbinvd(void)
811{
93b1eab3 812 PVOP_VCALL0(pv_cpu_ops.wbinvd);
d3561b7f 813}
d3561b7f 814
93b1eab3 815#define get_kernel_rpl() (pv_info.kernel_rpl)
d3561b7f 816
f8822f42
JF
817static inline u64 paravirt_read_msr(unsigned msr, int *err)
818{
93b1eab3 819 return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
f8822f42 820}
b05f78f5
YL
821static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
822{
823 return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
824}
f8822f42
JF
825static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
826{
93b1eab3 827 return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
f8822f42
JF
828}
829
90a0a06a 830/* These should all do BUG_ON(_err), but our headers are too tangled. */
49cd740b
JP
831#define rdmsr(msr, val1, val2) \
832do { \
f8822f42
JF
833 int _err; \
834 u64 _l = paravirt_read_msr(msr, &_err); \
835 val1 = (u32)_l; \
836 val2 = _l >> 32; \
49cd740b 837} while (0)
d3561b7f 838
49cd740b
JP
839#define wrmsr(msr, val1, val2) \
840do { \
f8822f42 841 paravirt_write_msr(msr, val1, val2); \
49cd740b 842} while (0)
d3561b7f 843
49cd740b
JP
844#define rdmsrl(msr, val) \
845do { \
f8822f42
JF
846 int _err; \
847 val = paravirt_read_msr(msr, &_err); \
49cd740b 848} while (0)
d3561b7f 849
49cd740b
JP
850#define wrmsrl(msr, val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
851#define wrmsr_safe(msr, a, b) paravirt_write_msr(msr, a, b)
d3561b7f
RR
852
853/* rdmsr with exception handling */
49cd740b
JP
854#define rdmsr_safe(msr, a, b) \
855({ \
f8822f42
JF
856 int _err; \
857 u64 _l = paravirt_read_msr(msr, &_err); \
858 (*a) = (u32)_l; \
859 (*b) = _l >> 32; \
49cd740b
JP
860 _err; \
861})
d3561b7f 862
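/*
 * Typical use (illustrative; MSR_EFER is just an example index defined
 * elsewhere in the tree):
 *
 *	u32 lo, hi;
 *	if (rdmsr_safe(MSR_EFER, &lo, &hi) == 0)
 *		... the MSR exists and lo/hi hold its value ...
 *
 * rdmsr_safe() evaluates to the error code from paravirt_read_msr(),
 * so 0 means the read succeeded and non-zero (-EFAULT) means the
 * access faulted and was caught by the exception handler.
 */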
1de87bd4
AK
863static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
864{
865 int err;
866
867 *p = paravirt_read_msr(msr, &err);
868 return err;
869}
b05f78f5
YL
870static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
871{
872 int err;
873
874 *p = paravirt_read_msr_amd(msr, &err);
875 return err;
876}
f8822f42
JF
877
878static inline u64 paravirt_read_tsc(void)
879{
93b1eab3 880 return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
f8822f42 881}
d3561b7f 882
49cd740b
JP
883#define rdtscl(low) \
884do { \
f8822f42
JF
885 u64 _l = paravirt_read_tsc(); \
886 low = (int)_l; \
49cd740b 887} while (0)
d3561b7f 888
f8822f42 889#define rdtscll(val) (val = paravirt_read_tsc())
d3561b7f 890
688340ea
JF
891static inline unsigned long long paravirt_sched_clock(void)
892{
93b1eab3 893 return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
688340ea 894}
e93ef949 895#define calibrate_tsc() (pv_time_ops.get_tsc_khz())
6cb9a835 896
f8822f42
JF
897static inline unsigned long long paravirt_read_pmc(int counter)
898{
93b1eab3 899 return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
f8822f42 900}
d3561b7f 901
49cd740b
JP
902#define rdpmc(counter, low, high) \
903do { \
f8822f42
JF
904 u64 _l = paravirt_read_pmc(counter); \
905 low = (u32)_l; \
906 high = _l >> 32; \
49cd740b 907} while (0)
3dc494e8 908
e5aaac44
GOC
909static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
910{
911 return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
912}
913
914#define rdtscp(low, high, aux) \
915do { \
916 int __aux; \
917 unsigned long __val = paravirt_rdtscp(&__aux); \
918 (low) = (u32)__val; \
919 (high) = (u32)(__val >> 32); \
920 (aux) = __aux; \
921} while (0)
922
923#define rdtscpll(val, aux) \
924do { \
925 unsigned long __aux; \
926 val = paravirt_rdtscp(&__aux); \
927 (aux) = __aux; \
928} while (0)
929
38ffbe66
JF
930static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
931{
932 PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
933}
934
935static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
936{
937 PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
938}
939
f8822f42
JF
940static inline void load_TR_desc(void)
941{
93b1eab3 942 PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
f8822f42 943}
6b68f01b 944static inline void load_gdt(const struct desc_ptr *dtr)
f8822f42 945{
93b1eab3 946 PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
f8822f42 947}
6b68f01b 948static inline void load_idt(const struct desc_ptr *dtr)
f8822f42 949{
93b1eab3 950 PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
f8822f42
JF
951}
952static inline void set_ldt(const void *addr, unsigned entries)
953{
93b1eab3 954 PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
f8822f42 955}
6b68f01b 956static inline void store_gdt(struct desc_ptr *dtr)
f8822f42 957{
93b1eab3 958 PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
f8822f42 959}
6b68f01b 960static inline void store_idt(struct desc_ptr *dtr)
f8822f42 961{
93b1eab3 962 PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
f8822f42
JF
963}
964static inline unsigned long paravirt_store_tr(void)
965{
93b1eab3 966 return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
f8822f42
JF
967}
968#define store_tr(tr) ((tr) = paravirt_store_tr())
969static inline void load_TLS(struct thread_struct *t, unsigned cpu)
970{
93b1eab3 971 PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
f8822f42 972}
75b8bb3e 973
9f9d489a
JF
974#ifdef CONFIG_X86_64
975static inline void load_gs_index(unsigned int gs)
976{
977 PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
978}
979#endif
980
75b8bb3e
GOC
981static inline void write_ldt_entry(struct desc_struct *dt, int entry,
982 const void *desc)
f8822f42 983{
75b8bb3e 984 PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
f8822f42 985}
014b15be
GOC
986
987static inline void write_gdt_entry(struct desc_struct *dt, int entry,
988 void *desc, int type)
f8822f42 989{
014b15be 990 PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
f8822f42 991}
014b15be 992
8d947344 993static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
f8822f42 994{
8d947344 995 PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
f8822f42
JF
996}
997static inline void set_iopl_mask(unsigned mask)
998{
93b1eab3 999 PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
f8822f42 1000}
3dc494e8 1001
d3561b7f 1002/* The paravirtualized I/O functions */
49cd740b
JP
1003static inline void slow_down_io(void)
1004{
93b1eab3 1005 pv_cpu_ops.io_delay();
d3561b7f 1006#ifdef REALLY_SLOW_IO
93b1eab3
JF
1007 pv_cpu_ops.io_delay();
1008 pv_cpu_ops.io_delay();
1009 pv_cpu_ops.io_delay();
d3561b7f
RR
1010#endif
1011}
1012
13623d79 1013#ifdef CONFIG_X86_LOCAL_APIC
bbab4f3b
ZA
1014static inline void setup_boot_clock(void)
1015{
93b1eab3 1016 PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
bbab4f3b
ZA
1017}
1018
1019static inline void setup_secondary_clock(void)
1020{
93b1eab3 1021 PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
bbab4f3b 1022}
13623d79
RR
1023#endif
1024
6996d3b6
JF
1025static inline void paravirt_post_allocator_init(void)
1026{
93b1eab3
JF
1027 if (pv_init_ops.post_allocator_init)
1028 (*pv_init_ops.post_allocator_init)();
6996d3b6
JF
1029}
1030
b239fb25
JF
1031static inline void paravirt_pagetable_setup_start(pgd_t *base)
1032{
93b1eab3 1033 (*pv_mmu_ops.pagetable_setup_start)(base);
b239fb25
JF
1034}
1035
1036static inline void paravirt_pagetable_setup_done(pgd_t *base)
1037{
93b1eab3 1038 (*pv_mmu_ops.pagetable_setup_done)(base);
b239fb25 1039}
3dc494e8 1040
ae5da273
ZA
1041#ifdef CONFIG_SMP
1042static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
1043 unsigned long start_esp)
1044{
93b1eab3
JF
1045 PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
1046 phys_apicid, start_eip, start_esp);
ae5da273
ZA
1047}
1048#endif
13623d79 1049
d6dd61c8
JF
1050static inline void paravirt_activate_mm(struct mm_struct *prev,
1051 struct mm_struct *next)
1052{
93b1eab3 1053 PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
d6dd61c8
JF
1054}
1055
1056static inline void arch_dup_mmap(struct mm_struct *oldmm,
1057 struct mm_struct *mm)
1058{
93b1eab3 1059 PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
d6dd61c8
JF
1060}
1061
1062static inline void arch_exit_mmap(struct mm_struct *mm)
1063{
93b1eab3 1064 PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
d6dd61c8
JF
1065}
1066
f8822f42
JF
1067static inline void __flush_tlb(void)
1068{
93b1eab3 1069 PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
f8822f42
JF
1070}
1071static inline void __flush_tlb_global(void)
1072{
93b1eab3 1073 PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
f8822f42
JF
1074}
1075static inline void __flush_tlb_single(unsigned long addr)
1076{
93b1eab3 1077 PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
f8822f42 1078}
da181a8b 1079
4595f962
RR
1080static inline void flush_tlb_others(const struct cpumask *cpumask,
1081 struct mm_struct *mm,
d4c10477
JF
1082 unsigned long va)
1083{
4595f962 1084 PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
d4c10477
JF
1085}
1086
eba0045f
JF
1087static inline int paravirt_pgd_alloc(struct mm_struct *mm)
1088{
1089 return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
1090}
1091
1092static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1093{
1094 PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
1095}
1096
f8639939 1097static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
f8822f42 1098{
6944a9c8 1099 PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
f8822f42 1100}
f8639939 1101static inline void paravirt_release_pte(unsigned long pfn)
f8822f42 1102{
6944a9c8 1103 PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
f8822f42 1104}
c119ecce 1105
f8639939 1106static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
f8822f42 1107{
6944a9c8 1108 PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
f8822f42 1109}
c119ecce 1110
f8639939
EH
1111static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
1112 unsigned long start, unsigned long count)
f8822f42 1113{
6944a9c8 1114 PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
f8822f42 1115}
f8639939 1116static inline void paravirt_release_pmd(unsigned long pfn)
da181a8b 1117{
6944a9c8 1118 PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
da181a8b
RR
1119}
1120
f8639939 1121static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
2761fa09
JF
1122{
1123 PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
1124}
f8639939 1125static inline void paravirt_release_pud(unsigned long pfn)
2761fa09
JF
1126{
1127 PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
1128}
1129
ce6234b5
JF
1130#ifdef CONFIG_HIGHPTE
1131static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
1132{
1133 unsigned long ret;
93b1eab3 1134 ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
ce6234b5
JF
1135 return (void *)ret;
1136}
1137#endif
1138
f8822f42
JF
1139static inline void pte_update(struct mm_struct *mm, unsigned long addr,
1140 pte_t *ptep)
da181a8b 1141{
93b1eab3 1142 PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
da181a8b
RR
1143}
1144
f8822f42
JF
1145static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
1146 pte_t *ptep)
da181a8b 1147{
93b1eab3 1148 PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
da181a8b
RR
1149}
1150
773221f4 1151static inline pte_t __pte(pteval_t val)
da181a8b 1152{
773221f4
JF
1153 pteval_t ret;
1154
1155 if (sizeof(pteval_t) > sizeof(long))
da5de7c2
JF
1156 ret = PVOP_CALLEE2(pteval_t,
1157 pv_mmu_ops.make_pte,
1158 val, (u64)val >> 32);
773221f4 1159 else
da5de7c2
JF
1160 ret = PVOP_CALLEE1(pteval_t,
1161 pv_mmu_ops.make_pte,
1162 val);
773221f4 1163
c8e5393a 1164 return (pte_t) { .pte = ret };
da181a8b
RR
1165}
1166
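/*
 * Note on the sizeof() test above: with 32-bit PAE, pteval_t is u64
 * while unsigned long is 32 bits, so the pte is split into two register
 * arguments (low word, then high word) to match the regparm calling
 * convention; the CALLEE2 variant passes exactly that pair.  On 64-bit
 * (and non-PAE 32-bit) the value fits in one register and the
 * single-argument CALLEE1 form is used.  The same pattern repeats for
 * pte_val and the pgd/pmd/pud helpers below.
 */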
773221f4
JF
1167static inline pteval_t pte_val(pte_t pte)
1168{
1169 pteval_t ret;
1170
1171 if (sizeof(pteval_t) > sizeof(long))
da5de7c2
JF
1172 ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
1173 pte.pte, (u64)pte.pte >> 32);
773221f4 1174 else
da5de7c2
JF
1175 ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
1176 pte.pte);
773221f4
JF
1177
1178 return ret;
1179}
1180
ef38503e 1181static inline pgd_t __pgd(pgdval_t val)
da181a8b 1182{
ef38503e
JF
1183 pgdval_t ret;
1184
1185 if (sizeof(pgdval_t) > sizeof(long))
da5de7c2
JF
1186 ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
1187 val, (u64)val >> 32);
ef38503e 1188 else
da5de7c2
JF
1189 ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
1190 val);
ef38503e
JF
1191
1192 return (pgd_t) { ret };
1193}
1194
1195static inline pgdval_t pgd_val(pgd_t pgd)
1196{
1197 pgdval_t ret;
1198
1199 if (sizeof(pgdval_t) > sizeof(long))
da5de7c2
JF
1200 ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
1201 pgd.pgd, (u64)pgd.pgd >> 32);
ef38503e 1202 else
da5de7c2
JF
1203 ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
1204 pgd.pgd);
ef38503e
JF
1205
1206 return ret;
f8822f42
JF
1207}
1208
08b882c6
JF
1209#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1210static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
1211 pte_t *ptep)
1212{
1213 pteval_t ret;
1214
1215 ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
1216 mm, addr, ptep);
1217
1218 return (pte_t) { .pte = ret };
1219}
1220
1221static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
1222 pte_t *ptep, pte_t pte)
1223{
1224 if (sizeof(pteval_t) > sizeof(long))
1225 /* 5 arg words */
1226 pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
1227 else
1228 PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
1229 mm, addr, ptep, pte.pte);
1230}
1231
4eed80cd
JF
1232static inline void set_pte(pte_t *ptep, pte_t pte)
1233{
1234 if (sizeof(pteval_t) > sizeof(long))
1235 PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
1236 pte.pte, (u64)pte.pte >> 32);
1237 else
1238 PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
1239 pte.pte);
1240}
1241
1242static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
1243 pte_t *ptep, pte_t pte)
1244{
1245 if (sizeof(pteval_t) > sizeof(long))
1246 /* 5 arg words */
1247 pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
1248 else
1249 PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
1250}
1251
60b3f626
JF
1252static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
1253{
1254 pmdval_t val = native_pmd_val(pmd);
1255
1256 if (sizeof(pmdval_t) > sizeof(long))
1257 PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
1258 else
1259 PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
1260}
1261
1fe91514
GOC
1262#if PAGETABLE_LEVELS >= 3
1263static inline pmd_t __pmd(pmdval_t val)
1264{
1265 pmdval_t ret;
1266
1267 if (sizeof(pmdval_t) > sizeof(long))
da5de7c2
JF
1268 ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
1269 val, (u64)val >> 32);
1fe91514 1270 else
da5de7c2
JF
1271 ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
1272 val);
1fe91514
GOC
1273
1274 return (pmd_t) { ret };
1275}
1276
1277static inline pmdval_t pmd_val(pmd_t pmd)
1278{
1279 pmdval_t ret;
1280
1281 if (sizeof(pmdval_t) > sizeof(long))
da5de7c2
JF
1282 ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
1283 pmd.pmd, (u64)pmd.pmd >> 32);
1fe91514 1284 else
da5de7c2
JF
1285 ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
1286 pmd.pmd);
1fe91514
GOC
1287
1288 return ret;
1289}
1290
1291static inline void set_pud(pud_t *pudp, pud_t pud)
1292{
1293 pudval_t val = native_pud_val(pud);
1294
1295 if (sizeof(pudval_t) > sizeof(long))
1296 PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
1297 val, (u64)val >> 32);
1298 else
1299 PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
1300 val);
1301}
9042219c
EH
1302#if PAGETABLE_LEVELS == 4
1303static inline pud_t __pud(pudval_t val)
1304{
1305 pudval_t ret;
1306
1307 if (sizeof(pudval_t) > sizeof(long))
da5de7c2
JF
1308 ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
1309 val, (u64)val >> 32);
9042219c 1310 else
da5de7c2
JF
1311 ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
1312 val);
9042219c
EH
1313
1314 return (pud_t) { ret };
1315}
1316
1317static inline pudval_t pud_val(pud_t pud)
1318{
1319 pudval_t ret;
1320
1321 if (sizeof(pudval_t) > sizeof(long))
4767afbf
JF
1322 ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
1323 pud.pud, (u64)pud.pud >> 32);
9042219c 1324 else
4767afbf
JF
1325 ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
1326 pud.pud);
9042219c
EH
1327
1328 return ret;
1329}
1330
1331static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
1332{
1333 pgdval_t val = native_pgd_val(pgd);
1334
1335 if (sizeof(pgdval_t) > sizeof(long))
1336 PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
1337 val, (u64)val >> 32);
1338 else
1339 PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
1340 val);
1341}
1342
1343static inline void pgd_clear(pgd_t *pgdp)
1344{
1345 set_pgd(pgdp, __pgd(0));
1346}
1347
1348static inline void pud_clear(pud_t *pudp)
1349{
1350 set_pud(pudp, __pud(0));
1351}
1352
1353#endif /* PAGETABLE_LEVELS == 4 */
1354
1fe91514
GOC
1355#endif /* PAGETABLE_LEVELS >= 3 */
1356
4eed80cd
JF
1357#ifdef CONFIG_X86_PAE
1358/* Special-case pte-setting operations for PAE, which can't update a
1359 64-bit pte atomically */
1360static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
1361{
1362 PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
1363 pte.pte, pte.pte >> 32);
1364}
1365
4eed80cd
JF
1366static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
1367 pte_t *ptep)
1368{
1369 PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
1370}
60b3f626
JF
1371
1372static inline void pmd_clear(pmd_t *pmdp)
1373{
1374 PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
1375}
4eed80cd
JF
1376#else /* !CONFIG_X86_PAE */
1377static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
1378{
1379 set_pte(ptep, pte);
1380}
1381
4eed80cd
JF
1382static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
1383 pte_t *ptep)
1384{
1385 set_pte_at(mm, addr, ptep, __pte(0));
1386}
60b3f626
JF
1387
1388static inline void pmd_clear(pmd_t *pmdp)
1389{
1390 set_pmd(pmdp, __pmd(0));
1391}
4eed80cd
JF
1392#endif /* CONFIG_X86_PAE */
1393
/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_enter_lazy_cpu(void);
void paravirt_leave_lazy_cpu(void);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_leave_lazy(enum paravirt_lazy_mode mode);

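/*
 * Usage sketch: the lazy hooks let a hypervisor batch a series of
 * page-table updates into one round trip.  Generic mm code brackets a
 * run of pte updates roughly like this (illustrative only):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (addr = start; addr < end; addr += PAGE_SIZE, ptep++)
 *		set_pte_at(mm, addr, ptep, newpte);	// may be queued
 *	arch_leave_lazy_mmu_mode();			// single flush
 *
 * Between enter and leave the backend is free to defer the individual
 * set_pte_at() calls and apply them all when the lazy section ends.
 */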
9226d125 1408#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
f8822f42
JF
1409static inline void arch_enter_lazy_cpu_mode(void)
1410{
8965c1c0 1411 PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
f8822f42
JF
1412}
1413
1414static inline void arch_leave_lazy_cpu_mode(void)
1415{
8965c1c0 1416 PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
f8822f42
JF
1417}
1418
d85cf93d 1419void arch_flush_lazy_cpu_mode(void);
9226d125
ZA
1420
1421#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
f8822f42
JF
1422static inline void arch_enter_lazy_mmu_mode(void)
1423{
8965c1c0 1424 PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
f8822f42
JF
1425}
1426
1427static inline void arch_leave_lazy_mmu_mode(void)
1428{
8965c1c0 1429 PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
f8822f42
JF
1430}
1431
d85cf93d 1432void arch_flush_lazy_mmu_mode(void);
9226d125 1433
aeaaa59c
JF
1434static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
1435 unsigned long phys, pgprot_t flags)
1436{
1437 pv_mmu_ops.set_fixmap(idx, phys, flags);
1438}
1439
45876233 1440void _paravirt_nop(void);
41edafdb
JF
1441u32 _paravirt_ident_32(u32);
1442u64 _paravirt_ident_64(u64);
1443
45876233
JF
1444#define paravirt_nop ((void *)_paravirt_nop)
1445
4bb689ee
IM
1446#ifdef CONFIG_SMP
1447
74d4affd
JF
1448static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
1449{
1450 return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
1451}
1452
1453static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
1454{
1455 return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
1456}
a5ef7ca0 1457#define __raw_spin_is_contended __raw_spin_is_contended
74d4affd
JF
1458
1459static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
1460{
32172561 1461 PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
74d4affd
JF
1462}
1463
63d3a75d
JF
1464static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
1465 unsigned long flags)
1466{
1467 PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
1468}
1469
74d4affd
JF
1470static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
1471{
1472 return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
1473}
1474
1475static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
1476{
32172561 1477 PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
74d4affd
JF
1478}
1479
4bb689ee
IM
1480#endif
1481
/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
	u8 *instr;		/* original instructions */
	u8 instrtype;		/* type of this instruction */
	u8 len;			/* length of original instruction */
	u16 clobbers;		/* what registers you may clobber */
};

extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];

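/*
 * Rough sketch of how these sites are consumed (the real code is
 * apply_paravirt() in arch/x86/kernel/alternative.c; details here are
 * from memory and simplified):
 *
 *	struct paravirt_patch_site *p;
 *	char insnbuf[MAX_PATCH_LEN];
 *
 *	for (p = __parainstructions; p < __parainstructions_end; p++) {
 *		unsigned used;
 *
 *		memcpy(insnbuf, p->instr, p->len);
 *		used = pv_init_ops.patch(p->instrtype, p->clobbers,
 *					 insnbuf, (unsigned long)p->instr,
 *					 p->len);
 *		// pad insnbuf[used..len) with nops, then copy it back
 *		// over the original call site
 *	}
 */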
2e47d3e6 1493#ifdef CONFIG_X86_32
ecb93d1c
JF
1494#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
1495#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
1496
1497/* save and restore all caller-save registers, except return value */
e584f559
JF
1498#define PV_SAVE_ALL_CALLER_REGS "pushl %ecx;"
1499#define PV_RESTORE_ALL_CALLER_REGS "popl %ecx;"
ecb93d1c 1500
2e47d3e6
GOC
1501#define PV_FLAGS_ARG "0"
1502#define PV_EXTRA_CLOBBERS
1503#define PV_VEXTRA_CLOBBERS
1504#else
ecb93d1c
JF
1505/* save and restore all caller-save registers, except return value */
1506#define PV_SAVE_ALL_CALLER_REGS \
1507 "push %rcx;" \
1508 "push %rdx;" \
1509 "push %rsi;" \
1510 "push %rdi;" \
1511 "push %r8;" \
1512 "push %r9;" \
1513 "push %r10;" \
1514 "push %r11;"
1515#define PV_RESTORE_ALL_CALLER_REGS \
1516 "pop %r11;" \
1517 "pop %r10;" \
1518 "pop %r9;" \
1519 "pop %r8;" \
1520 "pop %rdi;" \
1521 "pop %rsi;" \
1522 "pop %rdx;" \
1523 "pop %rcx;"
1524
/* Saving all the caller-saved registers would be too much; instead we
 * save only the argument register and mark the rest of the caller-saved
 * registers as clobbered. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call.  The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
	static void *__##func##__ __used = func;			\
									\
	asm(".pushsection .text;"					\
	    "__raw_callee_save_" #func ": "				\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    "ret;"							\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })

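/*
 * Concretely, PV_CALLEE_SAVE_REGS_THUNK(my_hv_save_fl) (a hypothetical
 * function name) emits an assembly symbol
 * __raw_callee_save_my_hv_save_fl that pushes the registers listed in
 * PV_SAVE_ALL_CALLER_REGS, calls my_hv_save_fl, pops them again and
 * returns.  The backend then registers the thunk with
 *
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_hv_save_fl);
 *
 * whereas __PV_IS_CALLEE_SAVE() is for functions (typically hand-written
 * assembly) that already obey the restricted convention and need no
 * thunk.
 */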
139ec7c4
RR
1566static inline unsigned long __raw_local_save_flags(void)
1567{
1568 unsigned long f;
1569
ecb93d1c 1570 asm volatile(paravirt_alt(PARAVIRT_CALL)
d5822035 1571 : "=a"(f)
93b1eab3 1572 : paravirt_type(pv_irq_ops.save_fl),
42c24fa2 1573 paravirt_clobber(CLBR_EAX)
ecb93d1c 1574 : "memory", "cc");
139ec7c4
RR
1575 return f;
1576}
1577
1578static inline void raw_local_irq_restore(unsigned long f)
1579{
ecb93d1c 1580 asm volatile(paravirt_alt(PARAVIRT_CALL)
d5822035 1581 : "=a"(f)
2e47d3e6 1582 : PV_FLAGS_ARG(f),
93b1eab3 1583 paravirt_type(pv_irq_ops.restore_fl),
d5822035 1584 paravirt_clobber(CLBR_EAX)
ecb93d1c 1585 : "memory", "cc");
139ec7c4
RR
1586}
1587
1588static inline void raw_local_irq_disable(void)
1589{
ecb93d1c 1590 asm volatile(paravirt_alt(PARAVIRT_CALL)
d5822035 1591 :
93b1eab3 1592 : paravirt_type(pv_irq_ops.irq_disable),
d5822035 1593 paravirt_clobber(CLBR_EAX)
ecb93d1c 1594 : "memory", "eax", "cc");
139ec7c4
RR
1595}
1596
1597static inline void raw_local_irq_enable(void)
1598{
ecb93d1c 1599 asm volatile(paravirt_alt(PARAVIRT_CALL)
d5822035 1600 :
93b1eab3 1601 : paravirt_type(pv_irq_ops.irq_enable),
d5822035 1602 paravirt_clobber(CLBR_EAX)
ecb93d1c 1603 : "memory", "eax", "cc");
139ec7c4
RR
1604}
1605
1606static inline unsigned long __raw_local_irq_save(void)
1607{
1608 unsigned long f;
1609
d5822035
JF
1610 f = __raw_local_save_flags();
1611 raw_local_irq_disable();
139ec7c4
RR
1612 return f;
1613}
1614
74d4affd 1615
294688c0 1616/* Make sure as little as possible of this mess escapes. */
d5822035 1617#undef PARAVIRT_CALL
1a45b7aa
JF
1618#undef __PVOP_CALL
1619#undef __PVOP_VCALL
f8822f42
JF
1620#undef PVOP_VCALL0
1621#undef PVOP_CALL0
1622#undef PVOP_VCALL1
1623#undef PVOP_CALL1
1624#undef PVOP_VCALL2
1625#undef PVOP_CALL2
1626#undef PVOP_VCALL3
1627#undef PVOP_CALL3
1628#undef PVOP_VCALL4
1629#undef PVOP_CALL4
139ec7c4 1630
d3561b7f
RR
1631#else /* __ASSEMBLY__ */
1632
658be9d3 1633#define _PVSITE(ptype, clobbers, ops, word, algn) \
139ec7c4
RR
1634771:; \
1635 ops; \
1636772:; \
1637 .pushsection .parainstructions,"a"; \
658be9d3
GOC
1638 .align algn; \
1639 word 771b; \
139ec7c4
RR
1640 .byte ptype; \
1641 .byte 772b-771b; \
1642 .short clobbers; \
1643 .popsection
1644
658be9d3 1645
9104a18d 1646#define COND_PUSH(set, mask, reg) \
ecb93d1c 1647 .if ((~(set)) & mask); push %reg; .endif
9104a18d 1648#define COND_POP(set, mask, reg) \
ecb93d1c 1649 .if ((~(set)) & mask); pop %reg; .endif
9104a18d 1650
658be9d3 1651#ifdef CONFIG_X86_64
9104a18d
JF
1652
1653#define PV_SAVE_REGS(set) \
1654 COND_PUSH(set, CLBR_RAX, rax); \
1655 COND_PUSH(set, CLBR_RCX, rcx); \
1656 COND_PUSH(set, CLBR_RDX, rdx); \
1657 COND_PUSH(set, CLBR_RSI, rsi); \
1658 COND_PUSH(set, CLBR_RDI, rdi); \
1659 COND_PUSH(set, CLBR_R8, r8); \
1660 COND_PUSH(set, CLBR_R9, r9); \
1661 COND_PUSH(set, CLBR_R10, r10); \
1662 COND_PUSH(set, CLBR_R11, r11)
1663#define PV_RESTORE_REGS(set) \
1664 COND_POP(set, CLBR_R11, r11); \
1665 COND_POP(set, CLBR_R10, r10); \
1666 COND_POP(set, CLBR_R9, r9); \
1667 COND_POP(set, CLBR_R8, r8); \
1668 COND_POP(set, CLBR_RDI, rdi); \
1669 COND_POP(set, CLBR_RSI, rsi); \
1670 COND_POP(set, CLBR_RDX, rdx); \
1671 COND_POP(set, CLBR_RCX, rcx); \
1672 COND_POP(set, CLBR_RAX, rax)
1673
6057fc82 1674#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
658be9d3 1675#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
491eccb7 1676#define PARA_INDIRECT(addr) *addr(%rip)
658be9d3 1677#else
9104a18d
JF
1678#define PV_SAVE_REGS(set) \
1679 COND_PUSH(set, CLBR_EAX, eax); \
1680 COND_PUSH(set, CLBR_EDI, edi); \
1681 COND_PUSH(set, CLBR_ECX, ecx); \
1682 COND_PUSH(set, CLBR_EDX, edx)
1683#define PV_RESTORE_REGS(set) \
1684 COND_POP(set, CLBR_EDX, edx); \
1685 COND_POP(set, CLBR_ECX, ecx); \
1686 COND_POP(set, CLBR_EDI, edi); \
1687 COND_POP(set, CLBR_EAX, eax)
1688
6057fc82 1689#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
658be9d3 1690#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
491eccb7 1691#define PARA_INDIRECT(addr) *%cs:addr
658be9d3
GOC
1692#endif
1693
93b1eab3
JF
1694#define INTERRUPT_RETURN \
1695 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \
491eccb7 1696 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
d5822035
JF
1697
1698#define DISABLE_INTERRUPTS(clobbers) \
93b1eab3 1699 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
ecb93d1c 1700 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
491eccb7 1701 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
ecb93d1c 1702 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
d5822035
JF
1703
1704#define ENABLE_INTERRUPTS(clobbers) \
93b1eab3 1705 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
ecb93d1c 1706 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
491eccb7 1707 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
ecb93d1c 1708 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
d5822035 1709
2be29982
JF
1710#define USERGS_SYSRET32 \
1711 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \
6abcd98f 1712 CLBR_NONE, \
2be29982 1713 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))
2e47d3e6 1714
6057fc82 1715#ifdef CONFIG_X86_32
491eccb7
JF
1716#define GET_CR0_INTO_EAX \
1717 push %ecx; push %edx; \
1718 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
42c24fa2 1719 pop %edx; pop %ecx
2be29982
JF
1720
1721#define ENABLE_INTERRUPTS_SYSEXIT \
1722 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
1723 CLBR_NONE, \
1724 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
1725
1726
1727#else /* !CONFIG_X86_32 */
a00394f8
JF
1728
1729/*
1730 * If swapgs is used while the userspace stack is still current,
1731 * there's no way to call a pvop. The PV replacement *must* be
1732 * inlined, or the swapgs instruction must be trapped and emulated.
1733 */
1734#define SWAPGS_UNSAFE_STACK \
1735 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
1736 swapgs)
1737
/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special.  Either way, we don't need to save any registers for
 * it.
 */
e801f864
GOC
1744#define SWAPGS \
1745 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
9104a18d 1746 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
e801f864
GOC
1747 )
1748
491eccb7
JF
1749#define GET_CR2_INTO_RCX \
1750 call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
1751 movq %rax, %rcx; \
4a8c4c4e
GOC
1752 xorq %rax, %rax;
1753
fab58420
JF
1754#define PARAVIRT_ADJUST_EXCEPTION_FRAME \
1755 PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
1756 CLBR_NONE, \
1757 call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))
1758
2be29982
JF
1759#define USERGS_SYSRET64 \
1760 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
d75cd22f 1761 CLBR_NONE, \
2be29982
JF
1762 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
1763
1764#define ENABLE_INTERRUPTS_SYSEXIT32 \
1765 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
1766 CLBR_NONE, \
1767 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
1768#endif /* CONFIG_X86_32 */
139ec7c4 1769
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */