#ifndef __ASM_PARAVIRT_H
#define __ASM_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/page.h>
#include <asm/asm.h>

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE	0
#define CLBR_EAX	(1 << 0)
#define CLBR_ECX	(1 << 1)
#define CLBR_EDX	(1 << 2)

#ifdef CONFIG_X86_64
#define CLBR_RSI	(1 << 3)
#define CLBR_RDI	(1 << 4)
#define CLBR_R8		(1 << 5)
#define CLBR_R9		(1 << 6)
#define CLBR_R10	(1 << 7)
#define CLBR_R11	(1 << 8)
#define CLBR_ANY	((1 << 9) - 1)
#include <asm/desc_defs.h>
#else
/* CLBR_ANY should match all registers the platform has.  For i386,
   that's just EAX, ECX and EDX. */
#define CLBR_ANY	((1 << 3) - 1)
#endif /* X86_64 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/kmap_types.h>
#include <asm/desc_defs.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;

/* general info */
struct pv_info {
	unsigned int kernel_rpl;
	int shared_kernel_pmd;
	int paravirt_enabled;
	const char *name;
};

struct pv_init_ops {
	/*
	 * Patch may replace one of the defined code sequences with
	 * arbitrary code, subject to the same register constraints.
	 * This generally means the code is not free to clobber any
	 * registers other than EAX.  The patch function should return
	 * the number of bytes of code generated, as we nop pad the
	 * rest in generic code.
	 */
	unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
			  unsigned long addr, unsigned len);

	/* Basic arch-specific setup */
	void (*arch_setup)(void);
	char *(*memory_setup)(void);
	void (*post_allocator_init)(void);

	/* Print a banner to identify the environment */
	void (*banner)(void);
};


struct pv_lazy_ops {
	/* Set deferred update mode, used for batching operations. */
	void (*enter)(void);
	void (*leave)(void);
};

struct pv_time_ops {
	void (*time_init)(void);

	/* Get and set time of day */
	unsigned long (*get_wallclock)(void);
	int (*set_wallclock)(unsigned long);

	unsigned long long (*sched_clock)(void);
	unsigned long (*get_tsc_khz)(void);
};

struct pv_cpu_ops {
	/* hooks for various privileged instructions */
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	void (*clts)(void);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	unsigned long (*read_cr4_safe)(void);
	unsigned long (*read_cr4)(void);
	void (*write_cr4)(unsigned long);

#ifdef CONFIG_X86_64
	unsigned long (*read_cr8)(void);
	void (*write_cr8)(unsigned long);
#endif

	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	void (*store_gdt)(struct desc_ptr *);
	void (*store_idt)(struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
#ifdef CONFIG_X86_64
	void (*load_gs_index)(unsigned int idx);
#endif
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);

	void (*set_iopl_mask)(unsigned mask);

	void (*wbinvd)(void);
	void (*io_delay)(void);

	/* cpuid emulation, mostly so that caps bits can be disabled */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);

	/* MSR, PMC and TSC operations.
	   err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
	u64 (*read_msr)(unsigned int msr, int *err);
	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);

	u64 (*read_tsc)(void);
	u64 (*read_pmc)(int counter);
	unsigned long long (*read_tscp)(unsigned int *aux);

	/*
	 * Atomically enable interrupts and return to userspace.  This
	 * is only ever used to return to 32-bit processes; in a
	 * 64-bit kernel, it's used for 32-on-64 compat processes, but
	 * never native 64-bit processes.  (Jump, not call.)
	 */
	void (*irq_enable_sysexit)(void);

	/*
	 * Switch to usermode gs and return to 64-bit usermode using
	 * sysret.  Only used in 64-bit kernels to return to 64-bit
	 * processes.  Usermode register state, including %rsp, must
	 * already be restored.
	 */
	void (*usergs_sysret64)(void);

	/*
	 * Switch to usermode gs and return to 32-bit usermode using
	 * sysret.  Used to return to 32-on-64 compat processes.
	 * Other usermode register state, including %esp, must already
	 * be restored.
	 */
	void (*usergs_sysret32)(void);

	/* Normal iret.  Jump to this with the standard iret stack
	   frame set up. */
	void (*iret)(void);

	void (*swapgs)(void);

	struct pv_lazy_ops lazy_mode;
};

struct pv_irq_ops {
	void (*init_IRQ)(void);

	/*
	 * Get/set interrupt state.  save_fl and restore_fl are only
	 * expected to use X86_EFLAGS_IF; all other bits
	 * returned from save_fl are undefined, and may be ignored by
	 * restore_fl.
	 */
	unsigned long (*save_fl)(void);
	void (*restore_fl)(unsigned long);
	void (*irq_disable)(void);
	void (*irq_enable)(void);
	void (*safe_halt)(void);
	void (*halt)(void);

#ifdef CONFIG_X86_64
	void (*adjust_exception_frame)(void);
#endif
};

struct pv_apic_ops {
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Direct APIC operations, principally for VMI.  Ideally
	 * these shouldn't be in this interface.
	 */
	void (*apic_write)(unsigned long reg, u32 v);
	u32 (*apic_read)(unsigned long reg);
	void (*setup_boot_clock)(void);
	void (*setup_secondary_clock)(void);

	void (*startup_ipi_hook)(int phys_apicid,
				 unsigned long start_eip,
				 unsigned long start_esp);
#endif
};

struct pv_mmu_ops {
	/*
	 * Called before/after init_mm pagetable setup. setup_start
	 * may reset %cr3, and may pre-install parts of the pagetable;
	 * pagetable setup is expected to preserve any existing
	 * mapping.
	 */
	void (*pagetable_setup_start)(pgd_t *pgd_base);
	void (*pagetable_setup_done)(pgd_t *pgd_base);

	unsigned long (*read_cr2)(void);
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);

	/*
	 * Hooks for intercepting the creation/use/destruction of an
	 * mm_struct.
	 */
	void (*activate_mm)(struct mm_struct *prev,
			    struct mm_struct *next);
	void (*dup_mmap)(struct mm_struct *oldmm,
			 struct mm_struct *mm);
	void (*exit_mmap)(struct mm_struct *mm);

	/* TLB operations */
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_single)(unsigned long addr);
	void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
				 unsigned long va);

	/* Hooks for allocating and freeing a pagetable top-level */
	int  (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

	/*
	 * Hooks for allocating/releasing pagetable pages when they're
	 * attached to a pagetable
	 */
	void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
	void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
	void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
	void (*alloc_pud)(struct mm_struct *mm, u32 pfn);
	void (*release_pte)(u32 pfn);
	void (*release_pmd)(u32 pfn);
	void (*release_pud)(u32 pfn);

	/* Pagetable manipulation functions */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
	void (*pte_update)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep);
	void (*pte_update_defer)(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep);

	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte);

	pteval_t (*pte_val)(pte_t);
	pteval_t (*pte_flags)(pte_t);
	pte_t (*make_pte)(pteval_t pte);

	pgdval_t (*pgd_val)(pgd_t);
	pgd_t (*make_pgd)(pgdval_t pgd);

#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
	void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte);
	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep);
	void (*pmd_clear)(pmd_t *pmdp);

#endif	/* CONFIG_X86_PAE */

	void (*set_pud)(pud_t *pudp, pud_t pudval);

	pmdval_t (*pmd_val)(pmd_t);
	pmd_t (*make_pmd)(pmdval_t pmd);

#if PAGETABLE_LEVELS == 4
	pudval_t (*pud_val)(pud_t);
	pud_t (*make_pud)(pudval_t pud);

	void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
#endif	/* PAGETABLE_LEVELS == 4 */
#endif	/* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_HIGHPTE
	void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
#endif

	struct pv_lazy_ops lazy_mode;

	/* dom0 ops */

	/* Sometimes the physical address is a pfn, and sometimes it's
	   an mfn.  We can tell which is which from the index. */
	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
			   unsigned long phys, pgprot_t flags);
};

struct raw_spinlock;
struct pv_lock_ops {
	int (*spin_is_locked)(struct raw_spinlock *lock);
	int (*spin_is_contended)(struct raw_spinlock *lock);
	void (*spin_lock)(struct raw_spinlock *lock);
	int (*spin_trylock)(struct raw_spinlock *lock);
	void (*spin_unlock)(struct raw_spinlock *lock);
};

/* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch. */
struct paravirt_patch_template {
	struct pv_init_ops pv_init_ops;
	struct pv_time_ops pv_time_ops;
	struct pv_cpu_ops pv_cpu_ops;
	struct pv_irq_ops pv_irq_ops;
	struct pv_apic_ops pv_apic_ops;
	struct pv_mmu_ops pv_mmu_ops;
	struct pv_lock_ops pv_lock_ops;
};

extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_apic_ops pv_apic_ops;
extern struct pv_mmu_ops pv_mmu_ops;
extern struct pv_lock_ops pv_lock_ops;

#define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
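
/*
 * For illustration (editor's note, not part of the original header):
 * PARAVIRT_PATCH turns a member of paravirt_patch_template into a small
 * integer "type" number.  pv_init_ops above holds five function
 * pointers, so PARAVIRT_PATCH(pv_time_ops.time_init) -- the very next
 * pointer-sized slot -- evaluates to 5, and that same number lets the
 * patcher find the slot again when it rewrites a recorded call site.
 */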

#define paravirt_type(op)				\
	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
	[paravirt_opptr] "m" (op)
#define paravirt_clobber(clobber)		\
	[paravirt_clobber] "i" (clobber)

/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)	\
	"771:\n\t" insn_string "\n" "772:\n"		\
	".pushsection .parainstructions,\"a\"\n"	\
	_ASM_ALIGN "\n"					\
	_ASM_PTR " 771b\n"				\
	" .byte " type "\n"				\
	" .byte 772b-771b\n"				\
	" .short " clobber "\n"				\
	".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)					\
	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

/* Simple instruction patching code. */
#define DEF_NATIVE(ops, name, code)					\
	extern const char start_##ops##_##name[], end_##ops##_##name[];	\
	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
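
/*
 * A usage sketch (editor's note, not part of the original header): the
 * actual DEF_NATIVE instances live in the paravirt patching code, e.g.
 *
 *	DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 *	DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 *
 * Each one emits the native instruction sequence between the labels
 * start_pv_irq_ops_irq_disable[]/end_pv_irq_ops_irq_disable[], so that
 * paravirt_patch_insns() can copy it straight over a call site.
 */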

unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
			     const void *target, u16 tgt_clobbers,
			     unsigned long addr, u16 site_clobbers,
			     unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
			    unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
				unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
			      const char *start, const char *end);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL	"call *%[paravirt_opptr];"

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters passed in %rdi, %rsi,
 * %rdx and %rcx.  Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, which
 * unfortunately are quite a few (r8 - r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/i386/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * 64-bit arguments are passed as a pair of adjacent 32-bit arguments;
 * i386 passes such pairs in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
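
/*
 * For illustration (editor's note, not part of the original header), a
 * wrapper such as the read_cr2() defined later in this file,
 *
 *	static inline unsigned long read_cr2(void)
 *	{
 *		return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
 *	}
 *
 * compiles to roughly
 *
 *	call *pv_mmu_ops+PV_MMU_read_cr2   # recorded in .parainstructions
 *
 * and the patcher may later turn that site into a direct call to the
 * backend's read_cr2, or inline the native "mov %cr2, %eax" outright.
 */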
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS			unsigned long __eax, __edx, __ecx
#define PVOP_CALL_ARGS			PVOP_VCALL_ARGS
#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS
#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else
#define PVOP_VCALL_ARGS		unsigned long __edi, __esi, __edx, __ecx
#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS, __eax
#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
				"=S" (__esi), "=d" (__edx),		\
				"=c" (__ecx)

#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)op)
#endif

#define __PVOP_CALL(rettype, op, pre, post, ...)			\
	({								\
		rettype __ret;						\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		/* This is 32-bit specific, but is okay in 64-bit */	\
		/* since this condition will never hold */		\
		if (sizeof(rettype) > sizeof(unsigned long)) {		\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : PVOP_CALL_CLOBBERS		\
				     : paravirt_type(op),		\
				       paravirt_clobber(CLBR_ANY),	\
				       ##__VA_ARGS__			\
				     : "memory", "cc" EXTRA_CLOBBERS);	\
			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
		} else {						\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : PVOP_CALL_CLOBBERS		\
				     : paravirt_type(op),		\
				       paravirt_clobber(CLBR_ANY),	\
				       ##__VA_ARGS__			\
				     : "memory", "cc" EXTRA_CLOBBERS);	\
			__ret = (rettype)__eax;				\
		}							\
		__ret;							\
	})
#define __PVOP_VCALL(op, pre, post, ...)				\
	({								\
		PVOP_VCALL_ARGS;					\
		PVOP_TEST_NULL(op);					\
		asm volatile(pre					\
			     paravirt_alt(PARAVIRT_CALL)		\
			     post					\
			     : PVOP_VCALL_CLOBBERS			\
			     : paravirt_type(op),			\
			       paravirt_clobber(CLBR_ANY),		\
			       ##__VA_ARGS__				\
			     : "memory", "cc" VEXTRA_CLOBBERS);		\
	})

#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op, "", "")

#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))

#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),	\
		    "1" ((unsigned long)(arg2)))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),		\
		     "1" ((unsigned long)(arg2)))

#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),	\
		    "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),		\
		     "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)))

/* This is the only difference in x86_64. We can make it much simpler */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		    "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
		    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op,						\
		     "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		     "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
		     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),	\
		    "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)), \
		    "3" ((unsigned long)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),		\
		     "1" ((unsigned long)(arg2)), "2" ((unsigned long)(arg3)), \
		     "3" ((unsigned long)(arg4)))
#endif

static inline int paravirt_enabled(void)
{
	return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

#define ARCH_SETUP			pv_init_ops.arch_setup();
static inline unsigned long get_wallclock(void)
{
	return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
}

static inline int set_wallclock(unsigned long nowtime)
{
	return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
}

static inline void (*choose_time_init(void))(void)
{
	return pv_time_ops.time_init;
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
	PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void raw_safe_halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}
static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)			\
do {						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	int _err;				\
	val = paravirt_read_msr(msr, &_err);	\
} while (0)

#define wrmsrl(msr, val)	wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)	paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)			\
({						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	(*a) = (u32)_l;				\
	(*b) = _l >> 32;			\
	_err;					\
})
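
/*
 * Usage sketch (editor's note, not part of the original header):
 * rdmsr_safe evaluates to the error code from the read, so a caller can
 * probe a possibly-absent MSR without oopsing:
 *
 *	u32 lo, hi;
 *	if (rdmsr_safe(msr, &lo, &hi) != 0)
 *		return -EIO;	// the rdmsr faulted; lo/hi are not valid
 *
 * Here "msr" stands for whichever MSR index the caller is probing.
 */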

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr(msr, &err);
	return err;
}

static inline u64 paravirt_read_tsc(void)
{
	return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)				\
do {						\
	u64 _l = paravirt_read_tsc();		\
	low = (int)_l;				\
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}
#define calibrate_tsc() (pv_time_ops.get_tsc_khz())

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)				\
do {							\
	int __aux;					\
	unsigned long __val = paravirt_rdtscp(&__aux);	\
	(low) = (u32)__val;				\
	(high) = (u32)(__val >> 32);			\
	(aux) = __aux;					\
} while (0)

#define rdtscpll(val, aux)			\
do {						\
	unsigned long __aux;			\
	val = paravirt_rdtscp(&__aux);		\
	(aux) = __aux;				\
} while (0)

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Basic functions accessing APICs.
 */
static inline void apic_write(unsigned long reg, u32 v)
{
	PVOP_VCALL2(pv_apic_ops.apic_write, reg, v);
}

static inline u32 apic_read(unsigned long reg)
{
	return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg);
}

static inline void setup_boot_clock(void)
{
	PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
}

static inline void setup_secondary_clock(void)
{
	PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
}
#endif

static inline void paravirt_post_allocator_init(void)
{
	if (pv_init_ops.post_allocator_init)
		(*pv_init_ops.post_allocator_init)();
}

static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
	(*pv_mmu_ops.pagetable_setup_start)(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
	(*pv_mmu_ops.pagetable_setup_done)(base);
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
				    unsigned long start_esp)
{
	PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
		    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
				    unsigned long va)
{
	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
					    unsigned start, unsigned count)
{
	PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pmd(unsigned pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

#ifdef CONFIG_HIGHPTE
static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
{
	unsigned long ret;
	ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
	return (void *)ret;
}
#endif

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALL2(pteval_t,
				 pv_mmu_ops.make_pte,
				 val, (u64)val >> 32);
	else
		ret = PVOP_CALL1(pteval_t,
				 pv_mmu_ops.make_pte,
				 val);

	return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
				 pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
				 pte.pte);

	return ret;
}

static inline pteval_t pte_flags(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
				 pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
				 pte.pte);

	return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
				 val, (u64)val >> 32);
	else
		ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
				 val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
				 pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
				 pgd.pgd);

	return ret;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
			 mm, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
			    mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
			    pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
			    pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
				 val, (u64)val >> 32);
	else
		ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
				 val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
				 pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
				 pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
			    val);
}
#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
				 val, (u64)val >> 32);
	else
		ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
				 val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
				 pud.pud, (u64)pud.pud >> 32);
	else
		ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
				 pud.pud);

	return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgdval_t val = native_pgd_val(pgd);

	if (sizeof(pgdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
			    val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

#endif	/* PAGETABLE_LEVELS == 4 */

#endif	/* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
		    pte.pte, pte.pte >> 32);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	/* 5 arg words */
	pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_enter_lazy_cpu(void);
void paravirt_leave_lazy_cpu(void);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_leave_lazy(enum paravirt_lazy_mode mode);

#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
static inline void arch_enter_lazy_cpu_mode(void)
{
	PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_cpu_mode(void)
{
	PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_cpu_mode(void)
{
	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
		arch_leave_lazy_cpu_mode();
		arch_enter_lazy_cpu_mode();
	}
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
		arch_leave_lazy_mmu_mode();
		arch_enter_lazy_mmu_mode();
	}
}

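/*
 * Usage sketch (editor's note, not part of the original header): a
 * backend can coalesce a run of pagetable writes by bracketing them in
 * lazy MMU mode, e.g.
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (addr = start; addr < end; addr += PAGE_SIZE, ptep++)
 *		set_pte_at(mm, addr, ptep, pte);
 *	arch_leave_lazy_mmu_mode();
 *
 * Between enter and leave, the pv_lazy_ops backend may queue the
 * updates and issue them to the hypervisor as a single batch when the
 * mode is left.
 */
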
static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				unsigned long phys, pgprot_t flags)
{
	pv_mmu_ops.set_fixmap(idx, phys, flags);
}

void _paravirt_nop(void);
#define paravirt_nop	((void *)_paravirt_nop)

void paravirt_use_bytelocks(void);

#ifdef CONFIG_SMP

static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}

static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
{
	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
{
	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}

#endif

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
	u8 *instr;		/* original instructions */
	u8 instrtype;		/* type of this instruction */
	u8 len;			/* length of original instruction */
	u16 clobbers;		/* what registers you may clobber */
};

extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];
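
/*
 * For illustration (editor's note, not part of the original header):
 * the patcher walks this table at boot and hands each recorded call
 * site to the backend's patch hook, roughly
 *
 *	struct paravirt_patch_site *p;
 *	for (p = __parainstructions; p < __parainstructions_end; p++)
 *		used = pv_init_ops.patch(p->instrtype, p->clobbers,
 *					 insnbuf, (unsigned long)p->instr,
 *					 p->len);
 *
 * with the remainder of each site nop-padded, as the pv_init_ops.patch
 * comment above describes.  (The real loop lives in apply_paravirt().)
 */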

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* We save only some registers: saving all of them would be too much.
 * We clobber all caller-saved registers but the argument parameter */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

static inline unsigned long __raw_local_save_flags(void)
{
	unsigned long f;

	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
		     : "=a"(f)
		     : paravirt_type(pv_irq_ops.save_fl),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "cc" PV_VEXTRA_CLOBBERS);
	return f;
}

static inline void raw_local_irq_restore(unsigned long f)
{
	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
		     : "=a"(f)
		     : PV_FLAGS_ARG(f),
		       paravirt_type(pv_irq_ops.restore_fl),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "cc" PV_EXTRA_CLOBBERS);
}

static inline void raw_local_irq_disable(void)
{
	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
		     :
		     : paravirt_type(pv_irq_ops.irq_disable),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}

static inline void raw_local_irq_enable(void)
{
	asm volatile(paravirt_alt(PV_SAVE_REGS
				  PARAVIRT_CALL
				  PV_RESTORE_REGS)
		     :
		     : paravirt_type(pv_irq_ops.irq_enable),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}

static inline unsigned long __raw_local_irq_save(void)
{
	unsigned long f;

	f = __raw_local_save_flags();
	raw_local_irq_disable();
	return f;
}


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)	\
771:;							\
	ops;						\
772:;							\
	.pushsection .parainstructions,"a";		\
	 .align	algn;					\
	 word 771b;					\
	 .byte ptype;					\
	 .byte 772b-771b;				\
	 .short clobbers;				\
	.popsection


#ifdef CONFIG_X86_64
#define PV_SAVE_REGS				\
	push %rax;				\
	push %rcx;				\
	push %rdx;				\
	push %rsi;				\
	push %rdi;				\
	push %r8;				\
	push %r9;				\
	push %r10;				\
	push %r11
#define PV_RESTORE_REGS				\
	pop %r11;				\
	pop %r10;				\
	pop %r9;				\
	pop %r8;				\
	pop %rdi;				\
	pop %rsi;				\
	pop %rdx;				\
	pop %rcx;				\
	pop %rax
#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS   pushl %eax; pushl %edi; pushl %ecx; pushl %edx
#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
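
/*
 * For illustration (editor's note, not part of the original header):
 * these assembly-side macros are used at the tail of the kernel's entry
 * code; an exception return in entry_32.S ends with
 *
 *	INTERRUPT_RETURN
 *
 * which expands to a recorded "jmp *pv_cpu_ops+PV_CPU_iret" site; on
 * native hardware the patcher can rewrite that site to a bare "iret".
 */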

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers,	\
		  PV_SAVE_REGS;						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS;)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS;						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS;)

#define USERGS_SYSRET32							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
	pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))


#else	/* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)

#define SWAPGS								\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  PV_SAVE_REGS;						\
		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs);		\
		  PV_RESTORE_REGS					\
		 )

#define GET_CR2_INTO_RCX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2);	\
	movq %rax, %rcx;				\
	xorq %rax, %rax;

#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
		  CLBR_NONE,						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif	/* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#endif /* __ASM_PARAVIRT_H */