/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H
/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)
#define CLBR_EDI  (1 << 3)

#ifdef CONFIG_X86_32
/* CLBR_ANY should match all the registers the platform has; for i386,
   that's everything above. */
#define CLBR_ANY  ((1 << 4) - 1)

#define CLBR_ARG_REGS	(CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG	(CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH	(0)
#else
#define CLBR_RAX  CLBR_EAX
#define CLBR_RCX  CLBR_ECX
#define CLBR_RDX  CLBR_EDX
#define CLBR_RDI  CLBR_EDI
#define CLBR_RSI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)

#define CLBR_ANY  ((1 << 9) - 1)

#define CLBR_ARG_REGS	(CLBR_RDI | CLBR_RSI | CLBR_RDX | \
			 CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG	(CLBR_RAX)
#define CLBR_SCRATCH	(CLBR_R10 | CLBR_R11)
#endif /* CONFIG_X86_32 */

#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
#ifndef __ASSEMBLY__

#include <asm/desc_defs.h>
#include <asm/kmap_types.h>
#include <asm/pgtable_types.h>

struct thread_struct;
struct desc_ptr;
struct mm_struct;
struct desc_struct;
struct task_struct;
struct cpumask;
struct flush_tlb_info;
/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK in paravirt.h.
 */
struct paravirt_callee_save {
	void *func;
};
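/*
 * Example (sketch): a backend wraps an ordinary C function in a
 * register-saving thunk and stores the result here.  The thunk and
 * accessor macros are defined in paravirt.h; xen_save_fl() stands in
 * for any backend implementation:
 *
 *	static unsigned long xen_save_fl(void);
 *	PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
 *
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(xen_save_fl);
 */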
/* general info */
struct pv_info {
	unsigned int kernel_rpl;
	int shared_kernel_pmd;

#ifdef CONFIG_X86_64
	u16 extra_user_64bit_cs;  /* __USER_CS if none */
#endif

	const char *name;
} __no_randomize_layout;
struct pv_init_ops {
	/*
	 * Patch may replace one of the defined code sequences with
	 * arbitrary code, subject to the same register constraints.
	 * This generally means the code is not free to clobber any
	 * registers other than EAX.  The patch function should return
	 * the number of bytes of code generated, as we nop-pad the
	 * rest in generic code.
	 */
	unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
			  unsigned long addr, unsigned len);
} __no_randomize_layout;
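/*
 * Example (sketch): a backend that does no inline patching of its own
 * can hand every site to the generic patcher, which converts the
 * indirect call into a direct call where possible.  my_patch is a
 * made-up name; paravirt_patch_default() is declared later in this
 * header:
 *
 *	static unsigned my_patch(u8 type, u16 clobbers, void *insnbuf,
 *				 unsigned long addr, unsigned len)
 *	{
 *		return paravirt_patch_default(type, clobbers, insnbuf,
 *					      addr, len);
 *	}
 */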
struct pv_lazy_ops {
	/* Set deferred update mode, used for batching operations. */
	void (*enter)(void);
	void (*leave)(void);
	void (*flush)(void);
} __no_randomize_layout;
struct pv_time_ops {
	unsigned long long (*sched_clock)(void);
	unsigned long long (*steal_clock)(int cpu);
} __no_randomize_layout;
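/*
 * Example: a guest backend installs its clock hooks at boot; e.g. the
 * KVM guest code (arch/x86/kernel/kvmclock.c and kvm.c) does,
 * in effect:
 *
 *	pv_time_ops.sched_clock = kvm_sched_clock_read;
 *	pv_time_ops.steal_clock = kvm_steal_clock;
 */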
struct pv_cpu_ops {
	/* hooks for various privileged instructions */
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	void (*write_cr4)(unsigned long);

#ifdef CONFIG_X86_64
	unsigned long (*read_cr8)(void);
	void (*write_cr8)(unsigned long);
#endif

	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
#ifdef CONFIG_X86_64
	void (*load_gs_index)(unsigned int idx);
#endif
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

	void (*load_sp0)(unsigned long sp0);

	void (*set_iopl_mask)(unsigned mask);

	void (*wbinvd)(void);
	void (*io_delay)(void);

	/* cpuid emulation, mostly so that caps bits can be disabled */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);

	/* Unsafe MSR operations.  These will warn or panic on failure. */
	u64 (*read_msr)(unsigned int msr);
	void (*write_msr)(unsigned int msr, unsigned low, unsigned high);

	/*
	 * Safe MSR operations.
	 * read sets err to 0 or -EIO.  write returns 0 or -EIO.
	 */
	u64 (*read_msr_safe)(unsigned int msr, int *err);
	int (*write_msr_safe)(unsigned int msr, unsigned low, unsigned high);

	u64 (*read_pmc)(int counter);

	/*
	 * Switch to usermode gs and return to 64-bit usermode using
	 * sysret.  Only used in 64-bit kernels to return to 64-bit
	 * processes.  Usermode register state, including %rsp, must
	 * already be restored.
	 */
	void (*usergs_sysret64)(void);

	/* Normal iret.  Jump to this with the standard iret stack
	   frame set up. */
	void (*iret)(void);

	void (*swapgs)(void);

	void (*start_context_switch)(struct task_struct *prev);
	void (*end_context_switch)(struct task_struct *next);
} __no_randomize_layout;
struct pv_irq_ops {
	/*
	 * Get/set interrupt state.  save_fl and restore_fl are only
	 * expected to use X86_EFLAGS_IF; all other bits
	 * returned from save_fl are undefined, and may be ignored by
	 * restore_fl.
	 *
	 * NOTE: these functions' callers expect the callee to preserve
	 * more registers than the standard C calling convention.
	 */
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save restore_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;

	void (*safe_halt)(void);
	void (*halt)(void);
} __no_randomize_layout;
struct pv_mmu_ops {
	unsigned long (*read_cr2)(void);
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);

	/*
	 * Hooks for intercepting the creation/use/destruction of an
	 * mm_struct.
	 */
	void (*activate_mm)(struct mm_struct *prev,
			    struct mm_struct *next);
	void (*dup_mmap)(struct mm_struct *oldmm,
			 struct mm_struct *mm);
	void (*exit_mmap)(struct mm_struct *mm);

	/* TLB operations */
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_single)(unsigned long addr);
	void (*flush_tlb_others)(const struct cpumask *cpus,
				 const struct flush_tlb_info *info);

	/* Hooks for allocating and freeing a pagetable top-level */
	int  (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

	/*
	 * Hooks for allocating/releasing pagetable pages when they're
	 * attached to a pagetable.
	 */
	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_p4d)(struct mm_struct *mm, unsigned long pfn);
	void (*release_pte)(unsigned long pfn);
	void (*release_pmd)(unsigned long pfn);
	void (*release_pud)(unsigned long pfn);
	void (*release_p4d)(unsigned long pfn);

	/* Pagetable manipulation functions */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);

	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte);

	struct paravirt_callee_save pte_val;
	struct paravirt_callee_save make_pte;

	struct paravirt_callee_save pgd_val;
	struct paravirt_callee_save make_pgd;

#if CONFIG_PGTABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep);
	void (*pmd_clear)(pmd_t *pmdp);
#endif	/* CONFIG_X86_PAE */

	void (*set_pud)(pud_t *pudp, pud_t pudval);

	struct paravirt_callee_save pmd_val;
	struct paravirt_callee_save make_pmd;

#if CONFIG_PGTABLE_LEVELS >= 4
	struct paravirt_callee_save pud_val;
	struct paravirt_callee_save make_pud;

	void (*set_p4d)(p4d_t *p4dp, p4d_t p4dval);

#if CONFIG_PGTABLE_LEVELS >= 5
	struct paravirt_callee_save p4d_val;
	struct paravirt_callee_save make_p4d;

	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif	/* CONFIG_PGTABLE_LEVELS >= 5 */

#endif	/* CONFIG_PGTABLE_LEVELS >= 4 */

#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */

	struct pv_lazy_ops lazy_mode;

	/*
	 * Sometimes the physical address is a pfn, and sometimes it's
	 * an mfn.  We can tell which is which from the index.
	 */
	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
			   phys_addr_t phys, pgprot_t flags);
} __no_randomize_layout;
struct arch_spinlock;
#ifdef CONFIG_SMP
#include <asm/spinlock_types.h>
#endif

struct qspinlock;
struct pv_lock_ops {
	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
	struct paravirt_callee_save queued_spin_unlock;

	void (*wait)(u8 *ptr, u8 val);
	void (*kick)(int cpu);

	struct paravirt_callee_save vcpu_is_preempted;
} __no_randomize_layout;
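/*
 * Example: the native definition (see arch/x86/kernel/paravirt-spinlocks.c)
 * points these at the stock qspinlock implementation:
 *
 *	struct pv_lock_ops pv_lock_ops = {
 *	#ifdef CONFIG_SMP
 *		.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
 *		.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
 *		.wait = paravirt_nop,
 *		.kick = paravirt_nop,
 *		.vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
 *	#endif
 *	};
 */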
/* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch. */
struct paravirt_patch_template {
	struct pv_init_ops pv_init_ops;
	struct pv_time_ops pv_time_ops;
	struct pv_cpu_ops pv_cpu_ops;
	struct pv_irq_ops pv_irq_ops;
	struct pv_mmu_ops pv_mmu_ops;
	struct pv_lock_ops pv_lock_ops;
} __no_randomize_layout;
extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_mmu_ops pv_mmu_ops;
extern struct pv_lock_ops pv_lock_ops;
#define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))

#define paravirt_type(op)				\
	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
	[paravirt_opptr] "i" (&(op))
#define paravirt_clobber(clobber)		\
	[paravirt_clobber] "i" (clobber)
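/*
 * Example: PARAVIRT_PATCH(pv_irq_ops.irq_disable) is the pointer-sized
 * slot index of that op within paravirt_patch_template.  Patchers key
 * on these numbers; a sketch of one native_patch() case (the real code
 * in paravirt_patch_64.c wraps this in a PATCH_SITE() macro):
 *
 *	switch (type) {
 *	case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
 *		return paravirt_patch_insns(ibuf, len,
 *					    start_pv_irq_ops_irq_disable,
 *					    end_pv_irq_ops_irq_disable);
 *	default:
 *		return paravirt_patch_default(type, clobbers, ibuf,
 *					      addr, len);
 *	}
 */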
/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)	\
	"771:\n\t" insn_string "\n" "772:\n"		\
	".pushsection .parainstructions,\"a\"\n"	\
	_ASM_ALIGN "\n"					\
	_ASM_PTR " 771b\n"				\
	"  .byte " type "\n"				\
	"  .byte 772b-771b\n"				\
	"  .short " clobber "\n"			\
	".popsection\n"
/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)					\
	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
/* Simple instruction patching code. */
#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"

#define DEF_NATIVE(ops, name, code)					\
	__visible extern const char start_##ops##_##name[], end_##ops##_##name[];	\
	asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name))
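/*
 * Example (as used by arch/x86/kernel/paravirt_patch_64.c): define the
 * native instruction sequence for an op, bracketed by start_/end_
 * symbols the patcher can copy from:
 *
 *	DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 *	DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 */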
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
			     const void *target, u16 tgt_clobbers,
			     unsigned long addr, u16 site_clobbers,
			     unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
			    unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
				unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
			      const char *start, const char *end);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);
/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL	"call *%c[paravirt_opptr];"
/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters passed in %rdi, %rsi,
 * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, of
 * which there are quite a few more (r8-r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/x86/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * 64-bit arguments are passed as a pair of adjacent 32-bit arguments;
 * i386 passes them in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do it.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
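/*
 * Example (from paravirt.h): a pv_op is never called directly; each
 * one gets a typed inline wrapper, such as:
 *
 *	static inline unsigned long read_cr2(void)
 *	{
 *		return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
 *	}
 */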
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS							\
	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;

#define PVOP_CALL_ARGS			PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else /* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
#define PVOP_VCALL_ARGS						\
	unsigned long __edi = __edi, __esi = __esi,		\
		__edx = __edx, __ecx = __ecx, __eax = __eax;

#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=D" (__edi),			\
				"=S" (__esi), "=d" (__edx),	\
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

/* void functions are still allowed [re]ax for scratch */
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif	/* CONFIG_X86_32 */
#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)op)
#endif
#define PVOP_RETMASK(rettype)						\
	({	unsigned long __mask = ~0UL;				\
		switch (sizeof(rettype)) {				\
		case 1: __mask =       0xffUL; break;			\
		case 2: __mask =     0xffffUL; break;			\
		case 4: __mask = 0xffffffffUL; break;			\
		default: break;						\
		}							\
		__mask;							\
	})
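/*
 * Example: for an op declared to return u16, only %ax is meaningful;
 * PVOP_RETMASK(u16) == 0xffff, so the call macros below compute
 * (u16)(__eax & 0xffff) rather than trusting the upper bits.
 */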
#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,		\
		      pre, post, ...)					\
	({								\
		rettype __ret;						\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		/* This is 32-bit specific, but is okay in 64-bit */	\
		/* since this condition will never hold */		\
		if (sizeof(rettype) > sizeof(unsigned long)) {		\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr, ASM_CALL_CONSTRAINT	\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
		} else {						\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr, ASM_CALL_CONSTRAINT	\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)(__eax & PVOP_RETMASK(rettype)); \
		}							\
		__ret;							\
	})
#define __PVOP_CALL(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,	\
		      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
		      PVOP_CALLEE_CLOBBERS, ,				\
		      pre, post, ##__VA_ARGS__)
#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...)	\
	({								\
		PVOP_VCALL_ARGS;					\
		PVOP_TEST_NULL(op);					\
		asm volatile(pre					\
			     paravirt_alt(PARAVIRT_CALL)		\
			     post					\
			     : call_clbr, ASM_CALL_CONSTRAINT		\
			     : paravirt_type(op),			\
			       paravirt_clobber(clbr),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
	})

#define __PVOP_VCALL(op, pre, post, ...)				\
	____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,		\
		       VEXTRA_CLOBBERS,					\
		       pre, post, ##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(op, pre, post, ...)				\
	____PVOP_VCALL(op.func, CLBR_RET_REG,				\
		       PVOP_VCALLEE_CLOBBERS, ,				\
		       pre, post, ##__VA_ARGS__)
#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op, "", "")

#define PVOP_CALLEE0(rettype, op)					\
	__PVOP_CALLEESAVE(rettype, op, "", "")
#define PVOP_VCALLEE0(op)						\
	__PVOP_VCALLEESAVE(op, "", "")
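/*
 * Example (from paravirt.h): the irq-disable wrapper uses the
 * callee-save variant, so only the return register is treated as
 * clobbered at the call site:
 *
 *	static inline notrace void arch_local_irq_disable(void)
 *	{
 *		PVOP_VCALLEE0(pv_irq_ops.irq_disable);
 *	}
 */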
#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1)					\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1)						\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))

#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2))
#define PVOP_CALLEE2(rettype, op, arg1, arg2)				\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),	\
			  PVOP_CALL_ARG2(arg2))
#define PVOP_VCALLEE2(op, arg1, arg2)					\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),		\
			   PVOP_CALL_ARG2(arg2))

#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
/* This is the only difference in x86_64. We can make it much simpler */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op,						\
		     "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		     "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
		     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op, "", "",				\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, "", "",					\
		     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#endif
/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_start_context_switch(struct task_struct *prev);
void paravirt_end_context_switch(struct task_struct *next);

void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_flush_lazy_mmu(void);
void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
u64 _paravirt_ident_64(u64);

#define paravirt_nop	((void *)_paravirt_nop)
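/*
 * Example: on native hardware many ops need no translation, so (in
 * arch/x86/kernel/paravirt.c) value conversions such as pte_val and
 * make_pte are set to these identity functions; the patcher can then
 * replace such call sites with a single register move via
 * paravirt_patch_ident_32/64() above.
 */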
/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
	u8 *instr;		/* original instructions */
	u8 instrtype;		/* type of this instruction */
	u8 len;			/* length of original instruction */
	u16 clobbers;		/* what registers you may clobber */
};
extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];
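/*
 * Example (sketch of apply_paravirt() in arch/x86/kernel/alternative.c):
 * walk the section, let the active patch hook rewrite each site into a
 * scratch buffer, nop-pad the remainder, and poke the result back in:
 *
 *	for (p = start; p < end; p++) {
 *		unsigned int used;
 *
 *		memcpy(insnbuf, p->instr, p->len);
 *		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
 *					 (unsigned long)p->instr, p->len);
 *		add_nops(insnbuf + used, p->len - used);
 *		text_poke_early(p->instr, insnbuf, p->len);
 *	}
 */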
#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PARAVIRT_TYPES_H */