/* Native instruction patching of paravirt call sites, 64-bit x86. */
53fd13cf GOC |
1 | #include <asm/paravirt.h> |
2 | #include <asm/asm-offsets.h> | |
8a650ce2 | 3 | #include <linux/stringify.h> |
53fd13cf GOC |
4 | |
5 | DEF_NATIVE(pv_irq_ops, irq_disable, "cli"); | |
6 | DEF_NATIVE(pv_irq_ops, irq_enable, "sti"); | |
7 | DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq"); | |
8 | DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax"); | |
53fd13cf GOC |
9 | DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax"); |
10 | DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax"); | |
11 | DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3"); | |
12 | DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)"); | |
13 | DEF_NATIVE(pv_cpu_ops, clts, "clts"); | |
14 | DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd"); | |
15 | ||
2be29982 | 16 | DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq"); |
53fd13cf GOC |
17 | DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs"); |
18 | ||
41edafdb JF |
19 | DEF_NATIVE(, mov32, "mov %edi, %eax"); |
20 | DEF_NATIVE(, mov64, "mov %rdi, %rax"); | |
21 | ||
cfd8983f | 22 | #if defined(CONFIG_PARAVIRT_SPINLOCKS) |
f233f7f1 | 23 | DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)"); |
3cded417 | 24 | DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %rax, %rax"); |
f233f7f1 PZI |
25 | #endif |
26 | ||
41edafdb JF |
27 | unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len) |
28 | { | |
29 | return paravirt_patch_insns(insnbuf, len, | |
30 | start__mov32, end__mov32); | |
31 | } | |
32 | ||
33 | unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len) | |
34 | { | |
35 | return paravirt_patch_insns(insnbuf, len, | |
36 | start__mov64, end__mov64); | |
37 | } | |
38 | ||
f233f7f1 | 39 | extern bool pv_is_native_spin_unlock(void); |
3cded417 | 40 | extern bool pv_is_native_vcpu_is_preempted(void); |
f233f7f1 | 41 | |
53fd13cf GOC |
42 | unsigned native_patch(u8 type, u16 clobbers, void *ibuf, |
43 | unsigned long addr, unsigned len) | |
44 | { | |
45 | const unsigned char *start, *end; | |
46 | unsigned ret; | |
47 | ||
48 | #define PATCH_SITE(ops, x) \ | |
49 | case PARAVIRT_PATCH(ops.x): \ | |
50 | start = start_##ops##_##x; \ | |
51 | end = end_##ops##_##x; \ | |
52 | goto patch_site | |
53 | switch(type) { | |
54 | PATCH_SITE(pv_irq_ops, restore_fl); | |
55 | PATCH_SITE(pv_irq_ops, save_fl); | |
56 | PATCH_SITE(pv_irq_ops, irq_enable); | |
57 | PATCH_SITE(pv_irq_ops, irq_disable); | |
2be29982 | 58 | PATCH_SITE(pv_cpu_ops, usergs_sysret64); |
53fd13cf GOC |
59 | PATCH_SITE(pv_cpu_ops, swapgs); |
60 | PATCH_SITE(pv_mmu_ops, read_cr2); | |
61 | PATCH_SITE(pv_mmu_ops, read_cr3); | |
62 | PATCH_SITE(pv_mmu_ops, write_cr3); | |
63 | PATCH_SITE(pv_cpu_ops, clts); | |
64 | PATCH_SITE(pv_mmu_ops, flush_tlb_single); | |
65 | PATCH_SITE(pv_cpu_ops, wbinvd); | |
cfd8983f | 66 | #if defined(CONFIG_PARAVIRT_SPINLOCKS) |
f233f7f1 PZI |
67 | case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock): |
68 | if (pv_is_native_spin_unlock()) { | |
69 | start = start_pv_lock_ops_queued_spin_unlock; | |
70 | end = end_pv_lock_ops_queued_spin_unlock; | |
71 | goto patch_site; | |
72 | } | |
45dbea5f PZ |
73 | goto patch_default; |
74 | ||
3cded417 PZ |
75 | case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted): |
76 | if (pv_is_native_vcpu_is_preempted()) { | |
77 | start = start_pv_lock_ops_vcpu_is_preempted; | |
78 | end = end_pv_lock_ops_vcpu_is_preempted; | |
79 | goto patch_site; | |
80 | } | |
45dbea5f | 81 | goto patch_default; |
f233f7f1 | 82 | #endif |
53fd13cf GOC |
83 | |
84 | default: | |
45dbea5f | 85 | patch_default: |
53fd13cf GOC |
86 | ret = paravirt_patch_default(type, clobbers, ibuf, addr, len); |
87 | break; | |
f233f7f1 PZI |
88 | |
89 | patch_site: | |
90 | ret = paravirt_patch_insns(ibuf, len, start, end); | |
91 | break; | |
53fd13cf GOC |
92 | } |
93 | #undef PATCH_SITE | |
94 | return ret; | |
95 | } |