b2441318 | 1 | // SPDX-License-Identifier: GPL-2.0 |
2f485ef5 GOC |
2 | #include <asm/paravirt.h> |
3 | ||
4 | DEF_NATIVE(pv_irq_ops, irq_disable, "cli"); | |
5 | DEF_NATIVE(pv_irq_ops, irq_enable, "sti"); | |
6 | DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf"); | |
7 | DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax"); | |
8 | DEF_NATIVE(pv_cpu_ops, iret, "iret"); | |
2f485ef5 GOC |
9 | DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax"); |
10 | DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3"); | |
11 | DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax"); | |
2f485ef5 | 12 | |
cfd8983f | 13 | #if defined(CONFIG_PARAVIRT_SPINLOCKS) |
f233f7f1 | 14 | DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)"); |
3cded417 | 15 | DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %eax, %eax"); |
f233f7f1 PZI |
16 | #endif |
17 | ||
/*
 * Patch an identity function taking/returning a 32-bit value.
 * On 32-bit x86 the argument already arrives in %eax and is returned in
 * %eax, so no instructions are needed at all: emit nothing (length 0)
 * and let the caller fall through.
 */
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
	/* arg in %eax, return in %eax */
	return 0;
}
/*
 * Patch an identity function taking/returning a 64-bit value.
 * On 32-bit x86 a 64-bit value lives in the %edx:%eax register pair for
 * both argument and return, so the identity transform needs no code:
 * emit nothing (length 0).
 */
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
	/* arg in %edx:%eax, return in %edx:%eax */
	return 0;
}
f233f7f1 | 30 | extern bool pv_is_native_spin_unlock(void); |
3cded417 | 31 | extern bool pv_is_native_vcpu_is_preempted(void); |
f233f7f1 | 32 | |
2f485ef5 GOC |
33 | unsigned native_patch(u8 type, u16 clobbers, void *ibuf, |
34 | unsigned long addr, unsigned len) | |
35 | { | |
36 | const unsigned char *start, *end; | |
37 | unsigned ret; | |
38 | ||
39 | #define PATCH_SITE(ops, x) \ | |
40 | case PARAVIRT_PATCH(ops.x): \ | |
41 | start = start_##ops##_##x; \ | |
42 | end = end_##ops##_##x; \ | |
43 | goto patch_site | |
d9336a9b | 44 | switch (type) { |
2f485ef5 GOC |
45 | PATCH_SITE(pv_irq_ops, irq_disable); |
46 | PATCH_SITE(pv_irq_ops, irq_enable); | |
47 | PATCH_SITE(pv_irq_ops, restore_fl); | |
48 | PATCH_SITE(pv_irq_ops, save_fl); | |
49 | PATCH_SITE(pv_cpu_ops, iret); | |
2f485ef5 GOC |
50 | PATCH_SITE(pv_mmu_ops, read_cr2); |
51 | PATCH_SITE(pv_mmu_ops, read_cr3); | |
52 | PATCH_SITE(pv_mmu_ops, write_cr3); | |
cfd8983f | 53 | #if defined(CONFIG_PARAVIRT_SPINLOCKS) |
f233f7f1 PZI |
54 | case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock): |
55 | if (pv_is_native_spin_unlock()) { | |
56 | start = start_pv_lock_ops_queued_spin_unlock; | |
57 | end = end_pv_lock_ops_queued_spin_unlock; | |
58 | goto patch_site; | |
59 | } | |
45dbea5f PZ |
60 | goto patch_default; |
61 | ||
3cded417 PZ |
62 | case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted): |
63 | if (pv_is_native_vcpu_is_preempted()) { | |
64 | start = start_pv_lock_ops_vcpu_is_preempted; | |
65 | end = end_pv_lock_ops_vcpu_is_preempted; | |
66 | goto patch_site; | |
67 | } | |
45dbea5f | 68 | goto patch_default; |
f233f7f1 | 69 | #endif |
2f485ef5 GOC |
70 | |
71 | default: | |
cef4402d | 72 | patch_default: __maybe_unused |
2f485ef5 GOC |
73 | ret = paravirt_patch_default(type, clobbers, ibuf, addr, len); |
74 | break; | |
f233f7f1 PZI |
75 | |
76 | patch_site: | |
77 | ret = paravirt_patch_insns(ibuf, len, start, end); | |
78 | break; | |
2f485ef5 GOC |
79 | } |
80 | #undef PATCH_SITE | |
81 | return ret; | |
82 | } |