/*
 * TCG CPU-specific operations
 *
 * Copyright 2021 SUSE LLC
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef TCG_CPU_OPS_H
#define TCG_CPU_OPS_H

#include "hw/core/cpu.h"
/*
 * Table of TCG (dynamic translation) hooks that a target CPU registers.
 * Members guarded by NEED_CPU_H / CONFIG_USER_ONLY are only visible to
 * target-specific or mode-specific compilation units.
 */
struct TCGCPUOps {
    /**
     * @initialize: Initialize TCG state
     *
     * Called when the first CPU is realized.
     */
    void (*initialize)(void);
    /**
     * @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
     *
     * This is called when we abandon execution of a TB before starting it,
     * and must set all parts of the CPU state which the previous TB in the
     * chain may not have updated.
     * By default, when this is NULL, a call is made to @set_pc(tb->pc).
     *
     * If more state needs to be restored, the target must implement a
     * function to restore all the state, and register it here.
     */
    void (*synchronize_from_tb)(CPUState *cpu, const TranslationBlock *tb);
    /**
     * @restore_state_to_opc: Synchronize state from INDEX_op_start_insn
     *
     * This is called when we unwind state in the middle of a TB,
     * usually before raising an exception.  Set all parts of the CPU
     * state which are tracked insn-by-insn in the target-specific
     * arguments to start_insn, passed as @data.
     */
    void (*restore_state_to_opc)(CPUState *cpu, const TranslationBlock *tb,
                                 const uint64_t *data);

    /** @cpu_exec_enter: Callback for cpu_exec preparation */
    void (*cpu_exec_enter)(CPUState *cpu);
    /** @cpu_exec_exit: Callback for cpu_exec cleanup */
    void (*cpu_exec_exit)(CPUState *cpu);
    /** @debug_excp_handler: Callback for handling debug exceptions */
    void (*debug_excp_handler)(CPUState *cpu);

#ifdef NEED_CPU_H
#if defined(CONFIG_USER_ONLY) && defined(TARGET_I386)
    /**
     * @fake_user_interrupt: Callback for 'fake exception' handling.
     *
     * Simulate 'fake exception' which will be handled outside the
     * cpu execution loop (hack for x86 user mode).
     */
    void (*fake_user_interrupt)(CPUState *cpu);
#else
    /**
     * @do_interrupt: Callback for interrupt handling.
     */
    void (*do_interrupt)(CPUState *cpu);
#endif /* !CONFIG_USER_ONLY || !TARGET_I386 */
#ifdef CONFIG_USER_ONLY
    /**
     * record_sigsegv:
     * @cpu: cpu context
     * @addr: faulting guest address
     * @access_type: access was read/write/execute
     * @maperr: true for invalid page, false for permission fault
     * @ra: host pc for unwinding
     *
     * We are about to raise SIGSEGV with si_code set for @maperr,
     * and si_addr set for @addr.  Record anything further needed
     * for the signal ucontext_t.
     *
     * If the emulated kernel does not provide anything to the signal
     * handler with anything besides the user context registers, and
     * the siginfo_t, then this hook need do nothing and may be omitted.
     * Otherwise, record the data and return; the caller will raise
     * the signal, unwind the cpu state, and return to the main loop.
     *
     * If it is simpler to re-use the sysemu tlb_fill code, @ra is provided
     * so that a "normal" cpu exception can be raised.  In this case,
     * the signal must be raised by the architecture cpu_loop.
     */
    void (*record_sigsegv)(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type,
                           bool maperr, uintptr_t ra);
    /**
     * record_sigbus:
     * @cpu: cpu context
     * @addr: misaligned guest address
     * @access_type: access was read/write/execute
     * @ra: host pc for unwinding
     *
     * We are about to raise SIGBUS with si_code BUS_ADRALN,
     * and si_addr set for @addr.  Record anything further needed
     * for the signal ucontext_t.
     *
     * If the emulated kernel does not provide the signal handler with
     * anything besides the user context registers, and the siginfo_t,
     * then this hook need do nothing and may be omitted.
     * Otherwise, record the data and return; the caller will raise
     * the signal, unwind the cpu state, and return to the main loop.
     *
     * If it is simpler to re-use the sysemu do_unaligned_access code,
     * @ra is provided so that a "normal" cpu exception can be raised.
     * In this case, the signal must be raised by the architecture cpu_loop.
     */
    void (*record_sigbus)(CPUState *cpu, vaddr addr,
                          MMUAccessType access_type, uintptr_t ra);
#else
    /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
    /**
     * @tlb_fill: Handle a softmmu tlb miss
     *
     * If the access is valid, call tlb_set_page and return true;
     * if the access is invalid and probe is true, return false;
     * otherwise raise an exception and do not return.
     */
    bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr);
    /**
     * @do_transaction_failed: Callback for handling failed memory transactions
     * (ie bus faults or external aborts; not MMU faults)
     */
    void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
                                  unsigned size, MMUAccessType access_type,
                                  int mmu_idx, MemTxAttrs attrs,
                                  MemTxResult response, uintptr_t retaddr);
    /**
     * @do_unaligned_access: Callback for unaligned access handling
     * The callback must exit via raising an exception.
     */
    G_NORETURN void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
                                           MMUAccessType access_type,
                                           int mmu_idx, uintptr_t retaddr);

    /**
     * @adjust_watchpoint_address: hack for cpu_check_watchpoint used by ARM
     */
    vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);

    /**
     * @debug_check_watchpoint: return true if the architectural
     * watchpoint whose address has matched should really fire, used by ARM
     * and RISC-V
     */
    bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);

    /**
     * @debug_check_breakpoint: return true if the architectural
     * breakpoint whose PC has matched should really fire.
     */
    bool (*debug_check_breakpoint)(CPUState *cpu);

    /**
     * @io_recompile_replay_branch: Callback for cpu_io_recompile.
     *
     * The cpu has been stopped, and cpu_restore_state_from_tb has been
     * called.  If the faulting instruction is in a delay slot, and the
     * target architecture requires re-execution of the branch, then
     * adjust the cpu state as required and return true.
     */
    bool (*io_recompile_replay_branch)(CPUState *cpu,
                                       const TranslationBlock *tb);
#endif /* !CONFIG_USER_ONLY */
#endif /* NEED_CPU_H */

};

#if defined(CONFIG_USER_ONLY)

/*
 * User-only stub: deliberately a no-op — watchpoint checking is only
 * implemented for system emulation (see the sysemu declaration below
 * in this header's #else branch).
 */
static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
                                        MemTxAttrs atr, int fl, uintptr_t ra)
{
}
/*
 * User-only stub: always reports that no watchpoint flags apply to
 * [addr, addr+len), i.e. returns 0 unconditionally.
 */
static inline int cpu_watchpoint_address_matches(CPUState *cpu,
                                                 vaddr addr, vaddr len)
{
    return 0;
}

#else

/**
 * cpu_check_watchpoint:
 * @cpu: cpu context
 * @addr: guest virtual address
 * @len: access length
 * @attrs: memory access attributes
 * @flags: watchpoint access type
 * @ra: unwind return address
 *
 * Check for a watchpoint hit in [addr, addr+len) of the type
 * specified by @flags.  Exit via exception with a hit.
 */
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
                          MemTxAttrs attrs, int flags, uintptr_t ra);

/**
 * cpu_watchpoint_address_matches:
 * @cpu: cpu context
 * @addr: guest virtual address
 * @len: access length
 *
 * Return the watchpoint flags that apply to [addr, addr+len).
 * If no watchpoint is registered for the range, the result is 0.
 */
int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);

#endif

#endif /* TCG_CPU_OPS_H */