/*
 * Target-specific parts of the CPU object
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"

#include "exec/target_page.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#include "qemu/error-report.h"
#include "migration/vmstate.h"
#ifdef CONFIG_USER_ONLY
#include "qemu.h"
#else
#include "hw/core/sysemu-cpu-ops.h"
#include "exec/address-spaces.h"
#endif
#include "sysemu/tcg.h"
#include "sysemu/kvm.h"
#include "sysemu/replay.h"
#include "exec/cpu-common.h"
#include "exec/exec-all.h"
#include "exec/translate-all.h"
#include "exec/log.h"
#include "hw/core/accel-cpu.h"
#include "trace/trace-root.h"
#include "qemu/accel.h"

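/*
 * Host page size/mask, initialized by page_size_init() at the bottom of
 * this file and never smaller than TARGET_PAGE_SIZE.
 */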
uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

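/*
 * Migration of the target-independent CPU state (system emulation only).
 * The "cpu_common" section below carries halted and interrupt_request;
 * the exception_index and crash_occurred subsections are only sent when
 * their .needed callbacks return true.
 */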
#ifndef CONFIG_USER_ONLY
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu);

    /* loadvm has just updated the content of RAM, bypassing the
     * usual mechanisms that ensure we flush TBs for writes to
     * memory we've translated code from. So we must flush all TBs,
     * which will now be stale.
     */
    tb_flush(cpu);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
#endif

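/*
 * Common part of realizing a CPU: add it to the global CPU list, give
 * the accelerator a chance to complete (or refuse) the realize, and,
 * for system emulation, register the migration state declared above.
 */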
void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
#ifndef CONFIG_USER_ONLY
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif

    cpu_list_add(cpu);
    if (!accel_cpu_realizefn(cpu, errp)) {
        return;
    }
    /* NB: errp parameter is unused currently */
    if (tcg_enabled()) {
        tcg_exec_realizefn(cpu, errp);
    }

#ifdef CONFIG_USER_ONLY
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL ||
           qdev_get_vmsd(DEVICE(cpu))->unmigratable);
#else
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->sysemu_ops->legacy_vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->sysemu_ops->legacy_vmsd, cpu);
    }
#endif /* CONFIG_USER_ONLY */
}

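/*
 * Undo cpu_exec_realizefn() in reverse order: unregister the migration
 * state, let TCG drop its per-CPU data, then take the CPU off the list.
 */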
void cpu_exec_unrealizefn(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->sysemu_ops->legacy_vmsd != NULL) {
        vmstate_unregister(NULL, cc->sysemu_ops->legacy_vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
#endif
    if (tcg_enabled()) {
        tcg_exec_unrealizefn(cpu);
    }

    cpu_list_remove(cpu);
}

/*
 * This can't go in hw/core/cpu.c because that file is compiled only
 * once for both user-mode and system builds.
 */
static Property cpu_common_props[] = {
#ifdef CONFIG_USER_ONLY
    /*
     * Create a property for the user-only object, so users can
     * adjust prctl(PR_SET_UNALIGN) from the command-line.
     * Has no effect if the target does not support the feature.
     */
    DEFINE_PROP_BOOL("prctl-unalign-sigbus", CPUState,
                     prctl_unalign_sigbus, false),
#else
    /*
     * Create a memory property for softmmu CPU object, so users can
     * wire up its memory. The default if no link is set up is to use
     * the system address space.
     */
    DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
                     MemoryRegion *),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

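/*
 * Getter/setter for the "start-powered-off" property registered in
 * cpu_class_init_props() below; it is kept out of cpu_common_props[]
 * so that it stays settable after realize.
 */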
static bool cpu_get_start_powered_off(Object *obj, Error **errp)
{
    CPUState *cpu = CPU(obj);
    return cpu->start_powered_off;
}

static void cpu_set_start_powered_off(Object *obj, bool value, Error **errp)
{
    CPUState *cpu = CPU(obj);
    cpu->start_powered_off = value;
}

void cpu_class_init_props(DeviceClass *dc)
{
    ObjectClass *oc = OBJECT_CLASS(dc);

    device_class_set_props(dc, cpu_common_props);
    /*
     * We can't use DEFINE_PROP_BOOL in the Property array for this
     * property, because we want this to be settable after realize.
     */
    object_class_property_add_bool(oc, "start-powered-off",
                                   cpu_get_start_powered_off,
                                   cpu_set_start_powered_off);
}

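/*
 * Early instance init shared by all targets: no address spaces exist
 * yet, and in system emulation the CPU's memory defaults to the global
 * system memory region (it can be rewired via the "memory" link
 * property above).
 */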
void cpu_exec_initfn(CPUState *cpu)
{
    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
    cpu->memory = get_system_memory();
    object_ref(OBJECT(cpu->memory));
#endif
}

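/*
 * Parse the -cpu argument: the part before the first ',' names the CPU
 * model, the remainder is passed to the CPU class' parse_features()
 * hook. Exits on an unknown or empty model; returns the QOM type name
 * of the selected CPU class.
 */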
const char *parse_cpu_option(const char *cpu_option)
{
    ObjectClass *oc;
    CPUClass *cc;
    gchar **model_pieces;
    const char *cpu_type;

    model_pieces = g_strsplit(cpu_option, ",", 2);
    if (!model_pieces[0]) {
        error_report("-cpu option cannot be empty");
        exit(1);
    }

    oc = cpu_class_by_name(CPU_RESOLVING_TYPE, model_pieces[0]);
    if (oc == NULL) {
        error_report("unable to find CPU model '%s'", model_pieces[0]);
        g_strfreev(model_pieces);
        exit(EXIT_FAILURE);
    }

    cpu_type = object_class_get_name(oc);
    cc = CPU_CLASS(oc);
    cc->parse_features(cpu_type, model_pieces[1], &error_fatal);
    g_strfreev(model_pieces);
    return cpu_type;
}

void list_cpus(const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list();
#endif
}

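/*
 * Drop any translation blocks covering the given address. The
 * user-mode variant takes a guest virtual address directly; the
 * system-mode variant first translates (as, addr) to a RAM offset and
 * does nothing for addresses not backed by RAM or ROMD regions.
 */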
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr)
{
    mmap_lock();
    tb_invalidate_phys_page_range(addr, addr + 1);
    mmap_unlock();
}
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    if (!tcg_enabled()) {
        return;
    }

    RCU_READ_LOCK_GUARD();
    mr = address_space_translate(as, addr, &addr, &l, false, attrs);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        return;
    }
    ram_addr = memory_region_get_ram_addr(mr) + addr;
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1);
}
#endif

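/*
 * Breakpoint handling. Targets may provide a gdb_adjust_breakpoint()
 * hook, in which case the requested PC is translated through it before
 * a breakpoint is inserted or looked up.
 */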
/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUBreakpoint *bp;

    if (cc->gdb_adjust_breakpoint) {
        pc = cc->gdb_adjust_breakpoint(cpu, pc);
    }

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    if (breakpoint) {
        *breakpoint = bp;
    }

    trace_breakpoint_insert(cpu->cpu_index, pc, flags);
    return 0;
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUBreakpoint *bp;

    if (cc->gdb_adjust_breakpoint) {
        pc = cc->gdb_adjust_breakpoint(cpu, pc);
    }

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *bp)
{
    QTAILQ_REMOVE(&cpu->breakpoints, bp, entry);

    trace_breakpoint_remove(cpu->cpu_index, bp->pc, bp->flags);
    g_free(bp);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        }
        trace_breakpoint_singlestep(cpu->cpu_index, enabled);
    }
}

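/*
 * Report a fatal error: dump the message and CPU state to stderr (and
 * to the QEMU log when logging to a separate file), finish any
 * record/replay session, and abort. In user mode the host SIGABRT
 * handler is restored to SIG_DFL before calling abort().
 */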
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "qemu: fatal: ");
            vfprintf(logfile, fmt, ap2);
            fprintf(logfile, "\n");
            cpu_dump_state(cpu, logfile, CPU_DUMP_FPU | CPU_DUMP_CCOP);
            qemu_log_unlock(logfile);
        }
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        act.sa_flags = 0;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
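/*
 * User-mode debug accesses walk the guest address space one target
 * page at a time, checking PAGE_VALID and PAGE_READ/PAGE_WRITE and
 * copying through lock_user()/unlock_user(); returns -1 as soon as a
 * page is unmapped or lacks the required permission.
 */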
int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
                        void *ptr, size_t len, bool is_write)
{
    int flags;
    vaddr l, page;
    void * p;
    uint8_t *buf = ptr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

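/* Runtime view of the compile-time target endianness. */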
bool target_words_bigendian(void)
{
#if TARGET_BIG_ENDIAN
    return true;
#else
    return false;
#endif
}

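/*
 * Initialize qemu_host_page_size/qemu_host_page_mask from the real host
 * page size, rounded up to at least TARGET_PAGE_SIZE.
 */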
void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size();
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}