/*
 * Target-specific parts of the CPU object
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"

#include "exec/target_page.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#include "qemu/error-report.h"
#include "migration/vmstate.h"
#ifdef CONFIG_USER_ONLY
#include "qemu.h"
#else
#include "hw/core/sysemu-cpu-ops.h"
#include "exec/address-spaces.h"
#endif
#include "sysemu/tcg.h"
#include "sysemu/kvm.h"
#include "sysemu/replay.h"
#include "exec/cpu-common.h"
#include "exec/exec-all.h"
#include "exec/translate-all.h"
#include "exec/log.h"
#include "hw/core/accel-cpu.h"
#include "trace/trace-root.h"
#include "qemu/accel.h"

uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

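/*
 * Migration state shared by all CPU types (system emulation only).
 * On load, the bit that used to be CPU_INTERRUPT_EXIT is cleared and
 * both the TLB and all translated blocks are flushed, since loadvm
 * has rewritten guest RAM behind the back of the TCG code cache.
 */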
#ifndef CONFIG_USER_ONLY
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu);

    /* loadvm has just updated the content of RAM, bypassing the
     * usual mechanisms that ensure we flush TBs for writes to
     * memory we've translated code from. So we must flush all TBs,
     * which will now be stale.
     */
    tb_flush(cpu);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
#endif

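/*
 * Realize the target-specific parts of a CPU: give the accelerator a
 * chance to hook in, add the CPU to the global list, run plugin
 * initialization for TCG, and register the common migration state
 * (system emulation only).
 */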
void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
    /* cache the cpu class for the hotpath */
    cpu->cc = CPU_GET_CLASS(cpu);

    if (!accel_cpu_realizefn(cpu, errp)) {
        return;
    }

    /* NB: errp parameter is unused currently */
    if (tcg_enabled()) {
        tcg_exec_realizefn(cpu, errp);
    }

    /* Wait until cpu initialization complete before exposing cpu. */
    cpu_list_add(cpu);

    /* Plugin initialization must wait until cpu_index assigned. */
    if (tcg_enabled()) {
        qemu_plugin_vcpu_init_hook(cpu);
    }

#ifdef CONFIG_USER_ONLY
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL ||
           qdev_get_vmsd(DEVICE(cpu))->unmigratable);
#else
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cpu->cc->sysemu_ops->legacy_vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cpu->cc->sysemu_ops->legacy_vmsd, cpu);
    }
#endif /* CONFIG_USER_ONLY */
}

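/*
 * Undo cpu_exec_realizefn(): unregister migration state, tear down the
 * per-CPU TCG state, and remove the CPU from the global list.
 */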
void cpu_exec_unrealizefn(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->sysemu_ops->legacy_vmsd != NULL) {
        vmstate_unregister(NULL, cc->sysemu_ops->legacy_vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
#endif
    if (tcg_enabled()) {
        tcg_exec_unrealizefn(cpu);
    }

    cpu_list_remove(cpu);
}

/*
 * This can't go in hw/core/cpu.c because that file is compiled only
 * once for both user-mode and system builds.
 */
static Property cpu_common_props[] = {
#ifdef CONFIG_USER_ONLY
    /*
     * Create a property for the user-only object, so users can
     * adjust prctl(PR_SET_UNALIGN) from the command-line.
     * Has no effect if the target does not support the feature.
     */
    DEFINE_PROP_BOOL("prctl-unalign-sigbus", CPUState,
                     prctl_unalign_sigbus, false),
#else
    /*
     * Create a memory property for softmmu CPU object, so users can
     * wire up its memory. The default if no link is set up is to use
     * the system address space.
     */
    DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
                     MemoryRegion *),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

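/*
 * "start-powered-off" is exposed through an explicit getter/setter pair
 * rather than through cpu_common_props[], so that it remains settable
 * after the CPU has been realized (see cpu_class_init_props() below).
 */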
static bool cpu_get_start_powered_off(Object *obj, Error **errp)
{
    CPUState *cpu = CPU(obj);
    return cpu->start_powered_off;
}

static void cpu_set_start_powered_off(Object *obj, bool value, Error **errp)
{
    CPUState *cpu = CPU(obj);
    cpu->start_powered_off = value;
}

void cpu_class_init_props(DeviceClass *dc)
{
    ObjectClass *oc = OBJECT_CLASS(dc);

    device_class_set_props(dc, cpu_common_props);
    /*
     * We can't use DEFINE_PROP_BOOL in the Property array for this
     * property, because we want this to be settable after realize.
     */
    object_class_property_add_bool(oc, "start-powered-off",
                                   cpu_get_start_powered_off,
                                   cpu_set_start_powered_off);
}

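/*
 * Early, instance-init-time setup for a new CPU object: establishes the
 * default (empty) address-space state and, for system emulation, links
 * the CPU to the global system memory region.
 */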
void cpu_exec_initfn(CPUState *cpu)
{
    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
    cpu->memory = get_system_memory();
    object_ref(OBJECT(cpu->memory));
#endif
}

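/*
 * Parse the argument of a "-cpu" command-line option of the form
 * "model[,feature,...]". Reports an error and exits if the model is
 * empty or unknown; otherwise applies the feature string and returns
 * the QOM type name of the selected CPU class.
 */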
const char *parse_cpu_option(const char *cpu_option)
{
    ObjectClass *oc;
    CPUClass *cc;
    gchar **model_pieces;
    const char *cpu_type;

    model_pieces = g_strsplit(cpu_option, ",", 2);
    if (!model_pieces[0]) {
        error_report("-cpu option cannot be empty");
        exit(1);
    }

    oc = cpu_class_by_name(CPU_RESOLVING_TYPE, model_pieces[0]);
    if (oc == NULL) {
        error_report("unable to find CPU model '%s'", model_pieces[0]);
        g_strfreev(model_pieces);
        exit(EXIT_FAILURE);
    }

    cpu_type = object_class_get_name(oc);
    cc = CPU_CLASS(oc);
    cc->parse_features(cpu_type, model_pieces[1], &error_fatal);
    g_strfreev(model_pieces);
    return cpu_type;
}

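/*
 * Print the list of CPU models supported by this target, provided the
 * target defines a cpu_list() implementation.
 */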
void list_cpus(const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list();
#endif
}

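/*
 * Invalidate any translated code on the guest page containing @addr.
 * The user-mode variant takes a guest virtual address; the system
 * variant first translates through the given address space and only
 * acts when TCG is enabled and the address resolves to RAM or a
 * ROM-device region.
 */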
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr)
{
    mmap_lock();
    tb_invalidate_phys_page(addr);
    mmap_unlock();
}
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
{
    ram_addr_t ram_addr;
    MemoryRegion *mr;
    hwaddr l = 1;

    if (!tcg_enabled()) {
        return;
    }

    RCU_READ_LOCK_GUARD();
    mr = address_space_translate(as, addr, &addr, &l, false, attrs);
    if (!(memory_region_is_ram(mr)
          || memory_region_is_romd(mr))) {
        return;
    }
    ram_addr = memory_region_get_ram_addr(mr) + addr;
    tb_invalidate_phys_page(ram_addr);
}
#endif

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUBreakpoint *bp;

    if (cc->gdb_adjust_breakpoint) {
        pc = cc->gdb_adjust_breakpoint(cpu, pc);
    }

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    if (breakpoint) {
        *breakpoint = bp;
    }

    trace_breakpoint_insert(cpu->cpu_index, pc, flags);
    return 0;
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUBreakpoint *bp;

    if (cc->gdb_adjust_breakpoint) {
        pc = cc->gdb_adjust_breakpoint(cpu, pc);
    }

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *bp)
{
    QTAILQ_REMOVE(&cpu->breakpoints, bp, entry);

    trace_breakpoint_remove(cpu->cpu_index, bp->pc, bp->flags);
    g_free(bp);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/*
 * Enable or disable single-step mode. EXCP_DEBUG is returned by the
 * CPU loop after each instruction.
 */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        }
        trace_breakpoint_singlestep(cpu->cpu_index, enabled);
    }
}

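/*
 * Report a fatal error: print the message and the CPU state to stderr
 * (and to the log file when logging to a separate file), finish replay,
 * and abort. Under user-mode emulation the SIGABRT handler is reset to
 * its default before abort() is called.
 */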
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "qemu: fatal: ");
            vfprintf(logfile, fmt, ap2);
            fprintf(logfile, "\n");
            cpu_dump_state(cpu, logfile, CPU_DUMP_FPU | CPU_DUMP_CCOP);
            qemu_log_unlock(logfile);
        }
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        act.sa_flags = 0;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
                        void *ptr, size_t len, bool is_write)
{
    int flags;
    vaddr l, page;
    void *p;
    uint8_t *buf = ptr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

bool target_words_bigendian(void)
{
#if TARGET_BIG_ENDIAN
    return true;
#else
    return false;
#endif
}

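/*
 * Initialize qemu_host_page_size and qemu_host_page_mask from the real
 * host page size, clamped so the host page size is never smaller than
 * TARGET_PAGE_SIZE.
 */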
void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size();
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}