]> git.proxmox.com Git - mirror_qemu.git/blob - qom/cpu.c
Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging
[mirror_qemu.git] / qom / cpu.c
1 /*
2 * QEMU CPU model
3 *
4 * Copyright (c) 2012-2014 SUSE LINUX Products GmbH
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see
18 * <http://www.gnu.org/licenses/gpl-2.0.html>
19 */
20
21 #include "qemu/osdep.h"
22 #include "qapi/error.h"
23 #include "qemu-common.h"
24 #include "qom/cpu.h"
25 #include "sysemu/hw_accel.h"
26 #include "qemu/notify.h"
27 #include "qemu/log.h"
28 #include "exec/log.h"
29 #include "exec/cpu-common.h"
30 #include "qemu/error-report.h"
31 #include "sysemu/sysemu.h"
32 #include "hw/qdev-properties.h"
33 #include "trace-root.h"
34
/* Dispatch pointer for interrupt delivery; tentative definition here,
 * given its default value (generic_handle_interrupt) later in this file. */
CPUInterruptHandler cpu_interrupt_handler;
36
37 bool cpu_exists(int64_t id)
38 {
39 CPUState *cpu;
40
41 CPU_FOREACH(cpu) {
42 CPUClass *cc = CPU_GET_CLASS(cpu);
43
44 if (cc->get_arch_id(cpu) == id) {
45 return true;
46 }
47 }
48 return false;
49 }
50
51 CPUState *cpu_generic_init(const char *typename, const char *cpu_model)
52 {
53 char *str, *name, *featurestr;
54 CPUState *cpu = NULL;
55 ObjectClass *oc;
56 CPUClass *cc;
57 Error *err = NULL;
58
59 str = g_strdup(cpu_model);
60 name = strtok(str, ",");
61
62 oc = cpu_class_by_name(typename, name);
63 if (oc == NULL) {
64 g_free(str);
65 return NULL;
66 }
67
68 cc = CPU_CLASS(oc);
69 featurestr = strtok(NULL, ",");
70 /* TODO: all callers of cpu_generic_init() need to be converted to
71 * call parse_features() only once, before calling cpu_generic_init().
72 */
73 cc->parse_features(object_class_get_name(oc), featurestr, &err);
74 g_free(str);
75 if (err != NULL) {
76 goto out;
77 }
78
79 cpu = CPU(object_new(object_class_get_name(oc)));
80 object_property_set_bool(OBJECT(cpu), true, "realized", &err);
81
82 out:
83 if (err != NULL) {
84 error_report_err(err);
85 object_unref(OBJECT(cpu));
86 return NULL;
87 }
88
89 return cpu;
90 }
91
92 void *cpu_alloc_env(CPUState *cpu)
93 {
94 CPUClass *cc = CPU_GET_CLASS(cpu);
95
96 return cc->alloc_env ? cc->alloc_env(cpu) : NULL;
97 }
98
99 void cpu_get_env(CPUState *cpu, void *env)
100 {
101 CPUClass *cc = CPU_GET_CLASS(cpu);
102
103 if (cc->get_env) {
104 cc->get_env(cpu, env);
105 }
106 }
107
108 void cpu_set_env(CPUState *cpu, void *env)
109 {
110 CPUClass *cc = CPU_GET_CLASS(cpu);
111
112 if (cc->set_env) {
113 cc->set_env(cpu, env);
114 }
115 }
116
117 void cpu_free_env(CPUState *cpu, void *env)
118 {
119 CPUClass *cc = CPU_GET_CLASS(cpu);
120
121 if (cc->free_env) {
122 cc->free_env(cpu, env);
123 }
124 }
125
126 bool cpu_paging_enabled(const CPUState *cpu)
127 {
128 CPUClass *cc = CPU_GET_CLASS(cpu);
129
130 return cc->get_paging_enabled(cpu);
131 }
132
/* Default get_paging_enabled hook: the base class knows nothing about
 * MMUs, so report paging as disabled. */
static bool cpu_common_get_paging_enabled(const CPUState *cpu)
{
    return false;
}
137
138 void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
139 Error **errp)
140 {
141 CPUClass *cc = CPU_GET_CLASS(cpu);
142
143 cc->get_memory_mapping(cpu, list, errp);
144 }
145
/* Default get_memory_mapping hook: report lack of support via @errp. */
static void cpu_common_get_memory_mapping(CPUState *cpu,
                                          MemoryMappingList *list,
                                          Error **errp)
{
    error_setg(errp, "Obtaining memory mappings is unsupported on this CPU.");
}
152
/* Resetting the IRQ comes from across the code base so we take the
 * BQL here if we need to. cpu_interrupt assumes it is held.*/
void cpu_reset_interrupt(CPUState *cpu, int mask)
{
    /* Only acquire the BQL when the caller does not already hold it. */
    bool need_lock = !qemu_mutex_iothread_locked();

    if (need_lock) {
        qemu_mutex_lock_iothread();
    }
    /* Clear the requested interrupt bits while the BQL is held. */
    cpu->interrupt_request &= ~mask;
    if (need_lock) {
        qemu_mutex_unlock_iothread();
    }
}
167
/* Ask @cpu to leave its execution loop as soon as possible; intended to
 * be callable from any thread (uses atomics plus a write barrier). */
void cpu_exit(CPUState *cpu)
{
    atomic_set(&cpu->exit_request, 1);
    /* Ensure cpu_exec will see the exit request after TCG has exited. */
    smp_wmb();
    /* Force icount_decr negative so translated code drops back to the
     * main loop at its next check. */
    atomic_set(&cpu->icount_decr.u16.high, -1);
}
175
176 int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
177 void *opaque)
178 {
179 CPUClass *cc = CPU_GET_CLASS(cpu);
180
181 return (*cc->write_elf32_qemunote)(f, cpu, opaque);
182 }
183
/* Default hook: no QEMU-specific ELF32 note to write; report success. */
static int cpu_common_write_elf32_qemunote(WriteCoreDumpFunction f,
                                           CPUState *cpu, void *opaque)
{
    return 0;
}
189
190 int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
191 int cpuid, void *opaque)
192 {
193 CPUClass *cc = CPU_GET_CLASS(cpu);
194
195 return (*cc->write_elf32_note)(f, cpu, cpuid, opaque);
196 }
197
/* Default hook: ELF32 per-CPU notes are unsupported; report failure. */
static int cpu_common_write_elf32_note(WriteCoreDumpFunction f,
                                       CPUState *cpu, int cpuid,
                                       void *opaque)
{
    return -1;
}
204
205 int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
206 void *opaque)
207 {
208 CPUClass *cc = CPU_GET_CLASS(cpu);
209
210 return (*cc->write_elf64_qemunote)(f, cpu, opaque);
211 }
212
/* Default hook: no QEMU-specific ELF64 note to write; report success. */
static int cpu_common_write_elf64_qemunote(WriteCoreDumpFunction f,
                                           CPUState *cpu, void *opaque)
{
    return 0;
}
218
219 int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
220 int cpuid, void *opaque)
221 {
222 CPUClass *cc = CPU_GET_CLASS(cpu);
223
224 return (*cc->write_elf64_note)(f, cpu, cpuid, opaque);
225 }
226
/* Default hook: ELF64 per-CPU notes are unsupported; report failure. */
static int cpu_common_write_elf64_note(WriteCoreDumpFunction f,
                                       CPUState *cpu, int cpuid,
                                       void *opaque)
{
    return -1;
}
233
234
/* Default gdbstub register reader: the base class exposes no registers,
 * so write nothing into @buf and report 0 bytes. */
static int cpu_common_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg)
{
    return 0;
}
239
/* Default gdbstub register writer: the base class exposes no registers,
 * so consume nothing from @buf and report 0 bytes. */
static int cpu_common_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg)
{
    return 0;
}
244
/* Default debug_check_watchpoint hook. */
static bool cpu_common_debug_check_watchpoint(CPUState *cpu, CPUWatchpoint *wp)
{
    /* If no extra check is required, QEMU watchpoint match can be considered
     * as an architectural match.
     */
    return true;
}
252
/* Prototype given here rather than in a header; the function is
 * provided per target elsewhere in the tree. */
bool target_words_bigendian(void);
/* Default virtio_is_big_endian hook: virtio endianness follows the
 * target's word order. */
static bool cpu_common_virtio_is_big_endian(CPUState *cpu)
{
    return target_words_bigendian();
}
258
/* Shared no-op default for hooks that need no base-class behavior
 * (installed below for debug_excp_handler, cpu_exec_enter/exit). */
static void cpu_common_noop(CPUState *cpu)
{
}
262
/* Default cpu_exec_interrupt hook: never handle an interrupt request. */
static bool cpu_common_exec_interrupt(CPUState *cpu, int int_req)
{
    return false;
}
267
268 GuestPanicInformation *cpu_get_crash_info(CPUState *cpu)
269 {
270 CPUClass *cc = CPU_GET_CLASS(cpu);
271 GuestPanicInformation *res = NULL;
272
273 if (cc->get_crash_info) {
274 res = cc->get_crash_info(cpu);
275 }
276 return res;
277 }
278
279 void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
280 int flags)
281 {
282 CPUClass *cc = CPU_GET_CLASS(cpu);
283
284 if (cc->dump_state) {
285 cpu_synchronize_state(cpu);
286 cc->dump_state(cpu, f, cpu_fprintf, flags);
287 }
288 }
289
290 void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
291 int flags)
292 {
293 CPUClass *cc = CPU_GET_CLASS(cpu);
294
295 if (cc->dump_statistics) {
296 cc->dump_statistics(cpu, f, cpu_fprintf, flags);
297 }
298 }
299
300 void cpu_reset(CPUState *cpu)
301 {
302 CPUClass *klass = CPU_GET_CLASS(cpu);
303
304 if (klass->reset != NULL) {
305 (*klass->reset)(cpu);
306 }
307
308 trace_guest_cpu_reset(cpu);
309 }
310
/* Base-class reset: optionally log the event, then return the common
 * CPUState fields to their power-on values. */
static void cpu_common_reset(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index);
        log_cpu_state(cpu, cc->reset_dump_flags);
    }

    cpu->interrupt_request = 0;
    cpu->halted = 0;
    cpu->mem_io_pc = 0;
    cpu->mem_io_vaddr = 0;
    cpu->icount_extra = 0;
    cpu->icount_decr.u32 = 0;
    cpu->can_do_io = 1;
    cpu->exception_index = -1;  /* no exception pending */
    cpu->crash_occurred = false;

    if (tcg_enabled()) {
        /* Stale translations must not survive a reset: drop the TB
         * jump cache and flush the software TLB. */
        cpu_tb_jmp_cache_clear(cpu);

        tcg_flush_softmmu_tlb(cpu);
    }
}
336
/* Default has_work hook: the base class has no notion of pending work. */
static bool cpu_common_has_work(CPUState *cs)
{
    return false;
}
341
342 ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model)
343 {
344 CPUClass *cc = CPU_CLASS(object_class_by_name(typename));
345
346 return cc->class_by_name(cpu_model);
347 }
348
/* Default class_by_name hook: the base class resolves no model names. */
static ObjectClass *cpu_common_class_by_name(const char *cpu_model)
{
    return NULL;
}
353
/* Default parse_features hook: turn a "key=value,key=value,..." feature
 * string into global properties registered against @typename.  Each
 * property carries &error_fatal, so a bad value aborts when the
 * property is later applied rather than being reported via @errp. */
static void cpu_common_parse_features(const char *typename, char *features,
                                      Error **errp)
{
    char *featurestr; /* Single "key=value" string being parsed */
    char *val;
    /* Guards against registering duplicate globals when this function
     * is invoked more than once (see TODO below). */
    static bool cpu_globals_initialized;

    /* TODO: all callers of ->parse_features() need to be changed to
     * call it only once, so we can remove this check (or change it
     * to assert(!cpu_globals_initialized).
     * Current callers of ->parse_features() are:
     * - cpu_generic_init()
     */
    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    /* NOTE(review): strtok() keeps static state and is not reentrant;
     * this appears to assume single-threaded startup — confirm before
     * calling from elsewhere. */
    featurestr = features ? strtok(features, ",") : NULL;

    while (featurestr) {
        val = strchr(featurestr, '=');
        if (val) {
            GlobalProperty *prop = g_new0(typeof(*prop), 1);
            *val = 0;   /* split "key=value" in place */
            val++;
            prop->driver = typename;
            prop->property = g_strdup(featurestr);
            prop->value = g_strdup(val);
            prop->errp = &error_fatal;
            qdev_prop_register_global(prop);
        } else {
            error_setg(errp, "Expected key=value format, found %s.",
                       featurestr);
            return;
        }
        featurestr = strtok(NULL, ",");
    }
}
393
/* Base-class realize: for a hot-plugged CPU, synchronize state with the
 * accelerator and start it running; always trace the realized vCPU. */
static void cpu_common_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cpu = CPU(dev);

    if (dev->hotplugged) {
        cpu_synchronize_post_init(cpu);
        cpu_resume(cpu);
    }

    /* NOTE: latest generic point where the cpu is fully realized */
    trace_init_vcpu(cpu);
}
406
/* Base-class unrealize: trace the teardown and undo cpu_exec setup. */
static void cpu_common_unrealizefn(DeviceState *dev, Error **errp)
{
    CPUState *cpu = CPU(dev);
    /* NOTE: latest generic point before the cpu is fully unrealized */
    trace_fini_vcpu(cpu);
    cpu_exec_unrealizefn(cpu);
}
414
/* Instance init for the abstract CPU type: set common CPUState fields
 * to their defaults before concrete subclasses run their own init. */
static void cpu_common_initfn(Object *obj)
{
    CPUState *cpu = CPU(obj);
    CPUClass *cc = CPU_GET_CLASS(obj);

    /* A real index is assigned later, once the CPU is plugged in. */
    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    cpu->gdb_num_regs = cpu->gdb_num_g_regs = cc->gdb_num_core_regs;
    /* *-user doesn't have configurable SMP topology */
    /* the default value is changed by qemu_init_vcpu() for softmmu */
    cpu->nr_cores = 1;
    cpu->nr_threads = 1;

    qemu_mutex_init(&cpu->work_mutex);
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);

    cpu_exec_initfn(cpu);
}
433
/* Instance finalizer: nothing owned at the base-class level to free. */
static void cpu_common_finalize(Object *obj)
{
}
437
/* Default get_arch_id hook: use the globally-assigned cpu_index as the
 * architectural CPU id. */
static int64_t cpu_common_get_arch_id(CPUState *cpu)
{
    return cpu->cpu_index;
}
442
/* Default adjust_watchpoint_address hook: no address transformation. */
static vaddr cpu_adjust_watchpoint_address(CPUState *cpu, vaddr addr, int len)
{
    return addr;
}
447
/* Generic interrupt delivery: record the request bits and, when invoked
 * from a thread other than the target vCPU's, kick that vCPU so it
 * notices the pending request. */
static void generic_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}

/* Dispatch pointer defaults to the generic implementation; other code
 * may install a different handler through this global. */
CPUInterruptHandler cpu_interrupt_handler = generic_handle_interrupt;
458
/* Class init for the abstract CPU type: install the default (mostly
 * stub) implementations of every CPUClass hook and configure the
 * DeviceClass side of the type. */
static void cpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    CPUClass *k = CPU_CLASS(klass);

    k->class_by_name = cpu_common_class_by_name;
    k->parse_features = cpu_common_parse_features;
    k->reset = cpu_common_reset;
    k->get_arch_id = cpu_common_get_arch_id;
    k->has_work = cpu_common_has_work;
    k->get_paging_enabled = cpu_common_get_paging_enabled;
    k->get_memory_mapping = cpu_common_get_memory_mapping;
    k->write_elf32_qemunote = cpu_common_write_elf32_qemunote;
    k->write_elf32_note = cpu_common_write_elf32_note;
    k->write_elf64_qemunote = cpu_common_write_elf64_qemunote;
    k->write_elf64_note = cpu_common_write_elf64_note;
    k->gdb_read_register = cpu_common_gdb_read_register;
    k->gdb_write_register = cpu_common_gdb_write_register;
    k->virtio_is_big_endian = cpu_common_virtio_is_big_endian;
    k->debug_excp_handler = cpu_common_noop;
    k->debug_check_watchpoint = cpu_common_debug_check_watchpoint;
    k->cpu_exec_enter = cpu_common_noop;
    k->cpu_exec_exit = cpu_common_noop;
    k->cpu_exec_interrupt = cpu_common_exec_interrupt;
    k->adjust_watchpoint_address = cpu_adjust_watchpoint_address;
    set_bit(DEVICE_CATEGORY_CPU, dc->categories);
    dc->realize = cpu_common_realizefn;
    dc->unrealize = cpu_common_unrealizefn;
    dc->props = cpu_common_props;
    /*
     * Reason: CPUs still need special care by board code: wiring up
     * IRQs, adding reset handlers, halting non-first CPUs, ...
     */
    dc->user_creatable = false;
}
494
/* QOM registration data for the abstract TYPE_CPU base type. */
static const TypeInfo cpu_type_info = {
    .name = TYPE_CPU,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(CPUState),
    .instance_init = cpu_common_initfn,
    .instance_finalize = cpu_common_finalize,
    .abstract = true,   /* only concrete subclasses may be instantiated */
    .class_size = sizeof(CPUClass),
    .class_init = cpu_class_init,
};
505
/* Register TYPE_CPU with the QOM type system at module-init time. */
static void cpu_register_types(void)
{
    type_register_static(&cpu_type_info);
}

type_init(cpu_register_types)