qom/cpu.c
/*
 * QEMU CPU model
 *
 * Copyright (c) 2012-2014 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "qom/cpu.h"
#include "sysemu/hw_accel.h"
#include "qemu/notify.h"
#include "qemu/log.h"
#include "exec/log.h"
#include "exec/cpu-common.h"
#include "qemu/error-report.h"
#include "sysemu/sysemu.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "trace-root.h"

CPUInterruptHandler cpu_interrupt_handler;

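/* Return the CPU whose architecture-specific ID, as reported by the
 * CPUClass::get_arch_id hook, equals @id, or NULL if there is none. */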
CPUState *cpu_by_arch_id(int64_t id)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->get_arch_id(cpu) == id) {
            return cpu;
        }
    }
    return NULL;
}

bool cpu_exists(int64_t id)
{
    return !!cpu_by_arch_id(id);
}

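/* Instantiate a CPU from a "model[,feature,...]" string: look up the CPU
 * class for the model name, register the feature list as global properties,
 * then create and realize the CPU object.  Returns NULL on failure. */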
CPUState *cpu_generic_init(const char *typename, const char *cpu_model)
{
    CPUState *cpu = NULL;
    ObjectClass *oc;
    CPUClass *cc;
    Error *err = NULL;
    gchar **model_pieces;

    model_pieces = g_strsplit(cpu_model, ",", 2);

    oc = cpu_class_by_name(typename, model_pieces[0]);
    if (oc == NULL) {
        g_strfreev(model_pieces);
        return NULL;
    }

    cc = CPU_CLASS(oc);
    /* TODO: all callers of cpu_generic_init() need to be converted to
     * call parse_features() only once, before calling cpu_generic_init().
     */
    cc->parse_features(object_class_get_name(oc), model_pieces[1], &err);
    g_strfreev(model_pieces);
    if (err != NULL) {
        goto out;
    }

    cpu = CPU(object_new(object_class_get_name(oc)));
    object_property_set_bool(OBJECT(cpu), true, "realized", &err);

out:
    if (err != NULL) {
        error_report_err(err);
        object_unref(OBJECT(cpu));
        return NULL;
    }

    return cpu;
}

bool cpu_paging_enabled(const CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return cc->get_paging_enabled(cpu);
}

static bool cpu_common_get_paging_enabled(const CPUState *cpu)
{
    return false;
}

void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                            Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->get_memory_mapping(cpu, list, errp);
}

static void cpu_common_get_memory_mapping(CPUState *cpu,
                                          MemoryMappingList *list,
                                          Error **errp)
{
    error_setg(errp, "Obtaining memory mappings is unsupported on this CPU.");
}

/* Resetting the IRQ comes from across the code base so we take the
 * BQL here if we need to.  cpu_interrupt assumes it is held. */
void cpu_reset_interrupt(CPUState *cpu, int mask)
{
    bool need_lock = !qemu_mutex_iothread_locked();

    if (need_lock) {
        qemu_mutex_lock_iothread();
    }
    cpu->interrupt_request &= ~mask;
    if (need_lock) {
        qemu_mutex_unlock_iothread();
    }
}

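/* Request that @cpu break out of its execution loop as soon as possible. */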
void cpu_exit(CPUState *cpu)
{
    atomic_set(&cpu->exit_request, 1);
    /* Ensure cpu_exec will see the exit request after TCG has exited. */
    smp_wmb();
    atomic_set(&cpu->icount_decr.u16.high, -1);
}

int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf32_qemunote)(f, cpu, opaque);
}

static int cpu_common_write_elf32_qemunote(WriteCoreDumpFunction f,
                                           CPUState *cpu, void *opaque)
{
    return 0;
}

int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf32_note)(f, cpu, cpuid, opaque);
}

static int cpu_common_write_elf32_note(WriteCoreDumpFunction f,
                                       CPUState *cpu, int cpuid,
                                       void *opaque)
{
    return -1;
}

int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf64_qemunote)(f, cpu, opaque);
}

static int cpu_common_write_elf64_qemunote(WriteCoreDumpFunction f,
                                           CPUState *cpu, void *opaque)
{
    return 0;
}

int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf64_note)(f, cpu, cpuid, opaque);
}

static int cpu_common_write_elf64_note(WriteCoreDumpFunction f,
                                       CPUState *cpu, int cpuid,
                                       void *opaque)
{
    return -1;
}


static int cpu_common_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg)
{
    return 0;
}

static int cpu_common_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg)
{
    return 0;
}

static bool cpu_common_debug_check_watchpoint(CPUState *cpu, CPUWatchpoint *wp)
{
    /* If no extra check is required, a QEMU watchpoint match can be
     * considered an architectural match.
     */
    return true;
}

bool target_words_bigendian(void);
static bool cpu_common_virtio_is_big_endian(CPUState *cpu)
{
    return target_words_bigendian();
}

static void cpu_common_noop(CPUState *cpu)
{
}

static bool cpu_common_exec_interrupt(CPUState *cpu, int int_req)
{
    return false;
}

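/* Return guest crash information from the CPU's optional get_crash_info
 * hook, or NULL if the CPU model does not provide one. */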
GuestPanicInformation *cpu_get_crash_info(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    GuestPanicInformation *res = NULL;

    if (cc->get_crash_info) {
        res = cc->get_crash_info(cpu);
    }
    return res;
}

void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->dump_state) {
        cpu_synchronize_state(cpu);
        cc->dump_state(cpu, f, cpu_fprintf, flags);
    }
}

void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                         int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->dump_statistics) {
        cc->dump_statistics(cpu, f, cpu_fprintf, flags);
    }
}

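/* Reset @cpu via its class reset hook and emit the guest_cpu_reset
 * trace event. */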
void cpu_reset(CPUState *cpu)
{
    CPUClass *klass = CPU_GET_CLASS(cpu);

    if (klass->reset != NULL) {
        (*klass->reset)(cpu);
    }

    trace_guest_cpu_reset(cpu);
}

static void cpu_common_reset(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index);
        log_cpu_state(cpu, cc->reset_dump_flags);
    }

    cpu->interrupt_request = 0;
    cpu->halted = 0;
    cpu->mem_io_pc = 0;
    cpu->mem_io_vaddr = 0;
    cpu->icount_extra = 0;
    cpu->icount_decr.u32 = 0;
    cpu->can_do_io = 1;
    cpu->exception_index = -1;
    cpu->crash_occurred = false;

    if (tcg_enabled()) {
        cpu_tb_jmp_cache_clear(cpu);

        tcg_flush_softmmu_tlb(cpu);
    }
}

static bool cpu_common_has_work(CPUState *cs)
{
    return false;
}

ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model)
{
    CPUClass *cc = CPU_CLASS(object_class_by_name(typename));

    return cc->class_by_name(cpu_model);
}

static ObjectClass *cpu_common_class_by_name(const char *cpu_model)
{
    return NULL;
}

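/* Default parse_features() implementation: register each "key=value" pair
 * from @features as a global property on the CPU @typename, so that it is
 * applied when the CPU object is created. */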
static void cpu_common_parse_features(const char *typename, char *features,
                                      Error **errp)
{
    char *featurestr; /* Single "key=value" string being parsed */
    char *val;
    static bool cpu_globals_initialized;

    /* TODO: all callers of ->parse_features() need to be changed to
     * call it only once, so we can remove this check (or change it
     * to assert(!cpu_globals_initialized)).
     * Current callers of ->parse_features() are:
     * - cpu_generic_init()
     */
    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    featurestr = features ? strtok(features, ",") : NULL;

    while (featurestr) {
        val = strchr(featurestr, '=');
        if (val) {
            GlobalProperty *prop = g_new0(typeof(*prop), 1);
            *val = 0;
            val++;
            prop->driver = typename;
            prop->property = g_strdup(featurestr);
            prop->value = g_strdup(val);
            prop->errp = &error_fatal;
            qdev_prop_register_global(prop);
        } else {
            error_setg(errp, "Expected key=value format, found %s.",
                       featurestr);
            return;
        }
        featurestr = strtok(NULL, ",");
    }
}

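/* Common realize step for all CPUs: pick up the board-level
 * ignore_memory_transaction_failures flag and, for hotplugged CPUs,
 * synchronize state with the accelerator and resume execution. */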
static void cpu_common_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cpu = CPU(dev);
    Object *machine = qdev_get_machine();

    /* qdev_get_machine() can return something that's not TYPE_MACHINE
     * if this is one of the user-only emulators; in that case there's
     * no need to check the ignore_memory_transaction_failures board flag.
     */
    if (object_dynamic_cast(machine, TYPE_MACHINE)) {
        ObjectClass *oc = object_get_class(machine);
        MachineClass *mc = MACHINE_CLASS(oc);

        if (mc) {
            cpu->ignore_memory_transaction_failures =
                mc->ignore_memory_transaction_failures;
        }
    }

    if (dev->hotplugged) {
        cpu_synchronize_post_init(cpu);
        cpu_resume(cpu);
    }

    /* NOTE: latest generic point where the cpu is fully realized */
    trace_init_vcpu(cpu);
}

static void cpu_common_unrealizefn(DeviceState *dev, Error **errp)
{
    CPUState *cpu = CPU(dev);
    /* NOTE: latest generic point before the cpu is fully unrealized */
    trace_fini_vcpu(cpu);
    cpu_exec_unrealizefn(cpu);
}

static void cpu_common_initfn(Object *obj)
{
    CPUState *cpu = CPU(obj);
    CPUClass *cc = CPU_GET_CLASS(obj);

    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    cpu->gdb_num_regs = cpu->gdb_num_g_regs = cc->gdb_num_core_regs;
    /* *-user doesn't have configurable SMP topology */
    /* the default value is changed by qemu_init_vcpu() for softmmu */
    cpu->nr_cores = 1;
    cpu->nr_threads = 1;

    qemu_mutex_init(&cpu->work_mutex);
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);

    cpu_exec_initfn(cpu);
}

static void cpu_common_finalize(Object *obj)
{
}

static int64_t cpu_common_get_arch_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

static vaddr cpu_adjust_watchpoint_address(CPUState *cpu, vaddr addr, int len)
{
    return addr;
}

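/* Default cpu_interrupt() handler: record the pending interrupt bits and
 * kick the vCPU if the request comes from another thread. */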
static void generic_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}

CPUInterruptHandler cpu_interrupt_handler = generic_handle_interrupt;

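/* Install the default (mostly no-op) implementations of the CPUClass hooks;
 * architecture-specific subclasses override the ones they need. */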
static void cpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    CPUClass *k = CPU_CLASS(klass);

    k->class_by_name = cpu_common_class_by_name;
    k->parse_features = cpu_common_parse_features;
    k->reset = cpu_common_reset;
    k->get_arch_id = cpu_common_get_arch_id;
    k->has_work = cpu_common_has_work;
    k->get_paging_enabled = cpu_common_get_paging_enabled;
    k->get_memory_mapping = cpu_common_get_memory_mapping;
    k->write_elf32_qemunote = cpu_common_write_elf32_qemunote;
    k->write_elf32_note = cpu_common_write_elf32_note;
    k->write_elf64_qemunote = cpu_common_write_elf64_qemunote;
    k->write_elf64_note = cpu_common_write_elf64_note;
    k->gdb_read_register = cpu_common_gdb_read_register;
    k->gdb_write_register = cpu_common_gdb_write_register;
    k->virtio_is_big_endian = cpu_common_virtio_is_big_endian;
    k->debug_excp_handler = cpu_common_noop;
    k->debug_check_watchpoint = cpu_common_debug_check_watchpoint;
    k->cpu_exec_enter = cpu_common_noop;
    k->cpu_exec_exit = cpu_common_noop;
    k->cpu_exec_interrupt = cpu_common_exec_interrupt;
    k->adjust_watchpoint_address = cpu_adjust_watchpoint_address;
    set_bit(DEVICE_CATEGORY_CPU, dc->categories);
    dc->realize = cpu_common_realizefn;
    dc->unrealize = cpu_common_unrealizefn;
    dc->props = cpu_common_props;
    /*
     * Reason: CPUs still need special care by board code: wiring up
     * IRQs, adding reset handlers, halting non-first CPUs, ...
     */
    dc->user_creatable = false;
}

static const TypeInfo cpu_type_info = {
    .name = TYPE_CPU,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(CPUState),
    .instance_init = cpu_common_initfn,
    .instance_finalize = cpu_common_finalize,
    .abstract = true,
    .class_size = sizeof(CPUClass),
    .class_init = cpu_class_init,
};

static void cpu_register_types(void)
{
    type_register_static(&cpu_type_info);
}

type_init(cpu_register_types)