]> git.proxmox.com Git - mirror_qemu.git/blob - qom/cpu.c
483f26a5fb847d0de35ff061c275bc8ac9e9834d
[mirror_qemu.git] / qom / cpu.c
1 /*
2 * QEMU CPU model
3 *
4 * Copyright (c) 2012-2014 SUSE LINUX Products GmbH
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see
18 * <http://www.gnu.org/licenses/gpl-2.0.html>
19 */
20
21 #include "qemu/osdep.h"
22 #include "qapi/error.h"
23 #include "qemu-common.h"
24 #include "qom/cpu.h"
25 #include "sysemu/hw_accel.h"
26 #include "qemu/notify.h"
27 #include "qemu/log.h"
28 #include "exec/log.h"
29 #include "exec/cpu-common.h"
30 #include "qemu/error-report.h"
31 #include "sysemu/sysemu.h"
32 #include "hw/boards.h"
33 #include "hw/qdev-properties.h"
34 #include "trace-root.h"
35
/* Hook called to deliver an interrupt request to a CPU.  This is a C
 * tentative definition; it is initialized to generic_handle_interrupt
 * further down in this file and may be overridden by accelerator code.
 */
CPUInterruptHandler cpu_interrupt_handler;
37
38 CPUState *cpu_by_arch_id(int64_t id)
39 {
40 CPUState *cpu;
41
42 CPU_FOREACH(cpu) {
43 CPUClass *cc = CPU_GET_CLASS(cpu);
44
45 if (cc->get_arch_id(cpu) == id) {
46 return cpu;
47 }
48 }
49 return NULL;
50 }
51
52 bool cpu_exists(int64_t id)
53 {
54 return !!cpu_by_arch_id(id);
55 }
56
57 CPUState *cpu_create(const char *typename)
58 {
59 Error *err = NULL;
60 CPUState *cpu = CPU(object_new(typename));
61 object_property_set_bool(OBJECT(cpu), true, "realized", &err);
62 if (err != NULL) {
63 error_report_err(err);
64 object_unref(OBJECT(cpu));
65 return NULL;
66 }
67 return cpu;
68 }
69
70 const char *cpu_parse_cpu_model(const char *typename, const char *cpu_model)
71 {
72 ObjectClass *oc;
73 CPUClass *cc;
74 Error *err = NULL;
75 gchar **model_pieces;
76 const char *cpu_type;
77
78 model_pieces = g_strsplit(cpu_model, ",", 2);
79
80 oc = cpu_class_by_name(typename, model_pieces[0]);
81 if (oc == NULL) {
82 g_strfreev(model_pieces);
83 return NULL;
84 }
85
86 cpu_type = object_class_get_name(oc);
87 cc = CPU_CLASS(oc);
88 cc->parse_features(cpu_type, model_pieces[1], &err);
89 g_strfreev(model_pieces);
90 if (err != NULL) {
91 error_report_err(err);
92 return NULL;
93 }
94 return cpu_type;
95 }
96
97 CPUState *cpu_generic_init(const char *typename, const char *cpu_model)
98 {
99 /* TODO: all callers of cpu_generic_init() need to be converted to
100 * call cpu_parse_features() only once, before calling cpu_generic_init().
101 */
102 const char *cpu_type = cpu_parse_cpu_model(typename, cpu_model);
103
104 if (cpu_type) {
105 return cpu_create(cpu_type);
106 }
107 return NULL;
108 }
109
110 bool cpu_paging_enabled(const CPUState *cpu)
111 {
112 CPUClass *cc = CPU_GET_CLASS(cpu);
113
114 return cc->get_paging_enabled(cpu);
115 }
116
/* Default get_paging_enabled hook: a target that does not override it
 * reports paging as disabled.
 */
static bool cpu_common_get_paging_enabled(const CPUState *cpu)
{
    return false;
}
121
122 void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
123 Error **errp)
124 {
125 CPUClass *cc = CPU_GET_CLASS(cpu);
126
127 cc->get_memory_mapping(cpu, list, errp);
128 }
129
/* Default get_memory_mapping hook: always fails, since the base class
 * knows nothing about the target's page-table layout.
 */
static void cpu_common_get_memory_mapping(CPUState *cpu,
                                          MemoryMappingList *list,
                                          Error **errp)
{
    error_setg(errp, "Obtaining memory mappings is unsupported on this CPU.");
}
136
137 /* Resetting the IRQ comes from across the code base so we take the
138 * BQL here if we need to. cpu_interrupt assumes it is held.*/
139 void cpu_reset_interrupt(CPUState *cpu, int mask)
140 {
141 bool need_lock = !qemu_mutex_iothread_locked();
142
143 if (need_lock) {
144 qemu_mutex_lock_iothread();
145 }
146 cpu->interrupt_request &= ~mask;
147 if (need_lock) {
148 qemu_mutex_unlock_iothread();
149 }
150 }
151
/* Ask @cpu to leave its execution loop as soon as possible.  Safe to
 * call from another thread; both stores are atomic.
 */
void cpu_exit(CPUState *cpu)
{
    atomic_set(&cpu->exit_request, 1);
    /* Ensure cpu_exec will see the exit request after TCG has exited. */
    smp_wmb();
    /* Non-zero icount_decr.u16.high is presumably what forces translated
     * code to return to the main loop — confirm against cpu_exec().
     */
    atomic_set(&cpu->icount_decr.u16.high, -1);
}
159
160 int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
161 void *opaque)
162 {
163 CPUClass *cc = CPU_GET_CLASS(cpu);
164
165 return (*cc->write_elf32_qemunote)(f, cpu, opaque);
166 }
167
/* Default hook: emit no QEMU-specific ELF32 note; 0 appears to mean
 * "nothing to write" rather than an error (contrast the -1 stubs below).
 */
static int cpu_common_write_elf32_qemunote(WriteCoreDumpFunction f,
                                           CPUState *cpu, void *opaque)
{
    return 0;
}
173
174 int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
175 int cpuid, void *opaque)
176 {
177 CPUClass *cc = CPU_GET_CLASS(cpu);
178
179 return (*cc->write_elf32_note)(f, cpu, cpuid, opaque);
180 }
181
/* Default hook: ELF32 note dumping unimplemented for this target;
 * returns -1 to signal failure.
 */
static int cpu_common_write_elf32_note(WriteCoreDumpFunction f,
                                       CPUState *cpu, int cpuid,
                                       void *opaque)
{
    return -1;
}
188
189 int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
190 void *opaque)
191 {
192 CPUClass *cc = CPU_GET_CLASS(cpu);
193
194 return (*cc->write_elf64_qemunote)(f, cpu, opaque);
195 }
196
/* Default hook: emit no QEMU-specific ELF64 note (0 = nothing to write). */
static int cpu_common_write_elf64_qemunote(WriteCoreDumpFunction f,
                                           CPUState *cpu, void *opaque)
{
    return 0;
}
202
203 int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
204 int cpuid, void *opaque)
205 {
206 CPUClass *cc = CPU_GET_CLASS(cpu);
207
208 return (*cc->write_elf64_note)(f, cpu, cpuid, opaque);
209 }
210
/* Default hook: ELF64 note dumping unimplemented for this target;
 * returns -1 to signal failure.
 */
static int cpu_common_write_elf64_note(WriteCoreDumpFunction f,
                                       CPUState *cpu, int cpuid,
                                       void *opaque)
{
    return -1;
}
217
218
/* Default gdb_read_register hook: reads no bytes into @buf (returns 0,
 * the number of bytes produced for register @reg).
 */
static int cpu_common_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg)
{
    return 0;
}
223
/* Default gdb_write_register hook: consumes no bytes from @buf. */
static int cpu_common_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg)
{
    return 0;
}
228
/* Default debug_check_watchpoint hook. */
static bool cpu_common_debug_check_watchpoint(CPUState *cpu, CPUWatchpoint *wp)
{
    /* If no extra check is required, QEMU watchpoint match can be considered
     * as an architectural match.
     */
    return true;
}
236
/* Provided by the per-target build; no common header declares it here. */
bool target_words_bigendian(void);
/* Default virtio_is_big_endian hook: virtio device endianness follows
 * the target's word endianness.
 */
static bool cpu_common_virtio_is_big_endian(CPUState *cpu)
{
    return target_words_bigendian();
}
242
/* Shared empty hook; installed below as the default for
 * debug_excp_handler, cpu_exec_enter and cpu_exec_exit.
 */
static void cpu_common_noop(CPUState *cpu)
{
}
246
/* Default cpu_exec_interrupt hook: the interrupt is never handled. */
static bool cpu_common_exec_interrupt(CPUState *cpu, int int_req)
{
    return false;
}
251
252 GuestPanicInformation *cpu_get_crash_info(CPUState *cpu)
253 {
254 CPUClass *cc = CPU_GET_CLASS(cpu);
255 GuestPanicInformation *res = NULL;
256
257 if (cc->get_crash_info) {
258 res = cc->get_crash_info(cpu);
259 }
260 return res;
261 }
262
263 void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
264 int flags)
265 {
266 CPUClass *cc = CPU_GET_CLASS(cpu);
267
268 if (cc->dump_state) {
269 cpu_synchronize_state(cpu);
270 cc->dump_state(cpu, f, cpu_fprintf, flags);
271 }
272 }
273
274 void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
275 int flags)
276 {
277 CPUClass *cc = CPU_GET_CLASS(cpu);
278
279 if (cc->dump_statistics) {
280 cc->dump_statistics(cpu, f, cpu_fprintf, flags);
281 }
282 }
283
284 void cpu_reset(CPUState *cpu)
285 {
286 CPUClass *klass = CPU_GET_CLASS(cpu);
287
288 if (klass->reset != NULL) {
289 (*klass->reset)(cpu);
290 }
291
292 trace_guest_cpu_reset(cpu);
293 }
294
/* Base-class reset hook: return the generic CPUState fields to their
 * power-on values.
 */
static void cpu_common_reset(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    /* Optionally log the reset with a full register dump. */
    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index);
        log_cpu_state(cpu, cc->reset_dump_flags);
    }

    cpu->interrupt_request = 0;
    cpu->halted = 0;
    cpu->mem_io_pc = 0;
    cpu->mem_io_vaddr = 0;
    cpu->icount_extra = 0;
    cpu->icount_decr.u32 = 0;
    cpu->can_do_io = 1;
    cpu->exception_index = -1;  /* -1: no exception pending */
    cpu->crash_occurred = false;

    /* Per-CPU translation caches must not survive a reset under TCG. */
    if (tcg_enabled()) {
        cpu_tb_jmp_cache_clear(cpu);

        tcg_flush_softmmu_tlb(cpu);
    }
}
320
/* Default has_work hook: the base CPU never has pending work. */
static bool cpu_common_has_work(CPUState *cs)
{
    return false;
}
325
326 ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model)
327 {
328 CPUClass *cc = CPU_CLASS(object_class_by_name(typename));
329
330 return cc->class_by_name(cpu_model);
331 }
332
/* Default class_by_name hook: the abstract base class cannot resolve
 * model names, so every lookup fails.
 */
static ObjectClass *cpu_common_class_by_name(const char *cpu_model)
{
    return NULL;
}
337
/* Default parse_features hook: turn a "key=value,key=value" feature
 * string into global properties applied to QOM type @typename.
 * @features is modified in place (strtok and '=' splitting).
 * Registered properties live for the remainder of the process; the
 * GlobalProperty structs are intentionally never freed.
 * NOTE(review): strtok keeps static state, so this is not reentrant —
 * acceptable only if all callers run single-threaded; confirm.
 */
static void cpu_common_parse_features(const char *typename, char *features,
                                      Error **errp)
{
    char *featurestr; /* Single "key=value" string being parsed */
    char *val;
    static bool cpu_globals_initialized;

    /* TODO: all callers of ->parse_features() need to be changed to
     * call it only once, so we can remove this check (or change it
     * to assert(!cpu_globals_initialized).
     * Current callers of ->parse_features() are:
     * - cpu_generic_init()
     */
    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    featurestr = features ? strtok(features, ",") : NULL;

    while (featurestr) {
        val = strchr(featurestr, '=');
        if (val) {
            GlobalProperty *prop = g_new0(typeof(*prop), 1);
            *val = 0;   /* split "key=value" into "key" and "value" */
            val++;
            /* NOTE(review): @typename is stored without copying, so it
             * must outlive the global property list — it comes from
             * object_class_get_name() in practice; confirm.
             */
            prop->driver = typename;
            prop->property = g_strdup(featurestr);
            prop->value = g_strdup(val);
            prop->errp = &error_fatal;
            qdev_prop_register_global(prop);
        } else {
            error_setg(errp, "Expected key=value format, found %s.",
                       featurestr);
            return;
        }
        featurestr = strtok(NULL, ",");
    }
}
377
378 static void cpu_common_realizefn(DeviceState *dev, Error **errp)
379 {
380 CPUState *cpu = CPU(dev);
381 Object *machine = qdev_get_machine();
382
383 /* qdev_get_machine() can return something that's not TYPE_MACHINE
384 * if this is one of the user-only emulators; in that case there's
385 * no need to check the ignore_memory_transaction_failures board flag.
386 */
387 if (object_dynamic_cast(machine, TYPE_MACHINE)) {
388 ObjectClass *oc = object_get_class(machine);
389 MachineClass *mc = MACHINE_CLASS(oc);
390
391 if (mc) {
392 cpu->ignore_memory_transaction_failures =
393 mc->ignore_memory_transaction_failures;
394 }
395 }
396
397 if (dev->hotplugged) {
398 cpu_synchronize_post_init(cpu);
399 cpu_resume(cpu);
400 }
401
402 /* NOTE: latest generic point where the cpu is fully realized */
403 trace_init_vcpu(cpu);
404 }
405
/* Common unrealize step: trace teardown, then undo cpu_exec state. */
static void cpu_common_unrealizefn(DeviceState *dev, Error **errp)
{
    CPUState *cpu = CPU(dev);
    /* NOTE: latest generic point before the cpu is fully unrealized */
    trace_fini_vcpu(cpu);
    cpu_exec_unrealizefn(cpu);
}
413
/* Instance-init hook for TYPE_CPU: set generic defaults on a freshly
 * allocated CPUState before properties are applied.
 */
static void cpu_common_initfn(Object *obj)
{
    CPUState *cpu = CPU(obj);
    CPUClass *cc = CPU_GET_CLASS(obj);

    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    /* Both gdb register counts start at the class's core-register count. */
    cpu->gdb_num_regs = cpu->gdb_num_g_regs = cc->gdb_num_core_regs;
    /* *-user doesn't have configurable SMP topology */
    /* the default value is changed by qemu_init_vcpu() for softmmu */
    cpu->nr_cores = 1;
    cpu->nr_threads = 1;

    qemu_mutex_init(&cpu->work_mutex);
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);

    cpu_exec_initfn(cpu);
}
432
/* Instance-finalize hook: nothing to release at the base level. */
static void cpu_common_finalize(Object *obj)
{
}
436
/* Default get_arch_id hook: the arch ID is the linear cpu_index. */
static int64_t cpu_common_get_arch_id(CPUState *cpu)
{
    return cpu->cpu_index;
}
441
/* Default adjust_watchpoint_address hook: use the address unmodified. */
static vaddr cpu_adjust_watchpoint_address(CPUState *cpu, vaddr addr, int len)
{
    return addr;
}
446
447 static void generic_handle_interrupt(CPUState *cpu, int mask)
448 {
449 cpu->interrupt_request |= mask;
450
451 if (!qemu_cpu_is_self(cpu)) {
452 qemu_cpu_kick(cpu);
453 }
454 }
455
/* External definition matching the tentative one near the top of the
 * file; accelerators may replace this pointer at runtime.
 */
CPUInterruptHandler cpu_interrupt_handler = generic_handle_interrupt;
457
/* Class-init for TYPE_CPU: install the default implementation for every
 * CPUClass hook (targets override the ones they support) and wire the
 * generic DeviceClass behavior.
 */
static void cpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    CPUClass *k = CPU_CLASS(klass);

    k->class_by_name = cpu_common_class_by_name;
    k->parse_features = cpu_common_parse_features;
    k->reset = cpu_common_reset;
    k->get_arch_id = cpu_common_get_arch_id;
    k->has_work = cpu_common_has_work;
    k->get_paging_enabled = cpu_common_get_paging_enabled;
    k->get_memory_mapping = cpu_common_get_memory_mapping;
    k->write_elf32_qemunote = cpu_common_write_elf32_qemunote;
    k->write_elf32_note = cpu_common_write_elf32_note;
    k->write_elf64_qemunote = cpu_common_write_elf64_qemunote;
    k->write_elf64_note = cpu_common_write_elf64_note;
    k->gdb_read_register = cpu_common_gdb_read_register;
    k->gdb_write_register = cpu_common_gdb_write_register;
    k->virtio_is_big_endian = cpu_common_virtio_is_big_endian;
    k->debug_excp_handler = cpu_common_noop;
    k->debug_check_watchpoint = cpu_common_debug_check_watchpoint;
    k->cpu_exec_enter = cpu_common_noop;
    k->cpu_exec_exit = cpu_common_noop;
    k->cpu_exec_interrupt = cpu_common_exec_interrupt;
    k->adjust_watchpoint_address = cpu_adjust_watchpoint_address;
    set_bit(DEVICE_CATEGORY_CPU, dc->categories);
    dc->realize = cpu_common_realizefn;
    dc->unrealize = cpu_common_unrealizefn;
    dc->props = cpu_common_props;
    /*
     * Reason: CPUs still need special care by board code: wiring up
     * IRQs, adding reset handlers, halting non-first CPUs, ...
     */
    dc->user_creatable = false;
}
493
/* QOM registration for the abstract TYPE_CPU base class; concrete CPU
 * models subclass this and cannot instantiate it directly.
 */
static const TypeInfo cpu_type_info = {
    .name = TYPE_CPU,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(CPUState),
    .instance_init = cpu_common_initfn,
    .instance_finalize = cpu_common_finalize,
    .abstract = true,
    .class_size = sizeof(CPUClass),
    .class_init = cpu_class_init,
};
504
/* Register TYPE_CPU with the QOM type system at module-init time. */
static void cpu_register_types(void)
{
    type_register_static(&cpu_type_info);
}

type_init(cpu_register_types)