Adapt header include paths.
Signed-off-by: Andreas Färber <afaerber@suse.de>
-void ppc_set_irq (CPUPPCState *env, int n_IRQ, int level);
+ #ifndef HW_PPC_H
+ #define HW_PPC_H 1
+
+void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level);
/* PowerPC hardware exceptions management helpers */
typedef void (*clk_setup_cb)(void *opaque, uint32_t freq);
#define PPC_SERIAL_MM_BAUDBASE 399193
/* ppc_booke.c */
-void ppc_booke_timers_init(CPUPPCState *env, uint32_t freq, uint32_t flags);
+void ppc_booke_timers_init(PowerPCCPU *cpu, uint32_t freq, uint32_t flags);
+
+ #endif
#if !defined(PPC_4XX_H)
#define PPC_4XX_H
- #include "pci.h"
+ #include "pci/pci.h"
/* PowerPC 4xx core initialization */
-CPUPPCState *ppc4xx_init (const char *cpu_model,
- clk_setup_t *cpu_clk, clk_setup_t *tb_clk,
- uint32_t sysclk);
+PowerPCCPU *ppc4xx_init(const char *cpu_model,
+ clk_setup_t *cpu_clk, clk_setup_t *tb_clk,
+ uint32_t sysclk);
/* PowerPC 4xx universal interrupt controller */
enum {
--- /dev/null
- #include "net.h"
+/*
+ * qdev property parsing and global properties
+ * (parts specific for qemu-system-*)
+ *
+ * This file is based on code from hw/qdev-properties.c from
+ * commit 074a86fccd185616469dfcdc0e157f438aebba18,
+ * Copyright (c) Gerd Hoffmann <kraxel@redhat.com> and other contributors.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
- #include "qerror.h"
- #include "blockdev.h"
++#include "net/net.h"
+#include "qdev.h"
- #include "qapi/qapi-visit-core.h"
++#include "qapi/qmp/qerror.h"
++#include "sysemu/blockdev.h"
+#include "hw/block-common.h"
+#include "net/hub.h"
++#include "qapi/visitor.h"
++#include "char/char.h"
+
+static void get_pointer(Object *obj, Visitor *v, Property *prop,
+ const char *(*print)(void *ptr),
+ const char *name, Error **errp)
+{
+ DeviceState *dev = DEVICE(obj);
+ void **ptr = qdev_get_prop_ptr(dev, prop);
+ char *p;
+
+ p = (char *) (*ptr ? print(*ptr) : "");
+ visit_type_str(v, &p, name, errp);
+}
+
+static void set_pointer(Object *obj, Visitor *v, Property *prop,
+ int (*parse)(DeviceState *dev, const char *str,
+ void **ptr),
+ const char *name, Error **errp)
+{
+ DeviceState *dev = DEVICE(obj);
+ Error *local_err = NULL;
+ void **ptr = qdev_get_prop_ptr(dev, prop);
+ char *str;
+ int ret;
+
+ if (dev->state != DEV_STATE_CREATED) {
+ error_set(errp, QERR_PERMISSION_DENIED);
+ return;
+ }
+
+ visit_type_str(v, &str, name, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ if (!*str) {
+ g_free(str);
+ *ptr = NULL;
+ return;
+ }
+ ret = parse(dev, str, ptr);
+ error_set_from_qdev_prop_error(errp, ret, dev, prop, str);
+ g_free(str);
+}
+
+/* --- drive --- */
+
+static int parse_drive(DeviceState *dev, const char *str, void **ptr)
+{
+ BlockDriverState *bs;
+
+ bs = bdrv_find(str);
+ if (bs == NULL) {
+ return -ENOENT;
+ }
+ if (bdrv_attach_dev(bs, dev) < 0) {
+ return -EEXIST;
+ }
+ *ptr = bs;
+ return 0;
+}
+
+static void release_drive(Object *obj, const char *name, void *opaque)
+{
+ DeviceState *dev = DEVICE(obj);
+ Property *prop = opaque;
+ BlockDriverState **ptr = qdev_get_prop_ptr(dev, prop);
+
+ if (*ptr) {
+ bdrv_detach_dev(*ptr, dev);
+ blockdev_auto_del(*ptr);
+ }
+}
+
+static const char *print_drive(void *ptr)
+{
+ return bdrv_get_device_name(ptr);
+}
+
+static void get_drive(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ get_pointer(obj, v, opaque, print_drive, name, errp);
+}
+
+static void set_drive(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ set_pointer(obj, v, opaque, parse_drive, name, errp);
+}
+
+PropertyInfo qdev_prop_drive = {
+ .name = "drive",
+ .get = get_drive,
+ .set = set_drive,
+ .release = release_drive,
+};
+
+/* --- character device --- */
+
+static int parse_chr(DeviceState *dev, const char *str, void **ptr)
+{
+ CharDriverState *chr = qemu_chr_find(str);
+ if (chr == NULL) {
+ return -ENOENT;
+ }
+ if (chr->avail_connections < 1) {
+ return -EEXIST;
+ }
+ *ptr = chr;
+ --chr->avail_connections;
+ return 0;
+}
+
+static void release_chr(Object *obj, const char *name, void *opaque)
+{
+ DeviceState *dev = DEVICE(obj);
+ Property *prop = opaque;
+ CharDriverState **ptr = qdev_get_prop_ptr(dev, prop);
+
+ if (*ptr) {
+ qemu_chr_add_handlers(*ptr, NULL, NULL, NULL, NULL);
+ }
+}
+
+
+static const char *print_chr(void *ptr)
+{
+ CharDriverState *chr = ptr;
+
+ return chr->label ? chr->label : "";
+}
+
+static void get_chr(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ get_pointer(obj, v, opaque, print_chr, name, errp);
+}
+
+static void set_chr(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ set_pointer(obj, v, opaque, parse_chr, name, errp);
+}
+
+PropertyInfo qdev_prop_chr = {
+ .name = "chr",
+ .get = get_chr,
+ .set = set_chr,
+ .release = release_chr,
+};
+
+/* --- netdev device --- */
+
+static int parse_netdev(DeviceState *dev, const char *str, void **ptr)
+{
+ NetClientState *netdev = qemu_find_netdev(str);
+
+ if (netdev == NULL) {
+ return -ENOENT;
+ }
+ if (netdev->peer) {
+ return -EEXIST;
+ }
+ *ptr = netdev;
+ return 0;
+}
+
+static const char *print_netdev(void *ptr)
+{
+ NetClientState *netdev = ptr;
+
+ return netdev->name ? netdev->name : "";
+}
+
+static void get_netdev(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ get_pointer(obj, v, opaque, print_netdev, name, errp);
+}
+
+static void set_netdev(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ set_pointer(obj, v, opaque, parse_netdev, name, errp);
+}
+
+PropertyInfo qdev_prop_netdev = {
+ .name = "netdev",
+ .get = get_netdev,
+ .set = set_netdev,
+};
+
+/* --- vlan --- */
+
+static int print_vlan(DeviceState *dev, Property *prop, char *dest, size_t len)
+{
+ NetClientState **ptr = qdev_get_prop_ptr(dev, prop);
+
+ if (*ptr) {
+ int id;
+ if (!net_hub_id_for_client(*ptr, &id)) {
+ return snprintf(dest, len, "%d", id);
+ }
+ }
+
+ return snprintf(dest, len, "<null>");
+}
+
+static void get_vlan(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ DeviceState *dev = DEVICE(obj);
+ Property *prop = opaque;
+ NetClientState **ptr = qdev_get_prop_ptr(dev, prop);
+ int32_t id = -1;
+
+ if (*ptr) {
+ int hub_id;
+ if (!net_hub_id_for_client(*ptr, &hub_id)) {
+ id = hub_id;
+ }
+ }
+
+ visit_type_int32(v, &id, name, errp);
+}
+
+static void set_vlan(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ DeviceState *dev = DEVICE(obj);
+ Property *prop = opaque;
+ NetClientState **ptr = qdev_get_prop_ptr(dev, prop);
+ Error *local_err = NULL;
+ int32_t id;
+ NetClientState *hubport;
+
+ if (dev->state != DEV_STATE_CREATED) {
+ error_set(errp, QERR_PERMISSION_DENIED);
+ return;
+ }
+
+ visit_type_int32(v, &id, name, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ if (id == -1) {
+ *ptr = NULL;
+ return;
+ }
+
+ hubport = net_hub_port_find(id);
+ if (!hubport) {
+ error_set(errp, QERR_INVALID_PARAMETER_VALUE,
+ name, prop->info->name);
+ return;
+ }
+ *ptr = hubport;
+}
+
+PropertyInfo qdev_prop_vlan = {
+ .name = "vlan",
+ .print = print_vlan,
+ .get = get_vlan,
+ .set = set_vlan,
+};
+
+int qdev_prop_set_drive(DeviceState *dev, const char *name,
+ BlockDriverState *value)
+{
+ Error *errp = NULL;
+ const char *bdrv_name = value ? bdrv_get_device_name(value) : "";
+ object_property_set_str(OBJECT(dev), bdrv_name,
+ name, &errp);
+ if (errp) {
+ qerror_report_err(errp);
+ error_free(errp);
+ return -1;
+ }
+ return 0;
+}
+
+void qdev_prop_set_drive_nofail(DeviceState *dev, const char *name,
+ BlockDriverState *value)
+{
+ if (qdev_prop_set_drive(dev, name, value) < 0) {
+ exit(1);
+ }
+}
+void qdev_prop_set_chr(DeviceState *dev, const char *name,
+ CharDriverState *value)
+{
+ Error *errp = NULL;
+ assert(!value || value->label);
+ object_property_set_str(OBJECT(dev),
+ value ? value->label : "", name, &errp);
+ assert_no_error(errp);
+}
+
+void qdev_prop_set_netdev(DeviceState *dev, const char *name,
+ NetClientState *value)
+{
+ Error *errp = NULL;
+ assert(!value || value->name);
+ object_property_set_str(OBJECT(dev),
+ value ? value->name : "", name, &errp);
+ assert_no_error(errp);
+}
+
+void qdev_set_nic_properties(DeviceState *dev, NICInfo *nd)
+{
+ qdev_prop_set_macaddr(dev, "mac", nd->macaddr.a);
+ if (nd->netdev) {
+ qdev_prop_set_netdev(dev, "netdev", nd->netdev);
+ }
+ if (nd->nvectors != DEV_NVECTORS_UNSPECIFIED &&
+ object_property_find(OBJECT(dev), "vectors", NULL)) {
+ qdev_prop_set_uint32(dev, "vectors", nd->nvectors);
+ }
+ nd->instantiated = 1;
+}
+
+static int qdev_add_one_global(QemuOpts *opts, void *opaque)
+{
+ GlobalProperty *g;
+
+ g = g_malloc0(sizeof(*g));
+ g->driver = qemu_opt_get(opts, "driver");
+ g->property = qemu_opt_get(opts, "property");
+ g->value = qemu_opt_get(opts, "value");
+ qdev_prop_register_global(g);
+ return 0;
+}
+
+void qemu_add_globals(void)
+{
+ qemu_opts_foreach(qemu_find_opts("global"), qdev_add_one_global, NULL, 0);
+}
inherit from a particular bus (e.g. PCI or I2C) rather than
this API directly. */
-#include "net/net.h"
#include "qdev.h"
- #include "sysemu.h"
- #include "error.h"
- #include "qapi/qapi-visit-core.h"
+ #include "sysemu/sysemu.h"
+ #include "qapi/error.h"
+ #include "qapi/visitor.h"
int qdev_hotplug = 0;
static bool qdev_hot_added = false;
--- /dev/null
+ #ifndef CPU_COMMON_H
+ #define CPU_COMMON_H 1
+
+ /* CPU interfaces that are target independent. */
+
+ #include "exec/hwaddr.h"
+
+ #ifndef NEED_CPU_H
+ #include "exec/poison.h"
+ #endif
+
+ #include "qemu/bswap.h"
+ #include "qemu/queue.h"
+
++/**
++ * CPUListState:
++ * @cpu_fprintf: Print function.
++ * @file: File to print to using @cpu_fprint.
++ *
++ * State commonly used for iterating over CPU models.
++ */
++typedef struct CPUListState {
++ fprintf_function cpu_fprintf;
++ FILE *file;
++} CPUListState;
++
+ #if !defined(CONFIG_USER_ONLY)
+
+ enum device_endian {
+ DEVICE_NATIVE_ENDIAN,
+ DEVICE_BIG_ENDIAN,
+ DEVICE_LITTLE_ENDIAN,
+ };
+
+ /* address in the RAM (different from a physical address) */
+ #if defined(CONFIG_XEN_BACKEND)
+ typedef uint64_t ram_addr_t;
+ # define RAM_ADDR_MAX UINT64_MAX
+ # define RAM_ADDR_FMT "%" PRIx64
+ #else
+ typedef uintptr_t ram_addr_t;
+ # define RAM_ADDR_MAX UINTPTR_MAX
+ # define RAM_ADDR_FMT "%" PRIxPTR
+ #endif
+
+ /* memory API */
+
+ typedef void CPUWriteMemoryFunc(void *opaque, hwaddr addr, uint32_t value);
+ typedef uint32_t CPUReadMemoryFunc(void *opaque, hwaddr addr);
+
+ void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
+ /* This should only be used for ram local to a device. */
+ void *qemu_get_ram_ptr(ram_addr_t addr);
+ void qemu_put_ram_ptr(void *addr);
+ /* This should not be used by devices. */
+ int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
+ ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
+ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev);
+
+ void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
+ int len, int is_write);
+ static inline void cpu_physical_memory_read(hwaddr addr,
+ void *buf, int len)
+ {
+ cpu_physical_memory_rw(addr, buf, len, 0);
+ }
+ static inline void cpu_physical_memory_write(hwaddr addr,
+ const void *buf, int len)
+ {
+ cpu_physical_memory_rw(addr, (void *)buf, len, 1);
+ }
+ void *cpu_physical_memory_map(hwaddr addr,
+ hwaddr *plen,
+ int is_write);
+ void cpu_physical_memory_unmap(void *buffer, hwaddr len,
+ int is_write, hwaddr access_len);
+ void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
+
+ bool cpu_physical_memory_is_io(hwaddr phys_addr);
+
+ /* Coalesced MMIO regions are areas where write operations can be reordered.
+ * This usually implies that write operations are side-effect free. This allows
+ * batching which can make a major impact on performance when using
+ * virtualization.
+ */
+ void qemu_flush_coalesced_mmio_buffer(void);
+
+ uint32_t ldub_phys(hwaddr addr);
+ uint32_t lduw_le_phys(hwaddr addr);
+ uint32_t lduw_be_phys(hwaddr addr);
+ uint32_t ldl_le_phys(hwaddr addr);
+ uint32_t ldl_be_phys(hwaddr addr);
+ uint64_t ldq_le_phys(hwaddr addr);
+ uint64_t ldq_be_phys(hwaddr addr);
+ void stb_phys(hwaddr addr, uint32_t val);
+ void stw_le_phys(hwaddr addr, uint32_t val);
+ void stw_be_phys(hwaddr addr, uint32_t val);
+ void stl_le_phys(hwaddr addr, uint32_t val);
+ void stl_be_phys(hwaddr addr, uint32_t val);
+ void stq_le_phys(hwaddr addr, uint64_t val);
+ void stq_be_phys(hwaddr addr, uint64_t val);
+
+ #ifdef NEED_CPU_H
+ uint32_t lduw_phys(hwaddr addr);
+ uint32_t ldl_phys(hwaddr addr);
+ uint64_t ldq_phys(hwaddr addr);
+ void stl_phys_notdirty(hwaddr addr, uint32_t val);
+ void stq_phys_notdirty(hwaddr addr, uint64_t val);
+ void stw_phys(hwaddr addr, uint32_t val);
+ void stl_phys(hwaddr addr, uint32_t val);
+ void stq_phys(hwaddr addr, uint64_t val);
+ #endif
+
+ void cpu_physical_memory_write_rom(hwaddr addr,
+ const uint8_t *buf, int len);
+
+ extern struct MemoryRegion io_mem_ram;
+ extern struct MemoryRegion io_mem_rom;
+ extern struct MemoryRegion io_mem_unassigned;
+ extern struct MemoryRegion io_mem_notdirty;
+
+ #endif
+
+ #endif /* !CPU_COMMON_H */
--- /dev/null
-struct kvm_run;
-struct KVMState;
+ /*
+ * common defines for all CPUs
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+ #ifndef CPU_DEFS_H
+ #define CPU_DEFS_H
+
+ #ifndef NEED_CPU_H
+ #error cpu.h included from common code
+ #endif
+
+ #include "config.h"
+ #include <setjmp.h>
+ #include <inttypes.h>
+ #include <signal.h>
+ #include "qemu/osdep.h"
+ #include "qemu/queue.h"
+ #include "exec/hwaddr.h"
+
+ #ifndef TARGET_LONG_BITS
+ #error TARGET_LONG_BITS must be defined before including this header
+ #endif
+
+ #define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
+
+ typedef int16_t target_short __attribute__ ((aligned(TARGET_SHORT_ALIGNMENT)));
+ typedef uint16_t target_ushort __attribute__((aligned(TARGET_SHORT_ALIGNMENT)));
+ typedef int32_t target_int __attribute__((aligned(TARGET_INT_ALIGNMENT)));
+ typedef uint32_t target_uint __attribute__((aligned(TARGET_INT_ALIGNMENT)));
+ typedef int64_t target_llong __attribute__((aligned(TARGET_LLONG_ALIGNMENT)));
+ typedef uint64_t target_ullong __attribute__((aligned(TARGET_LLONG_ALIGNMENT)));
+ /* target_ulong is the type of a virtual address */
+ #if TARGET_LONG_SIZE == 4
+ typedef int32_t target_long __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
+ typedef uint32_t target_ulong __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
+ #define TARGET_FMT_lx "%08x"
+ #define TARGET_FMT_ld "%d"
+ #define TARGET_FMT_lu "%u"
+ #elif TARGET_LONG_SIZE == 8
+ typedef int64_t target_long __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
+ typedef uint64_t target_ulong __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
+ #define TARGET_FMT_lx "%016" PRIx64
+ #define TARGET_FMT_ld "%" PRId64
+ #define TARGET_FMT_lu "%" PRIu64
+ #else
+ #error TARGET_LONG_SIZE undefined
+ #endif
+
+ #define EXCP_INTERRUPT 0x10000 /* async interruption */
+ #define EXCP_HLT 0x10001 /* hlt instruction reached */
+ #define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
+ #define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
+
+ #define TB_JMP_CACHE_BITS 12
+ #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
+
+ /* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
+ addresses on the same page. The top bits are the same. This allows
+ TLB invalidation to quickly clear a subset of the hash table. */
+ #define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
+ #define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
+ #define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
+ #define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
+
+ #if !defined(CONFIG_USER_ONLY)
+ #define CPU_TLB_BITS 8
+ #define CPU_TLB_SIZE (1 << CPU_TLB_BITS)
+
+ #if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32
+ #define CPU_TLB_ENTRY_BITS 4
+ #else
+ #define CPU_TLB_ENTRY_BITS 5
+ #endif
+
+ typedef struct CPUTLBEntry {
+ /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
+ bit TARGET_PAGE_BITS-1..4 : Nonzero for accesses that should not
+ go directly to ram.
+ bit 3 : indicates that the entry is invalid
+ bit 2..0 : zero
+ */
+ target_ulong addr_read;
+ target_ulong addr_write;
+ target_ulong addr_code;
+ /* Addend to virtual address to get host address. IO accesses
+ use the corresponding iotlb value. */
+ uintptr_t addend;
+ /* padding to get a power of two size */
+ uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
+ (sizeof(target_ulong) * 3 +
+ ((-sizeof(target_ulong) * 3) & (sizeof(uintptr_t) - 1)) +
+ sizeof(uintptr_t))];
+ } CPUTLBEntry;
+
+ extern int CPUTLBEntry_wrong_size[sizeof(CPUTLBEntry) == (1 << CPU_TLB_ENTRY_BITS) ? 1 : -1];
+
+ #define CPU_COMMON_TLB \
+ /* The meaning of the MMU modes is defined in the target code. */ \
+ CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
+ hwaddr iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
+ target_ulong tlb_flush_addr; \
+ target_ulong tlb_flush_mask;
+
+ #else
+
+ #define CPU_COMMON_TLB
+
+ #endif
+
+
+ #ifdef HOST_WORDS_BIGENDIAN
+ typedef struct icount_decr_u16 {
+ uint16_t high;
+ uint16_t low;
+ } icount_decr_u16;
+ #else
+ typedef struct icount_decr_u16 {
+ uint16_t low;
+ uint16_t high;
+ } icount_decr_u16;
+ #endif
+
- const char *cpu_model_str; \
- struct KVMState *kvm_state; \
- struct kvm_run *kvm_run; \
- int kvm_fd; \
- int kvm_vcpu_dirty;
+ struct qemu_work_item;
+
+ typedef struct CPUBreakpoint {
+ target_ulong pc;
+ int flags; /* BP_* */
+ QTAILQ_ENTRY(CPUBreakpoint) entry;
+ } CPUBreakpoint;
+
+ typedef struct CPUWatchpoint {
+ target_ulong vaddr;
+ target_ulong len_mask;
+ int flags; /* BP_* */
+ QTAILQ_ENTRY(CPUWatchpoint) entry;
+ } CPUWatchpoint;
+
+ #define CPU_TEMP_BUF_NLONGS 128
+ #define CPU_COMMON \
+ struct TranslationBlock *current_tb; /* currently executing TB */ \
+ /* soft mmu support */ \
+ /* in order to avoid passing too many arguments to the MMIO \
+ helpers, we store some rarely used information in the CPU \
+ context) */ \
+ uintptr_t mem_io_pc; /* host pc at which the memory was \
+ accessed */ \
+ target_ulong mem_io_vaddr; /* target virtual addr at which the \
+ memory was accessed */ \
+ uint32_t halted; /* Nonzero if the CPU is in suspend state */ \
+ uint32_t interrupt_request; \
+ volatile sig_atomic_t exit_request; \
+ CPU_COMMON_TLB \
+ struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; \
+ /* buffer for temporaries in the code generator */ \
+ long temp_buf[CPU_TEMP_BUF_NLONGS]; \
+ \
+ int64_t icount_extra; /* Instructions until next timer event. */ \
+ /* Number of cycles left, with interrupt flag in high bit. \
+ This allows a single read-compare-cbranch-write sequence to test \
+ for both decrementer underflow and exceptions. */ \
+ union { \
+ uint32_t u32; \
+ icount_decr_u16 u16; \
+ } icount_decr; \
+ uint32_t can_do_io; /* nonzero if memory mapped IO is safe. */ \
+ \
+ /* from this point: preserved by CPU reset */ \
+ /* ice debug support */ \
+ QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints; \
+ int singlestep_enabled; \
+ \
+ QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints; \
+ CPUWatchpoint *watchpoint_hit; \
+ \
+ struct GDBRegisterState *gdb_regs; \
+ \
+ /* Core interrupt code */ \
+ jmp_buf jmp_env; \
+ int exception_index; \
+ \
+ CPUArchState *next_cpu; /* next CPU sharing TB cache */ \
+ int cpu_index; /* CPU index (informative) */ \
+ uint32_t host_tid; /* host thread ID */ \
+ int numa_node; /* NUMA node this cpu is belonging to */ \
+ int nr_cores; /* number of cores within this CPU package */ \
+ int nr_threads;/* number of threads within this CPU */ \
+ int running; /* Nonzero if cpu is currently running(usermode). */ \
+ /* user data */ \
+ void *opaque; \
+ \
++ const char *cpu_model_str;
+
+ #endif
--- /dev/null
+ /*
+ * QEMU CPU model
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ */
+ #ifndef QEMU_CPU_H
+ #define QEMU_CPU_H
+
+ #include "qom/object.h"
+ #include "qemu/thread.h"
+
+ /**
+ * SECTION:cpu
+ * @section_id: QEMU-cpu
+ * @title: CPU Class
+ * @short_description: Base class for all CPUs
+ */
+
+ #define TYPE_CPU "cpu"
+
+ #define CPU(obj) OBJECT_CHECK(CPUState, (obj), TYPE_CPU)
+ #define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU)
+ #define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU)
+
+ typedef struct CPUState CPUState;
+
+ /**
+ * CPUClass:
+ * @reset: Callback to reset the #CPUState to its initial state.
+ *
+ * Represents a CPU family or model.
+ */
+ typedef struct CPUClass {
+ /*< private >*/
+ ObjectClass parent_class;
+ /*< public >*/
+
+ void (*reset)(CPUState *cpu);
+ } CPUClass;
+
++struct KVMState;
++struct kvm_run;
++
+ /**
+ * CPUState:
+ * @created: Indicates whether the CPU thread has been successfully created.
+ * @stop: Indicates a pending stop request.
+ * @stopped: Indicates the CPU has been artificially stopped.
++ * @kvm_fd: vCPU file descriptor for KVM.
+ *
+ * State of one CPU core or thread.
+ */
+ struct CPUState {
+ /*< private >*/
+ Object parent_obj;
+ /*< public >*/
+
+ struct QemuThread *thread;
+ #ifdef _WIN32
+ HANDLE hThread;
+ #endif
+ int thread_id;
+ struct QemuCond *halt_cond;
+ struct qemu_work_item *queued_work_first, *queued_work_last;
+ bool thread_kicked;
+ bool created;
+ bool stop;
+ bool stopped;
+
++#if !defined(CONFIG_USER_ONLY)
++ int kvm_fd;
++ bool kvm_vcpu_dirty;
++#endif
++ struct KVMState *kvm_state;
++ struct kvm_run *kvm_run;
++
+ /* TODO Move common fields from CPUArchState here. */
+ };
+
+
+ /**
+ * cpu_reset:
+ * @cpu: The CPU whose state is to be reset.
+ */
+ void cpu_reset(CPUState *cpu);
+
+ /**
+ * qemu_cpu_has_work:
+ * @cpu: The vCPU to check.
+ *
+ * Checks whether the CPU has work to do.
+ *
+ * Returns: %true if the CPU has work, %false otherwise.
+ */
+ bool qemu_cpu_has_work(CPUState *cpu);
+
+ /**
+ * qemu_cpu_is_self:
+ * @cpu: The vCPU to check against.
+ *
+ * Checks whether the caller is executing on the vCPU thread.
+ *
+ * Returns: %true if called from @cpu's thread, %false otherwise.
+ */
+ bool qemu_cpu_is_self(CPUState *cpu);
+
+ /**
+ * qemu_cpu_kick:
+ * @cpu: The vCPU to kick.
+ *
+ * Kicks @cpu's thread.
+ */
+ void qemu_cpu_kick(CPUState *cpu);
+
+ /**
+ * cpu_is_stopped:
+ * @cpu: The CPU to check.
+ *
+ * Checks whether the CPU is stopped.
+ *
+ * Returns: %true if run state is not running or if artificially stopped;
+ * %false otherwise.
+ */
+ bool cpu_is_stopped(CPUState *cpu);
+
+ /**
+ * run_on_cpu:
+ * @cpu: The vCPU to run on.
+ * @func: The function to be executed.
+ * @data: Data to pass to the function.
+ *
+ * Schedules the function @func for execution on the vCPU @cpu.
+ */
+ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
+
+
+ #endif
--- /dev/null
-int kvm_vcpu_ioctl(CPUArchState *env, int type, ...);
+ /*
+ * QEMU KVM support
+ *
+ * Copyright IBM, Corp. 2008
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+ #ifndef QEMU_KVM_H
+ #define QEMU_KVM_H
+
+ #include <errno.h>
+ #include "config-host.h"
+ #include "qemu/queue.h"
+
+ #ifdef CONFIG_KVM
+ #include <linux/kvm.h>
+ #include <linux/kvm_para.h>
+ #endif
+
+ extern int kvm_allowed;
+ extern bool kvm_kernel_irqchip;
+ extern bool kvm_async_interrupts_allowed;
+ extern bool kvm_irqfds_allowed;
+ extern bool kvm_msi_via_irqfd_allowed;
+ extern bool kvm_gsi_routing_allowed;
+
+ #if defined CONFIG_KVM || !defined NEED_CPU_H
+ #define kvm_enabled() (kvm_allowed)
+ /**
+ * kvm_irqchip_in_kernel:
+ *
+ * Returns: true if the user asked us to create an in-kernel
+ * irqchip via the "kernel_irqchip=on" machine option.
+ * What this actually means is architecture and machine model
+ * specific: on PC, for instance, it means that the LAPIC,
+ * IOAPIC and PIT are all in kernel. This function should never
+ * be used from generic target-independent code: use one of the
+ * following functions or some other specific check instead.
+ */
+ #define kvm_irqchip_in_kernel() (kvm_kernel_irqchip)
+
+ /**
+ * kvm_async_interrupts_enabled:
+ *
+ * Returns: true if we can deliver interrupts to KVM
+ * asynchronously (ie by ioctl from any thread at any time)
+ * rather than having to do interrupt delivery synchronously
+ * (where the vcpu must be stopped at a suitable point first).
+ */
+ #define kvm_async_interrupts_enabled() (kvm_async_interrupts_allowed)
+
+ /**
+ * kvm_irqfds_enabled:
+ *
+ * Returns: true if we can use irqfds to inject interrupts into
+ * a KVM CPU (ie the kernel supports irqfds and we are running
+ * with a configuration where it is meaningful to use them).
+ */
+ #define kvm_irqfds_enabled() (kvm_irqfds_allowed)
+
+ /**
+ * kvm_msi_via_irqfd_enabled:
+ *
+ * Returns: true if we can route a PCI MSI (Message Signaled Interrupt)
+ * to a KVM CPU via an irqfd. This requires that the kernel supports
+ * this and that we're running in a configuration that permits it.
+ */
+ #define kvm_msi_via_irqfd_enabled() (kvm_msi_via_irqfd_allowed)
+
+ /**
+ * kvm_gsi_routing_enabled:
+ *
+ * Returns: true if GSI routing is enabled (ie the kernel supports
+ * it and we're running in a configuration that permits it).
+ */
+ #define kvm_gsi_routing_enabled() (kvm_gsi_routing_allowed)
+
+ #else
+ #define kvm_enabled() (0)
+ #define kvm_irqchip_in_kernel() (false)
+ #define kvm_async_interrupts_enabled() (false)
+ #define kvm_irqfds_enabled() (false)
+ #define kvm_msi_via_irqfd_enabled() (false)
+ #define kvm_gsi_routing_allowed() (false)
+ #endif
+
+ struct kvm_run;
+ struct kvm_lapic_state;
+
+ typedef struct KVMCapabilityInfo {
+ const char *name;
+ int value;
+ } KVMCapabilityInfo;
+
+ #define KVM_CAP_INFO(CAP) { "KVM_CAP_" stringify(CAP), KVM_CAP_##CAP }
+ #define KVM_CAP_LAST_INFO { NULL, 0 }
+
+ struct KVMState;
+ typedef struct KVMState KVMState;
+ extern KVMState *kvm_state;
+
+ /* external API */
+
+ int kvm_init(void);
+
+ int kvm_has_sync_mmu(void);
+ int kvm_has_vcpu_events(void);
+ int kvm_has_robust_singlestep(void);
+ int kvm_has_debugregs(void);
+ int kvm_has_xsave(void);
+ int kvm_has_xcrs(void);
+ int kvm_has_pit_state2(void);
+ int kvm_has_many_ioeventfds(void);
+ int kvm_has_gsi_routing(void);
+ int kvm_has_intx_set_mask(void);
+
+ #ifdef NEED_CPU_H
+ int kvm_init_vcpu(CPUArchState *env);
+
+ int kvm_cpu_exec(CPUArchState *env);
+
+ #if !defined(CONFIG_USER_ONLY)
+ void *kvm_vmalloc(ram_addr_t size);
+ void *kvm_arch_vmalloc(ram_addr_t size);
+ void kvm_setup_guest_memory(void *start, size_t size);
+
+ void kvm_flush_coalesced_mmio_buffer(void);
+ #endif
+
+ int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
+ target_ulong len, int type);
+ int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
+ target_ulong len, int type);
+ void kvm_remove_all_breakpoints(CPUArchState *current_env);
+ int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap);
+ #ifndef _WIN32
+ int kvm_set_signal_mask(CPUArchState *env, const sigset_t *sigset);
+ #endif
+
+ int kvm_on_sigbus_vcpu(CPUArchState *env, int code, void *addr);
+ int kvm_on_sigbus(int code, void *addr);
+
+ /* internal API */
+
+ int kvm_ioctl(KVMState *s, int type, ...);
+
+ int kvm_vm_ioctl(KVMState *s, int type, ...);
+
-void kvm_arch_pre_run(CPUArchState *env, struct kvm_run *run);
-void kvm_arch_post_run(CPUArchState *env, struct kvm_run *run);
++int kvm_vcpu_ioctl(CPUState *cpu, int type, ...);
+
+ /* Arch specific hooks */
+
+ extern const KVMCapabilityInfo kvm_arch_required_capabilities[];
+
-int kvm_arch_handle_exit(CPUArchState *env, struct kvm_run *run);
++void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run);
++void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run);
+
-int kvm_arch_process_async_events(CPUArchState *env);
++int kvm_arch_handle_exit(CPUState *cpu, struct kvm_run *run);
+
-int kvm_arch_get_registers(CPUArchState *env);
++int kvm_arch_process_async_events(CPUState *cpu);
+
-int kvm_arch_put_registers(CPUArchState *env, int level);
++int kvm_arch_get_registers(CPUState *cpu);
+
+ /* state subset only touched by the VCPU itself during runtime */
+ #define KVM_PUT_RUNTIME_STATE 1
+ /* state subset modified during VCPU reset */
+ #define KVM_PUT_RESET_STATE 2
+ /* full state set, modified during initialization or on vmload */
+ #define KVM_PUT_FULL_STATE 3
+
-int kvm_arch_init_vcpu(CPUArchState *env);
++int kvm_arch_put_registers(CPUState *cpu, int level);
+
+ int kvm_arch_init(KVMState *s);
+
-void kvm_arch_reset_vcpu(CPUArchState *env);
++int kvm_arch_init_vcpu(CPUState *cpu);
+
-int kvm_arch_on_sigbus_vcpu(CPUArchState *env, int code, void *addr);
++void kvm_arch_reset_vcpu(CPUState *cpu);
+
-struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUArchState *env,
++int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
+ int kvm_arch_on_sigbus(int code, void *addr);
+
+ void kvm_arch_init_irq_routing(KVMState *s);
+
+ int kvm_set_irq(KVMState *s, int irq, int level);
+ int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg);
+
+ void kvm_irqchip_add_irq_route(KVMState *s, int gsi, int irqchip, int pin);
+
+ void kvm_put_apic_state(DeviceState *d, struct kvm_lapic_state *kapic);
+ void kvm_get_apic_state(DeviceState *d, struct kvm_lapic_state *kapic);
+
+ struct kvm_guest_debug;
+ struct kvm_debug_exit_arch;
+
+ struct kvm_sw_breakpoint {
+ target_ulong pc;
+ target_ulong saved_insn;
+ int use_count;
+ QTAILQ_ENTRY(kvm_sw_breakpoint) entry;
+ };
+
+ QTAILQ_HEAD(kvm_sw_breakpoint_head, kvm_sw_breakpoint);
+
-int kvm_sw_breakpoints_active(CPUArchState *env);
++struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
+ target_ulong pc);
+
-int kvm_arch_insert_sw_breakpoint(CPUArchState *current_env,
++int kvm_sw_breakpoints_active(CPUState *cpu);
+
-int kvm_arch_remove_sw_breakpoint(CPUArchState *current_env,
++int kvm_arch_insert_sw_breakpoint(CPUState *current_cpu,
+ struct kvm_sw_breakpoint *bp);
-void kvm_arch_update_guest_debug(CPUArchState *env, struct kvm_guest_debug *dbg);
++int kvm_arch_remove_sw_breakpoint(CPUState *current_cpu,
+ struct kvm_sw_breakpoint *bp);
+ int kvm_arch_insert_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type);
+ int kvm_arch_remove_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type);
+ void kvm_arch_remove_all_hw_breakpoints(void);
+
-bool kvm_arch_stop_on_emulation_error(CPUArchState *env);
++void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg);
+
++bool kvm_arch_stop_on_emulation_error(CPUState *cpu);
+
+ int kvm_check_extension(KVMState *s, unsigned int extension);
+
+ uint32_t kvm_arch_get_supported_cpuid(KVMState *env, uint32_t function,
+ uint32_t index, int reg);
+ void kvm_cpu_synchronize_state(CPUArchState *env);
+ void kvm_cpu_synchronize_post_reset(CPUArchState *env);
+ void kvm_cpu_synchronize_post_init(CPUArchState *env);
+
+ /* generic hooks - to be moved/refactored once there are more users */
+
+ static inline void cpu_synchronize_state(CPUArchState *env)
+ {
+ if (kvm_enabled()) {
+ kvm_cpu_synchronize_state(env);
+ }
+ }
+
+ static inline void cpu_synchronize_post_reset(CPUArchState *env)
+ {
+ if (kvm_enabled()) {
+ kvm_cpu_synchronize_post_reset(env);
+ }
+ }
+
+ static inline void cpu_synchronize_post_init(CPUArchState *env)
+ {
+ if (kvm_enabled()) {
+ kvm_cpu_synchronize_post_init(env);
+ }
+ }
+
+
+ #if !defined(CONFIG_USER_ONLY)
+ int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr,
+ hwaddr *phys_addr);
+ #endif
+
+ #endif
+ int kvm_set_ioeventfd_mmio(int fd, uint32_t adr, uint32_t val, bool assign,
+ uint32_t size);
+
+ int kvm_set_ioeventfd_pio_word(int fd, uint16_t adr, uint16_t val, bool assign);
+
+ int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg);
+ int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg);
+ void kvm_irqchip_release_virq(KVMState *s, int virq);
+
+ int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n, int virq);
+ int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, int virq);
+ void kvm_pc_gsi_handler(void *opaque, int n, int level);
+ void kvm_pc_setup_irq_routing(bool pci_enabled);
+ #endif
#include "cpu.h"
#include "qemu-common.h"
- #include "error.h"
++#include "qapi/error.h"
+/* Realize hook: initialize the vCPU for system emulation builds.
+ * In user-mode emulation (CONFIG_USER_ONLY) there is nothing to do.
+ * NOTE(review): errp is accepted but never set — no failure paths here. */
+static void alpha_cpu_realize(Object *obj, Error **errp)
+{
+#ifndef CONFIG_USER_ONLY
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+
+ qemu_init_vcpu(&cpu->env);
+#endif
+}
+
+/* Sort alphabetically by type name.
+ * GCompareFunc for g_slist_sort(): compares two ObjectClass pointers by
+ * their QOM type-name strings. */
+static gint alpha_cpu_list_compare(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *class_a = (ObjectClass *)a;
+ ObjectClass *class_b = (ObjectClass *)b;
+ const char *name_a, *name_b;
+
+ name_a = object_class_get_name(class_a);
+ name_b = object_class_get_name(class_b);
+ return strcmp(name_a, name_b);
+}
+
+/* g_slist_foreach() callback: print one CPU type name, indented, via the
+ * fprintf-like function captured in the CPUListState. */
+static void alpha_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ CPUListState *s = user_data;
+
+ (*s->cpu_fprintf)(s->file, " %s\n",
+ object_class_get_name(oc));
+}
+
+/* Print every registered Alpha CPU type, sorted alphabetically by type
+ * name, to f using cpu_fprintf (presumably backing '-cpu help' output —
+ * confirm against callers). */
+void alpha_cpu_list(FILE *f, fprintf_function cpu_fprintf)
+{
+ CPUListState s = {
+ .file = f,
+ .cpu_fprintf = cpu_fprintf,
+ };
+ GSList *list;
+
+ list = object_class_get_list(TYPE_ALPHA_CPU, false);
+ list = g_slist_sort(list, alpha_cpu_list_compare);
+ (*cpu_fprintf)(f, "Available CPUs:\n");
+ g_slist_foreach(list, alpha_cpu_list_entry, &s);
+ g_slist_free(list);
+}
+
+/* Models */
+
+/* Build the full QOM type name for a short CPU model string. */
+#define TYPE(model) model "-" TYPE_ALPHA_CPU
+
+/* Maps a user-visible alias (Alpha part number) to a QOM type name. */
+typedef struct AlphaCPUAlias {
+ const char *alias;
+ const char *typename;
+} AlphaCPUAlias;
+
+/* Part-number aliases for the EV-series model types below. */
+static const AlphaCPUAlias alpha_cpu_aliases[] = {
+ { "21064", TYPE("ev4") },
+ { "21164", TYPE("ev5") },
+ { "21164a", TYPE("ev56") },
+ { "21164pc", TYPE("pca56") },
+ { "21264", TYPE("ev6") },
+ { "21264a", TYPE("ev67") },
+};
+
+/* Resolve a CPU model string to its ObjectClass, trying in order:
+ *   1. an exact QOM type-name match,
+ *   2. the part-number alias table (21064, 21164, ...),
+ *   3. the "<model>-" TYPE_ALPHA_CPU naming convention.
+ * Returns NULL for a NULL model or when nothing matches.
+ * NOTE(review): the exact-name match (step 1) is not verified to be an
+ * Alpha CPU subclass — confirm callers only pass CPU type names. */
+static ObjectClass *alpha_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc = NULL;
+ char *typename;
+ int i;
+
+ if (cpu_model == NULL) {
+ return NULL;
+ }
+
+ oc = object_class_by_name(cpu_model);
+ if (oc != NULL) {
+ return oc;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(alpha_cpu_aliases); i++) {
+ if (strcmp(cpu_model, alpha_cpu_aliases[i].alias) == 0) {
+ oc = object_class_by_name(alpha_cpu_aliases[i].typename);
+ assert(oc != NULL);
+ return oc;
+ }
+ }
+
+ typename = g_strdup_printf("%s-" TYPE_ALPHA_CPU, cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ return oc;
+}
+
+/* Create and realize an Alpha CPU for the given model string.
+ * Unrecognized (or NULL) models silently fall back to ev67, per the
+ * comment below. The caller owns the returned object. */
+AlphaCPU *cpu_alpha_init(const char *cpu_model)
+{
+ AlphaCPU *cpu;
+ CPUAlphaState *env;
+ ObjectClass *cpu_class;
+
+ cpu_class = alpha_cpu_class_by_name(cpu_model);
+ if (cpu_class == NULL) {
+ /* Default to ev67; no reason not to emulate insns by default. */
+ cpu_class = object_class_by_name(TYPE("ev67"));
+ }
+ cpu = ALPHA_CPU(object_new(object_class_get_name(cpu_class)));
+ env = &cpu->env;
+
+ /* Keep the original model string for later introspection. */
+ env->cpu_model_str = cpu_model;
+
+ /* NOTE(review): errors from realize are discarded (errp == NULL). */
+ alpha_cpu_realize(OBJECT(cpu), NULL);
+ return cpu;
+}
+
+/* Per-model instance_init hooks and TypeInfo registrations. QOM runs the
+ * parent type's instance_init before the child's, so subtypes (ev56,
+ * pca56, ev67, ev68) inherit implver/amask from their parent and only
+ * add to them here. */
+
+/* EV4 (21064): 2106x implementation version. */
+static void ev4_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->implver = IMPLVER_2106x;
+}
+
+static const TypeInfo ev4_cpu_type_info = {
+ .name = TYPE("ev4"),
+ .parent = TYPE_ALPHA_CPU,
+ .instance_init = ev4_cpu_initfn,
+};
+
+/* EV5 (21164): 21164 implementation version. */
+static void ev5_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->implver = IMPLVER_21164;
+}
+
+static const TypeInfo ev5_cpu_type_info = {
+ .name = TYPE("ev5"),
+ .parent = TYPE_ALPHA_CPU,
+ .instance_init = ev5_cpu_initfn,
+};
+
+/* EV56 (21164A): EV5 plus the byte/word extension (BWX). */
+static void ev56_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->amask |= AMASK_BWX;
+}
+
+static const TypeInfo ev56_cpu_type_info = {
+ .name = TYPE("ev56"),
+ .parent = TYPE("ev5"),
+ .instance_init = ev56_cpu_initfn,
+};
+
+/* PCA56 (21164PC): EV56 plus the multimedia extension (MVI). */
+static void pca56_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->amask |= AMASK_MVI;
+}
+
+static const TypeInfo pca56_cpu_type_info = {
+ .name = TYPE("pca56"),
+ .parent = TYPE("ev56"),
+ .instance_init = pca56_cpu_initfn,
+};
+
+/* EV6 (21264): 21264 implementation version; amask set outright (not
+ * OR-ed) since this type's parent is the base Alpha CPU type. */
+static void ev6_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->implver = IMPLVER_21264;
+ env->amask = AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP;
+}
+
+static const TypeInfo ev6_cpu_type_info = {
+ .name = TYPE("ev6"),
+ .parent = TYPE_ALPHA_CPU,
+ .instance_init = ev6_cpu_initfn,
+};
+
+/* EV67 (21264A): EV6 plus count extension (CIX) and prefetch hints. */
+static void ev67_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->amask |= AMASK_CIX | AMASK_PREFETCH;
+}
+
+static const TypeInfo ev67_cpu_type_info = {
+ .name = TYPE("ev67"),
+ .parent = TYPE("ev6"),
+ .instance_init = ev67_cpu_initfn,
+};
+
+/* EV68: identical to EV67 from the emulation's point of view. */
+static const TypeInfo ev68_cpu_type_info = {
+ .name = TYPE("ev68"),
+ .parent = TYPE("ev67"),
+};
+
static void alpha_cpu_initfn(Object *obj)
{
AlphaCPU *cpu = ALPHA_CPU(obj);
/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
- #include "gen-icount.h"
+ #include "exec/gen-icount.h"
-static void alpha_translate_init(void)
+void alpha_translate_init(void)
{
int i;
char *p;