#endif
-CPUArchState *first_cpu;
+CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
cpu_exec() */
-DEFINE_TLS(CPUArchState *,cpu_single_env);
+DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
1 = Precise instruction counting.
2 = Adaptive rate instruction counting. */
uint16_t ptr : 15;
};
+typedef PhysPageEntry Node[L2_SIZE];
+
struct AddressSpaceDispatch {
/* This is a multi-level map on the physical address space.
* The bottom level has pointers to MemoryRegionSections.
*/
PhysPageEntry phys_map;
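+ /* Flat storage backing phys_map: the radix-tree node array and the
+ MemoryRegionSection array that its leaf entries index into. */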
+ Node *nodes;
+ MemoryRegionSection *sections;
AddressSpace *as;
};
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
-typedef PhysPageEntry Node[L2_SIZE];
-
typedef struct PhysPageMap {
unsigned sections_nb;
unsigned sections_nb_alloc;
unsigned nodes_nb;
unsigned nodes_nb_alloc;
Node *nodes;
MemoryRegionSection *sections;
} PhysPageMap;
-static PhysPageMap cur_map;
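+/* The map being retired: it stays in use for lookups until every listener
+ has switched to the one built in next_map; core_commit() then frees it. */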
+static PhysPageMap *prev_map;
static PhysPageMap next_map;
#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
&& mr != &io_mem_watch;
}
-static MemoryRegionSection *address_space_lookup_region(AddressSpace *as,
+static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
hwaddr addr,
bool resolve_subpage)
{
MemoryRegionSection *section;
subpage_t *subpage;
- section = phys_page_find(as->dispatch->phys_map, addr >> TARGET_PAGE_BITS,
- cur_map.nodes, cur_map.sections);
+ section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
+ d->nodes, d->sections);
if (resolve_subpage && section->mr->subpage) {
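+ /* The region is a subpage container; narrow the result to the
+ subsection that actually covers addr. */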
subpage = container_of(section->mr, subpage_t, iomem);
- section = &cur_map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
+ section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
}
return section;
}
static MemoryRegionSection *
-address_space_translate_internal(AddressSpace *as, hwaddr addr, hwaddr *xlat,
+address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
hwaddr *plen, bool resolve_subpage)
{
MemoryRegionSection *section;
Int128 diff;
- section = address_space_lookup_region(as, addr, resolve_subpage);
+ section = address_space_lookup_region(d, addr, resolve_subpage);
/* Compute offset within MemoryRegionSection */
addr -= section->offset_within_address_space;
hwaddr len = *plen;
for (;;) {
- section = address_space_translate_internal(as, addr, &addr, plen, true);
+ section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
mr = section->mr;
if (!mr->iommu_ops) {
hwaddr *plen)
{
MemoryRegionSection *section;
- section = address_space_translate_internal(as, addr, xlat, plen, false);
+ section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
assert(!section->mr->iommu_ops);
return section;
CPUState *qemu_get_cpu(int index)
{
- CPUArchState *env = first_cpu;
- CPUState *cpu = NULL;
+ CPUState *cpu = first_cpu;
- while (env) {
- cpu = ENV_GET_CPU(env);
+ while (cpu) {
if (cpu->cpu_index == index) {
break;
}
- env = env->next_cpu;
+ cpu = cpu->next_cpu;
}
- return env ? cpu : NULL;
+ return cpu;
}
void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
- CPUArchState *env = first_cpu;
+ CPUState *cpu;
- while (env) {
- func(ENV_GET_CPU(env), data);
- env = env->next_cpu;
+ cpu = first_cpu;
+ while (cpu) {
+ func(cpu, data);
+ cpu = cpu->next_cpu;
}
}
{
CPUState *cpu = ENV_GET_CPU(env);
CPUClass *cc = CPU_GET_CLASS(cpu);
- CPUArchState **penv;
+ CPUState **pcpu;
int cpu_index;
#if defined(CONFIG_USER_ONLY)
cpu_list_lock();
#endif
- env->next_cpu = NULL;
- penv = &first_cpu;
+ cpu->next_cpu = NULL;
+ pcpu = &first_cpu;
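+ /* Walk to the tail of the global CPU list, counting the existing
+ CPUs to assign the next free index. */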
cpu_index = 0;
- while (*penv != NULL) {
- penv = &(*penv)->next_cpu;
+ while (*pcpu != NULL) {
+ pcpu = &(*pcpu)->next_cpu;
cpu_index++;
}
cpu->cpu_index = cpu_index;
#ifndef CONFIG_USER_ONLY
cpu->thread_id = qemu_get_thread_id();
#endif
- *penv = env;
+ *pcpu = cpu;
#if defined(CONFIG_USER_ONLY)
cpu_list_unlock();
#endif
CPUArchState *cpu_copy(CPUArchState *env)
{
CPUArchState *new_env = cpu_init(env->cpu_model_str);
- CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
CPUBreakpoint *bp;
CPUWatchpoint *wp;
memcpy(new_env, env, sizeof(CPUArchState));
- /* Preserve chaining. */
- new_env->next_cpu = next_cpu;
-
/* Clone all break/watchpoints.
Note: Once we support ptrace with hw-debug register access, make sure
BP_CPU break/watchpoints are handled correctly on clone. */
iotlb |= PHYS_SECTION_ROM;
}
} else {
- iotlb = section - cur_map.sections;
+ iotlb = section - address_space_memory.dispatch->sections;
iotlb += xlat;
}
}
}
-static void phys_sections_clear(PhysPageMap *map)
+static void phys_sections_free(PhysPageMap *map)
{
while (map->sections_nb > 0) {
MemoryRegionSection *section = &map->sections[--map->sections_nb];
}
g_free(map->sections);
g_free(map->nodes);
+ g_free(map);
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
/* we remove the notdirty callback only if the code has been
flushed */
- if (dirty_flags == 0xff)
- tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
+ if (dirty_flags == 0xff) {
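+ /* current_cpu issued this store; make its TLB entry for the page
+ writable again now that the translated code has been flushed. */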
+ CPUArchState *env = current_cpu->env_ptr;
+ tlb_set_dirty(env, env->mem_io_vaddr);
+ }
}
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
/* Generate a debug exception if a watchpoint has been hit. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
- CPUArchState *env = cpu_single_env;
+ CPUArchState *env = current_cpu->env_ptr;
target_ulong pc, cs_base;
target_ulong vaddr;
CPUWatchpoint *wp;
MemoryRegion *iotlb_to_region(hwaddr index)
{
- return cur_map.sections[index & ~TARGET_PAGE_MASK].mr;
+ return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
static void mem_commit(MemoryListener *listener)
{
AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
- AddressSpaceDispatch *d = as->dispatch;
+ AddressSpaceDispatch *cur = as->dispatch;
+ AddressSpaceDispatch *next = as->next_dispatch;
- /* cur_map will soon be switched to next_map, too. */
- as->dispatch = as->next_dispatch;
- g_free(d);
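+ /* Hand the arrays just built in next_map over to the new dispatch
+ before it is published. */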
+ next->nodes = next_map.nodes;
+ next->sections = next_map.sections;
+
+ as->dispatch = next;
+ g_free(cur);
}
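+/* core_begin()/core_commit() bracket the rebuild of the physical memory
+ map: begin stashes the map currently in use in prev_map and starts a
+ fresh next_map; commit runs after mem_commit() has switched every
+ dispatch over, so the stashed map can be freed. */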
static void core_begin(MemoryListener *listener)
{
uint16_t n;
+ prev_map = g_new(PhysPageMap, 1);
+ *prev_map = next_map;
+
memset(&next_map, 0, sizeof(next_map));
n = dummy_section(&io_mem_unassigned);
assert(n == PHYS_SECTION_UNASSIGNED);
*/
static void core_commit(MemoryListener *listener)
{
- PhysPageMap info = cur_map;
- cur_map = next_map;
- phys_sections_clear(&info);
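+ /* Every dispatch now points at the new map; release the old one. */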
+ phys_sections_free(prev_map);
}
static void tcg_commit(MemoryListener *listener)
{
- CPUArchState *env;
+ CPUState *cpu;
/* since each CPU stores ram addresses in its TLB cache, we must
reset the modified entries */
/* XXX: slow ! */
- for(env = first_cpu; env != NULL; env = env->next_cpu) {
+ for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
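+ /* The TLB is still part of the per-target CPUArchState, reached
+ through env_ptr. */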
+ CPUArchState *env = cpu->env_ptr;
+
tlb_flush(env, 1);
}
}
if (is_write) {
if (!memory_access_is_direct(mr, is_write)) {
l = memory_access_size(mr, l, addr1);
- /* XXX: could force cpu_single_env to NULL to avoid
+ /* XXX: could force current_cpu to NULL to avoid
potential bugs */
if (l == 4) {
/* 32 bit write access */