#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
+#include "qemu-timer.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#include <signal.h>
#endif
} PageDesc;
-typedef struct PhysPageDesc {
- /* offset in host memory of the page + io_index in the low bits */
- ram_addr_t phys_offset;
- ram_addr_t region_offset;
-} PhysPageDesc;
-
-/* In system mode we want L1_MAP to be based on physical addresses,
+/* In system mode we want L1_MAP to be based on ram offsets,
while in user mode we want it to be based on virtual addresses. */
#if !defined(CONFIG_USER_ONLY)
+#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
+# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
+#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
+#endif
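+/* For example, a 32-bit host emulating a target with a 36-bit physical
+ address space would otherwise need this map to span the whole 36-bit
+ range; since the map is now keyed on ram offsets, which always fit in
+ a host long, clamping to HOST_LONG_BITS loses nothing. */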
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
#endif
static void *l1_map[V_L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
+typedef struct PhysPageDesc {
+ /* offset in host memory of the page + io_index in the low bits */
+ ram_addr_t phys_offset;
+ ram_addr_t region_offset;
+} PhysPageDesc;
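+/* A sketch of the encoding, assuming the usual IO_MEM_* layout: for
+ ordinary RAM, phys_offset is the page-aligned ram offset ORed with
+ IO_MEM_RAM, while for device memory the low bits select an io_mem
+ handler; the IO_MEM_ROM/IO_MEM_ROMD tests in tlb_set_page below rely
+ on this. */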
+
/* This is a multi-level map on the physical address space.
The bottom level has pointers to PhysPageDesc. */
static void *l1_phys_map[P_L1_SIZE];
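+/* A sketch of the lookup, assuming the same walk as page_find_alloc:
+ the top bits of the page index pick an l1_phys_map slot and each
+ further level consumes L2_BITS bits, e.g.
+
+ lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
+ for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--)
+ lp = ((void **)*lp) + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
+
+ leaving *lp pointing at an L2_SIZE array of PhysPageDesc. */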
static int log_append = 0;
/* statistics */
+#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
+#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;
#endif
}
-static PageDesc *page_find_alloc(target_ulong index, int alloc)
+static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
+ PageDesc *pd;
+ void **lp;
+ int i;
+
#if defined(CONFIG_USER_ONLY)
/* We can't use qemu_malloc because it may recurse into a locked mutex.
Neither can we record the new pages we reserve while allocating a
do { P = qemu_mallocz(SIZE); } while (0)
#endif
- PageDesc *pd;
- void **lp;
- int i;
-
/* Level 1. Always allocated. */
lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
return pd + (index & (L2_SIZE - 1));
}
-static inline PageDesc *page_find(target_ulong index)
+static inline PageDesc *page_find(tb_page_addr_t index)
{
return page_find_alloc(index, 0);
}
}
if (level == 0) {
PageDesc *pd = *lp;
- for (i = 0; i < L2_BITS; ++i) {
+ for (i = 0; i < L2_SIZE; ++i) {
pd[i].first_tb = NULL;
invalidate_page_bitmap(pd + i);
}
} else {
void **pp = *lp;
- for (i = 0; i < L2_BITS; ++i) {
+ for (i = 0; i < L2_SIZE; ++i) {
page_flush_tb_1 (level - 1, pp + i);
}
}
tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
-void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
+void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
CPUState *env;
PageDesc *p;
unsigned int h, n1;
- target_phys_addr_t phys_pc;
+ tb_page_addr_t phys_pc;
TranslationBlock *tb1, *tb2;
/* remove the TB from the hash list */
{
TranslationBlock *tb;
uint8_t *tc_ptr;
- target_ulong phys_pc, phys_page2, virt_page2;
+ tb_page_addr_t phys_pc, phys_page2;
+ target_ulong virt_page2;
int code_gen_size;
- phys_pc = get_phys_addr_code(env, pc);
+ phys_pc = get_page_addr_code(env, pc);
tb = tb_alloc(pc);
if (!tb) {
/* flush must be done */
virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
phys_page2 = -1;
if ((pc & TARGET_PAGE_MASK) != virt_page2) {
- phys_page2 = get_phys_addr_code(env, virt_page2);
+ phys_page2 = get_page_addr_code(env, virt_page2);
}
- tb_link_phys(tb, phys_pc, phys_page2);
+ tb_link_page(tb, phys_pc, phys_page2);
return tb;
}
the same physical page. 'is_cpu_write_access' should be true if called
from a real cpu write access: the virtual CPU will exit the current
TB if code is modified inside this TB. */
-void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
+void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access)
{
TranslationBlock *tb, *tb_next, *saved_tb;
CPUState *env = cpu_single_env;
- target_ulong tb_start, tb_end;
+ tb_page_addr_t tb_start, tb_end;
PageDesc *p;
int n;
#ifdef TARGET_HAS_PRECISE_SMC
}
/* len must be <= 8 and start must be a multiple of len */
-static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
+static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
PageDesc *p;
int offset, b;
}
#if !defined(CONFIG_SOFTMMU)
-static void tb_invalidate_phys_page(target_phys_addr_t addr,
+static void tb_invalidate_phys_page(tb_page_addr_t addr,
unsigned long pc, void *puc)
{
TranslationBlock *tb;
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
- unsigned int n, target_ulong page_addr)
+ unsigned int n, tb_page_addr_t page_addr)
{
PageDesc *p;
TranslationBlock *last_first_tb;
continue;
prot |= p2->flags;
p2->flags &= ~PAGE_WRITE;
- page_get_flags(addr);
}
mprotect(g2h(page_addr), qemu_host_page_size,
(prot & PAGE_BITS) & ~PAGE_WRITE);
/* add a new TB and link it to the physical page tables. phys_page2 is
(-1) to indicate that only one page contains the TB. */
-void tb_link_phys(TranslationBlock *tb,
- target_ulong phys_pc, target_ulong phys_page2)
+void tb_link_page(TranslationBlock *tb,
+ tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
unsigned int h;
TranslationBlock **ptb;
}
if (level == 0) {
PhysPageDesc *pd = *lp;
- for (i = 0; i < L2_BITS; ++i) {
+ for (i = 0; i < L2_SIZE; ++i) {
if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
client->set_memory(client, pd[i].region_offset,
TARGET_PAGE_SIZE, pd[i].phys_offset);
}
} else {
void **pp = *lp;
- for (i = 0; i < L2_BITS; ++i) {
+ for (i = 0; i < L2_SIZE; ++i) {
phys_page_for_each_1(client, level - 1, pp + i);
}
}
memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
+ env->tlb_flush_addr = -1;
+ env->tlb_flush_mask = 0;
tlb_flush_count++;
}
#if defined(DEBUG_TLB)
printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
+ /* Check if we need to flush due to large pages. */
+ if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
+#if defined(DEBUG_TLB)
+ printf("tlb_flush_page: forced full flush ("
+ TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
+ env->tlb_flush_addr, env->tlb_flush_mask);
+#endif
+ tlb_flush(env, 1);
+ return;
+ }
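+ /* Worked example (hypothetical 32-bit values): after a 2MB large page
+ was registered at 0x00200000, tlb_flush_addr = 0x00200000 and
+ tlb_flush_mask = 0xFFE00000. Flushing 0x00345000 matches
+ (0x00345000 & 0xFFE00000 == 0x00200000) and forces the full flush
+ above, while 0x00400000 does not match and takes the normal
+ single-page path below. */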
/* must reset current TB so that interrupts cannot modify the
links while we are modifying them */
env->current_tb = NULL;
tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
-/* add a new TLB entry. At most one entry for a given virtual address
- is permitted. Return 0 if OK or 2 if the page could not be mapped
- (can only happen in non SOFTMMU mode for I/O pages or pages
- conflicting with the host address space). */
-int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
- target_phys_addr_t paddr, int prot,
- int mmu_idx, int is_softmmu)
+/* Our TLB does not support large pages, so remember the area covered by
+ large pages and trigger a full TLB flush if any page in that area is
+ invalidated. */
+static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
+ target_ulong size)
+{
+ target_ulong mask = ~(size - 1);
+
+ if (env->tlb_flush_addr == (target_ulong)-1) {
+ env->tlb_flush_addr = vaddr & mask;
+ env->tlb_flush_mask = mask;
+ return;
+ }
+ /* Extend the existing region to include the new page.
+ This is a compromise between unnecessary flushes and the cost
+ of maintaining a full variable-size TLB. */
+ mask &= env->tlb_flush_mask;
+ while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
+ mask <<= 1;
+ }
+ env->tlb_flush_addr &= mask;
+ env->tlb_flush_mask = mask;
+}
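+/* Worked example (hypothetical 32-bit values): with an existing 2MB
+ region addr = 0x00200000, mask = 0xFFE00000, adding another 2MB page
+ at 0x00800000 leaves the addresses differing under the combined mask
+ (0x00200000 ^ 0x00800000 = 0x00A00000), so the mask is widened
+ 0xFFE00000 -> 0xFFC00000 -> 0xFF800000 -> 0xFF000000, giving
+ addr = 0x00000000 and a 16MB region covering both pages. */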
+
+/* Add a new TLB entry. At most one entry for a given virtual address
+ is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
+ supplied size is only used by tlb_flush_page. */
+void tlb_set_page(CPUState *env, target_ulong vaddr,
+ target_phys_addr_t paddr, int prot,
+ int mmu_idx, target_ulong size)
{
PhysPageDesc *p;
unsigned long pd;
target_ulong address;
target_ulong code_address;
target_phys_addr_t addend;
- int ret;
CPUTLBEntry *te;
CPUWatchpoint *wp;
target_phys_addr_t iotlb;
+ assert(size >= TARGET_PAGE_SIZE);
+ if (size != TARGET_PAGE_SIZE) {
+ tlb_add_large_page(env, vaddr, size);
+ }
p = phys_page_find(paddr >> TARGET_PAGE_BITS);
if (!p) {
pd = IO_MEM_UNASSIGNED;
- vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
+ vaddr, (int)paddr, prot, mmu_idx, pd);
#endif
- ret = 0;
address = vaddr;
if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
/* IO memory case (romd handled later) */
} else {
te->addr_write = -1;
}
- return ret;
}
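+/* A hypothetical caller in a target's tlb_fill path would now pass the
+ architectural page size instead of the old is_softmmu flag, e.g.:
+
+ tlb_set_page(env, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
+ prot, mmu_idx, page_size);
+
+ where page_size comes from the guest page-table walk. */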
#else
};
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
- unsigned long end, int new_prot)
+ abi_ulong end, int new_prot)
{
if (data->start != -1ul) {
int rc = data->fn(data->priv, data->start, end, data->prot);
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
- unsigned long base, int level, void **lp)
+ abi_ulong base, int level, void **lp)
{
- unsigned long pa;
+ abi_ulong pa;
int i, rc;
if (*lp == NULL) {
if (level == 0) {
PageDesc *pd = *lp;
- for (i = 0; i < L2_BITS; ++i) {
+ for (i = 0; i < L2_SIZE; ++i) {
int prot = pd[i].flags;
pa = base | (i << TARGET_PAGE_BITS);
}
} else {
void **pp = *lp;
- for (i = 0; i < L2_BITS; ++i) {
- pa = base | (i << (TARGET_PAGE_BITS + L2_BITS * level));
+ for (i = 0; i < L2_SIZE; ++i) {
+ pa = base | ((abi_ulong)i <<
+ (TARGET_PAGE_BITS + L2_BITS * level));
rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
if (rc != 0) {
return rc;
data.prot = 0;
for (i = 0; i < V_L1_SIZE; i++) {
- int rc = walk_memory_regions_1(&data, i << V_L1_SHIFT,
+ int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
V_L1_SHIFT / L2_BITS - 1, l1_map + i);
if (rc != 0) {
return rc;
return walk_memory_regions_end(&data, 0, 0);
}
-static int dump_region(void *priv, unsigned long start,
- unsigned long end, unsigned long prot)
+static int dump_region(void *priv, abi_ulong start,
+ abi_ulong end, unsigned long prot)
{
FILE *f = (FILE *)priv;
- (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
+ (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
+ " "TARGET_ABI_FMT_lx" %c%c%c\n",
start, end, end - start,
((prot & PAGE_READ) ? 'r' : '-'),
((prot & PAGE_WRITE) ? 'w' : '-'),
/* This function should never be called with addresses outside the
guest address space. If this assert fires, it probably indicates
a missing call to h2g_valid. */
-#if HOST_LONG_BITS > L1_MAP_ADDR_SPACE_BITS
- assert(end < (1ul << L1_MAP_ADDR_SPACE_BITS));
+#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
+ assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
assert(start < end);
/* This function should never be called with addresses outside the
guest address space. If this assert fires, it probably indicates
a missing call to h2g_valid. */
-#if HOST_LONG_BITS > L1_MAP_ADDR_SPACE_BITS
- assert(start < (1ul << L1_MAP_ADDR_SPACE_BITS));
+#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
+ assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
if (start + len - 1 < start) {
} while (ret != 0 && errno == EINTR);
if (ret != 0) {
- perror("statfs");
+ perror(path);
return 0;
}
fd = mkstemp(filename);
if (fd < 0) {
- perror("mkstemp");
+ perror("unable to create backing store for hugepages");
free(filename);
return NULL;
}
cpu_resume_from_signal(env, NULL);
}
+#if !defined(CONFIG_USER_ONLY)
+
void dump_exec_info(FILE *f,
int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
tcg_dump_info(f, cpu_fprintf);
}
-#if !defined(CONFIG_USER_ONLY)
-
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env