#include "qemu/plugin.h"
#include "exec/exec-all.h"
#include "exec/gdbstub.h"
-#include "tcg/tcg.h"
+#include "gdbstub/user.h"
+#include "tcg/startup.h"
#include "qemu/timer.h"
#include "qemu/envlist.h"
#include "qemu/guest-random.h"
#include "signal-common.h"
#include "loader.h"
#include "user-mmap.h"
+#include "accel/tcg/perf.h"
+
+#ifdef CONFIG_SEMIHOSTING
+#include "semihosting/semihost.h"
+#endif
#ifndef AT_FLAGS_PRESERVE_ARGV0
#define AT_FLAGS_PRESERVE_ARGV0_BIT 0
#endif
char *exec_path;
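+/* Canonical (realpath-resolved) copy of exec_path, filled in during startup. */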
+char real_exec_path[PATH_MAX];
-int singlestep;
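+/* Set by the -one-insn-per-tb option; later applied to the accelerator. */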
+static bool opt_one_insn_per_tb;
static const char *argv0;
static const char *gdbstub;
static envlist_t *envlist;
# if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
# if TARGET_VIRT_ADDR_SPACE_BITS == 32 && \
(TARGET_LONG_BITS == 32 || defined(TARGET_ABI32))
-/* There are a number of places where we assign reserved_va to a variable
- of type abi_ulong and expect it to fit. Avoid the last page. */
-# define MAX_RESERVED_VA(CPU) (0xfffffffful & TARGET_PAGE_MASK)
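+/*
+ * The value is the inclusive last byte of the address space:
+ * reserved_va stores size - 1, not the size (see the -R handler).
+ */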
+# define MAX_RESERVED_VA(CPU) 0xfffffffful
# else
-# define MAX_RESERVED_VA(CPU) (1ul << TARGET_VIRT_ADDR_SPACE_BITS)
+# define MAX_RESERVED_VA(CPU) ((1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
# endif
# else
# define MAX_RESERVED_VA(CPU) 0
static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
const char *qemu_uname_release;
+#if !defined(TARGET_DEFAULT_STACK_SIZE)
/* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
we allocate a bigger stack. Need a better solution, for example
by remapping the process stack directly at the right place */
-unsigned long guest_stack_size = 8 * 1024 * 1024UL;
+#define TARGET_DEFAULT_STACK_SIZE (8 * 1024 * 1024UL)
+#endif
+
+unsigned long guest_stack_size = TARGET_DEFAULT_STACK_SIZE;
/***********************************************************/
/* Helper routines for implementing atomic operations. */
start_exclusive();
mmap_fork_start();
cpu_list_lock();
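+    /* Take the plugin lock too, so no other thread holds it across fork(). */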
+ qemu_plugin_user_prefork_lock();
}
void fork_end(int child)
{
+ qemu_plugin_user_postfork(child);
mmap_fork_end(child);
if (child) {
CPUState *cpu, *next_cpu;
Discard information about the parent threads. */
CPU_FOREACH_SAFE(cpu, next_cpu) {
if (cpu != thread_cpu) {
- QTAILQ_REMOVE_RCU(&cpus, cpu, node);
+ QTAILQ_REMOVE_RCU(&cpus_queue, cpu, node);
}
}
qemu_init_cpu_list();
gdbserver_fork(thread_cpu);
- /* qemu_init_cpu_list() takes care of reinitializing the
- * exclusive state, so we don't need to end_exclusive() here.
- */
} else {
cpu_list_unlock();
- end_exclusive();
}
+ /*
+ * qemu_init_cpu_list() reinitialized the child exclusive state, but we
+ * also need to keep current_cpu consistent, so call end_exclusive() for
+ * both child and parent.
+ */
+ end_exclusive();
}
__thread CPUState *thread_cpu;
{
CPUState *cpu = env_cpu(env);
CPUState *new_cpu = cpu_create(cpu_type);
- CPUArchState *new_env = new_cpu->env_ptr;
+ CPUArchState *new_env = cpu_env(new_cpu);
CPUBreakpoint *bp;
/* Reset non-arch-specific state */
new_cpu->tcg_cflags = cpu->tcg_cflags;
memcpy(new_env, env, sizeof(CPUArchState));
+#if defined(TARGET_I386) || defined(TARGET_X86_64)
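+    /*
+     * x86 keeps the GDT in guest memory; give the new cpu its own
+     * mapping and copy the parent's descriptors.  The Object free
+     * hook is copied as well so the clone is released the same way.
+     */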
+ new_env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
+ PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ memcpy(g2h_untagged(new_env->gdt.base), g2h_untagged(env->gdt.base),
+ sizeof(uint64_t) * TARGET_GDT_ENTRIES);
+ OBJECT(new_cpu)->free = OBJECT(cpu)->free;
+#endif
/* Clone all break/watchpoints.
Note: Once we support ptrace with hw-debug register access, make sure
{
cpu_model = strdup(arg);
if (cpu_model == NULL || is_help_option(cpu_model)) {
- /* XXX: implement xxx_cpu_list for targets that still miss it */
-#if defined(cpu_list)
- cpu_list();
-#endif
+ list_cpus();
exit(EXIT_FAILURE);
}
}
{
char *p;
int shift = 0;
- reserved_va = strtoul(arg, &p, 0);
+ unsigned long val;
+
+ val = strtoul(arg, &p, 0);
switch (*p) {
case 'k':
case 'K':
break;
}
if (shift) {
- unsigned long unshifted = reserved_va;
+ unsigned long unshifted = val;
p++;
- reserved_va <<= shift;
- if (reserved_va >> shift != unshifted) {
+ val <<= shift;
+ if (val >> shift != unshifted) {
fprintf(stderr, "Reserved virtual address too big\n");
exit(EXIT_FAILURE);
}
fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
exit(EXIT_FAILURE);
}
+ /* The representation is size - 1, with 0 remaining "default". */
+ reserved_va = val ? val - 1 : 0;
}
-static void handle_arg_singlestep(const char *arg)
+static void handle_arg_one_insn_per_tb(const char *arg)
{
- singlestep = 1;
+ opt_one_insn_per_tb = true;
}
static void handle_arg_strace(const char *arg)
}
#endif
+static void handle_arg_perfmap(const char *arg)
+{
+ perf_enable_perfmap();
+}
+
+static void handle_arg_jitdump(const char *arg)
+{
+ perf_enable_jitdump();
+}
+
static QemuPluginList plugins = QTAILQ_HEAD_INITIALIZER(plugins);
#ifdef CONFIG_PLUGIN
"logfile", "write logs to 'logfile' (default stderr)"},
{"p", "QEMU_PAGESIZE", true, handle_arg_pagesize,
"pagesize", "set the host page size to 'pagesize'"},
- {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep,
- "", "run in singlestep mode"},
+ {"one-insn-per-tb",
+ "QEMU_ONE_INSN_PER_TB", false, handle_arg_one_insn_per_tb,
+ "", "run with one guest instruction per emulated TB"},
{"strace", "QEMU_STRACE", false, handle_arg_strace,
"", "log system calls"},
{"seed", "QEMU_RAND_SEED", true, handle_arg_seed,
{"xtensa-abi-call0", "QEMU_XTENSA_ABI_CALL0", false, handle_arg_abi_call0,
"", "assume CALL0 Xtensa ABI"},
#endif
+ {"perfmap", "QEMU_PERFMAP", false, handle_arg_perfmap,
+ "", "Generate a /tmp/perf-${pid}.map file for perf"},
+ {"jitdump", "QEMU_JITDUMP", false, handle_arg_jitdump,
+ "", "Generate a jit-${pid}.dump file for perf"},
{NULL, NULL, false, NULL, NULL, NULL}
};
envlist = envlist_create();
-    /* add current environment into the list */
+    /*
+     * Add the current environment into the list.  envlist_setenv()
+     * prepends entries, so walk environ from back to front to
+     * preserve its order.
+     */
for (wrk = environ; *wrk != NULL; wrk++) {
+ continue;
+ }
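+    /* wrk now points at environ's terminating NULL; walk backwards. */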
+ while (wrk != environ) {
+ wrk--;
(void) envlist_setenv(envlist, *wrk);
}
struct rlimit lim;
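+        /* Grow the default guest stack to RLIMIT_STACK, but never shrink it. */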
if (getrlimit(RLIMIT_STACK, &lim) == 0
&& lim.rlim_cur != RLIM_INFINITY
- && lim.rlim_cur == (target_long)lim.rlim_cur) {
+ && lim.rlim_cur == (target_long)lim.rlim_cur
+ && lim.rlim_cur > guest_stack_size) {
guest_stack_size = lim.rlim_cur;
}
}
}
}
+    /* Resolve the executable file name to a full path. */
+ if (realpath(exec_path, real_exec_path)) {
+ exec_path = real_exec_path;
+ }
+
/*
* get binfmt_misc flags
*/
/* init tcg before creating CPUs and to get qemu_host_page_size */
{
- AccelClass *ac = ACCEL_GET_CLASS(current_accel());
+ AccelState *accel = current_accel();
+ AccelClass *ac = ACCEL_GET_CLASS(accel);
accel_init_interfaces(ac);
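+    /* Forward -one-insn-per-tb to the accelerator as a property. */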
+ object_property_set_bool(OBJECT(accel), "one-insn-per-tb",
+ opt_one_insn_per_tb, &error_abort);
ac->init_machine(NULL);
}
cpu = cpu_create(cpu_type);
- env = cpu->env_ptr;
+ env = cpu_env(cpu);
cpu_reset(cpu);
thread_cpu = cpu;
*/
max_reserved_va = MAX_RESERVED_VA(cpu);
if (reserved_va != 0) {
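+        /*
+         * reserved_va is the inclusive last byte, so the size being
+         * checked for host-page alignment is reserved_va + 1.
+         */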
+ if ((reserved_va + 1) % qemu_host_page_size) {
+ char *s = size_to_str(qemu_host_page_size);
+ fprintf(stderr, "Reserved virtual address not aligned mod %s\n", s);
+ g_free(s);
+ exit(EXIT_FAILURE);
+ }
if (max_reserved_va && reserved_va > max_reserved_va) {
fprintf(stderr, "Reserved virtual address too big\n");
exit(EXIT_FAILURE);
}
} else if (HOST_LONG_BITS == 64 && TARGET_VIRT_ADDR_SPACE_BITS <= 32) {
- /*
- * reserved_va must be aligned with the host page size
- * as it is used with mmap()
- */
- reserved_va = max_reserved_va & qemu_host_page_mask;
+ /* MAX_RESERVED_VA + 1 is a large power of 2, so is aligned. */
+ reserved_va = max_reserved_va;
+ }
+
+ /*
+ * Temporarily disable
+ * "comparison is always false due to limited range of data type"
+ * due to comparison between (possible) uint64_t and uintptr_t.
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wtype-limits"
+
+ /*
+ * Select an initial value for task_unmapped_base that is in range.
+ */
+ if (reserved_va) {
+ if (TASK_UNMAPPED_BASE < reserved_va) {
+ task_unmapped_base = TASK_UNMAPPED_BASE;
+ } else {
+ /* The most common default formula is TASK_SIZE / 3. */
+ task_unmapped_base = TARGET_PAGE_ALIGN(reserved_va / 3);
+ }
+ } else if (TASK_UNMAPPED_BASE < UINTPTR_MAX) {
+ task_unmapped_base = TASK_UNMAPPED_BASE;
+ } else {
+        /* 32-bit host: pick a medium-sized default. */
+ task_unmapped_base = 0x10000000;
+ }
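+    /* Start the mmap() placement search from the chosen base. */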
+ mmap_next_start = task_unmapped_base;
+
+ /* Similarly for elf_et_dyn_base. */
+ if (reserved_va) {
+ if (ELF_ET_DYN_BASE < reserved_va) {
+ elf_et_dyn_base = ELF_ET_DYN_BASE;
+ } else {
+ /* The most common default formula is TASK_SIZE / 3 * 2. */
+ elf_et_dyn_base = TARGET_PAGE_ALIGN(reserved_va / 3) * 2;
+ }
+ } else if (ELF_ET_DYN_BASE < UINTPTR_MAX) {
+ elf_et_dyn_base = ELF_ET_DYN_BASE;
+ } else {
+        /* 32-bit host: pick a medium-sized default. */
+ elf_et_dyn_base = 0x18000000;
}
+#pragma GCC diagnostic pop
+
{
Error *err = NULL;
if (seed_optarg != NULL) {
fprintf(f, "page layout changed following binary load\n");
page_dump(f);
- fprintf(f, "start_brk 0x" TARGET_ABI_FMT_lx "\n",
- info->start_brk);
fprintf(f, "end_code 0x" TARGET_ABI_FMT_lx "\n",
info->end_code);
fprintf(f, "start_code 0x" TARGET_ABI_FMT_lx "\n",
/* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
generating the prologue until now so that the prologue can take
the real value of GUEST_BASE into account. */
- tcg_prologue_init(tcg_ctx);
+ tcg_prologue_init();
target_cpu_copy_regs(env, regs);
}
gdb_handlesig(cpu, 0);
}
+
+#ifdef CONFIG_SEMIHOSTING
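+    /* Initialise semihosting's guest file descriptor state before use. */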
+ qemu_semihosting_guestfd_init();
+#endif
+
cpu_loop(env);
/* never exits */
return 0;