#include "qemu/guest-random.h"
#include "qemu/units.h"
#include "qemu/selfmap.h"
+#include "qemu/lockable.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "target_signal.h"
#include "accel/tcg/debuginfo.h"
* has specified -R reserved_va, which would trigger an assert().
*/
if (reserved_va != 0 &&
- TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE >= reserved_va) {
+ TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE - 1 > reserved_va) {
error_report("Cannot allocate vsyscall page");
exit(EXIT_FAILURE);
}
page_set_flags(TARGET_VSYSCALL_PAGE,
- TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE,
+ TARGET_VSYSCALL_PAGE | ~TARGET_PAGE_MASK,
PAGE_EXEC | PAGE_VALID);
return true;
}
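/*
 * A minimal illustrative sketch, not part of the patch: page_set_flags()
 * now takes the *last byte* of the range instead of the exclusive end.
 * For a page-aligned base the two spellings are identical:
 *
 *   base | ~TARGET_PAGE_MASK  ==  base + TARGET_PAGE_SIZE - 1
 *
 * e.g. with 4 KiB pages, base 0x1000 gives last byte 0x1fff.
 */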
exit(EXIT_FAILURE);
}
- page_set_flags(commpage, commpage + qemu_host_page_size,
+ page_set_flags(commpage, commpage | ~qemu_host_page_mask,
PAGE_READ | PAGE_EXEC | PAGE_VALID);
return true;
}
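/*
 * Editor's note (an inference from the surrounding code, not stated in
 * the patch): this call computes the last byte with qemu_host_page_mask
 * rather than TARGET_PAGE_MASK because the commpage here is carved out
 * of a host mmap allocation, so the flagged range must cover a whole
 * host page even when the host page size exceeds the target's.
 */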
(*regs)[36] = tswapreg(env->lr);
(*regs)[37] = tswapreg(cpu_read_xer(env));
- for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
- ccr |= env->crf[i] << (32 - ((i + 1) * 4));
- }
+ ccr = ppc_get_cr(env);
(*regs)[38] = tswapreg(ccr);
}
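/*
 * A sketch of the packing that ppc_get_cr() replaces, assuming the helper
 * mirrors the removed loop; sketch_ppc_get_cr is a hypothetical name.
 * Each of the eight 4-bit condition-register fields lands in one nibble
 * of the 32-bit CR image, crf[0] in the most significant position.
 */
static inline uint32_t sketch_ppc_get_cr(const uint32_t crf[8])
{
    uint32_t cr = 0;

    for (int i = 0; i < 8; i++) {
        cr |= crf[i] << (32 - ((i + 1) * 4));
    }
    return cr;
}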
exit(EXIT_FAILURE);
}
- page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE,
+ page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK,
PAGE_READ | PAGE_EXEC | PAGE_VALID);
return true;
}
* and implement syscalls. Here, simply mark the page executable.
* Special case the entry points during translation (see do_page_zero).
*/
- page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE,
+ page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK,
PAGE_EXEC | PAGE_VALID);
return true;
}
regs->windowstart = 1;
regs->areg[1] = infop->start_stack;
regs->pc = infop->entry;
+ if (info_is_fdpic(infop)) {
+ regs->areg[4] = infop->loadmap_addr;
+ regs->areg[5] = infop->interpreter_loadmap_addr;
+ if (infop->interpreter_loadmap_addr) {
+ regs->areg[6] = infop->interpreter_pt_dynamic_addr;
+ } else {
+ regs->areg[6] = infop->pt_dynamic_addr;
+ }
+ }
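/*
 * Editor's note: this follows the FDPIC startup convention used elsewhere
 * in this file: the executable's load map address is passed in a4, the
 * interpreter's load map in a5, and a6 carries the PT_DYNAMIC address of
 * whichever image runs first (the interpreter when present, otherwise
 * the executable itself).
 */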
}
/* See linux kernel: arch/xtensa/include/asm/elf.h. */
/* Ensure that the bss page(s) are valid */
if ((page_get_flags(last_bss-1) & prot) != prot) {
- page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot | PAGE_VALID);
+ page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss - 1,
+ prot | PAGE_VALID);
}
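/*
 * Editor's note: under the new inclusive-last convention, passing an
 * unmodified last_bss would flag one page too many whenever last_bss is
 * itself page-aligned; last_bss - 1 preserves the original
 * [elf_bss & TARGET_PAGE_MASK, last_bss) coverage exactly.
 */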
if (host_start < host_map_start) {
}
}
-#ifdef TARGET_ARM
+#if defined(TARGET_ARM)
static int elf_is_fdpic(struct elfhdr *exec)
{
return exec->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC;
}
+#elif defined(TARGET_XTENSA)
+static int elf_is_fdpic(struct elfhdr *exec)
+{
+ return exec->e_ident[EI_OSABI] == ELFOSABI_XTENSA_FDPIC;
+}
#else
/* Default implementation, always false. */
static int elf_is_fdpic(struct elfhdr *exec)
if ((guest_hiaddr - guest_base) > ~(uintptr_t)0) {
error_report("%s: requires more virtual address space "
"than the host can provide (0x%" PRIx64 ")",
- image_name, (uint64_t)guest_hiaddr - guest_base);
+ image_name, (uint64_t)guest_hiaddr + 1 - guest_base);
exit(EXIT_FAILURE);
}
#endif
/* Reserve the address space for the binary, or reserved_va. */
test = g2h_untagged(guest_loaddr);
- addr = mmap(test, guest_hiaddr - guest_loaddr, PROT_NONE, flags, -1, 0);
+ addr = mmap(test, guest_hiaddr - guest_loaddr + 1, PROT_NONE, flags, -1, 0);
if (test != addr) {
pgb_fail_in_use(image_name);
}
qemu_log_mask(CPU_LOG_PAGE,
- "%s: base @ %p for " TARGET_ABI_FMT_ld " bytes\n",
- __func__, addr, guest_hiaddr - guest_loaddr);
+ "%s: base @ %p for %" PRIu64 " bytes\n",
+ __func__, addr, (uint64_t)guest_hiaddr - guest_loaddr + 1);
}
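/*
 * Editor's worked example: guest_hiaddr is now the last mapped byte, so
 * the byte count is hiaddr - loaddr + 1.  For a single 4 KiB page,
 * loaddr = 0x1000 and hiaddr = 0x1fff give 0x1fff - 0x1000 + 1 = 0x1000
 * bytes.  The sum is widened to uint64_t (hence the switch to PRIu64) so
 * a 32-bit guest spanning its whole address space does not wrap to zero.
 */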
/**
if (hiaddr != orig_hiaddr) {
error_report("%s: requires virtual address space that the "
"host cannot provide (0x%" PRIx64 ")",
- image_name, (uint64_t)orig_hiaddr);
+ image_name, (uint64_t)orig_hiaddr + 1);
exit(EXIT_FAILURE);
}
* arithmetic wraps around.
*/
if (sizeof(uintptr_t) == 8 || loaddr >= 0x80000000u) {
- hiaddr = (uintptr_t) 4 << 30;
+ hiaddr = UINT32_MAX;
} else {
offset = -(HI_COMMPAGE & -align);
}
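/*
 * Editor's note: the exclusive end of a full 32-bit guest address space,
 * 4 GiB, does not fit in 32 bits; the inclusive last byte UINT32_MAX
 * does, which is the point of the last-byte convention this series
 * adopts.
 */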
loaddr = MIN(loaddr, LO_COMMPAGE & -align);
}
- addr = pgb_find_hole(loaddr, hiaddr - loaddr, align, offset);
+ addr = pgb_find_hole(loaddr, hiaddr - loaddr + 1, align, offset);
if (addr == -1) {
/*
* If HI_COMMPAGE, there *might* be a non-consecutive allocation
/* Reserve the memory on the host. */
assert(guest_base != 0);
test = g2h_untagged(0);
- addr = mmap(test, reserved_va, PROT_NONE, flags, -1, 0);
+ addr = mmap(test, reserved_va + 1, PROT_NONE, flags, -1, 0);
if (addr == MAP_FAILED || addr != test) {
error_report("Unable to reserve 0x%lx bytes of virtual address "
"space at %p (%s) for use as guest address space (check your "
"virtual memory ulimit setting, min_mmap_addr or reserve less "
- "using -R option)", reserved_va, test, strerror(errno));
+ "using -R option)", reserved_va + 1, test, strerror(errno));
exit(EXIT_FAILURE);
}
qemu_log_mask(CPU_LOG_PAGE, "%s: base @ %p for %lu bytes\n",
- __func__, addr, reserved_va);
+ __func__, addr, reserved_va + 1);
}
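/*
 * Editor's worked example: reserved_va now names the last byte of the
 * reserved region, so the mmap length becomes reserved_va + 1; e.g. a
 * full 4 GiB reservation is reserved_va = 0xffffffff, length
 * 0x100000000.
 */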
void probe_guest_base(const char *image_name, abi_ulong guest_loaddr,
if (a < loaddr) {
loaddr = a;
}
- a = eppnt->p_vaddr + eppnt->p_memsz;
+ a = eppnt->p_vaddr + eppnt->p_memsz - 1;
if (a > hiaddr) {
hiaddr = a;
}
* In both cases, we will overwrite pages in this range with mappings
* from the executable.
*/
- load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE,
+ load_addr = target_mmap(loaddr, (size_t)hiaddr - loaddr + 1, PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE |
(ehdr->e_type == ET_EXEC ? MAP_FIXED : 0),
-1, 0);
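/*
 * Editor's note: the (size_t) cast widens the arithmetic to host pointer
 * width before the + 1, so on a 64-bit host an image whose last byte is
 * the top of a 32-bit guest space (loaddr = 0, hiaddr = 0xffffffff)
 * maps 0x100000000 bytes instead of wrapping to 0.
 */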
static int symfind(const void *s0, const void *s1)
{
- target_ulong addr = *(target_ulong *)s0;
struct elf_sym *sym = (struct elf_sym *)s1;
+ __typeof(sym->st_value) addr = *(uint64_t *)s0;
int result = 0;
+
if (addr < sym->st_value) {
result = -1;
} else if (addr >= sym->st_value + sym->st_size) {
return result;
}
-static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
+static const char *lookup_symbolxx(struct syminfo *s, uint64_t orig_addr)
{
#if ELF_CLASS == ELFCLASS32
struct elf_sym *syms = s->disas_symtab.elf32;
info->notes_size += note_size(&info->notes[i]);
/* read and fill status of all threads */
- cpu_list_lock();
- CPU_FOREACH(cpu) {
- if (cpu == thread_cpu) {
- continue;
+ WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
+ CPU_FOREACH(cpu) {
+ if (cpu == thread_cpu) {
+ continue;
+ }
+ fill_thread_info(info, cpu->env_ptr);
}
- fill_thread_info(info, cpu->env_ptr);
}
- cpu_list_unlock();
return (0);
}
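/*
 * Editor's note: WITH_QEMU_LOCK_GUARD (from qemu/lockable.h, added to the
 * includes above) acquires qemu_cpu_list_lock for the lexical scope of
 * the block and releases it on every exit path, so the iteration stays
 * correct even if an early return is added later; the manual
 * cpu_list_lock()/cpu_list_unlock() pair it replaces offered no such
 * guarantee.
 */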