* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
+#include <sys/shm.h>
#include "trace.h"
#include "exec/log.h"
#include "qemu.h"
#include "user-internals.h"
#include "user-mmap.h"
#include "target_mman.h"
+#include "qemu/interval-tree.h"
+
+#ifdef TARGET_ARM
+#include "target/arm/cpu-features.h"
+#endif
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;
}
}
+/* Protected by mmap_lock. */
+static IntervalTreeRoot shm_regions;
+
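+/* Record the extent [START, LAST] of a new shm attachment. */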
+static void shm_region_add(abi_ptr start, abi_ptr last)
+{
+ IntervalTreeNode *i = g_new0(IntervalTreeNode, 1);
+
+ i->start = start;
+ i->last = last;
+ interval_tree_insert(i, &shm_regions);
+}
+
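+/* Return the last byte of the shm region that starts at START, or 0 if none. */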
+static abi_ptr shm_region_find(abi_ptr start)
+{
+ IntervalTreeNode *i;
+
+ for (i = interval_tree_iter_first(&shm_regions, start, start); i;
+ i = interval_tree_iter_next(i, start, start)) {
+ if (i->start == start) {
+ return i->last;
+ }
+ }
+ return 0;
+}
+
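+/* Forget all shm regions that lie entirely within [START, LAST]. */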
+static void shm_region_rm_complete(abi_ptr start, abi_ptr last)
+{
+ IntervalTreeNode *i, *n;
+
+ for (i = interval_tree_iter_first(&shm_regions, start, last); i; i = n) {
+ n = interval_tree_iter_next(i, start, last);
+ if (i->start >= start && i->last <= last) {
+ interval_tree_remove(i, &shm_regions);
+ g_free(i);
+ }
+ }
+}
+
/*
* Validate target prot bitmask.
* Return the prot bitmask for the host in *HOST_PROT.
void *p = mmap(host_start, qemu_host_page_size,
target_to_host_prot(prot),
flags | MAP_ANONYMOUS, -1, 0);
- if (p == MAP_FAILED) {
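+ /*
+ * If the host kernel placed the mapping at a different address, the
+ * requested range is already in use: undo the mapping and report
+ * EEXIST, as MAP_FIXED_NOREPLACE would.
+ */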
+ if (p != host_start) {
+ if (p != MAP_FAILED) {
+ munmap(p, qemu_host_page_size);
+ errno = EEXIST;
+ }
return false;
}
prot_old = prot;
return true;
}
-#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
-#ifdef TARGET_AARCH64
-# define TASK_UNMAPPED_BASE 0x5500000000
-#else
-# define TASK_UNMAPPED_BASE (1ul << 38)
-#endif
-#else
-#ifdef TARGET_HPPA
-# define TASK_UNMAPPED_BASE 0xfa000000
-#else
-# define TASK_UNMAPPED_BASE 0x40000000
-#endif
-#endif
-abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;
-
-unsigned long last_brk;
+abi_ulong task_unmapped_base;
+abi_ulong elf_et_dyn_base;
+abi_ulong mmap_next_start;
/*
* Subroutine of mmap_find_vma, used when we have pre-allocated
if ((addr & (align - 1)) == 0) {
/* Success. */
- if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
+ if (start == mmap_next_start && addr >= task_unmapped_base) {
mmap_next_start = addr + size;
}
return addr;
/* map the middle (easier) */
if (real_start < real_last) {
- void *p;
+ void *p, *want_p;
off_t offset1;
+ size_t len1;
if (flags & MAP_ANONYMOUS) {
offset1 = 0;
} else {
offset1 = offset + real_start - start;
}
- p = mmap(g2h_untagged(real_start), real_last - real_start + 1,
- target_to_host_prot(target_prot), flags, fd, offset1);
- if (p == MAP_FAILED) {
+ len1 = real_last - real_start + 1;
+ want_p = g2h_untagged(real_start);
+
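+ /* If the host placed the mapping elsewhere, the range is already in use. */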
+ p = mmap(want_p, len1, target_to_host_prot(target_prot),
+ flags, fd, offset1);
+ if (p != want_p) {
+ if (p != MAP_FAILED) {
+ munmap(p, len1);
+ errno = EEXIST;
+ }
goto fail;
}
passthrough_start = real_start;
page_set_flags(passthrough_last + 1, last, page_flags);
}
}
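+ /* A new mapping supersedes any shm segment attached within the range. */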
+ shm_region_rm_complete(start, last);
the_end:
trace_target_mmap_complete(start);
if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
return -1;
}
-static void mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
+static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
{
abi_ulong real_start;
abi_ulong real_last;
prot |= page_get_flags(a + 1);
}
if (prot != 0) {
- return;
+ return 0;
}
} else {
for (prot = 0, a = real_start; a < start; a += TARGET_PAGE_SIZE) {
}
if (real_last < real_start) {
- return;
+ return 0;
}
}
void *ptr = mmap(host_start, real_len, PROT_NONE,
MAP_FIXED | MAP_ANONYMOUS
| MAP_PRIVATE | MAP_NORESERVE, -1, 0);
- assert(ptr == host_start);
- } else {
- int ret = munmap(host_start, real_len);
- assert(ret == 0);
+ return ptr == host_start ? 0 : -1;
}
+ return munmap(host_start, real_len);
}
int target_munmap(abi_ulong start, abi_ulong len)
{
+ int ret;
+
trace_target_munmap(start, len);
if (start & ~TARGET_PAGE_MASK) {
- return -TARGET_EINVAL;
+ errno = EINVAL;
+ return -1;
}
len = TARGET_PAGE_ALIGN(len);
if (len == 0 || !guest_range_valid_untagged(start, len)) {
- return -TARGET_EINVAL;
+ errno = EINVAL;
+ return -1;
}
mmap_lock();
- mmap_reserve_or_unmap(start, len);
- page_set_flags(start, start + len - 1, 0);
+ ret = mmap_reserve_or_unmap(start, len);
+ if (likely(ret == 0)) {
+ page_set_flags(start, start + len - 1, 0);
+ shm_region_rm_complete(start, start + len - 1);
+ }
mmap_unlock();
- return 0;
+ return ret;
}
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
}
}
} else {
- int prot = 0;
+ int page_flags = 0;
if (reserved_va && old_size < new_size) {
abi_ulong addr;
for (addr = old_addr + old_size;
addr < old_addr + new_size;
addr++) {
- prot |= page_get_flags(addr);
+ page_flags |= page_get_flags(addr);
}
}
- if (prot == 0) {
+ if (page_flags == 0) {
host_addr = mremap(g2h_untagged(old_addr),
old_size, new_size, flags);
new_addr = h2g(host_addr);
prot = page_get_flags(old_addr);
page_set_flags(old_addr, old_addr + old_size - 1, 0);
+ shm_region_rm_complete(old_addr, old_addr + old_size - 1);
page_set_flags(new_addr, new_addr + new_size - 1,
prot | PAGE_VALID | PAGE_RESET);
+ shm_region_rm_complete(new_addr, new_addr + new_size - 1);
}
mmap_unlock();
return new_addr;
return ret;
}
+
+#ifndef TARGET_FORCE_SHMLBA
+/*
+ * For most architectures, SHMLBA is the same as the page size;
+ * some architectures have larger values, in which case they should
+ * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
+ * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
+ * and providing its own value for SHMLBA.
+ *
+ * The kernel also permits SHMLBA to be set by the architecture to a
+ * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
+ * this means that addresses are rounded to the large size if
+ * SHM_RND is set, but addresses not aligned to that size are not rejected
+ * as long as they are at least page-aligned. Since the only architecture
+ * which uses this is ia64, this code doesn't provide for that oddity.
+ */
+static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
+{
+ return TARGET_PAGE_SIZE;
+}
+#endif
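+
+/*
+ * For example, MIPS needs a 256 KiB SHMLBA because of dcache aliasing;
+ * its linux-user/mips/target_syscall.h defines TARGET_FORCE_SHMLBA and
+ * provides roughly:
+ *
+ *     static inline abi_ulong target_shmlba(CPUMIPSState *env)
+ *     {
+ *         return 0x40000;
+ *     }
+ */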
+
+abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
+ abi_ulong shmaddr, int shmflg)
+{
+ CPUState *cpu = env_cpu(cpu_env);
+ abi_ulong raddr;
+ struct shmid_ds shm_info;
+ int ret;
+ abi_ulong shmlba;
+
+ /* shmat pointers are always untagged */
+
+ /* find out the length of the shared memory segment */
+ ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
+ if (is_error(ret)) {
+ /* can't get length, bail out */
+ return ret;
+ }
+
+ shmlba = target_shmlba(cpu_env);
+
+ if (shmaddr & (shmlba - 1)) {
+ if (shmflg & SHM_RND) {
+ shmaddr &= ~(shmlba - 1);
+ } else {
+ return -TARGET_EINVAL;
+ }
+ }
+ if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
+ return -TARGET_EINVAL;
+ }
+
+ WITH_MMAP_LOCK_GUARD() {
+ void *host_raddr;
+ abi_ulong last;
+
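+ /*
+ * With an explicit address, attach directly at the guest address.
+ * Otherwise, find a free guest range first, since the host kernel
+ * would pick an address that may be outside the guest address space.
+ */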
+ if (shmaddr) {
+ host_raddr = shmat(shmid, g2h_untagged(shmaddr), shmflg);
+ } else {
+ abi_ulong mmap_start;
+
+ /* In order to use the host shmat, we need to honor host SHMLBA. */
+ mmap_start = mmap_find_vma(0, shm_info.shm_segsz,
+ MAX(SHMLBA, shmlba));
+
+ if (mmap_start == -1) {
+ return -TARGET_ENOMEM;
+ }
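+ /*
+ * SHM_REMAP allows shmat to replace the PROT_NONE reservation
+ * that covers this range when reserved_va is in effect.
+ */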
+ host_raddr = shmat(shmid, g2h_untagged(mmap_start),
+ shmflg | SHM_REMAP);
+ }
+
+ if (host_raddr == (void *)-1) {
+ return get_errno(-1);
+ }
+ raddr = h2g(host_raddr);
+ last = raddr + shm_info.shm_segsz - 1;
+
+ page_set_flags(raddr, last,
+ PAGE_VALID | PAGE_RESET | PAGE_READ |
+ (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
+
+ shm_region_rm_complete(raddr, last);
+ shm_region_add(raddr, last);
+ }
+
+ /*
+ * We're mapping shared memory, so ensure we generate code for parallel
+ * execution and flush old translations. This will work up to the level
+ * supported by the host -- anything that requires EXCP_ATOMIC will not
+ * be atomic with respect to an external process.
+ */
+ if (!(cpu->tcg_cflags & CF_PARALLEL)) {
+ cpu->tcg_cflags |= CF_PARALLEL;
+ tb_flush(cpu);
+ }
+
+ return raddr;
+}
+
+abi_long target_shmdt(abi_ulong shmaddr)
+{
+ abi_long rv;
+
+ /* shmdt pointers are always untagged */
+
+ WITH_MMAP_LOCK_GUARD() {
+ abi_ulong last = shm_region_find(shmaddr);
+ if (last == 0) {
+ return -TARGET_EINVAL;
+ }
+
+ rv = get_errno(shmdt(g2h_untagged(shmaddr)));
+ if (rv == 0) {
+ abi_ulong size = last - shmaddr + 1;
+
+ page_set_flags(shmaddr, last, 0);
+ shm_region_rm_complete(shmaddr, last);
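+ /* Unmap the host range, or re-reserve it when reserved_va is in effect. */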
+ mmap_reserve_or_unmap(shmaddr, size);
+ }
+ }
+ return rv;
+}