#include "qemu/osdep.h"
#include "qemu.h"
-#include "qemu-common.h"
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;
void mmap_unlock(void)
{
+ assert(mmap_lock_count > 0);
if (--mmap_lock_count == 0) {
pthread_mutex_unlock(&mmap_mutex);
}
if (ret != 0)
goto error;
}
- page_set_flags(start, start + len, prot | PAGE_VALID);
+ page_set_flags(start, start + len - 1, prot | PAGE_VALID);
mmap_unlock();
return 0;
error:
return ret;
}
-/* map an incomplete host page */
+/*
+ * map an incomplete host page
+ *
+ * mmap_frag can be called with a valid fd if flags doesn't contain one of
+ * MAP_ANON, MAP_STACK or MAP_GUARD. If we need to map a page in those cases,
+ * we pass fd == -1. However, if flags contains MAP_GUARD then MAP_ANON cannot
+ * be added.
+ *
+ * * If fd is valid (not -1) we want to map the pages with MAP_ANON.
+ * * If flags contains MAP_GUARD we don't want to add MAP_ANON because it
+ * will be rejected. See kern_mmap's enforcing of constraints for MAP_GUARD
+ * in sys/vm/vm_mmap.c.
+ * * If flags contains MAP_ANON it doesn't matter if we add it or not.
+ * * If flags contains MAP_STACK, mmap adds MAP_ANON when called, so it
+ *   doesn't matter if we add it or not either. See the enforcing of
+ *   constraints for MAP_STACK in kern_mmap.
+ *
+ * Don't add MAP_ANON for the flags that use fd == -1 without specifying the
+ * flags directly, with the assumption that future flags that require fd == -1
+ * will also not require MAP_ANON.
+ */
static int mmap_frag(abi_ulong real_start,
abi_ulong start, abi_ulong end,
int prot, int flags, int fd, abi_ulong offset)
}
if (prot1 == 0) {
- /* no page was there, so we allocate one */
+ /* no page was there, so we allocate one. See also above. */
void *p = mmap(host_start, qemu_host_page_size, prot,
- flags | MAP_ANON, -1, 0);
+ flags | ((fd != -1) ? MAP_ANON : 0), -1, 0);
if (p == MAP_FAILED)
return -1;
prot1 = prot;
prot1 &= PAGE_BITS;
prot_new = prot | prot1;
- if (!(flags & MAP_ANON)) {
+ if (fd != -1) {
/* msync() won't work here, so we return an error if write is
possible while it is a shared mapping */
if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;
-unsigned long last_brk;
-
/*
* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk of guest
* address space.
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
abi_ulong alignment)
{
- abi_ulong addr;
- abi_ulong end_addr;
- int prot;
- int looped = 0;
-
- if (size > reserved_va) {
- return (abi_ulong)-1;
- }
+ abi_ulong ret;
- size = HOST_PAGE_ALIGN(size) + alignment;
- end_addr = start + size;
- if (end_addr > reserved_va) {
- end_addr = reserved_va;
+ ret = page_find_range_empty(start, reserved_va, size, alignment);
+ if (ret == -1 && start > TARGET_PAGE_SIZE) {
+ /* Restart at the beginning of the address space. */
+ ret = page_find_range_empty(TARGET_PAGE_SIZE, start - 1,
+ size, alignment);
}
- addr = end_addr - qemu_host_page_size;
- while (1) {
- if (addr > end_addr) {
- if (looped) {
- return (abi_ulong)-1;
- }
- end_addr = reserved_va;
- addr = end_addr - qemu_host_page_size;
- looped = 1;
- continue;
- }
- prot = page_get_flags(addr);
- if (prot) {
- end_addr = addr;
- }
- if (end_addr - addr >= size) {
- break;
- }
- addr -= qemu_host_page_size;
- }
-
- if (start == mmap_next_start) {
- mmap_next_start = addr;
- }
- /* addr is sufficiently low to align it up */
- if (alignment != 0) {
- addr = (addr + alignment) & ~(alignment - 1);
- }
- return addr;
+ return ret;
}
/*
if (reserved_va) {
return mmap_find_vma_reserved(start, size,
- (alignment != 0 ? 1 << alignment : 0));
+ (alignment != 0 ? 1 << alignment :
+ MAX(qemu_host_page_size, TARGET_PAGE_SIZE)));
}
addr = start;
* up to the targets page boundary.
*/
- if ((qemu_real_host_page_size < qemu_host_page_size) && fd != -1) {
+ if ((qemu_real_host_page_size() < qemu_host_page_size) && fd != -1) {
struct stat sb;
if (fstat(fd, &sb) == -1) {
* It can fail only on 64-bit host with 32-bit target.
* On any other target/host host mmap() handles this error correctly.
*/
-#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
- if ((unsigned long)start + len - 1 > (abi_ulong) -1) {
+ if (!guest_range_valid_untagged(start, len)) {
errno = EINVAL;
goto fail;
}
-#endif
/*
* worst case: we cannot map the file because the offset is not
* aligned, so we read it
*/
- if (!(flags & MAP_ANON) &&
+ if (fd != -1 &&
(offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
/*
* msync() won't work here, so we return an error if write is
}
if (!(prot & PROT_WRITE)) {
ret = target_mprotect(start, len, prot);
- if (ret != 0) {
- start = ret;
- goto the_end;
- }
+ assert(ret == 0);
}
goto the_end;
}
+ /* Reject the mapping if any page within the range is mapped */
+ if ((flags & MAP_EXCL) && !page_check_range_empty(start, end - 1)) {
+ errno = EINVAL;
+ goto fail;
+ }
+
/* handle the start of the mapping */
if (start > real_start) {
if (real_end == real_start + qemu_host_page_size) {
}
}
the_end1:
- page_set_flags(start, start + len, prot | PAGE_VALID);
+ page_set_flags(start, start + len - 1, prot | PAGE_VALID);
the_end:
#ifdef DEBUG_MMAP
printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
page_dump(stdout);
printf("\n");
#endif
- tb_invalidate_phys_range(start, start + len);
mmap_unlock();
return start;
fail:
return -1;
}
-static void mmap_reserve(abi_ulong start, abi_ulong size)
+void mmap_reserve(abi_ulong start, abi_ulong size)
{
abi_ulong real_start;
abi_ulong real_end;
}
if (ret == 0) {
- page_set_flags(start, start + len, 0);
- tb_invalidate_phys_range(start, start + len);
+ page_set_flags(start, start + len - 1, 0);
}
mmap_unlock();
return ret;