}
#endif
-void *qemu_vmalloc(size_t size)
+static void *bsd_vmalloc(size_t size)
{
void *p;
- unsigned long addr;
mmap_lock();
/* Use map and mark the pages as used. */
p = mmap(NULL, size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON, -1, 0);
- addr = (unsigned long)p;
- if (addr == (target_ulong) addr) {
+ if (h2g_valid(p)) {
/* Allocated region overlaps guest address space.
This may recurse. */
+ abi_ulong addr = h2g(p);
page_set_flags(addr & TARGET_PAGE_MASK, TARGET_PAGE_ALIGN(addr + size),
PAGE_RESERVED);
}
mmap_unlock();
return p;
}
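For readers of the diff: the removed test checked whether the raw host address survives truncation to target_ulong, i.e. whether it is representable in the guest's address space; h2g_valid()/h2g() express the same intent through the standard helpers, which also account for a possible guest address offset. As a rough, illustrative sketch only (the real macros live in QEMU's exec headers and also short-circuit when the host address width already fits the target), the pair behaves roughly like:
/* Illustrative sketch, not the real definitions: h2g() rebases a host
 * pointer into guest space and h2g_valid() checks that the rebased value
 * survives the round trip through abi_ulong.  The offset (guest_base, or
 * GUEST_BASE in some trees) may simply be 0. */
#define sketch_h2g(x)       ((abi_ulong)((unsigned long)(x) - guest_base))
#define sketch_h2g_valid(x) (((unsigned long)(x) - guest_base) == \
                             (unsigned long)sketch_h2g(x))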
-void *qemu_malloc(size_t size)
+void *g_malloc(size_t size)
{
char * p;
size += 16;
- p = qemu_vmalloc(size);
+ p = bsd_vmalloc(size);
*(size_t *)p = size;
return p + 16;
}
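The size bookkeeping above is the whole allocator: g_malloc() pads the request by 16 bytes, stores the padded size at the start of the fresh mapping, and returns the address just past that header; g_free() and g_realloc() recover the mapping by walking 16 bytes back. A minimal usage sketch of that round trip, using only the functions in this diff:
/* Sketch of the header layout used by the functions above:
 *   [ size_t padded_size | pad up to 16 bytes ][ caller-visible buffer ]
 * The stored value is the caller's size plus the 16-byte header. */
char *buf = g_malloc(100);                 /* maps 116 bytes, returns base + 16 */
size_t padded = *(size_t *)(buf - 16);     /* 116: what g_free() will munmap() */
g_free(buf);                               /* munmap(buf - 16, 116) */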
/* We use map, which is always zero initialized. */
-void * qemu_mallocz(size_t size)
+void * g_malloc0(size_t size)
{
- return qemu_malloc(size);
+ return g_malloc(size);
}
-void qemu_free(void *ptr)
+void g_free(void *ptr)
{
/* FIXME: We should unmark the reserved pages here. However this gets
   complicated when one target page spans multiple host pages, so we
   don't bother.  */
size_t *p = (size_t *)((char *)ptr - 16);
munmap(p, *p);
}
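For context on the FIXME: undoing the reservation would mean clearing the PAGE_RESERVED flags over the same rounded range that bsd_vmalloc() marked, but a target page that spans several host pages may still back a neighbouring live allocation, so clearing it blindly would be wrong. A purely hypothetical sketch of the naive version the comment is warning against:
/* Hypothetical sketch only: the naive unmark the FIXME rejects.  It clears
 * flags for every target page the mapping touches, including pages shared
 * with other live allocations, which is exactly why the real code does not
 * do this. */
if (h2g_valid(p)) {
    abi_ulong addr = h2g(p);
    page_set_flags(addr & TARGET_PAGE_MASK, TARGET_PAGE_ALIGN(addr + *p), 0);
}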
-void *qemu_realloc(void *ptr, size_t size)
+void *g_realloc(void *ptr, size_t size)
{
size_t old_size, copy;
void *new_ptr;
if (!ptr)
- return qemu_malloc(size);
+ return g_malloc(size);
old_size = *(size_t *)((char *)ptr - 16);
copy = old_size < size ? old_size : size;
- new_ptr = qemu_malloc(size);
+ new_ptr = g_malloc(size);
memcpy(new_ptr, ptr, copy);
- qemu_free(ptr);
+ g_free(ptr);
return new_ptr;
}
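Since g_realloc() recovers the old padded size from the header and copies the smaller of old and new, data is preserved across both growth and shrink, and the old mapping is always released via g_free(). A small usage sketch, assuming only the functions in this diff:
/* Usage sketch for the realloc path above: contents survive a grow, and a
 * shrink keeps only the first `size` bytes. */
char *s = g_malloc(32);
memcpy(s, "hello", 6);
s = g_realloc(s, 4096);    /* new mapping, "hello" copied over, old one freed */
s = g_realloc(s, 8);       /* shrink: only the first 8 bytes are carried across */
g_free(s);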
possible while it is a shared mapping */
if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
(prot & PROT_WRITE))
- return -EINVAL;
+ return -1;
/* adjust protection to be able to read */
if (!(prot1 & PROT_WRITE))