]> git.proxmox.com Git - qemu.git/blame - linux-user/mmap.c
Fix usermode virtual address type
[qemu.git] / linux-user / mmap.c
CommitLineData
54936004
FB
1/*
2 * mmap support for qemu
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
8167ee88 17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
54936004
FB
18 */
19#include <stdlib.h>
20#include <stdio.h>
21#include <stdarg.h>
22#include <string.h>
23#include <unistd.h>
24#include <errno.h>
54c5a2ae
EI
25#include <sys/types.h>
26#include <sys/stat.h>
54936004 27#include <sys/mman.h>
3af72a4d
BS
28#include <linux/mman.h>
29#include <linux/unistd.h>
54936004
FB
30
31#include "qemu.h"
78f5bf1e 32#include "qemu-common.h"
54936004
FB
33
34//#define DEBUG_MMAP
35
#if defined(CONFIG_USE_NPTL)
/* Global lock serializing changes to the target page flags and the host
   mappings that back them.  mmap_lock_count makes the lock recursive on
   a per-thread basis.  */
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

/* Acquire the mmap lock.  Only the outermost acquisition in a thread
   actually takes the mutex; nested calls just bump the count.  */
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

/* Release the mmap lock; the mutex is dropped only when the outermost
   recursive hold is released.  */
void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    /* Forking while this thread recursively holds the lock would leave
       the child inconsistent, so treat it as a fatal bug.  */
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

/* Called in both parent and child after fork().  The child re-creates
   the mutex (it inherited a locked one); the parent simply unlocks.  */
void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}
#else
/* We aren't threadsafe to start with, so no need to worry about locking.  */
void mmap_lock(void)
{
}

void mmap_unlock(void)
{
}
#endif
79
17e2377a
PB
80void *qemu_vmalloc(size_t size)
81{
82 void *p;
7bc7b099 83
17e2377a
PB
84 mmap_lock();
85 /* Use map and mark the pages as used. */
86 p = mmap(NULL, size, PROT_READ | PROT_WRITE,
87 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
88
7bc7b099
RH
89 if (h2g_valid(p)) {
90 /* Allocated region overlaps guest address space. This may recurse. */
b480d9b7 91 abi_ulong addr = h2g(p);
17e2377a
PB
92 page_set_flags(addr & TARGET_PAGE_MASK, TARGET_PAGE_ALIGN(addr + size),
93 PAGE_RESERVED);
94 }
95
96 mmap_unlock();
97 return p;
98}
99
/* malloc replacement built on qemu_vmalloc().  The allocation size is
   stored in a 16-byte header preceding the returned pointer so that
   qemu_free()/qemu_realloc() can recover it.  Returns NULL on failure. */
void *qemu_malloc(size_t size)
{
    char * p;
    /* Guard against overflow when adding the header.  */
    if (size > (size_t)-1 - 16)
        return NULL;
    size += 16;
    p = qemu_vmalloc(size);
    /* Bug fix: qemu_vmalloc() returns mmap()'s result, so failure is
       MAP_FAILED, not NULL; the original wrote through it unchecked.  */
    if (p == MAP_FAILED)
        return NULL;
    *(size_t *)p = size;
    return p + 16;
}
108
/* We use map, which is always zero initialized.  */
/* Zeroing variant of qemu_malloc(); anonymous mmap pages are already
   zero-filled by the kernel, so no explicit memset is needed.  */
void * qemu_mallocz(size_t size)
{
    return qemu_malloc(size);
}
114
/* Free a block obtained from qemu_malloc()/qemu_mallocz().  The total
   mapping size is read back from the 16-byte header in front of the
   user pointer and the whole mapping is unmapped.  */
void qemu_free(void *ptr)
{
    /* FIXME: We should unmark the reserved pages here.  However this gets
       complicated when one target page spans multiple host pages, so we
       don't bother.  */
    size_t *p;
    p = (size_t *)((char *)ptr - 16);
    munmap(p, *p);
}
124
/* realloc replacement: allocate a new block, copy the old payload, free
   the old block.  NULL 'ptr' degenerates to qemu_malloc().  */
void *qemu_realloc(void *ptr, size_t size)
{
    size_t old_size, copy;
    void *new_ptr;

    if (!ptr)
        return qemu_malloc(size);
    /* Bug fix: the stored size includes the 16-byte header written by
       qemu_malloc(), so the old payload is 16 bytes smaller.  The
       original used the raw stored size, which made memcpy read 16
       bytes past the end of the old payload when growing.  */
    old_size = *(size_t *)((char *)ptr - 16) - 16;
    copy = old_size < size ? old_size : size;
    new_ptr = qemu_malloc(size);
    /* On allocation failure keep 'ptr' valid, per realloc semantics.  */
    if (!new_ptr)
        return NULL;
    memcpy(new_ptr, ptr, copy);
    qemu_free(ptr);
    return new_ptr;
}
139
/* NOTE: all the constants are the HOST ones, but addresses are target. */
/* Implement mprotect() for the guest.  Because a target page may be
   smaller than a host page, partially-covered host pages at either end
   must receive the union of the protections of every target page they
   contain.  Returns 0 or a negative errno-style value for the argument
   checks; NOTE(review): the error path returns the host mprotect()
   result (-1), not -errno — confirm callers expect that.  */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_ABI_FMT_lx
           "len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)          /* wrap-around check */
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start: OR in the flags of the
           target pages in that host page which lie outside [start,end) */
        prot1 = prot;
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        /* If the whole range fits in one host page, fold the tail
           fragment in as well and consume the range entirely here.  */
        if (host_end == host_start + qemu_host_page_size) {
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        /* handle host page containing end (exclusive), same union logic */
        prot1 = prot;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    /* Record the new target-page protections.  */
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}
209
210/* map an incomplete host page */
992f48a0
BS
211static int mmap_frag(abi_ulong real_start,
212 abi_ulong start, abi_ulong end,
213 int prot, int flags, int fd, abi_ulong offset)
54936004 214{
80210bcd 215 abi_ulong real_end, addr;
53a5960a 216 void *host_start;
54936004
FB
217 int prot1, prot_new;
218
53a5960a
PB
219 real_end = real_start + qemu_host_page_size;
220 host_start = g2h(real_start);
54936004
FB
221
222 /* get the protection of the target pages outside the mapping */
223 prot1 = 0;
53a5960a 224 for(addr = real_start; addr < real_end; addr++) {
54936004
FB
225 if (addr < start || addr >= end)
226 prot1 |= page_get_flags(addr);
227 }
3b46e624 228
54936004
FB
229 if (prot1 == 0) {
230 /* no page was there, so we allocate one */
80210bcd
TS
231 void *p = mmap(host_start, qemu_host_page_size, prot,
232 flags | MAP_ANONYMOUS, -1, 0);
233 if (p == MAP_FAILED)
234 return -1;
53a5960a 235 prot1 = prot;
54936004
FB
236 }
237 prot1 &= PAGE_BITS;
238
239 prot_new = prot | prot1;
240 if (!(flags & MAP_ANONYMOUS)) {
241 /* msync() won't work here, so we return an error if write is
242 possible while it is a shared mapping */
243 if ((flags & MAP_TYPE) == MAP_SHARED &&
244 (prot & PROT_WRITE))
ee636500 245 return -1;
54936004
FB
246
247 /* adjust protection to be able to read */
248 if (!(prot1 & PROT_WRITE))
53a5960a 249 mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
3b46e624 250
54936004 251 /* read the corresponding file data */
fb7e378c
KS
252 if (pread(fd, g2h(start), end - start, offset) == -1)
253 return -1;
3b46e624 254
54936004
FB
255 /* put final protection */
256 if (prot_new != (prot1 | PROT_WRITE))
53a5960a 257 mprotect(host_start, qemu_host_page_size, prot_new);
54936004
FB
258 } else {
259 /* just update the protection */
260 if (prot_new != prot1) {
53a5960a 261 mprotect(host_start, qemu_host_page_size, prot_new);
54936004
FB
262 }
263 }
264 return 0;
265}
266
/* Default lowest address at which mmap_find_vma() starts searching for
   free guest address space when the guest passes addr == 0.  */
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE (1ul << 38)
#elif defined(__CYGWIN__)
/* Cygwin doesn't have a whole lot of address space.  */
# define TASK_UNMAPPED_BASE 0x18000000
#else
# define TASK_UNMAPPED_BASE 0x40000000
#endif
/* Next address to try for an anonymous guest mapping; advanced by
   mmap_find_vma() on success so successive searches don't rescan.  */
static abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

/* Last guest brk value; updated elsewhere — TODO confirm (set by the
   brk emulation, not in this file).  */
unsigned long last_brk;
278
/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 *
 * The area is reserved with a PROT_NONE anonymous mapping so a
 * concurrent host allocation cannot steal it; the caller must replace
 * it with MAP_FIXED mmap / MREMAP_FIXED mremap / SHM_REMAP shmat.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below.  */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success.  Remember where to continue next time, but
                   only for the default-search case.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == -1ul) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            /* Searched past the original start after wrapping: the whole
               space has been covered without success.  */
            return (abi_ulong)-1;
        }
    }
}
384
54936004 385/* NOTE: all the constants are the HOST ones */
992f48a0
BS
386abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
387 int flags, int fd, abi_ulong offset)
54936004 388{
992f48a0 389 abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
a5b85f79 390 unsigned long host_start;
54936004 391
c8a706fe 392 mmap_lock();
54936004
FB
393#ifdef DEBUG_MMAP
394 {
0bf9e31a
BS
395 printf("mmap: start=0x" TARGET_ABI_FMT_lx
396 " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
5fafdf24 397 start, len,
54936004
FB
398 prot & PROT_READ ? 'r' : '-',
399 prot & PROT_WRITE ? 'w' : '-',
400 prot & PROT_EXEC ? 'x' : '-');
401 if (flags & MAP_FIXED)
402 printf("MAP_FIXED ");
403 if (flags & MAP_ANONYMOUS)
404 printf("MAP_ANON ");
405 switch(flags & MAP_TYPE) {
406 case MAP_PRIVATE:
407 printf("MAP_PRIVATE ");
408 break;
409 case MAP_SHARED:
410 printf("MAP_SHARED ");
411 break;
412 default:
413 printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
414 break;
415 }
0bf9e31a 416 printf("fd=%d offset=" TARGET_ABI_FMT_lx "\n", fd, offset);
54936004
FB
417 }
418#endif
419
e89f07d3
PB
420 if (offset & ~TARGET_PAGE_MASK) {
421 errno = EINVAL;
c8a706fe 422 goto fail;
e89f07d3 423 }
54936004
FB
424
425 len = TARGET_PAGE_ALIGN(len);
426 if (len == 0)
c8a706fe 427 goto the_end;
53a5960a 428 real_start = start & qemu_host_page_mask;
54936004 429
54c5a2ae
EI
430 /* When mapping files into a memory area larger than the file, accesses
431 to pages beyond the file size will cause a SIGBUS.
432
433 For example, if mmaping a file of 100 bytes on a host with 4K pages
434 emulating a target with 8K pages, the target expects to be able to
435 access the first 8K. But the host will trap us on any access beyond
436 4K.
437
438 When emulating a target with a larger page-size than the hosts, we
439 may need to truncate file maps at EOF and add extra anonymous pages
440 up to the targets page boundary. */
441
442 if ((qemu_real_host_page_size < TARGET_PAGE_SIZE)
443 && !(flags & MAP_ANONYMOUS)) {
444 struct stat sb;
445
446 if (fstat (fd, &sb) == -1)
447 goto fail;
448
449 /* Are we trying to create a map beyond EOF?. */
450 if (offset + len > sb.st_size) {
451 /* If so, truncate the file map at eof aligned with
452 the hosts real pagesize. Additional anonymous maps
453 will be created beyond EOF. */
454 len = (sb.st_size - offset);
455 len += qemu_real_host_page_size - 1;
456 len &= ~(qemu_real_host_page_size - 1);
457 }
458 }
459
54936004 460 if (!(flags & MAP_FIXED)) {
a03e2d42
FB
461 abi_ulong mmap_start;
462 void *p;
463 host_offset = offset & qemu_host_page_mask;
464 host_len = len + offset - host_offset;
465 host_len = HOST_PAGE_ALIGN(host_len);
466 mmap_start = mmap_find_vma(real_start, host_len);
467 if (mmap_start == (abi_ulong)-1) {
468 errno = ENOMEM;
c8a706fe 469 goto fail;
54936004 470 }
a03e2d42
FB
471 /* Note: we prefer to control the mapping address. It is
472 especially important if qemu_host_page_size >
473 qemu_real_host_page_size */
474 p = mmap(g2h(mmap_start),
54c5a2ae 475 host_len, prot, flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
a03e2d42 476 if (p == MAP_FAILED)
c8a706fe 477 goto fail;
a03e2d42
FB
478 /* update start so that it points to the file position at 'offset' */
479 host_start = (unsigned long)p;
54c5a2ae
EI
480 if (!(flags & MAP_ANONYMOUS)) {
481 p = mmap(g2h(mmap_start), len, prot,
482 flags | MAP_FIXED, fd, host_offset);
a03e2d42 483 host_start += offset - host_offset;
54c5a2ae 484 }
a03e2d42
FB
485 start = h2g(host_start);
486 } else {
7ab240ad
AZ
487 int flg;
488 target_ulong addr;
489
a03e2d42 490 if (start & ~TARGET_PAGE_MASK) {
e89f07d3 491 errno = EINVAL;
c8a706fe 492 goto fail;
e89f07d3 493 }
a03e2d42
FB
494 end = start + len;
495 real_end = HOST_PAGE_ALIGN(end);
7ab240ad 496
45bc1f52
AJ
497 /*
498 * Test if requested memory area fits target address space
499 * It can fail only on 64-bit host with 32-bit target.
500 * On any other target/host host mmap() handles this error correctly.
501 */
502 if ((unsigned long)start + len - 1 > (abi_ulong) -1) {
503 errno = EINVAL;
504 goto fail;
505 }
506
7ab240ad
AZ
507 for(addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
508 flg = page_get_flags(addr);
509 if (flg & PAGE_RESERVED) {
510 errno = ENXIO;
c8a706fe 511 goto fail;
7ab240ad
AZ
512 }
513 }
514
a03e2d42
FB
515 /* worst case: we cannot map the file because the offset is not
516 aligned, so we read it */
517 if (!(flags & MAP_ANONYMOUS) &&
518 (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
519 /* msync() won't work here, so we return an error if write is
520 possible while it is a shared mapping */
521 if ((flags & MAP_TYPE) == MAP_SHARED &&
522 (prot & PROT_WRITE)) {
523 errno = EINVAL;
c8a706fe 524 goto fail;
a03e2d42
FB
525 }
526 retaddr = target_mmap(start, len, prot | PROT_WRITE,
527 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
528 -1, 0);
529 if (retaddr == -1)
c8a706fe 530 goto fail;
fb7e378c
KS
531 if (pread(fd, g2h(start), len, offset) == -1)
532 goto fail;
a03e2d42
FB
533 if (!(prot & PROT_WRITE)) {
534 ret = target_mprotect(start, len, prot);
c8a706fe
PB
535 if (ret != 0) {
536 start = ret;
537 goto the_end;
538 }
a03e2d42
FB
539 }
540 goto the_end;
54936004 541 }
a03e2d42
FB
542
543 /* handle the start of the mapping */
544 if (start > real_start) {
545 if (real_end == real_start + qemu_host_page_size) {
546 /* one single host page */
547 ret = mmap_frag(real_start, start, end,
548 prot, flags, fd, offset);
549 if (ret == -1)
c8a706fe 550 goto fail;
a03e2d42
FB
551 goto the_end1;
552 }
553 ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
54936004
FB
554 prot, flags, fd, offset);
555 if (ret == -1)
c8a706fe 556 goto fail;
a03e2d42
FB
557 real_start += qemu_host_page_size;
558 }
559 /* handle the end of the mapping */
560 if (end < real_end) {
561 ret = mmap_frag(real_end - qemu_host_page_size,
562 real_end - qemu_host_page_size, real_end,
563 prot, flags, fd,
564 offset + real_end - qemu_host_page_size - start);
565 if (ret == -1)
c8a706fe 566 goto fail;
a03e2d42 567 real_end -= qemu_host_page_size;
54936004 568 }
3b46e624 569
a03e2d42
FB
570 /* map the middle (easier) */
571 if (real_start < real_end) {
572 void *p;
573 unsigned long offset1;
574 if (flags & MAP_ANONYMOUS)
575 offset1 = 0;
576 else
577 offset1 = offset + real_start - start;
578 p = mmap(g2h(real_start), real_end - real_start,
579 prot, flags, fd, offset1);
580 if (p == MAP_FAILED)
c8a706fe 581 goto fail;
a03e2d42 582 }
54936004
FB
583 }
584 the_end1:
585 page_set_flags(start, start + len, prot | PAGE_VALID);
586 the_end:
587#ifdef DEBUG_MMAP
0bf9e31a 588 printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
54936004
FB
589 page_dump(stdout);
590 printf("\n");
591#endif
c8a706fe 592 mmap_unlock();
54936004 593 return start;
c8a706fe
PB
594fail:
595 mmap_unlock();
596 return -1;
54936004
FB
597}
598
/* Implement munmap() for the guest.  Host pages that are only partially
   covered by the range are kept mapped if any target page inside them
   (outside the range) is still in use; only fully-unused host pages are
   actually munmap()'ed.  Returns 0 or a negative errno value for the
   argument checks; NOTE(review): on munmap() failure the raw -1 is
   returned, not -errno — confirm callers expect that.  */
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start: check whether any other
           target page in it is still mapped */
        prot = 0;
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            /* whole range inside one host page: fold in the tail too */
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        /* some target page in this host page still live: keep it mapped */
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        /* handle host page containing end, same logic */
        prot = 0;
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        ret = munmap(g2h(real_start), real_end - real_start);
    }

    if (ret == 0)
        page_set_flags(start, start + len, 0);
    mmap_unlock();
    return ret;
}
654
/* Implement mremap() for the guest.  Three cases:
   - MREMAP_FIXED: forward to the host mremap syscall directly;
     NOTE(review): new_addr is passed untranslated (no g2h()) here,
     unlike every other host call in this file — confirm intended.
   - MREMAP_MAYMOVE: pick a new guest range with mmap_find_vma() and
     force the kernel to move there with MREMAP_FIXED.
   - otherwise: plain host mremap, reverted if the grown mapping would
     not fit in the target address space.
   On success the target page flags are moved from the old range to the
   new one.  Returns the new guest address or -1.  */
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    mmap_lock();

    if (flags & MREMAP_FIXED)
        host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                     old_size, new_size,
                                     flags,
                                     new_addr);
    else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else
            host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                         old_size, new_size,
                                         flags | MREMAP_FIXED,
                                         g2h(mmap_start));
    } else {
        host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        /* Move the page flags from the old range to the new one.  */
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    mmap_unlock();
    return new_addr;
}
704
992f48a0 705int target_msync(abi_ulong start, abi_ulong len, int flags)
54936004 706{
992f48a0 707 abi_ulong end;
54936004
FB
708
709 if (start & ~TARGET_PAGE_MASK)
710 return -EINVAL;
711 len = TARGET_PAGE_ALIGN(len);
54936004 712 end = start + len;
d418c81e
FB
713 if (end < start)
714 return -EINVAL;
715 if (end == start)
716 return 0;
3b46e624 717
83fb7adf 718 start &= qemu_host_page_mask;
53a5960a 719 return msync(g2h(start), end - start, flags);
54936004 720}