/* linux-user/mmap.c — QEMU user-mode guest mmap/mprotect/munmap/mremap emulation */
/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "exec/log.h"
#include "qemu.h"
/* Serializes all changes to the guest address-space bookkeeping and the
 * corresponding host mappings.  mmap_lock_count is thread-local, which
 * makes the lock recursive within one thread: only the outermost
 * mmap_lock()/mmap_unlock() pair actually touches the mutex. */
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;
27void mmap_lock(void)
28{
29 if (mmap_lock_count++ == 0) {
30 pthread_mutex_lock(&mmap_mutex);
31 }
32}
33
34void mmap_unlock(void)
35{
36 if (--mmap_lock_count == 0) {
37 pthread_mutex_unlock(&mmap_mutex);
38 }
39}
d5975363 40
301e40ed
AB
41bool have_mmap_lock(void)
42{
43 return mmap_lock_count > 0 ? true : false;
44}
45
d5975363
PB
46/* Grab lock to make sure things are in a consistent state after fork(). */
47void mmap_fork_start(void)
48{
49 if (mmap_lock_count)
50 abort();
51 pthread_mutex_lock(&mmap_mutex);
52}
53
54void mmap_fork_end(int child)
55{
56 if (child)
57 pthread_mutex_init(&mmap_mutex, NULL);
58 else
59 pthread_mutex_unlock(&mmap_mutex);
60}
c8a706fe 61
53a5960a 62/* NOTE: all the constants are the HOST ones, but addresses are target. */
992f48a0 63int target_mprotect(abi_ulong start, abi_ulong len, int prot)
54936004 64{
992f48a0 65 abi_ulong end, host_start, host_end, addr;
54936004
FB
66 int prot1, ret;
67
11d96056 68 trace_target_mprotect(start, len, prot);
54936004
FB
69
70 if ((start & ~TARGET_PAGE_MASK) != 0)
78cf3390 71 return -TARGET_EINVAL;
54936004
FB
72 len = TARGET_PAGE_ALIGN(len);
73 end = start + len;
ebf9a363 74 if (!guest_range_valid(start, len)) {
78cf3390 75 return -TARGET_ENOMEM;
ebf9a363 76 }
171cd1cd 77 prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
54936004
FB
78 if (len == 0)
79 return 0;
3b46e624 80
c8a706fe 81 mmap_lock();
83fb7adf 82 host_start = start & qemu_host_page_mask;
54936004
FB
83 host_end = HOST_PAGE_ALIGN(end);
84 if (start > host_start) {
85 /* handle host page containing start */
86 prot1 = prot;
87 for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
88 prot1 |= page_get_flags(addr);
89 }
83fb7adf 90 if (host_end == host_start + qemu_host_page_size) {
d418c81e
FB
91 for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
92 prot1 |= page_get_flags(addr);
93 }
94 end = host_end;
95 }
53a5960a 96 ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
54936004 97 if (ret != 0)
c8a706fe 98 goto error;
83fb7adf 99 host_start += qemu_host_page_size;
54936004
FB
100 }
101 if (end < host_end) {
54936004
FB
102 prot1 = prot;
103 for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
104 prot1 |= page_get_flags(addr);
105 }
5fafdf24 106 ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
54936004
FB
107 prot1 & PAGE_BITS);
108 if (ret != 0)
c8a706fe 109 goto error;
83fb7adf 110 host_end -= qemu_host_page_size;
54936004 111 }
3b46e624 112
54936004
FB
113 /* handle the pages in the middle */
114 if (host_start < host_end) {
53a5960a 115 ret = mprotect(g2h(host_start), host_end - host_start, prot);
54936004 116 if (ret != 0)
c8a706fe 117 goto error;
54936004 118 }
54936004 119 page_set_flags(start, start + len, prot | PAGE_VALID);
c8a706fe 120 mmap_unlock();
54936004 121 return 0;
c8a706fe
PB
122error:
123 mmap_unlock();
124 return ret;
54936004
FB
125}
126
127/* map an incomplete host page */
992f48a0
BS
128static int mmap_frag(abi_ulong real_start,
129 abi_ulong start, abi_ulong end,
130 int prot, int flags, int fd, abi_ulong offset)
54936004 131{
80210bcd 132 abi_ulong real_end, addr;
53a5960a 133 void *host_start;
54936004
FB
134 int prot1, prot_new;
135
53a5960a
PB
136 real_end = real_start + qemu_host_page_size;
137 host_start = g2h(real_start);
54936004
FB
138
139 /* get the protection of the target pages outside the mapping */
140 prot1 = 0;
53a5960a 141 for(addr = real_start; addr < real_end; addr++) {
54936004
FB
142 if (addr < start || addr >= end)
143 prot1 |= page_get_flags(addr);
144 }
3b46e624 145
54936004
FB
146 if (prot1 == 0) {
147 /* no page was there, so we allocate one */
80210bcd
TS
148 void *p = mmap(host_start, qemu_host_page_size, prot,
149 flags | MAP_ANONYMOUS, -1, 0);
150 if (p == MAP_FAILED)
151 return -1;
53a5960a 152 prot1 = prot;
54936004
FB
153 }
154 prot1 &= PAGE_BITS;
155
156 prot_new = prot | prot1;
157 if (!(flags & MAP_ANONYMOUS)) {
158 /* msync() won't work here, so we return an error if write is
159 possible while it is a shared mapping */
160 if ((flags & MAP_TYPE) == MAP_SHARED &&
161 (prot & PROT_WRITE))
ee636500 162 return -1;
54936004
FB
163
164 /* adjust protection to be able to read */
165 if (!(prot1 & PROT_WRITE))
53a5960a 166 mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
3b46e624 167
54936004 168 /* read the corresponding file data */
fb7e378c
KS
169 if (pread(fd, g2h(start), end - start, offset) == -1)
170 return -1;
3b46e624 171
54936004
FB
172 /* put final protection */
173 if (prot_new != (prot1 | PROT_WRITE))
53a5960a 174 mprotect(host_start, qemu_host_page_size, prot_new);
54936004 175 } else {
54936004 176 if (prot_new != prot1) {
53a5960a 177 mprotect(host_start, qemu_host_page_size, prot_new);
54936004 178 }
e6deac9c
CG
179 if (prot_new & PROT_WRITE) {
180 memset(g2h(start), 0, end - start);
181 }
54936004
FB
182 }
183 return 0;
184}
185
14f24e14
RH
186#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
187# define TASK_UNMAPPED_BASE (1ul << 38)
a03e2d42 188#else
14f24e14 189# define TASK_UNMAPPED_BASE 0x40000000
a03e2d42 190#endif
59e9d91c 191abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;
a03e2d42 192
0776590d
PB
193unsigned long last_brk;
194
68a1c816
PB
195/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
196 of guest address space. */
30ab9ef2
RH
197static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
198 abi_ulong align)
68a1c816 199{
30ab9ef2 200 abi_ulong addr, end_addr, incr = qemu_host_page_size;
68a1c816 201 int prot;
30ab9ef2 202 bool looped = false;
68a1c816 203
b76f21a7 204 if (size > reserved_va) {
68a1c816
PB
205 return (abi_ulong)-1;
206 }
207
30ab9ef2
RH
208 /* Note that start and size have already been aligned by mmap_find_vma. */
209
59e9d91c 210 end_addr = start + size;
30ab9ef2
RH
211 if (start > reserved_va - size) {
212 /* Start at the top of the address space. */
213 end_addr = ((reserved_va - size) & -align) + size;
214 looped = true;
59e9d91c 215 }
59e9d91c 216
30ab9ef2
RH
217 /* Search downward from END_ADDR, checking to see if a page is in use. */
218 addr = end_addr;
59e9d91c 219 while (1) {
30ab9ef2 220 addr -= incr;
59e9d91c 221 if (addr > end_addr) {
68a1c816 222 if (looped) {
30ab9ef2 223 /* Failure. The entire address space has been searched. */
68a1c816
PB
224 return (abi_ulong)-1;
225 }
30ab9ef2
RH
226 /* Re-start at the top of the address space. */
227 addr = end_addr = ((reserved_va - size) & -align) + size;
228 looped = true;
229 } else {
230 prot = page_get_flags(addr);
231 if (prot) {
232 /* Page in use. Restart below this page. */
233 addr = end_addr = ((addr - size) & -align) + size;
234 } else if (addr && addr + size == end_addr) {
235 /* Success! All pages between ADDR and END_ADDR are free. */
236 if (start == mmap_next_start) {
237 mmap_next_start = addr;
238 }
239 return addr;
240 }
68a1c816
PB
241 }
242 }
68a1c816
PB
243}
244
fe3b4152
KS
245/*
246 * Find and reserve a free memory area of size 'size'. The search
247 * starts at 'start'.
248 * It must be called with mmap_lock() held.
249 * Return -1 if error.
250 */
30ab9ef2 251abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
a03e2d42 252{
14f24e14 253 void *ptr, *prev;
fe3b4152 254 abi_ulong addr;
14f24e14 255 int wrapped, repeat;
fe3b4152 256
443b7505
RH
257 align = MAX(align, qemu_host_page_size);
258
fe3b4152 259 /* If 'start' == 0, then a default start address is used. */
14f24e14 260 if (start == 0) {
fe3b4152 261 start = mmap_next_start;
14f24e14
RH
262 } else {
263 start &= qemu_host_page_mask;
264 }
30ab9ef2 265 start = ROUND_UP(start, align);
14f24e14
RH
266
267 size = HOST_PAGE_ALIGN(size);
fe3b4152 268
b76f21a7 269 if (reserved_va) {
30ab9ef2 270 return mmap_find_vma_reserved(start, size, align);
68a1c816
PB
271 }
272
a03e2d42 273 addr = start;
14f24e14
RH
274 wrapped = repeat = 0;
275 prev = 0;
fe3b4152 276
14f24e14 277 for (;; prev = ptr) {
fe3b4152
KS
278 /*
279 * Reserve needed memory area to avoid a race.
280 * It should be discarded using:
281 * - mmap() with MAP_FIXED flag
282 * - mremap() with MREMAP_FIXED flag
283 * - shmat() with SHM_REMAP flag
284 */
14f24e14 285 ptr = mmap(g2h(addr), size, PROT_NONE,
fe3b4152
KS
286 MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
287
288 /* ENOMEM, if host address space has no memory */
14f24e14 289 if (ptr == MAP_FAILED) {
fe3b4152 290 return (abi_ulong)-1;
14f24e14
RH
291 }
292
293 /* Count the number of sequential returns of the same address.
294 This is used to modify the search algorithm below. */
295 repeat = (ptr == prev ? repeat + 1 : 0);
296
297 if (h2g_valid(ptr + size - 1)) {
298 addr = h2g(ptr);
fe3b4152 299
30ab9ef2 300 if ((addr & (align - 1)) == 0) {
14f24e14
RH
301 /* Success. */
302 if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
303 mmap_next_start = addr + size;
304 }
305 return addr;
306 }
fe3b4152 307
14f24e14
RH
308 /* The address is not properly aligned for the target. */
309 switch (repeat) {
310 case 0:
311 /* Assume the result that the kernel gave us is the
312 first with enough free space, so start again at the
313 next higher target page. */
30ab9ef2 314 addr = ROUND_UP(addr, align);
14f24e14
RH
315 break;
316 case 1:
317 /* Sometimes the kernel decides to perform the allocation
318 at the top end of memory instead. */
30ab9ef2 319 addr &= -align;
14f24e14
RH
320 break;
321 case 2:
322 /* Start over at low memory. */
323 addr = 0;
324 break;
325 default:
326 /* Fail. This unaligned block must the last. */
327 addr = -1;
328 break;
329 }
330 } else {
331 /* Since the result the kernel gave didn't fit, start
332 again at low memory. If any repetition, fail. */
333 addr = (repeat ? -1 : 0);
334 }
335
336 /* Unmap and try again. */
fe3b4152 337 munmap(ptr, size);
fe3b4152 338
14f24e14 339 /* ENOMEM if we checked the whole of the target address space. */
d0b3e4f5 340 if (addr == (abi_ulong)-1) {
a03e2d42 341 return (abi_ulong)-1;
14f24e14
RH
342 } else if (addr == 0) {
343 if (wrapped) {
344 return (abi_ulong)-1;
345 }
346 wrapped = 1;
347 /* Don't actually use 0 when wrapping, instead indicate
8186e783 348 that we'd truly like an allocation in low memory. */
14f24e14
RH
349 addr = (mmap_min_addr > TARGET_PAGE_SIZE
350 ? TARGET_PAGE_ALIGN(mmap_min_addr)
351 : TARGET_PAGE_SIZE);
352 } else if (wrapped && addr >= start) {
353 return (abi_ulong)-1;
354 }
a03e2d42 355 }
a03e2d42
FB
356}
357
54936004 358/* NOTE: all the constants are the HOST ones */
992f48a0
BS
359abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
360 int flags, int fd, abi_ulong offset)
54936004 361{
992f48a0 362 abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
54936004 363
c8a706fe 364 mmap_lock();
5a67bb96 365 trace_target_mmap(start, len, prot, flags, fd, offset);
54936004 366
38138fab 367 if (!len) {
e89f07d3 368 errno = EINVAL;
c8a706fe 369 goto fail;
e89f07d3 370 }
54936004 371
38138fab 372 /* Also check for overflows... */
54936004 373 len = TARGET_PAGE_ALIGN(len);
38138fab
AB
374 if (!len) {
375 errno = ENOMEM;
376 goto fail;
377 }
378
379 if (offset & ~TARGET_PAGE_MASK) {
380 errno = EINVAL;
381 goto fail;
382 }
383
53a5960a 384 real_start = start & qemu_host_page_mask;
a5e7ee46
RH
385 host_offset = offset & qemu_host_page_mask;
386
387 /* If the user is asking for the kernel to find a location, do that
388 before we truncate the length for mapping files below. */
389 if (!(flags & MAP_FIXED)) {
390 host_len = len + offset - host_offset;
391 host_len = HOST_PAGE_ALIGN(host_len);
30ab9ef2 392 start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
a5e7ee46
RH
393 if (start == (abi_ulong)-1) {
394 errno = ENOMEM;
395 goto fail;
396 }
397 }
54936004 398
54c5a2ae
EI
399 /* When mapping files into a memory area larger than the file, accesses
400 to pages beyond the file size will cause a SIGBUS.
401
402 For example, if mmaping a file of 100 bytes on a host with 4K pages
403 emulating a target with 8K pages, the target expects to be able to
404 access the first 8K. But the host will trap us on any access beyond
405 4K.
406
407 When emulating a target with a larger page-size than the hosts, we
408 may need to truncate file maps at EOF and add extra anonymous pages
409 up to the targets page boundary. */
410
35f2fd04
MAL
411 if ((qemu_real_host_page_size < qemu_host_page_size) &&
412 !(flags & MAP_ANONYMOUS)) {
413 struct stat sb;
54c5a2ae
EI
414
415 if (fstat (fd, &sb) == -1)
416 goto fail;
417
418 /* Are we trying to create a map beyond EOF?. */
419 if (offset + len > sb.st_size) {
420 /* If so, truncate the file map at eof aligned with
421 the hosts real pagesize. Additional anonymous maps
422 will be created beyond EOF. */
0c2d70c4 423 len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
54c5a2ae
EI
424 }
425 }
426
54936004 427 if (!(flags & MAP_FIXED)) {
a5e7ee46 428 unsigned long host_start;
a03e2d42 429 void *p;
a5e7ee46 430
a03e2d42
FB
431 host_len = len + offset - host_offset;
432 host_len = HOST_PAGE_ALIGN(host_len);
a5e7ee46 433
a03e2d42
FB
434 /* Note: we prefer to control the mapping address. It is
435 especially important if qemu_host_page_size >
436 qemu_real_host_page_size */
a5e7ee46
RH
437 p = mmap(g2h(start), host_len, prot,
438 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
a03e2d42 439 if (p == MAP_FAILED)
c8a706fe 440 goto fail;
a03e2d42
FB
441 /* update start so that it points to the file position at 'offset' */
442 host_start = (unsigned long)p;
54c5a2ae 443 if (!(flags & MAP_ANONYMOUS)) {
a5e7ee46 444 p = mmap(g2h(start), len, prot,
54c5a2ae 445 flags | MAP_FIXED, fd, host_offset);
8384274e
JB
446 if (p == MAP_FAILED) {
447 munmap(g2h(start), host_len);
448 goto fail;
449 }
a03e2d42 450 host_start += offset - host_offset;
54c5a2ae 451 }
a03e2d42
FB
452 start = h2g(host_start);
453 } else {
454 if (start & ~TARGET_PAGE_MASK) {
e89f07d3 455 errno = EINVAL;
c8a706fe 456 goto fail;
e89f07d3 457 }
a03e2d42
FB
458 end = start + len;
459 real_end = HOST_PAGE_ALIGN(end);
7ab240ad 460
7d37435b
PB
461 /*
462 * Test if requested memory area fits target address space
463 * It can fail only on 64-bit host with 32-bit target.
464 * On any other target/host host mmap() handles this error correctly.
465 */
ebf9a363
MF
466 if (!guest_range_valid(start, len)) {
467 errno = ENOMEM;
45bc1f52
AJ
468 goto fail;
469 }
470
a03e2d42
FB
471 /* worst case: we cannot map the file because the offset is not
472 aligned, so we read it */
473 if (!(flags & MAP_ANONYMOUS) &&
474 (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
475 /* msync() won't work here, so we return an error if write is
476 possible while it is a shared mapping */
477 if ((flags & MAP_TYPE) == MAP_SHARED &&
478 (prot & PROT_WRITE)) {
479 errno = EINVAL;
c8a706fe 480 goto fail;
a03e2d42
FB
481 }
482 retaddr = target_mmap(start, len, prot | PROT_WRITE,
483 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
484 -1, 0);
485 if (retaddr == -1)
c8a706fe 486 goto fail;
fb7e378c
KS
487 if (pread(fd, g2h(start), len, offset) == -1)
488 goto fail;
a03e2d42
FB
489 if (!(prot & PROT_WRITE)) {
490 ret = target_mprotect(start, len, prot);
86abac06 491 assert(ret == 0);
a03e2d42
FB
492 }
493 goto the_end;
54936004 494 }
a03e2d42
FB
495
496 /* handle the start of the mapping */
497 if (start > real_start) {
498 if (real_end == real_start + qemu_host_page_size) {
499 /* one single host page */
500 ret = mmap_frag(real_start, start, end,
501 prot, flags, fd, offset);
502 if (ret == -1)
c8a706fe 503 goto fail;
a03e2d42
FB
504 goto the_end1;
505 }
506 ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
54936004
FB
507 prot, flags, fd, offset);
508 if (ret == -1)
c8a706fe 509 goto fail;
a03e2d42
FB
510 real_start += qemu_host_page_size;
511 }
512 /* handle the end of the mapping */
513 if (end < real_end) {
514 ret = mmap_frag(real_end - qemu_host_page_size,
530c0032 515 real_end - qemu_host_page_size, end,
a03e2d42
FB
516 prot, flags, fd,
517 offset + real_end - qemu_host_page_size - start);
518 if (ret == -1)
c8a706fe 519 goto fail;
a03e2d42 520 real_end -= qemu_host_page_size;
54936004 521 }
3b46e624 522
a03e2d42
FB
523 /* map the middle (easier) */
524 if (real_start < real_end) {
525 void *p;
526 unsigned long offset1;
527 if (flags & MAP_ANONYMOUS)
528 offset1 = 0;
529 else
530 offset1 = offset + real_start - start;
531 p = mmap(g2h(real_start), real_end - real_start,
532 prot, flags, fd, offset1);
533 if (p == MAP_FAILED)
c8a706fe 534 goto fail;
a03e2d42 535 }
54936004
FB
536 }
537 the_end1:
538 page_set_flags(start, start + len, prot | PAGE_VALID);
539 the_end:
d0e165ae 540 trace_target_mmap_complete(start);
10d0d505
AB
541 if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
542 log_page_dump(__func__);
543 }
35865339 544 tb_invalidate_phys_range(start, start + len);
c8a706fe 545 mmap_unlock();
54936004 546 return start;
c8a706fe
PB
547fail:
548 mmap_unlock();
549 return -1;
54936004
FB
550}
551
68a1c816
PB
552static void mmap_reserve(abi_ulong start, abi_ulong size)
553{
554 abi_ulong real_start;
555 abi_ulong real_end;
556 abi_ulong addr;
557 abi_ulong end;
558 int prot;
559
560 real_start = start & qemu_host_page_mask;
561 real_end = HOST_PAGE_ALIGN(start + size);
562 end = start + size;
563 if (start > real_start) {
564 /* handle host page containing start */
565 prot = 0;
566 for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
567 prot |= page_get_flags(addr);
568 }
569 if (real_end == real_start + qemu_host_page_size) {
570 for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
571 prot |= page_get_flags(addr);
572 }
573 end = real_end;
574 }
575 if (prot != 0)
576 real_start += qemu_host_page_size;
577 }
578 if (end < real_end) {
579 prot = 0;
580 for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
581 prot |= page_get_flags(addr);
582 }
583 if (prot != 0)
584 real_end -= qemu_host_page_size;
585 }
586 if (real_start != real_end) {
587 mmap(g2h(real_start), real_end - real_start, PROT_NONE,
588 MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
589 -1, 0);
590 }
591}
592
992f48a0 593int target_munmap(abi_ulong start, abi_ulong len)
54936004 594{
992f48a0 595 abi_ulong end, real_start, real_end, addr;
54936004
FB
596 int prot, ret;
597
b7b18d26
AB
598 trace_target_munmap(start, len);
599
54936004 600 if (start & ~TARGET_PAGE_MASK)
78cf3390 601 return -TARGET_EINVAL;
54936004 602 len = TARGET_PAGE_ALIGN(len);
ebf9a363 603 if (len == 0 || !guest_range_valid(start, len)) {
78cf3390 604 return -TARGET_EINVAL;
ebf9a363
MF
605 }
606
c8a706fe 607 mmap_lock();
54936004 608 end = start + len;
53a5960a
PB
609 real_start = start & qemu_host_page_mask;
610 real_end = HOST_PAGE_ALIGN(end);
54936004 611
53a5960a 612 if (start > real_start) {
54936004
FB
613 /* handle host page containing start */
614 prot = 0;
53a5960a 615 for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
54936004
FB
616 prot |= page_get_flags(addr);
617 }
53a5960a
PB
618 if (real_end == real_start + qemu_host_page_size) {
619 for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
d418c81e
FB
620 prot |= page_get_flags(addr);
621 }
53a5960a 622 end = real_end;
d418c81e 623 }
54936004 624 if (prot != 0)
53a5960a 625 real_start += qemu_host_page_size;
54936004 626 }
53a5960a 627 if (end < real_end) {
54936004 628 prot = 0;
53a5960a 629 for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
54936004
FB
630 prot |= page_get_flags(addr);
631 }
632 if (prot != 0)
53a5960a 633 real_end -= qemu_host_page_size;
54936004 634 }
3b46e624 635
c8a706fe 636 ret = 0;
54936004 637 /* unmap what we can */
53a5960a 638 if (real_start < real_end) {
b76f21a7 639 if (reserved_va) {
68a1c816
PB
640 mmap_reserve(real_start, real_end - real_start);
641 } else {
642 ret = munmap(g2h(real_start), real_end - real_start);
643 }
54936004
FB
644 }
645
77a8f1a5 646 if (ret == 0) {
c8a706fe 647 page_set_flags(start, start + len, 0);
35865339 648 tb_invalidate_phys_range(start, start + len);
77a8f1a5 649 }
c8a706fe
PB
650 mmap_unlock();
651 return ret;
54936004
FB
652}
653
992f48a0
BS
654abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
655 abi_ulong new_size, unsigned long flags,
656 abi_ulong new_addr)
54936004
FB
657{
658 int prot;
f19412a2 659 void *host_addr;
54936004 660
ebf9a363
MF
661 if (!guest_range_valid(old_addr, old_size) ||
662 ((flags & MREMAP_FIXED) &&
663 !guest_range_valid(new_addr, new_size))) {
664 errno = ENOMEM;
665 return -1;
666 }
667
c8a706fe 668 mmap_lock();
f19412a2 669
68a1c816 670 if (flags & MREMAP_FIXED) {
52956a9b
FJ
671 host_addr = mremap(g2h(old_addr), old_size, new_size,
672 flags, g2h(new_addr));
68a1c816 673
b76f21a7 674 if (reserved_va && host_addr != MAP_FAILED) {
68a1c816
PB
675 /* If new and old addresses overlap then the above mremap will
676 already have failed with EINVAL. */
677 mmap_reserve(old_addr, old_size);
678 }
679 } else if (flags & MREMAP_MAYMOVE) {
f19412a2
AJ
680 abi_ulong mmap_start;
681
30ab9ef2 682 mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);
f19412a2
AJ
683
684 if (mmap_start == -1) {
685 errno = ENOMEM;
686 host_addr = MAP_FAILED;
68a1c816 687 } else {
52956a9b
FJ
688 host_addr = mremap(g2h(old_addr), old_size, new_size,
689 flags | MREMAP_FIXED, g2h(mmap_start));
b76f21a7 690 if (reserved_va) {
c65ffe6d 691 mmap_reserve(old_addr, old_size);
692 }
68a1c816 693 }
3af72a4d 694 } else {
68a1c816 695 int prot = 0;
b76f21a7 696 if (reserved_va && old_size < new_size) {
68a1c816
PB
697 abi_ulong addr;
698 for (addr = old_addr + old_size;
699 addr < old_addr + new_size;
700 addr++) {
701 prot |= page_get_flags(addr);
702 }
703 }
704 if (prot == 0) {
705 host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
b76f21a7 706 if (host_addr != MAP_FAILED && reserved_va && old_size > new_size) {
68a1c816
PB
707 mmap_reserve(old_addr + old_size, new_size - old_size);
708 }
709 } else {
710 errno = ENOMEM;
711 host_addr = MAP_FAILED;
712 }
f19412a2
AJ
713 /* Check if address fits target address space */
714 if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
715 /* Revert mremap() changes */
716 host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
717 errno = ENOMEM;
718 host_addr = MAP_FAILED;
719 }
720 }
721
722 if (host_addr == MAP_FAILED) {
c8a706fe
PB
723 new_addr = -1;
724 } else {
725 new_addr = h2g(host_addr);
726 prot = page_get_flags(old_addr);
727 page_set_flags(old_addr, old_addr + old_size, 0);
728 page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
729 }
35865339 730 tb_invalidate_phys_range(new_addr, new_addr + new_size);
c8a706fe 731 mmap_unlock();
54936004
FB
732 return new_addr;
733}