]> git.proxmox.com Git - mirror_qemu.git/blame - linux-user/mmap.c
meson: remove duplicate qxl sources
[mirror_qemu.git] / linux-user / mmap.c
CommitLineData
54936004
FB
1/*
2 * mmap support for qemu
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
8167ee88 17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
d39594e9 19#include "qemu/osdep.h"
11d96056 20#include "trace.h"
10d0d505 21#include "exec/log.h"
54936004
FB
22#include "qemu.h"
23
1e6eec8b 24static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
dfd3f85c 25static __thread int mmap_lock_count;
c8a706fe
PB
26
27void mmap_lock(void)
28{
29 if (mmap_lock_count++ == 0) {
30 pthread_mutex_lock(&mmap_mutex);
31 }
32}
33
34void mmap_unlock(void)
35{
36 if (--mmap_lock_count == 0) {
37 pthread_mutex_unlock(&mmap_mutex);
38 }
39}
d5975363 40
301e40ed
AB
41bool have_mmap_lock(void)
42{
43 return mmap_lock_count > 0 ? true : false;
44}
45
d5975363
PB
46/* Grab lock to make sure things are in a consistent state after fork(). */
47void mmap_fork_start(void)
48{
49 if (mmap_lock_count)
50 abort();
51 pthread_mutex_lock(&mmap_mutex);
52}
53
54void mmap_fork_end(int child)
55{
56 if (child)
57 pthread_mutex_init(&mmap_mutex, NULL);
58 else
59 pthread_mutex_unlock(&mmap_mutex);
60}
c8a706fe 61
9dba3ca5
RH
62/*
63 * Validate target prot bitmask.
64 * Return the prot bitmask for the host in *HOST_PROT.
65 * Return 0 if the target prot bitmask is invalid, otherwise
66 * the internal qemu page_flags (which will include PAGE_VALID).
67 */
68static int validate_prot_to_pageflags(int *host_prot, int prot)
69{
70 int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
71 int page_flags = (prot & PAGE_BITS) | PAGE_VALID;
72
73 /*
74 * For the host, we need not pass anything except read/write/exec.
75 * While PROT_SEM is allowed by all hosts, it is also ignored, so
76 * don't bother transforming guest bit to host bit. Any other
77 * target-specific prot bits will not be understood by the host
78 * and will need to be encoded into page_flags for qemu emulation.
4eaa960d
RH
79 *
80 * Pages that are executable by the guest will never be executed
81 * by the host, but the host will need to be able to read them.
9dba3ca5 82 */
4eaa960d
RH
83 *host_prot = (prot & (PROT_READ | PROT_WRITE))
84 | (prot & PROT_EXEC ? PROT_READ : 0);
9dba3ca5
RH
85
86 return prot & ~valid ? 0 : page_flags;
87}
88
/* NOTE: all the constants are the HOST ones, but addresses are target. */
/*
 * Guest mprotect() emulation.  Because the target page size may be
 * smaller than the host page size, a host page at either edge of the
 * range can be shared with target pages outside the request; those
 * edge host pages are re-protected with the union of the flags of
 * every target page they contain so neighbours are not broken.
 *
 * Returns 0 on success, -TARGET_EINVAL/-TARGET_ENOMEM for bad
 * arguments.  On host mprotect() failure the host's raw return value
 * (-1) is propagated; errno handling is presumably done by the caller
 * — TODO confirm against the syscall layer.
 */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret, page_flags, host_prot;

    trace_target_mprotect(start, len, target_prot);

    /* start must be target-page aligned. */
    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (!guest_range_valid(start, len)) {
        return -TARGET_ENOMEM;
    }
    if (len == 0) {
        return 0;
    }

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = host_prot;
        /* Fold in the flags of target pages before 'start' that share
           this host page. */
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        /* If the whole range fits in one host page, the tail fragment
           is handled here too. */
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        /* Handle the host page containing the unaligned end. */
        prot1 = host_prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, host_prot);
        if (ret != 0) {
            goto error;
        }
    }
    /* Record the new flags for the whole target range. */
    page_set_flags(start, start + len, page_flags);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}
162
/* map an incomplete host page */
/*
 * Map the target-page fragment [start, end) that lives inside the host
 * page starting at real_start.  Existing target pages in the same host
 * page must keep working, so the host page's final protection is the
 * union of the new prot and the flags of those neighbours.
 * Returns 0 on success, -1 on failure (errno from mmap/pread).
 */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    /* NOTE(review): this loop steps by 1 byte rather than
       TARGET_PAGE_SIZE; it is correct but slow — confirm whether
       page-sized steps would suffice before changing. */
    prot1 = 0;
    for(addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (pread(fd, g2h(start), end - start, offset) == -1)
            return -1;

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        /* Anonymous mappings must read back as zero; clear the
           fragment if it is writable. */
        if (prot_new & PROT_WRITE) {
            memset(g2h(start), 0, end - start);
        }
    }
    return 0;
}
221
/*
 * Default base address at which mmap_find_vma starts searching when the
 * caller does not request a specific address.
 */
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
#ifdef TARGET_AARCH64
# define TASK_UNMAPPED_BASE 0x5500000000
#else
# define TASK_UNMAPPED_BASE (1ul << 38)
#endif
#else
# define TASK_UNMAPPED_BASE 0x40000000
#endif
/* Search hint: advanced past each successful allocation by mmap_find_vma. */
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

/* NOTE(review): not referenced in this file; presumably maintained by
   the brk() emulation elsewhere — confirm before relying on it. */
unsigned long last_brk;
234
68a1c816
PB
/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
/*
 * Searches downward through the reserved VA region for 'size' bytes of
 * pages with no page flags set, preferring addresses at or below
 * 'start' + 'size' and wrapping once to the top of the region.
 * Returns the chosen guest address, or (abi_ulong)-1 on failure.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    abi_ulong addr, end_addr, incr = qemu_host_page_size;
    int prot;
    bool looped = false;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    /* Note that start and size have already been aligned by mmap_find_vma. */

    end_addr = start + size;
    if (start > reserved_va - size) {
        /* Start at the top of the address space. */
        end_addr = ((reserved_va - size) & -align) + size;
        looped = true;
    }

    /* Search downward from END_ADDR, checking to see if a page is in use. */
    addr = end_addr;
    while (1) {
        addr -= incr;
        /* Unsigned underflow below 0 shows up as addr > end_addr. */
        if (addr > end_addr) {
            if (looped) {
                /* Failure.  The entire address space has been searched. */
                return (abi_ulong)-1;
            }
            /* Re-start at the top of the address space. */
            addr = end_addr = ((reserved_va - size) & -align) + size;
            looped = true;
        } else {
            prot = page_get_flags(addr);
            if (prot) {
                /* Page in use.  Restart below this page.  */
                addr = end_addr = ((addr - size) & -align) + size;
            } else if (addr && addr + size == end_addr) {
                /* Success!  All pages between ADDR and END_ADDR are free. */
                if (start == mmap_next_start) {
                    mmap_next_start = addr;
                }
                return addr;
            }
        }
    }
}
284
fe3b4152
KS
/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 *
 * The reservation is made with a PROT_NONE host mmap; the caller is
 * expected to replace it (MAP_FIXED mmap, MREMAP_FIXED mremap, or
 * SHM_REMAP shmat) rather than munmap it.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    /* Never search with a granularity finer than a host page. */
    align = MAX(align, qemu_host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }
    start = ROUND_UP(start, align);

    size = HOST_PAGE_ALIGN(size);

    /* With a pre-reserved guest VA block, search inside it instead. */
    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below. */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}
397
/* NOTE: all the constants are the HOST ones */
/*
 * Guest mmap() emulation.
 *
 * Handles the mismatch between target and host page sizes:
 *  - without MAP_FIXED, a host region is found/reserved first and the
 *    file (if any) is mapped over it;
 *  - with MAP_FIXED, partially-covered host pages at either edge are
 *    filled via mmap_frag(), and an unaligned file offset degrades to
 *    an anonymous mapping populated with pread().
 *
 * Returns the guest start address, or -1 with errno set on failure.
 */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    int page_flags, host_prot;

    mmap_lock();
    trace_target_mmap(start, len, target_prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        goto fail;
    }

    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        errno = EINVAL;
        goto fail;
    }

    /* Also check for overflows... */
    len = TARGET_PAGE_ALIGN(len);
    if (!len) {
        errno = ENOMEM;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below.  */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmaping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.

       When emulating a target with a larger page-size than the hosts, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the targets page boundary.  */

    if ((qemu_real_host_page_size < qemu_host_page_size) &&
        !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat (fd, &sb) == -1)
            goto fail;

        /* Are we trying to create a map beyond EOF?.  */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at eof aligned with
               the hosts real pagesize. Additional anonymous maps
               will be created beyond EOF.  */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(start), host_len, host_prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            goto fail;
        }
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            /* Overlay the file mapping on top of the anonymous
               reservation made just above. */
            p = mmap(g2h(start), len, host_prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space
         * It can fail only on 64-bit host with 32-bit target.
         * On any other target/host host mmap() handles this error correctly.
         */
        if (end < start || !guest_range_valid(start, len)) {
            errno = ENOMEM;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (host_prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            /* Map anonymous writable memory, then copy the file data
               into it with pread(). */
            retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h(start), len, offset) == -1)
                goto fail;
            /* Drop the temporary write permission if the caller did
               not ask for it. */
            if (!(host_prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, target_prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                host_prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            host_prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            host_prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     host_prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len, page_flags);
 the_end:
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        log_page_dump(__func__);
    }
    /* Any cached translations covering the new mapping are now stale. */
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
599
68a1c816
PB
/*
 * Re-reserve [start, start+size) inside the pre-reserved guest VA block
 * instead of truly unmapping it: the range is replaced with a PROT_NONE
 * anonymous mapping so the host cannot reuse the addresses.  Edge host
 * pages still containing live target pages (non-zero page flags) are
 * left untouched.
 */
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        /* Neighbouring target pages are live; skip this host page. */
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    if (real_start != real_end) {
        mmap(g2h(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}
640
/*
 * Guest munmap() emulation.  Shrinks the range so that host pages
 * still partially occupied by other target pages are not unmapped;
 * with reserved_va set, the freed range is re-reserved PROT_NONE via
 * mmap_reserve() instead of being returned to the host.
 * Returns 0 on success or a negative TARGET_* errno / munmap result.
 */
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid(start, len)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        /* Collect flags of target pages sharing the first host page. */
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        /* Still in use by a neighbour: keep this host page mapped. */
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        /* Clear page flags and drop stale cached translations. */
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}
701
992f48a0
BS
702abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
703 abi_ulong new_size, unsigned long flags,
704 abi_ulong new_addr)
54936004
FB
705{
706 int prot;
f19412a2 707 void *host_addr;
54936004 708
ebf9a363
MF
709 if (!guest_range_valid(old_addr, old_size) ||
710 ((flags & MREMAP_FIXED) &&
711 !guest_range_valid(new_addr, new_size))) {
712 errno = ENOMEM;
713 return -1;
714 }
715
c8a706fe 716 mmap_lock();
f19412a2 717
68a1c816 718 if (flags & MREMAP_FIXED) {
52956a9b
FJ
719 host_addr = mremap(g2h(old_addr), old_size, new_size,
720 flags, g2h(new_addr));
68a1c816 721
b76f21a7 722 if (reserved_va && host_addr != MAP_FAILED) {
68a1c816
PB
723 /* If new and old addresses overlap then the above mremap will
724 already have failed with EINVAL. */
725 mmap_reserve(old_addr, old_size);
726 }
727 } else if (flags & MREMAP_MAYMOVE) {
f19412a2
AJ
728 abi_ulong mmap_start;
729
30ab9ef2 730 mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);
f19412a2
AJ
731
732 if (mmap_start == -1) {
733 errno = ENOMEM;
734 host_addr = MAP_FAILED;
68a1c816 735 } else {
52956a9b
FJ
736 host_addr = mremap(g2h(old_addr), old_size, new_size,
737 flags | MREMAP_FIXED, g2h(mmap_start));
b76f21a7 738 if (reserved_va) {
c65ffe6d 739 mmap_reserve(old_addr, old_size);
740 }
68a1c816 741 }
3af72a4d 742 } else {
68a1c816 743 int prot = 0;
b76f21a7 744 if (reserved_va && old_size < new_size) {
68a1c816
PB
745 abi_ulong addr;
746 for (addr = old_addr + old_size;
747 addr < old_addr + new_size;
748 addr++) {
749 prot |= page_get_flags(addr);
750 }
751 }
752 if (prot == 0) {
753 host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
b76f21a7 754 if (host_addr != MAP_FAILED && reserved_va && old_size > new_size) {
257a7e21 755 mmap_reserve(old_addr + old_size, old_size - new_size);
68a1c816
PB
756 }
757 } else {
758 errno = ENOMEM;
759 host_addr = MAP_FAILED;
760 }
f19412a2
AJ
761 /* Check if address fits target address space */
762 if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
763 /* Revert mremap() changes */
764 host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
765 errno = ENOMEM;
766 host_addr = MAP_FAILED;
767 }
768 }
769
770 if (host_addr == MAP_FAILED) {
c8a706fe
PB
771 new_addr = -1;
772 } else {
773 new_addr = h2g(host_addr);
774 prot = page_get_flags(old_addr);
775 page_set_flags(old_addr, old_addr + old_size, 0);
776 page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
777 }
35865339 778 tb_invalidate_phys_range(new_addr, new_addr + new_size);
c8a706fe 779 mmap_unlock();
54936004
FB
780 return new_addr;
781}