/*
 * mmap support for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "exec/log.h"
#include "qemu.h"

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}
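
/*
 * Usage sketch (illustrative, not a caller from this file): any code that
 * inspects or updates guest page state brackets the work with the lock,
 * e.g.
 *
 *     mmap_lock();
 *     int flags = page_get_flags(guest_addr);
 *     ...
 *     mmap_unlock();
 *
 * mmap_lock_count is per-thread, so the lock nests safely within a thread.
 */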

/* Grab lock to make sure things are in a consistent state after fork(). */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/*
 * Validate target prot bitmask.
 * Return the prot bitmask for the host in *HOST_PROT.
 * Return 0 if the target prot bitmask is invalid, otherwise
 * the internal qemu page_flags (which will include PAGE_VALID).
 */
static int validate_prot_to_pageflags(int *host_prot, int prot)
{
    int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
    int page_flags = (prot & PAGE_BITS) | PAGE_VALID;

    /*
     * For the host, we need not pass anything except read/write/exec.
     * While PROT_SEM is allowed by all hosts, it is also ignored, so
     * don't bother transforming guest bit to host bit. Any other
     * target-specific prot bits will not be understood by the host
     * and will need to be encoded into page_flags for qemu emulation.
     *
     * Pages that are executable by the guest will never be executed
     * by the host, but the host will need to be able to read them.
     */
    *host_prot = (prot & (PROT_READ | PROT_WRITE))
               | (prot & PROT_EXEC ? PROT_READ : 0);

#ifdef TARGET_AARCH64
    {
        ARMCPU *cpu = ARM_CPU(thread_cpu);

        /*
         * The PROT_BTI bit is only accepted if the cpu supports the feature.
         * Since this is the unusual case, don't bother checking unless
         * the bit has been requested. If set and valid, record the bit
         * within QEMU's page_flags.
         */
        if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) {
            valid |= TARGET_PROT_BTI;
            page_flags |= PAGE_BTI;
        }
        /* Similarly for the PROT_MTE bit. */
        if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) {
            valid |= TARGET_PROT_MTE;
            page_flags |= PAGE_MTE;
        }
    }
#endif

    return prot & ~valid ? 0 : page_flags;
}

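/*
 * Worked example (illustrative, not from the original source): for
 * target_prot == PROT_READ | PROT_EXEC, *host_prot becomes just PROT_READ,
 * since guest-executable pages are only ever read by the host, while the
 * returned page_flags keep the read and exec bits plus PAGE_VALID. Any bit
 * outside 'valid' makes the function return 0, which callers turn into
 * -TARGET_EINVAL or errno = EINVAL.
 */
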
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret, page_flags, host_prot;

    trace_target_mprotect(start, len, target_prot);

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (!guest_range_valid_untagged(start, len)) {
        return -TARGET_ENOMEM;
    }
    if (len == 0) {
        return 0;
    }

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = host_prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = host_prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start),
                       host_end - host_start, host_prot);
        if (ret != 0) {
            goto error;
        }
    }
    page_set_flags(start, start + len, page_flags);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}
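
/*
 * Illustrative scenario (hypothetical sizes): with 4K guest pages on a
 * 64K-page host, one host page covers 16 guest pages. mprotect'ing only
 * some of them takes the head/tail paths above: the bordering host page
 * receives the OR of the new prot and the existing flags of its other
 * guest pages, so no neighbour loses access it still needs.
 */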

/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end) {
            prot1 |= page_get_flags(addr);
        }
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return -1;
        }
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED && (prot & PROT_WRITE)) {
            return -1;
        }

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
        }

        /* read the corresponding file data */
        if (pread(fd, g2h_untagged(start), end - start, offset) == -1) {
            return -1;
        }

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h_untagged(start), 0, end - start);
        }
    }
    return 0;
}
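
/*
 * Example (hypothetical layout, same 4K-guest/64K-host setup as above): a
 * file mapping that begins in the middle of a host page cannot be handed
 * to the host mmap() directly. mmap_frag() keeps (or anonymously creates)
 * the host page, temporarily makes it writable if necessary, pread()s the
 * file bytes for [start, end), then applies the combined protection,
 * emulating the sub-host-page mapping by hand.
 */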

#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
#ifdef TARGET_AARCH64
# define TASK_UNMAPPED_BASE  0x5500000000
#else
# define TASK_UNMAPPED_BASE  (1ul << 38)
#endif
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;

/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space. */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    abi_ulong addr, end_addr, incr = qemu_host_page_size;
    int prot;
    bool looped = false;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    /* Note that start and size have already been aligned by mmap_find_vma. */

    end_addr = start + size;
    if (start > reserved_va - size) {
        /* Start at the top of the address space. */
        end_addr = ((reserved_va - size) & -align) + size;
        looped = true;
    }

    /* Search downward from END_ADDR, checking to see if a page is in use. */
    addr = end_addr;
    while (1) {
        addr -= incr;
        if (addr > end_addr) {
            if (looped) {
                /* Failure. The entire address space has been searched. */
                return (abi_ulong)-1;
            }
            /* Re-start at the top of the address space. */
            addr = end_addr = ((reserved_va - size) & -align) + size;
            looped = true;
        } else {
            prot = page_get_flags(addr);
            if (prot) {
                /* Page in use. Restart below this page. */
                addr = end_addr = ((addr - size) & -align) + size;
            } else if (addr && addr + size == end_addr) {
                /* Success! All pages between ADDR and END_ADDR are free. */
                if (start == mmap_next_start) {
                    mmap_next_start = addr;
                }
                return addr;
            }
        }
    }
}

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, qemu_host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }
    start = ROUND_UP(start, align);

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below. */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success. */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target. */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page. */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead. */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory. */
                addr = 0;
                break;
            default:
                /* Fail. This unaligned block must be the last. */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory. If any repetition, fail. */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again. */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space. */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory. */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}
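
/*
 * Usage sketch (taken from target_mmap() below): the caller claims the
 * returned reservation with a MAP_FIXED mapping, one of the three discard
 * methods listed in the loop comment above:
 *
 *     start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
 *     ...
 *     p = mmap(g2h_untagged(start), host_len, host_prot,
 *              flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
 */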

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    int page_flags, host_prot;

    mmap_lock();
    trace_target_mmap(start, len, target_prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        goto fail;
    }

    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        errno = EINVAL;
        goto fail;
    }

    /* Also check for overflows... */
    len = TARGET_PAGE_ALIGN(len);
    if (!len) {
        errno = ENOMEM;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    /*
     * If we're mapping shared memory, ensure we generate code for parallel
     * execution and flush old translations. This will work up to the level
     * supported by the host; anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (flags & MAP_SHARED) {
        CPUState *cpu = thread_cpu;
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below. */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmap'ing a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.

       When emulating a target with a larger page size than the host's, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the target's page boundary. */

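    /*
     * Continuing the example above with hypothetical numbers: for a
     * 100-byte file at offset 0, REAL_HOST_PAGE_ALIGN(100 - 0) truncates
     * len to 4K, and the range from there up to the 8K target page
     * boundary stays covered by anonymous pages (the MAP_ANONYMOUS
     * reservation below, or mmap_frag() in the MAP_FIXED path).
     */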
    if ((qemu_real_host_page_size < qemu_host_page_size) &&
        !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            goto fail;
        }

        /* Are we trying to create a map beyond EOF? */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at EOF, aligned to the host's
               real page size. Additional anonymous maps will be created
               beyond EOF. */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size. */
        p = mmap(g2h_untagged(start), host_len, host_prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            goto fail;
        }
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h_untagged(start), len, host_prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space.
         * It can fail only on 64-bit host with 32-bit target.
         * On any other target/host combination, the host mmap() handles
         * this error correctly.
         */
        if (end < start || !guest_range_valid_untagged(start, len)) {
            errno = ENOMEM;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (host_prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1) {
                goto fail;
            }
            if (pread(fd, g2h_untagged(start), len, offset) == -1) {
                goto fail;
            }
            if (!(host_prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, target_prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                host_prot, flags, fd, offset);
                if (ret == -1) {
                    goto fail;
                }
                goto the_end1;
            }
            ret = mmap_frag(real_start, start,
                            real_start + qemu_host_page_size,
                            host_prot, flags, fd, offset);
            if (ret == -1) {
                goto fail;
            }
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            host_prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1) {
                goto fail;
            }
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;

            if (flags & MAP_ANONYMOUS) {
                offset1 = 0;
            } else {
                offset1 = offset + real_start - start;
            }
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     host_prot, flags, fd, offset1);
            if (p == MAP_FAILED) {
                goto fail;
            }
        }
    }
 the_end1:
    if (flags & MAP_ANONYMOUS) {
        page_flags |= PAGE_ANON;
    }
    page_flags |= PAGE_RESET;
    page_set_flags(start, start + len, page_flags);
 the_end:
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        log_page_dump(__func__);
    }
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

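/*
 * With reserved_va, the guest address space is backed by one large host
 * reservation that must not be returned to the host kernel. Instead of
 * munmap(), re-cover [start, start + size) with a PROT_NONE placeholder,
 * keeping host pages whose other guest pages are still in use.
 */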
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}

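/*
 * Summary of the cases below (editorial note, not from the original
 * comments): MREMAP_FIXED remaps directly onto new_addr; MREMAP_MAYMOVE
 * picks a destination with mmap_find_vma() and remaps there; otherwise
 * the mapping may only be resized in place, which is refused when growing
 * and the guest pages just past the old end are already in use.
 */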
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    if (!guest_range_valid_untagged(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid_untagged(new_addr, new_size)) ||
        ((flags & MREMAP_MAYMOVE) == 0 &&
         !guest_range_valid_untagged(old_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                           flags, g2h_untagged(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL. */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED,
                               g2h_untagged(mmap_start));
            if (reserved_va) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        int prot = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h_untagged(old_addr),
                               old_size, new_size, flags);

            if (host_addr != MAP_FAILED) {
                /* Check if address fits target address space */
                if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
                    /* Revert mremap() changes */
                    host_addr = mremap(g2h_untagged(old_addr),
                                       new_size, old_size, flags);
                    errno = ENOMEM;
                    host_addr = MAP_FAILED;
                } else if (reserved_va && old_size > new_size) {
                    mmap_reserve(old_addr + old_size, old_size - new_size);
                }
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size,
                       prot | PAGE_VALID | PAGE_RESET);
    }
    tb_invalidate_phys_range(new_addr, new_addr + new_size);
    mmap_unlock();
    return new_addr;
}