/*
 * mmap support for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "exec/log.h"
#include "qemu.h"
#include "user-mmap.h"

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0 ? true : false;
}
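
/*
 * Usage sketch: the lock is recursive per thread by way of the __thread
 * counter above, so nested critical sections are safe:
 *
 *     mmap_lock();
 *     ...
 *     mmap_lock();      -- nested call only increments mmap_lock_count
 *     ...
 *     mmap_unlock();    -- lock still held
 *     mmap_unlock();    -- released here
 */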

/* Grab lock to make sure things are in a consistent state after fork(). */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}

/*
 * Validate target prot bitmask.
 * Return the prot bitmask for the host in *HOST_PROT.
 * Return 0 if the target prot bitmask is invalid, otherwise
 * the internal qemu page_flags (which will include PAGE_VALID).
 */
static int validate_prot_to_pageflags(int *host_prot, int prot)
{
    int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
    int page_flags = (prot & PAGE_BITS) | PAGE_VALID;

    /*
     * For the host, we need not pass anything except read/write/exec.
     * While PROT_SEM is allowed by all hosts, it is also ignored, so
     * don't bother transforming guest bit to host bit. Any other
     * target-specific prot bits will not be understood by the host
     * and will need to be encoded into page_flags for qemu emulation.
     *
     * Pages that are executable by the guest will never be executed
     * by the host, but the host will need to be able to read them.
     */
    *host_prot = (prot & (PROT_READ | PROT_WRITE))
               | (prot & PROT_EXEC ? PROT_READ : 0);

#ifdef TARGET_AARCH64
    {
        ARMCPU *cpu = ARM_CPU(thread_cpu);

        /*
         * The PROT_BTI bit is only accepted if the cpu supports the feature.
         * Since this is the unusual case, don't bother checking unless
         * the bit has been requested. If set and valid, record the bit
         * within QEMU's page_flags.
         */
        if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) {
            valid |= TARGET_PROT_BTI;
            page_flags |= PAGE_BTI;
        }
        /* Similarly for the PROT_MTE bit. */
        if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) {
            valid |= TARGET_PROT_MTE;
            page_flags |= PAGE_MTE;
        }
    }
#endif

    return prot & ~valid ? 0 : page_flags;
}
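
/*
 * Worked example (illustrative, assuming the usual Linux encoding where
 * PROT_READ/WRITE/EXEC coincide with QEMU's PAGE_READ/WRITE/EXEC bits):
 * a guest request of PROT_READ | PROT_EXEC yields
 *   *host_prot = PROT_READ                (guest code is only read by the host)
 *   return     = PAGE_READ | PAGE_EXEC | PAGE_VALID
 * while any bit outside 'valid' makes the function return 0, which the
 * callers turn into EINVAL.
 */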

/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret, page_flags, host_prot;

    trace_target_mprotect(start, len, target_prot);

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (!guest_range_valid_untagged(start, len)) {
        return -TARGET_ENOMEM;
    }
    if (len == 0) {
        return 0;
    }

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = host_prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = host_prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start),
                       host_end - host_start, host_prot);
        if (ret != 0) {
            goto error;
        }
    }
    page_set_flags(start, start + len, page_flags);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}
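
/*
 * Illustrative example of the head/tail handling above, assuming a 64K
 * host page backing 4K target pages: an mprotect() of a single guest page
 * covers only 1/16th of its host page, so the host mprotect() of that page
 * must be given the union of the flags of all guest pages sharing it
 * (the prot1 |= page_get_flags() loops); otherwise neighbouring guest
 * pages would silently lose permissions.
 */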

/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (pread(fd, g2h_untagged(start), end - start, offset) == -1)
            return -1;

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h_untagged(start), 0, end - start);
        }
    }
    return 0;
}
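
/*
 * In short: mmap_frag() handles a guest mapping that covers only part of
 * one host page. If nothing was mapped there yet, a fresh anonymous host
 * page is created; otherwise the existing page is reused and, for file
 * mappings, the requested bytes are pread() into place. A writable
 * MAP_SHARED file fragment cannot be emulated this way (the private copy
 * would not stay coherent with the file), so that case is rejected.
 */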

#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
#ifdef TARGET_AARCH64
# define TASK_UNMAPPED_BASE 0x5500000000
#else
# define TASK_UNMAPPED_BASE (1ul << 38)
#endif
#else
# define TASK_UNMAPPED_BASE 0x40000000
#endif
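
/*
 * mmap_next_start is the next candidate address handed out by
 * mmap_find_vma() when the guest does not supply one; it starts at
 * TASK_UNMAPPED_BASE above and is advanced as areas are allocated.
 */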
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;

/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space. */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    abi_ulong addr, end_addr, incr = qemu_host_page_size;
    int prot;
    bool looped = false;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    /* Note that start and size have already been aligned by mmap_find_vma. */

    end_addr = start + size;
    if (start > reserved_va - size) {
        /* Start at the top of the address space. */
        end_addr = ((reserved_va - size) & -align) + size;
        looped = true;
    }

    /* Search downward from END_ADDR, checking to see if a page is in use. */
    addr = end_addr;
    while (1) {
        addr -= incr;
        if (addr > end_addr) {
            if (looped) {
                /* Failure. The entire address space has been searched. */
                return (abi_ulong)-1;
            }
            /* Re-start at the top of the address space. */
            addr = end_addr = ((reserved_va - size) & -align) + size;
            looped = true;
        } else {
            prot = page_get_flags(addr);
            if (prot) {
                /* Page in use. Restart below this page. */
                addr = end_addr = ((addr - size) & -align) + size;
            } else if (addr && addr + size == end_addr) {
                /* Success! All pages between ADDR and END_ADDR are free. */
                if (start == mmap_next_start) {
                    mmap_next_start = addr;
                }
                return addr;
            }
        }
    }
}
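
/*
 * In reserved_va mode the whole guest address space was reserved up front,
 * so no host mmap() probing is needed here: the search above simply walks
 * page_get_flags() downward from the top of the reservation until it finds
 * 'size' contiguous unused guest pages, wrapping around at most once.
 */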

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, qemu_host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }
    start = ROUND_UP(start, align);

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below. */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success. */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target. */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page. */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead. */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory. */
                addr = 0;
                break;
            default:
                /* Fail. This unaligned block must be the last. */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory. If any repetition, fail. */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again. */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space. */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory. */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}
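
/*
 * Technique note: without reserved_va the only reliable way to find a hole
 * that is actually free on the host is to ask the host kernel for one,
 * hence the PROT_NONE/MAP_NORESERVE probe mmap() above. The probe both
 * finds and reserves the area, so the caller can later take it over with
 * MAP_FIXED (or MREMAP_FIXED / SHM_REMAP) without racing other threads;
 * if the returned address does not satisfy the guest's alignment or
 * addressability constraints, it is munmap()ed and the search continues.
 */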

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    int page_flags, host_prot;

    mmap_lock();
    trace_target_mmap(start, len, target_prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        goto fail;
    }

    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        errno = EINVAL;
        goto fail;
    }

    /* Also check for overflows... */
    len = TARGET_PAGE_ALIGN(len);
    if (!len) {
        errno = ENOMEM;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    /*
     * If we're mapping shared memory, ensure we generate code for parallel
     * execution and flush old translations. This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (flags & MAP_SHARED) {
        CPUState *cpu = thread_cpu;
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below. */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmapping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.

       When emulating a target with a larger page size than the host's, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the target's page boundary. */

    if ((qemu_real_host_page_size < qemu_host_page_size) &&
        !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1)
            goto fail;

        /* Are we trying to create a map beyond EOF? */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at EOF aligned with
               the host's real page size. Additional anonymous maps
               will be created beyond EOF. */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h_untagged(start), host_len, host_prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            goto fail;
        }
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h_untagged(start), len, host_prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space.
         * It can fail only on 64-bit host with 32-bit target.
         * On any other target/host combination, host mmap() handles
         * this error correctly.
         */
        if (end < start || !guest_range_valid_untagged(start, len)) {
            errno = ENOMEM;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (host_prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h_untagged(start), len, offset) == -1)
                goto fail;
            if (!(host_prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, target_prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                host_prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            host_prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            host_prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     host_prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    if (flags & MAP_ANONYMOUS) {
        page_flags |= PAGE_ANON;
    }
    page_flags |= PAGE_RESET;
    page_set_flags(start, start + len, page_flags);
 the_end:
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        log_page_dump(__func__);
    }
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
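
/*
 * Overview of the strategy above: for a non-MAP_FIXED request, target_mmap()
 * first grabs a host-page-aligned area with an anonymous MAP_FIXED mapping
 * at the address returned by mmap_find_vma(), then maps the file over it,
 * so the guest address stays under QEMU's control even when
 * qemu_host_page_size != qemu_real_host_page_size. For MAP_FIXED requests
 * the range is split, as in target_mprotect(), into an unaligned head
 * fragment, an unaligned tail fragment and an aligned middle that can be
 * mmap()ed directly.
 */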

static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}
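
/*
 * Note: with reserved_va the guest address space must stay reserved for the
 * lifetime of the process, so "unmapping" is implemented here by re-mapping
 * the range as anonymous PROT_NONE rather than calling munmap(), which
 * would punch a hole that unrelated host allocations could land in.
 */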

int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}
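
/*
 * Note on the partial pages above: the head or tail host page of the
 * unmapped range is only released when no other guest page mapped into the
 * same host page is still in use (prot stays 0); otherwise the host page is
 * kept and only the guest-level flags for the requested range are cleared.
 */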

abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    if (!guest_range_valid_untagged(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid_untagged(new_addr, new_size)) ||
        ((flags & MREMAP_MAYMOVE) == 0 &&
         !guest_range_valid_untagged(old_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                           flags, g2h_untagged(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL. */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED,
                               g2h_untagged(mmap_start));
            if (reserved_va) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        int prot = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h_untagged(old_addr),
                               old_size, new_size, flags);

            if (host_addr != MAP_FAILED) {
                /* Check if address fits target address space */
                if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
                    /* Revert mremap() changes */
                    host_addr = mremap(g2h_untagged(old_addr),
                                       new_size, old_size, flags);
                    errno = ENOMEM;
                    host_addr = MAP_FAILED;
                } else if (reserved_va && old_size > new_size) {
                    mmap_reserve(old_addr + new_size, old_size - new_size);
                }
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size,
                       prot | PAGE_VALID | PAGE_RESET);
    }
    tb_invalidate_phys_range(new_addr, new_addr + new_size);
    mmap_unlock();
    return new_addr;
}
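
/*
 * Note: on success the page flags of the old mapping (as read from its
 * first page) are applied to the new range and any translations covering
 * the new range are invalidated; under reserved_va the pages vacated by a
 * move or in-place shrink are handed back through mmap_reserve() so the
 * overall reservation stays intact.
 */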