/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "exec/log.h"
#include "qemu.h"
#include "user-mmap.h"

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;
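
/*
 * The lock is recursive per thread: only the outermost mmap_lock()
 * acquires the mutex, and only the matching outermost mmap_unlock()
 * releases it, so callers in this file may nest lock acquisitions.
 */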
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0 ? true : false;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}

/*
 * Validate target prot bitmask.
 * Return the prot bitmask for the host in *HOST_PROT.
 * Return 0 if the target prot bitmask is invalid, otherwise
 * the internal qemu page_flags (which will include PAGE_VALID).
 */
static int validate_prot_to_pageflags(int *host_prot, int prot)
{
    int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
    int page_flags = (prot & PAGE_BITS) | PAGE_VALID;

    /*
     * For the host, we need not pass anything except read/write/exec.
     * While PROT_SEM is allowed by all hosts, it is also ignored, so
     * don't bother transforming guest bit to host bit.  Any other
     * target-specific prot bits will not be understood by the host
     * and will need to be encoded into page_flags for qemu emulation.
     *
     * Pages that are executable by the guest will never be executed
     * by the host, but the host will need to be able to read them.
     */
    *host_prot = (prot & (PROT_READ | PROT_WRITE))
               | (prot & PROT_EXEC ? PROT_READ : 0);

#ifdef TARGET_AARCH64
    {
        ARMCPU *cpu = ARM_CPU(thread_cpu);

        /*
         * The PROT_BTI bit is only accepted if the cpu supports the feature.
         * Since this is the unusual case, don't bother checking unless
         * the bit has been requested.  If set and valid, record the bit
         * within QEMU's page_flags.
         */
        if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) {
            valid |= TARGET_PROT_BTI;
            page_flags |= PAGE_BTI;
        }
        /* Similarly for the PROT_MTE bit. */
        if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) {
            valid |= TARGET_PROT_MTE;
            page_flags |= PAGE_MTE;
        }
    }
#endif

    return prot & ~valid ? 0 : page_flags;
}
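
/*
 * The functions below must cope with the guest's TARGET_PAGE_SIZE being
 * smaller than the host page size.  For example (illustrative numbers):
 * with 4 KiB target pages on a 64 KiB-page host, changing the protection
 * of one target page means changing the protection of the whole host page
 * containing it, so the requested protection has to be OR-ed with the
 * flags of every other target page sharing that host page.
 */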

/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret, page_flags, host_prot;

    trace_target_mprotect(start, len, target_prot);

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (!guest_range_valid_untagged(start, len)) {
        return -TARGET_ENOMEM;
    }
    if (len == 0) {
        return 0;
    }

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = host_prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = host_prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start),
                       host_end - host_start, host_prot);
        if (ret != 0) {
            goto error;
        }
    }
    page_set_flags(start, start + len, page_flags);
    mmap_unlock();
    return 0;

 error:
    mmap_unlock();
    return ret;
}
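
/*
 * A host page is "incomplete" when only part of it belongs to the new
 * target mapping.  mmap_frag() below handles three situations: if no
 * target page within the host page is currently mapped, it can map the
 * whole host page anonymously; if the mapping is file-backed, it reads
 * the file contents into the fragment with pread(); if the mapping is
 * anonymous, it zeroes the fragment instead.
 */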
/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (pread(fd, g2h_untagged(start), end - start, offset) == -1)
            return -1;

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h_untagged(start), 0, end - start);
        }
    }
    return 0;
}
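
/*
 * Default base address for mappings when the guest does not request a
 * fixed address; mmap_next_start advances from here as kernel-chosen
 * mappings are handed out.
 */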
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
#ifdef TARGET_AARCH64
# define TASK_UNMAPPED_BASE  0x5500000000
#else
# define TASK_UNMAPPED_BASE  (1ul << 38)
#endif
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;
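
/*
 * The recurring expression ((x - size) & -align) + size below rounds a
 * candidate block start (x - size) down to an 'align' boundary and
 * yields the corresponding block end, so the downward scan only ever
 * considers properly aligned candidates.
 */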
/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    abi_ulong addr, end_addr, incr = qemu_host_page_size;
    int prot;
    bool looped = false;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    /* Note that start and size have already been aligned by mmap_find_vma. */

    end_addr = start + size;
    if (start > reserved_va - size) {
        /* Start at the top of the address space.  */
        end_addr = ((reserved_va - size) & -align) + size;
        looped = true;
    }

    /* Search downward from END_ADDR, checking to see if a page is in use.  */
    addr = end_addr;
    while (1) {
        addr -= incr;
        if (addr > end_addr) {
            if (looped) {
                /* Failure.  The entire address space has been searched.  */
                return (abi_ulong)-1;
            }
            /* Re-start at the top of the address space.  */
            addr = end_addr = ((reserved_va - size) & -align) + size;
            looped = true;
        } else {
            prot = page_get_flags(addr);
            if (prot) {
                /* Page in use.  Restart below this page.  */
                addr = end_addr = ((addr - size) & -align) + size;
            } else if (addr && addr + size == end_addr) {
                /* Success!  All pages between ADDR and END_ADDR are free.  */
                if (start == mmap_next_start) {
                    mmap_next_start = addr;
                }
                return addr;
            }
        }
    }
}
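
/*
 * Without a pre-reserved guest region, free space has to be probed by
 * asking the host kernel: the PROT_NONE, MAP_NORESERVE mapping below
 * reserves an address range without committing memory to it, and is
 * discarded (or replaced via MAP_FIXED) once a decision has been made.
 */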
/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, qemu_host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }
    start = ROUND_UP(start, align);

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         * - mmap() with MAP_FIXED flag
         * - mremap() with MREMAP_FIXED flag
         * - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below.  */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}
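
/*
 * target_mmap() splits a request into up to three pieces: a leading
 * fragment sharing its host page with an existing mapping, a middle
 * section covering whole host pages (mapped directly), and a trailing
 * fragment.  The fragments go through mmap_frag() above.
 */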
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    int page_flags, host_prot;

    mmap_lock();
    trace_target_mmap(start, len, target_prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        goto fail;
    }

    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        errno = EINVAL;
        goto fail;
    }

    /* Also check for overflows... */
    len = TARGET_PAGE_ALIGN(len);
    if (!len) {
        errno = ENOMEM;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    /*
     * If we're mapping shared memory, ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (flags & MAP_SHARED) {
        CPUState *cpu = thread_cpu;
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below.  */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmaping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.

       When emulating a target with a larger page-size than the hosts, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the targets page boundary.  */
    if ((qemu_real_host_page_size < qemu_host_page_size) &&
        !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1)
            goto fail;

        /* Are we trying to create a map beyond EOF?  */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at eof aligned with
               the hosts real pagesize. Additional anonymous maps
               will be created beyond EOF.  */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h_untagged(start), host_len, host_prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            goto fail;
        }
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h_untagged(start), len, host_prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space
         * It can fail only on 64-bit host with 32-bit target.
         * On any other target/host host mmap() handles this error correctly.
         */
        if (end < start || !guest_range_valid_untagged(start, len)) {
            errno = ENOMEM;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (host_prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h_untagged(start), len, offset) == -1)
                goto fail;
            if (!(host_prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, target_prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                host_prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start,
                            real_start + qemu_host_page_size,
                            host_prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            host_prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     host_prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    if (flags & MAP_ANONYMOUS) {
        page_flags |= PAGE_ANON;
    }
    page_flags |= PAGE_RESET;
    page_set_flags(start, start + len, page_flags);
 the_end:
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        log_page_dump(__func__);
    }
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
 fail:
    mmap_unlock();
    return -1;
}
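
/*
 * With reserved_va, unmapped guest regions are not handed back to the
 * host with munmap(); they are replaced by a PROT_NONE, MAP_NORESERVE
 * mapping so the address range stays reserved for the guest.
 */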
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}
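
/*
 * Like target_mprotect(), target_munmap() must leave a host page in
 * place when other target pages within it are still mapped; the prot
 * scans below detect that case, so only fully unused host pages are
 * released.
 */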
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}
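
/*
 * For target_mremap() there are three cases: MREMAP_FIXED moves the
 * mapping to a caller-chosen address, MREMAP_MAYMOVE lets
 * mmap_find_vma() choose one, and otherwise the mapping can only be
 * resized in place, which in reserved_va mode first requires checking
 * that the pages beyond the old mapping are free.
 */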
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    if (!guest_range_valid_untagged(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid_untagged(new_addr, new_size)) ||
        ((flags & MREMAP_MAYMOVE) == 0 &&
         !guest_range_valid_untagged(old_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                           flags, g2h_untagged(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL.  */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED,
                               g2h_untagged(mmap_start));
            if (reserved_va) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        int prot = 0;

        if (reserved_va && old_size < new_size) {
            abi_ulong addr;

            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h_untagged(old_addr),
                               old_size, new_size, flags);

            if (host_addr != MAP_FAILED) {
                /* Check if address fits target address space */
                if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
                    /* Revert mremap() changes */
                    host_addr = mremap(g2h_untagged(old_addr),
                                       new_size, old_size, flags);
                    errno = ENOMEM;
                    host_addr = MAP_FAILED;
                } else if (reserved_va && old_size > new_size) {
                    mmap_reserve(old_addr + old_size, old_size - new_size);
                }
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size,
                       prot | PAGE_VALID | PAGE_RESET);
    }
    tb_invalidate_phys_range(new_addr, new_addr + new_size);
    mmap_unlock();
    return new_addr;
}