/*
 * mmap support for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "exec/log.h"
#include "qemu.h"

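/*
 * The mmap lock serializes changes to the guest memory layout made by the
 * target_* routines below.  It is a plain pthread mutex plus a per-thread
 * nesting counter, so a thread may call mmap_lock() recursively; only the
 * outermost lock/unlock touches the mutex itself.
 */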
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}

/* Grab lock to make sure things are in a consistent state after fork(). */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/*
 * Validate target prot bitmask.
 * Return the prot bitmask for the host in *HOST_PROT.
 * Return 0 if the target prot bitmask is invalid, otherwise
 * the internal qemu page_flags (which will include PAGE_VALID).
 */
static int validate_prot_to_pageflags(int *host_prot, int prot)
{
    int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
    int page_flags = (prot & PAGE_BITS) | PAGE_VALID;

    /*
     * For the host, we need not pass anything except read/write/exec.
     * While PROT_SEM is allowed by all hosts, it is also ignored, so
     * don't bother transforming guest bit to host bit. Any other
     * target-specific prot bits will not be understood by the host
     * and will need to be encoded into page_flags for qemu emulation.
     *
     * Pages that are executable by the guest will never be executed
     * by the host, but the host will need to be able to read them.
     */
    *host_prot = (prot & (PROT_READ | PROT_WRITE))
               | (prot & PROT_EXEC ? PROT_READ : 0);

#ifdef TARGET_AARCH64
    {
        ARMCPU *cpu = ARM_CPU(thread_cpu);

        /*
         * The PROT_BTI bit is only accepted if the cpu supports the feature.
         * Since this is the unusual case, don't bother checking unless
         * the bit has been requested. If set and valid, record the bit
         * within QEMU's page_flags.
         */
        if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) {
            valid |= TARGET_PROT_BTI;
            page_flags |= PAGE_BTI;
        }
        /* Similarly for the PROT_MTE bit. */
        if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) {
            valid |= TARGET_PROT_MTE;
            page_flags |= PAGE_MTE;
        }
    }
#endif

    return prot & ~valid ? 0 : page_flags;
}

/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret, page_flags, host_prot;

    trace_target_mprotect(start, len, target_prot);

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (!guest_range_valid_untagged(start, len)) {
        return -TARGET_ENOMEM;
    }
    if (len == 0) {
        return 0;
    }

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
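    /*
     * When the host page size is larger than the target page size, the
     * first and last host pages may also contain other target pages.
     * For those host pages, apply the union of the requested protection
     * and the flags already recorded for the neighbouring target pages.
     */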
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = host_prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = host_prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start),
                       host_end - host_start, host_prot);
        if (ret != 0) {
            goto error;
        }
    }
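    /* Record the new protection in QEMU's per-page flags. */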
    page_set_flags(start, start + len, page_flags);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

/* map an incomplete host page */
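/*
 * The host page starting at REAL_START contains the target range
 * [START, END).  Map that sub-range with protection PROT while keeping
 * whatever already backs the rest of the host page usable.
 */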
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end) {
            prot1 |= page_get_flags(addr);
        }
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return -1;
        }
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if the mapping
           is both shared and writable */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE)) {
            return -1;
        }

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
        }

        /* read the corresponding file data */
        if (pread(fd, g2h_untagged(start), end - start, offset) == -1) {
            return -1;
        }

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h_untagged(start), 0, end - start);
        }
    }
    return 0;
}

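/* Default address at which mmap_find_vma() starts searching when the
   guest does not provide an address hint. */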
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
#ifdef TARGET_AARCH64
# define TASK_UNMAPPED_BASE 0x5500000000
#else
# define TASK_UNMAPPED_BASE (1ul << 38)
#endif
#else
# define TASK_UNMAPPED_BASE 0x40000000
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;

/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space. */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    abi_ulong addr, end_addr, incr = qemu_host_page_size;
    int prot;
    bool looped = false;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    /* Note that start and size have already been aligned by mmap_find_vma. */

    end_addr = start + size;
    if (start > reserved_va - size) {
        /* Start at the top of the address space. */
        end_addr = ((reserved_va - size) & -align) + size;
        looped = true;
    }

    /* Search downward from END_ADDR, checking to see if a page is in use. */
    addr = end_addr;
    while (1) {
        addr -= incr;
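        /* addr is unsigned, so wrapping below zero makes it larger than
           end_addr; that means the search ran off the bottom. */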
        if (addr > end_addr) {
            if (looped) {
                /* Failure. The entire address space has been searched. */
                return (abi_ulong)-1;
            }
            /* Re-start at the top of the address space. */
            addr = end_addr = ((reserved_va - size) & -align) + size;
            looped = true;
        } else {
            prot = page_get_flags(addr);
            if (prot) {
                /* Page in use. Restart below this page. */
                addr = end_addr = ((addr - size) & -align) + size;
            } else if (addr && addr + size == end_addr) {
                /* Success! All pages between ADDR and END_ADDR are free. */
                if (start == mmap_next_start) {
                    mmap_next_start = addr;
                }
                return addr;
            }
        }
    }
}

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 on error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, qemu_host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }
    start = ROUND_UP(start, align);

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = NULL;

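    /*
     * Probe for a free area by asking the host kernel for an anonymous
     * PROT_NONE reservation near 'addr' and checking whether the address
     * it returns is usable and properly aligned for the guest.  The
     * placeholder mapping is released again before each retry.
     */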
    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below. */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success. */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target. */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page. */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead. */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory. */
                addr = 0;
                break;
            default:
                /* Fail. This unaligned block must be the last. */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory. If any repetition, fail. */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again. */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space. */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory. */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    int page_flags, host_prot;

    mmap_lock();
    trace_target_mmap(start, len, target_prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        goto fail;
    }

    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        errno = EINVAL;
        goto fail;
    }

    /* Also check for overflows... */
    len = TARGET_PAGE_ALIGN(len);
    if (!len) {
        errno = ENOMEM;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below. */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmapping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.

       When emulating a target with a larger page size than the host's, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the target's page boundary. */

    if ((qemu_real_host_page_size < qemu_host_page_size) &&
        !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            goto fail;
        }

        /* Are we trying to create a map beyond EOF? */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at EOF aligned with
               the host's real page size. Additional anonymous maps
               will be created beyond EOF. */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h_untagged(start), host_len, host_prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            goto fail;
        }
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h_untagged(start), len, host_prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space.
         * It can fail only on a 64-bit host with a 32-bit target.
         * On any other target/host combination, the host mmap() handles
         * this error correctly.
         */
        if (end < start || !guest_range_valid_untagged(start, len)) {
            errno = ENOMEM;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if the
               mapping is both shared and writable */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (host_prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1) {
                goto fail;
            }
            if (pread(fd, g2h_untagged(start), len, offset) == -1) {
                goto fail;
            }
            if (!(host_prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, target_prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                host_prot, flags, fd, offset);
                if (ret == -1) {
                    goto fail;
                }
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            host_prot, flags, fd, offset);
            if (ret == -1) {
                goto fail;
            }
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            host_prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1) {
                goto fail;
            }
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS) {
                offset1 = 0;
            } else {
                offset1 = offset + real_start - start;
            }
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     host_prot, flags, fd, offset1);
            if (p == MAP_FAILED) {
                goto fail;
            }
        }
    }
the_end1:
    if (flags & MAP_ANONYMOUS) {
        page_flags |= PAGE_ANON;
    }
    page_flags |= PAGE_RESET;
    page_set_flags(start, start + len, page_flags);
the_end:
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        log_page_dump(__func__);
    }
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

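/*
 * With reserved_va, the guest address space is pre-reserved in one block.
 * Instead of truly unmapping a range we replace it with a PROT_NONE,
 * MAP_NORESERVE anonymous mapping so the reservation stays contiguous.
 * Partial host pages that still contain valid guest pages are left alone.
 */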
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}

abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    if (!guest_range_valid_untagged(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid_untagged(new_addr, new_size)) ||
        ((flags & MREMAP_MAYMOVE) == 0 &&
         !guest_range_valid_untagged(old_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                           flags, g2h_untagged(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL. */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED,
                               g2h_untagged(mmap_start));
            if (reserved_va) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
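        /*
         * Neither MREMAP_FIXED nor MREMAP_MAYMOVE: the mapping must stay
         * where it is.  With reserved_va, growing in place is only allowed
         * when the guest pages just above the old mapping are unused.
         */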
        int prot = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h_untagged(old_addr),
                               old_size, new_size, flags);

            if (host_addr != MAP_FAILED) {
                /* Check if address fits target address space */
                if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
                    /* Revert mremap() changes */
                    host_addr = mremap(g2h_untagged(old_addr),
                                       new_size, old_size, flags);
                    errno = ENOMEM;
                    host_addr = MAP_FAILED;
                } else if (reserved_va && old_size > new_size) {
                    /* Re-reserve the tail that the shrink released. */
                    mmap_reserve(old_addr + new_size, old_size - new_size);
                }
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size,
                       prot | PAGE_VALID | PAGE_RESET);
    }
    tb_invalidate_phys_range(new_addr, new_addr + new_size);
    mmap_unlock();
    return new_addr;
}