linux-user: Split out mmap_end
/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include <sys/shm.h>
#include "trace.h"
#include "exec/log.h"
#include "qemu.h"
#include "user-internals.h"
#include "user-mmap.h"
#include "target_mman.h"
#include "qemu/interval-tree.h"

#ifdef TARGET_ARM
#include "target/arm/cpu-features.h"
#endif

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    assert(mmap_lock_count > 0);
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}
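
/*
 * Illustrative sketch (not part of the original file): the lock is
 * recursive per thread, so nested sections are safe.  A hypothetical
 * caller that inspects guest page flags would bracket the work like so:
 *
 *     mmap_lock();
 *     mmap_lock();                        // nested acquire, count == 2
 *     assert(have_mmap_lock());
 *     int flags = page_get_flags(addr);   // guest layout is stable here
 *     mmap_unlock();
 *     mmap_unlock();                      // count drops to 0, mutex released
 */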

/* Grab lock to make sure things are in a consistent state after fork(). */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}
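
/*
 * Illustrative sketch (assumed caller, not from this file): the fork
 * path of the syscall emulation is expected to pair these as
 *
 *     mmap_fork_start();                  // take mmap_mutex in the parent
 *     pid_t pid = fork();
 *     mmap_fork_end(pid == 0);            // child re-inits, parent unlocks
 *
 * so that mmap_mutex is held across fork() and the child does not
 * inherit a mutex locked by a thread that no longer exists.
 */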

/* Protected by mmap_lock. */
static IntervalTreeRoot shm_regions;

static void shm_region_add(abi_ptr start, abi_ptr last)
{
    IntervalTreeNode *i = g_new0(IntervalTreeNode, 1);

    i->start = start;
    i->last = last;
    interval_tree_insert(i, &shm_regions);
}

static abi_ptr shm_region_find(abi_ptr start)
{
    IntervalTreeNode *i;

    for (i = interval_tree_iter_first(&shm_regions, start, start); i;
         i = interval_tree_iter_next(i, start, start)) {
        if (i->start == start) {
            return i->last;
        }
    }
    return 0;
}

static void shm_region_rm_complete(abi_ptr start, abi_ptr last)
{
    IntervalTreeNode *i, *n;

    for (i = interval_tree_iter_first(&shm_regions, start, last); i; i = n) {
        n = interval_tree_iter_next(i, start, last);
        if (i->start >= start && i->last <= last) {
            interval_tree_remove(i, &shm_regions);
            g_free(i);
        }
    }
}
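
/*
 * Worked example (illustrative, made-up addresses): after
 * shm_region_add(0x1000, 0x2fff), shm_region_find(0x1000) returns
 * 0x2fff, while shm_region_find(0x2000) returns 0 because only exact
 * start addresses match.  shm_region_rm_complete(0x0000, 0x3fff) then
 * removes the region, since it lies entirely within the given range;
 * a region only partially covered by the range is left in place.
 */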

/*
 * Validate target prot bitmask.
 * Return 0 if the target prot bitmask is invalid, otherwise
 * the internal qemu page_flags (which will include PAGE_VALID).
 */
static int validate_prot_to_pageflags(int prot)
{
    int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
    int page_flags = (prot & PAGE_BITS) | PAGE_VALID;

#ifdef TARGET_AARCH64
    {
        ARMCPU *cpu = ARM_CPU(thread_cpu);

        /*
         * The PROT_BTI bit is only accepted if the cpu supports the feature.
         * Since this is the unusual case, don't bother checking unless
         * the bit has been requested.  If set and valid, record the bit
         * within QEMU's page_flags.
         */
        if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) {
            valid |= TARGET_PROT_BTI;
            page_flags |= PAGE_BTI;
        }
        /* Similarly for the PROT_MTE bit. */
        if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) {
            valid |= TARGET_PROT_MTE;
            page_flags |= PAGE_MTE;
        }
    }
#elif defined(TARGET_HPPA)
    valid |= PROT_GROWSDOWN | PROT_GROWSUP;
#endif

    return prot & ~valid ? 0 : page_flags;
}
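
/*
 * Example (illustrative): since the PROT_* and PAGE_* bit values line
 * up, as the masking with PAGE_BITS above assumes,
 * validate_prot_to_pageflags(PROT_READ | PROT_WRITE) yields
 * PAGE_READ | PAGE_WRITE | PAGE_VALID.  A request carrying a bit
 * outside the accepted set (for instance TARGET_PROT_BTI when the cpu
 * lacks the BTI feature) yields 0, and the callers turn that into
 * EINVAL / -TARGET_EINVAL.
 */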

/*
 * For the host, we need not pass anything except read/write/exec.
 * While PROT_SEM is allowed by all hosts, it is also ignored, so
 * don't bother transforming guest bits to host bits.  Any other
 * target-specific prot bits will not be understood by the host
 * and will need to be encoded into page_flags for qemu emulation.
 *
 * Pages that are executable by the guest will never be executed
 * by the host, but the host will need to be able to read them.
 */
static int target_to_host_prot(int prot)
{
    return (prot & (PROT_READ | PROT_WRITE)) |
           (prot & PROT_EXEC ? PROT_READ : 0);
}
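
/*
 * Worked example: a guest mapping with PROT_EXEC only becomes PROT_READ
 * on the host, because the translator only ever needs to read guest code:
 *
 *     target_to_host_prot(PROT_EXEC)             == PROT_READ
 *     target_to_host_prot(PROT_READ | PROT_EXEC) == PROT_READ
 *     target_to_host_prot(PROT_WRITE)            == PROT_WRITE
 */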

/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong starts[3];
    abi_ulong lens[3];
    int prots[3];
    abi_ulong host_start, host_last, last;
    int prot1, ret, page_flags, nranges;

    trace_target_mprotect(start, len, target_prot);

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    if (len == 0) {
        return 0;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (!guest_range_valid_untagged(start, len)) {
        return -TARGET_ENOMEM;
    }

    last = start + len - 1;
    host_start = start & -host_page_size;
    host_last = ROUND_UP(last, host_page_size) - 1;
    nranges = 0;

    mmap_lock();

    if (host_last - host_start < host_page_size) {
        /* Single host page contains all guest pages: sum the prot. */
        prot1 = target_prot;
        for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(a);
        }
        for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(a + 1);
        }
        starts[nranges] = host_start;
        lens[nranges] = host_page_size;
        prots[nranges] = prot1;
        nranges++;
    } else {
        if (host_start < start) {
            /* Host page contains more than one guest page: sum the prot. */
            prot1 = target_prot;
            for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(a);
            }
            /* If the resulting sum differs, create a new range. */
            if (prot1 != target_prot) {
                starts[nranges] = host_start;
                lens[nranges] = host_page_size;
                prots[nranges] = prot1;
                nranges++;
                host_start += host_page_size;
            }
        }

        if (last < host_last) {
            /* Host page contains more than one guest page: sum the prot. */
            prot1 = target_prot;
            for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(a + 1);
            }
            /* If the resulting sum differs, create a new range. */
            if (prot1 != target_prot) {
                host_last -= host_page_size;
                starts[nranges] = host_last + 1;
                lens[nranges] = host_page_size;
                prots[nranges] = prot1;
                nranges++;
            }
        }

        /* Create a range for the middle, if any remains. */
        if (host_start < host_last) {
            starts[nranges] = host_start;
            lens[nranges] = host_last - host_start + 1;
            prots[nranges] = target_prot;
            nranges++;
        }
    }

    for (int i = 0; i < nranges; ++i) {
        ret = mprotect(g2h_untagged(starts[i]), lens[i],
                       target_to_host_prot(prots[i]));
        if (ret != 0) {
            goto error;
        }
    }

    page_set_flags(start, last, page_flags);
    ret = 0;

 error:
    mmap_unlock();
    return ret;
}
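
/*
 * Worked example (illustrative page sizes): with 64K host pages and 4K
 * target pages, target_mprotect(0x11000, 0x20000, PROT_READ) touches
 * host pages 0x10000-0x1ffff and 0x30000-0x3ffff only partially.  Each
 * partial host page gets the OR of the protections of all guest pages
 * it contains, and the fully covered middle (0x20000-0x2ffff) gets
 * PROT_READ directly -- at most the three ranges built above.
 */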

/* Map an incomplete host page. */
static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
                      int prot, int flags, int fd, off_t offset)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong real_last;
    void *host_start;
    int prot_old, prot_new;
    int host_prot_old, host_prot_new;

    if (!(flags & MAP_ANONYMOUS)
        && (flags & MAP_TYPE) == MAP_SHARED
        && (prot & PROT_WRITE)) {
        /*
         * msync() won't work with the partial page, so we return an
         * error if write is possible while it is a shared mapping.
         */
        errno = EINVAL;
        return false;
    }

    real_last = real_start + host_page_size - 1;
    host_start = g2h_untagged(real_start);

    /* Get the protection of the target pages outside the mapping. */
    prot_old = 0;
    for (abi_ulong a = real_start; a < start; a += TARGET_PAGE_SIZE) {
        prot_old |= page_get_flags(a);
    }
    for (abi_ulong a = real_last; a > last; a -= TARGET_PAGE_SIZE) {
        prot_old |= page_get_flags(a);
    }

    if (prot_old == 0) {
        /*
         * Since !(prot_old & PAGE_VALID), there were no guest pages
         * outside of the fragment we need to map.  Allocate a new host
         * page to cover, discarding whatever else may have been present.
         */
        void *p = mmap(host_start, host_page_size,
                       target_to_host_prot(prot),
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p != host_start) {
            if (p != MAP_FAILED) {
                munmap(p, host_page_size);
                errno = EEXIST;
            }
            return false;
        }
        prot_old = prot;
    }
    prot_new = prot | prot_old;

    host_prot_old = target_to_host_prot(prot_old);
    host_prot_new = target_to_host_prot(prot_new);

    /* Adjust protection to be able to write. */
    if (!(host_prot_old & PROT_WRITE)) {
        host_prot_old |= PROT_WRITE;
        mprotect(host_start, host_page_size, host_prot_old);
    }

    /* Read or zero the new guest pages. */
    if (flags & MAP_ANONYMOUS) {
        memset(g2h_untagged(start), 0, last - start + 1);
    } else {
        if (pread(fd, g2h_untagged(start), last - start + 1, offset) == -1) {
            return false;
        }
    }

    /* Install the final protection. */
    if (host_prot_new != host_prot_old) {
        mprotect(host_start, host_page_size, host_prot_new);
    }
    return true;
}

abi_ulong task_unmapped_base;
abi_ulong elf_et_dyn_base;
abi_ulong mmap_next_start;

/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated
 * a chunk of guest address space.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    target_ulong ret;

    ret = page_find_range_empty(start, reserved_va, size, align);
    if (ret == -1 && start > mmap_min_addr) {
        /* Restart at the beginning of the address space. */
        ret = page_find_range_empty(mmap_min_addr, start - 1, size, align);
    }

    return ret;
}

/*
 * Find and reserve a free memory area of size 'size'.  The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 on error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    int host_page_size = qemu_real_host_page_size();
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= -host_page_size;
    }
    start = ROUND_UP(start, align);
    size = ROUND_UP(size, host_page_size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /*
         * Count the number of sequential returns of the same address.
         * This is used to modify the search algorithm below.
         */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success. */
                if (start == mmap_next_start && addr >= task_unmapped_base) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target. */
            switch (repeat) {
            case 0:
                /*
                 * Assume the result that the kernel gave us is the
                 * first with enough free space, so start again at the
                 * next higher target page.
                 */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /*
                 * Sometimes the kernel decides to perform the allocation
                 * at the top end of memory instead.
                 */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory. */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last. */
                addr = -1;
                break;
            }
        } else {
            /*
             * Since the result the kernel gave didn't fit, start
             * again at low memory.  If any repetition, fail.
             */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again. */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space. */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /*
             * Don't actually use 0 when wrapping, instead indicate
             * that we'd truly like an allocation in low memory.
             */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

/*
 * Record a successful mmap within the user-exec interval tree.
 */
static abi_long mmap_end(abi_ulong start, abi_ulong last,
                         abi_ulong passthrough_start,
                         abi_ulong passthrough_last,
                         int flags, int page_flags)
{
    if (flags & MAP_ANONYMOUS) {
        page_flags |= PAGE_ANON;
    }
    page_flags |= PAGE_RESET;
    if (passthrough_start > passthrough_last) {
        page_set_flags(start, last, page_flags);
    } else {
        if (start < passthrough_start) {
            page_set_flags(start, passthrough_start - 1, page_flags);
        }
        page_set_flags(passthrough_start, passthrough_last,
                       page_flags | PAGE_PASSTHROUGH);
        if (passthrough_last < last) {
            page_set_flags(passthrough_last + 1, last, page_flags);
        }
    }
    shm_region_rm_complete(start, last);
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        FILE *f = qemu_log_trylock();
        if (f) {
            fprintf(f, "page layout changed following mmap\n");
            page_dump(f);
            qemu_log_unlock(f);
        }
    }
    return start;
}
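
/*
 * Example (illustrative): a mapping whose guest pages coincide exactly
 * with host pages is recorded entirely as PAGE_PASSTHROUGH, so later
 * madvise() calls may be forwarded to the host.  If mmap_frag had to
 * emulate a partial page at either end, only the middle
 * [passthrough_start, passthrough_last] keeps the flag and the
 * emulated fragments are recorded with plain page_flags.  Callers that
 * emulated the whole mapping pass (-1, 0), which the comparison above
 * treats as "no passthrough range".
 */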

static abi_long target_mmap__locked(abi_ulong start, abi_ulong len,
                                    int target_prot, int flags, int page_flags,
                                    int fd, off_t offset)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong ret, last, real_start, real_last, retaddr, host_len;
    abi_ulong passthrough_start = -1, passthrough_last = 0;
    off_t host_offset;

    real_start = start & -host_page_size;
    host_offset = offset & -host_page_size;

    /*
     * If the user is asking for the kernel to find a location, do that
     * before we truncate the length for mapping files below.
     */
    if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
        host_len = len + offset - host_offset;
        host_len = ROUND_UP(host_len, host_page_size);
        start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            return -1;
        }
    }

    /*
     * When mapping files into a memory area larger than the file, accesses
     * to pages beyond the file size will cause a SIGBUS.
     *
     * For example, if mmapping a file of 100 bytes on a host with 4K pages
     * emulating a target with 8K pages, the target expects to be able to
     * access the first 8K.  But the host will trap us on any access beyond
     * 4K.
     *
     * When emulating a target with a larger page size than the host's, we
     * may need to truncate file maps at EOF and add extra anonymous pages
     * up to the target's page boundary.
     */
    if (host_page_size < TARGET_PAGE_SIZE && !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            return -1;
        }

        /* Are we trying to create a map beyond EOF? */
        if (offset + len > sb.st_size) {
            /*
             * If so, truncate the file map at EOF aligned with
             * the host's real page size.  Additional anonymous maps
             * will be created beyond EOF.
             */
            len = ROUND_UP(sb.st_size - offset, host_page_size);
        }
    }

    if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
        uintptr_t host_start;
        int host_prot;
        void *p;

        host_len = len + offset - host_offset;
        host_len = ROUND_UP(host_len, host_page_size);
        host_prot = target_to_host_prot(target_prot);

        /* Note: we prefer to control the mapping address. */
        p = mmap(g2h_untagged(start), host_len, host_prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return -1;
        }
        /* update start so that it points to the file position at 'offset' */
        host_start = (uintptr_t)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h_untagged(start), len, host_prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                return -1;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
        last = start + len - 1;
        passthrough_start = start;
        passthrough_last = last;
    } else {
        last = start + len - 1;
        real_last = ROUND_UP(last, host_page_size) - 1;

        if (flags & MAP_FIXED_NOREPLACE) {
            /* Validate that the chosen range is empty. */
            if (!page_check_range_empty(start, last)) {
                errno = EEXIST;
                return -1;
            }

            /*
             * With reserved_va, the entire address space is mmaped in the
             * host to ensure it isn't accidentally used for something else.
             * We have just checked that the guest address is not mapped
             * within the guest, but need to replace the host reservation.
             *
             * Without reserved_va, despite the guest address check above,
             * keep MAP_FIXED_NOREPLACE so that the guest does not overwrite
             * any host address mappings.
             */
            if (reserved_va) {
                flags = (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED;
            }
        }

        /*
         * Worst case: we cannot map the file because the offset is not
         * aligned, so we read it.
         */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & (host_page_size - 1)) != (start & (host_page_size - 1))) {
            /*
             * msync() won't work here, so we return an error if write is
             * possible while it is a shared mapping.
             */
            if ((flags & MAP_TYPE) == MAP_SHARED
                && (target_prot & PROT_WRITE)) {
                errno = EINVAL;
                return -1;
            }
            retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
                                  (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))
                                  | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1) {
                return -1;
            }
            if (pread(fd, g2h_untagged(start), len, offset) == -1) {
                return -1;
            }
            if (!(target_prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, target_prot);
                assert(ret == 0);
            }
            return mmap_end(start, last, -1, 0, flags, page_flags);
        }

        /* Handle the start of the mapping. */
        if (start > real_start) {
            if (real_last == real_start + host_page_size - 1) {
                /* One single host page. */
                if (!mmap_frag(real_start, start, last,
                               target_prot, flags, fd, offset)) {
                    return -1;
                }
                return mmap_end(start, last, -1, 0, flags, page_flags);
            }
            if (!mmap_frag(real_start, start,
                           real_start + host_page_size - 1,
                           target_prot, flags, fd, offset)) {
                return -1;
            }
            real_start += host_page_size;
        }
        /* Handle the end of the mapping. */
        if (last < real_last) {
            abi_ulong real_page = real_last - host_page_size + 1;
            if (!mmap_frag(real_page, real_page, last,
                           target_prot, flags, fd,
                           offset + real_page - start)) {
                return -1;
            }
            real_last -= host_page_size;
        }

        /* Map the middle (easier). */
        if (real_start < real_last) {
            void *p, *want_p;
            off_t offset1;
            size_t len1;

            if (flags & MAP_ANONYMOUS) {
                offset1 = 0;
            } else {
                offset1 = offset + real_start - start;
            }
            len1 = real_last - real_start + 1;
            want_p = g2h_untagged(real_start);

            p = mmap(want_p, len1, target_to_host_prot(target_prot),
                     flags, fd, offset1);
            if (p != want_p) {
                if (p != MAP_FAILED) {
                    munmap(p, len1);
                    errno = EEXIST;
                }
                return -1;
            }
            passthrough_start = real_start;
            passthrough_last = real_last;
        }
    }
    return mmap_end(start, last, passthrough_start, passthrough_last,
                    flags, page_flags);
}

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                     int flags, int fd, off_t offset)
{
    abi_long ret;
    int page_flags;

    trace_target_mmap(start, len, target_prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        return -1;
    }

    page_flags = validate_prot_to_pageflags(target_prot);
    if (!page_flags) {
        errno = EINVAL;
        return -1;
    }

    /* Also check for overflows... */
    len = TARGET_PAGE_ALIGN(len);
    if (!len || len != (size_t)len) {
        errno = ENOMEM;
        return -1;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        return -1;
    }
    if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            return -1;
        }
        if (!guest_range_valid_untagged(start, len)) {
            errno = ENOMEM;
            return -1;
        }
    }

    mmap_lock();

    ret = target_mmap__locked(start, len, target_prot, flags,
                              page_flags, fd, offset);

    mmap_unlock();

    /*
     * If we're mapping shared memory, ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (ret != -1 && (flags & MAP_TYPE) != MAP_PRIVATE) {
        CPUState *cpu = thread_cpu;
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }
    }

    return ret;
}
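
/*
 * Usage sketch (hypothetical caller, in the style of the mmap syscall
 * handler): allocate anonymous guest memory, with the host-mmap-like
 * convention of returning -1 and setting errno on failure, which
 * get_errno() converts into a -TARGET_errno value:
 *
 *     abi_long addr = get_errno(target_mmap(0, 8192,
 *                                           PROT_READ | PROT_WRITE,
 *                                           MAP_PRIVATE | MAP_ANONYMOUS,
 *                                           -1, 0));
 */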

static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong real_start;
    abi_ulong real_last;
    abi_ulong real_len;
    abi_ulong last;
    abi_ulong a;
    void *host_start;
    int prot;

    last = start + len - 1;
    real_start = start & -host_page_size;
    real_last = ROUND_UP(last, host_page_size) - 1;

    /*
     * If guest pages remain on the first or last host pages,
     * adjust the deallocation to retain those guest pages.
     * The single page special case is required for the last page,
     * lest real_start overflow to zero.
     */
    if (real_last - real_start < host_page_size) {
        prot = 0;
        for (a = real_start; a < start; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a);
        }
        for (a = last; a < real_last; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a + 1);
        }
        if (prot != 0) {
            return 0;
        }
    } else {
        for (prot = 0, a = real_start; a < start; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a);
        }
        if (prot != 0) {
            real_start += host_page_size;
        }

        for (prot = 0, a = last; a < real_last; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a + 1);
        }
        if (prot != 0) {
            real_last -= host_page_size;
        }

        if (real_last < real_start) {
            return 0;
        }
    }

    real_len = real_last - real_start + 1;
    host_start = g2h_untagged(real_start);

    if (reserved_va) {
        void *ptr = mmap(host_start, real_len, PROT_NONE,
                         MAP_FIXED | MAP_ANONYMOUS
                         | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
        return ptr == host_start ? 0 : -1;
    }
    return munmap(host_start, real_len);
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    int ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        return -1;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        errno = EINVAL;
        return -1;
    }

    mmap_lock();
    ret = mmap_reserve_or_unmap(start, len);
    if (likely(ret == 0)) {
        page_set_flags(start, start + len - 1, 0);
        shm_region_rm_complete(start, start + len - 1);
    }
    mmap_unlock();

    return ret;
}

abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    if (!guest_range_valid_untagged(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid_untagged(new_addr, new_size)) ||
        ((flags & MREMAP_MAYMOVE) == 0 &&
         !guest_range_valid_untagged(old_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                           flags, g2h_untagged(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /*
             * If new and old addresses overlap then the above mremap will
             * already have failed with EINVAL.
             */
            mmap_reserve_or_unmap(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED,
                               g2h_untagged(mmap_start));
            if (reserved_va) {
                mmap_reserve_or_unmap(old_addr, old_size);
            }
        }
    } else {
        int page_flags = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                page_flags |= page_get_flags(addr);
            }
        }
        if (page_flags == 0) {
            host_addr = mremap(g2h_untagged(old_addr),
                               old_size, new_size, flags);

            if (host_addr != MAP_FAILED) {
                /* Check if address fits target address space */
                if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
                    /* Revert mremap() changes */
                    host_addr = mremap(g2h_untagged(old_addr),
                                       new_size, old_size, flags);
                    errno = ENOMEM;
                    host_addr = MAP_FAILED;
                } else if (reserved_va && old_size > new_size) {
                    mmap_reserve_or_unmap(old_addr + old_size,
                                          old_size - new_size);
                }
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size - 1, 0);
        shm_region_rm_complete(old_addr, old_addr + old_size - 1);
        page_set_flags(new_addr, new_addr + new_size - 1,
                       prot | PAGE_VALID | PAGE_RESET);
        shm_region_rm_complete(new_addr, new_addr + new_size - 1);
    }
    mmap_unlock();
    return new_addr;
}

abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
{
    abi_ulong len;
    int ret = 0;

    if (start & ~TARGET_PAGE_MASK) {
        return -TARGET_EINVAL;
    }
    if (len_in == 0) {
        return 0;
    }
    len = TARGET_PAGE_ALIGN(len_in);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    /* Translate for some architectures which have different MADV_xxx values */
    switch (advice) {
    case TARGET_MADV_DONTNEED:      /* alpha */
        advice = MADV_DONTNEED;
        break;
    case TARGET_MADV_WIPEONFORK:    /* parisc */
        advice = MADV_WIPEONFORK;
        break;
    case TARGET_MADV_KEEPONFORK:    /* parisc */
        advice = MADV_KEEPONFORK;
        break;
    /* we do not care about the other MADV_xxx values yet */
    }

    /*
     * Most advice values are hints, so ignoring and returning success is ok.
     *
     * However, some advice values such as MADV_DONTNEED, MADV_WIPEONFORK and
     * MADV_KEEPONFORK are not hints and need to be emulated.
     *
     * A straight passthrough for those may not be safe because qemu sometimes
     * turns private file-backed mappings into anonymous mappings.
     * If all guest pages have PAGE_PASSTHROUGH set, mappings have the
     * same semantics for the host as for the guest.
     *
     * We pass through MADV_WIPEONFORK and MADV_KEEPONFORK if possible and
     * return failure if not.
     *
     * MADV_DONTNEED is passed through as well, if possible.
     * If passthrough isn't possible, we nevertheless (wrongly!) return
     * success, which is broken but some userspace programs fail to work
     * otherwise.  Completely implementing such emulation is quite complicated
     * though.
     */
    mmap_lock();
    switch (advice) {
    case MADV_WIPEONFORK:
    case MADV_KEEPONFORK:
        ret = -EINVAL;
        /* fall through */
    case MADV_DONTNEED:
        if (page_check_range(start, len, PAGE_PASSTHROUGH)) {
            ret = get_errno(madvise(g2h_untagged(start), len, advice));
            if ((advice == MADV_DONTNEED) && (ret == 0)) {
                page_reset_target_data(start, start + len - 1);
            }
        }
    }
    mmap_unlock();

    return ret;
}
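
/*
 * Example (illustrative): MADV_DONTNEED on a range whose pages were
 * mapped straight through to the host (all pages carry
 * PAGE_PASSTHROUGH) is forwarded to the host madvise() and the
 * per-page target data is reset.  The same advice on a range QEMU had
 * to emulate via pread() is accepted but silently ignored, as the
 * comment above explains.
 */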

#ifndef TARGET_FORCE_SHMLBA
/*
 * For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned.  Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
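
/*
 * Sketch of the override hook (assumed placement and value, mirroring
 * the comment above): a target with a larger SHMLBA would provide, in
 * one of its target headers,
 *
 *     #define TARGET_FORCE_SHMLBA
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 0x4000;   // hypothetical 16K, architecture-specific
 *     }
 */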

abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
                       abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong raddr;
    struct shmid_ds shm_info;
    int ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* Find out the length of the shared memory segment. */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    WITH_MMAP_LOCK_GUARD() {
        void *host_raddr;
        abi_ulong last;

        if (shmaddr) {
            host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
        } else {
            abi_ulong mmap_start;

            /* In order to use the host shmat, we need to honor host SHMLBA. */
            mmap_start = mmap_find_vma(0, shm_info.shm_segsz,
                                       MAX(SHMLBA, shmlba));

            if (mmap_start == -1) {
                return -TARGET_ENOMEM;
            }
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
        }

        if (host_raddr == (void *)-1) {
            return get_errno(-1);
        }
        raddr = h2g(host_raddr);
        last = raddr + shm_info.shm_segsz - 1;

        page_set_flags(raddr, last,
                       PAGE_VALID | PAGE_RESET | PAGE_READ |
                       (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

        shm_region_rm_complete(raddr, last);
        shm_region_add(raddr, last);
    }

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    return raddr;
}

abi_long target_shmdt(abi_ulong shmaddr)
{
    abi_long rv;

    /* shmdt pointers are always untagged */

    WITH_MMAP_LOCK_GUARD() {
        abi_ulong last = shm_region_find(shmaddr);
        if (last == 0) {
            return -TARGET_EINVAL;
        }

        rv = get_errno(shmdt(g2h_untagged(shmaddr)));
        if (rv == 0) {
            abi_ulong size = last - shmaddr + 1;

            page_set_flags(shmaddr, last, 0);
            shm_region_rm_complete(shmaddr, last);
            mmap_reserve_or_unmap(shmaddr, size);
        }
    }
    return rv;
}