/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include <sys/shm.h>
#include "trace.h"
#include "exec/log.h"
#include "qemu.h"
#include "user-internals.h"
#include "user-mmap.h"
#include "target_mman.h"
#include "qemu/interval-tree.h"

#ifdef TARGET_ARM
#include "target/arm/cpu-features.h"
#endif

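/*
 * The mmap state below (page flags, shm regions, mmap_next_start) is
 * guarded by a single recursive lock: each thread keeps a private
 * nesting count, and only the outermost mmap_lock()/mmap_unlock()
 * pair takes or releases the pthread mutex.
 */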
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    assert(mmap_lock_count > 0);
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}

/* Grab lock to make sure things are in a consistent state after fork(). */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

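/*
 * Track the segments attached via target_shmat(), so that target_shmdt()
 * can recover the size of a segment from its starting address alone.
 */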
/* Protected by mmap_lock. */
static IntervalTreeRoot shm_regions;

static void shm_region_add(abi_ptr start, abi_ptr last)
{
    IntervalTreeNode *i = g_new0(IntervalTreeNode, 1);

    i->start = start;
    i->last = last;
    interval_tree_insert(i, &shm_regions);
}

static abi_ptr shm_region_find(abi_ptr start)
{
    IntervalTreeNode *i;

    for (i = interval_tree_iter_first(&shm_regions, start, start); i;
         i = interval_tree_iter_next(i, start, start)) {
        if (i->start == start) {
            return i->last;
        }
    }
    return 0;
}

static void shm_region_rm_complete(abi_ptr start, abi_ptr last)
{
    IntervalTreeNode *i, *n;

    for (i = interval_tree_iter_first(&shm_regions, start, last); i; i = n) {
        n = interval_tree_iter_next(i, start, last);
        if (i->start >= start && i->last <= last) {
            interval_tree_remove(i, &shm_regions);
            g_free(i);
        }
    }
}

/*
 * Validate target prot bitmask.
 * Return 0 if the target prot bitmask is invalid, otherwise
 * the internal qemu page_flags (which will include PAGE_VALID).
 */
static int validate_prot_to_pageflags(int prot)
{
    int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
    int page_flags = (prot & PAGE_BITS) | PAGE_VALID;

#ifdef TARGET_AARCH64
    {
        ARMCPU *cpu = ARM_CPU(thread_cpu);

        /*
         * The PROT_BTI bit is only accepted if the cpu supports the feature.
         * Since this is the unusual case, don't bother checking unless
         * the bit has been requested.  If set and valid, record the bit
         * within QEMU's page_flags.
         */
        if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) {
            valid |= TARGET_PROT_BTI;
            page_flags |= PAGE_BTI;
        }
        /* Similarly for the PROT_MTE bit. */
        if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) {
            valid |= TARGET_PROT_MTE;
            page_flags |= PAGE_MTE;
        }
    }
#elif defined(TARGET_HPPA)
    valid |= PROT_GROWSDOWN | PROT_GROWSUP;
#endif

    return prot & ~valid ? 0 : page_flags;
}

/*
 * For the host, we need not pass anything except read/write/exec.
 * While PROT_SEM is allowed by all hosts, it is also ignored, so
 * don't bother transforming guest bit to host bit.  Any other
 * target-specific prot bits will not be understood by the host
 * and will need to be encoded into page_flags for qemu emulation.
 *
 * Pages that are executable by the guest will never be executed
 * by the host, but the host will need to be able to read them.
 */
static int target_to_host_prot(int prot)
{
    return (prot & (PROT_READ | PROT_WRITE)) |
           (prot & PROT_EXEC ? PROT_READ : 0);
}

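/*
 * When host pages are larger than target pages, a single host page may
 * contain guest pages with differing protections.  target_mprotect()
 * therefore splits the request into at most three host ranges (unaligned
 * head, aligned middle, unaligned tail), OR-ing into the head and tail
 * the protections of neighbouring guest pages that share a host page.
 */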
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong starts[3];
    abi_ulong lens[3];
    int prots[3];
    abi_ulong host_start, host_last, last;
    int prot1, ret, page_flags, nranges;

    trace_target_mprotect(start, len, target_prot);

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    if (len == 0) {
        return 0;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (!guest_range_valid_untagged(start, len)) {
        return -TARGET_ENOMEM;
    }

    last = start + len - 1;
    host_start = start & -host_page_size;
    host_last = ROUND_UP(last, host_page_size) - 1;
    nranges = 0;

    mmap_lock();

    if (host_last - host_start < host_page_size) {
        /* Single host page contains all guest pages: sum the prot. */
        prot1 = target_prot;
        for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(a);
        }
        for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(a + 1);
        }
        starts[nranges] = host_start;
        lens[nranges] = host_page_size;
        prots[nranges] = prot1;
        nranges++;
    } else {
        if (host_start < start) {
            /* Host page contains more than one guest page: sum the prot. */
            prot1 = target_prot;
            for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(a);
            }
            /* If the resulting sum differs, create a new range. */
            if (prot1 != target_prot) {
                starts[nranges] = host_start;
                lens[nranges] = host_page_size;
                prots[nranges] = prot1;
                nranges++;
                host_start += host_page_size;
            }
        }

        if (last < host_last) {
            /* Host page contains more than one guest page: sum the prot. */
            prot1 = target_prot;
            for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(a + 1);
            }
            /* If the resulting sum differs, create a new range. */
            if (prot1 != target_prot) {
                host_last -= host_page_size;
                starts[nranges] = host_last + 1;
                lens[nranges] = host_page_size;
                prots[nranges] = prot1;
                nranges++;
            }
        }

        /* Create a range for the middle, if any remains. */
        if (host_start < host_last) {
            starts[nranges] = host_start;
            lens[nranges] = host_last - host_start + 1;
            prots[nranges] = target_prot;
            nranges++;
        }
    }

    for (int i = 0; i < nranges; ++i) {
        ret = mprotect(g2h_untagged(starts[i]), lens[i],
                       target_to_host_prot(prots[i]));
        if (ret != 0) {
            goto error;
        }
    }

    page_set_flags(start, last, page_flags);
    ret = 0;

 error:
    mmap_unlock();
    return ret;
}

/*
 * Map an incomplete host page.
 *
 * The fragment [start, last] lies within the single host page that begins
 * at real_start.  Guest pages already mapped elsewhere in that host page
 * must keep their contents; the fragment's protection is OR-ed with theirs
 * to derive the final host protection.
 */
static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
                      int prot, int flags, int fd, off_t offset)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong real_last;
    void *host_start;
    int prot_old, prot_new;
    int host_prot_old, host_prot_new;

    if (!(flags & MAP_ANONYMOUS)
        && (flags & MAP_TYPE) == MAP_SHARED
        && (prot & PROT_WRITE)) {
        /*
         * msync() won't work with the partial page, so we return an
         * error if write is possible while it is a shared mapping.
         */
        errno = EINVAL;
        return false;
    }

    real_last = real_start + host_page_size - 1;
    host_start = g2h_untagged(real_start);

    /* Get the protection of the target pages outside the mapping. */
    prot_old = 0;
    for (abi_ulong a = real_start; a < start; a += TARGET_PAGE_SIZE) {
        prot_old |= page_get_flags(a);
    }
    for (abi_ulong a = real_last; a > last; a -= TARGET_PAGE_SIZE) {
        prot_old |= page_get_flags(a);
    }

    if (prot_old == 0) {
        /*
         * Since !(prot_old & PAGE_VALID), there were no guest pages
         * outside of the fragment we need to map.  Allocate a new host
         * page to cover, discarding whatever else may have been present.
         */
        void *p = mmap(host_start, host_page_size,
                       target_to_host_prot(prot),
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p != host_start) {
            if (p != MAP_FAILED) {
                munmap(p, host_page_size);
                errno = EEXIST;
            }
            return false;
        }
        prot_old = prot;
    }
    prot_new = prot | prot_old;

    host_prot_old = target_to_host_prot(prot_old);
    host_prot_new = target_to_host_prot(prot_new);

    /* Adjust protection to be able to write. */
    if (!(host_prot_old & PROT_WRITE)) {
        host_prot_old |= PROT_WRITE;
        mprotect(host_start, host_page_size, host_prot_old);
    }

    /* Read or zero the new guest pages. */
    if (flags & MAP_ANONYMOUS) {
        memset(g2h_untagged(start), 0, last - start + 1);
    } else {
        if (pread(fd, g2h_untagged(start), last - start + 1, offset) == -1) {
            return false;
        }
    }

    /* Put final protection */
    if (host_prot_new != host_prot_old) {
        mprotect(host_start, host_page_size, host_prot_new);
    }
    return true;
}

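/*
 * Guest address-space layout anchors, set up at load time; mmap_find_vma()
 * begins its search at mmap_next_start and advances it on success.
 */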
abi_ulong task_unmapped_base;
abi_ulong elf_et_dyn_base;
abi_ulong mmap_next_start;

/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated
 * a chunk of guest address space.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    target_ulong ret;

    ret = page_find_range_empty(start, reserved_va, size, align);
    if (ret == -1 && start > mmap_min_addr) {
        /* Restart at the beginning of the address space. */
        ret = page_find_range_empty(mmap_min_addr, start - 1, size, align);
    }

    return ret;
}

/*
 * Find and reserve a free memory area of size 'size'.  The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    int host_page_size = qemu_real_host_page_size();
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= -host_page_size;
    }
    start = ROUND_UP(start, align);
    size = ROUND_UP(size, host_page_size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = NULL;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /*
         * Count the number of sequential returns of the same address.
         * This is used to modify the search algorithm below.
         */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success. */
                if (start == mmap_next_start && addr >= task_unmapped_base) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target. */
            switch (repeat) {
            case 0:
                /*
                 * Assume the result that the kernel gave us is the
                 * first with enough free space, so start again at the
                 * next higher target page.
                 */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /*
                 * Sometimes the kernel decides to perform the allocation
                 * at the top end of memory instead.
                 */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory. */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last. */
                addr = -1;
                break;
            }
        } else {
            /*
             * Since the result the kernel gave didn't fit, start
             * again at low memory.  If any repetition, fail.
             */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again. */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space. */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /*
             * Don't actually use 0 when wrapping, instead indicate
             * that we'd truly like an allocation in low memory.
             */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

/*
 * Record a successful mmap within the user-exec interval tree.
 */
static abi_long mmap_end(abi_ulong start, abi_ulong last,
                         abi_ulong passthrough_start,
                         abi_ulong passthrough_last,
                         int flags, int page_flags)
{
    if (flags & MAP_ANONYMOUS) {
        page_flags |= PAGE_ANON;
    }
    page_flags |= PAGE_RESET;
    if (passthrough_start > passthrough_last) {
        page_set_flags(start, last, page_flags);
    } else {
        if (start < passthrough_start) {
            page_set_flags(start, passthrough_start - 1, page_flags);
        }
        page_set_flags(passthrough_start, passthrough_last,
                       page_flags | PAGE_PASSTHROUGH);
        if (passthrough_last < last) {
            page_set_flags(passthrough_last + 1, last, page_flags);
        }
    }
    shm_region_rm_complete(start, last);
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        FILE *f = qemu_log_trylock();
        if (f) {
            fprintf(f, "page layout changed following mmap\n");
            page_dump(f);
            qemu_log_unlock(f);
        }
    }
    return start;
}

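/*
 * Body of target_mmap(); called with mmap_lock held, after start, len
 * and offset have been validated against the target page size.
 */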
static abi_long target_mmap__locked(abi_ulong start, abi_ulong len,
                                    int target_prot, int flags, int page_flags,
                                    int fd, off_t offset)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong ret, last, real_start, real_last, retaddr, host_len;
    abi_ulong passthrough_start = -1, passthrough_last = 0;
    off_t host_offset;

    real_start = start & -host_page_size;
    host_offset = offset & -host_page_size;

    /*
     * If the user is asking for the kernel to find a location, do that
     * before we truncate the length for mapping files below.
     */
    if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
        host_len = len + offset - host_offset;
        host_len = ROUND_UP(host_len, host_page_size);
        start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            return -1;
        }
    }

    /*
     * When mapping files into a memory area larger than the file, accesses
     * to pages beyond the file size will cause a SIGBUS.
     *
     * For example, if mmapping a file of 100 bytes on a host with 4K pages
     * emulating a target with 8K pages, the target expects to be able to
     * access the first 8K.  But the host will trap us on any access beyond
     * 4K.
     *
     * When emulating a target with a larger page size than the host's, we
     * may need to truncate file maps at EOF and add extra anonymous pages
     * up to the target's page boundary.
     */
    if (host_page_size < TARGET_PAGE_SIZE && !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            return -1;
        }

        /* Are we trying to create a map beyond EOF? */
        if (offset + len > sb.st_size) {
            /*
             * If so, truncate the file map at EOF aligned with
             * the host's real page size.  Additional anonymous maps
             * will be created beyond EOF.
             */
            len = ROUND_UP(sb.st_size - offset, host_page_size);
        }
    }

    if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
        uintptr_t host_start;
        int host_prot;
        void *p;

        host_len = len + offset - host_offset;
        host_len = ROUND_UP(host_len, host_page_size);
        host_prot = target_to_host_prot(target_prot);

        /* Note: we prefer to control the mapping address. */
        p = mmap(g2h_untagged(start), host_len, host_prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return -1;
        }
        /* update start so that it points to the file position at 'offset' */
        host_start = (uintptr_t)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h_untagged(start), len, host_prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                return -1;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
        last = start + len - 1;
        passthrough_start = start;
        passthrough_last = last;
    } else {
        last = start + len - 1;
        real_last = ROUND_UP(last, host_page_size) - 1;

        if (flags & MAP_FIXED_NOREPLACE) {
            /* Validate that the chosen range is empty. */
            if (!page_check_range_empty(start, last)) {
                errno = EEXIST;
                return -1;
            }

            /*
             * With reserved_va, the entire address space is mmaped in the
             * host to ensure it isn't accidentally used for something else.
             * We have just checked that the guest address is not mapped
             * within the guest, but need to replace the host reservation.
             *
             * Without reserved_va, despite the guest address check above,
             * keep MAP_FIXED_NOREPLACE so that the guest does not overwrite
             * any host address mappings.
             */
            if (reserved_va) {
                flags = (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED;
            }
        }

        /*
         * Worst case: we cannot map the file because the offset is not
         * aligned, so we read it.
         */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & (host_page_size - 1)) != (start & (host_page_size - 1))) {
            /*
             * msync() won't work here, so we return an error if write is
             * possible while it is a shared mapping.
             */
            if ((flags & MAP_TYPE) == MAP_SHARED
                && (target_prot & PROT_WRITE)) {
                errno = EINVAL;
                return -1;
            }
            retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
                                  (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))
                                  | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1) {
                return -1;
            }
            if (pread(fd, g2h_untagged(start), len, offset) == -1) {
                return -1;
            }
            if (!(target_prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, target_prot);
                assert(ret == 0);
            }
            return mmap_end(start, last, -1, 0, flags, page_flags);
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_last == real_start + host_page_size - 1) {
                /* one single host page */
                if (!mmap_frag(real_start, start, last,
                               target_prot, flags, fd, offset)) {
                    return -1;
                }
                return mmap_end(start, last, -1, 0, flags, page_flags);
            }
            if (!mmap_frag(real_start, start,
                           real_start + host_page_size - 1,
                           target_prot, flags, fd, offset)) {
                return -1;
            }
            real_start += host_page_size;
        }
        /* handle the end of the mapping */
        if (last < real_last) {
            abi_ulong real_page = real_last - host_page_size + 1;
            if (!mmap_frag(real_page, real_page, last,
                           target_prot, flags, fd,
                           offset + real_page - start)) {
                return -1;
            }
            real_last -= host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_last) {
            void *p, *want_p;
            off_t offset1;
            size_t len1;

            if (flags & MAP_ANONYMOUS) {
                offset1 = 0;
            } else {
                offset1 = offset + real_start - start;
            }
            len1 = real_last - real_start + 1;
            want_p = g2h_untagged(real_start);

            p = mmap(want_p, len1, target_to_host_prot(target_prot),
                     flags, fd, offset1);
            if (p != want_p) {
                if (p != MAP_FAILED) {
                    munmap(p, len1);
                    errno = EEXIST;
                }
                return -1;
            }
            passthrough_start = real_start;
            passthrough_last = real_last;
        }
    }
    return mmap_end(start, last, passthrough_start, passthrough_last,
                    flags, page_flags);
}

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                     int flags, int fd, off_t offset)
{
    abi_long ret;
    int page_flags;

    trace_target_mmap(start, len, target_prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        return -1;
    }

    page_flags = validate_prot_to_pageflags(target_prot);
    if (!page_flags) {
        errno = EINVAL;
        return -1;
    }

    /* Also check for overflows... */
    len = TARGET_PAGE_ALIGN(len);
    if (!len || len != (size_t)len) {
        errno = ENOMEM;
        return -1;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        return -1;
    }
    if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            return -1;
        }
        if (!guest_range_valid_untagged(start, len)) {
            errno = ENOMEM;
            return -1;
        }
    }

    mmap_lock();

    ret = target_mmap__locked(start, len, target_prot, flags,
                              page_flags, fd, offset);

    mmap_unlock();

    /*
     * If we're mapping shared memory, ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (ret != -1 && (flags & MAP_TYPE) != MAP_PRIVATE) {
        CPUState *cpu = thread_cpu;
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }
    }

    return ret;
}

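/*
 * Release a guest mapping.  With reserved_va the host range is replaced
 * by a PROT_NONE reservation rather than unmapped, so that the guest
 * portion of the address space remains claimed within the host.
 */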
static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong real_start;
    abi_ulong real_last;
    abi_ulong real_len;
    abi_ulong last;
    abi_ulong a;
    void *host_start;
    int prot;

    last = start + len - 1;
    real_start = start & -host_page_size;
    real_last = ROUND_UP(last, host_page_size) - 1;

    /*
     * If guest pages remain on the first or last host pages,
     * adjust the deallocation to retain those guest pages.
     * The single page special case is required for the last page,
     * lest real_start overflow to zero.
     */
    if (real_last - real_start < host_page_size) {
        prot = 0;
        for (a = real_start; a < start; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a);
        }
        for (a = last; a < real_last; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a + 1);
        }
        if (prot != 0) {
            return 0;
        }
    } else {
        for (prot = 0, a = real_start; a < start; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a);
        }
        if (prot != 0) {
            real_start += host_page_size;
        }

        for (prot = 0, a = last; a < real_last; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a + 1);
        }
        if (prot != 0) {
            real_last -= host_page_size;
        }

        if (real_last < real_start) {
            return 0;
        }
    }

    real_len = real_last - real_start + 1;
    host_start = g2h_untagged(real_start);

    if (reserved_va) {
        void *ptr = mmap(host_start, real_len, PROT_NONE,
                         MAP_FIXED | MAP_ANONYMOUS
                         | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
        return ptr == host_start ? 0 : -1;
    }
    return munmap(host_start, real_len);
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    int ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        return -1;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        errno = EINVAL;
        return -1;
    }

    mmap_lock();
    ret = mmap_reserve_or_unmap(start, len);
    if (likely(ret == 0)) {
        page_set_flags(start, start + len - 1, 0);
        shm_region_rm_complete(start, start + len - 1);
    }
    mmap_unlock();

    return ret;
}

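/*
 * The three branches below mirror the host mremap() modes: move to a
 * fixed address, move anywhere, or resize in place.  With reserved_va,
 * any host range vacated by a move is handed back to the reservation.
 */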
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    if (!guest_range_valid_untagged(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid_untagged(new_addr, new_size)) ||
        ((flags & MREMAP_MAYMOVE) == 0 &&
         !guest_range_valid_untagged(old_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                           flags, g2h_untagged(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /*
             * If new and old addresses overlap then the above mremap will
             * already have failed with EINVAL.
             */
            mmap_reserve_or_unmap(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED,
                               g2h_untagged(mmap_start));
            if (reserved_va) {
                mmap_reserve_or_unmap(old_addr, old_size);
            }
        }
    } else {
        int page_flags = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                page_flags |= page_get_flags(addr);
            }
        }
        if (page_flags == 0) {
            host_addr = mremap(g2h_untagged(old_addr),
                               old_size, new_size, flags);

            if (host_addr != MAP_FAILED) {
                /* Check if address fits target address space */
                if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
                    /* Revert mremap() changes */
                    host_addr = mremap(g2h_untagged(old_addr),
                                       new_size, old_size, flags);
                    errno = ENOMEM;
                    host_addr = MAP_FAILED;
                } else if (reserved_va && old_size > new_size) {
                    mmap_reserve_or_unmap(old_addr + old_size,
                                          old_size - new_size);
                }
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size - 1, 0);
        shm_region_rm_complete(old_addr, old_addr + old_size - 1);
        page_set_flags(new_addr, new_addr + new_size - 1,
                       prot | PAGE_VALID | PAGE_RESET);
        shm_region_rm_complete(new_addr, new_addr + new_size - 1);
    }
    mmap_unlock();
    return new_addr;
}

abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
{
    abi_ulong len;
    int ret = 0;

    if (start & ~TARGET_PAGE_MASK) {
        return -TARGET_EINVAL;
    }
    if (len_in == 0) {
        return 0;
    }
    len = TARGET_PAGE_ALIGN(len_in);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    /* Translate for some architectures which have different MADV_xxx values */
    switch (advice) {
    case TARGET_MADV_DONTNEED:      /* alpha */
        advice = MADV_DONTNEED;
        break;
    case TARGET_MADV_WIPEONFORK:    /* parisc */
        advice = MADV_WIPEONFORK;
        break;
    case TARGET_MADV_KEEPONFORK:    /* parisc */
        advice = MADV_KEEPONFORK;
        break;
    /* we do not care about the other MADV_xxx values yet */
    }

    /*
     * Most advice values are hints, so ignoring and returning success is ok.
     *
     * However, some advice values such as MADV_DONTNEED, MADV_WIPEONFORK and
     * MADV_KEEPONFORK are not hints and need to be emulated.
     *
     * A straight passthrough for those may not be safe because qemu sometimes
     * turns private file-backed mappings into anonymous mappings.
     * If all guest pages have PAGE_PASSTHROUGH set, mappings have the
     * same semantics for the host as for the guest.
     *
     * We pass through MADV_WIPEONFORK and MADV_KEEPONFORK if possible and
     * return failure if not.
     *
     * MADV_DONTNEED is passed through as well, if possible.
     * If passthrough isn't possible, we nevertheless (wrongly!) return
     * success, which is broken but some userspace programs fail to work
     * otherwise.  Completely implementing such emulation is quite complicated
     * though.
     */
    mmap_lock();
    switch (advice) {
    case MADV_WIPEONFORK:
    case MADV_KEEPONFORK:
        ret = -EINVAL;
        /* fall through */
    case MADV_DONTNEED:
        if (page_check_range(start, len, PAGE_PASSTHROUGH)) {
            ret = get_errno(madvise(g2h_untagged(start), len, advice));
            if ((advice == MADV_DONTNEED) && (ret == 0)) {
                page_reset_target_data(start, start + len - 1);
            }
        }
    }
    mmap_unlock();

    return ret;
}

#ifndef TARGET_FORCE_SHMLBA
/*
 * For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned.  Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif

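/*
 * Attach a SysV shared memory segment at a guest address, honoring the
 * alignment demanded by both the host and the target SHMLBA.
 */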
abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
                       abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong raddr;
    struct shmid_ds shm_info;
    int ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    WITH_MMAP_LOCK_GUARD() {
        void *host_raddr;
        abi_ulong last;

        if (shmaddr) {
            host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
        } else {
            abi_ulong mmap_start;

            /* In order to use the host shmat, we need to honor host SHMLBA. */
            mmap_start = mmap_find_vma(0, shm_info.shm_segsz,
                                       MAX(SHMLBA, shmlba));

            if (mmap_start == -1) {
                return -TARGET_ENOMEM;
            }
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
        }

        if (host_raddr == (void *)-1) {
            return get_errno(-1);
        }
        raddr = h2g(host_raddr);
        last = raddr + shm_info.shm_segsz - 1;

        page_set_flags(raddr, last,
                       PAGE_VALID | PAGE_RESET | PAGE_READ |
                       (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

        shm_region_rm_complete(raddr, last);
        shm_region_add(raddr, last);
    }

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    return raddr;
}

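/*
 * Detach the segment attached at shmaddr, using the recorded shm region
 * to recover its size so the host range can be re-reserved or unmapped.
 */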
abi_long target_shmdt(abi_ulong shmaddr)
{
    abi_long rv;

    /* shmdt pointers are always untagged */

    WITH_MMAP_LOCK_GUARD() {
        abi_ulong last = shm_region_find(shmaddr);
        if (last == 0) {
            return -TARGET_EINVAL;
        }

        rv = get_errno(shmdt(g2h_untagged(shmaddr)));
        if (rv == 0) {
            abi_ulong size = last - shmaddr + 1;

            page_set_flags(shmaddr, last, 0);
            shm_region_rm_complete(shmaddr, last);
            mmap_reserve_or_unmap(shmaddr, size);
        }
    }
    return rv;
}