linux-user/mmap.c (QEMU; snapshot at commit "linux-user: Split out mmap_h_eq_g")

/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include <sys/shm.h>
#include "trace.h"
#include "exec/log.h"
#include "qemu.h"
#include "user-internals.h"
#include "user-mmap.h"
#include "target_mman.h"
#include "qemu/interval-tree.h"

#ifdef TARGET_ARM
#include "target/arm/cpu-features.h"
#endif

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    assert(mmap_lock_count > 0);
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}

/* Grab lock to make sure things are in a consistent state after fork(). */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/* Protected by mmap_lock. */
static IntervalTreeRoot shm_regions;

static void shm_region_add(abi_ptr start, abi_ptr last)
{
    IntervalTreeNode *i = g_new0(IntervalTreeNode, 1);

    i->start = start;
    i->last = last;
    interval_tree_insert(i, &shm_regions);
}

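/* Return the last byte of the shm region beginning at START, or 0 if none. */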
static abi_ptr shm_region_find(abi_ptr start)
{
    IntervalTreeNode *i;

    for (i = interval_tree_iter_first(&shm_regions, start, start); i;
         i = interval_tree_iter_next(i, start, start)) {
        if (i->start == start) {
            return i->last;
        }
    }
    return 0;
}

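/* Remove all shm regions that lie entirely within [START, LAST]. */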
static void shm_region_rm_complete(abi_ptr start, abi_ptr last)
{
    IntervalTreeNode *i, *n;

    for (i = interval_tree_iter_first(&shm_regions, start, last); i; i = n) {
        n = interval_tree_iter_next(i, start, last);
        if (i->start >= start && i->last <= last) {
            interval_tree_remove(i, &shm_regions);
            g_free(i);
        }
    }
}

/*
 * Validate target prot bitmask.
 * Return 0 if the target prot bitmask is invalid, otherwise
 * the internal qemu page_flags (which will include PAGE_VALID).
 */
static int validate_prot_to_pageflags(int prot)
{
    int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
    int page_flags = (prot & PAGE_BITS) | PAGE_VALID;

#ifdef TARGET_AARCH64
    {
        ARMCPU *cpu = ARM_CPU(thread_cpu);

        /*
         * The PROT_BTI bit is only accepted if the cpu supports the feature.
         * Since this is the unusual case, don't bother checking unless
         * the bit has been requested.  If set and valid, record the bit
         * within QEMU's page_flags.
         */
        if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) {
            valid |= TARGET_PROT_BTI;
            page_flags |= PAGE_BTI;
        }
        /* Similarly for the PROT_MTE bit. */
        if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) {
            valid |= TARGET_PROT_MTE;
            page_flags |= PAGE_MTE;
        }
    }
#elif defined(TARGET_HPPA)
    valid |= PROT_GROWSDOWN | PROT_GROWSUP;
#endif

    return prot & ~valid ? 0 : page_flags;
}

/*
 * For the host, we need not pass anything except read/write/exec.
 * While PROT_SEM is allowed by all hosts, it is also ignored, so
 * don't bother transforming guest bit to host bit.  Any other
 * target-specific prot bits will not be understood by the host
 * and will need to be encoded into page_flags for qemu emulation.
 *
 * Pages that are executable by the guest will never be executed
 * by the host, but the host will need to be able to read them.
 */
static int target_to_host_prot(int prot)
{
    return (prot & (PROT_READ | PROT_WRITE)) |
           (prot & PROT_EXEC ? PROT_READ : 0);
}

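/*
 * For example (illustration only):
 *
 *     target_to_host_prot(PROT_READ | PROT_EXEC)  == PROT_READ
 *     target_to_host_prot(PROT_WRITE | PROT_EXEC) == PROT_READ | PROT_WRITE
 *
 * The guest's exec permission survives in page_flags (PAGE_EXEC) and is
 * honored by the translator rather than by the host MMU.
 */
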
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong starts[3];
    abi_ulong lens[3];
    int prots[3];
    abi_ulong host_start, host_last, last;
    int prot1, ret, page_flags, nranges;

    trace_target_mprotect(start, len, target_prot);

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    if (len == 0) {
        return 0;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (!guest_range_valid_untagged(start, len)) {
        return -TARGET_ENOMEM;
    }

    last = start + len - 1;
    host_start = start & -host_page_size;
    host_last = ROUND_UP(last, host_page_size) - 1;
    nranges = 0;

    mmap_lock();

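    /*
     * The host page size may exceed the target page size, so the host
     * pages at either end of the range may also contain guest pages
     * outside the mprotect request that must keep their protection.
     * Split the work into at most three host ranges: a head and a tail
     * host page protected with the union of the old and new guest
     * protections, and an aligned middle protected exactly as requested.
     *
     * Worked example (illustration only): with 16K host pages and 4K
     * guest pages, target_mprotect(0x5000, 0xb000, prot) covers host
     * pages [0x4000, 0xffff].  The head host page [0x4000, 0x7fff] also
     * holds the guest page at 0x4000, so it gets the union of that
     * page's old flags and the new prot, while [0x8000, 0xffff] gets
     * exactly the requested prot.
     */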
    if (host_last - host_start < host_page_size) {
        /* Single host page contains all guest pages: sum the prot. */
        prot1 = target_prot;
        for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(a);
        }
        for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(a + 1);
        }
        starts[nranges] = host_start;
        lens[nranges] = host_page_size;
        prots[nranges] = prot1;
        nranges++;
    } else {
        if (host_start < start) {
            /* Host page contains more than one guest page: sum the prot. */
            prot1 = target_prot;
            for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(a);
            }
            /* If the resulting sum differs, create a new range. */
            if (prot1 != target_prot) {
                starts[nranges] = host_start;
                lens[nranges] = host_page_size;
                prots[nranges] = prot1;
                nranges++;
                host_start += host_page_size;
            }
        }

        if (last < host_last) {
            /* Host page contains more than one guest page: sum the prot. */
            prot1 = target_prot;
            for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(a + 1);
            }
            /* If the resulting sum differs, create a new range. */
            if (prot1 != target_prot) {
                host_last -= host_page_size;
                starts[nranges] = host_last + 1;
                lens[nranges] = host_page_size;
                prots[nranges] = prot1;
                nranges++;
            }
        }

        /* Create a range for the middle, if any remains. */
        if (host_start < host_last) {
            starts[nranges] = host_start;
            lens[nranges] = host_last - host_start + 1;
            prots[nranges] = target_prot;
            nranges++;
        }
    }

    for (int i = 0; i < nranges; ++i) {
        ret = mprotect(g2h_untagged(starts[i]), lens[i],
                       target_to_host_prot(prots[i]));
        if (ret != 0) {
            goto error;
        }
    }

    page_set_flags(start, last, page_flags);
    ret = 0;

 error:
    mmap_unlock();
    return ret;
}

/*
 * Perform munmap on behalf of the target, with host parameters.
 * If reserved_va, we must replace the memory reservation rather
 * than return the range to the host: otherwise the hole could be
 * reused by unrelated host allocations inside the guest address space.
 */
static int do_munmap(void *addr, size_t len)
{
    if (reserved_va) {
        void *ptr = mmap(addr, len, PROT_NONE,
                         MAP_FIXED | MAP_ANONYMOUS
                         | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
        return ptr == addr ? 0 : -1;
    }
    return munmap(addr, len);
}

/*
 * Map an incomplete host page: the guest range [start, last] covers only
 * part of the host page starting at real_start.  Guest pages already
 * valid within that host page must keep their contents and protection.
 */
static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
                      int prot, int flags, int fd, off_t offset)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong real_last;
    void *host_start;
    int prot_old, prot_new;
    int host_prot_old, host_prot_new;

    if (!(flags & MAP_ANONYMOUS)
        && (flags & MAP_TYPE) == MAP_SHARED
        && (prot & PROT_WRITE)) {
        /*
         * msync() won't work with the partial page, so we return an
         * error if write is possible while it is a shared mapping.
         */
        errno = EINVAL;
        return false;
    }

    real_last = real_start + host_page_size - 1;
    host_start = g2h_untagged(real_start);

    /* Get the protection of the target pages outside the mapping. */
    prot_old = 0;
    for (abi_ulong a = real_start; a < start; a += TARGET_PAGE_SIZE) {
        prot_old |= page_get_flags(a);
    }
    for (abi_ulong a = real_last; a > last; a -= TARGET_PAGE_SIZE) {
        prot_old |= page_get_flags(a);
    }

    if (prot_old == 0) {
        /*
         * Since !(prot_old & PAGE_VALID), there were no guest pages
         * outside of the fragment we need to map.  Allocate a new host
         * page to cover, discarding whatever else may have been present.
         */
        void *p = mmap(host_start, host_page_size,
                       target_to_host_prot(prot),
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p != host_start) {
            if (p != MAP_FAILED) {
                do_munmap(p, host_page_size);
                errno = EEXIST;
            }
            return false;
        }
        prot_old = prot;
    }
    prot_new = prot | prot_old;

    host_prot_old = target_to_host_prot(prot_old);
    host_prot_new = target_to_host_prot(prot_new);

    /* Adjust protection to be able to write. */
    if (!(host_prot_old & PROT_WRITE)) {
        host_prot_old |= PROT_WRITE;
        mprotect(host_start, host_page_size, host_prot_old);
    }

    /* Read or zero the new guest pages. */
    if (flags & MAP_ANONYMOUS) {
        memset(g2h_untagged(start), 0, last - start + 1);
    } else {
        if (pread(fd, g2h_untagged(start), last - start + 1, offset) == -1) {
            return false;
        }
    }

    /* Put final protection */
    if (host_prot_new != host_prot_old) {
        mprotect(host_start, host_page_size, host_prot_new);
    }
    return true;
}

abi_ulong task_unmapped_base;
abi_ulong elf_et_dyn_base;
abi_ulong mmap_next_start;

/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated
 * a chunk of guest address space.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    target_ulong ret;

    ret = page_find_range_empty(start, reserved_va, size, align);
    if (ret == -1 && start > mmap_min_addr) {
        /* Restart at the beginning of the address space. */
        ret = page_find_range_empty(mmap_min_addr, start - 1, size, align);
    }

    return ret;
}

/*
 * Find and reserve a free memory area of size 'size'.  The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 on error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    int host_page_size = qemu_real_host_page_size();
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= -host_page_size;
    }
    start = ROUND_UP(start, align);
    size = ROUND_UP(size, host_page_size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve the needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);

        /* ENOMEM if the host address space has no memory. */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /*
         * Count the number of sequential returns of the same address.
         * This is used to modify the search algorithm below.
         */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success. */
                if (start == mmap_next_start && addr >= task_unmapped_base) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target. */
            switch (repeat) {
            case 0:
                /*
                 * Assume the result that the kernel gave us is the
                 * first with enough free space, so start again at the
                 * next higher target page.
                 */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /*
                 * Sometimes the kernel decides to perform the allocation
                 * at the top end of memory instead.
                 */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory. */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last. */
                addr = -1;
                break;
            }
        } else {
            /*
             * Since the result the kernel gave didn't fit, start
             * again at low memory.  If any repetition, fail.
             */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again. */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space. */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /*
             * Don't actually use 0 when wrapping, instead indicate
             * that we'd truly like an allocation in low memory.
             */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

/*
 * Record a successful mmap within the user-exec interval tree.
 * [passthrough_start, passthrough_last] bounds the guest pages whose
 * host mapping matches the guest view byte for byte, so host syscalls
 * such as madvise can act on them directly (PAGE_PASSTHROUGH); an
 * empty range (start > last) means no such pages exist.
 */
static abi_long mmap_end(abi_ulong start, abi_ulong last,
                         abi_ulong passthrough_start,
                         abi_ulong passthrough_last,
                         int flags, int page_flags)
{
    if (flags & MAP_ANONYMOUS) {
        page_flags |= PAGE_ANON;
    }
    page_flags |= PAGE_RESET;
    if (passthrough_start > passthrough_last) {
        page_set_flags(start, last, page_flags);
    } else {
        if (start < passthrough_start) {
            page_set_flags(start, passthrough_start - 1, page_flags);
        }
        page_set_flags(passthrough_start, passthrough_last,
                       page_flags | PAGE_PASSTHROUGH);
        if (passthrough_last < last) {
            page_set_flags(passthrough_last + 1, last, page_flags);
        }
    }
    shm_region_rm_complete(start, last);
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        FILE *f = qemu_log_trylock();
        if (f) {
            fprintf(f, "page layout changed following mmap\n");
            page_dump(f);
            qemu_log_unlock(f);
        }
    }
    return start;
}

/*
 * Special case host page size == target page size,
 * where there are no edge conditions.
 */
static abi_long mmap_h_eq_g(abi_ulong start, abi_ulong len,
                            int host_prot, int flags, int page_flags,
                            int fd, off_t offset)
{
    void *p, *want_p = g2h_untagged(start);
    abi_ulong last;

    p = mmap(want_p, len, host_prot, flags, fd, offset);
    if (p == MAP_FAILED) {
        return -1;
    }
    /*
     * If the host kernel does not support MAP_FIXED_NOREPLACE, it ignores
     * the unknown flag and treats the address as a plain hint; detect the
     * changed address and emulate the EEXIST failure of newer kernels.
     */
    if ((flags & MAP_FIXED_NOREPLACE) && p != want_p) {
        do_munmap(p, len);
        errno = EEXIST;
        return -1;
    }

    start = h2g(p);
    last = start + len - 1;
    return mmap_end(start, last, start, last, flags, page_flags);
}

static abi_long target_mmap__locked(abi_ulong start, abi_ulong len,
                                    int target_prot, int flags, int page_flags,
                                    int fd, off_t offset)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong ret, last, real_start, real_last, retaddr, host_len;
    abi_ulong passthrough_start = -1, passthrough_last = 0;
    off_t host_offset;
    int host_prot;

    real_start = start & -host_page_size;
    host_offset = offset & -host_page_size;

    /*
     * For reserved_va, we are in full control of the allocation.
     * Find a suitable hole and convert to MAP_FIXED.
     */
    if (reserved_va) {
        if (flags & MAP_FIXED_NOREPLACE) {
            /* Validate that the chosen range is empty. */
            if (!page_check_range_empty(start, start + len - 1)) {
                errno = EEXIST;
                return -1;
            }
            flags = (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED;
        } else if (!(flags & MAP_FIXED)) {
            size_t real_len = len + offset - host_offset;
            abi_ulong align = MAX(host_page_size, TARGET_PAGE_SIZE);

            start = mmap_find_vma(real_start, real_len, align);
            if (start == (abi_ulong)-1) {
                errno = ENOMEM;
                return -1;
            }
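            /*
             * Shift the chosen address so that the mapping begins at the
             * same offset within a host page as the file offset does.
             */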
            start += offset - host_offset;
            flags |= MAP_FIXED;
        }
    }

    host_prot = target_to_host_prot(target_prot);

    if (host_page_size == TARGET_PAGE_SIZE) {
        return mmap_h_eq_g(start, len, host_prot, flags,
                           page_flags, fd, offset);
    }

    /*
     * When mapping files into a memory area larger than the file, accesses
     * to pages beyond the file size will cause a SIGBUS.
     *
     * For example, if mmapping a file of 100 bytes on a host with 4K pages
     * emulating a target with 8K pages, the target expects to be able to
     * access the first 8K.  But the host will trap us on any access beyond
     * 4K.
     *
     * When emulating a target with a larger page size than the host's, we
     * may need to truncate file maps at EOF and add extra anonymous pages
     * up to the target's page boundary.
     */
    if (host_page_size < TARGET_PAGE_SIZE && !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            return -1;
        }

        /* Are we trying to create a map beyond EOF? */
        if (offset + len > sb.st_size) {
            /*
             * If so, truncate the file map at EOF aligned with
             * the host's real page size.  Additional anonymous maps
             * will be created beyond EOF.
             */
            len = ROUND_UP(sb.st_size - offset, host_page_size);
        }
    }

    if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
        uintptr_t host_start;
        void *p;

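        /*
         * Reserve the whole host-page-aligned range anonymously first,
         * then (for file maps) overlay the file with MAP_FIXED inside
         * that reservation.  This guarantees a contiguous region whose
         * alignment padding, present because host pages are larger than
         * target pages, is backed by mapped memory.
         */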
        host_len = len + offset - host_offset;
        host_len = ROUND_UP(host_len, host_page_size);

        /* Note: we prefer to control the mapping address. */
        p = mmap(g2h_untagged(start), host_len, host_prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return -1;
        }
        /* Update start so that it points to the file position at 'offset'. */
        host_start = (uintptr_t)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h_untagged(start), len, host_prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                do_munmap(g2h_untagged(start), host_len);
                return -1;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
        last = start + len - 1;
        passthrough_start = start;
        passthrough_last = last;
    } else {
        last = start + len - 1;
        real_last = ROUND_UP(last, host_page_size) - 1;

        if (flags & MAP_FIXED_NOREPLACE) {
            /* Validate that the chosen range is empty. */
            if (!page_check_range_empty(start, last)) {
                errno = EEXIST;
                return -1;
            }

            /*
             * With reserved_va, the entire address space is mmapped in the
             * host to ensure it isn't accidentally used for something else.
             * We have just checked that the guest address is not mapped
             * within the guest, but need to replace the host reservation.
             *
             * Without reserved_va, despite the guest address check above,
             * keep MAP_FIXED_NOREPLACE so that the guest does not overwrite
             * any host address mappings.
             */
            if (reserved_va) {
                flags = (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED;
            }
        }

        /*
         * Worst case: we cannot map the file because the offset is not
         * host-page aligned, so we allocate anonymous memory and read
         * the file contents instead.
         */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & (host_page_size - 1)) != (start & (host_page_size - 1))) {
            /*
             * msync() won't work here, so we return an error if write is
             * possible while it is a shared mapping.
             */
            if ((flags & MAP_TYPE) == MAP_SHARED
                && (target_prot & PROT_WRITE)) {
                errno = EINVAL;
                return -1;
            }
            retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
                                  (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))
                                  | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1) {
                return -1;
            }
            if (pread(fd, g2h_untagged(start), len, offset) == -1) {
                return -1;
            }
            if (!(target_prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, target_prot);
                assert(ret == 0);
            }
            return mmap_end(start, last, -1, 0, flags, page_flags);
        }

        /* Handle the start of the mapping. */
        if (start > real_start) {
            if (real_last == real_start + host_page_size - 1) {
                /* One single host page. */
                if (!mmap_frag(real_start, start, last,
                               target_prot, flags, fd, offset)) {
                    return -1;
                }
                return mmap_end(start, last, -1, 0, flags, page_flags);
            }
            if (!mmap_frag(real_start, start,
                           real_start + host_page_size - 1,
                           target_prot, flags, fd, offset)) {
                return -1;
            }
            real_start += host_page_size;
        }
        /* Handle the end of the mapping. */
        if (last < real_last) {
            abi_ulong real_page = real_last - host_page_size + 1;
            if (!mmap_frag(real_page, real_page, last,
                           target_prot, flags, fd,
                           offset + real_page - start)) {
                return -1;
            }
            real_last -= host_page_size;
        }

        /* Map the middle (easier). */
        if (real_start < real_last) {
            void *p, *want_p;
            off_t offset1;
            size_t len1;

            if (flags & MAP_ANONYMOUS) {
                offset1 = 0;
            } else {
                offset1 = offset + real_start - start;
            }
            len1 = real_last - real_start + 1;
            want_p = g2h_untagged(real_start);

            p = mmap(want_p, len1, host_prot, flags, fd, offset1);
            if (p != want_p) {
                if (p != MAP_FAILED) {
                    do_munmap(p, len1);
                    errno = EEXIST;
                }
                return -1;
            }
            passthrough_start = real_start;
            passthrough_last = real_last;
        }
    }
    return mmap_end(start, last, passthrough_start, passthrough_last,
                    flags, page_flags);
}

/* NOTE: all the constants are the HOST ones. */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                     int flags, int fd, off_t offset)
{
    abi_long ret;
    int page_flags;

    trace_target_mmap(start, len, target_prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        return -1;
    }

    page_flags = validate_prot_to_pageflags(target_prot);
    if (!page_flags) {
        errno = EINVAL;
        return -1;
    }

    /* Also check for overflows... */
    len = TARGET_PAGE_ALIGN(len);
    if (!len || len != (size_t)len) {
        errno = ENOMEM;
        return -1;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        return -1;
    }
    if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            return -1;
        }
        if (!guest_range_valid_untagged(start, len)) {
            errno = ENOMEM;
            return -1;
        }
    }

    mmap_lock();

    ret = target_mmap__locked(start, len, target_prot, flags,
                              page_flags, fd, offset);

    mmap_unlock();

    /*
     * If we're mapping shared memory, ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (ret != -1 && (flags & MAP_TYPE) != MAP_PRIVATE) {
        CPUState *cpu = thread_cpu;
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }
    }

    return ret;
}

static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong real_start;
    abi_ulong real_last;
    abi_ulong real_len;
    abi_ulong last;
    abi_ulong a;
    void *host_start;
    int prot;

    last = start + len - 1;
    real_start = start & -host_page_size;
    real_last = ROUND_UP(last, host_page_size) - 1;

    /*
     * If guest pages remain on the first or last host pages,
     * adjust the deallocation to retain those guest pages.
     * The single page special case is required for the last page,
     * lest real_start overflow to zero.
     */
    if (real_last - real_start < host_page_size) {
        prot = 0;
        for (a = real_start; a < start; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a);
        }
        for (a = last; a < real_last; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a + 1);
        }
        if (prot != 0) {
            return 0;
        }
    } else {
        for (prot = 0, a = real_start; a < start; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a);
        }
        if (prot != 0) {
            real_start += host_page_size;
        }

        for (prot = 0, a = last; a < real_last; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a + 1);
        }
        if (prot != 0) {
            real_last -= host_page_size;
        }

        if (real_last < real_start) {
            return 0;
        }
    }

    real_len = real_last - real_start + 1;
    host_start = g2h_untagged(real_start);

    return do_munmap(host_start, real_len);
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    int ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        return -1;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        errno = EINVAL;
        return -1;
    }

    mmap_lock();
    ret = mmap_reserve_or_unmap(start, len);
    if (likely(ret == 0)) {
        page_set_flags(start, start + len - 1, 0);
        shm_region_rm_complete(start, start + len - 1);
    }
    mmap_unlock();

    return ret;
}

abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    if (!guest_range_valid_untagged(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid_untagged(new_addr, new_size)) ||
        ((flags & MREMAP_MAYMOVE) == 0 &&
         !guest_range_valid_untagged(old_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                           flags, g2h_untagged(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /*
             * If new and old addresses overlap then the above mremap will
             * already have failed with EINVAL.
             */
            mmap_reserve_or_unmap(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED,
                               g2h_untagged(mmap_start));
            if (reserved_va) {
                mmap_reserve_or_unmap(old_addr, old_size);
            }
        }
    } else {
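        /*
         * Neither MREMAP_FIXED nor MREMAP_MAYMOVE: the mapping may only
         * be resized in place.  With reserved_va, permit growth only if
         * none of the guest pages beyond the old mapping are valid, so
         * that live guest memory cannot be clobbered.
         */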
        int page_flags = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                page_flags |= page_get_flags(addr);
            }
        }
        if (page_flags == 0) {
            host_addr = mremap(g2h_untagged(old_addr),
                               old_size, new_size, flags);

            if (host_addr != MAP_FAILED) {
                /* Check if address fits target address space. */
                if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
                    /* Revert mremap() changes. */
                    host_addr = mremap(g2h_untagged(old_addr),
                                       new_size, old_size, flags);
                    errno = ENOMEM;
                    host_addr = MAP_FAILED;
                } else if (reserved_va && old_size > new_size) {
                    /* Re-reserve the tail released by shrinking. */
                    mmap_reserve_or_unmap(old_addr + new_size,
                                          old_size - new_size);
                }
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size - 1, 0);
        shm_region_rm_complete(old_addr, old_addr + old_size - 1);
        page_set_flags(new_addr, new_addr + new_size - 1,
                       prot | PAGE_VALID | PAGE_RESET);
        shm_region_rm_complete(new_addr, new_addr + new_size - 1);
    }
    mmap_unlock();
    return new_addr;
}

abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
{
    abi_ulong len;
    int ret = 0;

    if (start & ~TARGET_PAGE_MASK) {
        return -TARGET_EINVAL;
    }
    if (len_in == 0) {
        return 0;
    }
    len = TARGET_PAGE_ALIGN(len_in);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    /* Translate for some architectures which have different MADV_xxx values. */
    switch (advice) {
    case TARGET_MADV_DONTNEED:      /* alpha */
        advice = MADV_DONTNEED;
        break;
    case TARGET_MADV_WIPEONFORK:    /* parisc */
        advice = MADV_WIPEONFORK;
        break;
    case TARGET_MADV_KEEPONFORK:    /* parisc */
        advice = MADV_KEEPONFORK;
        break;
    /* We do not care about the other MADV_xxx values yet. */
    }

    /*
     * Most advice values are hints, so ignoring and returning success is ok.
     *
     * However, some advice values such as MADV_DONTNEED, MADV_WIPEONFORK and
     * MADV_KEEPONFORK are not hints and need to be emulated.
     *
     * A straight passthrough for those may not be safe because qemu sometimes
     * turns private file-backed mappings into anonymous mappings.
     * If all guest pages have PAGE_PASSTHROUGH set, mappings have the
     * same semantics for the host as for the guest.
     *
     * We pass through MADV_WIPEONFORK and MADV_KEEPONFORK if possible and
     * return failure if not.
     *
     * MADV_DONTNEED is passed through as well, if possible.
     * If passthrough isn't possible, we nevertheless (wrongly!) return
     * success, which is broken but some userspace programs fail to work
     * otherwise.  Completely implementing such emulation is quite
     * complicated though.
     */
    mmap_lock();
    switch (advice) {
    case MADV_WIPEONFORK:
    case MADV_KEEPONFORK:
        ret = -EINVAL;
        /* fall through */
    case MADV_DONTNEED:
        if (page_check_range(start, len, PAGE_PASSTHROUGH)) {
            ret = get_errno(madvise(g2h_untagged(start), len, advice));
            if ((advice == MADV_DONTNEED) && (ret == 0)) {
                page_reset_target_data(start, start + len - 1);
            }
        }
    }
    mmap_unlock();

    return ret;
}

#ifndef TARGET_FORCE_SHMLBA
/*
 * For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned.  Since the only architecture
 * which uses this is ia64, this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif

abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
                       abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong raddr;
    struct shmid_ds shm_info;
    int ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* Find out the length of the shared memory segment. */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* Can't get the length, bail out. */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    WITH_MMAP_LOCK_GUARD() {
        void *host_raddr;
        abi_ulong last;

        if (shmaddr) {
            host_raddr = shmat(shmid, g2h_untagged(shmaddr), shmflg);
        } else {
            abi_ulong mmap_start;

            /* In order to use the host shmat, we need to honor host SHMLBA. */
            mmap_start = mmap_find_vma(0, shm_info.shm_segsz,
                                       MAX(SHMLBA, shmlba));

            if (mmap_start == -1) {
                return -TARGET_ENOMEM;
            }
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
        }

        if (host_raddr == (void *)-1) {
            return get_errno(-1);
        }
        raddr = h2g(host_raddr);
        last = raddr + shm_info.shm_segsz - 1;

        page_set_flags(raddr, last,
                       PAGE_VALID | PAGE_RESET | PAGE_READ |
                       (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

        shm_region_rm_complete(raddr, last);
        shm_region_add(raddr, last);
    }

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    return raddr;
}

abi_long target_shmdt(abi_ulong shmaddr)
{
    abi_long rv;

    /* shmdt pointers are always untagged */

    WITH_MMAP_LOCK_GUARD() {
        abi_ulong last = shm_region_find(shmaddr);
        if (last == 0) {
            return -TARGET_EINVAL;
        }

        rv = get_errno(shmdt(g2h_untagged(shmaddr)));
        if (rv == 0) {
            abi_ulong size = last - shmaddr + 1;

            page_set_flags(shmaddr, last, 0);
            shm_region_rm_complete(shmaddr, last);
            mmap_reserve_or_unmap(shmaddr, size);
        }
    }
    return rv;
}