/*
 * mmap support for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "qemu.h"

//#define DEBUG_MMAP

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

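/*
 * The mmap lock is recursive per thread: mmap_lock_count counts how many
 * times the current thread holds it, and the underlying mutex is only
 * taken on the first acquisition and released on the last exit.
 */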
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}

/* NOTE: all the constants are the HOST ones, but addresses are target. */
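/*
 * When the target page size is smaller than the host page size, the first
 * and last host pages of the range may also contain target pages outside
 * the requested region; their existing protections are merged in so the
 * host mprotect() does not take permissions away from neighbouring pages.
 */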
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

    trace_target_mprotect(start, len, prot);

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (!guest_range_valid(start, len)) {
        return -TARGET_ENOMEM;
    }
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

/* map an incomplete host page */
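/*
 * Such fragments occur when the target page size is smaller than the host
 * page size: a single host page can then back several target mappings, so
 * it is mapped anonymously (or its file data read via pread) and given the
 * union of the protections of every target page it contains.
 */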
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (pread(fd, g2h(start), end - start, offset) == -1)
            return -1;

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h(start), 0, end - start);
        }
    }
    return 0;
}

#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE (1ul << 38)
#else
# define TASK_UNMAPPED_BASE 0x40000000
#endif
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;

/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
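/* The scan moves downward from the hinted address, restarting below any
   page that is found in use, and wraps around to the top of the reserved
   region at most once before giving up.  */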
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    abi_ulong addr, end_addr, incr = qemu_host_page_size;
    int prot;
    bool looped = false;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    /* Note that start and size have already been aligned by mmap_find_vma. */

    end_addr = start + size;
    if (start > reserved_va - size) {
        /* Start at the top of the address space.  */
        end_addr = ((reserved_va - size) & -align) + size;
        looped = true;
    }

    /* Search downward from END_ADDR, checking to see if a page is in use.  */
    addr = end_addr;
    while (1) {
        addr -= incr;
        if (addr > end_addr) {
            if (looped) {
                /* Failure.  The entire address space has been searched.  */
                return (abi_ulong)-1;
            }
            /* Re-start at the top of the address space.  */
            addr = end_addr = ((reserved_va - size) & -align) + size;
            looped = true;
        } else {
            prot = page_get_flags(addr);
            if (prot) {
                /* Page in use.  Restart below this page.  */
                addr = end_addr = ((addr - size) & -align) + size;
            } else if (addr && addr + size == end_addr) {
                /* Success!  All pages between ADDR and END_ADDR are free.  */
                if (start == mmap_next_start) {
                    mmap_next_start = addr;
                }
                return addr;
            }
        }
    }
}

/*
 * Find and reserve a free memory area of size 'size'.  The search
 * starts at 'start'.  It must be called with mmap_lock() held.
 * Returns -1 on error.
 */
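/*
 * When reserved_va is not in use, the function probes the host by asking
 * for a PROT_NONE mapping near the hint and checking whether the returned
 * address is valid, and suitably aligned, as a guest address.  The caller
 * then replaces the reservation with the real mapping (see the in-code
 * comment below on how the reservation is meant to be discarded).
 */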
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, qemu_host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }
    start = ROUND_UP(start, align);

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below.  */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

/* NOTE: all the constants are the HOST ones */
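/*
 * For MAP_FIXED requests the mapping is built in up to three pieces: a
 * leading fragment where the requested start is not host-page aligned, a
 * trailing fragment where the end is not host-page aligned, and a directly
 * mmap()ed middle.  The two fragments are handled by mmap_frag() above.
 */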
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
    trace_target_mmap(start, len, prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        goto fail;
    }

    /* Also check for overflows... */
    len = TARGET_PAGE_ALIGN(len);
    if (!len) {
        errno = ENOMEM;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below.  */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmapping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K.  But the host will trap us on any access beyond
       4K.

       When emulating a target with a larger page size than the host, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the target's page boundary.  */

    if ((qemu_real_host_page_size < qemu_host_page_size) &&
        !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1)
            goto fail;

        /* Are we trying to create a map beyond EOF? */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at EOF aligned with
               the host's real page size.  Additional anonymous maps
               will be created beyond EOF.  */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address.  It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size.  */
        p = mmap(g2h(start), host_len, prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if the requested memory area fits the target address space.
         * It can fail only on a 64-bit host with a 32-bit target.
         * On any other target/host combination the host mmap() handles
         * this error correctly.
         */
        if (!guest_range_valid(start, len)) {
            errno = ENOMEM;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h(start), len, offset) == -1)
                goto fail;
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
the_end:
    trace_target_mmap_complete(start);
#ifdef DEBUG_MMAP
    page_dump(stdout);
    printf("\n");
#endif
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

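/*
 * Replace a mapping with a PROT_NONE reservation.  Used instead of
 * munmap() when reserved_va is in effect, so the host address range stays
 * reserved for the guest; host pages that still contain live target pages
 * at either end are left untouched.
 */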
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    if (real_start != real_end) {
        mmap(g2h(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}

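/*
 * Unmap a guest range.  Host pages at either edge that still hold other
 * live target pages are kept; under reserved_va the freed range is turned
 * back into a PROT_NONE reservation instead of being returned to the host.
 */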
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid(start, len)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}

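/*
 * Remap a guest range.  With MREMAP_FIXED the host mremap() is used
 * directly; with MREMAP_MAYMOVE a new guest region is found first via
 * mmap_find_vma().  Under reserved_va any range vacated by the move, or
 * freed by shrinking, is handed back to mmap_reserve().
 */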
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    if (!guest_range_valid(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid(new_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h(old_addr), old_size, new_size,
                           flags, g2h(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL.  */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED, g2h(mmap_start));
            if (reserved_va) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        int prot = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
            if (host_addr != MAP_FAILED && reserved_va && old_size > new_size) {
                /* Hand the tail freed by shrinking back to the reservation. */
                mmap_reserve(old_addr + new_size, old_size - new_size);
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    tb_invalidate_phys_range(new_addr, new_addr + new_size);
    mmap_unlock();
    return new_addr;
}