/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>

#include "qemu.h"

//#define DEBUG_MMAP

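/*
 * The guest may use a smaller page size (TARGET_PAGE_SIZE) than the host
 * (qemu_host_page_size).  The helpers below therefore emulate the guest's
 * mmap/mprotect/munmap by splitting each request into partially covered
 * host pages at the edges and fully covered host pages in the middle, and
 * by tracking the per-target-page protection in the page flags table
 * (page_get_flags()/page_set_flags()).
 */
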
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_FMT_lx
           " len=0x" TARGET_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
        return -EINVAL;
    if (len == 0)
        return 0;

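    /* Split the range into a leading partial host page, a trailing partial
       host page and a host-page-aligned middle.  A partial host page must
       keep the union of the protections of every target page it contains,
       so those flags are OR-ed in before calling the host mprotect(). */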
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            return ret;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            return ret;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            return ret;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    return 0;
}

/* map an incomplete host page */
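/*
 * Only the [start, end) slice of the host page at real_start belongs to the
 * new mapping; the rest of that host page may already be in use by other
 * target pages.  Since the host cannot map a file at sub-page granularity,
 * the file-backed case is emulated by making the page writable, pread()ing
 * the data into place and then restoring the final protection.
 *
 * Illustrative example (page sizes assumed, not taken from a real target):
 * with 4 KiB target pages and a 64 KiB host page, mapping one target page at
 * target address 0x21000 touches only part of the host page at 0x20000, so
 * target_mmap() would call mmap_frag(0x20000, 0x21000, 0x22000, ...).
 */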
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, ret, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for(addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        ret = (long)mmap(host_start, qemu_host_page_size, prot,
                         flags | MAP_ANONYMOUS, -1, 0);
        if (ret == -1)
            return ret;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -EINVAL;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        pread(fd, g2h(start), end - start, offset);

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    }
    return 0;
}

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    unsigned long host_start;
#if defined(__alpha__) || defined(__sparc__) || defined(__x86_64__) || \
    defined(__ia64) || defined(__mips__)
    static abi_ulong last_start = 0x40000000;
#elif defined(__CYGWIN__)
    /* Cygwin doesn't have a whole lot of address space. */
    static abi_ulong last_start = 0x18000000;
#endif

#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_FMT_lx
               " len=0x" TARGET_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANONYMOUS)
            printf("MAP_ANON ");
        switch(flags & MAP_TYPE) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
            break;
        }
        printf("fd=%d offset=" TARGET_FMT_lx "\n", fd, offset);
    }
#endif

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        return -1;
    }

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return start;
    real_start = start & qemu_host_page_mask;

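    /* For non-fixed mappings the host kernel chooses the placement (with a
       sliding start hint on some 64-bit hosts, mirroring where an i386
       kernel would search).  If the host's real page size is smaller than
       qemu_host_page_size, a larger area is mapped and then trimmed so the
       result is aligned for the guest. */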
    if (!(flags & MAP_FIXED)) {
#if defined(__alpha__) || defined(__sparc__) || defined(__x86_64__) || \
    defined(__ia64) || defined(__mips__) || defined(__CYGWIN__)
        /* tell the kernel to search at the same place as i386 */
        if (real_start == 0) {
            real_start = last_start;
            last_start += HOST_PAGE_ALIGN(len);
        }
#endif
        host_offset = offset & qemu_host_page_mask;
        host_len = len + offset - host_offset;

        if (qemu_host_page_size > qemu_real_host_page_size) {
            /*
             * The guest expects to see mmapped areas aligned to its page
             * size.  If the host's real page size is smaller than the
             * guest's, we need to fix up the maps.  This is done by
             * allocating a larger area, displacing the map (if needed) and
             * finally chopping off the spare room at the edges.
             */

            /*
             * We assume qemu_host_page_size is always the same as
             * TARGET_PAGE_SIZE, see exec.c.  qemu_real_host_page_size is
             * the host's real page size.
             */
            abi_ulong host_end;
            unsigned long host_aligned_start;

            host_len = HOST_PAGE_ALIGN(host_len + qemu_host_page_size
                                       - qemu_real_host_page_size);
            host_start = (unsigned long) mmap(real_start ?
                                              g2h(real_start) : NULL,
                                              host_len, prot, flags,
                                              fd, host_offset);
            if (host_start == -1)
                return -1;

            host_end = host_start + host_len;

            /* Find start and end, aligned to the target's page size, within
               the large mmapped area.  */
            host_aligned_start = TARGET_PAGE_ALIGN(host_start);
            if (!(flags & MAP_ANONYMOUS))
                host_aligned_start += offset - host_offset;

            start = h2g(host_aligned_start);
            end = start + TARGET_PAGE_ALIGN(len);

            /* Chop off the leftovers, if any. */
            if (host_aligned_start > host_start)
                munmap((void *)host_start, host_aligned_start - host_start);
            if (end < host_end)
                munmap((void *)g2h(end), host_end - end);

            goto the_end1;
        } else {
            /* if not fixed, no need to do anything */
            host_start = (long)mmap(real_start ? g2h(real_start) : NULL,
                                    host_len, prot, flags, fd, host_offset);
            if (host_start == -1)
                return -1;
            /* update start so that it points to the file position at 'offset' */
            if (!(flags & MAP_ANONYMOUS))
                host_start += offset - host_offset;
            start = h2g(host_start);
            goto the_end1;
        }
    }

    if (start & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        return -1;
    }
    end = start + len;
    real_end = HOST_PAGE_ALIGN(end);

    /* worst case: we cannot map the file because the offset is not
       aligned, so we read it */
    if (!(flags & MAP_ANONYMOUS) &&
        (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE)) {
            errno = EINVAL;
            return -1;
        }
        retaddr = target_mmap(start, len, prot | PROT_WRITE,
                              MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                              -1, 0);
        if (retaddr == -1)
            return -1;
        pread(fd, g2h(start), len, offset);
        if (!(prot & PROT_WRITE)) {
            ret = target_mprotect(start, len, prot);
            if (ret != 0)
                return ret;
        }
        goto the_end;
    }

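    /* MAP_FIXED case with a page-size-aligned offset: map the partially
       covered host page at the start and at the end with mmap_frag(), then
       map the host-page-aligned middle directly. */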
    /* handle the start of the mapping */
    if (start > real_start) {
        if (real_end == real_start + qemu_host_page_size) {
            /* one single host page */
            ret = mmap_frag(real_start, start, end,
                            prot, flags, fd, offset);
            if (ret == -1)
                return ret;
            goto the_end1;
        }
        ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                        prot, flags, fd, offset);
        if (ret == -1)
            return ret;
        real_start += qemu_host_page_size;
    }
    /* handle the end of the mapping */
    if (end < real_end) {
        ret = mmap_frag(real_end - qemu_host_page_size,
                        real_end - qemu_host_page_size, real_end,
                        prot, flags, fd,
                        offset + real_end - qemu_host_page_size - start);
        if (ret == -1)
            return -1;
        real_end -= qemu_host_page_size;
    }

    /* map the middle (easier) */
    if (real_start < real_end) {
        unsigned long offset1;
        if (flags & MAP_ANONYMOUS)
            offset1 = 0;
        else
            offset1 = offset + real_start - start;
        ret = (long)mmap(g2h(real_start), real_end - real_start,
                         prot, flags, fd, offset1);
        if (ret == -1)
            return -1;
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    return start;
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_FMT_lx " len=0x" TARGET_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

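    /* A boundary host page can only be returned to the host if no other
       valid target page still lives in it; otherwise it stays mapped and
       the freed target pages are merely marked invalid below. */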
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    /* unmap what we can */
    if (real_start < real_end) {
        ret = munmap(g2h(real_start), real_end - real_start);
        if (ret != 0)
            return ret;
    }

    page_set_flags(start, start + len, 0);
    return 0;
}

/* XXX: currently, we only handle MAP_ANONYMOUS and not MAP_FIXED
   blocks which have been allocated starting on a host page */
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    unsigned long host_addr;

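    /* Note that new_addr is never passed to the host: only the 4-argument
       form of mremap() is used (see the XXX below), so the host kernel
       chooses the new location and new_addr is overwritten with it. */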
    /* XXX: use 5 args syscall */
    host_addr = (long)mremap(g2h(old_addr), old_size, new_size, flags);
    if (host_addr == -1)
        return -1;
    new_addr = h2g(host_addr);
    prot = page_get_flags(old_addr);
    page_set_flags(old_addr, old_addr + old_size, 0);
    page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    return new_addr;
}

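/* msync() must be given host-page-aligned addresses, so round start down to
   the containing host page before forwarding the call to the host. */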
int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h(start), end - start, flags);
}