/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/mman.h>

#include "cpu-i386.h"

//#define DEBUG_TB_INVALIDATE
#define DEBUG_FLUSH

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define CODE_GEN_MAX_BLOCKS (CODE_GEN_BUFFER_SIZE / 64)

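/* Statically allocated TB descriptors and code buffer: tbs[] holds the
   descriptors, tb_hash[] chains them by hashed target PC, and the
   generated host code is written sequentially into code_gen_buffer
   starting at code_gen_ptr. */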
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

/* XXX: pack the flags in the low bits of the pointer ? */
typedef struct PageDesc {
    unsigned long flags;
    TranslationBlock *first_tb;
} PageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void tb_invalidate_page(unsigned long address);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;

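/* Two-level page descriptor table for the 32-bit target address space:
   the top L1_BITS of a target page index select an entry in l1_map[],
   which points to a lazily allocated array of L2_SIZE PageDesc entries
   indexed by the low L2_BITS (see page_find_alloc). */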
static PageDesc *l1_map[L1_SIZE];

void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
    real_host_page_size = getpagesize();
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
}

/* dump memory mappings */
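/* One line is printed per contiguous run of target pages that share the
   same protection bits; the extra iteration with i == L1_SIZE only serves
   to flush the final range. */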
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_page(addr);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

void cpu_x86_tblocks_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
    }
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++)
                p[j].first_tb = NULL;
        }
    }
}

/* flush all the translation blocks */
void tb_flush(void)
{
    int i;
#ifdef DEBUG_FLUSH
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           (int)(code_gen_ptr - code_gen_buffer),
           nb_tbs,
           nb_tbs > 0 ? (int)(code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    page_flush_tb();
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point */
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
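/* Unlink 'tb' from a singly linked TB list. 'next_offset' is the byte
   offset of the relevant 'next' pointer inside TranslationBlock, so the
   same helper works for both the hash chain and the per-page chains. */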
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

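/* A TB can span two consecutive target pages, which necessarily have
   opposite parity, so each TB carries two page_next links indexed by the
   parity of the page index. tb_invalidate() only unlinks the TB from the
   page lists whose parity matches 'parity'; tb_invalidate_page() passes
   the opposite parity of the page it is flushing because that page's own
   list is dropped wholesale (p->first_tb = NULL). */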
static inline void tb_invalidate(TranslationBlock *tb, int parity)
{
    PageDesc *p;
    unsigned int page_index1, page_index2;
    unsigned int h;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    tb_remove(&tb_hash[h], tb,
              offsetof(TranslationBlock, hash_next));
    /* remove the TB from the page list */
    page_index1 = tb->pc >> TARGET_PAGE_BITS;
    if ((page_index1 & 1) == parity) {
        p = page_find(page_index1);
        tb_remove(&p->first_tb, tb,
                  offsetof(TranslationBlock, page_next[page_index1 & 1]));
    }
    page_index2 = (tb->pc + tb->size - 1) >> TARGET_PAGE_BITS;
    if ((page_index2 & 1) == parity) {
        p = page_find(page_index2);
        tb_remove(&p->first_tb, tb,
                  offsetof(TranslationBlock, page_next[page_index2 & 1]));
    }
}

/* invalidate all TBs which intersect with the target page starting at addr */
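/* The page's own list is walked via the page_next link of this page's
   parity; tb_invalidate() removes each TB from the hash table and from the
   list of its other page, so clearing p->first_tb at the end is enough. */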
static void tb_invalidate_page(unsigned long address)
{
    TranslationBlock *tb_next, *tb;
    unsigned int page_index;
    int parity1, parity2;
    PageDesc *p;
#ifdef DEBUG_TB_INVALIDATE
    printf("tb_invalidate_page: %lx\n", address);
#endif

    page_index = address >> TARGET_PAGE_BITS;
    p = page_find(page_index);
    if (!p)
        return;
    tb = p->first_tb;
    parity1 = page_index & 1;
    parity2 = parity1 ^ 1;
    while (tb != NULL) {
        tb_next = tb->page_next[parity1];
        tb_invalidate(tb, parity2);
        tb = tb_next;
    }
    p->first_tb = NULL;
}

/* add the tb in the target page and protect it if necessary */
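/* The host page size can be larger than TARGET_PAGE_SIZE, so mprotect()
   below is applied to a whole host page and the protection used is the
   union of the flags of every target page it contains, minus PAGE_WRITE. */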
static inline void tb_alloc_page(TranslationBlock *tb, unsigned int page_index)
{
    PageDesc *p;
    unsigned long host_start, host_end, addr, page_addr;
    int prot;

    p = page_find_alloc(page_index);
    tb->page_next[page_index & 1] = p->first_tb;
    p->first_tb = tb;
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr = (page_index << TARGET_PAGE_BITS);
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
#ifdef DEBUG_TB_CHECK
        tb_page_check();
#endif
    }
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc,
                           unsigned long size)
{
    TranslationBlock *tb;
    unsigned int page_index1, page_index2;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        tb_flush();
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->size = size;

    /* add in the page list */
    page_index1 = pc >> TARGET_PAGE_BITS;
    tb_alloc_page(tb, page_index1);
    page_index2 = (pc + size - 1) >> TARGET_PAGE_BITS;
    if (page_index2 != page_index1) {
        tb_alloc_page(tb, page_index2);
    }
    return tb;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address)
{
    unsigned int page_index, prot;
    PageDesc *p;
    unsigned long host_start, host_end, addr;

    page_index = address >> TARGET_PAGE_BITS;
    p = page_find(page_index);
    if (!p)
        return 0;
    if ((p->flags & (PAGE_WRITE_ORG | PAGE_WRITE)) == PAGE_WRITE_ORG) {
        /* if the page was really writable, then we change its
           protection back to writable */
        host_start = address & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) | PAGE_WRITE);
        p->flags |= PAGE_WRITE;

        /* and since the content will be modified, we must invalidate
           the corresponding translated code. */
        tb_invalidate_page(address);
#ifdef DEBUG_TB_CHECK
        tb_invalidate_check(address);
#endif
        return 1;
    } else {
        return 0;
    }
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr);
    }
}