/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#if !defined(CONFIG_SOFTMMU)
#include <sys/mman.h>
#endif

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

typedef struct PageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
    /* list of TBs intersecting this physical page */
    TranslationBlock *first_tb;
    /* to optimize self modifying code handling, we count the write
       accesses to a given page and build a bitmap once a threshold
       is reached */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
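
/* The l1_map/l1_virt_map tables below form a two-level page table:
   assuming a typical TARGET_PAGE_BITS of 12, a 32-bit address splits
   into L1_BITS = 10 level-1 bits, L2_BITS = 10 level-2 bits and a
   12-bit page offset, so l1_map[addr >> 22] points to a lazily
   allocated array of L2_SIZE PageDescs indexed by
   (addr >> 12) & (L2_SIZE - 1). */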

static void io_mem_init(void);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;

static PageDesc *l1_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

static void page_init(void)
{
    /* NOTE: we can always assume that host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    real_host_page_size = 4096;
#else
    real_host_page_size = getpagesize();
#endif
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return NULL;
    return p + (index & (L2_SIZE - 1));
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, target_ulong addr);
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);

static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return NULL;
    return p + (index & (L2_SIZE - 1));
}

static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    /* a tag of 0 would make stale entries look valid again, so when
       the counter wraps we clear every entry explicitly */
    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
    int i;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    virt_page_flush();

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++)
        tb_phys_hash[i] = NULL;
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}
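
/* Note that tb_flush is cheap: it just resets nb_tbs, clears the two
   hash tables and the per-page TB lists, and rewinds code_gen_ptr to
   the start of the buffer; no generated code is individually freed. */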

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* walk the circular list of jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
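
/* The TB lists below use tagged pointers: the low two bits of a
   'page_next' or 'jmp_next' link encode the slot index n (0 or 1,
   i.e. which of the TB's two pages or two jump slots the link comes
   from), while the value 2 marks the head of the circular jump list
   and then points back to the owning TB. Links are masked with
   '& ~3' before being dereferenced. */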

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash; an
           unlinked TB is simply not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        /* start and end fall in the same byte */
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
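
/* For example, set_bits(tab, 3, 7) marks bits 3..9: tab[0] |= 0xf8
   (bits 3..7) and tab[1] |= 0x03 (bits 8..9). */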

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
    tb = tb_alloc((unsigned long)pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc((unsigned long)pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in the range [start, end). NOTE: start and end must refer
   to the same physical page. 'is_cpu_write_access' should be true if
   called from a real cpu write access: the virtual CPU will exit the
   current TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
    CPUState *env = cpu_single_env;
#endif
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (cpu_single_env->cr[0] & CR0_PE_MASK) {
        printf("modifying code at 0x%x size=%d EIP=%x\n",
               (vaddr & TARGET_PAGE_MASK) | (start & ~TARGET_PAGE_MASK), len,
               cpu_single_env->eip);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
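
/* For instance, for a 4-byte write at page offset 0x10 the check above
   tests the low 4 bits of code_bitmap[2]: if none of them is set, no
   translated code overlaps the write and the slow path is skipped. */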

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#ifdef TARGET_HAS_SMC

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected, so we only need to handle the case where the first
       TB is allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
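
/* Self-modifying code is thus caught by write protecting any page
   that contains translated code: in user mode via mprotect() on the
   host page, and in softmmu mode via the IO_MEM_CODE TLB entries
   installed by tlb_protect_code() further below. */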

/* Allocate a new translation block. Return NULL if there are too many
   translation blocks or too much generated code, in which case the
   caller must flush the translation buffer with tb_flush(). */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}

/* find the TB 'tb' such that tb->tc_ptr <= tc_ptr < (tb + 1)->tc_ptr.
   Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
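
/* The binary search above is valid because tbs[] entries are allocated
   in order and their tc_ptr values increase monotonically through
   code_gen_buffer: when no exact match exists, tbs[m_max] is the last
   TB whose tc_ptr lies below tc_ptr, i.e. the TB containing it. */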

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}
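
/* Invalidating the single byte at 'pc' is enough: the TBs containing
   it are discarded and the next execution retranslates that code,
   letting the translator take the updated breakpoint list into
   account. */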

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))
                goto found;
        }
        return 0;
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
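
/* e.g. cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, while an unknown name anywhere
   in the list makes the whole call return 0. */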

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TBs at this
       virtual address */

    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}

static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}

static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       unsigned long phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no
   longer tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that writes are no longer trapped for dirty tracking (the
   page has just been marked dirty) */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}
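
/* Dirty RAM tracking thus works in both directions:
   cpu_physical_memory_reset_dirty() clears the per-page dirty bytes
   and downgrades the matching TLB write entries to IO_MEM_NOTDIRTY,
   while a later write to such a page goes through tlb_set_dirty(),
   which sets the dirty byte again and restores the fast IO_MEM_RAM
   path. */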

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address;
    unsigned long addend;
    int ret;

    p = page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
        first_tb = NULL;
    } else {
        pd = p->phys_offset;
        first_tb = p->first_tb;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else
                /* XXX: the PowerPC code seems not ready to handle
                   self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we act as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
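
/* A TLB entry's 'address' field therefore carries the virtual page
   number plus a handler tag in the low bits (IO_MEM_ROM, IO_MEM_CODE,
   IO_MEM_NOTDIRTY, or an I/O index), while 'addend' is the value that
   turns a virtual address into a host pointer for direct RAM access. */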
1540 | ||
1541 | /* called from signal handler: invalidate the code and unprotect the | |
1542 | page. Return TRUE if the fault was succesfully handled. */ | |
1543 | int page_unprotect(unsigned long addr, unsigned long pc, void *puc) | |
1544 | { | |
1545 | #if !defined(CONFIG_SOFTMMU) | |
1546 | VirtPageDesc *vp; | |
1547 | ||
1548 | #if defined(DEBUG_TLB) | |
1549 | printf("page_unprotect: addr=0x%08x\n", addr); | |
1550 | #endif | |
1551 | addr &= TARGET_PAGE_MASK; | |
1552 | ||
1553 | /* if it is not mapped, no need to worry here */ | |
1554 | if (addr >= MMAP_AREA_END) | |
1555 | return 0; | |
1556 | vp = virt_page_find(addr >> TARGET_PAGE_BITS); | |
1557 | if (!vp) | |
1558 | return 0; | |
1559 | /* NOTE: in this case, validate_tag is _not_ tested as it | |
1560 | validates only the code TLB */ | |
1561 | if (vp->valid_tag != virt_valid_tag) | |
1562 | return 0; | |
1563 | if (!(vp->prot & PAGE_WRITE)) | |
1564 | return 0; | |
1565 | #if defined(DEBUG_TLB) | |
1566 | printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n", | |
1567 | addr, vp->phys_addr, vp->prot); | |
1568 | #endif | |
1569 | if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0) | |
1570 | cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n", | |
1571 | (unsigned long)addr, vp->prot); | |
1572 | /* set the dirty bit */ | |
1573 | phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1; | |
1574 | /* flush the code inside */ | |
1575 | tb_invalidate_phys_page(vp->phys_addr, pc, puc); | |
1576 | return 1; | |
1577 | #else | |
1578 | return 0; | |
1579 | #endif | |
1580 | } | |
1581 | ||
1582 | #else | |
1583 | ||
1584 | void tlb_flush(CPUState *env, int flush_global) | |
1585 | { | |
1586 | } | |
1587 | ||
1588 | void tlb_flush_page(CPUState *env, target_ulong addr) | |
1589 | { | |
1590 | } | |
1591 | ||
1592 | int tlb_set_page(CPUState *env, target_ulong vaddr, | |
1593 | target_phys_addr_t paddr, int prot, | |
1594 | int is_user, int is_softmmu) | |
1595 | { | |
1596 | return 0; | |
1597 | } | |
1598 | ||
1599 | /* dump memory mappings */ | |
1600 | void page_dump(FILE *f) | |
1601 | { | |
1602 | unsigned long start, end; | |
1603 | int i, j, prot, prot1; | |
1604 | PageDesc *p; | |
1605 | ||
1606 | fprintf(f, "%-8s %-8s %-8s %s\n", | |
1607 | "start", "end", "size", "prot"); | |
1608 | start = -1; | |
1609 | end = -1; | |
1610 | prot = 0; | |
1611 | for(i = 0; i <= L1_SIZE; i++) { | |
1612 | if (i < L1_SIZE) | |
1613 | p = l1_map[i]; | |
1614 | else | |
1615 | p = NULL; | |
1616 | for(j = 0;j < L2_SIZE; j++) { | |
1617 | if (!p) | |
1618 | prot1 = 0; | |
1619 | else | |
1620 | prot1 = p[j].flags; | |
1621 | if (prot1 != prot) { | |
1622 | end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS); | |
1623 | if (start != -1) { | |
1624 | fprintf(f, "%08lx-%08lx %08lx %c%c%c\n", | |
1625 | start, end, end - start, | |
1626 | prot & PAGE_READ ? 'r' : '-', | |
1627 | prot & PAGE_WRITE ? 'w' : '-', | |
1628 | prot & PAGE_EXEC ? 'x' : '-'); | |
1629 | } | |
1630 | if (prot1 != 0) | |
1631 | start = end; | |
1632 | else | |
1633 | start = -1; | |
1634 | prot = prot1; | |
1635 | } | |
1636 | if (!p) | |
1637 | break; | |
1638 | } | |
1639 | } | |
1640 | } | |
1641 | ||
1642 | int page_get_flags(unsigned long address) | |
1643 | { | |
1644 | PageDesc *p; | |
1645 | ||
1646 | p = page_find(address >> TARGET_PAGE_BITS); | |
1647 | if (!p) | |
1648 | return 0; | |
1649 | return p->flags; | |
1650 | } | |
1651 | ||
1652 | /* modify the flags of a page and invalidate the code if | |
1653 | necessary. The flag PAGE_WRITE_ORG is positionned automatically | |
1654 | depending on PAGE_WRITE */ | |
1655 | void page_set_flags(unsigned long start, unsigned long end, int flags) | |
1656 | { | |
1657 | PageDesc *p; | |
1658 | unsigned long addr; | |
1659 | ||
1660 | start = start & TARGET_PAGE_MASK; | |
1661 | end = TARGET_PAGE_ALIGN(end); | |
1662 | if (flags & PAGE_WRITE) | |
1663 | flags |= PAGE_WRITE_ORG; | |
1664 | spin_lock(&tb_lock); | |
1665 | for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) { | |
1666 | p = page_find_alloc(addr >> TARGET_PAGE_BITS); | |
1667 | /* if write access is being granted to a previously protected | |
1668 | page, invalidate any translated code inside it */ | |
1669 | if (!(p->flags & PAGE_WRITE) && | |
1670 | (flags & PAGE_WRITE) && | |
1671 | p->first_tb) { | |
1672 | tb_invalidate_phys_page(addr, 0, NULL); | |
1673 | } | |
1674 | p->flags = flags; | |
1675 | } | |
1676 | spin_unlock(&tb_lock); | |
1677 | } | |
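/* Usage sketch: the mmap/mprotect emulation would call page_set_flags()
   once the host-side mapping exists; 'start' and 'len' here are
   hypothetical guest addresses taken from that emulation. */
#if 0
page_set_flags(start, start + len,
               PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC);
#endif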
1678 | ||
1679 | /* called from signal handler: invalidate the code and unprotect the | |
1680 | page. Return TRUE if the fault was successfully handled. */ | |
1681 | int page_unprotect(unsigned long address, unsigned long pc, void *puc) | |
1682 | { | |
1683 | unsigned int page_index, prot, pindex; | |
1684 | PageDesc *p, *p1; | |
1685 | unsigned long host_start, host_end, addr; | |
1686 | ||
1687 | host_start = address & host_page_mask; | |
1688 | page_index = host_start >> TARGET_PAGE_BITS; | |
1689 | p1 = page_find(page_index); | |
1690 | if (!p1) | |
1691 | return 0; | |
1692 | host_end = host_start + host_page_size; | |
1693 | p = p1; | |
1694 | prot = 0; | |
1695 | for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) { | |
1696 | prot |= p->flags; | |
1697 | p++; | |
1698 | } | |
1699 | /* if the page was really writable, then we change its | |
1700 | protection back to writable */ | |
1701 | if (prot & PAGE_WRITE_ORG) { | |
1702 | pindex = (address - host_start) >> TARGET_PAGE_BITS; | |
1703 | if (!(p1[pindex].flags & PAGE_WRITE)) { | |
1704 | mprotect((void *)host_start, host_page_size, | |
1705 | (prot & PAGE_BITS) | PAGE_WRITE); | |
1706 | p1[pindex].flags |= PAGE_WRITE; | |
1707 | /* and since the content will be modified, we must invalidate | |
1708 | the corresponding translated code. */ | |
1709 | tb_invalidate_phys_page(address, pc, puc); | |
1710 | #ifdef DEBUG_TB_CHECK | |
1711 | tb_invalidate_check(address); | |
1712 | #endif | |
1713 | return 1; | |
1714 | } | |
1715 | } | |
1716 | return 0; | |
1717 | } | |
1718 | ||
1719 | /* call this function when system calls directly modify a memory area */ | |
1720 | void page_unprotect_range(uint8_t *data, unsigned long data_size) | |
1721 | { | |
1722 | unsigned long start, end, addr; | |
1723 | ||
1724 | start = (unsigned long)data; | |
1725 | end = start + data_size; | |
1726 | start &= TARGET_PAGE_MASK; | |
1727 | end = TARGET_PAGE_ALIGN(end); | |
1728 | for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) { | |
1729 | page_unprotect(addr, 0, NULL); | |
1730 | } | |
1731 | } | |
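/* Usage sketch: the syscall emulation calls page_unprotect_range()
   before letting the host kernel store into guest memory, so that
   write-protected code pages are unprotected (and their TBs
   invalidated) first.  The read() wrapper and 'host_buf' are
   hypothetical. */
#if 0
page_unprotect_range(host_buf, count); /* drop our write protection */
ret = read(fd, host_buf, count);       /* host kernel writes freely */
#endif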
1732 | ||
1733 | static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr) | |
1734 | { | |
1735 | } | |
1736 | #endif /* defined(CONFIG_USER_ONLY) */ | |
1737 | ||
1738 | /* register physical memory. 'size' must be a multiple of the target | |
1739 | page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an | |
1740 | io memory page */ | |
1741 | void cpu_register_physical_memory(target_phys_addr_t start_addr, | |
1742 | unsigned long size, | |
1743 | unsigned long phys_offset) | |
1744 | { | |
1745 | unsigned long addr, end_addr; | |
1746 | PageDesc *p; | |
1747 | ||
1748 | end_addr = start_addr + size; | |
1749 | for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) { | |
1750 | p = page_find_alloc(addr >> TARGET_PAGE_BITS); | |
1751 | p->phys_offset = phys_offset; | |
1752 | if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) | |
1753 | phys_offset += TARGET_PAGE_SIZE; | |
1754 | } | |
1755 | } | |
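/* Usage sketch: a machine init function registers guest RAM like this;
   the phys_offset argument is the offset into phys_ram_base (0 for the
   start of RAM, assuming plain RAM is encoded as a page-aligned offset
   as the loop above suggests). */
#if 0
cpu_register_physical_memory(0x00000000, phys_ram_size, 0);
#endif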
1756 | ||
1757 | static uint32_t unassigned_mem_readb(target_phys_addr_t addr) | |
1758 | { | |
1759 | return 0; | |
1760 | } | |
1761 | ||
1762 | static void unassigned_mem_writeb(target_phys_addr_t addr, uint32_t val) | |
1763 | { | |
1764 | } | |
1765 | ||
1766 | static CPUReadMemoryFunc *unassigned_mem_read[3] = { | |
1767 | unassigned_mem_readb, | |
1768 | unassigned_mem_readb, | |
1769 | unassigned_mem_readb, | |
1770 | }; | |
1771 | ||
1772 | static CPUWriteMemoryFunc *unassigned_mem_write[3] = { | |
1773 | unassigned_mem_writeb, | |
1774 | unassigned_mem_writeb, | |
1775 | unassigned_mem_writeb, | |
1776 | }; | |
1777 | ||
1778 | /* self-modifying code support in soft mmu mode: writes to a page | |
1779 | containing code are routed to these functions */ | |
1780 | ||
1781 | static void code_mem_writeb(target_phys_addr_t addr, uint32_t val) | |
1782 | { | |
1783 | unsigned long phys_addr; | |
1784 | ||
1785 | phys_addr = addr - (long)phys_ram_base; | |
1786 | #if !defined(CONFIG_USER_ONLY) | |
1787 | tb_invalidate_phys_page_fast(phys_addr, 1); | |
1788 | #endif | |
1789 | stb_raw((uint8_t *)addr, val); | |
1790 | phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1; | |
1791 | } | |
1792 | ||
1793 | static void code_mem_writew(target_phys_addr_t addr, uint32_t val) | |
1794 | { | |
1795 | unsigned long phys_addr; | |
1796 | ||
1797 | phys_addr = addr - (long)phys_ram_base; | |
1798 | #if !defined(CONFIG_USER_ONLY) | |
1799 | tb_invalidate_phys_page_fast(phys_addr, 2); | |
1800 | #endif | |
1801 | stw_raw((uint8_t *)addr, val); | |
1802 | phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1; | |
1803 | } | |
1804 | ||
1805 | static void code_mem_writel(target_phys_addr_t addr, uint32_t val) | |
1806 | { | |
1807 | unsigned long phys_addr; | |
1808 | ||
1809 | phys_addr = addr - (long)phys_ram_base; | |
1810 | #if !defined(CONFIG_USER_ONLY) | |
1811 | tb_invalidate_phys_page_fast(phys_addr, 4); | |
1812 | #endif | |
1813 | stl_raw((uint8_t *)addr, val); | |
1814 | phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1; | |
1815 | } | |
1816 | ||
1817 | static CPUReadMemoryFunc *code_mem_read[3] = { | |
1818 | NULL, /* never used */ | |
1819 | NULL, /* never used */ | |
1820 | NULL, /* never used */ | |
1821 | }; | |
1822 | ||
1823 | static CPUWriteMemoryFunc *code_mem_write[3] = { | |
1824 | code_mem_writeb, | |
1825 | code_mem_writew, | |
1826 | code_mem_writel, | |
1827 | }; | |
1828 | ||
1829 | static void notdirty_mem_writeb(target_phys_addr_t addr, uint32_t val) | |
1830 | { | |
1831 | stb_raw((uint8_t *)addr, val); | |
1832 | tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr); | |
1833 | } | |
1834 | ||
1835 | static void notdirty_mem_writew(target_phys_addr_t addr, uint32_t val) | |
1836 | { | |
1837 | stw_raw((uint8_t *)addr, val); | |
1838 | tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr); | |
1839 | } | |
1840 | ||
1841 | static void notdirty_mem_writel(target_phys_addr_t addr, uint32_t val) | |
1842 | { | |
1843 | stl_raw((uint8_t *)addr, val); | |
1844 | tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr); | |
1845 | } | |
1846 | ||
1847 | static CPUWriteMemoryFunc *notdirty_mem_write[3] = { | |
1848 | notdirty_mem_writeb, | |
1849 | notdirty_mem_writew, | |
1850 | notdirty_mem_writel, | |
1851 | }; | |
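/* A hedged sketch of why both handler sets exist: at TLB-fill time a
   RAM page can be mapped for direct stores only when it is already
   dirty and holds no translated code; otherwise stores are routed
   through the IO_MEM_CODE or IO_MEM_NOTDIRTY handlers above.  The
   predicate name is hypothetical; the real decision lives in
   tlb_set_page(). */
#if 0
if (page_contains_translated_code(paddr))       /* hypothetical */
    pd = IO_MEM_CODE;       /* every store must invalidate TBs first */
else if (!phys_ram_dirty[paddr >> TARGET_PAGE_BITS])
    pd = IO_MEM_NOTDIRTY;   /* first store records the dirty bit */
else
    pd = 0;                 /* plain dirty RAM: direct store */
#endif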
1852 | ||
1853 | static void io_mem_init(void) | |
1854 | { | |
1855 | cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write); | |
1856 | cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write); | |
1857 | cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write); | |
1858 | cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write); | |
1859 | io_mem_nb = 5; /* entries 0-4 are reserved for the built-in handlers */ | |
1860 | ||
1861 | /* alloc dirty bits array */ | |
1862 | phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS); | |
1863 | } | |
1864 | ||
1865 | /* mem_read and mem_write are arrays of functions containing the | |
1866 | functions to access bytes (index 0), words (index 1) and dwords | |
1867 | (index 2). All functions must be supplied. If io_index is non-zero, | |
1868 | the corresponding io zone is modified. If it is zero, a new io zone | |
1869 | is allocated. The return value can be used with | |
1870 | cpu_register_physical_memory(). -1 is returned on error. */ | |
1871 | int cpu_register_io_memory(int io_index, | |
1872 | CPUReadMemoryFunc **mem_read, | |
1873 | CPUWriteMemoryFunc **mem_write) | |
1874 | { | |
1875 | int i; | |
1876 | ||
1877 | if (io_index <= 0) { | |
1878 | if (io_mem_nb >= IO_MEM_NB_ENTRIES) /* table full */ | |
1879 | return -1; | |
1880 | io_index = io_mem_nb++; | |
1881 | } else { | |
1882 | if (io_index >= IO_MEM_NB_ENTRIES) | |
1883 | return -1; | |
1884 | } | |
1885 | ||
1886 | for(i = 0; i < 3; i++) { | |
1887 | io_mem_read[io_index][i] = mem_read[i]; | |
1888 | io_mem_write[io_index][i] = mem_write[i]; | |
1889 | } | |
1890 | return io_index << IO_MEM_SHIFT; | |
1891 | } | |
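/* A sketch of registering a trivial memory-mapped device and wiring it
   into the guest physical address space; the mydev_* names and the
   0xf0000000 base address are hypothetical. */
#if 0
static uint32_t mydev_readb(target_phys_addr_t addr) { return 0xff; }
static void mydev_writeb(target_phys_addr_t addr, uint32_t val) { }

static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readb, mydev_readb,    /* b/w/l: all required */
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writeb, mydev_writeb,
};

static void mydev_init(void)
{
    int io = cpu_register_io_memory(0, mydev_read, mydev_write);
    if (io != -1)
        cpu_register_physical_memory(0xf0000000, 0x1000, io);
}
#endif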
1892 | ||
1893 | /* physical memory access (slow version, mainly for debug) */ | |
1894 | #if defined(CONFIG_USER_ONLY) | |
1895 | void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, | |
1896 | int len, int is_write) | |
1897 | { | |
1898 | int l, flags; | |
1899 | target_ulong page; | |
1900 | ||
1901 | while (len > 0) { | |
1902 | page = addr & TARGET_PAGE_MASK; | |
1903 | l = (page + TARGET_PAGE_SIZE) - addr; | |
1904 | if (l > len) | |
1905 | l = len; | |
1906 | flags = page_get_flags(page); | |
1907 | if (!(flags & PAGE_VALID)) | |
1908 | return; | |
1909 | if (is_write) { | |
1910 | if (!(flags & PAGE_WRITE)) | |
1911 | return; | |
1912 | memcpy((uint8_t *)addr, buf, l); /* copy only this page's chunk */ | |
1913 | } else { | |
1914 | if (!(flags & PAGE_READ)) | |
1915 | return; | |
1916 | memcpy(buf, (uint8_t *)addr, l); /* copy only this page's chunk */ | |
1917 | } | |
1918 | len -= l; | |
1919 | buf += l; | |
1920 | addr += l; | |
1921 | } | |
1922 | } | |
1923 | #else | |
1924 | void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, | |
1925 | int len, int is_write) | |
1926 | { | |
1927 | int l, io_index; | |
1928 | uint8_t *ptr; | |
1929 | uint32_t val; | |
1930 | target_phys_addr_t page; | |
1931 | unsigned long pd; | |
1932 | PageDesc *p; | |
1933 | ||
1934 | while (len > 0) { | |
1935 | page = addr & TARGET_PAGE_MASK; | |
1936 | l = (page + TARGET_PAGE_SIZE) - addr; | |
1937 | if (l > len) | |
1938 | l = len; | |
1939 | p = page_find(page >> TARGET_PAGE_BITS); | |
1940 | if (!p) { | |
1941 | pd = IO_MEM_UNASSIGNED; | |
1942 | } else { | |
1943 | pd = p->phys_offset; | |
1944 | } | |
1945 | ||
1946 | if (is_write) { | |
1947 | if ((pd & ~TARGET_PAGE_MASK) != 0) { | |
1948 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); | |
1949 | if (l >= 4 && ((addr & 3) == 0)) { | |
1950 | /* 32 bit write access */ | |
1951 | val = ldl_raw(buf); | |
1952 | io_mem_write[io_index][2](addr, val); | |
1953 | l = 4; | |
1954 | } else if (l >= 2 && ((addr & 1) == 0)) { | |
1955 | /* 16 bit write access */ | |
1956 | val = lduw_raw(buf); | |
1957 | io_mem_write[io_index][1](addr, val); | |
1958 | l = 2; | |
1959 | } else { | |
1960 | /* 8 bit access */ | |
1961 | val = ldub_raw(buf); | |
1962 | io_mem_write[io_index][0](addr, val); | |
1963 | l = 1; | |
1964 | } | |
1965 | } else { | |
1966 | unsigned long addr1; | |
1967 | addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); | |
1968 | /* RAM case */ | |
1969 | ptr = phys_ram_base + addr1; | |
1970 | memcpy(ptr, buf, l); | |
1971 | /* invalidate code */ | |
1972 | tb_invalidate_phys_page_range(addr1, addr1 + l, 0); | |
1973 | /* set dirty bit */ | |
1974 | phys_ram_dirty[page >> TARGET_PAGE_BITS] = 1; | |
1975 | } | |
1976 | } else { | |
1977 | if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && | |
1978 | (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) { | |
1979 | /* I/O case */ | |
1980 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); | |
1981 | if (l >= 4 && ((addr & 3) == 0)) { | |
1982 | /* 32 bit read access */ | |
1983 | val = io_mem_read[io_index][2](addr); | |
1984 | stl_raw(buf, val); | |
1985 | l = 4; | |
1986 | } else if (l >= 2 && ((addr & 1) == 0)) { | |
1987 | /* 16 bit read access */ | |
1988 | val = io_mem_read[io_index][1](addr); | |
1989 | stw_raw(buf, val); | |
1990 | l = 2; | |
1991 | } else { | |
1992 | /* 8 bit access */ | |
1993 | val = io_mem_read[io_index][0](addr); | |
1994 | stb_raw(buf, val); | |
1995 | l = 1; | |
1996 | } | |
1997 | } else { | |
1998 | /* RAM case */ | |
1999 | ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + | |
2000 | (addr & ~TARGET_PAGE_MASK); | |
2001 | memcpy(buf, ptr, l); | |
2002 | } | |
2003 | } | |
2004 | len -= l; | |
2005 | buf += l; | |
2006 | addr += l; | |
2007 | } | |
2008 | } | |
2009 | #endif | |
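/* A sketch of a convenience wrapper over cpu_physical_memory_rw();
   'ldl_phys_debug' is a hypothetical helper, not an API of this file.
   It shows the calling pattern: caller-supplied buffer plus an
   is_write flag (0 == read). */
#if 0
static uint32_t ldl_phys_debug(target_phys_addr_t addr)
{
    uint8_t buf[4];
    cpu_physical_memory_rw(addr, buf, 4, 0);
    return ldl_raw(buf);
}
#endif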
2010 | ||
2011 | /* virtual memory access for debug */ | |
2012 | int cpu_memory_rw_debug(CPUState *env, target_ulong addr, | |
2013 | uint8_t *buf, int len, int is_write) | |
2014 | { | |
2015 | int l; | |
2016 | target_ulong page, phys_addr; | |
2017 | ||
2018 | while (len > 0) { | |
2019 | page = addr & TARGET_PAGE_MASK; | |
2020 | phys_addr = cpu_get_phys_page_debug(env, page); | |
2021 | /* if no physical page mapped, return an error */ | |
2022 | if (phys_addr == -1) | |
2023 | return -1; | |
2024 | l = (page + TARGET_PAGE_SIZE) - addr; | |
2025 | if (l > len) | |
2026 | l = len; | |
2027 | cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK), | |
2028 | buf, l, is_write); | |
2029 | len -= l; | |
2030 | buf += l; | |
2031 | addr += l; | |
2032 | } | |
2033 | return 0; | |
2034 | } | |
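/* Usage sketch: a gdb-style stub reads guest virtual memory this way;
   'env' selects the CPU whose MMU state is used for translation, and a
   negative return means an unmapped page was hit.  The error path is
   hypothetical. */
#if 0
uint8_t insn[4];
if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) < 0)
    report_unmapped_to_debugger(); /* hypothetical */
#endif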
2035 | ||
2036 | #if !defined(CONFIG_USER_ONLY) | |
2037 | ||
2038 | #define MMUSUFFIX _cmmu | |
2039 | #define GETPC() NULL | |
2040 | #define env cpu_single_env | |
2041 | ||
2042 | #define SHIFT 0 | |
2043 | #include "softmmu_template.h" | |
2044 | ||
2045 | #define SHIFT 1 | |
2046 | #include "softmmu_template.h" | |
2047 | ||
2048 | #define SHIFT 2 | |
2049 | #include "softmmu_template.h" | |
2050 | ||
2051 | #define SHIFT 3 | |
2052 | #include "softmmu_template.h" | |
2053 | ||
2054 | #undef env | |
2055 | ||
2056 | #endif |
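/* The repeated '#define SHIFT n / #include "softmmu_template.h"' above
   is a C "poor man's template": each inclusion stamps out load/store
   helpers for one access size.  A minimal self-contained illustration
   of the idiom (all names hypothetical): */
#if 0
/* --- mini_template.h --- */
#define DATA_SIZE (1 << SHIFT)
#if DATA_SIZE == 1
#define SUFFIX b
#define DATA_TYPE uint8_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define DATA_TYPE uint16_t
#endif
#define glue_(x, y) x##y
#define glue(x, y) glue_(x, y)
static DATA_TYPE glue(load, SUFFIX)(const DATA_TYPE *p) { return *p; }
#undef DATA_SIZE
#undef DATA_TYPE
#undef SUFFIX
#undef SHIFT
/* --- user: each inclusion defines one sized helper --- */
#define SHIFT 0
#include "mini_template.h"   /* defines loadb() */
#define SHIFT 1
#include "mini_template.h"   /* defines loadw() */
#endif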