/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END 0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
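
/* The virtual page table is a two-level array indexed by target page
   number.  As a worked example, assuming a 4 KB target page
   (TARGET_PAGE_BITS == 12): L1_BITS == 32 - 10 - 12 == 10, so a 32-bit
   address splits into a 10-bit L1 index, a 10-bit L2 index and a 12-bit
   page offset.  A lookup is then
   l1_map[addr >> 22][(addr >> 12) & (L2_SIZE - 1)], with the L2 arrays
   allocated lazily on first use (see page_find_alloc() below). */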

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

static void page_init(void)
{
    /* NOTE: we can always assume that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}
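
/* Note that qemu_host_page_size can be larger than TARGET_PAGE_SIZE; as an
   illustrative assumption (not something checked here), think of a host
   kernel using 64 KB pages while the emulated target uses 4 KB pages.
   mprotect() can only operate at host-page granularity, which is why the
   host page size, bits and mask are tracked separately from the target's. */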

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return NULL;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
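
/* The per-page TB lists and the jump lists store a small tag in the low
   two bits of each pointer (TranslationBlock structures are sufficiently
   aligned that those bits are free): tags 0 and 1 select which of the TB's
   two possible pages or jump slots the link belongs to, and tag 2 marks
   the head of a circular list (a TB's own jmp_first is set to
   (long)tb | 2).  The helpers below mask the tag off with ~3 before
   dereferencing the pointer. */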

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
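
/* Worked example for set_bits(): set_bits(tab, 5, 7) marks bits 5..11.
   The run crosses a byte boundary, so the else branch is taken:
   tab[0] |= 0xff << 5 (0xe0, bits 5-7), there are no full middle bytes,
   then tab[1] |= ~(0xff << (12 & 7)) (0x0f, bits 8-11). */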

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   in the half-open range [start, end). NOTE: start and end must refer
   to the same physical page. 'is_cpu_write_access' should be true if
   called from a real cpu write access: the virtual CPU will exit the
   current TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
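
/* The fast path above relies on the stated preconditions: 'start' is a
   multiple of 'len' and len <= 8 (in practice a power-of-two access
   size), so the len bits tested never straddle a byte of code_bitmap and
   a single shift and mask decides whether any translated code overlaps
   the write. */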

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
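
/* The binary search in tb_find_pc() is valid because tbs[] is filled in
   allocation order and code_gen_ptr only grows between flushes, so tc_ptr
   values are strictly increasing with the index.  When no exact match is
   found, tbs[m_max] is the last TB starting at or before tc_ptr, i.e. the
   block containing that host code address. */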

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong addr, pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}
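
/* Chained TBs jump directly to one another without returning to the main
   loop, so a pending interrupt_request alone might not be noticed for a
   long time.  Resetting the outgoing jumps of the current TB (and,
   recursively, of the TBs it chains to) forces execution back to
   cpu_exec(), where the interrupt can be serviced. */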

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
1154 | "show CPU state before bloc translation" }, | |
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
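
/* For example, cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, and "all" selects every entry of
   cpu_log_items; any unrecognized name makes the whole call return 0. */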

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);

    for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        tb = env->tb_jmp_cache[i];
        if (tb &&
            ((tb->pc & TARGET_PAGE_MASK) == addr ||
             ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
            env->tb_jmp_cache[i] = NULL;
        }
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
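
/* Self-modifying code detection reuses the dirty-bit machinery:
   tlb_protect_code() clears CODE_DIRTY_FLAG for a page, which makes
   cpu_physical_memory_reset_dirty() route writes through the slow
   IO_MEM_NOTDIRTY path, where the write handlers (defined elsewhere in
   this file) can invalidate any overlapping TBs.  Once no translated code
   remains on the page, tlb_unprotect_code_phys() sets the flag back so
   writes take the fast path again. */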

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                te->addr_write = vaddr | IO_MEM_ROM;
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
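
/* te->addend is chosen so that, for RAM pages, host address == guest
   virtual address + addend: it starts as phys_ram_base + (pd &
   TARGET_PAGE_MASK) and then has vaddr subtracted, so adding any guest
   address within the page yields the corresponding byte of the host
   page. */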

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, valid_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

1685 | /* modify the flags of a page and invalidate the code if | |
1686 | necessary. The flag PAGE_WRITE_ORG is positioned automatically | |
1687 | depending on PAGE_WRITE */ | |
1688 | void page_set_flags(target_ulong start, target_ulong end, int flags) | |
1689 | { | |
1690 | PageDesc *p; | |
1691 | target_ulong addr; | |
1692 | ||
1693 | start = start & TARGET_PAGE_MASK; | |
1694 | end = TARGET_PAGE_ALIGN(end); | |
1695 | if (flags & PAGE_WRITE) | |
1696 | flags |= PAGE_WRITE_ORG; | |
1697 | spin_lock(&tb_lock); | |
1698 | for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) { | |
1699 | p = page_find_alloc(addr >> TARGET_PAGE_BITS); | |
1700 | /* if the write protection is set, then we invalidate the code | |
1701 | inside */ | |
1702 | if (!(p->flags & PAGE_WRITE) && | |
1703 | (flags & PAGE_WRITE) && | |
1704 | p->first_tb) { | |
1705 | tb_invalidate_phys_page(addr, 0, NULL); | |
1706 | } | |
1707 | p->flags = flags; | |
1708 | } | |
1709 | spin_unlock(&tb_lock); | |
1710 | } | |
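
/* A minimal usage sketch (hypothetical helper, modelled on what a
   user-mode mmap() emulation does): after creating the host mapping,
   record the guest protection bits so that page_get_flags() and
   page_unprotect() see the new range.  The helper name and arguments
   are illustrative only. */
#if 0
static void record_target_mapping(target_ulong start, target_ulong len,
                                  int prot)
{
    int flags = PAGE_VALID;

    if (prot & PROT_READ)
        flags |= PAGE_READ;
    if (prot & PROT_WRITE)
        flags |= PAGE_WRITE;  /* PAGE_WRITE_ORG is then set automatically */
    if (prot & PROT_EXEC)
        flags |= PAGE_EXEC;
    page_set_flags(start, start + len, flags);
}
#endif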
1711 | ||
1712 | /* called from signal handler: invalidate the code and unprotect the | |
1713 | page. Return TRUE if the fault was successfully handled. */ | |
1714 | int page_unprotect(target_ulong address, unsigned long pc, void *puc) | |
1715 | { | |
1716 | unsigned int page_index, prot, pindex; | |
1717 | PageDesc *p, *p1; | |
1718 | target_ulong host_start, host_end, addr; | |
1719 | ||
1720 | host_start = address & qemu_host_page_mask; | |
1721 | page_index = host_start >> TARGET_PAGE_BITS; | |
1722 | p1 = page_find(page_index); | |
1723 | if (!p1) | |
1724 | return 0; | |
1725 | host_end = host_start + qemu_host_page_size; | |
1726 | p = p1; | |
1727 | prot = 0; | |
1728 | for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) { | |
1729 | prot |= p->flags; | |
1730 | p++; | |
1731 | } | |
1732 | /* if the page was really writable, then we change its | |
1733 | protection back to writable */ | |
1734 | if (prot & PAGE_WRITE_ORG) { | |
1735 | pindex = (address - host_start) >> TARGET_PAGE_BITS; | |
1736 | if (!(p1[pindex].flags & PAGE_WRITE)) { | |
1737 | mprotect((void *)g2h(host_start), qemu_host_page_size, | |
1738 | (prot & PAGE_BITS) | PAGE_WRITE); | |
1739 | p1[pindex].flags |= PAGE_WRITE; | |
1740 | /* and since the content will be modified, we must invalidate | |
1741 | the corresponding translated code. */ | |
1742 | tb_invalidate_phys_page(address, pc, puc); | |
1743 | #ifdef DEBUG_TB_CHECK | |
1744 | tb_invalidate_check(address); | |
1745 | #endif | |
1746 | return 1; | |
1747 | } | |
1748 | } | |
1749 | return 0; | |
1750 | } | |
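
/* A sketch of the intended caller (hypothetical; the real logic lives
   in the host signal handling code): a SIGSEGV handler first offers a
   faulting write to page_unprotect(), and only treats it as a guest
   fault if the page could not be unprotected.  h2g() is assumed to be
   the inverse of the g2h() conversion used above. */
#if 0
static int handle_write_fault(unsigned long host_addr, unsigned long pc,
                              void *puc)
{
    if (page_unprotect(h2g(host_addr), pc, puc))
        return 1;   /* handled: retry the faulting instruction */
    return 0;       /* genuine fault: forward it to the guest */
}
#endif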
1751 | ||
1752 | /* call this function when system calls directly modify a memory area */ | |
1753 | /* ??? This should be redundant now we have lock_user. */ | |
1754 | void page_unprotect_range(target_ulong data, target_ulong data_size) | |
1755 | { | |
1756 | target_ulong start, end, addr; | |
1757 | ||
1758 | start = data; | |
1759 | end = start + data_size; | |
1760 | start &= TARGET_PAGE_MASK; | |
1761 | end = TARGET_PAGE_ALIGN(end); | |
1762 | for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) { | |
1763 | page_unprotect(addr, 0, NULL); | |
1764 | } | |
1765 | } | |
1766 | ||
1767 | static inline void tlb_set_dirty(CPUState *env, | |
1768 | unsigned long addr, target_ulong vaddr) | |
1769 | { | |
1770 | } | |
1771 | #endif /* defined(CONFIG_USER_ONLY) */ | |
1772 | ||
1773 | /* register physical memory. 'size' must be a multiple of the target | |
1774 | page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an | |
1775 | io memory page */ | |
1776 | void cpu_register_physical_memory(target_phys_addr_t start_addr, | |
1777 | unsigned long size, | |
1778 | unsigned long phys_offset) | |
1779 | { | |
1780 | target_phys_addr_t addr, end_addr; | |
1781 | PhysPageDesc *p; | |
1782 | ||
1783 | size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK; | |
1784 | end_addr = start_addr + size; | |
1785 | for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) { | |
1786 | p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1); | |
1787 | p->phys_offset = phys_offset; | |
1788 | if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) | |
1789 | phys_offset += TARGET_PAGE_SIZE; | |
1790 | } | |
1791 | } | |
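
/* A usage sketch (hypothetical board code): expose the first ram_size
   bytes of phys_ram_base at guest physical address 0, then map one page
   of device registers.  dev_io_index is assumed to be a value returned
   by cpu_register_io_memory() below; the base address is arbitrary. */
#if 0
static void board_init_memory(unsigned long ram_size, int dev_io_index)
{
    /* RAM: phys_offset is the offset inside phys_ram_base
       (low bits == IO_MEM_RAM) */
    cpu_register_physical_memory(0x00000000, ram_size, IO_MEM_RAM);
    /* one page of memory mapped device registers */
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE,
                                 dev_io_index);
}
#endif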
1792 | ||
1793 | static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr) | |
1794 | { | |
1795 | return 0; | |
1796 | } | |
1797 | ||
1798 | static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) | |
1799 | { | |
1800 | } | |
1801 | ||
1802 | static CPUReadMemoryFunc *unassigned_mem_read[3] = { | |
1803 | unassigned_mem_readb, | |
1804 | unassigned_mem_readb, | |
1805 | unassigned_mem_readb, | |
1806 | }; | |
1807 | ||
1808 | static CPUWriteMemoryFunc *unassigned_mem_write[3] = { | |
1809 | unassigned_mem_writeb, | |
1810 | unassigned_mem_writeb, | |
1811 | unassigned_mem_writeb, | |
1812 | }; | |
1813 | ||
1814 | static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) | |
1815 | { | |
1816 | unsigned long ram_addr; | |
1817 | int dirty_flags; | |
1818 | ram_addr = addr - (unsigned long)phys_ram_base; | |
1819 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | |
1820 | if (!(dirty_flags & CODE_DIRTY_FLAG)) { | |
1821 | #if !defined(CONFIG_USER_ONLY) | |
1822 | tb_invalidate_phys_page_fast(ram_addr, 1); | |
1823 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | |
1824 | #endif | |
1825 | } | |
1826 | stb_p((uint8_t *)(long)addr, val); | |
1827 | #ifdef USE_KQEMU | |
1828 | if (cpu_single_env->kqemu_enabled && | |
1829 | (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK) | |
1830 | kqemu_modify_page(cpu_single_env, ram_addr); | |
1831 | #endif | |
1832 | dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); | |
1833 | phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags; | |
1834 | /* we remove the notdirty callback only if the code has been | |
1835 | flushed */ | |
1836 | if (dirty_flags == 0xff) | |
1837 | tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr); | |
1838 | } | |
1839 | ||
1840 | static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val) | |
1841 | { | |
1842 | unsigned long ram_addr; | |
1843 | int dirty_flags; | |
1844 | ram_addr = addr - (unsigned long)phys_ram_base; | |
1845 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | |
1846 | if (!(dirty_flags & CODE_DIRTY_FLAG)) { | |
1847 | #if !defined(CONFIG_USER_ONLY) | |
1848 | tb_invalidate_phys_page_fast(ram_addr, 2); | |
1849 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | |
1850 | #endif | |
1851 | } | |
1852 | stw_p((uint8_t *)(long)addr, val); | |
1853 | #ifdef USE_KQEMU | |
1854 | if (cpu_single_env->kqemu_enabled && | |
1855 | (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK) | |
1856 | kqemu_modify_page(cpu_single_env, ram_addr); | |
1857 | #endif | |
1858 | dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); | |
1859 | phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags; | |
1860 | /* we remove the notdirty callback only if the code has been | |
1861 | flushed */ | |
1862 | if (dirty_flags == 0xff) | |
1863 | tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr); | |
1864 | } | |
1865 | ||
1866 | static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val) | |
1867 | { | |
1868 | unsigned long ram_addr; | |
1869 | int dirty_flags; | |
1870 | ram_addr = addr - (unsigned long)phys_ram_base; | |
1871 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | |
1872 | if (!(dirty_flags & CODE_DIRTY_FLAG)) { | |
1873 | #if !defined(CONFIG_USER_ONLY) | |
1874 | tb_invalidate_phys_page_fast(ram_addr, 4); | |
1875 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | |
1876 | #endif | |
1877 | } | |
1878 | stl_p((uint8_t *)(long)addr, val); | |
1879 | #ifdef USE_KQEMU | |
1880 | if (cpu_single_env->kqemu_enabled && | |
1881 | (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK) | |
1882 | kqemu_modify_page(cpu_single_env, ram_addr); | |
1883 | #endif | |
1884 | dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); | |
1885 | phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags; | |
1886 | /* we remove the notdirty callback only if the code has been | |
1887 | flushed */ | |
1888 | if (dirty_flags == 0xff) | |
1889 | tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr); | |
1890 | } | |
1891 | ||
1892 | static CPUReadMemoryFunc *error_mem_read[3] = { | |
1893 | NULL, /* never used */ | |
1894 | NULL, /* never used */ | |
1895 | NULL, /* never used */ | |
1896 | }; | |
1897 | ||
1898 | static CPUWriteMemoryFunc *notdirty_mem_write[3] = { | |
1899 | notdirty_mem_writeb, | |
1900 | notdirty_mem_writew, | |
1901 | notdirty_mem_writel, | |
1902 | }; | |
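
/* For reference: each RAM page has one byte of dirty flags in
   phys_ram_dirty.  A cleared CODE_DIRTY_FLAG means the page may contain
   translated code, so writes must go through the notdirty handlers
   above; once every flag is set (0xff) the page is fully dirty and the
   slow path can be dropped, as tested at the end of each handler.  The
   dirty test used elsewhere in this file is roughly the following
   (defined in cpu-all.h; shown here as an approximation): */
#if 0
static inline int cpu_physical_memory_is_dirty(unsigned long addr)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}
#endif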
1903 | ||
1904 | static void io_mem_init(void) | |
1905 | { | |
1906 | cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL); | |
1907 | cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL); | |
1908 | cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL); | |
1909 | io_mem_nb = 5; | |
1910 | ||
1911 | /* alloc dirty bits array */ | |
1912 | phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS); | |
1913 | memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS); | |
1914 | } | |
1915 | ||
1916 | /* mem_read and mem_write are arrays of functions containing the | |
1917 | function to access byte (index 0), word (index 1) and dword (index | |
1918 | 2). All functions must be supplied. If io_index is non-zero, the | |
1919 | corresponding io zone is modified. If it is zero, a new io zone is | |
1920 | allocated. The return value can be used with | |
1921 | cpu_register_physical_memory(). (-1) is returned on error. */ | |
1922 | int cpu_register_io_memory(int io_index, | |
1923 | CPUReadMemoryFunc **mem_read, | |
1924 | CPUWriteMemoryFunc **mem_write, | |
1925 | void *opaque) | |
1926 | { | |
1927 | int i; | |
1928 | ||
1929 | if (io_index <= 0) { | |
1930 | if (io_mem_nb >= IO_MEM_NB_ENTRIES) | |
1931 | return -1; | |
1932 | io_index = io_mem_nb++; | |
1933 | } else { | |
1934 | if (io_index >= IO_MEM_NB_ENTRIES) | |
1935 | return -1; | |
1936 | } | |
1937 | ||
1938 | for(i = 0; i < 3; i++) { | |
1939 | io_mem_read[io_index][i] = mem_read[i]; | |
1940 | io_mem_write[io_index][i] = mem_write[i]; | |
1941 | } | |
1942 | io_mem_opaque[io_index] = opaque; | |
1943 | return io_index << IO_MEM_SHIFT; | |
1944 | } | |
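
/* A usage sketch (hypothetical device): each device supplies three read
   and three write callbacks (byte, word, dword), registers them, and
   maps the returned value as a physical memory region.  All names here
   are illustrative only. */
#if 0
static uint32_t mydev_readb(void *opaque, target_phys_addr_t addr);
static uint32_t mydev_readw(void *opaque, target_phys_addr_t addr);
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr);
static void mydev_writeb(void *opaque, target_phys_addr_t addr, uint32_t val);
static void mydev_writew(void *opaque, target_phys_addr_t addr, uint32_t val);
static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val);

static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void mydev_map(void *dev_state, target_phys_addr_t base)
{
    /* io_index == 0: allocate a new I/O zone */
    int io_index = cpu_register_io_memory(0, mydev_read, mydev_write,
                                          dev_state);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io_index);
}
#endif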
1945 | ||
1946 | CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index) | |
1947 | { | |
1948 | return io_mem_write[io_index >> IO_MEM_SHIFT]; | |
1949 | } | |
1950 | ||
1951 | CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index) | |
1952 | { | |
1953 | return io_mem_read[io_index >> IO_MEM_SHIFT]; | |
1954 | } | |
1955 | ||
1956 | /* physical memory access (slow version, mainly for debug) */ | |
1957 | #if defined(CONFIG_USER_ONLY) | |
1958 | void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, | |
1959 | int len, int is_write) | |
1960 | { | |
1961 | int l, flags; | |
1962 | target_ulong page; | |
1963 | void *p; | |
1964 | ||
1965 | while (len > 0) { | |
1966 | page = addr & TARGET_PAGE_MASK; | |
1967 | l = (page + TARGET_PAGE_SIZE) - addr; | |
1968 | if (l > len) | |
1969 | l = len; | |
1970 | flags = page_get_flags(page); | |
1971 | if (!(flags & PAGE_VALID)) | |
1972 | return; | |
1973 | if (is_write) { | |
1974 | if (!(flags & PAGE_WRITE)) | |
1975 | return; | |
1976 | p = lock_user(addr, l, 0); | |
1977 | memcpy(p, buf, l); | |
1978 | unlock_user(p, addr, l); | |
1979 | } else { | |
1980 | if (!(flags & PAGE_READ)) | |
1981 | return; | |
1982 | p = lock_user(addr, l, 1); | |
1983 | memcpy(buf, p, l); | |
1984 | unlock_user(p, addr, 0); | |
1985 | } | |
1986 | len -= l; | |
1987 | buf += l; | |
1988 | addr += l; | |
1989 | } | |
1990 | } | |
1991 | ||
1992 | #else | |
1993 | void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, | |
1994 | int len, int is_write) | |
1995 | { | |
1996 | int l, io_index; | |
1997 | uint8_t *ptr; | |
1998 | uint32_t val; | |
1999 | target_phys_addr_t page; | |
2000 | unsigned long pd; | |
2001 | PhysPageDesc *p; | |
2002 | ||
2003 | while (len > 0) { | |
2004 | page = addr & TARGET_PAGE_MASK; | |
2005 | l = (page + TARGET_PAGE_SIZE) - addr; | |
2006 | if (l > len) | |
2007 | l = len; | |
2008 | p = phys_page_find(page >> TARGET_PAGE_BITS); | |
2009 | if (!p) { | |
2010 | pd = IO_MEM_UNASSIGNED; | |
2011 | } else { | |
2012 | pd = p->phys_offset; | |
2013 | } | |
2014 | ||
2015 | if (is_write) { | |
2016 | if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { | |
2017 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); | |
2018 | /* XXX: could force cpu_single_env to NULL to avoid | |
2019 | potential bugs */ | |
2020 | if (l >= 4 && ((addr & 3) == 0)) { | |
2021 | /* 32 bit write access */ | |
2022 | val = ldl_p(buf); | |
2023 | io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); | |
2024 | l = 4; | |
2025 | } else if (l >= 2 && ((addr & 1) == 0)) { | |
2026 | /* 16 bit write access */ | |
2027 | val = lduw_p(buf); | |
2028 | io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val); | |
2029 | l = 2; | |
2030 | } else { | |
2031 | /* 8 bit write access */ | |
2032 | val = ldub_p(buf); | |
2033 | io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val); | |
2034 | l = 1; | |
2035 | } | |
2036 | } else { | |
2037 | unsigned long addr1; | |
2038 | addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); | |
2039 | /* RAM case */ | |
2040 | ptr = phys_ram_base + addr1; | |
2041 | memcpy(ptr, buf, l); | |
2042 | if (!cpu_physical_memory_is_dirty(addr1)) { | |
2043 | /* invalidate code */ | |
2044 | tb_invalidate_phys_page_range(addr1, addr1 + l, 0); | |
2045 | /* set dirty bit */ | |
2046 | phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |= | |
2047 | (0xff & ~CODE_DIRTY_FLAG); | |
2048 | } | |
2049 | } | |
2050 | } else { | |
2051 | if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) { | |
2052 | /* I/O case */ | |
2053 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); | |
2054 | if (l >= 4 && ((addr & 3) == 0)) { | |
2055 | /* 32 bit read access */ | |
2056 | val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); | |
2057 | stl_p(buf, val); | |
2058 | l = 4; | |
2059 | } else if (l >= 2 && ((addr & 1) == 0)) { | |
2060 | /* 16 bit read access */ | |
2061 | val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr); | |
2062 | stw_p(buf, val); | |
2063 | l = 2; | |
2064 | } else { | |
2065 | /* 8 bit read access */ | |
2066 | val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr); | |
2067 | stb_p(buf, val); | |
2068 | l = 1; | |
2069 | } | |
2070 | } else { | |
2071 | /* RAM case */ | |
2072 | ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + | |
2073 | (addr & ~TARGET_PAGE_MASK); | |
2074 | memcpy(buf, ptr, l); | |
2075 | } | |
2076 | } | |
2077 | len -= l; | |
2078 | buf += l; | |
2079 | addr += l; | |
2080 | } | |
2081 | } | |
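
/* For reference, the cpu_physical_memory_read()/write() helpers used by
   the ld*_phys/st*_phys routines below are thin wrappers around
   cpu_physical_memory_rw() (defined in cpu-all.h; shown here as an
   approximation): */
#if 0
static inline void cpu_physical_memory_read(target_phys_addr_t addr,
                                            uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, buf, len, 0);
}

static inline void cpu_physical_memory_write(target_phys_addr_t addr,
                                             const uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
}
#endif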
2082 | ||
2083 | /* warning: addr must be aligned */ | |
2084 | uint32_t ldl_phys(target_phys_addr_t addr) | |
2085 | { | |
2086 | int io_index; | |
2087 | uint8_t *ptr; | |
2088 | uint32_t val; | |
2089 | unsigned long pd; | |
2090 | PhysPageDesc *p; | |
2091 | ||
2092 | p = phys_page_find(addr >> TARGET_PAGE_BITS); | |
2093 | if (!p) { | |
2094 | pd = IO_MEM_UNASSIGNED; | |
2095 | } else { | |
2096 | pd = p->phys_offset; | |
2097 | } | |
2098 | ||
2099 | if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) { | |
2100 | /* I/O case */ | |
2101 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); | |
2102 | val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); | |
2103 | } else { | |
2104 | /* RAM case */ | |
2105 | ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + | |
2106 | (addr & ~TARGET_PAGE_MASK); | |
2107 | val = ldl_p(ptr); | |
2108 | } | |
2109 | return val; | |
2110 | } | |
2111 | ||
2112 | /* warning: addr must be aligned */ | |
2113 | uint64_t ldq_phys(target_phys_addr_t addr) | |
2114 | { | |
2115 | int io_index; | |
2116 | uint8_t *ptr; | |
2117 | uint64_t val; | |
2118 | unsigned long pd; | |
2119 | PhysPageDesc *p; | |
2120 | ||
2121 | p = phys_page_find(addr >> TARGET_PAGE_BITS); | |
2122 | if (!p) { | |
2123 | pd = IO_MEM_UNASSIGNED; | |
2124 | } else { | |
2125 | pd = p->phys_offset; | |
2126 | } | |
2127 | ||
2128 | if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) { | |
2129 | /* I/O case */ | |
2130 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); | |
2131 | #ifdef TARGET_WORDS_BIGENDIAN | |
2132 | val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32; | |
2133 | val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4); | |
2134 | #else | |
2135 | val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); | |
2136 | val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32; | |
2137 | #endif | |
2138 | } else { | |
2139 | /* RAM case */ | |
2140 | ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + | |
2141 | (addr & ~TARGET_PAGE_MASK); | |
2142 | val = ldq_p(ptr); | |
2143 | } | |
2144 | return val; | |
2145 | } | |
2146 | ||
2147 | /* XXX: optimize */ | |
2148 | uint32_t ldub_phys(target_phys_addr_t addr) | |
2149 | { | |
2150 | uint8_t val; | |
2151 | cpu_physical_memory_read(addr, &val, 1); | |
2152 | return val; | |
2153 | } | |
2154 | ||
2155 | /* XXX: optimize */ | |
2156 | uint32_t lduw_phys(target_phys_addr_t addr) | |
2157 | { | |
2158 | uint16_t val; | |
2159 | cpu_physical_memory_read(addr, (uint8_t *)&val, 2); | |
2160 | return tswap16(val); | |
2161 | } | |
2162 | ||
2163 | /* warning: addr must be aligned. The ram page is not marked as dirty | |
2164 | and the code inside is not invalidated. This is useful if the dirty | |
2165 | bits are used to track modified PTEs */ | |
2166 | void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val) | |
2167 | { | |
2168 | int io_index; | |
2169 | uint8_t *ptr; | |
2170 | unsigned long pd; | |
2171 | PhysPageDesc *p; | |
2172 | ||
2173 | p = phys_page_find(addr >> TARGET_PAGE_BITS); | |
2174 | if (!p) { | |
2175 | pd = IO_MEM_UNASSIGNED; | |
2176 | } else { | |
2177 | pd = p->phys_offset; | |
2178 | } | |
2179 | ||
2180 | if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { | |
2181 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); | |
2182 | io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); | |
2183 | } else { | |
2184 | ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + | |
2185 | (addr & ~TARGET_PAGE_MASK); | |
2186 | stl_p(ptr, val); | |
2187 | } | |
2188 | } | |
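
/* A usage sketch (hypothetical MMU helper): a target page table walker
   can set accessed/dirty bits in a PTE with stl_phys_notdirty() so that
   the write neither marks the RAM page dirty nor invalidates translated
   code sharing that page. */
#if 0
static void pte_set_flags(target_phys_addr_t pte_addr, uint32_t set)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | set);
}
#endif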
2189 | ||
2190 | /* warning: addr must be aligned */ | |
2191 | void stl_phys(target_phys_addr_t addr, uint32_t val) | |
2192 | { | |
2193 | int io_index; | |
2194 | uint8_t *ptr; | |
2195 | unsigned long pd; | |
2196 | PhysPageDesc *p; | |
2197 | ||
2198 | p = phys_page_find(addr >> TARGET_PAGE_BITS); | |
2199 | if (!p) { | |
2200 | pd = IO_MEM_UNASSIGNED; | |
2201 | } else { | |
2202 | pd = p->phys_offset; | |
2203 | } | |
2204 | ||
2205 | if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { | |
2206 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); | |
2207 | io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); | |
2208 | } else { | |
2209 | unsigned long addr1; | |
2210 | addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); | |
2211 | /* RAM case */ | |
2212 | ptr = phys_ram_base + addr1; | |
2213 | stl_p(ptr, val); | |
2214 | if (!cpu_physical_memory_is_dirty(addr1)) { | |
2215 | /* invalidate code */ | |
2216 | tb_invalidate_phys_page_range(addr1, addr1 + 4, 0); | |
2217 | /* set dirty bit */ | |
2218 | phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |= | |
2219 | (0xff & ~CODE_DIRTY_FLAG); | |
2220 | } | |
2221 | } | |
2222 | } | |
2223 | ||
2224 | /* XXX: optimize */ | |
2225 | void stb_phys(target_phys_addr_t addr, uint32_t val) | |
2226 | { | |
2227 | uint8_t v = val; | |
2228 | cpu_physical_memory_write(addr, &v, 1); | |
2229 | } | |
2230 | ||
2231 | /* XXX: optimize */ | |
2232 | void stw_phys(target_phys_addr_t addr, uint32_t val) | |
2233 | { | |
2234 | uint16_t v = tswap16(val); | |
2235 | cpu_physical_memory_write(addr, (const uint8_t *)&v, 2); | |
2236 | } | |
2237 | ||
2238 | /* XXX: optimize */ | |
2239 | void stq_phys(target_phys_addr_t addr, uint64_t val) | |
2240 | { | |
2241 | val = tswap64(val); | |
2242 | cpu_physical_memory_write(addr, (const uint8_t *)&val, 8); | |
2243 | } | |
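
/* For reference: the tswap16()/tswap64() calls above convert between
   host and target byte order; they byte-swap only when the two differ
   (defined in cpu-all.h; shown here as an approximation): */
#if 0
static inline uint16_t tswap16(uint16_t s)
{
#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
    return bswap16(s);
#else
    return s;
#endif
}
#endif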
2244 | ||
2245 | #endif | |
2246 | ||
2247 | /* virtual memory access for debug */ | |
2248 | int cpu_memory_rw_debug(CPUState *env, target_ulong addr, | |
2249 | uint8_t *buf, int len, int is_write) | |
2250 | { | |
2251 | int l; | |
2252 | target_ulong page, phys_addr; | |
2253 | ||
2254 | while (len > 0) { | |
2255 | page = addr & TARGET_PAGE_MASK; | |
2256 | phys_addr = cpu_get_phys_page_debug(env, page); | |
2257 | /* if no physical page mapped, return an error */ | |
2258 | if (phys_addr == -1) | |
2259 | return -1; | |
2260 | l = (page + TARGET_PAGE_SIZE) - addr; | |
2261 | if (l > len) | |
2262 | l = len; | |
2263 | cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK), | |
2264 | buf, l, is_write); | |
2265 | len -= l; | |
2266 | buf += l; | |
2267 | addr += l; | |
2268 | } | |
2269 | return 0; | |
2270 | } | |
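
/* A usage sketch (hypothetical caller, in the spirit of the gdb stub):
   debugger reads go through cpu_memory_rw_debug() so that unmapped
   guest virtual pages produce an error instead of a fault. */
#if 0
static int debugger_read_mem(CPUState *env, target_ulong addr,
                             uint8_t *buf, int len)
{
    if (cpu_memory_rw_debug(env, addr, buf, len, 0) != 0)
        return -1;  /* some page in the range was not mapped */
    return 0;
}
#endif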
2271 | ||
2272 | void dump_exec_info(FILE *f, | |
2273 | int (*cpu_fprintf)(FILE *f, const char *fmt, ...)) | |
2274 | { | |
2275 | int i, target_code_size, max_target_code_size; | |
2276 | int direct_jmp_count, direct_jmp2_count, cross_page; | |
2277 | TranslationBlock *tb; | |
2278 | ||
2279 | target_code_size = 0; | |
2280 | max_target_code_size = 0; | |
2281 | cross_page = 0; | |
2282 | direct_jmp_count = 0; | |
2283 | direct_jmp2_count = 0; | |
2284 | for(i = 0; i < nb_tbs; i++) { | |
2285 | tb = &tbs[i]; | |
2286 | target_code_size += tb->size; | |
2287 | if (tb->size > max_target_code_size) | |
2288 | max_target_code_size = tb->size; | |
2289 | if (tb->page_addr[1] != -1) | |
2290 | cross_page++; | |
2291 | if (tb->tb_next_offset[0] != 0xffff) { | |
2292 | direct_jmp_count++; | |
2293 | if (tb->tb_next_offset[1] != 0xffff) { | |
2294 | direct_jmp2_count++; | |
2295 | } | |
2296 | } | |
2297 | } | |
2298 | /* XXX: avoid using doubles? */ | |
2299 | cpu_fprintf(f, "TB count %d\n", nb_tbs); | |
2300 | cpu_fprintf(f, "TB avg target size %d max=%d bytes\n", | |
2301 | nb_tbs ? target_code_size / nb_tbs : 0, | |
2302 | max_target_code_size); | |
2303 | cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n", | |
2304 | nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0, | |
2305 | target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0); | |
2306 | cpu_fprintf(f, "cross page TB count %d (%d%%)\n", | |
2307 | cross_page, | |
2308 | nb_tbs ? (cross_page * 100) / nb_tbs : 0); | |
2309 | cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n", | |
2310 | direct_jmp_count, | |
2311 | nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0, | |
2312 | direct_jmp2_count, | |
2313 | nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0); | |
2314 | cpu_fprintf(f, "TB flush count %d\n", tb_flush_count); | |
2315 | cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count); | |
2316 | cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count); | |
2317 | } | |
2318 | ||
2319 | #if !defined(CONFIG_USER_ONLY) | |
2320 | ||
2321 | #define MMUSUFFIX _cmmu | |
2322 | #define GETPC() NULL | |
2323 | #define env cpu_single_env | |
2324 | #define SOFTMMU_CODE_ACCESS | |
2325 | ||
2326 | #define SHIFT 0 | |
2327 | #include "softmmu_template.h" | |
2328 | ||
2329 | #define SHIFT 1 | |
2330 | #include "softmmu_template.h" | |
2331 | ||
2332 | #define SHIFT 2 | |
2333 | #include "softmmu_template.h" | |
2334 | ||
2335 | #define SHIFT 3 | |
2336 | #include "softmmu_template.h" | |
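
/* Each inclusion of softmmu_template.h above instantiates the slow-path
   code access loader for one access size (1 << SHIFT bytes): SHIFT 0
   generates the byte accessor, SHIFT 1 the 16 bit one, SHIFT 2 the 32
   bit one and SHIFT 3 the 64 bit one, all with the _cmmu suffix and
   using cpu_single_env as the implicit environment. */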
2337 | ||
2338 | #undef env | |
2339 | ||
2340 | #endif |