/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the write
       lookups on a given page so that a code bitmap can be built once
       SMC_BITMAP_USE_THRESHOLD is reached */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return NULL;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    /* note: only the physically indexed hash table exists here */
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* walk the circular list of jumps to this TB up to its head */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   range [start, end).  NOTE: start and end must refer to the same
   physical page. 'is_cpu_write_access' should be true if called from
   a real cpu write access: the virtual CPU will exit the current TB
   if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Returns NULL (meaning the caller
   must do a tb_flush and retry) if there are too many translation
   blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong addr, pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);

    for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        tb = env->tb_jmp_cache[i];
        if (tb &&
            ((tb->pc & TARGET_PAGE_MASK) == addr ||
             ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
            env->tb_jmp_cache[i] = NULL;
        }
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                te->addr_write = vaddr | IO_MEM_ROM;
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we act as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, valid_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

1667 | int page_get_flags(target_ulong address) | |
1668 | { | |
1669 | PageDesc *p; | |
1670 | ||
1671 | p = page_find(address >> TARGET_PAGE_BITS); | |
1672 | if (!p) | |
1673 | return 0; | |
1674 | return p->flags; | |
1675 | } | |
1676 | ||
1677 | /* modify the flags of a page and invalidate the code if | |
1678 | necessary. The flag PAGE_WRITE_ORG is positioned automatically | |
1679 | depending on PAGE_WRITE */ | |
1680 | void page_set_flags(target_ulong start, target_ulong end, int flags) | |
1681 | { | |
1682 | PageDesc *p; | |
1683 | target_ulong addr; | |
1684 | ||
1685 | start = start & TARGET_PAGE_MASK; | |
1686 | end = TARGET_PAGE_ALIGN(end); | |
1687 | if (flags & PAGE_WRITE) | |
1688 | flags |= PAGE_WRITE_ORG; | |
1689 | spin_lock(&tb_lock); | |
1690 | for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) { | |
1691 | p = page_find_alloc(addr >> TARGET_PAGE_BITS); | |
1692 | /* if the page was write-protected and becomes writable, then we | |
1693 | invalidate the code inside */ | |
1694 | if (!(p->flags & PAGE_WRITE) && | |
1695 | (flags & PAGE_WRITE) && | |
1696 | p->first_tb) { | |
1697 | tb_invalidate_phys_page(addr, 0, NULL); | |
1698 | } | |
1699 | p->flags = flags; | |
1700 | } | |
1701 | spin_unlock(&tb_lock); | |
1702 | } | |
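||
| /* Illustrative sketch, not part of the original file (the caller is | |
| hypothetical): a user-mode loader would register a freshly mapped | |
| guest region like this; PAGE_WRITE makes page_set_flags() record | |
| PAGE_WRITE_ORG, so that page_unprotect() can restore write access | |
| after a self-modifying-code fault. */ | |
| #if 0 | |
| static void example_register_region(target_ulong base, target_ulong size) | |
| { | |
| page_set_flags(base, base + size, | |
| PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC); | |
| } | |
| #endif | |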
1703 | ||
1704 | /* called from signal handler: invalidate the code and unprotect the | |
1705 | page. Return TRUE if the fault was successfully handled. */ | |
1706 | int page_unprotect(target_ulong address, unsigned long pc, void *puc) | |
1707 | { | |
1708 | unsigned int page_index, prot, pindex; | |
1709 | PageDesc *p, *p1; | |
1710 | target_ulong host_start, host_end, addr; | |
1711 | ||
1712 | host_start = address & qemu_host_page_mask; | |
1713 | page_index = host_start >> TARGET_PAGE_BITS; | |
1714 | p1 = page_find(page_index); | |
1715 | if (!p1) | |
1716 | return 0; | |
1717 | host_end = host_start + qemu_host_page_size; | |
1718 | p = p1; | |
1719 | prot = 0; | |
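| /* NOTE: mprotect() works at host page granularity, which may span | |
| several target pages; accumulate the flags of every target page in | |
| the host page before deciding */ | |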
1720 | for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) { | |
1721 | prot |= p->flags; | |
1722 | p++; | |
1723 | } | |
1724 | /* if the page was really writable, then we change its | |
1725 | protection back to writable */ | |
1726 | if (prot & PAGE_WRITE_ORG) { | |
1727 | pindex = (address - host_start) >> TARGET_PAGE_BITS; | |
1728 | if (!(p1[pindex].flags & PAGE_WRITE)) { | |
1729 | mprotect((void *)g2h(host_start), qemu_host_page_size, | |
1730 | (prot & PAGE_BITS) | PAGE_WRITE); | |
1731 | p1[pindex].flags |= PAGE_WRITE; | |
1732 | /* and since the content will be modified, we must invalidate | |
1733 | the corresponding translated code. */ | |
1734 | tb_invalidate_phys_page(address, pc, puc); | |
1735 | #ifdef DEBUG_TB_CHECK | |
1736 | tb_invalidate_check(address); | |
1737 | #endif | |
1738 | return 1; | |
1739 | } | |
1740 | } | |
1741 | return 0; | |
1742 | } | |
1743 | ||
1744 | /* call this function when system calls directly modify a memory area */ | |
1745 | /* ??? This should be redundant now we have lock_user. */ | |
1746 | void page_unprotect_range(target_ulong data, target_ulong data_size) | |
1747 | { | |
1748 | target_ulong start, end, addr; | |
1749 | ||
1750 | start = data; | |
1751 | end = start + data_size; | |
1752 | start &= TARGET_PAGE_MASK; | |
1753 | end = TARGET_PAGE_ALIGN(end); | |
1754 | for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) { | |
1755 | page_unprotect(addr, 0, NULL); | |
1756 | } | |
1757 | } | |
1758 | ||
1759 | static inline void tlb_set_dirty(CPUState *env, | |
1760 | unsigned long addr, target_ulong vaddr) | |
1761 | { | |
1762 | } | |
1763 | #endif /* defined(CONFIG_USER_ONLY) */ | |
1764 | ||
1765 | /* register physical memory. 'size' must be a multiple of the target | |
1766 | page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an | |
1767 | I/O memory page */ | |
1768 | void cpu_register_physical_memory(target_phys_addr_t start_addr, | |
1769 | unsigned long size, | |
1770 | unsigned long phys_offset) | |
1771 | { | |
1772 | target_phys_addr_t addr, end_addr; | |
1773 | PhysPageDesc *p; | |
1774 | ||
1775 | size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK; | |
1776 | end_addr = start_addr + size; | |
1777 | for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) { | |
1778 | p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1); | |
1779 | p->phys_offset = phys_offset; | |
1780 | if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) | |
1781 | phys_offset += TARGET_PAGE_SIZE; | |
1782 | } | |
1783 | } | |
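||
| /* Illustrative sketch, not part of the original file (the board and its | |
| addresses are hypothetical): a RAM region passes its offset into | |
| phys_ram_base, while a device passes the encoded index obtained from | |
| cpu_register_io_memory() below. */ | |
| #if 0 | |
| static void example_board_init(int device_io_index) | |
| { | |
| /* 8 MB of RAM at guest physical address 0 */ | |
| cpu_register_physical_memory(0x00000000, 8 * 1024 * 1024, IO_MEM_RAM); | |
| /* one page of device registers at 0x10000000 */ | |
| cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, | |
| device_io_index); | |
| } | |
| #endif | |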
1784 | ||
1785 | static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr) | |
1786 | { | |
1787 | return 0; | |
1788 | } | |
1789 | ||
1790 | static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) | |
1791 | { | |
1792 | } | |
1793 | ||
1794 | static CPUReadMemoryFunc *unassigned_mem_read[3] = { | |
1795 | unassigned_mem_readb, | |
1796 | unassigned_mem_readb, | |
1797 | unassigned_mem_readb, | |
1798 | }; | |
1799 | ||
1800 | static CPUWriteMemoryFunc *unassigned_mem_write[3] = { | |
1801 | unassigned_mem_writeb, | |
1802 | unassigned_mem_writeb, | |
1803 | unassigned_mem_writeb, | |
1804 | }; | |
1805 | ||
1806 | static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) | |
1807 | { | |
1808 | unsigned long ram_addr; | |
1809 | int dirty_flags; | |
1810 | ram_addr = addr - (unsigned long)phys_ram_base; | |
1811 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | |
1812 | if (!(dirty_flags & CODE_DIRTY_FLAG)) { | |
1813 | #if !defined(CONFIG_USER_ONLY) | |
1814 | tb_invalidate_phys_page_fast(ram_addr, 1); | |
1815 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | |
1816 | #endif | |
1817 | } | |
1818 | stb_p((uint8_t *)(long)addr, val); | |
1819 | #ifdef USE_KQEMU | |
1820 | if (cpu_single_env->kqemu_enabled && | |
1821 | (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK) | |
1822 | kqemu_modify_page(cpu_single_env, ram_addr); | |
1823 | #endif | |
1824 | dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); | |
1825 | phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags; | |
1826 | /* we remove the notdirty callback only if the code has been | |
1827 | flushed */ | |
1828 | if (dirty_flags == 0xff) | |
1829 | tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr); | |
1830 | } | |
1831 | ||
1832 | static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val) | |
1833 | { | |
1834 | unsigned long ram_addr; | |
1835 | int dirty_flags; | |
1836 | ram_addr = addr - (unsigned long)phys_ram_base; | |
1837 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | |
1838 | if (!(dirty_flags & CODE_DIRTY_FLAG)) { | |
1839 | #if !defined(CONFIG_USER_ONLY) | |
1840 | tb_invalidate_phys_page_fast(ram_addr, 2); | |
1841 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | |
1842 | #endif | |
1843 | } | |
1844 | stw_p((uint8_t *)(long)addr, val); | |
1845 | #ifdef USE_KQEMU | |
1846 | if (cpu_single_env->kqemu_enabled && | |
1847 | (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK) | |
1848 | kqemu_modify_page(cpu_single_env, ram_addr); | |
1849 | #endif | |
1850 | dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); | |
1851 | phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags; | |
1852 | /* we remove the notdirty callback only if the code has been | |
1853 | flushed */ | |
1854 | if (dirty_flags == 0xff) | |
1855 | tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr); | |
1856 | } | |
1857 | ||
1858 | static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val) | |
1859 | { | |
1860 | unsigned long ram_addr; | |
1861 | int dirty_flags; | |
1862 | ram_addr = addr - (unsigned long)phys_ram_base; | |
1863 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | |
1864 | if (!(dirty_flags & CODE_DIRTY_FLAG)) { | |
1865 | #if !defined(CONFIG_USER_ONLY) | |
1866 | tb_invalidate_phys_page_fast(ram_addr, 4); | |
1867 | dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; | |
1868 | #endif | |
1869 | } | |
1870 | stl_p((uint8_t *)(long)addr, val); | |
1871 | #ifdef USE_KQEMU | |
1872 | if (cpu_single_env->kqemu_enabled && | |
1873 | (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK) | |
1874 | kqemu_modify_page(cpu_single_env, ram_addr); | |
1875 | #endif | |
1876 | dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); | |
1877 | phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags; | |
1878 | /* we remove the notdirty callback only if the code has been | |
1879 | flushed */ | |
1880 | if (dirty_flags == 0xff) | |
1881 | tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr); | |
1882 | } | |
1883 | ||
1884 | static CPUReadMemoryFunc *error_mem_read[3] = { | |
1885 | NULL, /* never used */ | |
1886 | NULL, /* never used */ | |
1887 | NULL, /* never used */ | |
1888 | }; | |
1889 | ||
1890 | static CPUWriteMemoryFunc *notdirty_mem_write[3] = { | |
1891 | notdirty_mem_writeb, | |
1892 | notdirty_mem_writew, | |
1893 | notdirty_mem_writel, | |
1894 | }; | |
1895 | ||
1896 | static void io_mem_init(void) | |
1897 | { | |
1898 | cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL); | |
1899 | cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL); | |
1900 | cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL); | |
1901 | io_mem_nb = 5; | |
1902 | ||
1903 | /* alloc dirty bits array */ | |
1904 | phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS); | |
1905 | memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS); | |
1906 | } | |
1907 | ||
1908 | /* mem_read and mem_write are arrays of functions: index 0 accesses | |
1909 | bytes, index 1 words and index 2 dwords. All three functions must | |
1910 | be supplied. If io_index is non-zero, the corresponding I/O zone is | |
1911 | modified; if it is zero, a new I/O zone is allocated. The return | |
1912 | value can be used with cpu_register_physical_memory(). -1 is | |
1913 | returned on error. */ | |
1914 | int cpu_register_io_memory(int io_index, | |
1915 | CPUReadMemoryFunc **mem_read, | |
1916 | CPUWriteMemoryFunc **mem_write, | |
1917 | void *opaque) | |
1918 | { | |
1919 | int i; | |
1920 | ||
1921 | if (io_index <= 0) { | |
1922 | if (io_mem_nb >= IO_MEM_NB_ENTRIES) | |
1923 | return -1; | |
1924 | io_index = io_mem_nb++; | |
1925 | } else { | |
1926 | if (io_index >= IO_MEM_NB_ENTRIES) | |
1927 | return -1; | |
1928 | } | |
1929 | ||
1930 | for(i = 0; i < 3; i++) { | |
1931 | io_mem_read[io_index][i] = mem_read[i]; | |
1932 | io_mem_write[io_index][i] = mem_write[i]; | |
1933 | } | |
1934 | io_mem_opaque[io_index] = opaque; | |
1935 | return io_index << IO_MEM_SHIFT; | |
1936 | } | |
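||
| /* Illustrative sketch, not part of the original file (the device is | |
| hypothetical): register byte/word/dword handlers for a new I/O zone, | |
| then map it; the returned value already carries the IO_MEM_SHIFT | |
| encoding that the physical memory table expects. */ | |
| #if 0 | |
| static uint32_t example_dev_read(void *opaque, target_phys_addr_t addr) | |
| { | |
| return 0; /* a real device would decode addr here */ | |
| } | |
| static void example_dev_write(void *opaque, target_phys_addr_t addr, | |
| uint32_t val) | |
| { | |
| } | |
| static CPUReadMemoryFunc *example_dev_readfn[3] = { | |
| example_dev_read, example_dev_read, example_dev_read, | |
| }; | |
| static CPUWriteMemoryFunc *example_dev_writefn[3] = { | |
| example_dev_write, example_dev_write, example_dev_write, | |
| }; | |
| static void example_map_device(target_phys_addr_t base, void *state) | |
| { | |
| int io = cpu_register_io_memory(0, example_dev_readfn, | |
| example_dev_writefn, state); | |
| if (io >= 0) | |
| cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io); | |
| } | |
| #endif | |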
1937 | ||
1938 | CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index) | |
1939 | { | |
1940 | return io_mem_write[io_index >> IO_MEM_SHIFT]; | |
1941 | } | |
1942 | ||
1943 | CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index) | |
1944 | { | |
1945 | return io_mem_read[io_index >> IO_MEM_SHIFT]; | |
1946 | } | |
1947 | ||
1948 | /* physical memory access (slow version, mainly for debug) */ | |
1949 | #if defined(CONFIG_USER_ONLY) | |
1950 | void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, | |
1951 | int len, int is_write) | |
1952 | { | |
1953 | int l, flags; | |
1954 | target_ulong page; | |
1955 | void * p; | |
1956 | ||
1957 | while (len > 0) { | |
1958 | page = addr & TARGET_PAGE_MASK; | |
1959 | l = (page + TARGET_PAGE_SIZE) - addr; | |
1960 | if (l > len) | |
1961 | l = len; | |
1962 | flags = page_get_flags(page); | |
1963 | if (!(flags & PAGE_VALID)) | |
1964 | return; | |
1965 | if (is_write) { | |
1966 | if (!(flags & PAGE_WRITE)) | |
1967 | return; | |
1968 | p = lock_user(addr, l, 0); | |
1969 | memcpy(p, buf, l); | |
1970 | unlock_user(p, addr, l); | |
1971 | } else { | |
1972 | if (!(flags & PAGE_READ)) | |
1973 | return; | |
1974 | p = lock_user(addr, l, 1); | |
1975 | memcpy(buf, p, l); | |
1976 | unlock_user(p, addr, 0); | |
1977 | } | |
1978 | len -= l; | |
1979 | buf += l; | |
1980 | addr += l; | |
1981 | } | |
1982 | } | |
1983 | ||
1984 | #else | |
1985 | void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf, | |
1986 | int len, int is_write) | |
1987 | { | |
1988 | int l, io_index; | |
1989 | uint8_t *ptr; | |
1990 | uint32_t val; | |
1991 | target_phys_addr_t page; | |
1992 | unsigned long pd; | |
1993 | PhysPageDesc *p; | |
1994 | ||
1995 | while (len > 0) { | |
1996 | page = addr & TARGET_PAGE_MASK; | |
1997 | l = (page + TARGET_PAGE_SIZE) - addr; | |
1998 | if (l > len) | |
1999 | l = len; | |
2000 | p = phys_page_find(page >> TARGET_PAGE_BITS); | |
2001 | if (!p) { | |
2002 | pd = IO_MEM_UNASSIGNED; | |
2003 | } else { | |
2004 | pd = p->phys_offset; | |
2005 | } | |
2006 | ||
2007 | if (is_write) { | |
2008 | if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { | |
2009 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); | |
2010 | /* XXX: could force cpu_single_env to NULL to avoid | |
2011 | potential bugs */ | |
2012 | if (l >= 4 && ((addr & 3) == 0)) { | |
2013 | /* 32 bit write access */ | |
2014 | val = ldl_p(buf); | |
2015 | io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); | |
2016 | l = 4; | |
2017 | } else if (l >= 2 && ((addr & 1) == 0)) { | |
2018 | /* 16 bit write access */ | |
2019 | val = lduw_p(buf); | |
2020 | io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val); | |
2021 | l = 2; | |
2022 | } else { | |
2023 | /* 8 bit write access */ | |
2024 | val = ldub_p(buf); | |
2025 | io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val); | |
2026 | l = 1; | |
2027 | } | |
2028 | } else { | |
2029 | unsigned long addr1; | |
2030 | addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); | |
2031 | /* RAM case */ | |
2032 | ptr = phys_ram_base + addr1; | |
2033 | memcpy(ptr, buf, l); | |
2034 | if (!cpu_physical_memory_is_dirty(addr1)) { | |
2035 | /* invalidate code */ | |
2036 | tb_invalidate_phys_page_range(addr1, addr1 + l, 0); | |
2037 | /* set dirty bit */ | |
2038 | phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |= | |
2039 | (0xff & ~CODE_DIRTY_FLAG); | |
2040 | } | |
2041 | } | |
2042 | } else { | |
2043 | if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) { | |
2044 | /* I/O case */ | |
2045 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); | |
2046 | if (l >= 4 && ((addr & 3) == 0)) { | |
2047 | /* 32 bit read access */ | |
2048 | val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); | |
2049 | stl_p(buf, val); | |
2050 | l = 4; | |
2051 | } else if (l >= 2 && ((addr & 1) == 0)) { | |
2052 | /* 16 bit read access */ | |
2053 | val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr); | |
2054 | stw_p(buf, val); | |
2055 | l = 2; | |
2056 | } else { | |
2057 | /* 8 bit read access */ | |
2058 | val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr); | |
2059 | stb_p(buf, val); | |
2060 | l = 1; | |
2061 | } | |
2062 | } else { | |
2063 | /* RAM case */ | |
2064 | ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + | |
2065 | (addr & ~TARGET_PAGE_MASK); | |
2066 | memcpy(buf, ptr, l); | |
2067 | } | |
2068 | } | |
2069 | len -= l; | |
2070 | buf += l; | |
2071 | addr += l; | |
2072 | } | |
2073 | } | |
2074 | ||
2075 | /* warning: addr must be aligned */ | |
2076 | uint32_t ldl_phys(target_phys_addr_t addr) | |
2077 | { | |
2078 | int io_index; | |
2079 | uint8_t *ptr; | |
2080 | uint32_t val; | |
2081 | unsigned long pd; | |
2082 | PhysPageDesc *p; | |
2083 | ||
2084 | p = phys_page_find(addr >> TARGET_PAGE_BITS); | |
2085 | if (!p) { | |
2086 | pd = IO_MEM_UNASSIGNED; | |
2087 | } else { | |
2088 | pd = p->phys_offset; | |
2089 | } | |
2090 | ||
2091 | if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) { | |
2092 | /* I/O case */ | |
2093 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); | |
2094 | val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); | |
2095 | } else { | |
2096 | /* RAM case */ | |
2097 | ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + | |
2098 | (addr & ~TARGET_PAGE_MASK); | |
2099 | val = ldl_p(ptr); | |
2100 | } | |
2101 | return val; | |
2102 | } | |
2103 | ||
2104 | /* warning: addr must be aligned */ | |
2105 | uint64_t ldq_phys(target_phys_addr_t addr) | |
2106 | { | |
2107 | int io_index; | |
2108 | uint8_t *ptr; | |
2109 | uint64_t val; | |
2110 | unsigned long pd; | |
2111 | PhysPageDesc *p; | |
2112 | ||
2113 | p = phys_page_find(addr >> TARGET_PAGE_BITS); | |
2114 | if (!p) { | |
2115 | pd = IO_MEM_UNASSIGNED; | |
2116 | } else { | |
2117 | pd = p->phys_offset; | |
2118 | } | |
2119 | ||
2120 | if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) { | |
2121 | /* I/O case */ | |
2122 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); | |
2123 | #ifdef TARGET_WORDS_BIGENDIAN | |
2124 | val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32; | |
2125 | val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4); | |
2126 | #else | |
2127 | val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr); | |
2128 | val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32; | |
2129 | #endif | |
2130 | } else { | |
2131 | /* RAM case */ | |
2132 | ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + | |
2133 | (addr & ~TARGET_PAGE_MASK); | |
2134 | val = ldq_p(ptr); | |
2135 | } | |
2136 | return val; | |
2137 | } | |
2138 | ||
2139 | /* XXX: optimize */ | |
2140 | uint32_t ldub_phys(target_phys_addr_t addr) | |
2141 | { | |
2142 | uint8_t val; | |
2143 | cpu_physical_memory_read(addr, &val, 1); | |
2144 | return val; | |
2145 | } | |
2146 | ||
2147 | /* XXX: optimize */ | |
2148 | uint32_t lduw_phys(target_phys_addr_t addr) | |
2149 | { | |
2150 | uint16_t val; | |
2151 | cpu_physical_memory_read(addr, (uint8_t *)&val, 2); | |
2152 | return tswap16(val); | |
2153 | } | |
2154 | ||
2155 | /* warning: addr must be aligned. The ram page is not marked as dirty | |
2156 | and the code inside is not invalidated. It is useful if the dirty | |
2157 | bits are used to track modified PTEs */ | |
2158 | void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val) | |
2159 | { | |
2160 | int io_index; | |
2161 | uint8_t *ptr; | |
2162 | unsigned long pd; | |
2163 | PhysPageDesc *p; | |
2164 | ||
2165 | p = phys_page_find(addr >> TARGET_PAGE_BITS); | |
2166 | if (!p) { | |
2167 | pd = IO_MEM_UNASSIGNED; | |
2168 | } else { | |
2169 | pd = p->phys_offset; | |
2170 | } | |
2171 | ||
2172 | if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { | |
2173 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); | |
2174 | io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); | |
2175 | } else { | |
2176 | ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) + | |
2177 | (addr & ~TARGET_PAGE_MASK); | |
2178 | stl_p(ptr, val); | |
2179 | } | |
2180 | } | |
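||
| /* Illustrative sketch, not part of the original file (PTE_ACCESSED and | |
| pte_addr are hypothetical): a target MMU helper updating the accessed | |
| bit of a page table entry uses the _notdirty variant so the page | |
| holding the PTE is not flagged dirty and translated code in it is | |
| left alone. */ | |
| #if 0 | |
| static void example_set_pte_accessed(target_phys_addr_t pte_addr) | |
| { | |
| uint32_t pte = ldl_phys(pte_addr); | |
| stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED); | |
| } | |
| #endif | |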
2181 | ||
2182 | /* warning: addr must be aligned */ | |
2183 | void stl_phys(target_phys_addr_t addr, uint32_t val) | |
2184 | { | |
2185 | int io_index; | |
2186 | uint8_t *ptr; | |
2187 | unsigned long pd; | |
2188 | PhysPageDesc *p; | |
2189 | ||
2190 | p = phys_page_find(addr >> TARGET_PAGE_BITS); | |
2191 | if (!p) { | |
2192 | pd = IO_MEM_UNASSIGNED; | |
2193 | } else { | |
2194 | pd = p->phys_offset; | |
2195 | } | |
2196 | ||
2197 | if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { | |
2198 | io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); | |
2199 | io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val); | |
2200 | } else { | |
2201 | unsigned long addr1; | |
2202 | addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); | |
2203 | /* RAM case */ | |
2204 | ptr = phys_ram_base + addr1; | |
2205 | stl_p(ptr, val); | |
2206 | if (!cpu_physical_memory_is_dirty(addr1)) { | |
2207 | /* invalidate code */ | |
2208 | tb_invalidate_phys_page_range(addr1, addr1 + 4, 0); | |
2209 | /* set dirty bit */ | |
2210 | phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |= | |
2211 | (0xff & ~CODE_DIRTY_FLAG); | |
2212 | } | |
2213 | } | |
2214 | } | |
2215 | ||
2216 | /* XXX: optimize */ | |
2217 | void stb_phys(target_phys_addr_t addr, uint32_t val) | |
2218 | { | |
2219 | uint8_t v = val; | |
2220 | cpu_physical_memory_write(addr, &v, 1); | |
2221 | } | |
2222 | ||
2223 | /* XXX: optimize */ | |
2224 | void stw_phys(target_phys_addr_t addr, uint32_t val) | |
2225 | { | |
2226 | uint16_t v = tswap16(val); | |
2227 | cpu_physical_memory_write(addr, (const uint8_t *)&v, 2); | |
2228 | } | |
2229 | ||
2230 | /* XXX: optimize */ | |
2231 | void stq_phys(target_phys_addr_t addr, uint64_t val) | |
2232 | { | |
2233 | val = tswap64(val); | |
2234 | cpu_physical_memory_write(addr, (const uint8_t *)&val, 8); | |
2235 | } | |
2236 | ||
2237 | #endif | |
2238 | ||
2239 | /* virtual memory access for debug */ | |
2240 | int cpu_memory_rw_debug(CPUState *env, target_ulong addr, | |
2241 | uint8_t *buf, int len, int is_write) | |
2242 | { | |
2243 | int l; | |
2244 | target_ulong page, phys_addr; | |
2245 | ||
2246 | while (len > 0) { | |
2247 | page = addr & TARGET_PAGE_MASK; | |
2248 | phys_addr = cpu_get_phys_page_debug(env, page); | |
2249 | /* if no physical page mapped, return an error */ | |
2250 | if (phys_addr == -1) | |
2251 | return -1; | |
2252 | l = (page + TARGET_PAGE_SIZE) - addr; | |
2253 | if (l > len) | |
2254 | l = len; | |
2255 | cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK), | |
2256 | buf, l, is_write); | |
2257 | len -= l; | |
2258 | buf += l; | |
2259 | addr += l; | |
2260 | } | |
2261 | return 0; | |
2262 | } | |
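||
| /* Illustrative sketch, not part of the original file (example_peek_u32 | |
| is hypothetical): this routine is what a debugger stub uses to read | |
| guest virtual memory, walking the guest mappings page by page. */ | |
| #if 0 | |
| static uint32_t example_peek_u32(CPUState *env, target_ulong vaddr) | |
| { | |
| uint8_t buf[4]; | |
| if (cpu_memory_rw_debug(env, vaddr, buf, 4, 0) < 0) | |
| return 0; /* no physical page mapped at vaddr */ | |
| return ldl_p(buf); | |
| } | |
| #endif | |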
2263 | ||
2264 | void dump_exec_info(FILE *f, | |
2265 | int (*cpu_fprintf)(FILE *f, const char *fmt, ...)) | |
2266 | { | |
2267 | int i, target_code_size, max_target_code_size; | |
2268 | int direct_jmp_count, direct_jmp2_count, cross_page; | |
2269 | TranslationBlock *tb; | |
2270 | ||
2271 | target_code_size = 0; | |
2272 | max_target_code_size = 0; | |
2273 | cross_page = 0; | |
2274 | direct_jmp_count = 0; | |
2275 | direct_jmp2_count = 0; | |
2276 | for(i = 0; i < nb_tbs; i++) { | |
2277 | tb = &tbs[i]; | |
2278 | target_code_size += tb->size; | |
2279 | if (tb->size > max_target_code_size) | |
2280 | max_target_code_size = tb->size; | |
2281 | if (tb->page_addr[1] != -1) | |
2282 | cross_page++; | |
2283 | if (tb->tb_next_offset[0] != 0xffff) { | |
2284 | direct_jmp_count++; | |
2285 | if (tb->tb_next_offset[1] != 0xffff) { | |
2286 | direct_jmp2_count++; | |
2287 | } | |
2288 | } | |
2289 | } | |
2290 | /* XXX: avoid using doubles ? */ | |
2291 | cpu_fprintf(f, "TB count %d\n", nb_tbs); | |
2292 | cpu_fprintf(f, "TB avg target size %d max=%d bytes\n", | |
2293 | nb_tbs ? target_code_size / nb_tbs : 0, | |
2294 | max_target_code_size); | |
2295 | cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n", | |
2296 | nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0, | |
2297 | target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0); | |
2298 | cpu_fprintf(f, "cross page TB count %d (%d%%)\n", | |
2299 | cross_page, | |
2300 | nb_tbs ? (cross_page * 100) / nb_tbs : 0); | |
2301 | cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n", | |
2302 | direct_jmp_count, | |
2303 | nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0, | |
2304 | direct_jmp2_count, | |
2305 | nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0); | |
2306 | cpu_fprintf(f, "TB flush count %d\n", tb_flush_count); | |
2307 | cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count); | |
2308 | cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count); | |
2309 | } | |
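||
| /* Usage note (added for clarity): the monitor's "info jit" command | |
| calls this with the monitor's fprintf-compatible callback passed as | |
| cpu_fprintf. */ | |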
2310 | ||
2311 | #if !defined(CONFIG_USER_ONLY) | |
2312 | ||
2313 | #define MMUSUFFIX _cmmu | |
2314 | #define GETPC() NULL | |
2315 | #define env cpu_single_env | |
2316 | #define SOFTMMU_CODE_ACCESS | |
2317 | ||
2318 | #define SHIFT 0 | |
2319 | #include "softmmu_template.h" | |
2320 | ||
2321 | #define SHIFT 1 | |
2322 | #include "softmmu_template.h" | |
2323 | ||
2324 | #define SHIFT 2 | |
2325 | #include "softmmu_template.h" | |
2326 | ||
2327 | #define SHIFT 3 | |
2328 | #include "softmmu_template.h" | |
2329 | ||
2330 | #undef env | |
2331 | ||
2332 | #endif |