1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
35
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
39 #if defined(CONFIG_USER_ONLY)
40 #include <qemu.h>
41 #endif
42
43 //#define DEBUG_TB_INVALIDATE
44 //#define DEBUG_FLUSH
45 //#define DEBUG_TLB
46 //#define DEBUG_UNASSIGNED
47
48 /* make various TB consistency checks */
49 //#define DEBUG_TB_CHECK
50 //#define DEBUG_TLB_CHECK
51
52 //#define DEBUG_IOPORT
53 //#define DEBUG_SUBPAGE
54
55 #if !defined(CONFIG_USER_ONLY)
56 /* TB consistency checks only implemented for usermode emulation. */
57 #undef DEBUG_TB_CHECK
58 #endif
59
60 /* threshold to flush the translated code buffer */
61 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
62
63 #define SMC_BITMAP_USE_THRESHOLD 10
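/* Note: once a page has seen this many code write invalidations, a per-page
   code bitmap is built (see build_page_bitmap) so that writes which do not
   touch translated code can skip the invalidation path. */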
64
65 #define MMAP_AREA_START 0x00000000
66 #define MMAP_AREA_END 0xa8000000
67
68 #if defined(TARGET_SPARC64)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 41
70 #elif defined(TARGET_SPARC)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 36
72 #elif defined(TARGET_ALPHA)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 42
74 #define TARGET_VIRT_ADDR_SPACE_BITS 42
75 #elif defined(TARGET_PPC64)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 42
79 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
80 #define TARGET_PHYS_ADDR_SPACE_BITS 36
81 #else
82 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
83 #define TARGET_PHYS_ADDR_SPACE_BITS 32
84 #endif
85
86 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
87 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
88 int nb_tbs;
89 /* any access to the tbs or the page table must use this lock */
90 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
91
92 uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
93 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
94 uint8_t *code_gen_ptr;
95
96 ram_addr_t phys_ram_size;
97 int phys_ram_fd;
98 uint8_t *phys_ram_base;
99 uint8_t *phys_ram_dirty;
100 static ram_addr_t phys_ram_alloc_offset = 0;
101
102 CPUState *first_cpu;
103 /* current CPU in the current thread. It is only valid inside
104 cpu_exec() */
105 CPUState *cpu_single_env;
106
107 typedef struct PageDesc {
108 /* list of TBs intersecting this ram page */
109 TranslationBlock *first_tb;
110 /* in order to optimize self-modifying code handling, we count the
111 code write accesses to a given page and switch to a bitmap once a threshold is reached */
112 unsigned int code_write_count;
113 uint8_t *code_bitmap;
114 #if defined(CONFIG_USER_ONLY)
115 unsigned long flags;
116 #endif
117 } PageDesc;
118
119 typedef struct PhysPageDesc {
120 /* offset in host memory of the page + io_index in the low 12 bits */
121 ram_addr_t phys_offset;
122 } PhysPageDesc;
123
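/* The page tables below are allocated lazily: l1_map is an array of pointers
   to L2 arrays of PageDesc entries; l1_phys_map is organised the same way,
   with one extra level of indirection when the physical address space is
   wider than 32 bits (see phys_page_find_alloc). A page index is split into
   an L1 part (top L1_BITS bits) and an L2 part (low L2_BITS bits). */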
124 #define L2_BITS 10
125 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
126 /* XXX: this is a temporary hack for alpha target.
127 * In the future, this is to be replaced by a multi-level table
128  * to actually be able to handle the full 64-bit address space.
129 */
130 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
131 #else
132 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
133 #endif
134
135 #define L1_SIZE (1 << L1_BITS)
136 #define L2_SIZE (1 << L2_BITS)
137
138 static void io_mem_init(void);
139
140 unsigned long qemu_real_host_page_size;
141 unsigned long qemu_host_page_bits;
142 unsigned long qemu_host_page_size;
143 unsigned long qemu_host_page_mask;
144
145 /* XXX: for system emulation, it could just be an array */
146 static PageDesc *l1_map[L1_SIZE];
147 PhysPageDesc **l1_phys_map;
148
149 /* io memory support */
150 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
151 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
152 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
153 static int io_mem_nb;
154 #if defined(CONFIG_SOFTMMU)
155 static int io_mem_watch;
156 #endif
157
158 /* log support */
159 char *logfilename = "/tmp/qemu.log";
160 FILE *logfile;
161 int loglevel;
162 static int log_append = 0;
163
164 /* statistics */
165 static int tlb_flush_count;
166 static int tb_flush_count;
167 static int tb_phys_invalidate_count;
168
169 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
170 typedef struct subpage_t {
171 target_phys_addr_t base;
172 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
173 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
174 void *opaque[TARGET_PAGE_SIZE][2][4];
175 } subpage_t;
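/* A subpage_t describes a target page backed by more than one memory region:
   for each byte offset and access size it records the read/write handlers and
   opaque pointers of the region registered there (see subpage_register). */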
176
177 #ifdef _WIN32
178 static void map_exec(void *addr, long size)
179 {
180 DWORD old_protect;
181 VirtualProtect(addr, size,
182 PAGE_EXECUTE_READWRITE, &old_protect);
183
184 }
185 #else
186 static void map_exec(void *addr, long size)
187 {
188 unsigned long start, end;
189
190 start = (unsigned long)addr;
191 start &= ~(qemu_real_host_page_size - 1);
192
193 end = (unsigned long)addr + size;
194 end += qemu_real_host_page_size - 1;
195 end &= ~(qemu_real_host_page_size - 1);
196
197 mprotect((void *)start, end - start,
198 PROT_READ | PROT_WRITE | PROT_EXEC);
199 }
200 #endif
201
202 static void page_init(void)
203 {
204 /* NOTE: we can always assume that qemu_host_page_size >=
205 TARGET_PAGE_SIZE */
206 #ifdef _WIN32
207 {
208 SYSTEM_INFO system_info;
209 DWORD old_protect;
210
211 GetSystemInfo(&system_info);
212 qemu_real_host_page_size = system_info.dwPageSize;
213 }
214 #else
215 qemu_real_host_page_size = getpagesize();
216 #endif
217 map_exec(code_gen_buffer, sizeof(code_gen_buffer));
218 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
219
220 if (qemu_host_page_size == 0)
221 qemu_host_page_size = qemu_real_host_page_size;
222 if (qemu_host_page_size < TARGET_PAGE_SIZE)
223 qemu_host_page_size = TARGET_PAGE_SIZE;
224 qemu_host_page_bits = 0;
225 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
226 qemu_host_page_bits++;
227 qemu_host_page_mask = ~(qemu_host_page_size - 1);
228 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
229 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
230
231 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
232 {
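/* For user mode emulation, mark every region already mapped by the host
   process as PAGE_RESERVED so that guest mappings are not placed on top of
   them. */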
233 long long startaddr, endaddr;
234 FILE *f;
235 int n;
236
237 f = fopen("/proc/self/maps", "r");
238 if (f) {
239 do {
240 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
241 if (n == 2) {
242 startaddr = MIN(startaddr,
243 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
244 endaddr = MIN(endaddr,
245 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
246 page_set_flags(TARGET_PAGE_ALIGN(startaddr),
247 TARGET_PAGE_ALIGN(endaddr),
248 PAGE_RESERVED);
249 }
250 } while (!feof(f));
251 fclose(f);
252 }
253 }
254 #endif
255 }
256
257 static inline PageDesc *page_find_alloc(target_ulong index)
258 {
259 PageDesc **lp, *p;
260
261 lp = &l1_map[index >> L2_BITS];
262 p = *lp;
263 if (!p) {
264 /* allocate if not found */
265 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
266 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
267 *lp = p;
268 }
269 return p + (index & (L2_SIZE - 1));
270 }
271
272 static inline PageDesc *page_find(target_ulong index)
273 {
274 PageDesc *p;
275
276 p = l1_map[index >> L2_BITS];
277 if (!p)
278 return NULL;
279 return p + (index & (L2_SIZE - 1));
280 }
281
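/* Find the PhysPageDesc for physical page 'index', allocating missing table
   levels when 'alloc' is set; returns NULL if the entry does not exist and
   'alloc' is zero. Newly allocated entries start as IO_MEM_UNASSIGNED. */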
282 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
283 {
284 void **lp, **p;
285 PhysPageDesc *pd;
286
287 p = (void **)l1_phys_map;
288 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
289
290 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
291 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
292 #endif
293 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
294 p = *lp;
295 if (!p) {
296 /* allocate if not found */
297 if (!alloc)
298 return NULL;
299 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
300 memset(p, 0, sizeof(void *) * L1_SIZE);
301 *lp = p;
302 }
303 #endif
304 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
305 pd = *lp;
306 if (!pd) {
307 int i;
308 /* allocate if not found */
309 if (!alloc)
310 return NULL;
311 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
312 *lp = pd;
313 for (i = 0; i < L2_SIZE; i++)
314 pd[i].phys_offset = IO_MEM_UNASSIGNED;
315 }
316 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
317 }
318
319 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
320 {
321 return phys_page_find_alloc(index, 0);
322 }
323
324 #if !defined(CONFIG_USER_ONLY)
325 static void tlb_protect_code(ram_addr_t ram_addr);
326 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
327 target_ulong vaddr);
328 #endif
329
330 void cpu_exec_init(CPUState *env)
331 {
332 CPUState **penv;
333 int cpu_index;
334
335 if (!code_gen_ptr) {
336 cpu_gen_init();
337 code_gen_ptr = code_gen_buffer;
338 page_init();
339 io_mem_init();
340 }
341 env->next_cpu = NULL;
342 penv = &first_cpu;
343 cpu_index = 0;
344 while (*penv != NULL) {
345 penv = (CPUState **)&(*penv)->next_cpu;
346 cpu_index++;
347 }
348 env->cpu_index = cpu_index;
349 env->nb_watchpoints = 0;
350 *penv = env;
351 }
352
353 static inline void invalidate_page_bitmap(PageDesc *p)
354 {
355 if (p->code_bitmap) {
356 qemu_free(p->code_bitmap);
357 p->code_bitmap = NULL;
358 }
359 p->code_write_count = 0;
360 }
361
362 /* set to NULL all the 'first_tb' fields in all PageDescs */
363 static void page_flush_tb(void)
364 {
365 int i, j;
366 PageDesc *p;
367
368 for(i = 0; i < L1_SIZE; i++) {
369 p = l1_map[i];
370 if (p) {
371 for(j = 0; j < L2_SIZE; j++) {
372 p->first_tb = NULL;
373 invalidate_page_bitmap(p);
374 p++;
375 }
376 }
377 }
378 }
379
380 /* flush all the translation blocks */
381 /* XXX: tb_flush is currently not thread safe */
382 void tb_flush(CPUState *env1)
383 {
384 CPUState *env;
385 #if defined(DEBUG_FLUSH)
386 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
387 (unsigned long)(code_gen_ptr - code_gen_buffer),
388 nb_tbs, nb_tbs > 0 ?
389 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
390 #endif
391 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
392 cpu_abort(env1, "Internal error: code buffer overflow\n");
393
394 nb_tbs = 0;
395
396 for(env = first_cpu; env != NULL; env = env->next_cpu) {
397 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
398 }
399
400 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
401 page_flush_tb();
402
403 code_gen_ptr = code_gen_buffer;
404 /* XXX: flush processor icache at this point if cache flush is
405 expensive */
406 tb_flush_count++;
407 }
408
409 #ifdef DEBUG_TB_CHECK
410
411 static void tb_invalidate_check(target_ulong address)
412 {
413 TranslationBlock *tb;
414 int i;
415 address &= TARGET_PAGE_MASK;
416 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
417 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
418 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
419 address >= tb->pc + tb->size)) {
420 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
421 address, (long)tb->pc, tb->size);
422 }
423 }
424 }
425 }
426
427 /* verify that all the pages have correct rights for code */
428 static void tb_page_check(void)
429 {
430 TranslationBlock *tb;
431 int i, flags1, flags2;
432
433 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
434 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
435 flags1 = page_get_flags(tb->pc);
436 flags2 = page_get_flags(tb->pc + tb->size - 1);
437 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
438 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
439 (long)tb->pc, tb->size, flags1, flags2);
440 }
441 }
442 }
443 }
444
445 void tb_jmp_check(TranslationBlock *tb)
446 {
447 TranslationBlock *tb1;
448 unsigned int n1;
449
450 /* suppress any remaining jumps to this TB */
451 tb1 = tb->jmp_first;
452 for(;;) {
453 n1 = (long)tb1 & 3;
454 tb1 = (TranslationBlock *)((long)tb1 & ~3);
455 if (n1 == 2)
456 break;
457 tb1 = tb1->jmp_next[n1];
458 }
459 /* check end of list */
460 if (tb1 != tb) {
461 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
462 }
463 }
464
465 #endif
466
467 /* invalidate one TB */
468 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
469 int next_offset)
470 {
471 TranslationBlock *tb1;
472 for(;;) {
473 tb1 = *ptb;
474 if (tb1 == tb) {
475 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
476 break;
477 }
478 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
479 }
480 }
481
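/* The page and jump lists below store tagged pointers: the low two bits of a
   TranslationBlock pointer encode which of the TB's slots (page or jump 0/1)
   the link belongs to, and the value 2 marks the head of the circular
   jmp_first list. The real pointer is recovered by masking with ~3. */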
482 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
483 {
484 TranslationBlock *tb1;
485 unsigned int n1;
486
487 for(;;) {
488 tb1 = *ptb;
489 n1 = (long)tb1 & 3;
490 tb1 = (TranslationBlock *)((long)tb1 & ~3);
491 if (tb1 == tb) {
492 *ptb = tb1->page_next[n1];
493 break;
494 }
495 ptb = &tb1->page_next[n1];
496 }
497 }
498
499 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
500 {
501 TranslationBlock *tb1, **ptb;
502 unsigned int n1;
503
504 ptb = &tb->jmp_next[n];
505 tb1 = *ptb;
506 if (tb1) {
507 /* find tb(n) in circular list */
508 for(;;) {
509 tb1 = *ptb;
510 n1 = (long)tb1 & 3;
511 tb1 = (TranslationBlock *)((long)tb1 & ~3);
512 if (n1 == n && tb1 == tb)
513 break;
514 if (n1 == 2) {
515 ptb = &tb1->jmp_first;
516 } else {
517 ptb = &tb1->jmp_next[n1];
518 }
519 }
520 /* now we can suppress tb(n) from the list */
521 *ptb = tb->jmp_next[n];
522
523 tb->jmp_next[n] = NULL;
524 }
525 }
526
527 /* reset the jump entry 'n' of a TB so that it is not chained to
528 another TB */
529 static inline void tb_reset_jump(TranslationBlock *tb, int n)
530 {
531 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
532 }
533
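/* Remove a TB from the physical hash table, from the page lists of the one or
   two pages it covers, from every CPU's tb_jmp_cache, and unlink all TBs that
   jump to it. The generated code itself is only reclaimed by tb_flush(). */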
534 static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
535 {
536 CPUState *env;
537 PageDesc *p;
538 unsigned int h, n1;
539 target_phys_addr_t phys_pc;
540 TranslationBlock *tb1, *tb2;
541
542 /* remove the TB from the hash list */
543 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
544 h = tb_phys_hash_func(phys_pc);
545 tb_remove(&tb_phys_hash[h], tb,
546 offsetof(TranslationBlock, phys_hash_next));
547
548 /* remove the TB from the page list */
549 if (tb->page_addr[0] != page_addr) {
550 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
551 tb_page_remove(&p->first_tb, tb);
552 invalidate_page_bitmap(p);
553 }
554 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
555 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
556 tb_page_remove(&p->first_tb, tb);
557 invalidate_page_bitmap(p);
558 }
559
560 tb_invalidated_flag = 1;
561
562 /* remove the TB from the hash list */
563 h = tb_jmp_cache_hash_func(tb->pc);
564 for(env = first_cpu; env != NULL; env = env->next_cpu) {
565 if (env->tb_jmp_cache[h] == tb)
566 env->tb_jmp_cache[h] = NULL;
567 }
568
569 /* suppress this TB from the two jump lists */
570 tb_jmp_remove(tb, 0);
571 tb_jmp_remove(tb, 1);
572
573 /* suppress any remaining jumps to this TB */
574 tb1 = tb->jmp_first;
575 for(;;) {
576 n1 = (long)tb1 & 3;
577 if (n1 == 2)
578 break;
579 tb1 = (TranslationBlock *)((long)tb1 & ~3);
580 tb2 = tb1->jmp_next[n1];
581 tb_reset_jump(tb1, n1);
582 tb1->jmp_next[n1] = NULL;
583 tb1 = tb2;
584 }
585 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
586
587 tb_phys_invalidate_count++;
588 }
589
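/* Set bits [start, start + len) in the bitmap 'tab' (bit i is stored in byte
   i / 8 at position i % 8), handling the partial first and last bytes. */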
590 static inline void set_bits(uint8_t *tab, int start, int len)
591 {
592 int end, mask, end1;
593
594 end = start + len;
595 tab += start >> 3;
596 mask = 0xff << (start & 7);
597 if ((start & ~7) == (end & ~7)) {
598 if (start < end) {
599 mask &= ~(0xff << (end & 7));
600 *tab |= mask;
601 }
602 } else {
603 *tab++ |= mask;
604 start = (start + 8) & ~7;
605 end1 = end & ~7;
606 while (start < end1) {
607 *tab++ = 0xff;
608 start += 8;
609 }
610 if (start < end) {
611 mask = ~(0xff << (end & 7));
612 *tab |= mask;
613 }
614 }
615 }
616
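/* Build the per-page code bitmap: one bit per byte of the page, set when that
   byte is covered by a translated block. tb_invalidate_phys_page_fast() uses
   it to skip invalidation for writes that do not hit translated code. */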
617 static void build_page_bitmap(PageDesc *p)
618 {
619 int n, tb_start, tb_end;
620 TranslationBlock *tb;
621
622 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
623 if (!p->code_bitmap)
624 return;
625 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
626
627 tb = p->first_tb;
628 while (tb != NULL) {
629 n = (long)tb & 3;
630 tb = (TranslationBlock *)((long)tb & ~3);
631 /* NOTE: this is subtle as a TB may span two physical pages */
632 if (n == 0) {
633 /* NOTE: tb_end may be after the end of the page, but
634 it is not a problem */
635 tb_start = tb->pc & ~TARGET_PAGE_MASK;
636 tb_end = tb_start + tb->size;
637 if (tb_end > TARGET_PAGE_SIZE)
638 tb_end = TARGET_PAGE_SIZE;
639 } else {
640 tb_start = 0;
641 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
642 }
643 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
644 tb = tb->page_next[n];
645 }
646 }
647
648 #ifdef TARGET_HAS_PRECISE_SMC
649
650 static void tb_gen_code(CPUState *env,
651 target_ulong pc, target_ulong cs_base, int flags,
652 int cflags)
653 {
654 TranslationBlock *tb;
655 uint8_t *tc_ptr;
656 target_ulong phys_pc, phys_page2, virt_page2;
657 int code_gen_size;
658
659 phys_pc = get_phys_addr_code(env, pc);
660 tb = tb_alloc(pc);
661 if (!tb) {
662 /* flush must be done */
663 tb_flush(env);
664 /* cannot fail at this point */
665 tb = tb_alloc(pc);
666 }
667 tc_ptr = code_gen_ptr;
668 tb->tc_ptr = tc_ptr;
669 tb->cs_base = cs_base;
670 tb->flags = flags;
671 tb->cflags = cflags;
672 cpu_gen_code(env, tb, &code_gen_size);
673 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
674
675 /* check next page if needed */
676 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
677 phys_page2 = -1;
678 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
679 phys_page2 = get_phys_addr_code(env, virt_page2);
680 }
681 tb_link_phys(tb, phys_pc, phys_page2);
682 }
683 #endif
684
685 /* invalidate all TBs which intersect with the target physical page
686 starting in range [start, end[. NOTE: start and end must refer to
687 the same physical page. 'is_cpu_write_access' should be true if called
688 from a real cpu write access: the virtual CPU will exit the current
689 TB if code is modified inside this TB. */
690 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
691 int is_cpu_write_access)
692 {
693 int n, current_tb_modified, current_tb_not_found, current_flags;
694 CPUState *env = cpu_single_env;
695 PageDesc *p;
696 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
697 target_ulong tb_start, tb_end;
698 target_ulong current_pc, current_cs_base;
699
700 p = page_find(start >> TARGET_PAGE_BITS);
701 if (!p)
702 return;
703 if (!p->code_bitmap &&
704 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
705 is_cpu_write_access) {
706 /* build code bitmap */
707 build_page_bitmap(p);
708 }
709
710 /* we remove all the TBs in the range [start, end[ */
711 /* XXX: see if in some cases it could be faster to invalidate all the code */
712 current_tb_not_found = is_cpu_write_access;
713 current_tb_modified = 0;
714 current_tb = NULL; /* avoid warning */
715 current_pc = 0; /* avoid warning */
716 current_cs_base = 0; /* avoid warning */
717 current_flags = 0; /* avoid warning */
718 tb = p->first_tb;
719 while (tb != NULL) {
720 n = (long)tb & 3;
721 tb = (TranslationBlock *)((long)tb & ~3);
722 tb_next = tb->page_next[n];
723 /* NOTE: this is subtle as a TB may span two physical pages */
724 if (n == 0) {
725 /* NOTE: tb_end may be after the end of the page, but
726 it is not a problem */
727 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
728 tb_end = tb_start + tb->size;
729 } else {
730 tb_start = tb->page_addr[1];
731 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
732 }
733 if (!(tb_end <= start || tb_start >= end)) {
734 #ifdef TARGET_HAS_PRECISE_SMC
735 if (current_tb_not_found) {
736 current_tb_not_found = 0;
737 current_tb = NULL;
738 if (env->mem_write_pc) {
739 /* now we have a real cpu fault */
740 current_tb = tb_find_pc(env->mem_write_pc);
741 }
742 }
743 if (current_tb == tb &&
744 !(current_tb->cflags & CF_SINGLE_INSN)) {
745 /* If we are modifying the current TB, we must stop
746 its execution. We could be more precise by checking
747 that the modification is after the current PC, but it
748 would require a specialized function to partially
749 restore the CPU state */
750
751 current_tb_modified = 1;
752 cpu_restore_state(current_tb, env,
753 env->mem_write_pc, NULL);
754 #if defined(TARGET_I386)
755 current_flags = env->hflags;
756 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
757 current_cs_base = (target_ulong)env->segs[R_CS].base;
758 current_pc = current_cs_base + env->eip;
759 #else
760 #error unsupported CPU
761 #endif
762 }
763 #endif /* TARGET_HAS_PRECISE_SMC */
764 /* we need to do that to handle the case where a signal
765 occurs while doing tb_phys_invalidate() */
766 saved_tb = NULL;
767 if (env) {
768 saved_tb = env->current_tb;
769 env->current_tb = NULL;
770 }
771 tb_phys_invalidate(tb, -1);
772 if (env) {
773 env->current_tb = saved_tb;
774 if (env->interrupt_request && env->current_tb)
775 cpu_interrupt(env, env->interrupt_request);
776 }
777 }
778 tb = tb_next;
779 }
780 #if !defined(CONFIG_USER_ONLY)
781 /* if no code remaining, no need to continue to use slow writes */
782 if (!p->first_tb) {
783 invalidate_page_bitmap(p);
784 if (is_cpu_write_access) {
785 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
786 }
787 }
788 #endif
789 #ifdef TARGET_HAS_PRECISE_SMC
790 if (current_tb_modified) {
791 /* we generate a block containing just the instruction
792 modifying the memory. It will ensure that it cannot modify
793 itself */
794 env->current_tb = NULL;
795 tb_gen_code(env, current_pc, current_cs_base, current_flags,
796 CF_SINGLE_INSN);
797 cpu_resume_from_signal(env, NULL);
798 }
799 #endif
800 }
801
802 /* len must be <= 8 and start must be a multiple of len */
803 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
804 {
805 PageDesc *p;
806 int offset, b;
807 #if 0
808 if (1) {
809 if (loglevel) {
810 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
811 cpu_single_env->mem_write_vaddr, len,
812 cpu_single_env->eip,
813 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
814 }
815 }
816 #endif
817 p = page_find(start >> TARGET_PAGE_BITS);
818 if (!p)
819 return;
820 if (p->code_bitmap) {
821 offset = start & ~TARGET_PAGE_MASK;
822 b = p->code_bitmap[offset >> 3] >> (offset & 7);
823 if (b & ((1 << len) - 1))
824 goto do_invalidate;
825 } else {
826 do_invalidate:
827 tb_invalidate_phys_page_range(start, start + len, 1);
828 }
829 }
830
831 #if !defined(CONFIG_SOFTMMU)
832 static void tb_invalidate_phys_page(target_phys_addr_t addr,
833 unsigned long pc, void *puc)
834 {
835 int n, current_flags, current_tb_modified;
836 target_ulong current_pc, current_cs_base;
837 PageDesc *p;
838 TranslationBlock *tb, *current_tb;
839 #ifdef TARGET_HAS_PRECISE_SMC
840 CPUState *env = cpu_single_env;
841 #endif
842
843 addr &= TARGET_PAGE_MASK;
844 p = page_find(addr >> TARGET_PAGE_BITS);
845 if (!p)
846 return;
847 tb = p->first_tb;
848 current_tb_modified = 0;
849 current_tb = NULL;
850 current_pc = 0; /* avoid warning */
851 current_cs_base = 0; /* avoid warning */
852 current_flags = 0; /* avoid warning */
853 #ifdef TARGET_HAS_PRECISE_SMC
854 if (tb && pc != 0) {
855 current_tb = tb_find_pc(pc);
856 }
857 #endif
858 while (tb != NULL) {
859 n = (long)tb & 3;
860 tb = (TranslationBlock *)((long)tb & ~3);
861 #ifdef TARGET_HAS_PRECISE_SMC
862 if (current_tb == tb &&
863 !(current_tb->cflags & CF_SINGLE_INSN)) {
864 /* If we are modifying the current TB, we must stop
865 its execution. We could be more precise by checking
866 that the modification is after the current PC, but it
867 would require a specialized function to partially
868 restore the CPU state */
869
870 current_tb_modified = 1;
871 cpu_restore_state(current_tb, env, pc, puc);
872 #if defined(TARGET_I386)
873 current_flags = env->hflags;
874 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
875 current_cs_base = (target_ulong)env->segs[R_CS].base;
876 current_pc = current_cs_base + env->eip;
877 #else
878 #error unsupported CPU
879 #endif
880 }
881 #endif /* TARGET_HAS_PRECISE_SMC */
882 tb_phys_invalidate(tb, addr);
883 tb = tb->page_next[n];
884 }
885 p->first_tb = NULL;
886 #ifdef TARGET_HAS_PRECISE_SMC
887 if (current_tb_modified) {
888 /* we generate a block containing just the instruction
889 modifying the memory. It will ensure that it cannot modify
890 itself */
891 env->current_tb = NULL;
892 tb_gen_code(env, current_pc, current_cs_base, current_flags,
893 CF_SINGLE_INSN);
894 cpu_resume_from_signal(env, puc);
895 }
896 #endif
897 }
898 #endif
899
900 /* add the tb to the target page and protect it if necessary */
901 static inline void tb_alloc_page(TranslationBlock *tb,
902 unsigned int n, target_ulong page_addr)
903 {
904 PageDesc *p;
905 TranslationBlock *last_first_tb;
906
907 tb->page_addr[n] = page_addr;
908 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
909 tb->page_next[n] = p->first_tb;
910 last_first_tb = p->first_tb;
911 p->first_tb = (TranslationBlock *)((long)tb | n);
912 invalidate_page_bitmap(p);
913
914 #if defined(TARGET_HAS_SMC) || 1
915
916 #if defined(CONFIG_USER_ONLY)
917 if (p->flags & PAGE_WRITE) {
918 target_ulong addr;
919 PageDesc *p2;
920 int prot;
921
922 /* force the host page as non writable (writes will have a
923 page fault + mprotect overhead) */
924 page_addr &= qemu_host_page_mask;
925 prot = 0;
926 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
927 addr += TARGET_PAGE_SIZE) {
928
929 p2 = page_find (addr >> TARGET_PAGE_BITS);
930 if (!p2)
931 continue;
932 prot |= p2->flags;
933 p2->flags &= ~PAGE_WRITE;
934 page_get_flags(addr);
935 }
936 mprotect(g2h(page_addr), qemu_host_page_size,
937 (prot & PAGE_BITS) & ~PAGE_WRITE);
938 #ifdef DEBUG_TB_INVALIDATE
939 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
940 page_addr);
941 #endif
942 }
943 #else
944 /* if some code is already present, then the pages are already
945 protected. So we handle the case where only the first TB is
946 allocated in a physical page */
947 if (!last_first_tb) {
948 tlb_protect_code(page_addr);
949 }
950 #endif
951
952 #endif /* TARGET_HAS_SMC */
953 }
954
955 /* Allocate a new translation block. Flush the translation buffer if
956 too many translation blocks or too much generated code. */
957 TranslationBlock *tb_alloc(target_ulong pc)
958 {
959 TranslationBlock *tb;
960
961 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
962 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
963 return NULL;
964 tb = &tbs[nb_tbs++];
965 tb->pc = pc;
966 tb->cflags = 0;
967 return tb;
968 }
969
970 /* add a new TB and link it to the physical page tables. phys_page2 is
971 (-1) to indicate that only one page contains the TB. */
972 void tb_link_phys(TranslationBlock *tb,
973 target_ulong phys_pc, target_ulong phys_page2)
974 {
975 unsigned int h;
976 TranslationBlock **ptb;
977
978 /* add in the physical hash table */
979 h = tb_phys_hash_func(phys_pc);
980 ptb = &tb_phys_hash[h];
981 tb->phys_hash_next = *ptb;
982 *ptb = tb;
983
984 /* add in the page list */
985 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
986 if (phys_page2 != -1)
987 tb_alloc_page(tb, 1, phys_page2);
988 else
989 tb->page_addr[1] = -1;
990
991 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
992 tb->jmp_next[0] = NULL;
993 tb->jmp_next[1] = NULL;
994
995 /* init original jump addresses */
996 if (tb->tb_next_offset[0] != 0xffff)
997 tb_reset_jump(tb, 0);
998 if (tb->tb_next_offset[1] != 0xffff)
999 tb_reset_jump(tb, 1);
1000
1001 #ifdef DEBUG_TB_CHECK
1002 tb_page_check();
1003 #endif
1004 }
1005
1006 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1007 tb[1].tc_ptr. Return NULL if not found */
1008 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1009 {
1010 int m_min, m_max, m;
1011 unsigned long v;
1012 TranslationBlock *tb;
1013
1014 if (nb_tbs <= 0)
1015 return NULL;
1016 if (tc_ptr < (unsigned long)code_gen_buffer ||
1017 tc_ptr >= (unsigned long)code_gen_ptr)
1018 return NULL;
1019 /* binary search (cf Knuth) */
1020 m_min = 0;
1021 m_max = nb_tbs - 1;
1022 while (m_min <= m_max) {
1023 m = (m_min + m_max) >> 1;
1024 tb = &tbs[m];
1025 v = (unsigned long)tb->tc_ptr;
1026 if (v == tc_ptr)
1027 return tb;
1028 else if (tc_ptr < v) {
1029 m_max = m - 1;
1030 } else {
1031 m_min = m + 1;
1032 }
1033 }
1034 return &tbs[m_max];
1035 }
1036
1037 static void tb_reset_jump_recursive(TranslationBlock *tb);
1038
1039 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1040 {
1041 TranslationBlock *tb1, *tb_next, **ptb;
1042 unsigned int n1;
1043
1044 tb1 = tb->jmp_next[n];
1045 if (tb1 != NULL) {
1046 /* find head of list */
1047 for(;;) {
1048 n1 = (long)tb1 & 3;
1049 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1050 if (n1 == 2)
1051 break;
1052 tb1 = tb1->jmp_next[n1];
1053 }
1054 /* we are now sure that tb jumps to tb1 */
1055 tb_next = tb1;
1056
1057 /* remove tb from the jmp_first list */
1058 ptb = &tb_next->jmp_first;
1059 for(;;) {
1060 tb1 = *ptb;
1061 n1 = (long)tb1 & 3;
1062 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1063 if (n1 == n && tb1 == tb)
1064 break;
1065 ptb = &tb1->jmp_next[n1];
1066 }
1067 *ptb = tb->jmp_next[n];
1068 tb->jmp_next[n] = NULL;
1069
1070 /* suppress the jump to next tb in generated code */
1071 tb_reset_jump(tb, n);
1072
1073 /* suppress jumps in the tb on which we could have jumped */
1074 tb_reset_jump_recursive(tb_next);
1075 }
1076 }
1077
1078 static void tb_reset_jump_recursive(TranslationBlock *tb)
1079 {
1080 tb_reset_jump_recursive2(tb, 0);
1081 tb_reset_jump_recursive2(tb, 1);
1082 }
1083
1084 #if defined(TARGET_HAS_ICE)
1085 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1086 {
1087 target_phys_addr_t addr;
1088 target_ulong pd;
1089 ram_addr_t ram_addr;
1090 PhysPageDesc *p;
1091
1092 addr = cpu_get_phys_page_debug(env, pc);
1093 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1094 if (!p) {
1095 pd = IO_MEM_UNASSIGNED;
1096 } else {
1097 pd = p->phys_offset;
1098 }
1099 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1100 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1101 }
1102 #endif
1103
1104 /* Add a watchpoint. */
1105 int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1106 {
1107 int i;
1108
1109 for (i = 0; i < env->nb_watchpoints; i++) {
1110 if (addr == env->watchpoint[i].vaddr)
1111 return 0;
1112 }
1113 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1114 return -1;
1115
1116 i = env->nb_watchpoints++;
1117 env->watchpoint[i].vaddr = addr;
1118 tlb_flush_page(env, addr);
1119 /* FIXME: This flush is needed because of the hack to make memory ops
1120 terminate the TB. It can be removed once the proper IO trap and
1121 re-execute bits are in. */
1122 tb_flush(env);
1123 return i;
1124 }
1125
1126 /* Remove a watchpoint. */
1127 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1128 {
1129 int i;
1130
1131 for (i = 0; i < env->nb_watchpoints; i++) {
1132 if (addr == env->watchpoint[i].vaddr) {
1133 env->nb_watchpoints--;
1134 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1135 tlb_flush_page(env, addr);
1136 return 0;
1137 }
1138 }
1139 return -1;
1140 }
1141
1142 /* Remove all watchpoints. */
1143 void cpu_watchpoint_remove_all(CPUState *env) {
1144 int i;
1145
1146 for (i = 0; i < env->nb_watchpoints; i++) {
1147 tlb_flush_page(env, env->watchpoint[i].vaddr);
1148 }
1149 env->nb_watchpoints = 0;
1150 }
1151
1152 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1153 breakpoint is reached */
1154 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1155 {
1156 #if defined(TARGET_HAS_ICE)
1157 int i;
1158
1159 for(i = 0; i < env->nb_breakpoints; i++) {
1160 if (env->breakpoints[i] == pc)
1161 return 0;
1162 }
1163
1164 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1165 return -1;
1166 env->breakpoints[env->nb_breakpoints++] = pc;
1167
1168 breakpoint_invalidate(env, pc);
1169 return 0;
1170 #else
1171 return -1;
1172 #endif
1173 }
1174
1175 /* remove all breakpoints */
1176 void cpu_breakpoint_remove_all(CPUState *env) {
1177 #if defined(TARGET_HAS_ICE)
1178 int i;
1179 for(i = 0; i < env->nb_breakpoints; i++) {
1180 breakpoint_invalidate(env, env->breakpoints[i]);
1181 }
1182 env->nb_breakpoints = 0;
1183 #endif
1184 }
1185
1186 /* remove a breakpoint */
1187 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1188 {
1189 #if defined(TARGET_HAS_ICE)
1190 int i;
1191 for(i = 0; i < env->nb_breakpoints; i++) {
1192 if (env->breakpoints[i] == pc)
1193 goto found;
1194 }
1195 return -1;
1196 found:
1197 env->nb_breakpoints--;
1198 if (i < env->nb_breakpoints)
1199 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1200
1201 breakpoint_invalidate(env, pc);
1202 return 0;
1203 #else
1204 return -1;
1205 #endif
1206 }
1207
1208 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1209 CPU loop after each instruction */
1210 void cpu_single_step(CPUState *env, int enabled)
1211 {
1212 #if defined(TARGET_HAS_ICE)
1213 if (env->singlestep_enabled != enabled) {
1214 env->singlestep_enabled = enabled;
1215 /* must flush all the translated code to avoid inconsistencies */
1216 /* XXX: only flush what is necessary */
1217 tb_flush(env);
1218 }
1219 #endif
1220 }
1221
1222 /* enable or disable low levels log */
1223 void cpu_set_log(int log_flags)
1224 {
1225 loglevel = log_flags;
1226 if (loglevel && !logfile) {
1227 logfile = fopen(logfilename, log_append ? "a" : "w");
1228 if (!logfile) {
1229 perror(logfilename);
1230 _exit(1);
1231 }
1232 #if !defined(CONFIG_SOFTMMU)
1233 /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
1234 {
1235 static uint8_t logfile_buf[4096];
1236 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1237 }
1238 #else
1239 setvbuf(logfile, NULL, _IOLBF, 0);
1240 #endif
1241 log_append = 1;
1242 }
1243 if (!loglevel && logfile) {
1244 fclose(logfile);
1245 logfile = NULL;
1246 }
1247 }
1248
1249 void cpu_set_log_filename(const char *filename)
1250 {
1251 logfilename = strdup(filename);
1252 if (logfile) {
1253 fclose(logfile);
1254 logfile = NULL;
1255 }
1256 cpu_set_log(loglevel);
1257 }
1258
1259 /* mask must never be zero, except for A20 change call */
1260 void cpu_interrupt(CPUState *env, int mask)
1261 {
1262 TranslationBlock *tb;
1263 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1264
1265 env->interrupt_request |= mask;
1266 /* if the cpu is currently executing code, we must unlink it and
1267 all the potentially executing TB */
1268 tb = env->current_tb;
1269 if (tb && !testandset(&interrupt_lock)) {
1270 env->current_tb = NULL;
1271 tb_reset_jump_recursive(tb);
1272 resetlock(&interrupt_lock);
1273 }
1274 }
1275
1276 void cpu_reset_interrupt(CPUState *env, int mask)
1277 {
1278 env->interrupt_request &= ~mask;
1279 }
1280
1281 CPULogItem cpu_log_items[] = {
1282 { CPU_LOG_TB_OUT_ASM, "out_asm",
1283 "show generated host assembly code for each compiled TB" },
1284 { CPU_LOG_TB_IN_ASM, "in_asm",
1285 "show target assembly code for each compiled TB" },
1286 { CPU_LOG_TB_OP, "op",
1287 "show micro ops for each compiled TB" },
1288 { CPU_LOG_TB_OP_OPT, "op_opt",
1289 "show micro ops "
1290 #ifdef TARGET_I386
1291 "before eflags optimization and "
1292 #endif
1293 "after liveness analysis" },
1294 { CPU_LOG_INT, "int",
1295 "show interrupts/exceptions in short format" },
1296 { CPU_LOG_EXEC, "exec",
1297 "show trace before each executed TB (lots of logs)" },
1298 { CPU_LOG_TB_CPU, "cpu",
1299 "show CPU state before block translation" },
1300 #ifdef TARGET_I386
1301 { CPU_LOG_PCALL, "pcall",
1302 "show protected mode far calls/returns/exceptions" },
1303 #endif
1304 #ifdef DEBUG_IOPORT
1305 { CPU_LOG_IOPORT, "ioport",
1306 "show all i/o ports accesses" },
1307 #endif
1308 { 0, NULL, NULL },
1309 };
1310
1311 static int cmp1(const char *s1, int n, const char *s2)
1312 {
1313 if (strlen(s2) != n)
1314 return 0;
1315 return memcmp(s1, s2, n) == 0;
1316 }
1317
1318 /* takes a comma-separated list of log masks. Returns 0 on error. */
1319 int cpu_str_to_log_mask(const char *str)
1320 {
1321 CPULogItem *item;
1322 int mask;
1323 const char *p, *p1;
1324
1325 p = str;
1326 mask = 0;
1327 for(;;) {
1328 p1 = strchr(p, ',');
1329 if (!p1)
1330 p1 = p + strlen(p);
1331 if(cmp1(p,p1-p,"all")) {
1332 for(item = cpu_log_items; item->mask != 0; item++) {
1333 mask |= item->mask;
1334 }
1335 } else {
1336 for(item = cpu_log_items; item->mask != 0; item++) {
1337 if (cmp1(p, p1 - p, item->name))
1338 goto found;
1339 }
1340 return 0;
1341 }
1342 found:
1343 mask |= item->mask;
1344 if (*p1 != ',')
1345 break;
1346 p = p1 + 1;
1347 }
1348 return mask;
1349 }
1350
1351 void cpu_abort(CPUState *env, const char *fmt, ...)
1352 {
1353 va_list ap;
1354 va_list ap2;
1355
1356 va_start(ap, fmt);
1357 va_copy(ap2, ap);
1358 fprintf(stderr, "qemu: fatal: ");
1359 vfprintf(stderr, fmt, ap);
1360 fprintf(stderr, "\n");
1361 #ifdef TARGET_I386
1362 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1363 #else
1364 cpu_dump_state(env, stderr, fprintf, 0);
1365 #endif
1366 if (logfile) {
1367 fprintf(logfile, "qemu: fatal: ");
1368 vfprintf(logfile, fmt, ap2);
1369 fprintf(logfile, "\n");
1370 #ifdef TARGET_I386
1371 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1372 #else
1373 cpu_dump_state(env, logfile, fprintf, 0);
1374 #endif
1375 fflush(logfile);
1376 fclose(logfile);
1377 }
1378 va_end(ap2);
1379 va_end(ap);
1380 abort();
1381 }
1382
1383 CPUState *cpu_copy(CPUState *env)
1384 {
1385 CPUState *new_env = cpu_init(env->cpu_model_str);
1386 /* preserve chaining and index */
1387 CPUState *next_cpu = new_env->next_cpu;
1388 int cpu_index = new_env->cpu_index;
1389 memcpy(new_env, env, sizeof(CPUState));
1390 new_env->next_cpu = next_cpu;
1391 new_env->cpu_index = cpu_index;
1392 return new_env;
1393 }
1394
1395 #if !defined(CONFIG_USER_ONLY)
1396
1397 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1398 {
1399 unsigned int i;
1400
1401 /* Discard jump cache entries for any tb which might potentially
1402 overlap the flushed page. */
1403 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1404 memset (&env->tb_jmp_cache[i], 0,
1405 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1406
1407 i = tb_jmp_cache_hash_page(addr);
1408 memset (&env->tb_jmp_cache[i], 0,
1409 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1410 }
1411
1412 /* NOTE: if flush_global is true, also flush global entries (not
1413 implemented yet) */
1414 void tlb_flush(CPUState *env, int flush_global)
1415 {
1416 int i;
1417
1418 #if defined(DEBUG_TLB)
1419 printf("tlb_flush:\n");
1420 #endif
1421 /* must reset current TB so that interrupts cannot modify the
1422 links while we are modifying them */
1423 env->current_tb = NULL;
1424
1425 for(i = 0; i < CPU_TLB_SIZE; i++) {
1426 env->tlb_table[0][i].addr_read = -1;
1427 env->tlb_table[0][i].addr_write = -1;
1428 env->tlb_table[0][i].addr_code = -1;
1429 env->tlb_table[1][i].addr_read = -1;
1430 env->tlb_table[1][i].addr_write = -1;
1431 env->tlb_table[1][i].addr_code = -1;
1432 #if (NB_MMU_MODES >= 3)
1433 env->tlb_table[2][i].addr_read = -1;
1434 env->tlb_table[2][i].addr_write = -1;
1435 env->tlb_table[2][i].addr_code = -1;
1436 #if (NB_MMU_MODES == 4)
1437 env->tlb_table[3][i].addr_read = -1;
1438 env->tlb_table[3][i].addr_write = -1;
1439 env->tlb_table[3][i].addr_code = -1;
1440 #endif
1441 #endif
1442 }
1443
1444 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1445
1446 #if !defined(CONFIG_SOFTMMU)
1447 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1448 #endif
1449 #ifdef USE_KQEMU
1450 if (env->kqemu_enabled) {
1451 kqemu_flush(env, flush_global);
1452 }
1453 #endif
1454 tlb_flush_count++;
1455 }
1456
1457 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1458 {
1459 if (addr == (tlb_entry->addr_read &
1460 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1461 addr == (tlb_entry->addr_write &
1462 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1463 addr == (tlb_entry->addr_code &
1464 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1465 tlb_entry->addr_read = -1;
1466 tlb_entry->addr_write = -1;
1467 tlb_entry->addr_code = -1;
1468 }
1469 }
1470
1471 void tlb_flush_page(CPUState *env, target_ulong addr)
1472 {
1473 int i;
1474
1475 #if defined(DEBUG_TLB)
1476 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1477 #endif
1478 /* must reset current TB so that interrupts cannot modify the
1479 links while we are modifying them */
1480 env->current_tb = NULL;
1481
1482 addr &= TARGET_PAGE_MASK;
1483 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1484 tlb_flush_entry(&env->tlb_table[0][i], addr);
1485 tlb_flush_entry(&env->tlb_table[1][i], addr);
1486 #if (NB_MMU_MODES >= 3)
1487 tlb_flush_entry(&env->tlb_table[2][i], addr);
1488 #if (NB_MMU_MODES == 4)
1489 tlb_flush_entry(&env->tlb_table[3][i], addr);
1490 #endif
1491 #endif
1492
1493 tlb_flush_jmp_cache(env, addr);
1494
1495 #if !defined(CONFIG_SOFTMMU)
1496 if (addr < MMAP_AREA_END)
1497 munmap((void *)addr, TARGET_PAGE_SIZE);
1498 #endif
1499 #ifdef USE_KQEMU
1500 if (env->kqemu_enabled) {
1501 kqemu_flush_page(env, addr);
1502 }
1503 #endif
1504 }
1505
1506 /* update the TLBs so that writes to code in the virtual page 'addr'
1507 can be detected */
1508 static void tlb_protect_code(ram_addr_t ram_addr)
1509 {
1510 cpu_physical_memory_reset_dirty(ram_addr,
1511 ram_addr + TARGET_PAGE_SIZE,
1512 CODE_DIRTY_FLAG);
1513 }
1514
1515 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1516 tested for self-modifying code */
1517 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1518 target_ulong vaddr)
1519 {
1520 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1521 }
1522
1523 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1524 unsigned long start, unsigned long length)
1525 {
1526 unsigned long addr;
1527 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1528 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1529 if ((addr - start) < length) {
1530 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1531 }
1532 }
1533 }
1534
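/* Clear the given dirty flags for the physical range [start, end).
   phys_ram_dirty keeps one byte of dirty flags per target page; after the
   flags are cleared, write TLB entries covering the range are redirected to
   IO_MEM_NOTDIRTY so that the next write takes the slow path and sets the
   flags again. */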
1535 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1536 int dirty_flags)
1537 {
1538 CPUState *env;
1539 unsigned long length, start1;
1540 int i, mask, len;
1541 uint8_t *p;
1542
1543 start &= TARGET_PAGE_MASK;
1544 end = TARGET_PAGE_ALIGN(end);
1545
1546 length = end - start;
1547 if (length == 0)
1548 return;
1549 len = length >> TARGET_PAGE_BITS;
1550 #ifdef USE_KQEMU
1551 /* XXX: should not depend on cpu context */
1552 env = first_cpu;
1553 if (env->kqemu_enabled) {
1554 ram_addr_t addr;
1555 addr = start;
1556 for(i = 0; i < len; i++) {
1557 kqemu_set_notdirty(env, addr);
1558 addr += TARGET_PAGE_SIZE;
1559 }
1560 }
1561 #endif
1562 mask = ~dirty_flags;
1563 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1564 for(i = 0; i < len; i++)
1565 p[i] &= mask;
1566
1567 /* we modify the TLB cache so that the dirty bit will be set again
1568 when accessing the range */
1569 start1 = start + (unsigned long)phys_ram_base;
1570 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1571 for(i = 0; i < CPU_TLB_SIZE; i++)
1572 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1573 for(i = 0; i < CPU_TLB_SIZE; i++)
1574 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1575 #if (NB_MMU_MODES >= 3)
1576 for(i = 0; i < CPU_TLB_SIZE; i++)
1577 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1578 #if (NB_MMU_MODES == 4)
1579 for(i = 0; i < CPU_TLB_SIZE; i++)
1580 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1581 #endif
1582 #endif
1583 }
1584
1585 #if !defined(CONFIG_SOFTMMU)
1586 /* XXX: this is expensive */
1587 {
1588 VirtPageDesc *p;
1589 int j;
1590 target_ulong addr;
1591
1592 for(i = 0; i < L1_SIZE; i++) {
1593 p = l1_virt_map[i];
1594 if (p) {
1595 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1596 for(j = 0; j < L2_SIZE; j++) {
1597 if (p->valid_tag == virt_valid_tag &&
1598 p->phys_addr >= start && p->phys_addr < end &&
1599 (p->prot & PROT_WRITE)) {
1600 if (addr < MMAP_AREA_END) {
1601 mprotect((void *)addr, TARGET_PAGE_SIZE,
1602 p->prot & ~PROT_WRITE);
1603 }
1604 }
1605 addr += TARGET_PAGE_SIZE;
1606 p++;
1607 }
1608 }
1609 }
1610 }
1611 #endif
1612 }
1613
1614 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1615 {
1616 ram_addr_t ram_addr;
1617
1618 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1619 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1620 tlb_entry->addend - (unsigned long)phys_ram_base;
1621 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1622 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1623 }
1624 }
1625 }
1626
1627 /* update the TLB according to the current state of the dirty bits */
1628 void cpu_tlb_update_dirty(CPUState *env)
1629 {
1630 int i;
1631 for(i = 0; i < CPU_TLB_SIZE; i++)
1632 tlb_update_dirty(&env->tlb_table[0][i]);
1633 for(i = 0; i < CPU_TLB_SIZE; i++)
1634 tlb_update_dirty(&env->tlb_table[1][i]);
1635 #if (NB_MMU_MODES >= 3)
1636 for(i = 0; i < CPU_TLB_SIZE; i++)
1637 tlb_update_dirty(&env->tlb_table[2][i]);
1638 #if (NB_MMU_MODES == 4)
1639 for(i = 0; i < CPU_TLB_SIZE; i++)
1640 tlb_update_dirty(&env->tlb_table[3][i]);
1641 #endif
1642 #endif
1643 }
1644
1645 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1646 unsigned long start)
1647 {
1648 unsigned long addr;
1649 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1650 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1651 if (addr == start) {
1652 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1653 }
1654 }
1655 }
1656
1657 /* update the TLB corresponding to virtual page vaddr and phys addr
1658 addr so that it is no longer dirty */
1659 static inline void tlb_set_dirty(CPUState *env,
1660 unsigned long addr, target_ulong vaddr)
1661 {
1662 int i;
1663
1664 addr &= TARGET_PAGE_MASK;
1665 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1666 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1667 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1668 #if (NB_MMU_MODES >= 3)
1669 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1670 #if (NB_MMU_MODES == 4)
1671 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1672 #endif
1673 #endif
1674 }
1675
1676 /* add a new TLB entry. At most one entry for a given virtual address
1677 is permitted. Return 0 if OK or 2 if the page could not be mapped
1678 (can only happen in non SOFTMMU mode for I/O pages or pages
1679 conflicting with the host address space). */
1680 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1681 target_phys_addr_t paddr, int prot,
1682 int mmu_idx, int is_softmmu)
1683 {
1684 PhysPageDesc *p;
1685 unsigned long pd;
1686 unsigned int index;
1687 target_ulong address;
1688 target_phys_addr_t addend;
1689 int ret;
1690 CPUTLBEntry *te;
1691 int i;
1692
1693 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1694 if (!p) {
1695 pd = IO_MEM_UNASSIGNED;
1696 } else {
1697 pd = p->phys_offset;
1698 }
1699 #if defined(DEBUG_TLB)
1700 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1701 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1702 #endif
1703
1704 ret = 0;
1705 #if !defined(CONFIG_SOFTMMU)
1706 if (is_softmmu)
1707 #endif
1708 {
1709 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1710 /* IO memory case */
1711 address = vaddr | pd;
1712 addend = paddr;
1713 } else {
1714 /* standard memory */
1715 address = vaddr;
1716 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1717 }
1718
1719 /* Make accesses to pages with watchpoints go via the
1720 watchpoint trap routines. */
1721 for (i = 0; i < env->nb_watchpoints; i++) {
1722 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1723 if (address & ~TARGET_PAGE_MASK) {
1724 env->watchpoint[i].addend = 0;
1725 address = vaddr | io_mem_watch;
1726 } else {
1727 env->watchpoint[i].addend = pd - paddr +
1728 (unsigned long) phys_ram_base;
1729 /* TODO: Figure out how to make read watchpoints coexist
1730 with code. */
1731 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1732 }
1733 }
1734 }
1735
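/* For RAM pages, 'addend' is chosen so that host address = guest virtual
   address + addend; for I/O pages the low bits of 'address' select the
   io_mem handler instead. */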
1736 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1737 addend -= vaddr;
1738 te = &env->tlb_table[mmu_idx][index];
1739 te->addend = addend;
1740 if (prot & PAGE_READ) {
1741 te->addr_read = address;
1742 } else {
1743 te->addr_read = -1;
1744 }
1745
1746 if (te->addr_code != -1) {
1747 tlb_flush_jmp_cache(env, te->addr_code);
1748 }
1749 if (prot & PAGE_EXEC) {
1750 te->addr_code = address;
1751 } else {
1752 te->addr_code = -1;
1753 }
1754 if (prot & PAGE_WRITE) {
1755 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1756 (pd & IO_MEM_ROMD)) {
1757 /* write access calls the I/O callback */
1758 te->addr_write = vaddr |
1759 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1760 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1761 !cpu_physical_memory_is_dirty(pd)) {
1762 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1763 } else {
1764 te->addr_write = address;
1765 }
1766 } else {
1767 te->addr_write = -1;
1768 }
1769 }
1770 #if !defined(CONFIG_SOFTMMU)
1771 else {
1772 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1773 /* IO access: no mapping is done as it will be handled by the
1774 soft MMU */
1775 if (!(env->hflags & HF_SOFTMMU_MASK))
1776 ret = 2;
1777 } else {
1778 void *map_addr;
1779
1780 if (vaddr >= MMAP_AREA_END) {
1781 ret = 2;
1782 } else {
1783 if (prot & PROT_WRITE) {
1784 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1785 #if defined(TARGET_HAS_SMC) || 1
1786 first_tb ||
1787 #endif
1788 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1789 !cpu_physical_memory_is_dirty(pd))) {
1790 /* ROM: we treat it as if code were inside */
1791 /* if code is present, we only map it read-only and save the
1792 original mapping */
1793 VirtPageDesc *vp;
1794
1795 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1796 vp->phys_addr = pd;
1797 vp->prot = prot;
1798 vp->valid_tag = virt_valid_tag;
1799 prot &= ~PAGE_WRITE;
1800 }
1801 }
1802 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1803 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1804 if (map_addr == MAP_FAILED) {
1805 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1806 paddr, vaddr);
1807 }
1808 }
1809 }
1810 }
1811 #endif
1812 return ret;
1813 }
1814
1815 /* called from signal handler: invalidate the code and unprotect the
1816 page. Return TRUE if the fault was successfully handled. */
1817 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1818 {
1819 #if !defined(CONFIG_SOFTMMU)
1820 VirtPageDesc *vp;
1821
1822 #if defined(DEBUG_TLB)
1823 printf("page_unprotect: addr=0x%08x\n", addr);
1824 #endif
1825 addr &= TARGET_PAGE_MASK;
1826
1827 /* if it is not mapped, no need to worry here */
1828 if (addr >= MMAP_AREA_END)
1829 return 0;
1830 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1831 if (!vp)
1832 return 0;
1833 /* NOTE: in this case, validate_tag is _not_ tested as it
1834 validates only the code TLB */
1835 if (vp->valid_tag != virt_valid_tag)
1836 return 0;
1837 if (!(vp->prot & PAGE_WRITE))
1838 return 0;
1839 #if defined(DEBUG_TLB)
1840 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1841 addr, vp->phys_addr, vp->prot);
1842 #endif
1843 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1844 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1845 (unsigned long)addr, vp->prot);
1846 /* set the dirty bit */
1847 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1848 /* flush the code inside */
1849 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1850 return 1;
1851 #else
1852 return 0;
1853 #endif
1854 }
1855
1856 #else
1857
1858 void tlb_flush(CPUState *env, int flush_global)
1859 {
1860 }
1861
1862 void tlb_flush_page(CPUState *env, target_ulong addr)
1863 {
1864 }
1865
1866 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1867 target_phys_addr_t paddr, int prot,
1868 int mmu_idx, int is_softmmu)
1869 {
1870 return 0;
1871 }
1872
1873 /* dump memory mappings */
1874 void page_dump(FILE *f)
1875 {
1876 unsigned long start, end;
1877 int i, j, prot, prot1;
1878 PageDesc *p;
1879
1880 fprintf(f, "%-8s %-8s %-8s %s\n",
1881 "start", "end", "size", "prot");
1882 start = -1;
1883 end = -1;
1884 prot = 0;
1885 for(i = 0; i <= L1_SIZE; i++) {
1886 if (i < L1_SIZE)
1887 p = l1_map[i];
1888 else
1889 p = NULL;
1890 for(j = 0;j < L2_SIZE; j++) {
1891 if (!p)
1892 prot1 = 0;
1893 else
1894 prot1 = p[j].flags;
1895 if (prot1 != prot) {
1896 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1897 if (start != -1) {
1898 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1899 start, end, end - start,
1900 prot & PAGE_READ ? 'r' : '-',
1901 prot & PAGE_WRITE ? 'w' : '-',
1902 prot & PAGE_EXEC ? 'x' : '-');
1903 }
1904 if (prot1 != 0)
1905 start = end;
1906 else
1907 start = -1;
1908 prot = prot1;
1909 }
1910 if (!p)
1911 break;
1912 }
1913 }
1914 }
1915
1916 int page_get_flags(target_ulong address)
1917 {
1918 PageDesc *p;
1919
1920 p = page_find(address >> TARGET_PAGE_BITS);
1921 if (!p)
1922 return 0;
1923 return p->flags;
1924 }
1925
1926 /* modify the flags of a page and invalidate the code if
1927 necessary. The flag PAGE_WRITE_ORG is set automatically
1928 depending on PAGE_WRITE */
1929 void page_set_flags(target_ulong start, target_ulong end, int flags)
1930 {
1931 PageDesc *p;
1932 target_ulong addr;
1933
1934 start = start & TARGET_PAGE_MASK;
1935 end = TARGET_PAGE_ALIGN(end);
1936 if (flags & PAGE_WRITE)
1937 flags |= PAGE_WRITE_ORG;
1938 spin_lock(&tb_lock);
1939 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1940 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1941 /* if the write protection is set, then we invalidate the code
1942 inside */
1943 if (!(p->flags & PAGE_WRITE) &&
1944 (flags & PAGE_WRITE) &&
1945 p->first_tb) {
1946 tb_invalidate_phys_page(addr, 0, NULL);
1947 }
1948 p->flags = flags;
1949 }
1950 spin_unlock(&tb_lock);
1951 }
1952
1953 int page_check_range(target_ulong start, target_ulong len, int flags)
1954 {
1955 PageDesc *p;
1956 target_ulong end;
1957 target_ulong addr;
1958
1959 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1960 start = start & TARGET_PAGE_MASK;
1961
1962 if( end < start )
1963 /* we've wrapped around */
1964 return -1;
1965 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1966 p = page_find(addr >> TARGET_PAGE_BITS);
1967 if( !p )
1968 return -1;
1969 if( !(p->flags & PAGE_VALID) )
1970 return -1;
1971
1972 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
1973 return -1;
1974 if (flags & PAGE_WRITE) {
1975 if (!(p->flags & PAGE_WRITE_ORG))
1976 return -1;
1977 /* unprotect the page if it was put read-only because it
1978 contains translated code */
1979 if (!(p->flags & PAGE_WRITE)) {
1980 if (!page_unprotect(addr, 0, NULL))
1981 return -1;
1982 }
1983 return 0;
1984 }
1985 }
1986 return 0;
1987 }
1988
1989 /* called from signal handler: invalidate the code and unprotect the
1990 page. Return TRUE if the fault was successfully handled. */
1991 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1992 {
1993 unsigned int page_index, prot, pindex;
1994 PageDesc *p, *p1;
1995 target_ulong host_start, host_end, addr;
1996
1997 host_start = address & qemu_host_page_mask;
1998 page_index = host_start >> TARGET_PAGE_BITS;
1999 p1 = page_find(page_index);
2000 if (!p1)
2001 return 0;
2002 host_end = host_start + qemu_host_page_size;
2003 p = p1;
2004 prot = 0;
2005 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2006 prot |= p->flags;
2007 p++;
2008 }
2009 /* if the page was really writable, then we change its
2010 protection back to writable */
2011 if (prot & PAGE_WRITE_ORG) {
2012 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2013 if (!(p1[pindex].flags & PAGE_WRITE)) {
2014 mprotect((void *)g2h(host_start), qemu_host_page_size,
2015 (prot & PAGE_BITS) | PAGE_WRITE);
2016 p1[pindex].flags |= PAGE_WRITE;
2017 /* and since the content will be modified, we must invalidate
2018 the corresponding translated code. */
2019 tb_invalidate_phys_page(address, pc, puc);
2020 #ifdef DEBUG_TB_CHECK
2021 tb_invalidate_check(address);
2022 #endif
2023 return 1;
2024 }
2025 }
2026 return 0;
2027 }
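/* Typical call site (sketch): the host SIGSEGV handler used by user-mode
   emulation can try to recover from a write fault on a page that was
   write-protected to guard translated code.  fault_addr, pc, puc and
   handled are hypothetical names for the faulting host address, program
   counter, signal context and result.

       handled = page_unprotect(h2g(fault_addr), pc, puc);

   If handled is non-zero, the write protection only existed to guard
   cached translations (now invalidated) and the faulting instruction can
   simply be restarted; otherwise the fault is a genuine guest fault. */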
2028
2029 static inline void tlb_set_dirty(CPUState *env,
2030 unsigned long addr, target_ulong vaddr)
2031 {
2032 }
2033 #endif /* defined(CONFIG_USER_ONLY) */
2034
2035 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2036 ram_addr_t memory);
2037 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2038 ram_addr_t orig_memory);
2039 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2040 need_subpage) \
2041 do { \
2042 if (addr > start_addr) \
2043 start_addr2 = 0; \
2044 else { \
2045 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2046 if (start_addr2 > 0) \
2047 need_subpage = 1; \
2048 } \
2049 \
2050 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2051 end_addr2 = TARGET_PAGE_SIZE - 1; \
2052 else { \
2053 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2054 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2055 need_subpage = 1; \
2056 } \
2057 } while (0)
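/* Worked example (assuming TARGET_PAGE_SIZE == 0x1000, so
   ~TARGET_PAGE_MASK == 0xfff): registering a region with
   start_addr = 0x10100 and orig_size = 0x300, on the iteration where
   addr == start_addr:

       addr > start_addr is false, so
           start_addr2 = 0x10100 & 0xfff = 0x100   (> 0, so need_subpage)
       (start_addr + orig_size) - addr = 0x300 < TARGET_PAGE_SIZE, so
           end_addr2 = 0x103ff & 0xfff = 0x3ff     (< 0xfff, so need_subpage)

   i.e. the mapping only covers bytes 0x100..0x3ff of that page, so the
   page has to go through the subpage machinery used below. */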
2058
2059 /* register physical memory. 'size' must be a multiple of the target
2060 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2061 I/O memory page */
2062 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2063 ram_addr_t size,
2064 ram_addr_t phys_offset)
2065 {
2066 target_phys_addr_t addr, end_addr;
2067 PhysPageDesc *p;
2068 CPUState *env;
2069 ram_addr_t orig_size = size;
2070 void *subpage;
2071
2072 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2073 end_addr = start_addr + (target_phys_addr_t)size;
2074 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2075 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2076 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2077 ram_addr_t orig_memory = p->phys_offset;
2078 target_phys_addr_t start_addr2, end_addr2;
2079 int need_subpage = 0;
2080
2081 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2082 need_subpage);
2083 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2084 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2085 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2086 &p->phys_offset, orig_memory);
2087 } else {
2088 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2089 >> IO_MEM_SHIFT];
2090 }
2091 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2092 } else {
2093 p->phys_offset = phys_offset;
2094 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2095 (phys_offset & IO_MEM_ROMD))
2096 phys_offset += TARGET_PAGE_SIZE;
2097 }
2098 } else {
2099 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2100 p->phys_offset = phys_offset;
2101 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2102 (phys_offset & IO_MEM_ROMD))
2103 phys_offset += TARGET_PAGE_SIZE;
2104 else {
2105 target_phys_addr_t start_addr2, end_addr2;
2106 int need_subpage = 0;
2107
2108 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2109 end_addr2, need_subpage);
2110
2111 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2112 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2113 &p->phys_offset, IO_MEM_UNASSIGNED);
2114 subpage_register(subpage, start_addr2, end_addr2,
2115 phys_offset);
2116 }
2117 }
2118 }
2119 }
2120
2121 /* since each CPU stores ram addresses in its TLB cache, we must
2122 reset the modified entries */
2123 /* XXX: slow ! */
2124 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2125 tlb_flush(env, 1);
2126 }
2127 }
2128
2129 /* XXX: temporary until new memory mapping API */
2130 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2131 {
2132 PhysPageDesc *p;
2133
2134 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2135 if (!p)
2136 return IO_MEM_UNASSIGNED;
2137 return p->phys_offset;
2138 }
2139
2140 /* XXX: better than nothing */
2141 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2142 {
2143 ram_addr_t addr;
2144 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2145 fprintf(stderr, "Not enough memory (requested_size = %lu, max memory = %lu)\n",
2146 (unsigned long)size, (unsigned long)phys_ram_size);
2147 abort();
2148 }
2149 addr = phys_ram_alloc_offset;
2150 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2151 return addr;
2152 }
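/* Usage sketch (illustrative only, names and addresses made up): a machine
   model typically allocates guest RAM from the preallocated pool and then
   maps it into the physical address space:

       ram_addr_t ram_offset = qemu_ram_alloc(machine_ram_size);
       cpu_register_physical_memory(0x00000000, machine_ram_size,
                                    ram_offset | IO_MEM_RAM);

   machine_ram_size is a hypothetical variable.  In the last argument the
   low bits select the handler type (IO_MEM_RAM here) and the page-aligned
   upper bits give the offset of the backing storage within phys_ram_base. */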
2153
2154 void qemu_ram_free(ram_addr_t addr)
2155 {
2156 }
2157
2158 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2159 {
2160 #ifdef DEBUG_UNASSIGNED
2161 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2162 #endif
2163 #ifdef TARGET_SPARC
2164 do_unassigned_access(addr, 0, 0, 0);
2165 #elif defined(TARGET_CRIS)
2166 do_unassigned_access(addr, 0, 0, 0);
2167 #endif
2168 return 0;
2169 }
2170
2171 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2172 {
2173 #ifdef DEBUG_UNASSIGNED
2174 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2175 #endif
2176 #ifdef TARGET_SPARC
2177 do_unassigned_access(addr, 1, 0, 0);
2178 #elif defined(TARGET_CRIS)
2179 do_unassigned_access(addr, 1, 0, 0);
2180 #endif
2181 }
2182
2183 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2184 unassigned_mem_readb,
2185 unassigned_mem_readb,
2186 unassigned_mem_readb,
2187 };
2188
2189 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2190 unassigned_mem_writeb,
2191 unassigned_mem_writeb,
2192 unassigned_mem_writeb,
2193 };
2194
2195 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2196 {
2197 unsigned long ram_addr;
2198 int dirty_flags;
2199 ram_addr = addr - (unsigned long)phys_ram_base;
2200 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2201 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2202 #if !defined(CONFIG_USER_ONLY)
2203 tb_invalidate_phys_page_fast(ram_addr, 1);
2204 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2205 #endif
2206 }
2207 stb_p((uint8_t *)(long)addr, val);
2208 #ifdef USE_KQEMU
2209 if (cpu_single_env->kqemu_enabled &&
2210 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2211 kqemu_modify_page(cpu_single_env, ram_addr);
2212 #endif
2213 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2214 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2215 /* we remove the notdirty callback only if the code has been
2216 flushed */
2217 if (dirty_flags == 0xff)
2218 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2219 }
2220
2221 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2222 {
2223 unsigned long ram_addr;
2224 int dirty_flags;
2225 ram_addr = addr - (unsigned long)phys_ram_base;
2226 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2227 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2228 #if !defined(CONFIG_USER_ONLY)
2229 tb_invalidate_phys_page_fast(ram_addr, 2);
2230 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2231 #endif
2232 }
2233 stw_p((uint8_t *)(long)addr, val);
2234 #ifdef USE_KQEMU
2235 if (cpu_single_env->kqemu_enabled &&
2236 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2237 kqemu_modify_page(cpu_single_env, ram_addr);
2238 #endif
2239 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2240 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2241 /* we remove the notdirty callback only if the code has been
2242 flushed */
2243 if (dirty_flags == 0xff)
2244 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2245 }
2246
2247 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2248 {
2249 unsigned long ram_addr;
2250 int dirty_flags;
2251 ram_addr = addr - (unsigned long)phys_ram_base;
2252 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2253 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2254 #if !defined(CONFIG_USER_ONLY)
2255 tb_invalidate_phys_page_fast(ram_addr, 4);
2256 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2257 #endif
2258 }
2259 stl_p((uint8_t *)(long)addr, val);
2260 #ifdef USE_KQEMU
2261 if (cpu_single_env->kqemu_enabled &&
2262 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2263 kqemu_modify_page(cpu_single_env, ram_addr);
2264 #endif
2265 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2266 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2267 /* we remove the notdirty callback only if the code has been
2268 flushed */
2269 if (dirty_flags == 0xff)
2270 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2271 }
2272
2273 static CPUReadMemoryFunc *error_mem_read[3] = {
2274 NULL, /* never used */
2275 NULL, /* never used */
2276 NULL, /* never used */
2277 };
2278
2279 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2280 notdirty_mem_writeb,
2281 notdirty_mem_writew,
2282 notdirty_mem_writel,
2283 };
2284
2285 #if defined(CONFIG_SOFTMMU)
2286 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2287 so these check for a hit then pass through to the normal out-of-line
2288 phys routines. */
2289 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2290 {
2291 return ldub_phys(addr);
2292 }
2293
2294 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2295 {
2296 return lduw_phys(addr);
2297 }
2298
2299 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2300 {
2301 return ldl_phys(addr);
2302 }
2303
2304 /* Generate a debug exception if a watchpoint has been hit.
2305 Returns the real physical address of the access. addr will be a host
2306 address in case of a RAM location. */
2307 static target_ulong check_watchpoint(target_phys_addr_t addr)
2308 {
2309 CPUState *env = cpu_single_env;
2310 target_ulong watch;
2311 target_ulong retaddr;
2312 int i;
2313
2314 retaddr = addr;
2315 for (i = 0; i < env->nb_watchpoints; i++) {
2316 watch = env->watchpoint[i].vaddr;
2317 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2318 retaddr = addr - env->watchpoint[i].addend;
2319 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2320 cpu_single_env->watchpoint_hit = i + 1;
2321 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2322 break;
2323 }
2324 }
2325 }
2326 return retaddr;
2327 }
2328
2329 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2330 uint32_t val)
2331 {
2332 addr = check_watchpoint(addr);
2333 stb_phys(addr, val);
2334 }
2335
2336 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2337 uint32_t val)
2338 {
2339 addr = check_watchpoint(addr);
2340 stw_phys(addr, val);
2341 }
2342
2343 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2344 uint32_t val)
2345 {
2346 addr = check_watchpoint(addr);
2347 stl_phys(addr, val);
2348 }
2349
2350 static CPUReadMemoryFunc *watch_mem_read[3] = {
2351 watch_mem_readb,
2352 watch_mem_readw,
2353 watch_mem_readl,
2354 };
2355
2356 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2357 watch_mem_writeb,
2358 watch_mem_writew,
2359 watch_mem_writel,
2360 };
2361 #endif
2362
2363 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2364 unsigned int len)
2365 {
2366 uint32_t ret;
2367 unsigned int idx;
2368
2369 idx = SUBPAGE_IDX(addr - mmio->base);
2370 #if defined(DEBUG_SUBPAGE)
2371 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2372 mmio, len, addr, idx);
2373 #endif
2374 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2375
2376 return ret;
2377 }
2378
2379 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2380 uint32_t value, unsigned int len)
2381 {
2382 unsigned int idx;
2383
2384 idx = SUBPAGE_IDX(addr - mmio->base);
2385 #if defined(DEBUG_SUBPAGE)
2386 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2387 mmio, len, addr, idx, value);
2388 #endif
2389 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2390 }
2391
2392 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2393 {
2394 #if defined(DEBUG_SUBPAGE)
2395 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2396 #endif
2397
2398 return subpage_readlen(opaque, addr, 0);
2399 }
2400
2401 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2402 uint32_t value)
2403 {
2404 #if defined(DEBUG_SUBPAGE)
2405 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2406 #endif
2407 subpage_writelen(opaque, addr, value, 0);
2408 }
2409
2410 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2411 {
2412 #if defined(DEBUG_SUBPAGE)
2413 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2414 #endif
2415
2416 return subpage_readlen(opaque, addr, 1);
2417 }
2418
2419 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2420 uint32_t value)
2421 {
2422 #if defined(DEBUG_SUBPAGE)
2423 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2424 #endif
2425 subpage_writelen(opaque, addr, value, 1);
2426 }
2427
2428 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2429 {
2430 #if defined(DEBUG_SUBPAGE)
2431 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2432 #endif
2433
2434 return subpage_readlen(opaque, addr, 2);
2435 }
2436
2437 static void subpage_writel (void *opaque,
2438 target_phys_addr_t addr, uint32_t value)
2439 {
2440 #if defined(DEBUG_SUBPAGE)
2441 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2442 #endif
2443 subpage_writelen(opaque, addr, value, 2);
2444 }
2445
2446 static CPUReadMemoryFunc *subpage_read[] = {
2447 &subpage_readb,
2448 &subpage_readw,
2449 &subpage_readl,
2450 };
2451
2452 static CPUWriteMemoryFunc *subpage_write[] = {
2453 &subpage_writeb,
2454 &subpage_writew,
2455 &subpage_writel,
2456 };
2457
2458 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2459 ram_addr_t memory)
2460 {
2461 int idx, eidx;
2462 unsigned int i;
2463
2464 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2465 return -1;
2466 idx = SUBPAGE_IDX(start);
2467 eidx = SUBPAGE_IDX(end);
2468 #if defined(DEBUG_SUBPAGE)
2469 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2470 mmio, start, end, idx, eidx, memory);
2471 #endif
2472 memory >>= IO_MEM_SHIFT;
2473 for (; idx <= eidx; idx++) {
2474 for (i = 0; i < 4; i++) {
2475 if (io_mem_read[memory][i]) {
2476 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2477 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2478 }
2479 if (io_mem_write[memory][i]) {
2480 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2481 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2482 }
2483 }
2484 }
2485
2486 return 0;
2487 }
2488
2489 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2490 ram_addr_t orig_memory)
2491 {
2492 subpage_t *mmio;
2493 int subpage_memory;
2494
2495 mmio = qemu_mallocz(sizeof(subpage_t));
2496 if (mmio != NULL) {
2497 mmio->base = base;
2498 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2499 #if defined(DEBUG_SUBPAGE)
2500 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2501 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2502 #endif
2503 *phys = subpage_memory | IO_MEM_SUBPAGE;
2504 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2505 }
2506
2507 return mmio;
2508 }
2509
2510 static void io_mem_init(void)
2511 {
2512 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2513 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2514 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2515 io_mem_nb = 5;
2516
2517 #if defined(CONFIG_SOFTMMU)
2518 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2519 watch_mem_write, NULL);
2520 #endif
2521 /* alloc dirty bits array */
2522 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2523 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2524 }
2525
2526 /* mem_read and mem_write are arrays of functions containing the
2527 function to access byte (index 0), word (index 1) and dword (index
2528 2). A function can be omitted by passing a NULL pointer. The
2529 registered functions may be modified dynamically later.
2530 If io_index is non-zero, the corresponding I/O zone is
2531 modified. If it is zero, a new I/O zone is allocated. The return
2532 value can be used with cpu_register_physical_memory(); -1 is
2533 returned on error. */
2534 int cpu_register_io_memory(int io_index,
2535 CPUReadMemoryFunc **mem_read,
2536 CPUWriteMemoryFunc **mem_write,
2537 void *opaque)
2538 {
2539 int i, subwidth = 0;
2540
2541 if (io_index <= 0) {
2542 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2543 return -1;
2544 io_index = io_mem_nb++;
2545 } else {
2546 if (io_index >= IO_MEM_NB_ENTRIES)
2547 return -1;
2548 }
2549
2550 for(i = 0;i < 3; i++) {
2551 if (!mem_read[i] || !mem_write[i])
2552 subwidth = IO_MEM_SUBWIDTH;
2553 io_mem_read[io_index][i] = mem_read[i];
2554 io_mem_write[io_index][i] = mem_write[i];
2555 }
2556 io_mem_opaque[io_index] = opaque;
2557 return (io_index << IO_MEM_SHIFT) | subwidth;
2558 }
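/* Usage sketch (illustrative only, the mydev_* handlers, s and base_addr
   are hypothetical): a device model registers its byte/word/dword handlers
   and then maps the returned token at its bus address:

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(base_addr, TARGET_PAGE_SIZE, iomemtype);

   Passing 0 as io_index allocates a new slot; the opaque pointer s is
   handed back to every handler.  If any entry is NULL, the returned value
   carries IO_MEM_SUBWIDTH, which cpu_register_physical_memory() uses to
   route the page through the subpage handlers. */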
2559
2560 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2561 {
2562 return io_mem_write[io_index >> IO_MEM_SHIFT];
2563 }
2564
2565 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2566 {
2567 return io_mem_read[io_index >> IO_MEM_SHIFT];
2568 }
2569
2570 /* physical memory access (slow version, mainly for debug) */
2571 #if defined(CONFIG_USER_ONLY)
2572 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2573 int len, int is_write)
2574 {
2575 int l, flags;
2576 target_ulong page;
2577 void * p;
2578
2579 while (len > 0) {
2580 page = addr & TARGET_PAGE_MASK;
2581 l = (page + TARGET_PAGE_SIZE) - addr;
2582 if (l > len)
2583 l = len;
2584 flags = page_get_flags(page);
2585 if (!(flags & PAGE_VALID))
2586 return;
2587 if (is_write) {
2588 if (!(flags & PAGE_WRITE))
2589 return;
2590 /* XXX: this code should not depend on lock_user */
2591 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2592 /* FIXME - should this return an error rather than just fail? */
2593 return;
2594 memcpy(p, buf, l);
2595 unlock_user(p, addr, l);
2596 } else {
2597 if (!(flags & PAGE_READ))
2598 return;
2599 /* XXX: this code should not depend on lock_user */
2600 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2601 /* FIXME - should this return an error rather than just fail? */
2602 return;
2603 memcpy(buf, p, l);
2604 unlock_user(p, addr, 0);
2605 }
2606 len -= l;
2607 buf += l;
2608 addr += l;
2609 }
2610 }
2611
2612 #else
2613 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2614 int len, int is_write)
2615 {
2616 int l, io_index;
2617 uint8_t *ptr;
2618 uint32_t val;
2619 target_phys_addr_t page;
2620 unsigned long pd;
2621 PhysPageDesc *p;
2622
2623 while (len > 0) {
2624 page = addr & TARGET_PAGE_MASK;
2625 l = (page + TARGET_PAGE_SIZE) - addr;
2626 if (l > len)
2627 l = len;
2628 p = phys_page_find(page >> TARGET_PAGE_BITS);
2629 if (!p) {
2630 pd = IO_MEM_UNASSIGNED;
2631 } else {
2632 pd = p->phys_offset;
2633 }
2634
2635 if (is_write) {
2636 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2637 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2638 /* XXX: could force cpu_single_env to NULL to avoid
2639 potential bugs */
2640 if (l >= 4 && ((addr & 3) == 0)) {
2641 /* 32 bit write access */
2642 val = ldl_p(buf);
2643 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2644 l = 4;
2645 } else if (l >= 2 && ((addr & 1) == 0)) {
2646 /* 16 bit write access */
2647 val = lduw_p(buf);
2648 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2649 l = 2;
2650 } else {
2651 /* 8 bit write access */
2652 val = ldub_p(buf);
2653 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2654 l = 1;
2655 }
2656 } else {
2657 unsigned long addr1;
2658 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2659 /* RAM case */
2660 ptr = phys_ram_base + addr1;
2661 memcpy(ptr, buf, l);
2662 if (!cpu_physical_memory_is_dirty(addr1)) {
2663 /* invalidate code */
2664 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2665 /* set dirty bit */
2666 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2667 (0xff & ~CODE_DIRTY_FLAG);
2668 }
2669 }
2670 } else {
2671 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2672 !(pd & IO_MEM_ROMD)) {
2673 /* I/O case */
2674 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2675 if (l >= 4 && ((addr & 3) == 0)) {
2676 /* 32 bit read access */
2677 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2678 stl_p(buf, val);
2679 l = 4;
2680 } else if (l >= 2 && ((addr & 1) == 0)) {
2681 /* 16 bit read access */
2682 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2683 stw_p(buf, val);
2684 l = 2;
2685 } else {
2686 /* 8 bit read access */
2687 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2688 stb_p(buf, val);
2689 l = 1;
2690 }
2691 } else {
2692 /* RAM case */
2693 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2694 (addr & ~TARGET_PAGE_MASK);
2695 memcpy(buf, ptr, l);
2696 }
2697 }
2698 len -= l;
2699 buf += l;
2700 addr += l;
2701 }
2702 }
2703
2704 /* used for ROM loading: can write to both RAM and ROM */
2705 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2706 const uint8_t *buf, int len)
2707 {
2708 int l;
2709 uint8_t *ptr;
2710 target_phys_addr_t page;
2711 unsigned long pd;
2712 PhysPageDesc *p;
2713
2714 while (len > 0) {
2715 page = addr & TARGET_PAGE_MASK;
2716 l = (page + TARGET_PAGE_SIZE) - addr;
2717 if (l > len)
2718 l = len;
2719 p = phys_page_find(page >> TARGET_PAGE_BITS);
2720 if (!p) {
2721 pd = IO_MEM_UNASSIGNED;
2722 } else {
2723 pd = p->phys_offset;
2724 }
2725
2726 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2727 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2728 !(pd & IO_MEM_ROMD)) {
2729 /* do nothing */
2730 } else {
2731 unsigned long addr1;
2732 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2733 /* ROM/RAM case */
2734 ptr = phys_ram_base + addr1;
2735 memcpy(ptr, buf, l);
2736 }
2737 len -= l;
2738 buf += l;
2739 addr += l;
2740 }
2741 }
2742
2743
2744 /* warning: addr must be aligned */
2745 uint32_t ldl_phys(target_phys_addr_t addr)
2746 {
2747 int io_index;
2748 uint8_t *ptr;
2749 uint32_t val;
2750 unsigned long pd;
2751 PhysPageDesc *p;
2752
2753 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2754 if (!p) {
2755 pd = IO_MEM_UNASSIGNED;
2756 } else {
2757 pd = p->phys_offset;
2758 }
2759
2760 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2761 !(pd & IO_MEM_ROMD)) {
2762 /* I/O case */
2763 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2764 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2765 } else {
2766 /* RAM case */
2767 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2768 (addr & ~TARGET_PAGE_MASK);
2769 val = ldl_p(ptr);
2770 }
2771 return val;
2772 }
2773
2774 /* warning: addr must be aligned */
2775 uint64_t ldq_phys(target_phys_addr_t addr)
2776 {
2777 int io_index;
2778 uint8_t *ptr;
2779 uint64_t val;
2780 unsigned long pd;
2781 PhysPageDesc *p;
2782
2783 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2784 if (!p) {
2785 pd = IO_MEM_UNASSIGNED;
2786 } else {
2787 pd = p->phys_offset;
2788 }
2789
2790 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2791 !(pd & IO_MEM_ROMD)) {
2792 /* I/O case */
2793 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2794 #ifdef TARGET_WORDS_BIGENDIAN
2795 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2796 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2797 #else
2798 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2799 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2800 #endif
2801 } else {
2802 /* RAM case */
2803 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2804 (addr & ~TARGET_PAGE_MASK);
2805 val = ldq_p(ptr);
2806 }
2807 return val;
2808 }
2809
2810 /* XXX: optimize */
2811 uint32_t ldub_phys(target_phys_addr_t addr)
2812 {
2813 uint8_t val;
2814 cpu_physical_memory_read(addr, &val, 1);
2815 return val;
2816 }
2817
2818 /* XXX: optimize */
2819 uint32_t lduw_phys(target_phys_addr_t addr)
2820 {
2821 uint16_t val;
2822 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2823 return tswap16(val);
2824 }
2825
2826 /* warning: addr must be aligned. The RAM page is not marked as dirty
2827 and the code inside is not invalidated. It is useful if the dirty
2828 bits are used to track modified PTEs */
2829 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2830 {
2831 int io_index;
2832 uint8_t *ptr;
2833 unsigned long pd;
2834 PhysPageDesc *p;
2835
2836 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2837 if (!p) {
2838 pd = IO_MEM_UNASSIGNED;
2839 } else {
2840 pd = p->phys_offset;
2841 }
2842
2843 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2844 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2845 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2846 } else {
2847 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2848 (addr & ~TARGET_PAGE_MASK);
2849 stl_p(ptr, val);
2850 }
2851 }
2852
2853 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2854 {
2855 int io_index;
2856 uint8_t *ptr;
2857 unsigned long pd;
2858 PhysPageDesc *p;
2859
2860 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2861 if (!p) {
2862 pd = IO_MEM_UNASSIGNED;
2863 } else {
2864 pd = p->phys_offset;
2865 }
2866
2867 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2868 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2869 #ifdef TARGET_WORDS_BIGENDIAN
2870 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2871 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2872 #else
2873 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2874 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2875 #endif
2876 } else {
2877 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2878 (addr & ~TARGET_PAGE_MASK);
2879 stq_p(ptr, val);
2880 }
2881 }
2882
2883 /* warning: addr must be aligned */
2884 void stl_phys(target_phys_addr_t addr, uint32_t val)
2885 {
2886 int io_index;
2887 uint8_t *ptr;
2888 unsigned long pd;
2889 PhysPageDesc *p;
2890
2891 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2892 if (!p) {
2893 pd = IO_MEM_UNASSIGNED;
2894 } else {
2895 pd = p->phys_offset;
2896 }
2897
2898 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2899 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2900 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2901 } else {
2902 unsigned long addr1;
2903 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2904 /* RAM case */
2905 ptr = phys_ram_base + addr1;
2906 stl_p(ptr, val);
2907 if (!cpu_physical_memory_is_dirty(addr1)) {
2908 /* invalidate code */
2909 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2910 /* set dirty bit */
2911 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2912 (0xff & ~CODE_DIRTY_FLAG);
2913 }
2914 }
2915 }
2916
2917 /* XXX: optimize */
2918 void stb_phys(target_phys_addr_t addr, uint32_t val)
2919 {
2920 uint8_t v = val;
2921 cpu_physical_memory_write(addr, &v, 1);
2922 }
2923
2924 /* XXX: optimize */
2925 void stw_phys(target_phys_addr_t addr, uint32_t val)
2926 {
2927 uint16_t v = tswap16(val);
2928 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2929 }
2930
2931 /* XXX: optimize */
2932 void stq_phys(target_phys_addr_t addr, uint64_t val)
2933 {
2934 val = tswap64(val);
2935 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2936 }
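/* Usage sketch (illustrative only): device emulation commonly uses these
   helpers to walk guest-physical data structures, e.g. reading a DMA
   descriptor and writing back a status word.  desc_paddr, the field
   offsets and DESC_DONE are hypothetical.

       uint32_t ctrl = ldl_phys(desc_paddr);
       uint32_t buf  = ldl_phys(desc_paddr + 4);

       stl_phys(desc_paddr, ctrl | DESC_DONE);

   stl_phys() keeps the dirty bitmap and translated-code invalidation in
   sync; stl_phys_notdirty() skips that and is meant for cases such as PTE
   updates, where the dirty bits themselves are the data being tracked. */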
2937
2938 #endif
2939
2940 /* virtual memory access for debug */
2941 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2942 uint8_t *buf, int len, int is_write)
2943 {
2944 int l;
2945 target_phys_addr_t phys_addr;
2946 target_ulong page;
2947
2948 while (len > 0) {
2949 page = addr & TARGET_PAGE_MASK;
2950 phys_addr = cpu_get_phys_page_debug(env, page);
2951 /* if no physical page mapped, return an error */
2952 if (phys_addr == -1)
2953 return -1;
2954 l = (page + TARGET_PAGE_SIZE) - addr;
2955 if (l > len)
2956 l = len;
2957 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2958 buf, l, is_write);
2959 len -= l;
2960 buf += l;
2961 addr += l;
2962 }
2963 return 0;
2964 }
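/* Usage sketch (illustrative only): this routine is used, for instance, by
   the gdb stub and the monitor to access guest virtual memory.  vaddr is a
   hypothetical target_ulong; is_write = 0 selects a read.

       uint8_t buf[16];
       int err = cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0);

   A negative err means some page in the range has no physical mapping, in
   which case the partial contents of buf should not be trusted. */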
2965
2966 void dump_exec_info(FILE *f,
2967 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2968 {
2969 int i, target_code_size, max_target_code_size;
2970 int direct_jmp_count, direct_jmp2_count, cross_page;
2971 TranslationBlock *tb;
2972
2973 target_code_size = 0;
2974 max_target_code_size = 0;
2975 cross_page = 0;
2976 direct_jmp_count = 0;
2977 direct_jmp2_count = 0;
2978 for(i = 0; i < nb_tbs; i++) {
2979 tb = &tbs[i];
2980 target_code_size += tb->size;
2981 if (tb->size > max_target_code_size)
2982 max_target_code_size = tb->size;
2983 if (tb->page_addr[1] != -1)
2984 cross_page++;
2985 if (tb->tb_next_offset[0] != 0xffff) {
2986 direct_jmp_count++;
2987 if (tb->tb_next_offset[1] != 0xffff) {
2988 direct_jmp2_count++;
2989 }
2990 }
2991 }
2992 /* XXX: avoid using doubles ? */
2993 cpu_fprintf(f, "Translation buffer state:\n");
2994 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2995 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2996 nb_tbs ? target_code_size / nb_tbs : 0,
2997 max_target_code_size);
2998 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2999 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3000 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3001 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3002 cross_page,
3003 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3004 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3005 direct_jmp_count,
3006 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3007 direct_jmp2_count,
3008 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3009 cpu_fprintf(f, "\nStatistics:\n");
3010 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3011 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3012 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3013 #ifdef CONFIG_PROFILER
3014 {
3015 int64_t tot;
3016 tot = dyngen_interm_time + dyngen_code_time;
3017 cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
3018 tot, tot / 2.4e9);
3019 cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
3020 dyngen_tb_count,
3021 dyngen_tb_count1 - dyngen_tb_count,
3022 dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
3023 cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
3024 dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
3025 cpu_fprintf(f, "old ops/total ops %0.1f%%\n",
3026 dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
3027 cpu_fprintf(f, "deleted ops/TB %0.2f\n",
3028 dyngen_tb_count ?
3029 (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
3030 cpu_fprintf(f, "cycles/op %0.1f\n",
3031 dyngen_op_count ? (double)tot / dyngen_op_count : 0);
3032 cpu_fprintf(f, "cycles/in byte %0.1f\n",
3033 dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
3034 cpu_fprintf(f, "cycles/out byte %0.1f\n",
3035 dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
3036 if (tot == 0)
3037 tot = 1;
3038 cpu_fprintf(f, " gen_interm time %0.1f%%\n",
3039 (double)dyngen_interm_time / tot * 100.0);
3040 cpu_fprintf(f, " gen_code time %0.1f%%\n",
3041 (double)dyngen_code_time / tot * 100.0);
3042 cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
3043 dyngen_restore_count);
3044 cpu_fprintf(f, " avg cycles %0.1f\n",
3045 dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
3046 {
3047 extern void dump_op_count(void);
3048 dump_op_count();
3049 }
3050 }
3051 #endif
3052 }
3053
3054 #if !defined(CONFIG_USER_ONLY)
3055
3056 #define MMUSUFFIX _cmmu
3057 #define GETPC() NULL
3058 #define env cpu_single_env
3059 #define SOFTMMU_CODE_ACCESS
3060
3061 #define SHIFT 0
3062 #include "softmmu_template.h"
3063
3064 #define SHIFT 1
3065 #include "softmmu_template.h"
3066
3067 #define SHIFT 2
3068 #include "softmmu_template.h"
3069
3070 #define SHIFT 3
3071 #include "softmmu_template.h"
3072
3073 #undef env
3074
3075 #endif