1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
35
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
39 #include "tcg.h"
40 #if defined(CONFIG_USER_ONLY)
41 #include <qemu.h>
42 #endif
43
44 //#define DEBUG_TB_INVALIDATE
45 //#define DEBUG_FLUSH
46 //#define DEBUG_TLB
47 //#define DEBUG_UNASSIGNED
48
49 /* make various TB consistency checks */
50 //#define DEBUG_TB_CHECK
51 //#define DEBUG_TLB_CHECK
52
53 //#define DEBUG_IOPORT
54 //#define DEBUG_SUBPAGE
55
56 #if !defined(CONFIG_USER_ONLY)
57 /* TB consistency checks only implemented for usermode emulation. */
58 #undef DEBUG_TB_CHECK
59 #endif
60
61 #define SMC_BITMAP_USE_THRESHOLD 10
62
63 #define MMAP_AREA_START 0x00000000
64 #define MMAP_AREA_END 0xa8000000
65
66 #if defined(TARGET_SPARC64)
67 #define TARGET_PHYS_ADDR_SPACE_BITS 41
68 #elif defined(TARGET_SPARC)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 36
70 #elif defined(TARGET_ALPHA)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 42
72 #define TARGET_VIRT_ADDR_SPACE_BITS 42
73 #elif defined(TARGET_PPC64)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 36
79 #else
80 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
81 #define TARGET_PHYS_ADDR_SPACE_BITS 32
82 #endif
83
84 TranslationBlock *tbs;
85 int code_gen_max_blocks;
86 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
87 int nb_tbs;
88 /* any access to the tbs or the page table must use this lock */
89 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
90
91 uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
92 uint8_t *code_gen_buffer;
93 unsigned long code_gen_buffer_size;
94 /* threshold to flush the translated code buffer */
95 unsigned long code_gen_buffer_max_size;
96 uint8_t *code_gen_ptr;
97
98 #if !defined(CONFIG_USER_ONLY)
99 ram_addr_t phys_ram_size;
100 int phys_ram_fd;
101 uint8_t *phys_ram_base;
102 uint8_t *phys_ram_dirty;
103 static ram_addr_t phys_ram_alloc_offset = 0;
104 #endif
105
106 CPUState *first_cpu;
107 /* current CPU in the current thread. It is only valid inside
108 cpu_exec() */
109 CPUState *cpu_single_env;
110
111 typedef struct PageDesc {
112 /* list of TBs intersecting this ram page */
113 TranslationBlock *first_tb;
 114     /* in order to optimize self-modifying code, we count the number
 115        of code write accesses to a given page before switching to a bitmap */
116 unsigned int code_write_count;
117 uint8_t *code_bitmap;
118 #if defined(CONFIG_USER_ONLY)
119 unsigned long flags;
120 #endif
121 } PageDesc;
122
123 typedef struct PhysPageDesc {
124 /* offset in host memory of the page + io_index in the low bits */
125 ram_addr_t phys_offset;
126 } PhysPageDesc;
127
128 #define L2_BITS 10
129 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
 130 /* XXX: this is a temporary hack for the alpha target.
 131  *      In the future, this is to be replaced by a multi-level table
 132  *      to actually be able to handle the complete 64-bit address space.
133 */
134 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
135 #else
136 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
137 #endif
138
139 #define L1_SIZE (1 << L1_BITS)
140 #define L2_SIZE (1 << L2_BITS)
141
142 unsigned long qemu_real_host_page_size;
143 unsigned long qemu_host_page_bits;
144 unsigned long qemu_host_page_size;
145 unsigned long qemu_host_page_mask;
146
147 /* XXX: for system emulation, it could just be an array */
148 static PageDesc *l1_map[L1_SIZE];
149 PhysPageDesc **l1_phys_map;
150
151 #if !defined(CONFIG_USER_ONLY)
152 static void io_mem_init(void);
153
154 /* io memory support */
155 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
156 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
157 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
158 static int io_mem_nb;
159 static int io_mem_watch;
160 #endif
161
162 /* log support */
163 char *logfilename = "/tmp/qemu.log";
164 FILE *logfile;
165 int loglevel;
166 static int log_append = 0;
167
168 /* statistics */
169 static int tlb_flush_count;
170 static int tb_flush_count;
171 static int tb_phys_invalidate_count;
172
173 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
174 typedef struct subpage_t {
175 target_phys_addr_t base;
176 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
177 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
178 void *opaque[TARGET_PAGE_SIZE][2][4];
179 } subpage_t;
180
181 #ifdef _WIN32
182 static void map_exec(void *addr, long size)
183 {
184 DWORD old_protect;
185 VirtualProtect(addr, size,
186 PAGE_EXECUTE_READWRITE, &old_protect);
187
188 }
189 #else
190 static void map_exec(void *addr, long size)
191 {
192 unsigned long start, end, page_size;
193
194 page_size = getpagesize();
195 start = (unsigned long)addr;
196 start &= ~(page_size - 1);
197
198 end = (unsigned long)addr + size;
199 end += page_size - 1;
200 end &= ~(page_size - 1);
201
202 mprotect((void *)start, end - start,
203 PROT_READ | PROT_WRITE | PROT_EXEC);
204 }
205 #endif
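
/* Illustrative example (sketch, assuming a 4096-byte host page): the POSIX
   map_exec() above rounds outwards to whole host pages before calling
   mprotect().  For map_exec(addr = 0x401234, size = 100):

       start = 0x401234 & ~0xfff                  = 0x401000
       end   = (0x401234 + 100 + 0xfff) & ~0xfff  = 0x402000
       mprotect((void *)0x401000, 0x1000, PROT_READ | PROT_WRITE | PROT_EXEC);

   so callers only pass the buffer address and length, and the whole host
   page containing the buffer becomes executable. */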
206
207 static void page_init(void)
208 {
209 /* NOTE: we can always suppose that qemu_host_page_size >=
210 TARGET_PAGE_SIZE */
211 #ifdef _WIN32
212 {
213 SYSTEM_INFO system_info;
214 DWORD old_protect;
215
216 GetSystemInfo(&system_info);
217 qemu_real_host_page_size = system_info.dwPageSize;
218 }
219 #else
220 qemu_real_host_page_size = getpagesize();
221 #endif
222 if (qemu_host_page_size == 0)
223 qemu_host_page_size = qemu_real_host_page_size;
224 if (qemu_host_page_size < TARGET_PAGE_SIZE)
225 qemu_host_page_size = TARGET_PAGE_SIZE;
226 qemu_host_page_bits = 0;
227 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
228 qemu_host_page_bits++;
229 qemu_host_page_mask = ~(qemu_host_page_size - 1);
230 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
231 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
232
233 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
234 {
235 long long startaddr, endaddr;
236 FILE *f;
237 int n;
238
239 mmap_lock();
240 last_brk = (unsigned long)sbrk(0);
241 f = fopen("/proc/self/maps", "r");
242 if (f) {
243 do {
244 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
245 if (n == 2) {
246 startaddr = MIN(startaddr,
247 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
248 endaddr = MIN(endaddr,
249 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
250 page_set_flags(startaddr & TARGET_PAGE_MASK,
251 TARGET_PAGE_ALIGN(endaddr),
252 PAGE_RESERVED);
253 }
254 } while (!feof(f));
255 fclose(f);
256 }
257 mmap_unlock();
258 }
259 #endif
260 }
261
262 static inline PageDesc *page_find_alloc(target_ulong index)
263 {
264 PageDesc **lp, *p;
265
266 #if TARGET_LONG_BITS > 32
267 /* Host memory outside guest VM. For 32-bit targets we have already
268 excluded high addresses. */
269 if (index > ((target_ulong)L2_SIZE * L1_SIZE * TARGET_PAGE_SIZE))
270 return NULL;
271 #endif
272 lp = &l1_map[index >> L2_BITS];
273 p = *lp;
274 if (!p) {
275 /* allocate if not found */
276 #if defined(CONFIG_USER_ONLY)
277 unsigned long addr;
278 size_t len = sizeof(PageDesc) * L2_SIZE;
279 /* Don't use qemu_malloc because it may recurse. */
280 p = mmap(0, len, PROT_READ | PROT_WRITE,
281 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
282 *lp = p;
283 addr = h2g(p);
284 if (addr == (target_ulong)addr) {
285 page_set_flags(addr & TARGET_PAGE_MASK,
286 TARGET_PAGE_ALIGN(addr + len),
287 PAGE_RESERVED);
288 }
289 #else
290 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
291 *lp = p;
292 #endif
293 }
294 return p + (index & (L2_SIZE - 1));
295 }
296
297 static inline PageDesc *page_find(target_ulong index)
298 {
299 PageDesc *p;
300
301 p = l1_map[index >> L2_BITS];
302 if (!p)
303 return 0;
304 return p + (index & (L2_SIZE - 1));
305 }
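
/* Illustrative sketch (assuming the default L2_BITS == 10 and 4 KB target
   pages): page_find()/page_find_alloc() take a page index, i.e. a target
   address already shifted right by TARGET_PAGE_BITS, and split it in two:

       addr   = 0x080491c0
       index  = addr >> TARGET_PAGE_BITS    = 0x8049
       l1     = index >> L2_BITS            = 0x20   ->  l1_map[0x20]
       l2     = index & (L2_SIZE - 1)       = 0x49   ->  &l1_map[0x20][0x49]

   so each l1_map entry covers L2_SIZE pages, i.e. 4 MB of guest address
   space with these values. */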
306
307 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
308 {
309 void **lp, **p;
310 PhysPageDesc *pd;
311
312 p = (void **)l1_phys_map;
313 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
314
315 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
316 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
317 #endif
318 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
319 p = *lp;
320 if (!p) {
321 /* allocate if not found */
322 if (!alloc)
323 return NULL;
324 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
325 memset(p, 0, sizeof(void *) * L1_SIZE);
326 *lp = p;
327 }
328 #endif
329 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
330 pd = *lp;
331 if (!pd) {
332 int i;
333 /* allocate if not found */
334 if (!alloc)
335 return NULL;
336 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
337 *lp = pd;
338 for (i = 0; i < L2_SIZE; i++)
339 pd[i].phys_offset = IO_MEM_UNASSIGNED;
340 }
341 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
342 }
343
344 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
345 {
346 return phys_page_find_alloc(index, 0);
347 }
348
349 #if !defined(CONFIG_USER_ONLY)
350 static void tlb_protect_code(ram_addr_t ram_addr);
351 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
352 target_ulong vaddr);
353 #define mmap_lock() do { } while(0)
354 #define mmap_unlock() do { } while(0)
355 #endif
356
357 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
358
359 #if defined(CONFIG_USER_ONLY)
 360 /* Currently it is not recommended to allocate big chunks of data in
 361    user mode. It will change when a dedicated libc is used. */
362 #define USE_STATIC_CODE_GEN_BUFFER
363 #endif
364
365 #ifdef USE_STATIC_CODE_GEN_BUFFER
366 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
367 #endif
368
369 void code_gen_alloc(unsigned long tb_size)
370 {
371 #ifdef USE_STATIC_CODE_GEN_BUFFER
372 code_gen_buffer = static_code_gen_buffer;
373 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
374 map_exec(code_gen_buffer, code_gen_buffer_size);
375 #else
376 code_gen_buffer_size = tb_size;
377 if (code_gen_buffer_size == 0) {
378 #if defined(CONFIG_USER_ONLY)
379 /* in user mode, phys_ram_size is not meaningful */
380 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
381 #else
 382         /* XXX: needs adjustments */
383 code_gen_buffer_size = (int)(phys_ram_size / 4);
384 #endif
385 }
386 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
387 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
388 /* The code gen buffer location may have constraints depending on
389 the host cpu and OS */
390 #if defined(__linux__)
391 {
392 int flags;
393 flags = MAP_PRIVATE | MAP_ANONYMOUS;
394 #if defined(__x86_64__)
395 flags |= MAP_32BIT;
396 /* Cannot map more than that */
397 if (code_gen_buffer_size > (800 * 1024 * 1024))
398 code_gen_buffer_size = (800 * 1024 * 1024);
399 #endif
400 code_gen_buffer = mmap(NULL, code_gen_buffer_size,
401 PROT_WRITE | PROT_READ | PROT_EXEC,
402 flags, -1, 0);
403 if (code_gen_buffer == MAP_FAILED) {
404 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
405 exit(1);
406 }
407 }
408 #else
409 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
410 if (!code_gen_buffer) {
411 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
412 exit(1);
413 }
414 map_exec(code_gen_buffer, code_gen_buffer_size);
415 #endif
416 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
417 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
418 code_gen_buffer_max_size = code_gen_buffer_size -
419 code_gen_max_block_size();
420 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
421 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
422 }
423
424 /* Must be called before using the QEMU cpus. 'tb_size' is the size
425 (in bytes) allocated to the translation buffer. Zero means default
426 size. */
427 void cpu_exec_init_all(unsigned long tb_size)
428 {
429 cpu_gen_init();
430 code_gen_alloc(tb_size);
431 code_gen_ptr = code_gen_buffer;
432 page_init();
433 #if !defined(CONFIG_USER_ONLY)
434 io_mem_init();
435 #endif
436 }
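
/* Rough usage sketch (illustrative, not a prescribed sequence): a front end
   would typically do something like

       cpu_exec_init_all(0);            0 selects the default buffer size
       env = cpu_init(cpu_model);       target-specific constructor, which in
                                        turn registers the CPU via cpu_exec_init()

   before starting execution, since code_gen_alloc() and page_init() must run
   before any translation takes place. */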
437
438 void cpu_exec_init(CPUState *env)
439 {
440 CPUState **penv;
441 int cpu_index;
442
443 env->next_cpu = NULL;
444 penv = &first_cpu;
445 cpu_index = 0;
446 while (*penv != NULL) {
447 penv = (CPUState **)&(*penv)->next_cpu;
448 cpu_index++;
449 }
450 env->cpu_index = cpu_index;
451 env->nb_watchpoints = 0;
452 *penv = env;
453 }
454
455 static inline void invalidate_page_bitmap(PageDesc *p)
456 {
457 if (p->code_bitmap) {
458 qemu_free(p->code_bitmap);
459 p->code_bitmap = NULL;
460 }
461 p->code_write_count = 0;
462 }
463
464 /* set to NULL all the 'first_tb' fields in all PageDescs */
465 static void page_flush_tb(void)
466 {
467 int i, j;
468 PageDesc *p;
469
470 for(i = 0; i < L1_SIZE; i++) {
471 p = l1_map[i];
472 if (p) {
473 for(j = 0; j < L2_SIZE; j++) {
474 p->first_tb = NULL;
475 invalidate_page_bitmap(p);
476 p++;
477 }
478 }
479 }
480 }
481
482 /* flush all the translation blocks */
483 /* XXX: tb_flush is currently not thread safe */
484 void tb_flush(CPUState *env1)
485 {
486 CPUState *env;
487 #if defined(DEBUG_FLUSH)
488 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
489 (unsigned long)(code_gen_ptr - code_gen_buffer),
490 nb_tbs, nb_tbs > 0 ?
491 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
492 #endif
493 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
494 cpu_abort(env1, "Internal error: code buffer overflow\n");
495
496 nb_tbs = 0;
497
498 for(env = first_cpu; env != NULL; env = env->next_cpu) {
499 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
500 }
501
502 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
503 page_flush_tb();
504
505 code_gen_ptr = code_gen_buffer;
506 /* XXX: flush processor icache at this point if cache flush is
507 expensive */
508 tb_flush_count++;
509 }
510
511 #ifdef DEBUG_TB_CHECK
512
513 static void tb_invalidate_check(target_ulong address)
514 {
515 TranslationBlock *tb;
516 int i;
517 address &= TARGET_PAGE_MASK;
518 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
519 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
520 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
521 address >= tb->pc + tb->size)) {
522 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
523 address, (long)tb->pc, tb->size);
524 }
525 }
526 }
527 }
528
529 /* verify that all the pages have correct rights for code */
530 static void tb_page_check(void)
531 {
532 TranslationBlock *tb;
533 int i, flags1, flags2;
534
535 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
536 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
537 flags1 = page_get_flags(tb->pc);
538 flags2 = page_get_flags(tb->pc + tb->size - 1);
539 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
540 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
541 (long)tb->pc, tb->size, flags1, flags2);
542 }
543 }
544 }
545 }
546
547 void tb_jmp_check(TranslationBlock *tb)
548 {
549 TranslationBlock *tb1;
550 unsigned int n1;
551
552 /* suppress any remaining jumps to this TB */
553 tb1 = tb->jmp_first;
554 for(;;) {
555 n1 = (long)tb1 & 3;
556 tb1 = (TranslationBlock *)((long)tb1 & ~3);
557 if (n1 == 2)
558 break;
559 tb1 = tb1->jmp_next[n1];
560 }
561 /* check end of list */
562 if (tb1 != tb) {
563 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
564 }
565 }
566
567 #endif
568
569 /* invalidate one TB */
570 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
571 int next_offset)
572 {
573 TranslationBlock *tb1;
574 for(;;) {
575 tb1 = *ptb;
576 if (tb1 == tb) {
577 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
578 break;
579 }
580 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
581 }
582 }
583
584 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
585 {
586 TranslationBlock *tb1;
587 unsigned int n1;
588
589 for(;;) {
590 tb1 = *ptb;
591 n1 = (long)tb1 & 3;
592 tb1 = (TranslationBlock *)((long)tb1 & ~3);
593 if (tb1 == tb) {
594 *ptb = tb1->page_next[n1];
595 break;
596 }
597 ptb = &tb1->page_next[n1];
598 }
599 }
600
601 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
602 {
603 TranslationBlock *tb1, **ptb;
604 unsigned int n1;
605
606 ptb = &tb->jmp_next[n];
607 tb1 = *ptb;
608 if (tb1) {
609 /* find tb(n) in circular list */
610 for(;;) {
611 tb1 = *ptb;
612 n1 = (long)tb1 & 3;
613 tb1 = (TranslationBlock *)((long)tb1 & ~3);
614 if (n1 == n && tb1 == tb)
615 break;
616 if (n1 == 2) {
617 ptb = &tb1->jmp_first;
618 } else {
619 ptb = &tb1->jmp_next[n1];
620 }
621 }
622 /* now we can suppress tb(n) from the list */
623 *ptb = tb->jmp_next[n];
624
625 tb->jmp_next[n] = NULL;
626 }
627 }
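
/* Illustrative note on the pointer tagging used by the list walks above: the
   low two bits of a TranslationBlock link encode extra state, e.g.

       p->first_tb   = (TranslationBlock *)((long)tb | n);   n = 0 or 1: which of
                                                             the TB's two pages
                                                             the link belongs to
       tb->jmp_first = (TranslationBlock *)((long)tb | 2);   2 = end of the
                                                             circular jump list

   hence the recurring idiom:  n1 = (long)tb1 & 3;
                               tb1 = (TranslationBlock *)((long)tb1 & ~3);  */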
628
629 /* reset the jump entry 'n' of a TB so that it is not chained to
630 another TB */
631 static inline void tb_reset_jump(TranslationBlock *tb, int n)
632 {
633 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
634 }
635
636 static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
637 {
638 CPUState *env;
639 PageDesc *p;
640 unsigned int h, n1;
641 target_phys_addr_t phys_pc;
642 TranslationBlock *tb1, *tb2;
643
644 /* remove the TB from the hash list */
645 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
646 h = tb_phys_hash_func(phys_pc);
647 tb_remove(&tb_phys_hash[h], tb,
648 offsetof(TranslationBlock, phys_hash_next));
649
650 /* remove the TB from the page list */
651 if (tb->page_addr[0] != page_addr) {
652 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
653 tb_page_remove(&p->first_tb, tb);
654 invalidate_page_bitmap(p);
655 }
656 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
657 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
658 tb_page_remove(&p->first_tb, tb);
659 invalidate_page_bitmap(p);
660 }
661
662 tb_invalidated_flag = 1;
663
 664     /* remove the TB from each CPU's tb_jmp_cache */
665 h = tb_jmp_cache_hash_func(tb->pc);
666 for(env = first_cpu; env != NULL; env = env->next_cpu) {
667 if (env->tb_jmp_cache[h] == tb)
668 env->tb_jmp_cache[h] = NULL;
669 }
670
671 /* suppress this TB from the two jump lists */
672 tb_jmp_remove(tb, 0);
673 tb_jmp_remove(tb, 1);
674
675 /* suppress any remaining jumps to this TB */
676 tb1 = tb->jmp_first;
677 for(;;) {
678 n1 = (long)tb1 & 3;
679 if (n1 == 2)
680 break;
681 tb1 = (TranslationBlock *)((long)tb1 & ~3);
682 tb2 = tb1->jmp_next[n1];
683 tb_reset_jump(tb1, n1);
684 tb1->jmp_next[n1] = NULL;
685 tb1 = tb2;
686 }
687 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
688
689 tb_phys_invalidate_count++;
690 }
691
692 static inline void set_bits(uint8_t *tab, int start, int len)
693 {
694 int end, mask, end1;
695
696 end = start + len;
697 tab += start >> 3;
698 mask = 0xff << (start & 7);
699 if ((start & ~7) == (end & ~7)) {
700 if (start < end) {
701 mask &= ~(0xff << (end & 7));
702 *tab |= mask;
703 }
704 } else {
705 *tab++ |= mask;
706 start = (start + 8) & ~7;
707 end1 = end & ~7;
708 while (start < end1) {
709 *tab++ = 0xff;
710 start += 8;
711 }
712 if (start < end) {
713 mask = ~(0xff << (end & 7));
714 *tab |= mask;
715 }
716 }
717 }
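
/* Worked example (illustrative): set_bits() marks bits [start, start + len)
   of a bitmap in which bit k lives in byte k >> 3 at bit position k & 7:

       set_bits(tab, 5, 10)      marks bits 5..14
       tab[0] |= 0xe0            bits 5..7 of the first byte
       tab[1] |= 0x7f            bits 0..6 of the second byte

   build_page_bitmap() below uses this to record which bytes of a guest page
   are covered by translated code. */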
718
719 static void build_page_bitmap(PageDesc *p)
720 {
721 int n, tb_start, tb_end;
722 TranslationBlock *tb;
723
724 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
725 if (!p->code_bitmap)
726 return;
727 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
728
729 tb = p->first_tb;
730 while (tb != NULL) {
731 n = (long)tb & 3;
732 tb = (TranslationBlock *)((long)tb & ~3);
733 /* NOTE: this is subtle as a TB may span two physical pages */
734 if (n == 0) {
735 /* NOTE: tb_end may be after the end of the page, but
736 it is not a problem */
737 tb_start = tb->pc & ~TARGET_PAGE_MASK;
738 tb_end = tb_start + tb->size;
739 if (tb_end > TARGET_PAGE_SIZE)
740 tb_end = TARGET_PAGE_SIZE;
741 } else {
742 tb_start = 0;
743 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
744 }
745 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
746 tb = tb->page_next[n];
747 }
748 }
749
750 #ifdef TARGET_HAS_PRECISE_SMC
751
752 static void tb_gen_code(CPUState *env,
753 target_ulong pc, target_ulong cs_base, int flags,
754 int cflags)
755 {
756 TranslationBlock *tb;
757 uint8_t *tc_ptr;
758 target_ulong phys_pc, phys_page2, virt_page2;
759 int code_gen_size;
760
761 phys_pc = get_phys_addr_code(env, pc);
762 tb = tb_alloc(pc);
763 if (!tb) {
764 /* flush must be done */
765 tb_flush(env);
766 /* cannot fail at this point */
767 tb = tb_alloc(pc);
768 }
769 tc_ptr = code_gen_ptr;
770 tb->tc_ptr = tc_ptr;
771 tb->cs_base = cs_base;
772 tb->flags = flags;
773 tb->cflags = cflags;
774 cpu_gen_code(env, tb, &code_gen_size);
775 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
776
777 /* check next page if needed */
778 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
779 phys_page2 = -1;
780 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
781 phys_page2 = get_phys_addr_code(env, virt_page2);
782 }
783 tb_link_phys(tb, phys_pc, phys_page2);
784 }
785 #endif
786
787 /* invalidate all TBs which intersect with the target physical page
788 starting in range [start;end[. NOTE: start and end must refer to
789 the same physical page. 'is_cpu_write_access' should be true if called
790 from a real cpu write access: the virtual CPU will exit the current
791 TB if code is modified inside this TB. */
792 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
793 int is_cpu_write_access)
794 {
795 int n, current_tb_modified, current_tb_not_found, current_flags;
796 CPUState *env = cpu_single_env;
797 PageDesc *p;
798 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
799 target_ulong tb_start, tb_end;
800 target_ulong current_pc, current_cs_base;
801
802 p = page_find(start >> TARGET_PAGE_BITS);
803 if (!p)
804 return;
805 if (!p->code_bitmap &&
806 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
807 is_cpu_write_access) {
808 /* build code bitmap */
809 build_page_bitmap(p);
810 }
811
812 /* we remove all the TBs in the range [start, end[ */
813 /* XXX: see if in some cases it could be faster to invalidate all the code */
814 current_tb_not_found = is_cpu_write_access;
815 current_tb_modified = 0;
816 current_tb = NULL; /* avoid warning */
817 current_pc = 0; /* avoid warning */
818 current_cs_base = 0; /* avoid warning */
819 current_flags = 0; /* avoid warning */
820 tb = p->first_tb;
821 while (tb != NULL) {
822 n = (long)tb & 3;
823 tb = (TranslationBlock *)((long)tb & ~3);
824 tb_next = tb->page_next[n];
825 /* NOTE: this is subtle as a TB may span two physical pages */
826 if (n == 0) {
827 /* NOTE: tb_end may be after the end of the page, but
828 it is not a problem */
829 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
830 tb_end = tb_start + tb->size;
831 } else {
832 tb_start = tb->page_addr[1];
833 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
834 }
835 if (!(tb_end <= start || tb_start >= end)) {
836 #ifdef TARGET_HAS_PRECISE_SMC
837 if (current_tb_not_found) {
838 current_tb_not_found = 0;
839 current_tb = NULL;
840 if (env->mem_write_pc) {
841 /* now we have a real cpu fault */
842 current_tb = tb_find_pc(env->mem_write_pc);
843 }
844 }
845 if (current_tb == tb &&
846 !(current_tb->cflags & CF_SINGLE_INSN)) {
847 /* If we are modifying the current TB, we must stop
848 its execution. We could be more precise by checking
849 that the modification is after the current PC, but it
850 would require a specialized function to partially
851 restore the CPU state */
852
853 current_tb_modified = 1;
854 cpu_restore_state(current_tb, env,
855 env->mem_write_pc, NULL);
856 #if defined(TARGET_I386)
857 current_flags = env->hflags;
858 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
859 current_cs_base = (target_ulong)env->segs[R_CS].base;
860 current_pc = current_cs_base + env->eip;
861 #else
862 #error unsupported CPU
863 #endif
864 }
865 #endif /* TARGET_HAS_PRECISE_SMC */
866 /* we need to do that to handle the case where a signal
867 occurs while doing tb_phys_invalidate() */
868 saved_tb = NULL;
869 if (env) {
870 saved_tb = env->current_tb;
871 env->current_tb = NULL;
872 }
873 tb_phys_invalidate(tb, -1);
874 if (env) {
875 env->current_tb = saved_tb;
876 if (env->interrupt_request && env->current_tb)
877 cpu_interrupt(env, env->interrupt_request);
878 }
879 }
880 tb = tb_next;
881 }
882 #if !defined(CONFIG_USER_ONLY)
883 /* if no code remaining, no need to continue to use slow writes */
884 if (!p->first_tb) {
885 invalidate_page_bitmap(p);
886 if (is_cpu_write_access) {
887 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
888 }
889 }
890 #endif
891 #ifdef TARGET_HAS_PRECISE_SMC
892 if (current_tb_modified) {
893 /* we generate a block containing just the instruction
894 modifying the memory. It will ensure that it cannot modify
895 itself */
896 env->current_tb = NULL;
897 tb_gen_code(env, current_pc, current_cs_base, current_flags,
898 CF_SINGLE_INSN);
899 cpu_resume_from_signal(env, NULL);
900 }
901 #endif
902 }
903
904 /* len must be <= 8 and start must be a multiple of len */
905 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
906 {
907 PageDesc *p;
908 int offset, b;
909 #if 0
910 if (1) {
911 if (loglevel) {
912 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
913 cpu_single_env->mem_write_vaddr, len,
914 cpu_single_env->eip,
915 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
916 }
917 }
918 #endif
919 p = page_find(start >> TARGET_PAGE_BITS);
920 if (!p)
921 return;
922 if (p->code_bitmap) {
923 offset = start & ~TARGET_PAGE_MASK;
924 b = p->code_bitmap[offset >> 3] >> (offset & 7);
925 if (b & ((1 << len) - 1))
926 goto do_invalidate;
927 } else {
928 do_invalidate:
929 tb_invalidate_phys_page_range(start, start + len, 1);
930 }
931 }
932
933 #if !defined(CONFIG_SOFTMMU)
934 static void tb_invalidate_phys_page(target_phys_addr_t addr,
935 unsigned long pc, void *puc)
936 {
937 int n, current_flags, current_tb_modified;
938 target_ulong current_pc, current_cs_base;
939 PageDesc *p;
940 TranslationBlock *tb, *current_tb;
941 #ifdef TARGET_HAS_PRECISE_SMC
942 CPUState *env = cpu_single_env;
943 #endif
944
945 addr &= TARGET_PAGE_MASK;
946 p = page_find(addr >> TARGET_PAGE_BITS);
947 if (!p)
948 return;
949 tb = p->first_tb;
950 current_tb_modified = 0;
951 current_tb = NULL;
952 current_pc = 0; /* avoid warning */
953 current_cs_base = 0; /* avoid warning */
954 current_flags = 0; /* avoid warning */
955 #ifdef TARGET_HAS_PRECISE_SMC
956 if (tb && pc != 0) {
957 current_tb = tb_find_pc(pc);
958 }
959 #endif
960 while (tb != NULL) {
961 n = (long)tb & 3;
962 tb = (TranslationBlock *)((long)tb & ~3);
963 #ifdef TARGET_HAS_PRECISE_SMC
964 if (current_tb == tb &&
965 !(current_tb->cflags & CF_SINGLE_INSN)) {
966 /* If we are modifying the current TB, we must stop
967 its execution. We could be more precise by checking
968 that the modification is after the current PC, but it
969 would require a specialized function to partially
970 restore the CPU state */
971
972 current_tb_modified = 1;
973 cpu_restore_state(current_tb, env, pc, puc);
974 #if defined(TARGET_I386)
975 current_flags = env->hflags;
976 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
977 current_cs_base = (target_ulong)env->segs[R_CS].base;
978 current_pc = current_cs_base + env->eip;
979 #else
980 #error unsupported CPU
981 #endif
982 }
983 #endif /* TARGET_HAS_PRECISE_SMC */
984 tb_phys_invalidate(tb, addr);
985 tb = tb->page_next[n];
986 }
987 p->first_tb = NULL;
988 #ifdef TARGET_HAS_PRECISE_SMC
989 if (current_tb_modified) {
990 /* we generate a block containing just the instruction
991 modifying the memory. It will ensure that it cannot modify
992 itself */
993 env->current_tb = NULL;
994 tb_gen_code(env, current_pc, current_cs_base, current_flags,
995 CF_SINGLE_INSN);
996 cpu_resume_from_signal(env, puc);
997 }
998 #endif
999 }
1000 #endif
1001
1002 /* add the tb in the target page and protect it if necessary */
1003 static inline void tb_alloc_page(TranslationBlock *tb,
1004 unsigned int n, target_ulong page_addr)
1005 {
1006 PageDesc *p;
1007 TranslationBlock *last_first_tb;
1008
1009 tb->page_addr[n] = page_addr;
1010 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1011 tb->page_next[n] = p->first_tb;
1012 last_first_tb = p->first_tb;
1013 p->first_tb = (TranslationBlock *)((long)tb | n);
1014 invalidate_page_bitmap(p);
1015
1016 #if defined(TARGET_HAS_SMC) || 1
1017
1018 #if defined(CONFIG_USER_ONLY)
1019 if (p->flags & PAGE_WRITE) {
1020 target_ulong addr;
1021 PageDesc *p2;
1022 int prot;
1023
1024 /* force the host page as non writable (writes will have a
1025 page fault + mprotect overhead) */
1026 page_addr &= qemu_host_page_mask;
1027 prot = 0;
1028 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1029 addr += TARGET_PAGE_SIZE) {
1030
1031 p2 = page_find (addr >> TARGET_PAGE_BITS);
1032 if (!p2)
1033 continue;
1034 prot |= p2->flags;
1035 p2->flags &= ~PAGE_WRITE;
1036 page_get_flags(addr);
1037 }
1038 mprotect(g2h(page_addr), qemu_host_page_size,
1039 (prot & PAGE_BITS) & ~PAGE_WRITE);
1040 #ifdef DEBUG_TB_INVALIDATE
1041 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1042 page_addr);
1043 #endif
1044 }
1045 #else
1046 /* if some code is already present, then the pages are already
1047 protected. So we handle the case where only the first TB is
1048 allocated in a physical page */
1049 if (!last_first_tb) {
1050 tlb_protect_code(page_addr);
1051 }
1052 #endif
1053
1054 #endif /* TARGET_HAS_SMC */
1055 }
1056
1057 /* Allocate a new translation block. Flush the translation buffer if
1058 too many translation blocks or too much generated code. */
1059 TranslationBlock *tb_alloc(target_ulong pc)
1060 {
1061 TranslationBlock *tb;
1062
1063 if (nb_tbs >= code_gen_max_blocks ||
1064 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1065 return NULL;
1066 tb = &tbs[nb_tbs++];
1067 tb->pc = pc;
1068 tb->cflags = 0;
1069 return tb;
1070 }
1071
1072 /* add a new TB and link it to the physical page tables. phys_page2 is
1073 (-1) to indicate that only one page contains the TB. */
1074 void tb_link_phys(TranslationBlock *tb,
1075 target_ulong phys_pc, target_ulong phys_page2)
1076 {
1077 unsigned int h;
1078 TranslationBlock **ptb;
1079
1080 /* Grab the mmap lock to stop another thread invalidating this TB
1081 before we are done. */
1082 mmap_lock();
1083 /* add in the physical hash table */
1084 h = tb_phys_hash_func(phys_pc);
1085 ptb = &tb_phys_hash[h];
1086 tb->phys_hash_next = *ptb;
1087 *ptb = tb;
1088
1089 /* add in the page list */
1090 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1091 if (phys_page2 != -1)
1092 tb_alloc_page(tb, 1, phys_page2);
1093 else
1094 tb->page_addr[1] = -1;
1095
1096 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1097 tb->jmp_next[0] = NULL;
1098 tb->jmp_next[1] = NULL;
1099
1100 /* init original jump addresses */
1101 if (tb->tb_next_offset[0] != 0xffff)
1102 tb_reset_jump(tb, 0);
1103 if (tb->tb_next_offset[1] != 0xffff)
1104 tb_reset_jump(tb, 1);
1105
1106 #ifdef DEBUG_TB_CHECK
1107 tb_page_check();
1108 #endif
1109 mmap_unlock();
1110 }
1111
1112 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1113 tb[1].tc_ptr. Return NULL if not found */
1114 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1115 {
1116 int m_min, m_max, m;
1117 unsigned long v;
1118 TranslationBlock *tb;
1119
1120 if (nb_tbs <= 0)
1121 return NULL;
1122 if (tc_ptr < (unsigned long)code_gen_buffer ||
1123 tc_ptr >= (unsigned long)code_gen_ptr)
1124 return NULL;
1125 /* binary search (cf Knuth) */
1126 m_min = 0;
1127 m_max = nb_tbs - 1;
1128 while (m_min <= m_max) {
1129 m = (m_min + m_max) >> 1;
1130 tb = &tbs[m];
1131 v = (unsigned long)tb->tc_ptr;
1132 if (v == tc_ptr)
1133 return tb;
1134 else if (tc_ptr < v) {
1135 m_max = m - 1;
1136 } else {
1137 m_min = m + 1;
1138 }
1139 }
1140 return &tbs[m_max];
1141 }
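
/* Note (derived from the allocation scheme above): tbs[] is filled in
   allocation order and code_gen_ptr only grows between flushes, so tc_ptr is
   monotonically increasing over tbs[0..nb_tbs-1].  The binary search thus
   returns the TB whose generated code contains tc_ptr; e.g. with tc_ptr
   values { 0x1000, 0x1400, 0x1900 } a lookup of 0x14a0 terminates with
   m_max at the 0x1400 entry. */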
1142
1143 static void tb_reset_jump_recursive(TranslationBlock *tb);
1144
1145 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1146 {
1147 TranslationBlock *tb1, *tb_next, **ptb;
1148 unsigned int n1;
1149
1150 tb1 = tb->jmp_next[n];
1151 if (tb1 != NULL) {
1152 /* find head of list */
1153 for(;;) {
1154 n1 = (long)tb1 & 3;
1155 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1156 if (n1 == 2)
1157 break;
1158 tb1 = tb1->jmp_next[n1];
1159 }
1160         /* we are now sure that tb jumps to tb1 */
1161 tb_next = tb1;
1162
1163 /* remove tb from the jmp_first list */
1164 ptb = &tb_next->jmp_first;
1165 for(;;) {
1166 tb1 = *ptb;
1167 n1 = (long)tb1 & 3;
1168 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1169 if (n1 == n && tb1 == tb)
1170 break;
1171 ptb = &tb1->jmp_next[n1];
1172 }
1173 *ptb = tb->jmp_next[n];
1174 tb->jmp_next[n] = NULL;
1175
1176 /* suppress the jump to next tb in generated code */
1177 tb_reset_jump(tb, n);
1178
1179 /* suppress jumps in the tb on which we could have jumped */
1180 tb_reset_jump_recursive(tb_next);
1181 }
1182 }
1183
1184 static void tb_reset_jump_recursive(TranslationBlock *tb)
1185 {
1186 tb_reset_jump_recursive2(tb, 0);
1187 tb_reset_jump_recursive2(tb, 1);
1188 }
1189
1190 #if defined(TARGET_HAS_ICE)
1191 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1192 {
1193 target_phys_addr_t addr;
1194 target_ulong pd;
1195 ram_addr_t ram_addr;
1196 PhysPageDesc *p;
1197
1198 addr = cpu_get_phys_page_debug(env, pc);
1199 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1200 if (!p) {
1201 pd = IO_MEM_UNASSIGNED;
1202 } else {
1203 pd = p->phys_offset;
1204 }
1205 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1206 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1207 }
1208 #endif
1209
1210 /* Add a watchpoint. */
1211 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1212 {
1213 int i;
1214
1215 for (i = 0; i < env->nb_watchpoints; i++) {
1216 if (addr == env->watchpoint[i].vaddr)
1217 return 0;
1218 }
1219 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1220 return -1;
1221
1222 i = env->nb_watchpoints++;
1223 env->watchpoint[i].vaddr = addr;
1224 env->watchpoint[i].type = type;
1225 tlb_flush_page(env, addr);
1226 /* FIXME: This flush is needed because of the hack to make memory ops
1227 terminate the TB. It can be removed once the proper IO trap and
1228 re-execute bits are in. */
1229 tb_flush(env);
1230 return i;
1231 }
1232
1233 /* Remove a watchpoint. */
1234 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1235 {
1236 int i;
1237
1238 for (i = 0; i < env->nb_watchpoints; i++) {
1239 if (addr == env->watchpoint[i].vaddr) {
1240 env->nb_watchpoints--;
1241 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1242 tlb_flush_page(env, addr);
1243 return 0;
1244 }
1245 }
1246 return -1;
1247 }
1248
1249 /* Remove all watchpoints. */
1250 void cpu_watchpoint_remove_all(CPUState *env) {
1251 int i;
1252
1253 for (i = 0; i < env->nb_watchpoints; i++) {
1254 tlb_flush_page(env, env->watchpoint[i].vaddr);
1255 }
1256 env->nb_watchpoints = 0;
1257 }
1258
1259 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1260 breakpoint is reached */
1261 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1262 {
1263 #if defined(TARGET_HAS_ICE)
1264 int i;
1265
1266 for(i = 0; i < env->nb_breakpoints; i++) {
1267 if (env->breakpoints[i] == pc)
1268 return 0;
1269 }
1270
1271 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1272 return -1;
1273 env->breakpoints[env->nb_breakpoints++] = pc;
1274
1275 breakpoint_invalidate(env, pc);
1276 return 0;
1277 #else
1278 return -1;
1279 #endif
1280 }
1281
1282 /* remove all breakpoints */
1283 void cpu_breakpoint_remove_all(CPUState *env) {
1284 #if defined(TARGET_HAS_ICE)
1285 int i;
1286 for(i = 0; i < env->nb_breakpoints; i++) {
1287 breakpoint_invalidate(env, env->breakpoints[i]);
1288 }
1289 env->nb_breakpoints = 0;
1290 #endif
1291 }
1292
1293 /* remove a breakpoint */
1294 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1295 {
1296 #if defined(TARGET_HAS_ICE)
1297 int i;
1298 for(i = 0; i < env->nb_breakpoints; i++) {
1299 if (env->breakpoints[i] == pc)
1300 goto found;
1301 }
1302 return -1;
1303 found:
1304 env->nb_breakpoints--;
1305 if (i < env->nb_breakpoints)
1306 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1307
1308 breakpoint_invalidate(env, pc);
1309 return 0;
1310 #else
1311 return -1;
1312 #endif
1313 }
1314
1315 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1316 CPU loop after each instruction */
1317 void cpu_single_step(CPUState *env, int enabled)
1318 {
1319 #if defined(TARGET_HAS_ICE)
1320 if (env->singlestep_enabled != enabled) {
1321 env->singlestep_enabled = enabled;
1322         /* must flush all the translated code to avoid inconsistencies */
1323 /* XXX: only flush what is necessary */
1324 tb_flush(env);
1325 }
1326 #endif
1327 }
1328
1329 /* enable or disable low-level logging */
1330 void cpu_set_log(int log_flags)
1331 {
1332 loglevel = log_flags;
1333 if (loglevel && !logfile) {
1334 logfile = fopen(logfilename, log_append ? "a" : "w");
1335 if (!logfile) {
1336 perror(logfilename);
1337 _exit(1);
1338 }
1339 #if !defined(CONFIG_SOFTMMU)
1340 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1341 {
1342 static uint8_t logfile_buf[4096];
1343 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1344 }
1345 #else
1346 setvbuf(logfile, NULL, _IOLBF, 0);
1347 #endif
1348 log_append = 1;
1349 }
1350 if (!loglevel && logfile) {
1351 fclose(logfile);
1352 logfile = NULL;
1353 }
1354 }
1355
1356 void cpu_set_log_filename(const char *filename)
1357 {
1358 logfilename = strdup(filename);
1359 if (logfile) {
1360 fclose(logfile);
1361 logfile = NULL;
1362 }
1363 cpu_set_log(loglevel);
1364 }
1365
1366 /* mask must never be zero, except for A20 change call */
1367 void cpu_interrupt(CPUState *env, int mask)
1368 {
1369 #if !defined(USE_NPTL)
1370 TranslationBlock *tb;
1371 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1372 #endif
1373
1374     /* FIXME: This is probably not thread-safe.  A different thread could
1375        be in the middle of a read-modify-write operation.  */
1376 env->interrupt_request |= mask;
1377 #if defined(USE_NPTL)
1378 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1379 problem and hope the cpu will stop of its own accord. For userspace
1380 emulation this often isn't actually as bad as it sounds. Often
1381 signals are used primarily to interrupt blocking syscalls. */
1382 #else
1383 /* if the cpu is currently executing code, we must unlink it and
1384 all the potentially executing TB */
1385 tb = env->current_tb;
1386 if (tb && !testandset(&interrupt_lock)) {
1387 env->current_tb = NULL;
1388 tb_reset_jump_recursive(tb);
1389 resetlock(&interrupt_lock);
1390 }
1391 #endif
1392 }
1393
1394 void cpu_reset_interrupt(CPUState *env, int mask)
1395 {
1396 env->interrupt_request &= ~mask;
1397 }
1398
1399 CPULogItem cpu_log_items[] = {
1400 { CPU_LOG_TB_OUT_ASM, "out_asm",
1401 "show generated host assembly code for each compiled TB" },
1402 { CPU_LOG_TB_IN_ASM, "in_asm",
1403 "show target assembly code for each compiled TB" },
1404 { CPU_LOG_TB_OP, "op",
1405 "show micro ops for each compiled TB" },
1406 { CPU_LOG_TB_OP_OPT, "op_opt",
1407 "show micro ops "
1408 #ifdef TARGET_I386
1409 "before eflags optimization and "
1410 #endif
1411 "after liveness analysis" },
1412 { CPU_LOG_INT, "int",
1413 "show interrupts/exceptions in short format" },
1414 { CPU_LOG_EXEC, "exec",
1415 "show trace before each executed TB (lots of logs)" },
1416 { CPU_LOG_TB_CPU, "cpu",
1417 "show CPU state before block translation" },
1418 #ifdef TARGET_I386
1419 { CPU_LOG_PCALL, "pcall",
1420 "show protected mode far calls/returns/exceptions" },
1421 #endif
1422 #ifdef DEBUG_IOPORT
1423 { CPU_LOG_IOPORT, "ioport",
1424 "show all i/o ports accesses" },
1425 #endif
1426 { 0, NULL, NULL },
1427 };
1428
1429 static int cmp1(const char *s1, int n, const char *s2)
1430 {
1431 if (strlen(s2) != n)
1432 return 0;
1433 return memcmp(s1, s2, n) == 0;
1434 }
1435
1436 /* takes a comma-separated list of log masks. Returns 0 on error. */
1437 int cpu_str_to_log_mask(const char *str)
1438 {
1439 CPULogItem *item;
1440 int mask;
1441 const char *p, *p1;
1442
1443 p = str;
1444 mask = 0;
1445 for(;;) {
1446 p1 = strchr(p, ',');
1447 if (!p1)
1448 p1 = p + strlen(p);
1449 if(cmp1(p,p1-p,"all")) {
1450 for(item = cpu_log_items; item->mask != 0; item++) {
1451 mask |= item->mask;
1452 }
1453 } else {
1454 for(item = cpu_log_items; item->mask != 0; item++) {
1455 if (cmp1(p, p1 - p, item->name))
1456 goto found;
1457 }
1458 return 0;
1459 }
1460 found:
1461 mask |= item->mask;
1462 if (*p1 != ',')
1463 break;
1464 p = p1 + 1;
1465 }
1466 return mask;
1467 }
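
/* Usage sketch (illustrative): the log mask parser above is meant to be fed
   a string such as the argument of a "-d" style option:

       mask = cpu_str_to_log_mask("in_asm,op");   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP
       cpu_set_log(mask);                         opens the log file on first use

   an item name that is not in cpu_log_items[] makes the parser return 0. */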
1468
1469 void cpu_abort(CPUState *env, const char *fmt, ...)
1470 {
1471 va_list ap;
1472 va_list ap2;
1473
1474 va_start(ap, fmt);
1475 va_copy(ap2, ap);
1476 fprintf(stderr, "qemu: fatal: ");
1477 vfprintf(stderr, fmt, ap);
1478 fprintf(stderr, "\n");
1479 #ifdef TARGET_I386
1480 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1481 #else
1482 cpu_dump_state(env, stderr, fprintf, 0);
1483 #endif
1484 if (logfile) {
1485 fprintf(logfile, "qemu: fatal: ");
1486 vfprintf(logfile, fmt, ap2);
1487 fprintf(logfile, "\n");
1488 #ifdef TARGET_I386
1489 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1490 #else
1491 cpu_dump_state(env, logfile, fprintf, 0);
1492 #endif
1493 fflush(logfile);
1494 fclose(logfile);
1495 }
1496 va_end(ap2);
1497 va_end(ap);
1498 abort();
1499 }
1500
1501 CPUState *cpu_copy(CPUState *env)
1502 {
1503 CPUState *new_env = cpu_init(env->cpu_model_str);
1504 /* preserve chaining and index */
1505 CPUState *next_cpu = new_env->next_cpu;
1506 int cpu_index = new_env->cpu_index;
1507 memcpy(new_env, env, sizeof(CPUState));
1508 new_env->next_cpu = next_cpu;
1509 new_env->cpu_index = cpu_index;
1510 return new_env;
1511 }
1512
1513 #if !defined(CONFIG_USER_ONLY)
1514
1515 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1516 {
1517 unsigned int i;
1518
1519 /* Discard jump cache entries for any tb which might potentially
1520 overlap the flushed page. */
1521 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1522 memset (&env->tb_jmp_cache[i], 0,
1523 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1524
1525 i = tb_jmp_cache_hash_page(addr);
1526 memset (&env->tb_jmp_cache[i], 0,
1527 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1528 }
1529
1530 /* NOTE: if flush_global is true, also flush global entries (not
1531 implemented yet) */
1532 void tlb_flush(CPUState *env, int flush_global)
1533 {
1534 int i;
1535
1536 #if defined(DEBUG_TLB)
1537 printf("tlb_flush:\n");
1538 #endif
1539 /* must reset current TB so that interrupts cannot modify the
1540 links while we are modifying them */
1541 env->current_tb = NULL;
1542
1543 for(i = 0; i < CPU_TLB_SIZE; i++) {
1544 env->tlb_table[0][i].addr_read = -1;
1545 env->tlb_table[0][i].addr_write = -1;
1546 env->tlb_table[0][i].addr_code = -1;
1547 env->tlb_table[1][i].addr_read = -1;
1548 env->tlb_table[1][i].addr_write = -1;
1549 env->tlb_table[1][i].addr_code = -1;
1550 #if (NB_MMU_MODES >= 3)
1551 env->tlb_table[2][i].addr_read = -1;
1552 env->tlb_table[2][i].addr_write = -1;
1553 env->tlb_table[2][i].addr_code = -1;
1554 #if (NB_MMU_MODES == 4)
1555 env->tlb_table[3][i].addr_read = -1;
1556 env->tlb_table[3][i].addr_write = -1;
1557 env->tlb_table[3][i].addr_code = -1;
1558 #endif
1559 #endif
1560 }
1561
1562 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1563
1564 #ifdef USE_KQEMU
1565 if (env->kqemu_enabled) {
1566 kqemu_flush(env, flush_global);
1567 }
1568 #endif
1569 tlb_flush_count++;
1570 }
1571
1572 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1573 {
1574 if (addr == (tlb_entry->addr_read &
1575 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1576 addr == (tlb_entry->addr_write &
1577 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1578 addr == (tlb_entry->addr_code &
1579 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1580 tlb_entry->addr_read = -1;
1581 tlb_entry->addr_write = -1;
1582 tlb_entry->addr_code = -1;
1583 }
1584 }
1585
1586 void tlb_flush_page(CPUState *env, target_ulong addr)
1587 {
1588 int i;
1589
1590 #if defined(DEBUG_TLB)
1591 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1592 #endif
1593 /* must reset current TB so that interrupts cannot modify the
1594 links while we are modifying them */
1595 env->current_tb = NULL;
1596
1597 addr &= TARGET_PAGE_MASK;
1598 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1599 tlb_flush_entry(&env->tlb_table[0][i], addr);
1600 tlb_flush_entry(&env->tlb_table[1][i], addr);
1601 #if (NB_MMU_MODES >= 3)
1602 tlb_flush_entry(&env->tlb_table[2][i], addr);
1603 #if (NB_MMU_MODES == 4)
1604 tlb_flush_entry(&env->tlb_table[3][i], addr);
1605 #endif
1606 #endif
1607
1608 tlb_flush_jmp_cache(env, addr);
1609
1610 #ifdef USE_KQEMU
1611 if (env->kqemu_enabled) {
1612 kqemu_flush_page(env, addr);
1613 }
1614 #endif
1615 }
1616
1617 /* update the TLBs so that writes to code in the virtual page 'addr'
1618 can be detected */
1619 static void tlb_protect_code(ram_addr_t ram_addr)
1620 {
1621 cpu_physical_memory_reset_dirty(ram_addr,
1622 ram_addr + TARGET_PAGE_SIZE,
1623 CODE_DIRTY_FLAG);
1624 }
1625
1626 /* update the TLB so that writes in the physical page 'ram_addr' are no longer
1627    tested for self-modifying code */
1628 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1629 target_ulong vaddr)
1630 {
1631 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1632 }
1633
1634 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1635 unsigned long start, unsigned long length)
1636 {
1637 unsigned long addr;
1638 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1639 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1640 if ((addr - start) < length) {
1641 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1642 }
1643 }
1644 }
1645
1646 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1647 int dirty_flags)
1648 {
1649 CPUState *env;
1650 unsigned long length, start1;
1651 int i, mask, len;
1652 uint8_t *p;
1653
1654 start &= TARGET_PAGE_MASK;
1655 end = TARGET_PAGE_ALIGN(end);
1656
1657 length = end - start;
1658 if (length == 0)
1659 return;
1660 len = length >> TARGET_PAGE_BITS;
1661 #ifdef USE_KQEMU
1662 /* XXX: should not depend on cpu context */
1663 env = first_cpu;
1664 if (env->kqemu_enabled) {
1665 ram_addr_t addr;
1666 addr = start;
1667 for(i = 0; i < len; i++) {
1668 kqemu_set_notdirty(env, addr);
1669 addr += TARGET_PAGE_SIZE;
1670 }
1671 }
1672 #endif
1673 mask = ~dirty_flags;
1674 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1675 for(i = 0; i < len; i++)
1676 p[i] &= mask;
1677
1678 /* we modify the TLB cache so that the dirty bit will be set again
1679 when accessing the range */
1680 start1 = start + (unsigned long)phys_ram_base;
1681 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1682 for(i = 0; i < CPU_TLB_SIZE; i++)
1683 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1684 for(i = 0; i < CPU_TLB_SIZE; i++)
1685 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1686 #if (NB_MMU_MODES >= 3)
1687 for(i = 0; i < CPU_TLB_SIZE; i++)
1688 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1689 #if (NB_MMU_MODES == 4)
1690 for(i = 0; i < CPU_TLB_SIZE; i++)
1691 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1692 #endif
1693 #endif
1694 }
1695 }
1696
1697 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1698 {
1699 ram_addr_t ram_addr;
1700
1701 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1702 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1703 tlb_entry->addend - (unsigned long)phys_ram_base;
1704 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1705 tlb_entry->addr_write |= TLB_NOTDIRTY;
1706 }
1707 }
1708 }
1709
1710 /* update the TLB according to the current state of the dirty bits */
1711 void cpu_tlb_update_dirty(CPUState *env)
1712 {
1713 int i;
1714 for(i = 0; i < CPU_TLB_SIZE; i++)
1715 tlb_update_dirty(&env->tlb_table[0][i]);
1716 for(i = 0; i < CPU_TLB_SIZE; i++)
1717 tlb_update_dirty(&env->tlb_table[1][i]);
1718 #if (NB_MMU_MODES >= 3)
1719 for(i = 0; i < CPU_TLB_SIZE; i++)
1720 tlb_update_dirty(&env->tlb_table[2][i]);
1721 #if (NB_MMU_MODES == 4)
1722 for(i = 0; i < CPU_TLB_SIZE; i++)
1723 tlb_update_dirty(&env->tlb_table[3][i]);
1724 #endif
1725 #endif
1726 }
1727
1728 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1729 {
1730 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1731 tlb_entry->addr_write = vaddr;
1732 }
1733
1734 /* update the TLB corresponding to virtual page vaddr
1735 so that it is no longer dirty */
1736 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1737 {
1738 int i;
1739
1740 vaddr &= TARGET_PAGE_MASK;
1741 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1742 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1743 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1744 #if (NB_MMU_MODES >= 3)
1745 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1746 #if (NB_MMU_MODES == 4)
1747 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1748 #endif
1749 #endif
1750 }
1751
1752 /* add a new TLB entry. At most one entry for a given virtual address
1753 is permitted. Return 0 if OK or 2 if the page could not be mapped
1754 (can only happen in non SOFTMMU mode for I/O pages or pages
1755 conflicting with the host address space). */
1756 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1757 target_phys_addr_t paddr, int prot,
1758 int mmu_idx, int is_softmmu)
1759 {
1760 PhysPageDesc *p;
1761 unsigned long pd;
1762 unsigned int index;
1763 target_ulong address;
1764 target_ulong code_address;
1765 target_phys_addr_t addend;
1766 int ret;
1767 CPUTLBEntry *te;
1768 int i;
1769 target_phys_addr_t iotlb;
1770
1771 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1772 if (!p) {
1773 pd = IO_MEM_UNASSIGNED;
1774 } else {
1775 pd = p->phys_offset;
1776 }
1777 #if defined(DEBUG_TLB)
1778 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1779 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1780 #endif
1781
1782 ret = 0;
1783 address = vaddr;
1784 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1785 /* IO memory case (romd handled later) */
1786 address |= TLB_MMIO;
1787 }
1788 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1789 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1790 /* Normal RAM. */
1791 iotlb = pd & TARGET_PAGE_MASK;
1792 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1793 iotlb |= IO_MEM_NOTDIRTY;
1794 else
1795 iotlb |= IO_MEM_ROM;
1796 } else {
1797         /* IO handlers are currently passed a physical address.
1798 It would be nice to pass an offset from the base address
1799 of that region. This would avoid having to special case RAM,
1800 and avoid full address decoding in every device.
1801 We can't use the high bits of pd for this because
1802 IO_MEM_ROMD uses these as a ram address. */
1803 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1804 }
1805
1806 code_address = address;
1807 /* Make accesses to pages with watchpoints go via the
1808 watchpoint trap routines. */
1809 for (i = 0; i < env->nb_watchpoints; i++) {
1810 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1811 iotlb = io_mem_watch + paddr;
1812 /* TODO: The memory case can be optimized by not trapping
1813 reads of pages with a write breakpoint. */
1814 address |= TLB_MMIO;
1815 }
1816 }
1817
1818 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1819 env->iotlb[mmu_idx][index] = iotlb - vaddr;
1820 te = &env->tlb_table[mmu_idx][index];
1821 te->addend = addend - vaddr;
1822 if (prot & PAGE_READ) {
1823 te->addr_read = address;
1824 } else {
1825 te->addr_read = -1;
1826 }
1827
1828 if (prot & PAGE_EXEC) {
1829 te->addr_code = code_address;
1830 } else {
1831 te->addr_code = -1;
1832 }
1833 if (prot & PAGE_WRITE) {
1834 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1835 (pd & IO_MEM_ROMD)) {
1836 /* Write access calls the I/O callback. */
1837 te->addr_write = address | TLB_MMIO;
1838 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1839 !cpu_physical_memory_is_dirty(pd)) {
1840 te->addr_write = address | TLB_NOTDIRTY;
1841 } else {
1842 te->addr_write = address;
1843 }
1844 } else {
1845 te->addr_write = -1;
1846 }
1847 return ret;
1848 }
1849
1850 #else
1851
1852 void tlb_flush(CPUState *env, int flush_global)
1853 {
1854 }
1855
1856 void tlb_flush_page(CPUState *env, target_ulong addr)
1857 {
1858 }
1859
1860 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1861 target_phys_addr_t paddr, int prot,
1862 int mmu_idx, int is_softmmu)
1863 {
1864 return 0;
1865 }
1866
1867 /* dump memory mappings */
1868 void page_dump(FILE *f)
1869 {
1870 unsigned long start, end;
1871 int i, j, prot, prot1;
1872 PageDesc *p;
1873
1874 fprintf(f, "%-8s %-8s %-8s %s\n",
1875 "start", "end", "size", "prot");
1876 start = -1;
1877 end = -1;
1878 prot = 0;
1879 for(i = 0; i <= L1_SIZE; i++) {
1880 if (i < L1_SIZE)
1881 p = l1_map[i];
1882 else
1883 p = NULL;
1884 for(j = 0;j < L2_SIZE; j++) {
1885 if (!p)
1886 prot1 = 0;
1887 else
1888 prot1 = p[j].flags;
1889 if (prot1 != prot) {
1890 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1891 if (start != -1) {
1892 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1893 start, end, end - start,
1894 prot & PAGE_READ ? 'r' : '-',
1895 prot & PAGE_WRITE ? 'w' : '-',
1896 prot & PAGE_EXEC ? 'x' : '-');
1897 }
1898 if (prot1 != 0)
1899 start = end;
1900 else
1901 start = -1;
1902 prot = prot1;
1903 }
1904 if (!p)
1905 break;
1906 }
1907 }
1908 }
1909
1910 int page_get_flags(target_ulong address)
1911 {
1912 PageDesc *p;
1913
1914 p = page_find(address >> TARGET_PAGE_BITS);
1915 if (!p)
1916 return 0;
1917 return p->flags;
1918 }
1919
1920 /* modify the flags of a page and invalidate the code if
1921    necessary. The flag PAGE_WRITE_ORG is set automatically
1922 depending on PAGE_WRITE */
1923 void page_set_flags(target_ulong start, target_ulong end, int flags)
1924 {
1925 PageDesc *p;
1926 target_ulong addr;
1927
1928 /* mmap_lock should already be held. */
1929 start = start & TARGET_PAGE_MASK;
1930 end = TARGET_PAGE_ALIGN(end);
1931 if (flags & PAGE_WRITE)
1932 flags |= PAGE_WRITE_ORG;
1933 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1934 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1935 /* We may be called for host regions that are outside guest
1936 address space. */
1937 if (!p)
1938 return;
1939 /* if the write protection is set, then we invalidate the code
1940 inside */
1941 if (!(p->flags & PAGE_WRITE) &&
1942 (flags & PAGE_WRITE) &&
1943 p->first_tb) {
1944 tb_invalidate_phys_page(addr, 0, NULL);
1945 }
1946 p->flags = flags;
1947 }
1948 }
1949
1950 int page_check_range(target_ulong start, target_ulong len, int flags)
1951 {
1952 PageDesc *p;
1953 target_ulong end;
1954 target_ulong addr;
1955
1956     end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1957 start = start & TARGET_PAGE_MASK;
1958
1959 if( end < start )
1960 /* we've wrapped around */
1961 return -1;
1962 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1963 p = page_find(addr >> TARGET_PAGE_BITS);
1964 if (!p)
1965 return -1;
1966 if (!(p->flags & PAGE_VALID))
1967 return -1;
1968
1969 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
1970 return -1;
1971 if (flags & PAGE_WRITE) {
1972 if (!(p->flags & PAGE_WRITE_ORG))
1973 return -1;
1974 /* unprotect the page if it was put read-only because it
1975 contains translated code */
1976 if (!(p->flags & PAGE_WRITE)) {
1977 if (!page_unprotect(addr, 0, NULL))
1978 return -1;
1979 }
1980 return 0;
1981 }
1982 }
1983 return 0;
1984 }
1985
1986 /* called from signal handler: invalidate the code and unprotect the
1987 page. Return TRUE if the fault was successfully handled. */
1988 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1989 {
1990 unsigned int page_index, prot, pindex;
1991 PageDesc *p, *p1;
1992 target_ulong host_start, host_end, addr;
1993
1994 /* Technically this isn't safe inside a signal handler. However, we
1995 know this only ever happens in a synchronous SEGV handler, so in
1996 practice it seems to be ok. */
1997 mmap_lock();
1998
1999 host_start = address & qemu_host_page_mask;
2000 page_index = host_start >> TARGET_PAGE_BITS;
2001 p1 = page_find(page_index);
2002 if (!p1) {
2003 mmap_unlock();
2004 return 0;
2005 }
2006 host_end = host_start + qemu_host_page_size;
2007 p = p1;
2008 prot = 0;
2009 for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2010 prot |= p->flags;
2011 p++;
2012 }
2013 /* if the page was really writable, then we change its
2014 protection back to writable */
2015 if (prot & PAGE_WRITE_ORG) {
2016 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2017 if (!(p1[pindex].flags & PAGE_WRITE)) {
2018 mprotect((void *)g2h(host_start), qemu_host_page_size,
2019 (prot & PAGE_BITS) | PAGE_WRITE);
2020 p1[pindex].flags |= PAGE_WRITE;
2021 /* and since the content will be modified, we must invalidate
2022 the corresponding translated code. */
2023 tb_invalidate_phys_page(address, pc, puc);
2024 #ifdef DEBUG_TB_CHECK
2025 tb_invalidate_check(address);
2026 #endif
2027 mmap_unlock();
2028 return 1;
2029 }
2030 }
2031 mmap_unlock();
2032 return 0;
2033 }
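/* Summary of the self-modifying-code path in user mode: pages holding
   translated code are mprotect()ed read-only, so a guest store to such
   a page faults; the SEGV handler calls page_unprotect(), which
   invalidates the translated blocks on the host page, restores
   PAGE_WRITE with mprotect(), and returns 1 to signal that the fault
   was handled. */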
2034
2035 static inline void tlb_set_dirty(CPUState *env,
2036 unsigned long addr, target_ulong vaddr)
2037 {
2038 }
2039 #endif /* defined(CONFIG_USER_ONLY) */
2040
2041 #if !defined(CONFIG_USER_ONLY)
2042 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2043 ram_addr_t memory);
2044 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2045 ram_addr_t orig_memory);
2046 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2047 need_subpage) \
2048 do { \
2049 if (addr > start_addr) \
2050 start_addr2 = 0; \
2051 else { \
2052 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2053 if (start_addr2 > 0) \
2054 need_subpage = 1; \
2055 } \
2056 \
2057 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2058 end_addr2 = TARGET_PAGE_SIZE - 1; \
2059 else { \
2060 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2061 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2062 need_subpage = 1; \
2063 } \
2064 } while (0)
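/* Worked example, assuming a 4 KiB target page (TARGET_PAGE_SIZE =
   0x1000): for addr = 0x1000, start_addr = 0x1080 and orig_size =
   0x100, the macro computes start_addr2 = 0x80 and end_addr2 = 0x17f
   and sets need_subpage, because the registered region covers only
   bytes 0x80..0x17f of that page. */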
2065
2066 /* register physical memory. 'size' must be a multiple of the target
2067 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2068 io memory page */
2069 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2070 ram_addr_t size,
2071 ram_addr_t phys_offset)
2072 {
2073 target_phys_addr_t addr, end_addr;
2074 PhysPageDesc *p;
2075 CPUState *env;
2076 ram_addr_t orig_size = size;
2077 void *subpage;
2078
2079 #ifdef USE_KQEMU
2080 /* XXX: should not depend on cpu context */
2081 env = first_cpu;
2082 if (env->kqemu_enabled) {
2083 kqemu_set_phys_mem(start_addr, size, phys_offset);
2084 }
2085 #endif
2086 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2087 end_addr = start_addr + (target_phys_addr_t)size;
2088 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2089 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2090 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2091 ram_addr_t orig_memory = p->phys_offset;
2092 target_phys_addr_t start_addr2, end_addr2;
2093 int need_subpage = 0;
2094
2095 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2096 need_subpage);
2097 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2098 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2099 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2100 &p->phys_offset, orig_memory);
2101 } else {
2102 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2103 >> IO_MEM_SHIFT];
2104 }
2105 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2106 } else {
2107 p->phys_offset = phys_offset;
2108 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2109 (phys_offset & IO_MEM_ROMD))
2110 phys_offset += TARGET_PAGE_SIZE;
2111 }
2112 } else {
2113 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2114 p->phys_offset = phys_offset;
2115 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2116 (phys_offset & IO_MEM_ROMD))
2117 phys_offset += TARGET_PAGE_SIZE;
2118 else {
2119 target_phys_addr_t start_addr2, end_addr2;
2120 int need_subpage = 0;
2121
2122 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2123 end_addr2, need_subpage);
2124
2125 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2126 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2127 &p->phys_offset, IO_MEM_UNASSIGNED);
2128 subpage_register(subpage, start_addr2, end_addr2,
2129 phys_offset);
2130 }
2131 }
2132 }
2133 }
2134
2135 /* since each CPU stores ram addresses in its TLB cache, we must
2136 reset the modified entries */
2137 /* XXX: slow! */
2138 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2139 tlb_flush(env, 1);
2140 }
2141 }
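/* Usage sketch (the addresses, sizes and device names below are
   made-up examples, not symbols defined in this file):
       ram_addr_t ram_off = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0x00000000, ram_size,
                                    ram_off | IO_MEM_RAM);
       int mmio = cpu_register_io_memory(0, dev_read, dev_write, dev);
       cpu_register_physical_memory(0xf0000000, 0x1000, mmio);
   RAM and ROM pages carry a ram offset in the page-aligned bits plus
   the region type in the low bits; MMIO pages carry the value returned
   by cpu_register_io_memory(). */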
2142
2143 /* XXX: temporary until new memory mapping API */
2144 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2145 {
2146 PhysPageDesc *p;
2147
2148 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2149 if (!p)
2150 return IO_MEM_UNASSIGNED;
2151 return p->phys_offset;
2152 }
2153
2154 /* XXX: better than nothing */
2155 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2156 {
2157 ram_addr_t addr;
2158 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2159 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2160 (uint64_t)size, (uint64_t)phys_ram_size);
2161 abort();
2162 }
2163 addr = phys_ram_alloc_offset;
2164 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2165 return addr;
2166 }
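/* qemu_ram_alloc() is a simple bump allocator over the preallocated
   phys_ram_base area: it returns the current offset and advances
   phys_ram_alloc_offset by the page-aligned size, and qemu_ram_free()
   below never reclaims anything. For example, starting from an empty
   pool with 4 KiB pages, two successive calls to qemu_ram_alloc(0x1000)
   return offsets 0 and 0x1000. */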
2167
2168 void qemu_ram_free(ram_addr_t addr)
2169 {
2170 }
2171
2172 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2173 {
2174 #ifdef DEBUG_UNASSIGNED
2175 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2176 #endif
2177 #ifdef TARGET_SPARC
2178 do_unassigned_access(addr, 0, 0, 0);
2179 #elif defined(TARGET_CRIS)
2180 do_unassigned_access(addr, 0, 0, 0);
2181 #endif
2182 return 0;
2183 }
2184
2185 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2186 {
2187 #ifdef DEBUG_UNASSIGNED
2188 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2189 #endif
2190 #ifdef TARGET_SPARC
2191 do_unassigned_access(addr, 1, 0, 0);
2192 #elif defined(TARGET_CRIS)
2193 do_unassigned_access(addr, 1, 0, 0);
2194 #endif
2195 }
2196
2197 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2198 unassigned_mem_readb,
2199 unassigned_mem_readb,
2200 unassigned_mem_readb,
2201 };
2202
2203 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2204 unassigned_mem_writeb,
2205 unassigned_mem_writeb,
2206 unassigned_mem_writeb,
2207 };
2208
2209 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2210 uint32_t val)
2211 {
2212 int dirty_flags;
2213 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2214 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2215 #if !defined(CONFIG_USER_ONLY)
2216 tb_invalidate_phys_page_fast(ram_addr, 1);
2217 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2218 #endif
2219 }
2220 stb_p(phys_ram_base + ram_addr, val);
2221 #ifdef USE_KQEMU
2222 if (cpu_single_env->kqemu_enabled &&
2223 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2224 kqemu_modify_page(cpu_single_env, ram_addr);
2225 #endif
2226 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2227 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2228 /* we remove the notdirty callback only if the code has been
2229 flushed */
2230 if (dirty_flags == 0xff)
2231 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
2232 }
2233
2234 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2235 uint32_t val)
2236 {
2237 int dirty_flags;
2238 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2239 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2240 #if !defined(CONFIG_USER_ONLY)
2241 tb_invalidate_phys_page_fast(ram_addr, 2);
2242 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2243 #endif
2244 }
2245 stw_p(phys_ram_base + ram_addr, val);
2246 #ifdef USE_KQEMU
2247 if (cpu_single_env->kqemu_enabled &&
2248 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2249 kqemu_modify_page(cpu_single_env, ram_addr);
2250 #endif
2251 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2252 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2253 /* we remove the notdirty callback only if the code has been
2254 flushed */
2255 if (dirty_flags == 0xff)
2256 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
2257 }
2258
2259 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2260 uint32_t val)
2261 {
2262 int dirty_flags;
2263 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2264 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2265 #if !defined(CONFIG_USER_ONLY)
2266 tb_invalidate_phys_page_fast(ram_addr, 4);
2267 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2268 #endif
2269 }
2270 stl_p(phys_ram_base + ram_addr, val);
2271 #ifdef USE_KQEMU
2272 if (cpu_single_env->kqemu_enabled &&
2273 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2274 kqemu_modify_page(cpu_single_env, ram_addr);
2275 #endif
2276 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2277 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2278 /* we remove the notdirty callback only if the code has been
2279 flushed */
2280 if (dirty_flags == 0xff)
2281 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
2282 }
2283
2284 static CPUReadMemoryFunc *error_mem_read[3] = {
2285 NULL, /* never used */
2286 NULL, /* never used */
2287 NULL, /* never used */
2288 };
2289
2290 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2291 notdirty_mem_writeb,
2292 notdirty_mem_writew,
2293 notdirty_mem_writel,
2294 };
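/* How the not-dirty write path works: a RAM page that still contains
   translated code is entered in the TLB with TLB_NOTDIRTY, so guest
   stores are routed to the handlers above. Each handler invalidates
   any translated blocks on the page, performs the store into
   phys_ram_base, and sets the dirty flags; once no code is left on the
   page (dirty_flags == 0xff), tlb_set_dirty() switches the TLB entry
   back to a plain RAM mapping so later stores take the fast path. */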
2295
2296 /* Generate a debug exception if a watchpoint has been hit. */
2297 static void check_watchpoint(int offset, int flags)
2298 {
2299 CPUState *env = cpu_single_env;
2300 target_ulong vaddr;
2301 int i;
2302
2303 vaddr = (env->mem_write_vaddr & TARGET_PAGE_MASK) + offset;
2304 for (i = 0; i < env->nb_watchpoints; i++) {
2305 if (vaddr == env->watchpoint[i].vaddr
2306 && (env->watchpoint[i].type & flags)) {
2307 env->watchpoint_hit = i + 1;
2308 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2309 break;
2310 }
2311 }
2312 }
2313
2314 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2315 so these check for a hit then pass through to the normal out-of-line
2316 phys routines. */
2317 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2318 {
2319 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2320 return ldub_phys(addr);
2321 }
2322
2323 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2324 {
2325 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2326 return lduw_phys(addr);
2327 }
2328
2329 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2330 {
2331 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2332 return ldl_phys(addr);
2333 }
2334
2335 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2336 uint32_t val)
2337 {
2338 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2339 stb_phys(addr, val);
2340 }
2341
2342 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2343 uint32_t val)
2344 {
2345 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2346 stw_phys(addr, val);
2347 }
2348
2349 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2350 uint32_t val)
2351 {
2352 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2353 stl_phys(addr, val);
2354 }
2355
2356 static CPUReadMemoryFunc *watch_mem_read[3] = {
2357 watch_mem_readb,
2358 watch_mem_readw,
2359 watch_mem_readl,
2360 };
2361
2362 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2363 watch_mem_writeb,
2364 watch_mem_writew,
2365 watch_mem_writel,
2366 };
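/* Pages containing a watchpoint are mapped through io_mem_watch
   (registered in io_mem_init() below), so every load or store first
   reaches check_watchpoint(), which raises CPU_INTERRUPT_DEBUG when
   the address and access type match; the access then completes through
   the ordinary ld*_phys and st*_phys helpers. */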
2367
2368 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2369 unsigned int len)
2370 {
2371 uint32_t ret;
2372 unsigned int idx;
2373
2374 idx = SUBPAGE_IDX(addr - mmio->base);
2375 #if defined(DEBUG_SUBPAGE)
2376 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2377 mmio, len, addr, idx);
2378 #endif
2379 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2380
2381 return ret;
2382 }
2383
2384 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2385 uint32_t value, unsigned int len)
2386 {
2387 unsigned int idx;
2388
2389 idx = SUBPAGE_IDX(addr - mmio->base);
2390 #if defined(DEBUG_SUBPAGE)
2391 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2392 mmio, len, addr, idx, value);
2393 #endif
2394 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2395 }
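/* A subpage splits one target page into chunks addressed by
   SUBPAGE_IDX(); subpage_register() fills mem_read/mem_write with the
   handlers of the memory region covering each chunk, and the wrappers
   below simply forward to subpage_readlen()/subpage_writelen() with
   len = 0 (byte), 1 (word) or 2 (long word). */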
2396
2397 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2398 {
2399 #if defined(DEBUG_SUBPAGE)
2400 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2401 #endif
2402
2403 return subpage_readlen(opaque, addr, 0);
2404 }
2405
2406 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2407 uint32_t value)
2408 {
2409 #if defined(DEBUG_SUBPAGE)
2410 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2411 #endif
2412 subpage_writelen(opaque, addr, value, 0);
2413 }
2414
2415 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2416 {
2417 #if defined(DEBUG_SUBPAGE)
2418 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2419 #endif
2420
2421 return subpage_readlen(opaque, addr, 1);
2422 }
2423
2424 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2425 uint32_t value)
2426 {
2427 #if defined(DEBUG_SUBPAGE)
2428 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2429 #endif
2430 subpage_writelen(opaque, addr, value, 1);
2431 }
2432
2433 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2434 {
2435 #if defined(DEBUG_SUBPAGE)
2436 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2437 #endif
2438
2439 return subpage_readlen(opaque, addr, 2);
2440 }
2441
2442 static void subpage_writel (void *opaque,
2443 target_phys_addr_t addr, uint32_t value)
2444 {
2445 #if defined(DEBUG_SUBPAGE)
2446 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2447 #endif
2448 subpage_writelen(opaque, addr, value, 2);
2449 }
2450
2451 static CPUReadMemoryFunc *subpage_read[] = {
2452 &subpage_readb,
2453 &subpage_readw,
2454 &subpage_readl,
2455 };
2456
2457 static CPUWriteMemoryFunc *subpage_write[] = {
2458 &subpage_writeb,
2459 &subpage_writew,
2460 &subpage_writel,
2461 };
2462
2463 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2464 ram_addr_t memory)
2465 {
2466 int idx, eidx;
2467 unsigned int i;
2468
2469 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2470 return -1;
2471 idx = SUBPAGE_IDX(start);
2472 eidx = SUBPAGE_IDX(end);
2473 #if defined(DEBUG_SUBPAGE)
2474 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2475 mmio, start, end, idx, eidx, memory);
2476 #endif
2477 memory >>= IO_MEM_SHIFT;
2478 for (; idx <= eidx; idx++) {
2479 for (i = 0; i < 4; i++) {
2480 if (io_mem_read[memory][i]) {
2481 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2482 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2483 }
2484 if (io_mem_write[memory][i]) {
2485 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2486 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2487 }
2488 }
2489 }
2490
2491 return 0;
2492 }
2493
2494 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2495 ram_addr_t orig_memory)
2496 {
2497 subpage_t *mmio;
2498 int subpage_memory;
2499
2500 mmio = qemu_mallocz(sizeof(subpage_t));
2501 if (mmio != NULL) {
2502 mmio->base = base;
2503 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2504 #if defined(DEBUG_SUBPAGE)
2505 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2506 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2507 #endif
2508 *phys = subpage_memory | IO_MEM_SUBPAGE;
2509 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2510 }
2511
2512 return mmio;
2513 }
2514
2515 static void io_mem_init(void)
2516 {
2517 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2518 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2519 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2520 io_mem_nb = 5;
2521
2522 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2523 watch_mem_write, NULL);
2524 /* alloc dirty bits array */
2525 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2526 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2527 }
2528
2529 /* mem_read and mem_write are arrays of function pointers used to
2530 access byte (index 0), word (index 1) and dword (index 2) quantities.
2531 Functions can be omitted with a NULL function pointer. The
2532 registered functions may be modified dynamically later.
2533 If io_index is non-zero, the corresponding io zone is
2534 modified. If it is zero, a new io zone is allocated. The return
2535 value can be used with cpu_register_physical_memory(). (-1) is
2536 returned on error. */
2537 int cpu_register_io_memory(int io_index,
2538 CPUReadMemoryFunc **mem_read,
2539 CPUWriteMemoryFunc **mem_write,
2540 void *opaque)
2541 {
2542 int i, subwidth = 0;
2543
2544 if (io_index <= 0) {
2545 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2546 return -1;
2547 io_index = io_mem_nb++;
2548 } else {
2549 if (io_index >= IO_MEM_NB_ENTRIES)
2550 return -1;
2551 }
2552
2553 for(i = 0; i < 3; i++) {
2554 if (!mem_read[i] || !mem_write[i])
2555 subwidth = IO_MEM_SUBWIDTH;
2556 io_mem_read[io_index][i] = mem_read[i];
2557 io_mem_write[io_index][i] = mem_write[i];
2558 }
2559 io_mem_opaque[io_index] = opaque;
2560 return (io_index << IO_MEM_SHIFT) | subwidth;
2561 }
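/* Usage sketch (my_dev_read, my_dev_write and 'dev' are placeholder
   names for a device's handler tables and opaque state, not symbols
   defined in this file):
       static CPUReadMemoryFunc *my_dev_read[3] = {
           my_dev_readb, my_dev_readw, my_dev_readl,
       };
       static CPUWriteMemoryFunc *my_dev_write[3] = {
           my_dev_writeb, my_dev_writew, my_dev_writel,
       };
       int mmio = cpu_register_io_memory(0, my_dev_read, my_dev_write, dev);
       cpu_register_physical_memory(base_addr, 0x1000, mmio);
   Passing io_index == 0 allocates a new slot; the return value encodes
   the slot shifted by IO_MEM_SHIFT plus IO_MEM_SUBWIDTH when some
   access sizes were left NULL. */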
2562
2563 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2564 {
2565 return io_mem_write[io_index >> IO_MEM_SHIFT];
2566 }
2567
2568 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2569 {
2570 return io_mem_read[io_index >> IO_MEM_SHIFT];
2571 }
2572
2573 #endif /* !defined(CONFIG_USER_ONLY) */
2574
2575 /* physical memory access (slow version, mainly for debug) */
2576 #if defined(CONFIG_USER_ONLY)
2577 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2578 int len, int is_write)
2579 {
2580 int l, flags;
2581 target_ulong page;
2582 void * p;
2583
2584 while (len > 0) {
2585 page = addr & TARGET_PAGE_MASK;
2586 l = (page + TARGET_PAGE_SIZE) - addr;
2587 if (l > len)
2588 l = len;
2589 flags = page_get_flags(page);
2590 if (!(flags & PAGE_VALID))
2591 return;
2592 if (is_write) {
2593 if (!(flags & PAGE_WRITE))
2594 return;
2595 /* XXX: this code should not depend on lock_user */
2596 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2597 /* FIXME - should this return an error rather than just fail? */
2598 return;
2599 memcpy(p, buf, l);
2600 unlock_user(p, addr, l);
2601 } else {
2602 if (!(flags & PAGE_READ))
2603 return;
2604 /* XXX: this code should not depend on lock_user */
2605 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2606 /* FIXME - should this return an error rather than just fail? */
2607 return;
2608 memcpy(buf, p, l);
2609 unlock_user(p, addr, 0);
2610 }
2611 len -= l;
2612 buf += l;
2613 addr += l;
2614 }
2615 }
2616
2617 #else
2618 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2619 int len, int is_write)
2620 {
2621 int l, io_index;
2622 uint8_t *ptr;
2623 uint32_t val;
2624 target_phys_addr_t page;
2625 unsigned long pd;
2626 PhysPageDesc *p;
2627
2628 while (len > 0) {
2629 page = addr & TARGET_PAGE_MASK;
2630 l = (page + TARGET_PAGE_SIZE) - addr;
2631 if (l > len)
2632 l = len;
2633 p = phys_page_find(page >> TARGET_PAGE_BITS);
2634 if (!p) {
2635 pd = IO_MEM_UNASSIGNED;
2636 } else {
2637 pd = p->phys_offset;
2638 }
2639
2640 if (is_write) {
2641 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2642 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2643 /* XXX: could force cpu_single_env to NULL to avoid
2644 potential bugs */
2645 if (l >= 4 && ((addr & 3) == 0)) {
2646 /* 32 bit write access */
2647 val = ldl_p(buf);
2648 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2649 l = 4;
2650 } else if (l >= 2 && ((addr & 1) == 0)) {
2651 /* 16 bit write access */
2652 val = lduw_p(buf);
2653 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2654 l = 2;
2655 } else {
2656 /* 8 bit write access */
2657 val = ldub_p(buf);
2658 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2659 l = 1;
2660 }
2661 } else {
2662 unsigned long addr1;
2663 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2664 /* RAM case */
2665 ptr = phys_ram_base + addr1;
2666 memcpy(ptr, buf, l);
2667 if (!cpu_physical_memory_is_dirty(addr1)) {
2668 /* invalidate code */
2669 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2670 /* set dirty bit */
2671 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2672 (0xff & ~CODE_DIRTY_FLAG);
2673 }
2674 }
2675 } else {
2676 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2677 !(pd & IO_MEM_ROMD)) {
2678 /* I/O case */
2679 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2680 if (l >= 4 && ((addr & 3) == 0)) {
2681 /* 32 bit read access */
2682 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2683 stl_p(buf, val);
2684 l = 4;
2685 } else if (l >= 2 && ((addr & 1) == 0)) {
2686 /* 16 bit read access */
2687 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2688 stw_p(buf, val);
2689 l = 2;
2690 } else {
2691 /* 8 bit read access */
2692 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2693 stb_p(buf, val);
2694 l = 1;
2695 }
2696 } else {
2697 /* RAM case */
2698 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2699 (addr & ~TARGET_PAGE_MASK);
2700 memcpy(buf, ptr, l);
2701 }
2702 }
2703 len -= l;
2704 buf += l;
2705 addr += l;
2706 }
2707 }
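/* Each iteration above transfers at most one target page: MMIO pages
   go through io_mem_read/io_mem_write in the widest naturally aligned
   chunks (4, 2 or 1 bytes), while RAM pages are copied directly via
   phys_ram_base, with translated code invalidated and dirty bits set
   on writes. The cpu_physical_memory_read/write wrappers used
   elsewhere in this file are thin shorthands for this function. */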
2708
2709 /* used for ROM loading: can write to RAM and ROM */
2710 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2711 const uint8_t *buf, int len)
2712 {
2713 int l;
2714 uint8_t *ptr;
2715 target_phys_addr_t page;
2716 unsigned long pd;
2717 PhysPageDesc *p;
2718
2719 while (len > 0) {
2720 page = addr & TARGET_PAGE_MASK;
2721 l = (page + TARGET_PAGE_SIZE) - addr;
2722 if (l > len)
2723 l = len;
2724 p = phys_page_find(page >> TARGET_PAGE_BITS);
2725 if (!p) {
2726 pd = IO_MEM_UNASSIGNED;
2727 } else {
2728 pd = p->phys_offset;
2729 }
2730
2731 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2732 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2733 !(pd & IO_MEM_ROMD)) {
2734 /* do nothing */
2735 } else {
2736 unsigned long addr1;
2737 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2738 /* ROM/RAM case */
2739 ptr = phys_ram_base + addr1;
2740 memcpy(ptr, buf, l);
2741 }
2742 len -= l;
2743 buf += l;
2744 addr += l;
2745 }
2746 }
2747
2748
2749 /* warning: addr must be aligned */
2750 uint32_t ldl_phys(target_phys_addr_t addr)
2751 {
2752 int io_index;
2753 uint8_t *ptr;
2754 uint32_t val;
2755 unsigned long pd;
2756 PhysPageDesc *p;
2757
2758 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2759 if (!p) {
2760 pd = IO_MEM_UNASSIGNED;
2761 } else {
2762 pd = p->phys_offset;
2763 }
2764
2765 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2766 !(pd & IO_MEM_ROMD)) {
2767 /* I/O case */
2768 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2769 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2770 } else {
2771 /* RAM case */
2772 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2773 (addr & ~TARGET_PAGE_MASK);
2774 val = ldl_p(ptr);
2775 }
2776 return val;
2777 }
2778
2779 /* warning: addr must be aligned */
2780 uint64_t ldq_phys(target_phys_addr_t addr)
2781 {
2782 int io_index;
2783 uint8_t *ptr;
2784 uint64_t val;
2785 unsigned long pd;
2786 PhysPageDesc *p;
2787
2788 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2789 if (!p) {
2790 pd = IO_MEM_UNASSIGNED;
2791 } else {
2792 pd = p->phys_offset;
2793 }
2794
2795 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2796 !(pd & IO_MEM_ROMD)) {
2797 /* I/O case */
2798 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2799 #ifdef TARGET_WORDS_BIGENDIAN
2800 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2801 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2802 #else
2803 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2804 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2805 #endif
2806 } else {
2807 /* RAM case */
2808 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2809 (addr & ~TARGET_PAGE_MASK);
2810 val = ldq_p(ptr);
2811 }
2812 return val;
2813 }
2814
2815 /* XXX: optimize */
2816 uint32_t ldub_phys(target_phys_addr_t addr)
2817 {
2818 uint8_t val;
2819 cpu_physical_memory_read(addr, &val, 1);
2820 return val;
2821 }
2822
2823 /* XXX: optimize */
2824 uint32_t lduw_phys(target_phys_addr_t addr)
2825 {
2826 uint16_t val;
2827 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2828 return tswap16(val);
2829 }
2830
2831 /* warning: addr must be aligned. The ram page is not marked as dirty
2832 and the code inside is not invalidated. It is useful if the dirty
2833 bits are used to track modified PTEs */
2834 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2835 {
2836 int io_index;
2837 uint8_t *ptr;
2838 unsigned long pd;
2839 PhysPageDesc *p;
2840
2841 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2842 if (!p) {
2843 pd = IO_MEM_UNASSIGNED;
2844 } else {
2845 pd = p->phys_offset;
2846 }
2847
2848 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2849 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2850 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2851 } else {
2852 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2853 (addr & ~TARGET_PAGE_MASK);
2854 stl_p(ptr, val);
2855 }
2856 }
2857
2858 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2859 {
2860 int io_index;
2861 uint8_t *ptr;
2862 unsigned long pd;
2863 PhysPageDesc *p;
2864
2865 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2866 if (!p) {
2867 pd = IO_MEM_UNASSIGNED;
2868 } else {
2869 pd = p->phys_offset;
2870 }
2871
2872 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2873 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2874 #ifdef TARGET_WORDS_BIGENDIAN
2875 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2876 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2877 #else
2878 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2879 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2880 #endif
2881 } else {
2882 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2883 (addr & ~TARGET_PAGE_MASK);
2884 stq_p(ptr, val);
2885 }
2886 }
2887
2888 /* warning: addr must be aligned */
2889 void stl_phys(target_phys_addr_t addr, uint32_t val)
2890 {
2891 int io_index;
2892 uint8_t *ptr;
2893 unsigned long pd;
2894 PhysPageDesc *p;
2895
2896 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2897 if (!p) {
2898 pd = IO_MEM_UNASSIGNED;
2899 } else {
2900 pd = p->phys_offset;
2901 }
2902
2903 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2904 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2905 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2906 } else {
2907 unsigned long addr1;
2908 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2909 /* RAM case */
2910 ptr = phys_ram_base + addr1;
2911 stl_p(ptr, val);
2912 if (!cpu_physical_memory_is_dirty(addr1)) {
2913 /* invalidate code */
2914 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2915 /* set dirty bit */
2916 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2917 (0xff & ~CODE_DIRTY_FLAG);
2918 }
2919 }
2920 }
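/* Note the contrast with stl_phys_notdirty() above: stl_phys() keeps
   the translated-code cache and the dirty bitmap consistent after the
   store, exactly like the RAM write path in cpu_physical_memory_rw(),
   whereas the notdirty variant deliberately skips both steps. */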
2921
2922 /* XXX: optimize */
2923 void stb_phys(target_phys_addr_t addr, uint32_t val)
2924 {
2925 uint8_t v = val;
2926 cpu_physical_memory_write(addr, &v, 1);
2927 }
2928
2929 /* XXX: optimize */
2930 void stw_phys(target_phys_addr_t addr, uint32_t val)
2931 {
2932 uint16_t v = tswap16(val);
2933 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2934 }
2935
2936 /* XXX: optimize */
2937 void stq_phys(target_phys_addr_t addr, uint64_t val)
2938 {
2939 val = tswap64(val);
2940 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2941 }
2942
2943 #endif
2944
2945 /* virtual memory access for debug */
2946 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2947 uint8_t *buf, int len, int is_write)
2948 {
2949 int l;
2950 target_phys_addr_t phys_addr;
2951 target_ulong page;
2952
2953 while (len > 0) {
2954 page = addr & TARGET_PAGE_MASK;
2955 phys_addr = cpu_get_phys_page_debug(env, page);
2956 /* if no physical page mapped, return an error */
2957 if (phys_addr == -1)
2958 return -1;
2959 l = (page + TARGET_PAGE_SIZE) - addr;
2960 if (l > len)
2961 l = len;
2962 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2963 buf, l, is_write);
2964 len -= l;
2965 buf += l;
2966 addr += l;
2967 }
2968 return 0;
2969 }
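/* This helper walks guest virtual addresses page by page, translating
   each page with cpu_get_phys_page_debug() and then transferring the
   bytes with cpu_physical_memory_rw(); it is intended for debugger
   style accesses (for instance a gdb stub reading guest memory) rather
   than for the normal emulation path. */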
2970
2971 void dump_exec_info(FILE *f,
2972 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2973 {
2974 int i, target_code_size, max_target_code_size;
2975 int direct_jmp_count, direct_jmp2_count, cross_page;
2976 TranslationBlock *tb;
2977
2978 target_code_size = 0;
2979 max_target_code_size = 0;
2980 cross_page = 0;
2981 direct_jmp_count = 0;
2982 direct_jmp2_count = 0;
2983 for(i = 0; i < nb_tbs; i++) {
2984 tb = &tbs[i];
2985 target_code_size += tb->size;
2986 if (tb->size > max_target_code_size)
2987 max_target_code_size = tb->size;
2988 if (tb->page_addr[1] != -1)
2989 cross_page++;
2990 if (tb->tb_next_offset[0] != 0xffff) {
2991 direct_jmp_count++;
2992 if (tb->tb_next_offset[1] != 0xffff) {
2993 direct_jmp2_count++;
2994 }
2995 }
2996 }
2997 /* XXX: avoid using doubles? */
2998 cpu_fprintf(f, "Translation buffer state:\n");
2999 cpu_fprintf(f, "gen code size %ld/%ld\n",
3000 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3001 cpu_fprintf(f, "TB count %d/%d\n",
3002 nb_tbs, code_gen_max_blocks);
3003 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3004 nb_tbs ? target_code_size / nb_tbs : 0,
3005 max_target_code_size);
3006 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3007 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3008 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3009 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3010 cross_page,
3011 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3012 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3013 direct_jmp_count,
3014 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3015 direct_jmp2_count,
3016 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3017 cpu_fprintf(f, "\nStatistics:\n");
3018 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3019 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3020 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3021 tcg_dump_info(f, cpu_fprintf);
3022 }
3023
3024 #if !defined(CONFIG_USER_ONLY)
3025
3026 #define MMUSUFFIX _cmmu
3027 #define GETPC() NULL
3028 #define env cpu_single_env
3029 #define SOFTMMU_CODE_ACCESS
3030
3031 #define SHIFT 0
3032 #include "softmmu_template.h"
3033
3034 #define SHIFT 1
3035 #include "softmmu_template.h"
3036
3037 #define SHIFT 2
3038 #include "softmmu_template.h"
3039
3040 #define SHIFT 3
3041 #include "softmmu_template.h"
3042
3043 #undef env
3044
3045 #endif