1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
19 */
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
34
35 #include "cpu.h"
36 #include "exec-all.h"
37 #include "qemu-common.h"
38 #include "tcg.h"
39 #include "hw/hw.h"
40 #include "osdep.h"
41 #include "kvm.h"
42 #if defined(CONFIG_USER_ONLY)
43 #include <qemu.h>
44 #endif
45
46 //#define DEBUG_TB_INVALIDATE
47 //#define DEBUG_FLUSH
48 //#define DEBUG_TLB
49 //#define DEBUG_UNASSIGNED
50
51 /* make various TB consistency checks */
52 //#define DEBUG_TB_CHECK
53 //#define DEBUG_TLB_CHECK
54
55 //#define DEBUG_IOPORT
56 //#define DEBUG_SUBPAGE
57
58 #if !defined(CONFIG_USER_ONLY)
59 /* TB consistency checks only implemented for usermode emulation. */
60 #undef DEBUG_TB_CHECK
61 #endif
62
63 #define SMC_BITMAP_USE_THRESHOLD 10
64
65 #if defined(TARGET_SPARC64)
66 #define TARGET_PHYS_ADDR_SPACE_BITS 41
67 #elif defined(TARGET_SPARC)
68 #define TARGET_PHYS_ADDR_SPACE_BITS 36
69 #elif defined(TARGET_ALPHA)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 42
71 #define TARGET_VIRT_ADDR_SPACE_BITS 42
72 #elif defined(TARGET_PPC64)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 42
74 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 36
78 #else
79 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
80 #define TARGET_PHYS_ADDR_SPACE_BITS 32
81 #endif
82
83 static TranslationBlock *tbs;
84 int code_gen_max_blocks;
85 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
86 static int nb_tbs;
87 /* any access to the tbs or the page table must use this lock */
88 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
89
90 #if defined(__arm__) || defined(__sparc_v9__)
91 /* The prologue must be reachable with a direct jump. ARM and Sparc64
92 have limited branch ranges (possibly also PPC) so place it in a
93    section close to the code segment. */
94 #define code_gen_section \
95 __attribute__((__section__(".gen_code"))) \
96 __attribute__((aligned (32)))
97 #else
98 #define code_gen_section \
99 __attribute__((aligned (32)))
100 #endif
101
102 uint8_t code_gen_prologue[1024] code_gen_section;
103 static uint8_t *code_gen_buffer;
104 static unsigned long code_gen_buffer_size;
105 /* threshold to flush the translated code buffer */
106 static unsigned long code_gen_buffer_max_size;
107 uint8_t *code_gen_ptr;
108
109 #if !defined(CONFIG_USER_ONLY)
110 ram_addr_t phys_ram_size;
111 int phys_ram_fd;
112 uint8_t *phys_ram_base;
113 uint8_t *phys_ram_dirty;
114 static int in_migration;
115 static ram_addr_t phys_ram_alloc_offset = 0;
116 #endif
117
118 CPUState *first_cpu;
119 /* current CPU in the current thread. It is only valid inside
120 cpu_exec() */
121 CPUState *cpu_single_env;
122 /* 0 = Do not count executed instructions.
123 1 = Precise instruction counting.
124 2 = Adaptive rate instruction counting. */
125 int use_icount = 0;
126 /* Current instruction counter. While executing translated code this may
127 include some instructions that have not yet been executed. */
128 int64_t qemu_icount;
129
130 typedef struct PageDesc {
131 /* list of TBs intersecting this ram page */
132 TranslationBlock *first_tb;
133 /* in order to optimize self modifying code, we count the number
134 of lookups we do to a given page to use a bitmap */
135 unsigned int code_write_count;
136 uint8_t *code_bitmap;
137 #if defined(CONFIG_USER_ONLY)
138 unsigned long flags;
139 #endif
140 } PageDesc;
141
142 typedef struct PhysPageDesc {
143 /* offset in host memory of the page + io_index in the low bits */
144 ram_addr_t phys_offset;
145 ram_addr_t region_offset;
146 } PhysPageDesc;
147
148 #define L2_BITS 10
149 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
150 /* XXX: this is a temporary hack for the alpha target.
151  * In the future, this will be replaced by a multi-level table
152  * able to handle the complete 64-bit address space.
153  */
154 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
155 #else
156 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
157 #endif
158
159 #define L1_SIZE (1 << L1_BITS)
160 #define L2_SIZE (1 << L2_BITS)
161
162 unsigned long qemu_real_host_page_size;
163 unsigned long qemu_host_page_bits;
164 unsigned long qemu_host_page_size;
165 unsigned long qemu_host_page_mask;
166
167 /* XXX: for system emulation, it could just be an array */
168 static PageDesc *l1_map[L1_SIZE];
169 static PhysPageDesc **l1_phys_map;
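
/* Illustrative sketch (not compiled): how page_find() below splits a
   target page index across the two-level table above.  Assumes the
   usual 4 KiB target pages (TARGET_PAGE_BITS = 12) with L2_BITS = 10. */
#if 0
static PageDesc *example_page_lookup(target_ulong address)
{
    target_ulong index = address >> TARGET_PAGE_BITS; /* page number */
    PageDesc *l2 = l1_map[index >> L2_BITS];          /* L1 slot */
    return l2 ? &l2[index & (L2_SIZE - 1)] : NULL;    /* L2 entry */
}
#endif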
170
171 #if !defined(CONFIG_USER_ONLY)
172 static void io_mem_init(void);
173
174 /* io memory support */
175 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
176 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
177 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
178 static char io_mem_used[IO_MEM_NB_ENTRIES];
179 static int io_mem_watch;
180 #endif
181
182 /* log support */
183 static const char *logfilename = "/tmp/qemu.log";
184 FILE *logfile;
185 int loglevel;
186 static int log_append = 0;
187
188 /* statistics */
189 static int tlb_flush_count;
190 static int tb_flush_count;
191 static int tb_phys_invalidate_count;
192
193 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
194 typedef struct subpage_t {
195 target_phys_addr_t base;
196 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
197 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
198 void *opaque[TARGET_PAGE_SIZE][2][4];
199 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
200 } subpage_t;
201
202 #ifdef _WIN32
203 static void map_exec(void *addr, long size)
204 {
205 DWORD old_protect;
206 VirtualProtect(addr, size,
207 PAGE_EXECUTE_READWRITE, &old_protect);
208
209 }
210 #else
211 static void map_exec(void *addr, long size)
212 {
213 unsigned long start, end, page_size;
214
215 page_size = getpagesize();
216 start = (unsigned long)addr;
217 start &= ~(page_size - 1);
218
219 end = (unsigned long)addr + size;
220 end += page_size - 1;
221 end &= ~(page_size - 1);
222
223 mprotect((void *)start, end - start,
224 PROT_READ | PROT_WRITE | PROT_EXEC);
225 }
226 #endif
227
228 static void page_init(void)
229 {
230     /* NOTE: we can always assume that qemu_host_page_size >=
231 TARGET_PAGE_SIZE */
232 #ifdef _WIN32
233 {
234 SYSTEM_INFO system_info;
235
236 GetSystemInfo(&system_info);
237 qemu_real_host_page_size = system_info.dwPageSize;
238 }
239 #else
240 qemu_real_host_page_size = getpagesize();
241 #endif
242 if (qemu_host_page_size == 0)
243 qemu_host_page_size = qemu_real_host_page_size;
244 if (qemu_host_page_size < TARGET_PAGE_SIZE)
245 qemu_host_page_size = TARGET_PAGE_SIZE;
246 qemu_host_page_bits = 0;
247 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
248 qemu_host_page_bits++;
249 qemu_host_page_mask = ~(qemu_host_page_size - 1);
250 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
251 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
252
253 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
254 {
255 long long startaddr, endaddr;
256 FILE *f;
257 int n;
258
259 mmap_lock();
260 last_brk = (unsigned long)sbrk(0);
261 f = fopen("/proc/self/maps", "r");
262 if (f) {
263 do {
264 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
265 if (n == 2) {
266 startaddr = MIN(startaddr,
267 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
268 endaddr = MIN(endaddr,
269 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
270 page_set_flags(startaddr & TARGET_PAGE_MASK,
271 TARGET_PAGE_ALIGN(endaddr),
272 PAGE_RESERVED);
273 }
274 } while (!feof(f));
275 fclose(f);
276 }
277 mmap_unlock();
278 }
279 #endif
280 }
281
282 static inline PageDesc **page_l1_map(target_ulong index)
283 {
284 #if TARGET_LONG_BITS > 32
285 /* Host memory outside guest VM. For 32-bit targets we have already
286 excluded high addresses. */
287 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
288 return NULL;
289 #endif
290 return &l1_map[index >> L2_BITS];
291 }
292
293 static inline PageDesc *page_find_alloc(target_ulong index)
294 {
295 PageDesc **lp, *p;
296 lp = page_l1_map(index);
297 if (!lp)
298 return NULL;
299
300 p = *lp;
301 if (!p) {
302 /* allocate if not found */
303 #if defined(CONFIG_USER_ONLY)
304 size_t len = sizeof(PageDesc) * L2_SIZE;
305 /* Don't use qemu_malloc because it may recurse. */
306 p = mmap(0, len, PROT_READ | PROT_WRITE,
307 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
308 *lp = p;
309 if (h2g_valid(p)) {
310 unsigned long addr = h2g(p);
311 page_set_flags(addr & TARGET_PAGE_MASK,
312 TARGET_PAGE_ALIGN(addr + len),
313 PAGE_RESERVED);
314 }
315 #else
316 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
317 *lp = p;
318 #endif
319 }
320 return p + (index & (L2_SIZE - 1));
321 }
322
323 static inline PageDesc *page_find(target_ulong index)
324 {
325 PageDesc **lp, *p;
326 lp = page_l1_map(index);
327 if (!lp)
328 return NULL;
329
330 p = *lp;
331 if (!p)
332         return NULL;
333 return p + (index & (L2_SIZE - 1));
334 }
335
336 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
337 {
338 void **lp, **p;
339 PhysPageDesc *pd;
340
341 p = (void **)l1_phys_map;
342 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
343
344 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
345 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
346 #endif
347 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
348 p = *lp;
349 if (!p) {
350 /* allocate if not found */
351 if (!alloc)
352 return NULL;
353 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
354 memset(p, 0, sizeof(void *) * L1_SIZE);
355 *lp = p;
356 }
357 #endif
358 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
359 pd = *lp;
360 if (!pd) {
361 int i;
362 /* allocate if not found */
363 if (!alloc)
364 return NULL;
365 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
366 *lp = pd;
367 for (i = 0; i < L2_SIZE; i++) {
368 pd[i].phys_offset = IO_MEM_UNASSIGNED;
369 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
370 }
371 }
372 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
373 }
374
375 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
376 {
377 return phys_page_find_alloc(index, 0);
378 }
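
/* Illustrative usage (sketch; 'paddr' is an assumed physical address
   variable): descriptors are indexed by physical page number. */
#if 0
PhysPageDesc *pd = phys_page_find(paddr >> TARGET_PAGE_BITS);
ram_addr_t phys_offset = pd ? pd->phys_offset : IO_MEM_UNASSIGNED;
#endif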
379
380 #if !defined(CONFIG_USER_ONLY)
381 static void tlb_protect_code(ram_addr_t ram_addr);
382 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
383 target_ulong vaddr);
384 #define mmap_lock() do { } while(0)
385 #define mmap_unlock() do { } while(0)
386 #endif
387
388 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
389
390 #if defined(CONFIG_USER_ONLY)
391 /* Currently it is not recommended to allocate big chunks of data in
392    user mode. This will change when a dedicated libc is used. */
393 #define USE_STATIC_CODE_GEN_BUFFER
394 #endif
395
396 #ifdef USE_STATIC_CODE_GEN_BUFFER
397 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
398 #endif
399
400 static void code_gen_alloc(unsigned long tb_size)
401 {
402 #ifdef USE_STATIC_CODE_GEN_BUFFER
403 code_gen_buffer = static_code_gen_buffer;
404 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
405 map_exec(code_gen_buffer, code_gen_buffer_size);
406 #else
407 code_gen_buffer_size = tb_size;
408 if (code_gen_buffer_size == 0) {
409 #if defined(CONFIG_USER_ONLY)
410 /* in user mode, phys_ram_size is not meaningful */
411 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
412 #else
413         /* XXX: needs adjustments */
414 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
415 #endif
416 }
417 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
418 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
419 /* The code gen buffer location may have constraints depending on
420 the host cpu and OS */
421 #if defined(__linux__)
422 {
423 int flags;
424 void *start = NULL;
425
426 flags = MAP_PRIVATE | MAP_ANONYMOUS;
427 #if defined(__x86_64__)
428 flags |= MAP_32BIT;
429 /* Cannot map more than that */
430 if (code_gen_buffer_size > (800 * 1024 * 1024))
431 code_gen_buffer_size = (800 * 1024 * 1024);
432 #elif defined(__sparc_v9__)
433     /* Map the buffer below 2G, so we can use direct calls and branches */
434 flags |= MAP_FIXED;
435 start = (void *) 0x60000000UL;
436 if (code_gen_buffer_size > (512 * 1024 * 1024))
437 code_gen_buffer_size = (512 * 1024 * 1024);
438 #elif defined(__arm__)
439 /* Map the buffer below 32M, so we can use direct calls and branches */
440 flags |= MAP_FIXED;
441 start = (void *) 0x01000000UL;
442 if (code_gen_buffer_size > 16 * 1024 * 1024)
443 code_gen_buffer_size = 16 * 1024 * 1024;
444 #endif
445 code_gen_buffer = mmap(start, code_gen_buffer_size,
446 PROT_WRITE | PROT_READ | PROT_EXEC,
447 flags, -1, 0);
448 if (code_gen_buffer == MAP_FAILED) {
449 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
450 exit(1);
451 }
452 }
453 #elif defined(__FreeBSD__) || defined(__DragonFly__)
454 {
455 int flags;
456 void *addr = NULL;
457 flags = MAP_PRIVATE | MAP_ANONYMOUS;
458 #if defined(__x86_64__)
459 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
460 * 0x40000000 is free */
461 flags |= MAP_FIXED;
462 addr = (void *)0x40000000;
463 /* Cannot map more than that */
464 if (code_gen_buffer_size > (800 * 1024 * 1024))
465 code_gen_buffer_size = (800 * 1024 * 1024);
466 #endif
467 code_gen_buffer = mmap(addr, code_gen_buffer_size,
468 PROT_WRITE | PROT_READ | PROT_EXEC,
469 flags, -1, 0);
470 if (code_gen_buffer == MAP_FAILED) {
471 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
472 exit(1);
473 }
474 }
475 #else
476 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
477 map_exec(code_gen_buffer, code_gen_buffer_size);
478 #endif
479 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
480 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
481 code_gen_buffer_max_size = code_gen_buffer_size -
482 code_gen_max_block_size();
483 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
484 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
485 }
486
487 /* Must be called before using the QEMU cpus. 'tb_size' is the size
488 (in bytes) allocated to the translation buffer. Zero means default
489 size. */
490 void cpu_exec_init_all(unsigned long tb_size)
491 {
492 cpu_gen_init();
493 code_gen_alloc(tb_size);
494 code_gen_ptr = code_gen_buffer;
495 page_init();
496 #if !defined(CONFIG_USER_ONLY)
497 io_mem_init();
498 #endif
499 }
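
/* Illustrative start-up sequence (sketch; "qemu32" is a hypothetical
   cpu model string, and 0 selects the default translation buffer
   size): */
#if 0
CPUState *env;
cpu_exec_init_all(0);
env = cpu_init("qemu32");
#endif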
500
501 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
502
503 #define CPU_COMMON_SAVE_VERSION 1
504
505 static void cpu_common_save(QEMUFile *f, void *opaque)
506 {
507 CPUState *env = opaque;
508
509 qemu_put_be32s(f, &env->halted);
510 qemu_put_be32s(f, &env->interrupt_request);
511 }
512
513 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
514 {
515 CPUState *env = opaque;
516
517 if (version_id != CPU_COMMON_SAVE_VERSION)
518 return -EINVAL;
519
520 qemu_get_be32s(f, &env->halted);
521 qemu_get_be32s(f, &env->interrupt_request);
522 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
523 version_id is increased. */
524 env->interrupt_request &= ~0x01;
525 tlb_flush(env, 1);
526
527 return 0;
528 }
529 #endif
530
531 void cpu_exec_init(CPUState *env)
532 {
533 CPUState **penv;
534 int cpu_index;
535
536 #if defined(CONFIG_USER_ONLY)
537 cpu_list_lock();
538 #endif
539 env->next_cpu = NULL;
540 penv = &first_cpu;
541 cpu_index = 0;
542 while (*penv != NULL) {
543 penv = (CPUState **)&(*penv)->next_cpu;
544 cpu_index++;
545 }
546 env->cpu_index = cpu_index;
547 TAILQ_INIT(&env->breakpoints);
548 TAILQ_INIT(&env->watchpoints);
549 *penv = env;
550 #if defined(CONFIG_USER_ONLY)
551 cpu_list_unlock();
552 #endif
553 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
554 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
555 cpu_common_save, cpu_common_load, env);
556 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
557 cpu_save, cpu_load, env);
558 #endif
559 }
560
561 static inline void invalidate_page_bitmap(PageDesc *p)
562 {
563 if (p->code_bitmap) {
564 qemu_free(p->code_bitmap);
565 p->code_bitmap = NULL;
566 }
567 p->code_write_count = 0;
568 }
569
570 /* set to NULL all the 'first_tb' fields in all PageDescs */
571 static void page_flush_tb(void)
572 {
573 int i, j;
574 PageDesc *p;
575
576 for(i = 0; i < L1_SIZE; i++) {
577 p = l1_map[i];
578 if (p) {
579 for(j = 0; j < L2_SIZE; j++) {
580 p->first_tb = NULL;
581 invalidate_page_bitmap(p);
582 p++;
583 }
584 }
585 }
586 }
587
588 /* flush all the translation blocks */
589 /* XXX: tb_flush is currently not thread safe */
590 void tb_flush(CPUState *env1)
591 {
592 CPUState *env;
593 #if defined(DEBUG_FLUSH)
594 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
595 (unsigned long)(code_gen_ptr - code_gen_buffer),
596 nb_tbs, nb_tbs > 0 ?
597 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
598 #endif
599 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
600 cpu_abort(env1, "Internal error: code buffer overflow\n");
601
602 nb_tbs = 0;
603
604 for(env = first_cpu; env != NULL; env = env->next_cpu) {
605 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
606 }
607
608 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
609 page_flush_tb();
610
611 code_gen_ptr = code_gen_buffer;
612 /* XXX: flush processor icache at this point if cache flush is
613 expensive */
614 tb_flush_count++;
615 }
616
617 #ifdef DEBUG_TB_CHECK
618
619 static void tb_invalidate_check(target_ulong address)
620 {
621 TranslationBlock *tb;
622 int i;
623 address &= TARGET_PAGE_MASK;
624 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
625 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
626 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
627 address >= tb->pc + tb->size)) {
628 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
629 address, (long)tb->pc, tb->size);
630 }
631 }
632 }
633 }
634
635 /* verify that all the pages have correct rights for code */
636 static void tb_page_check(void)
637 {
638 TranslationBlock *tb;
639 int i, flags1, flags2;
640
641 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
642 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
643 flags1 = page_get_flags(tb->pc);
644 flags2 = page_get_flags(tb->pc + tb->size - 1);
645 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
646 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
647 (long)tb->pc, tb->size, flags1, flags2);
648 }
649 }
650 }
651 }
652
653 static void tb_jmp_check(TranslationBlock *tb)
654 {
655 TranslationBlock *tb1;
656 unsigned int n1;
657
658 /* suppress any remaining jumps to this TB */
659 tb1 = tb->jmp_first;
660 for(;;) {
661 n1 = (long)tb1 & 3;
662 tb1 = (TranslationBlock *)((long)tb1 & ~3);
663 if (n1 == 2)
664 break;
665 tb1 = tb1->jmp_next[n1];
666 }
667 /* check end of list */
668 if (tb1 != tb) {
669 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
670 }
671 }
672
673 #endif
674
675 /* invalidate one TB */
676 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
677 int next_offset)
678 {
679 TranslationBlock *tb1;
680 for(;;) {
681 tb1 = *ptb;
682 if (tb1 == tb) {
683 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
684 break;
685 }
686 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
687 }
688 }
689
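/* NOTE: the page_next[] and jmp_first/jmp_next[] lists store a tag in
   the low two bits of each TranslationBlock pointer: n = 0 or 1 names
   which of the TB's two pages (or jump slots) the link belongs to, and
   n = 2 marks the list head, hence the recurring '(long)tb & 3' and
   '(long)tb & ~3' unpacking below. */
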
690 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
691 {
692 TranslationBlock *tb1;
693 unsigned int n1;
694
695 for(;;) {
696 tb1 = *ptb;
697 n1 = (long)tb1 & 3;
698 tb1 = (TranslationBlock *)((long)tb1 & ~3);
699 if (tb1 == tb) {
700 *ptb = tb1->page_next[n1];
701 break;
702 }
703 ptb = &tb1->page_next[n1];
704 }
705 }
706
707 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
708 {
709 TranslationBlock *tb1, **ptb;
710 unsigned int n1;
711
712 ptb = &tb->jmp_next[n];
713 tb1 = *ptb;
714 if (tb1) {
715 /* find tb(n) in circular list */
716 for(;;) {
717 tb1 = *ptb;
718 n1 = (long)tb1 & 3;
719 tb1 = (TranslationBlock *)((long)tb1 & ~3);
720 if (n1 == n && tb1 == tb)
721 break;
722 if (n1 == 2) {
723 ptb = &tb1->jmp_first;
724 } else {
725 ptb = &tb1->jmp_next[n1];
726 }
727 }
728 /* now we can suppress tb(n) from the list */
729 *ptb = tb->jmp_next[n];
730
731 tb->jmp_next[n] = NULL;
732 }
733 }
734
735 /* reset the jump entry 'n' of a TB so that it is not chained to
736 another TB */
737 static inline void tb_reset_jump(TranslationBlock *tb, int n)
738 {
739 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
740 }
741
742 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
743 {
744 CPUState *env;
745 PageDesc *p;
746 unsigned int h, n1;
747 target_phys_addr_t phys_pc;
748 TranslationBlock *tb1, *tb2;
749
750 /* remove the TB from the hash list */
751 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
752 h = tb_phys_hash_func(phys_pc);
753 tb_remove(&tb_phys_hash[h], tb,
754 offsetof(TranslationBlock, phys_hash_next));
755
756 /* remove the TB from the page list */
757 if (tb->page_addr[0] != page_addr) {
758 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
759 tb_page_remove(&p->first_tb, tb);
760 invalidate_page_bitmap(p);
761 }
762 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
763 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
764 tb_page_remove(&p->first_tb, tb);
765 invalidate_page_bitmap(p);
766 }
767
768 tb_invalidated_flag = 1;
769
770 /* remove the TB from the hash list */
771 h = tb_jmp_cache_hash_func(tb->pc);
772 for(env = first_cpu; env != NULL; env = env->next_cpu) {
773 if (env->tb_jmp_cache[h] == tb)
774 env->tb_jmp_cache[h] = NULL;
775 }
776
777 /* suppress this TB from the two jump lists */
778 tb_jmp_remove(tb, 0);
779 tb_jmp_remove(tb, 1);
780
781 /* suppress any remaining jumps to this TB */
782 tb1 = tb->jmp_first;
783 for(;;) {
784 n1 = (long)tb1 & 3;
785 if (n1 == 2)
786 break;
787 tb1 = (TranslationBlock *)((long)tb1 & ~3);
788 tb2 = tb1->jmp_next[n1];
789 tb_reset_jump(tb1, n1);
790 tb1->jmp_next[n1] = NULL;
791 tb1 = tb2;
792 }
793 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
794
795 tb_phys_invalidate_count++;
796 }
797
798 static inline void set_bits(uint8_t *tab, int start, int len)
799 {
800 int end, mask, end1;
801
802 end = start + len;
803 tab += start >> 3;
804 mask = 0xff << (start & 7);
805 if ((start & ~7) == (end & ~7)) {
806 if (start < end) {
807 mask &= ~(0xff << (end & 7));
808 *tab |= mask;
809 }
810 } else {
811 *tab++ |= mask;
812 start = (start + 8) & ~7;
813 end1 = end & ~7;
814 while (start < end1) {
815 *tab++ = 0xff;
816 start += 8;
817 }
818 if (start < end) {
819 mask = ~(0xff << (end & 7));
820 *tab |= mask;
821 }
822 }
823 }
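
/* Worked example: set_bits(tab, 3, 7) covers bits 3..9, so the first
   byte is OR'ed with 0xf8 (bits 3..7) and the second with 0x03
   (bits 8..9); any whole bytes in between would be set to 0xff. */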
824
825 static void build_page_bitmap(PageDesc *p)
826 {
827 int n, tb_start, tb_end;
828 TranslationBlock *tb;
829
830 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
831
832 tb = p->first_tb;
833 while (tb != NULL) {
834 n = (long)tb & 3;
835 tb = (TranslationBlock *)((long)tb & ~3);
836 /* NOTE: this is subtle as a TB may span two physical pages */
837 if (n == 0) {
838 /* NOTE: tb_end may be after the end of the page, but
839 it is not a problem */
840 tb_start = tb->pc & ~TARGET_PAGE_MASK;
841 tb_end = tb_start + tb->size;
842 if (tb_end > TARGET_PAGE_SIZE)
843 tb_end = TARGET_PAGE_SIZE;
844 } else {
845 tb_start = 0;
846 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
847 }
848 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
849 tb = tb->page_next[n];
850 }
851 }
852
853 TranslationBlock *tb_gen_code(CPUState *env,
854 target_ulong pc, target_ulong cs_base,
855 int flags, int cflags)
856 {
857 TranslationBlock *tb;
858 uint8_t *tc_ptr;
859 target_ulong phys_pc, phys_page2, virt_page2;
860 int code_gen_size;
861
862 phys_pc = get_phys_addr_code(env, pc);
863 tb = tb_alloc(pc);
864 if (!tb) {
865 /* flush must be done */
866 tb_flush(env);
867 /* cannot fail at this point */
868 tb = tb_alloc(pc);
869 /* Don't forget to invalidate previous TB info. */
870 tb_invalidated_flag = 1;
871 }
872 tc_ptr = code_gen_ptr;
873 tb->tc_ptr = tc_ptr;
874 tb->cs_base = cs_base;
875 tb->flags = flags;
876 tb->cflags = cflags;
877 cpu_gen_code(env, tb, &code_gen_size);
878 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
879
880 /* check next page if needed */
881 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
882 phys_page2 = -1;
883 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
884 phys_page2 = get_phys_addr_code(env, virt_page2);
885 }
886 tb_link_phys(tb, phys_pc, phys_page2);
887 return tb;
888 }
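
/* NOTE: a TB whose code straddles a target page boundary records both
   physical pages (phys_page2 above), so invalidating either page will
   remove it. */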
889
890 /* invalidate all TBs which intersect with the target physical
891    range [start, end). NOTE: start and end must refer to
892 the same physical page. 'is_cpu_write_access' should be true if called
893 from a real cpu write access: the virtual CPU will exit the current
894 TB if code is modified inside this TB. */
895 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
896 int is_cpu_write_access)
897 {
898 TranslationBlock *tb, *tb_next, *saved_tb;
899 CPUState *env = cpu_single_env;
900 target_ulong tb_start, tb_end;
901 PageDesc *p;
902 int n;
903 #ifdef TARGET_HAS_PRECISE_SMC
904 int current_tb_not_found = is_cpu_write_access;
905 TranslationBlock *current_tb = NULL;
906 int current_tb_modified = 0;
907 target_ulong current_pc = 0;
908 target_ulong current_cs_base = 0;
909 int current_flags = 0;
910 #endif /* TARGET_HAS_PRECISE_SMC */
911
912 p = page_find(start >> TARGET_PAGE_BITS);
913 if (!p)
914 return;
915 if (!p->code_bitmap &&
916 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
917 is_cpu_write_access) {
918 /* build code bitmap */
919 build_page_bitmap(p);
920 }
921
922     /* we remove all the TBs in the range [start, end) */
923 /* XXX: see if in some cases it could be faster to invalidate all the code */
924 tb = p->first_tb;
925 while (tb != NULL) {
926 n = (long)tb & 3;
927 tb = (TranslationBlock *)((long)tb & ~3);
928 tb_next = tb->page_next[n];
929 /* NOTE: this is subtle as a TB may span two physical pages */
930 if (n == 0) {
931 /* NOTE: tb_end may be after the end of the page, but
932 it is not a problem */
933 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
934 tb_end = tb_start + tb->size;
935 } else {
936 tb_start = tb->page_addr[1];
937 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
938 }
939 if (!(tb_end <= start || tb_start >= end)) {
940 #ifdef TARGET_HAS_PRECISE_SMC
941 if (current_tb_not_found) {
942 current_tb_not_found = 0;
943 current_tb = NULL;
944 if (env->mem_io_pc) {
945 /* now we have a real cpu fault */
946 current_tb = tb_find_pc(env->mem_io_pc);
947 }
948 }
949 if (current_tb == tb &&
950 (current_tb->cflags & CF_COUNT_MASK) != 1) {
951 /* If we are modifying the current TB, we must stop
952 its execution. We could be more precise by checking
953 that the modification is after the current PC, but it
954 would require a specialized function to partially
955 restore the CPU state */
956
957 current_tb_modified = 1;
958 cpu_restore_state(current_tb, env,
959 env->mem_io_pc, NULL);
960 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
961 &current_flags);
962 }
963 #endif /* TARGET_HAS_PRECISE_SMC */
964 /* we need to do that to handle the case where a signal
965 occurs while doing tb_phys_invalidate() */
966 saved_tb = NULL;
967 if (env) {
968 saved_tb = env->current_tb;
969 env->current_tb = NULL;
970 }
971 tb_phys_invalidate(tb, -1);
972 if (env) {
973 env->current_tb = saved_tb;
974 if (env->interrupt_request && env->current_tb)
975 cpu_interrupt(env, env->interrupt_request);
976 }
977 }
978 tb = tb_next;
979 }
980 #if !defined(CONFIG_USER_ONLY)
981 /* if no code remaining, no need to continue to use slow writes */
982 if (!p->first_tb) {
983 invalidate_page_bitmap(p);
984 if (is_cpu_write_access) {
985 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
986 }
987 }
988 #endif
989 #ifdef TARGET_HAS_PRECISE_SMC
990 if (current_tb_modified) {
991 /* we generate a block containing just the instruction
992 modifying the memory. It will ensure that it cannot modify
993 itself */
994 env->current_tb = NULL;
995 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
996 cpu_resume_from_signal(env, NULL);
997 }
998 #endif
999 }
1000
1001 /* len must be <= 8 and start must be a multiple of len */
1002 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1003 {
1004 PageDesc *p;
1005 int offset, b;
1006 #if 0
1007 if (1) {
1008 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1009 cpu_single_env->mem_io_vaddr, len,
1010 cpu_single_env->eip,
1011 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1012 }
1013 #endif
1014 p = page_find(start >> TARGET_PAGE_BITS);
1015 if (!p)
1016 return;
1017 if (p->code_bitmap) {
1018 offset = start & ~TARGET_PAGE_MASK;
1019 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1020 if (b & ((1 << len) - 1))
1021 goto do_invalidate;
1022 } else {
1023 do_invalidate:
1024 tb_invalidate_phys_page_range(start, start + len, 1);
1025 }
1026 }
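
/* Worked example: a 4-byte write at page offset 0x104 tests bits 4..7
   of code_bitmap[0x20]; because 'start' is len-aligned the test never
   crosses a byte boundary. */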
1027
1028 #if !defined(CONFIG_SOFTMMU)
1029 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1030 unsigned long pc, void *puc)
1031 {
1032 TranslationBlock *tb;
1033 PageDesc *p;
1034 int n;
1035 #ifdef TARGET_HAS_PRECISE_SMC
1036 TranslationBlock *current_tb = NULL;
1037 CPUState *env = cpu_single_env;
1038 int current_tb_modified = 0;
1039 target_ulong current_pc = 0;
1040 target_ulong current_cs_base = 0;
1041 int current_flags = 0;
1042 #endif
1043
1044 addr &= TARGET_PAGE_MASK;
1045 p = page_find(addr >> TARGET_PAGE_BITS);
1046 if (!p)
1047 return;
1048 tb = p->first_tb;
1049 #ifdef TARGET_HAS_PRECISE_SMC
1050 if (tb && pc != 0) {
1051 current_tb = tb_find_pc(pc);
1052 }
1053 #endif
1054 while (tb != NULL) {
1055 n = (long)tb & 3;
1056 tb = (TranslationBlock *)((long)tb & ~3);
1057 #ifdef TARGET_HAS_PRECISE_SMC
1058 if (current_tb == tb &&
1059 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1060 /* If we are modifying the current TB, we must stop
1061 its execution. We could be more precise by checking
1062 that the modification is after the current PC, but it
1063 would require a specialized function to partially
1064 restore the CPU state */
1065
1066 current_tb_modified = 1;
1067 cpu_restore_state(current_tb, env, pc, puc);
1068 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1069 &current_flags);
1070 }
1071 #endif /* TARGET_HAS_PRECISE_SMC */
1072 tb_phys_invalidate(tb, addr);
1073 tb = tb->page_next[n];
1074 }
1075 p->first_tb = NULL;
1076 #ifdef TARGET_HAS_PRECISE_SMC
1077 if (current_tb_modified) {
1078 /* we generate a block containing just the instruction
1079 modifying the memory. It will ensure that it cannot modify
1080 itself */
1081 env->current_tb = NULL;
1082 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1083 cpu_resume_from_signal(env, puc);
1084 }
1085 #endif
1086 }
1087 #endif
1088
1089 /* add the tb in the target page and protect it if necessary */
1090 static inline void tb_alloc_page(TranslationBlock *tb,
1091 unsigned int n, target_ulong page_addr)
1092 {
1093 PageDesc *p;
1094 TranslationBlock *last_first_tb;
1095
1096 tb->page_addr[n] = page_addr;
1097 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1098 tb->page_next[n] = p->first_tb;
1099 last_first_tb = p->first_tb;
1100 p->first_tb = (TranslationBlock *)((long)tb | n);
1101 invalidate_page_bitmap(p);
1102
1103 #if defined(TARGET_HAS_SMC) || 1
1104
1105 #if defined(CONFIG_USER_ONLY)
1106 if (p->flags & PAGE_WRITE) {
1107 target_ulong addr;
1108 PageDesc *p2;
1109 int prot;
1110
1111         /* force the host page as non-writable (writes will have a
1112 page fault + mprotect overhead) */
1113 page_addr &= qemu_host_page_mask;
1114 prot = 0;
1115 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1116 addr += TARGET_PAGE_SIZE) {
1117
1118 p2 = page_find (addr >> TARGET_PAGE_BITS);
1119 if (!p2)
1120 continue;
1121 prot |= p2->flags;
1122 p2->flags &= ~PAGE_WRITE;
1123 page_get_flags(addr);
1124 }
1125 mprotect(g2h(page_addr), qemu_host_page_size,
1126 (prot & PAGE_BITS) & ~PAGE_WRITE);
1127 #ifdef DEBUG_TB_INVALIDATE
1128 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1129 page_addr);
1130 #endif
1131 }
1132 #else
1133 /* if some code is already present, then the pages are already
1134 protected. So we handle the case where only the first TB is
1135 allocated in a physical page */
1136 if (!last_first_tb) {
1137 tlb_protect_code(page_addr);
1138 }
1139 #endif
1140
1141 #endif /* TARGET_HAS_SMC */
1142 }
1143
1144 /* Allocate a new translation block. Flush the translation buffer if
1145 too many translation blocks or too much generated code. */
1146 TranslationBlock *tb_alloc(target_ulong pc)
1147 {
1148 TranslationBlock *tb;
1149
1150 if (nb_tbs >= code_gen_max_blocks ||
1151 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1152 return NULL;
1153 tb = &tbs[nb_tbs++];
1154 tb->pc = pc;
1155 tb->cflags = 0;
1156 return tb;
1157 }
1158
1159 void tb_free(TranslationBlock *tb)
1160 {
1161     /* In practice this is mostly used for single-use temporary TBs.
1162        Ignore the hard cases and just back up if this TB happens to
1163 be the last one generated. */
1164 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1165 code_gen_ptr = tb->tc_ptr;
1166 nb_tbs--;
1167 }
1168 }
1169
1170 /* add a new TB and link it to the physical page tables. phys_page2 is
1171 (-1) to indicate that only one page contains the TB. */
1172 void tb_link_phys(TranslationBlock *tb,
1173 target_ulong phys_pc, target_ulong phys_page2)
1174 {
1175 unsigned int h;
1176 TranslationBlock **ptb;
1177
1178 /* Grab the mmap lock to stop another thread invalidating this TB
1179 before we are done. */
1180 mmap_lock();
1181 /* add in the physical hash table */
1182 h = tb_phys_hash_func(phys_pc);
1183 ptb = &tb_phys_hash[h];
1184 tb->phys_hash_next = *ptb;
1185 *ptb = tb;
1186
1187 /* add in the page list */
1188 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1189 if (phys_page2 != -1)
1190 tb_alloc_page(tb, 1, phys_page2);
1191 else
1192 tb->page_addr[1] = -1;
1193
1194 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1195 tb->jmp_next[0] = NULL;
1196 tb->jmp_next[1] = NULL;
1197
1198 /* init original jump addresses */
1199 if (tb->tb_next_offset[0] != 0xffff)
1200 tb_reset_jump(tb, 0);
1201 if (tb->tb_next_offset[1] != 0xffff)
1202 tb_reset_jump(tb, 1);
1203
1204 #ifdef DEBUG_TB_CHECK
1205 tb_page_check();
1206 #endif
1207 mmap_unlock();
1208 }
1209
1210 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1211 tb[1].tc_ptr. Return NULL if not found */
1212 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1213 {
1214 int m_min, m_max, m;
1215 unsigned long v;
1216 TranslationBlock *tb;
1217
1218 if (nb_tbs <= 0)
1219 return NULL;
1220 if (tc_ptr < (unsigned long)code_gen_buffer ||
1221 tc_ptr >= (unsigned long)code_gen_ptr)
1222 return NULL;
1223 /* binary search (cf Knuth) */
1224 m_min = 0;
1225 m_max = nb_tbs - 1;
1226 while (m_min <= m_max) {
1227 m = (m_min + m_max) >> 1;
1228 tb = &tbs[m];
1229 v = (unsigned long)tb->tc_ptr;
1230 if (v == tc_ptr)
1231 return tb;
1232 else if (tc_ptr < v) {
1233 m_max = m - 1;
1234 } else {
1235 m_min = m + 1;
1236 }
1237 }
1238 return &tbs[m_max];
1239 }
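
/* NOTE: tb_find_pc() is how a host-side fault PC (e.g. env->mem_io_pc
   captured during an MMIO access) is mapped back to the guest TB being
   executed. */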
1240
1241 static void tb_reset_jump_recursive(TranslationBlock *tb);
1242
1243 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1244 {
1245 TranslationBlock *tb1, *tb_next, **ptb;
1246 unsigned int n1;
1247
1248 tb1 = tb->jmp_next[n];
1249 if (tb1 != NULL) {
1250 /* find head of list */
1251 for(;;) {
1252 n1 = (long)tb1 & 3;
1253 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1254 if (n1 == 2)
1255 break;
1256 tb1 = tb1->jmp_next[n1];
1257 }
1258         /* we are now sure that tb jumps to tb1 */
1259 tb_next = tb1;
1260
1261 /* remove tb from the jmp_first list */
1262 ptb = &tb_next->jmp_first;
1263 for(;;) {
1264 tb1 = *ptb;
1265 n1 = (long)tb1 & 3;
1266 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1267 if (n1 == n && tb1 == tb)
1268 break;
1269 ptb = &tb1->jmp_next[n1];
1270 }
1271 *ptb = tb->jmp_next[n];
1272 tb->jmp_next[n] = NULL;
1273
1274 /* suppress the jump to next tb in generated code */
1275 tb_reset_jump(tb, n);
1276
1277 /* suppress jumps in the tb on which we could have jumped */
1278 tb_reset_jump_recursive(tb_next);
1279 }
1280 }
1281
1282 static void tb_reset_jump_recursive(TranslationBlock *tb)
1283 {
1284 tb_reset_jump_recursive2(tb, 0);
1285 tb_reset_jump_recursive2(tb, 1);
1286 }
1287
1288 #if defined(TARGET_HAS_ICE)
1289 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1290 {
1291 target_phys_addr_t addr;
1292 target_ulong pd;
1293 ram_addr_t ram_addr;
1294 PhysPageDesc *p;
1295
1296 addr = cpu_get_phys_page_debug(env, pc);
1297 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1298 if (!p) {
1299 pd = IO_MEM_UNASSIGNED;
1300 } else {
1301 pd = p->phys_offset;
1302 }
1303 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1304 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1305 }
1306 #endif
1307
1308 /* Add a watchpoint. */
1309 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1310 int flags, CPUWatchpoint **watchpoint)
1311 {
1312 target_ulong len_mask = ~(len - 1);
1313 CPUWatchpoint *wp;
1314
1315 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1316 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1317 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1318 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1319 return -EINVAL;
1320 }
1321 wp = qemu_malloc(sizeof(*wp));
1322
1323 wp->vaddr = addr;
1324 wp->len_mask = len_mask;
1325 wp->flags = flags;
1326
1327 /* keep all GDB-injected watchpoints in front */
1328 if (flags & BP_GDB)
1329 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1330 else
1331 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1332
1333 tlb_flush_page(env, addr);
1334
1335 if (watchpoint)
1336 *watchpoint = wp;
1337 return 0;
1338 }
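
/* Illustrative usage (sketch; the address and length are assumptions):
   insert a GDB-style watchpoint over 4 bytes at guest address 0x1000. */
#if 0
CPUWatchpoint *wp;
cpu_watchpoint_insert(env, 0x1000, 4, BP_GDB, &wp);
#endif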
1339
1340 /* Remove a specific watchpoint. */
1341 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1342 int flags)
1343 {
1344 target_ulong len_mask = ~(len - 1);
1345 CPUWatchpoint *wp;
1346
1347 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1348 if (addr == wp->vaddr && len_mask == wp->len_mask
1349 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1350 cpu_watchpoint_remove_by_ref(env, wp);
1351 return 0;
1352 }
1353 }
1354 return -ENOENT;
1355 }
1356
1357 /* Remove a specific watchpoint by reference. */
1358 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1359 {
1360 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1361
1362 tlb_flush_page(env, watchpoint->vaddr);
1363
1364 qemu_free(watchpoint);
1365 }
1366
1367 /* Remove all matching watchpoints. */
1368 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1369 {
1370 CPUWatchpoint *wp, *next;
1371
1372 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1373 if (wp->flags & mask)
1374 cpu_watchpoint_remove_by_ref(env, wp);
1375 }
1376 }
1377
1378 /* Add a breakpoint. */
1379 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1380 CPUBreakpoint **breakpoint)
1381 {
1382 #if defined(TARGET_HAS_ICE)
1383 CPUBreakpoint *bp;
1384
1385 bp = qemu_malloc(sizeof(*bp));
1386
1387 bp->pc = pc;
1388 bp->flags = flags;
1389
1390 /* keep all GDB-injected breakpoints in front */
1391 if (flags & BP_GDB)
1392 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1393 else
1394 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1395
1396 breakpoint_invalidate(env, pc);
1397
1398 if (breakpoint)
1399 *breakpoint = bp;
1400 return 0;
1401 #else
1402 return -ENOSYS;
1403 #endif
1404 }
1405
1406 /* Remove a specific breakpoint. */
1407 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1408 {
1409 #if defined(TARGET_HAS_ICE)
1410 CPUBreakpoint *bp;
1411
1412 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1413 if (bp->pc == pc && bp->flags == flags) {
1414 cpu_breakpoint_remove_by_ref(env, bp);
1415 return 0;
1416 }
1417 }
1418 return -ENOENT;
1419 #else
1420 return -ENOSYS;
1421 #endif
1422 }
1423
1424 /* Remove a specific breakpoint by reference. */
1425 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1426 {
1427 #if defined(TARGET_HAS_ICE)
1428 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1429
1430 breakpoint_invalidate(env, breakpoint->pc);
1431
1432 qemu_free(breakpoint);
1433 #endif
1434 }
1435
1436 /* Remove all matching breakpoints. */
1437 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1438 {
1439 #if defined(TARGET_HAS_ICE)
1440 CPUBreakpoint *bp, *next;
1441
1442 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1443 if (bp->flags & mask)
1444 cpu_breakpoint_remove_by_ref(env, bp);
1445 }
1446 #endif
1447 }
1448
1449 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1450 CPU loop after each instruction */
1451 void cpu_single_step(CPUState *env, int enabled)
1452 {
1453 #if defined(TARGET_HAS_ICE)
1454 if (env->singlestep_enabled != enabled) {
1455 env->singlestep_enabled = enabled;
1456 if (kvm_enabled())
1457 kvm_update_guest_debug(env, 0);
1458 else {
1459             /* must flush all the translated code to avoid inconsistencies */
1460 /* XXX: only flush what is necessary */
1461 tb_flush(env);
1462 }
1463 }
1464 #endif
1465 }
1466
1467 /* enable or disable low level logging */
1468 void cpu_set_log(int log_flags)
1469 {
1470 loglevel = log_flags;
1471 if (loglevel && !logfile) {
1472 logfile = fopen(logfilename, log_append ? "a" : "w");
1473 if (!logfile) {
1474 perror(logfilename);
1475 _exit(1);
1476 }
1477 #if !defined(CONFIG_SOFTMMU)
1478 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1479 {
1480 static char logfile_buf[4096];
1481 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1482 }
1483 #else
1484 setvbuf(logfile, NULL, _IOLBF, 0);
1485 #endif
1486 log_append = 1;
1487 }
1488 if (!loglevel && logfile) {
1489 fclose(logfile);
1490 logfile = NULL;
1491 }
1492 }
1493
1494 void cpu_set_log_filename(const char *filename)
1495 {
1496 logfilename = strdup(filename);
1497 if (logfile) {
1498 fclose(logfile);
1499 logfile = NULL;
1500 }
1501 cpu_set_log(loglevel);
1502 }
1503
1504 static void cpu_unlink_tb(CPUState *env)
1505 {
1506 #if defined(USE_NPTL)
1507 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1508 problem and hope the cpu will stop of its own accord. For userspace
1509 emulation this often isn't actually as bad as it sounds. Often
1510 signals are used primarily to interrupt blocking syscalls. */
1511 #else
1512 TranslationBlock *tb;
1513 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1514
1515 tb = env->current_tb;
1516 /* if the cpu is currently executing code, we must unlink it and
1517 all the potentially executing TB */
1518 if (tb && !testandset(&interrupt_lock)) {
1519 env->current_tb = NULL;
1520 tb_reset_jump_recursive(tb);
1521 resetlock(&interrupt_lock);
1522 }
1523 #endif
1524 }
1525
1526 /* mask must never be zero, except for A20 change call */
1527 void cpu_interrupt(CPUState *env, int mask)
1528 {
1529 int old_mask;
1530
1531 old_mask = env->interrupt_request;
1532 env->interrupt_request |= mask;
1533
1534 if (use_icount) {
1535 env->icount_decr.u16.high = 0xffff;
1536 #ifndef CONFIG_USER_ONLY
1537 if (!can_do_io(env)
1538 && (mask & ~old_mask) != 0) {
1539 cpu_abort(env, "Raised interrupt while not in I/O function");
1540 }
1541 #endif
1542 } else {
1543 cpu_unlink_tb(env);
1544 }
1545 }
1546
1547 void cpu_reset_interrupt(CPUState *env, int mask)
1548 {
1549 env->interrupt_request &= ~mask;
1550 }
1551
1552 void cpu_exit(CPUState *env)
1553 {
1554 env->exit_request = 1;
1555 cpu_unlink_tb(env);
1556 }
1557
1558 const CPULogItem cpu_log_items[] = {
1559 { CPU_LOG_TB_OUT_ASM, "out_asm",
1560 "show generated host assembly code for each compiled TB" },
1561 { CPU_LOG_TB_IN_ASM, "in_asm",
1562 "show target assembly code for each compiled TB" },
1563 { CPU_LOG_TB_OP, "op",
1564 "show micro ops for each compiled TB" },
1565 { CPU_LOG_TB_OP_OPT, "op_opt",
1566 "show micro ops "
1567 #ifdef TARGET_I386
1568 "before eflags optimization and "
1569 #endif
1570 "after liveness analysis" },
1571 { CPU_LOG_INT, "int",
1572 "show interrupts/exceptions in short format" },
1573 { CPU_LOG_EXEC, "exec",
1574 "show trace before each executed TB (lots of logs)" },
1575 { CPU_LOG_TB_CPU, "cpu",
1576 "show CPU state before block translation" },
1577 #ifdef TARGET_I386
1578 { CPU_LOG_PCALL, "pcall",
1579 "show protected mode far calls/returns/exceptions" },
1580 { CPU_LOG_RESET, "cpu_reset",
1581 "show CPU state before CPU resets" },
1582 #endif
1583 #ifdef DEBUG_IOPORT
1584 { CPU_LOG_IOPORT, "ioport",
1585 "show all i/o ports accesses" },
1586 #endif
1587 { 0, NULL, NULL },
1588 };
1589
1590 static int cmp1(const char *s1, int n, const char *s2)
1591 {
1592 if (strlen(s2) != n)
1593 return 0;
1594 return memcmp(s1, s2, n) == 0;
1595 }
1596
1597 /* takes a comma-separated list of log masks. Returns 0 on error. */
1598 int cpu_str_to_log_mask(const char *str)
1599 {
1600 const CPULogItem *item;
1601 int mask;
1602 const char *p, *p1;
1603
1604 p = str;
1605 mask = 0;
1606 for(;;) {
1607 p1 = strchr(p, ',');
1608 if (!p1)
1609 p1 = p + strlen(p);
1610 if(cmp1(p,p1-p,"all")) {
1611 for(item = cpu_log_items; item->mask != 0; item++) {
1612 mask |= item->mask;
1613 }
1614 } else {
1615 for(item = cpu_log_items; item->mask != 0; item++) {
1616 if (cmp1(p, p1 - p, item->name))
1617 goto found;
1618 }
1619 return 0;
1620 }
1621 found:
1622 mask |= item->mask;
1623 if (*p1 != ',')
1624 break;
1625 p = p1 + 1;
1626 }
1627 return mask;
1628 }
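
/* Illustrative usage (sketch), e.g. for a "-d in_asm,op" style option: */
#if 0
int mask = cpu_str_to_log_mask("in_asm,op");
if (mask)
    cpu_set_log(mask);
#endif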
1629
1630 void cpu_abort(CPUState *env, const char *fmt, ...)
1631 {
1632 va_list ap;
1633 va_list ap2;
1634
1635 va_start(ap, fmt);
1636 va_copy(ap2, ap);
1637 fprintf(stderr, "qemu: fatal: ");
1638 vfprintf(stderr, fmt, ap);
1639 fprintf(stderr, "\n");
1640 #ifdef TARGET_I386
1641 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1642 #else
1643 cpu_dump_state(env, stderr, fprintf, 0);
1644 #endif
1645 if (qemu_log_enabled()) {
1646 qemu_log("qemu: fatal: ");
1647 qemu_log_vprintf(fmt, ap2);
1648 qemu_log("\n");
1649 #ifdef TARGET_I386
1650 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1651 #else
1652 log_cpu_state(env, 0);
1653 #endif
1654 qemu_log_flush();
1655 qemu_log_close();
1656 }
1657 va_end(ap2);
1658 va_end(ap);
1659 abort();
1660 }
1661
1662 CPUState *cpu_copy(CPUState *env)
1663 {
1664 CPUState *new_env = cpu_init(env->cpu_model_str);
1665 CPUState *next_cpu = new_env->next_cpu;
1666 int cpu_index = new_env->cpu_index;
1667 #if defined(TARGET_HAS_ICE)
1668 CPUBreakpoint *bp;
1669 CPUWatchpoint *wp;
1670 #endif
1671
1672 memcpy(new_env, env, sizeof(CPUState));
1673
1674 /* Preserve chaining and index. */
1675 new_env->next_cpu = next_cpu;
1676 new_env->cpu_index = cpu_index;
1677
1678 /* Clone all break/watchpoints.
1679 Note: Once we support ptrace with hw-debug register access, make sure
1680 BP_CPU break/watchpoints are handled correctly on clone. */
1681 TAILQ_INIT(&env->breakpoints);
1682 TAILQ_INIT(&env->watchpoints);
1683 #if defined(TARGET_HAS_ICE)
1684 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1685 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1686 }
1687 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1688 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1689 wp->flags, NULL);
1690 }
1691 #endif
1692
1693 return new_env;
1694 }
1695
1696 #if !defined(CONFIG_USER_ONLY)
1697
1698 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1699 {
1700 unsigned int i;
1701
1702 /* Discard jump cache entries for any tb which might potentially
1703 overlap the flushed page. */
1704 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1705 memset (&env->tb_jmp_cache[i], 0,
1706 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1707
1708 i = tb_jmp_cache_hash_page(addr);
1709 memset (&env->tb_jmp_cache[i], 0,
1710 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1711 }
1712
1713 /* NOTE: if flush_global is true, also flush global entries (not
1714 implemented yet) */
1715 void tlb_flush(CPUState *env, int flush_global)
1716 {
1717 int i;
1718
1719 #if defined(DEBUG_TLB)
1720 printf("tlb_flush:\n");
1721 #endif
1722 /* must reset current TB so that interrupts cannot modify the
1723 links while we are modifying them */
1724 env->current_tb = NULL;
1725
1726 for(i = 0; i < CPU_TLB_SIZE; i++) {
1727 env->tlb_table[0][i].addr_read = -1;
1728 env->tlb_table[0][i].addr_write = -1;
1729 env->tlb_table[0][i].addr_code = -1;
1730 env->tlb_table[1][i].addr_read = -1;
1731 env->tlb_table[1][i].addr_write = -1;
1732 env->tlb_table[1][i].addr_code = -1;
1733 #if (NB_MMU_MODES >= 3)
1734 env->tlb_table[2][i].addr_read = -1;
1735 env->tlb_table[2][i].addr_write = -1;
1736 env->tlb_table[2][i].addr_code = -1;
1737 #endif
1738 #if (NB_MMU_MODES >= 4)
1739 env->tlb_table[3][i].addr_read = -1;
1740 env->tlb_table[3][i].addr_write = -1;
1741 env->tlb_table[3][i].addr_code = -1;
1742 #endif
1743 #if (NB_MMU_MODES >= 5)
1744 env->tlb_table[4][i].addr_read = -1;
1745 env->tlb_table[4][i].addr_write = -1;
1746 env->tlb_table[4][i].addr_code = -1;
1747 #endif
1748
1749 }
1750
1751 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1752
1753 #ifdef USE_KQEMU
1754 if (env->kqemu_enabled) {
1755 kqemu_flush(env, flush_global);
1756 }
1757 #endif
1758 tlb_flush_count++;
1759 }
1760
1761 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1762 {
1763 if (addr == (tlb_entry->addr_read &
1764 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1765 addr == (tlb_entry->addr_write &
1766 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1767 addr == (tlb_entry->addr_code &
1768 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1769 tlb_entry->addr_read = -1;
1770 tlb_entry->addr_write = -1;
1771 tlb_entry->addr_code = -1;
1772 }
1773 }
1774
1775 void tlb_flush_page(CPUState *env, target_ulong addr)
1776 {
1777 int i;
1778
1779 #if defined(DEBUG_TLB)
1780 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1781 #endif
1782 /* must reset current TB so that interrupts cannot modify the
1783 links while we are modifying them */
1784 env->current_tb = NULL;
1785
1786 addr &= TARGET_PAGE_MASK;
1787 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1788 tlb_flush_entry(&env->tlb_table[0][i], addr);
1789 tlb_flush_entry(&env->tlb_table[1][i], addr);
1790 #if (NB_MMU_MODES >= 3)
1791 tlb_flush_entry(&env->tlb_table[2][i], addr);
1792 #endif
1793 #if (NB_MMU_MODES >= 4)
1794 tlb_flush_entry(&env->tlb_table[3][i], addr);
1795 #endif
1796 #if (NB_MMU_MODES >= 5)
1797 tlb_flush_entry(&env->tlb_table[4][i], addr);
1798 #endif
1799
1800 tlb_flush_jmp_cache(env, addr);
1801
1802 #ifdef USE_KQEMU
1803 if (env->kqemu_enabled) {
1804 kqemu_flush_page(env, addr);
1805 }
1806 #endif
1807 }
1808
1809 /* update the TLBs so that writes to code in the virtual page 'addr'
1810 can be detected */
1811 static void tlb_protect_code(ram_addr_t ram_addr)
1812 {
1813 cpu_physical_memory_reset_dirty(ram_addr,
1814 ram_addr + TARGET_PAGE_SIZE,
1815 CODE_DIRTY_FLAG);
1816 }
1817
1818 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1819 tested for self modifying code */
1820 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1821 target_ulong vaddr)
1822 {
1823 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1824 }
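
/* NOTE: phys_ram_dirty holds one byte of flag bits per target page,
   indexed by ram_addr >> TARGET_PAGE_BITS.  A set CODE_DIRTY_FLAG
   means the page contains no translated code whose writes must be
   trapped. */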
1825
1826 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1827 unsigned long start, unsigned long length)
1828 {
1829 unsigned long addr;
1830 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1831 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1832 if ((addr - start) < length) {
1833 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1834 }
1835 }
1836 }
1837
1838 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1839 int dirty_flags)
1840 {
1841 CPUState *env;
1842 unsigned long length, start1;
1843 int i, mask, len;
1844 uint8_t *p;
1845
1846 start &= TARGET_PAGE_MASK;
1847 end = TARGET_PAGE_ALIGN(end);
1848
1849 length = end - start;
1850 if (length == 0)
1851 return;
1852 len = length >> TARGET_PAGE_BITS;
1853 #ifdef USE_KQEMU
1854 /* XXX: should not depend on cpu context */
1855 env = first_cpu;
1856 if (env->kqemu_enabled) {
1857 ram_addr_t addr;
1858 addr = start;
1859 for(i = 0; i < len; i++) {
1860 kqemu_set_notdirty(env, addr);
1861 addr += TARGET_PAGE_SIZE;
1862 }
1863 }
1864 #endif
1865 mask = ~dirty_flags;
1866 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1867 for(i = 0; i < len; i++)
1868 p[i] &= mask;
1869
1870 /* we modify the TLB cache so that the dirty bit will be set again
1871 when accessing the range */
1872 start1 = start + (unsigned long)phys_ram_base;
1873 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1874 for(i = 0; i < CPU_TLB_SIZE; i++)
1875 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1876 for(i = 0; i < CPU_TLB_SIZE; i++)
1877 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1878 #if (NB_MMU_MODES >= 3)
1879 for(i = 0; i < CPU_TLB_SIZE; i++)
1880 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1881 #endif
1882 #if (NB_MMU_MODES >= 4)
1883 for(i = 0; i < CPU_TLB_SIZE; i++)
1884 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1885 #endif
1886 #if (NB_MMU_MODES >= 5)
1887 for(i = 0; i < CPU_TLB_SIZE; i++)
1888 tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
1889 #endif
1890 }
1891 }
1892
1893 int cpu_physical_memory_set_dirty_tracking(int enable)
1894 {
1895 in_migration = enable;
1896 return 0;
1897 }
1898
1899 int cpu_physical_memory_get_dirty_tracking(void)
1900 {
1901 return in_migration;
1902 }
1903
1904 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1905 {
1906 if (kvm_enabled())
1907 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1908 }
1909
1910 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1911 {
1912 ram_addr_t ram_addr;
1913
1914 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1915 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1916 tlb_entry->addend - (unsigned long)phys_ram_base;
1917 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1918 tlb_entry->addr_write |= TLB_NOTDIRTY;
1919 }
1920 }
1921 }
1922
1923 /* update the TLB according to the current state of the dirty bits */
1924 void cpu_tlb_update_dirty(CPUState *env)
1925 {
1926 int i;
1927 for(i = 0; i < CPU_TLB_SIZE; i++)
1928 tlb_update_dirty(&env->tlb_table[0][i]);
1929 for(i = 0; i < CPU_TLB_SIZE; i++)
1930 tlb_update_dirty(&env->tlb_table[1][i]);
1931 #if (NB_MMU_MODES >= 3)
1932 for(i = 0; i < CPU_TLB_SIZE; i++)
1933 tlb_update_dirty(&env->tlb_table[2][i]);
1934 #endif
1935 #if (NB_MMU_MODES >= 4)
1936 for(i = 0; i < CPU_TLB_SIZE; i++)
1937 tlb_update_dirty(&env->tlb_table[3][i]);
1938 #endif
1939 #if (NB_MMU_MODES >= 5)
1940 for(i = 0; i < CPU_TLB_SIZE; i++)
1941 tlb_update_dirty(&env->tlb_table[4][i]);
1942 #endif
1943 }
1944
1945 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1946 {
1947 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1948 tlb_entry->addr_write = vaddr;
1949 }
1950
1951 /* update the TLB corresponding to virtual page vaddr
1952 so that it is no longer dirty */
1953 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1954 {
1955 int i;
1956
1957 vaddr &= TARGET_PAGE_MASK;
1958 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1959 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1960 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1961 #if (NB_MMU_MODES >= 3)
1962 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1963 #endif
1964 #if (NB_MMU_MODES >= 4)
1965 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1966 #endif
1967 #if (NB_MMU_MODES >= 5)
1968 tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
1969 #endif
1970 }
1971
1972 /* add a new TLB entry. At most one entry for a given virtual address
1973 is permitted. Return 0 if OK or 2 if the page could not be mapped
1974 (can only happen in non-SOFTMMU mode for I/O pages or pages
1975 conflicting with the host address space). */
1976 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1977 target_phys_addr_t paddr, int prot,
1978 int mmu_idx, int is_softmmu)
1979 {
1980 PhysPageDesc *p;
1981 unsigned long pd;
1982 unsigned int index;
1983 target_ulong address;
1984 target_ulong code_address;
1985 target_phys_addr_t addend;
1986 int ret;
1987 CPUTLBEntry *te;
1988 CPUWatchpoint *wp;
1989 target_phys_addr_t iotlb;
1990
1991 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1992 if (!p) {
1993 pd = IO_MEM_UNASSIGNED;
1994 } else {
1995 pd = p->phys_offset;
1996 }
1997 #if defined(DEBUG_TLB)
1998 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1999 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2000 #endif
2001
2002 ret = 0;
2003 address = vaddr;
2004 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2005 /* IO memory case (romd handled later) */
2006 address |= TLB_MMIO;
2007 }
2008 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2009 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2010 /* Normal RAM. */
2011 iotlb = pd & TARGET_PAGE_MASK;
2012 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2013 iotlb |= IO_MEM_NOTDIRTY;
2014 else
2015 iotlb |= IO_MEM_ROM;
2016 } else {
2017 /* IO handlers are currently passed a physical address.
2018 It would be nice to pass an offset from the base address
2019 of that region. This would avoid having to special case RAM,
2020 and avoid full address decoding in every device.
2021 We can't use the high bits of pd for this because
2022 IO_MEM_ROMD uses these as a ram address. */
2023 iotlb = (pd & ~TARGET_PAGE_MASK);
2024 if (p) {
2025 iotlb += p->region_offset;
2026 } else {
2027 iotlb += paddr;
2028 }
2029 }
2030
2031 code_address = address;
2032 /* Make accesses to pages with watchpoints go via the
2033 watchpoint trap routines. */
2034 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2035 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2036 iotlb = io_mem_watch + paddr;
2037 /* TODO: The memory case can be optimized by not trapping
2038 reads of pages with a write breakpoint. */
2039 address |= TLB_MMIO;
2040 }
2041 }
2042
2043 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2044 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2045 te = &env->tlb_table[mmu_idx][index];
2046 te->addend = addend - vaddr;
2047 if (prot & PAGE_READ) {
2048 te->addr_read = address;
2049 } else {
2050 te->addr_read = -1;
2051 }
2052
2053 if (prot & PAGE_EXEC) {
2054 te->addr_code = code_address;
2055 } else {
2056 te->addr_code = -1;
2057 }
2058 if (prot & PAGE_WRITE) {
2059 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2060 (pd & IO_MEM_ROMD)) {
2061 /* Write access calls the I/O callback. */
2062 te->addr_write = address | TLB_MMIO;
2063 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2064 !cpu_physical_memory_is_dirty(pd)) {
2065 te->addr_write = address | TLB_NOTDIRTY;
2066 } else {
2067 te->addr_write = address;
2068 }
2069 } else {
2070 te->addr_write = -1;
2071 }
2072 return ret;
2073 }
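/* Illustrative sketch (an assumption about caller code, not part of
   this file): a target's tlb_fill() resolves the guest fault and then
   installs the translation with something like

       ret = tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                               paddr & TARGET_PAGE_MASK,
                               PAGE_READ | PAGE_WRITE, mmu_idx,
                               is_softmmu);

   where vaddr, paddr and the protection bits come from the target MMU
   walk, and mmu_idx selects which of the (up to five) tlb_table[] and
   iotlb[] sets receives the entry. */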
2074
2075 #else
2076
2077 void tlb_flush(CPUState *env, int flush_global)
2078 {
2079 }
2080
2081 void tlb_flush_page(CPUState *env, target_ulong addr)
2082 {
2083 }
2084
2085 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2086 target_phys_addr_t paddr, int prot,
2087 int mmu_idx, int is_softmmu)
2088 {
2089 return 0;
2090 }
2091
2092 /* dump memory mappings */
2093 void page_dump(FILE *f)
2094 {
2095 unsigned long start, end;
2096 int i, j, prot, prot1;
2097 PageDesc *p;
2098
2099 fprintf(f, "%-8s %-8s %-8s %s\n",
2100 "start", "end", "size", "prot");
2101 start = -1;
2102 end = -1;
2103 prot = 0;
2104 for(i = 0; i <= L1_SIZE; i++) {
2105 if (i < L1_SIZE)
2106 p = l1_map[i];
2107 else
2108 p = NULL;
2109 for(j = 0; j < L2_SIZE; j++) {
2110 if (!p)
2111 prot1 = 0;
2112 else
2113 prot1 = p[j].flags;
2114 if (prot1 != prot) {
2115 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2116 if (start != -1) {
2117 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2118 start, end, end - start,
2119 prot & PAGE_READ ? 'r' : '-',
2120 prot & PAGE_WRITE ? 'w' : '-',
2121 prot & PAGE_EXEC ? 'x' : '-');
2122 }
2123 if (prot1 != 0)
2124 start = end;
2125 else
2126 start = -1;
2127 prot = prot1;
2128 }
2129 if (!p)
2130 break;
2131 }
2132 }
2133 }
2134
2135 int page_get_flags(target_ulong address)
2136 {
2137 PageDesc *p;
2138
2139 p = page_find(address >> TARGET_PAGE_BITS);
2140 if (!p)
2141 return 0;
2142 return p->flags;
2143 }
2144
2145 /* modify the flags of a page and invalidate the code if
2146 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2147 depending on PAGE_WRITE */
2148 void page_set_flags(target_ulong start, target_ulong end, int flags)
2149 {
2150 PageDesc *p;
2151 target_ulong addr;
2152
2153 /* mmap_lock should already be held. */
2154 start = start & TARGET_PAGE_MASK;
2155 end = TARGET_PAGE_ALIGN(end);
2156 if (flags & PAGE_WRITE)
2157 flags |= PAGE_WRITE_ORG;
2158 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2159 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2160 /* We may be called for host regions that are outside guest
2161 address space. */
2162 if (!p)
2163 return;
2164 /* if the write protection is set, then we invalidate the code
2165 inside */
2166 if (!(p->flags & PAGE_WRITE) &&
2167 (flags & PAGE_WRITE) &&
2168 p->first_tb) {
2169 tb_invalidate_phys_page(addr, 0, NULL);
2170 }
2171 p->flags = flags;
2172 }
2173 }
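/* Illustrative sketch (assumption): the user-mode mmap emulation ends
   with a call such as

       page_set_flags(start, start + len, prot | PAGE_VALID);

   so that a later guest write to a page holding translated code takes
   the write-protection fault path and invalidates the stale TBs. */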
2174
2175 int page_check_range(target_ulong start, target_ulong len, int flags)
2176 {
2177 PageDesc *p;
2178 target_ulong end;
2179 target_ulong addr;
2180
2181 if (start + len < start)
2182 /* we've wrapped around */
2183 return -1;
2184
2185 end = TARGET_PAGE_ALIGN(start + len); /* must do this before we lose bits in the next step */
2186 start = start & TARGET_PAGE_MASK;
2187
2188 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2189 p = page_find(addr >> TARGET_PAGE_BITS);
2190 if (!p)
2191 return -1;
2192 if (!(p->flags & PAGE_VALID))
2193 return -1;
2194
2195 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2196 return -1;
2197 if (flags & PAGE_WRITE) {
2198 if (!(p->flags & PAGE_WRITE_ORG))
2199 return -1;
2200 /* unprotect the page if it was put read-only because it
2201 contains translated code */
2202 if (!(p->flags & PAGE_WRITE)) {
2203 if (!page_unprotect(addr, 0, NULL))
2204 return -1;
2205 }
2206 return 0;
2207 }
2208 }
2209 return 0;
2210 }
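/* Illustrative sketch (assumption): guest buffer validation in
   user-only helpers boils down to

       if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE))
           return -EFAULT;  // hypothetical error path

   relying on the implicit unprotect above when the range holds
   translated code. */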
2211
2212 /* called from signal handler: invalidate the code and unprotect the
2213 page. Return TRUE if the fault was successfully handled. */
2214 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2215 {
2216 unsigned int page_index, prot, pindex;
2217 PageDesc *p, *p1;
2218 target_ulong host_start, host_end, addr;
2219
2220 /* Technically this isn't safe inside a signal handler. However, we
2221 know this only ever happens in a synchronous SEGV handler, so in
2222 practice it seems to be ok. */
2223 mmap_lock();
2224
2225 host_start = address & qemu_host_page_mask;
2226 page_index = host_start >> TARGET_PAGE_BITS;
2227 p1 = page_find(page_index);
2228 if (!p1) {
2229 mmap_unlock();
2230 return 0;
2231 }
2232 host_end = host_start + qemu_host_page_size;
2233 p = p1;
2234 prot = 0;
2235 for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2236 prot |= p->flags;
2237 p++;
2238 }
2239 /* if the page was really writable, then we change its
2240 protection back to writable */
2241 if (prot & PAGE_WRITE_ORG) {
2242 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2243 if (!(p1[pindex].flags & PAGE_WRITE)) {
2244 mprotect((void *)g2h(host_start), qemu_host_page_size,
2245 (prot & PAGE_BITS) | PAGE_WRITE);
2246 p1[pindex].flags |= PAGE_WRITE;
2247 /* and since the content will be modified, we must invalidate
2248 the corresponding translated code. */
2249 tb_invalidate_phys_page(address, pc, puc);
2250 #ifdef DEBUG_TB_CHECK
2251 tb_invalidate_check(address);
2252 #endif
2253 mmap_unlock();
2254 return 1;
2255 }
2256 }
2257 mmap_unlock();
2258 return 0;
2259 }
2260
2261 static inline void tlb_set_dirty(CPUState *env,
2262 unsigned long addr, target_ulong vaddr)
2263 {
2264 }
2265 #endif /* defined(CONFIG_USER_ONLY) */
2266
2267 #if !defined(CONFIG_USER_ONLY)
2268
2269 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2270 ram_addr_t memory, ram_addr_t region_offset);
2271 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2272 ram_addr_t orig_memory, ram_addr_t region_offset);
2273 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2274 need_subpage) \
2275 do { \
2276 if (addr > start_addr) \
2277 start_addr2 = 0; \
2278 else { \
2279 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2280 if (start_addr2 > 0) \
2281 need_subpage = 1; \
2282 } \
2283 \
2284 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2285 end_addr2 = TARGET_PAGE_SIZE - 1; \
2286 else { \
2287 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2288 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2289 need_subpage = 1; \
2290 } \
2291 } while (0)
2292
2293 /* register physical memory. 'size' must be a multiple of the target
2294 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2295 io memory page. The address used when calling the IO function is
2296 the offset from the start of the region, plus region_offset. Both
2297 start_addr and region_offset are rounded down to a page boundary
2298 before calculating this offset. This should not be a problem unless
2299 the low bits of start_addr and region_offset differ. */
2300 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2301 ram_addr_t size,
2302 ram_addr_t phys_offset,
2303 ram_addr_t region_offset)
2304 {
2305 target_phys_addr_t addr, end_addr;
2306 PhysPageDesc *p;
2307 CPUState *env;
2308 ram_addr_t orig_size = size;
2309 void *subpage;
2310
2311 #ifdef USE_KQEMU
2312 /* XXX: should not depend on cpu context */
2313 env = first_cpu;
2314 if (env->kqemu_enabled) {
2315 kqemu_set_phys_mem(start_addr, size, phys_offset);
2316 }
2317 #endif
2318 if (kvm_enabled())
2319 kvm_set_phys_mem(start_addr, size, phys_offset);
2320
2321 if (phys_offset == IO_MEM_UNASSIGNED) {
2322 region_offset = start_addr;
2323 }
2324 region_offset &= TARGET_PAGE_MASK;
2325 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2326 end_addr = start_addr + (target_phys_addr_t)size;
2327 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2328 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2329 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2330 ram_addr_t orig_memory = p->phys_offset;
2331 target_phys_addr_t start_addr2, end_addr2;
2332 int need_subpage = 0;
2333
2334 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2335 need_subpage);
2336 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2337 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2338 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2339 &p->phys_offset, orig_memory,
2340 p->region_offset);
2341 } else {
2342 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2343 >> IO_MEM_SHIFT];
2344 }
2345 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2346 region_offset);
2347 p->region_offset = 0;
2348 } else {
2349 p->phys_offset = phys_offset;
2350 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2351 (phys_offset & IO_MEM_ROMD))
2352 phys_offset += TARGET_PAGE_SIZE;
2353 }
2354 } else {
2355 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2356 p->phys_offset = phys_offset;
2357 p->region_offset = region_offset;
2358 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2359 (phys_offset & IO_MEM_ROMD)) {
2360 phys_offset += TARGET_PAGE_SIZE;
2361 } else {
2362 target_phys_addr_t start_addr2, end_addr2;
2363 int need_subpage = 0;
2364
2365 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2366 end_addr2, need_subpage);
2367
2368 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2369 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2370 &p->phys_offset, IO_MEM_UNASSIGNED,
2371 addr & TARGET_PAGE_MASK);
2372 subpage_register(subpage, start_addr2, end_addr2,
2373 phys_offset, region_offset);
2374 p->region_offset = 0;
2375 }
2376 }
2377 }
2378 region_offset += TARGET_PAGE_SIZE;
2379 }
2380
2381 /* since each CPU stores ram addresses in its TLB cache, we must
2382 reset the modified entries */
2383 /* XXX: slow! */
2384 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2385 tlb_flush(env, 1);
2386 }
2387 }
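/* Illustrative sketch (assumption): with a non-zero region_offset a
   device can expose a window into a larger register file, e.g.

       cpu_register_physical_memory_offset(base, 0x100, my_io_index, 0x40);

   makes a guest access at base + x invoke the handlers behind the
   hypothetical my_io_index handle with address 0x40 + x. Plain RAM and
   ordinary MMIO go through the region_offset == 0 wrapper,
   cpu_register_physical_memory(). */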
2388
2389 /* XXX: temporary until new memory mapping API */
2390 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2391 {
2392 PhysPageDesc *p;
2393
2394 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2395 if (!p)
2396 return IO_MEM_UNASSIGNED;
2397 return p->phys_offset;
2398 }
2399
2400 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2401 {
2402 if (kvm_enabled())
2403 kvm_coalesce_mmio_region(addr, size);
2404 }
2405
2406 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2407 {
2408 if (kvm_enabled())
2409 kvm_uncoalesce_mmio_region(addr, size);
2410 }
2411
2412 /* XXX: better than nothing */
2413 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2414 {
2415 ram_addr_t addr;
2416 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2417 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2418 (uint64_t)size, (uint64_t)phys_ram_size);
2419 abort();
2420 }
2421 addr = phys_ram_alloc_offset;
2422 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2423 return addr;
2424 }
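/* Illustrative sketch (assumption): board initialization pairs this
   allocator with the registration call above, e.g.

       ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);

   IO_MEM_RAM is zero, so the last argument is effectively the
   allocated offset marked as ordinary RAM. */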
2425
2426 void qemu_ram_free(ram_addr_t addr)
2427 {
2428 }
2429
2430 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2431 {
2432 #ifdef DEBUG_UNASSIGNED
2433 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2434 #endif
2435 #if defined(TARGET_SPARC)
2436 do_unassigned_access(addr, 0, 0, 0, 1);
2437 #endif
2438 return 0;
2439 }
2440
2441 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2442 {
2443 #ifdef DEBUG_UNASSIGNED
2444 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2445 #endif
2446 #if defined(TARGET_SPARC)
2447 do_unassigned_access(addr, 0, 0, 0, 2);
2448 #endif
2449 return 0;
2450 }
2451
2452 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2453 {
2454 #ifdef DEBUG_UNASSIGNED
2455 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2456 #endif
2457 #if defined(TARGET_SPARC)
2458 do_unassigned_access(addr, 0, 0, 0, 4);
2459 #endif
2460 return 0;
2461 }
2462
2463 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2464 {
2465 #ifdef DEBUG_UNASSIGNED
2466 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2467 #endif
2468 #if defined(TARGET_SPARC)
2469 do_unassigned_access(addr, 1, 0, 0, 1);
2470 #endif
2471 }
2472
2473 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2474 {
2475 #ifdef DEBUG_UNASSIGNED
2476 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2477 #endif
2478 #if defined(TARGET_SPARC)
2479 do_unassigned_access(addr, 1, 0, 0, 2);
2480 #endif
2481 }
2482
2483 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2484 {
2485 #ifdef DEBUG_UNASSIGNED
2486 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2487 #endif
2488 #if defined(TARGET_SPARC)
2489 do_unassigned_access(addr, 1, 0, 0, 4);
2490 #endif
2491 }
2492
2493 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2494 unassigned_mem_readb,
2495 unassigned_mem_readw,
2496 unassigned_mem_readl,
2497 };
2498
2499 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2500 unassigned_mem_writeb,
2501 unassigned_mem_writew,
2502 unassigned_mem_writel,
2503 };
2504
2505 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2506 uint32_t val)
2507 {
2508 int dirty_flags;
2509 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2510 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2511 #if !defined(CONFIG_USER_ONLY)
2512 tb_invalidate_phys_page_fast(ram_addr, 1);
2513 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2514 #endif
2515 }
2516 stb_p(phys_ram_base + ram_addr, val);
2517 #ifdef USE_KQEMU
2518 if (cpu_single_env->kqemu_enabled &&
2519 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2520 kqemu_modify_page(cpu_single_env, ram_addr);
2521 #endif
2522 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2523 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2524 /* we remove the notdirty callback only if the code has been
2525 flushed */
2526 if (dirty_flags == 0xff)
2527 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2528 }
2529
2530 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2531 uint32_t val)
2532 {
2533 int dirty_flags;
2534 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2535 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2536 #if !defined(CONFIG_USER_ONLY)
2537 tb_invalidate_phys_page_fast(ram_addr, 2);
2538 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2539 #endif
2540 }
2541 stw_p(phys_ram_base + ram_addr, val);
2542 #ifdef USE_KQEMU
2543 if (cpu_single_env->kqemu_enabled &&
2544 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2545 kqemu_modify_page(cpu_single_env, ram_addr);
2546 #endif
2547 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2548 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2549 /* we remove the notdirty callback only if the code has been
2550 flushed */
2551 if (dirty_flags == 0xff)
2552 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2553 }
2554
2555 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2556 uint32_t val)
2557 {
2558 int dirty_flags;
2559 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2560 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2561 #if !defined(CONFIG_USER_ONLY)
2562 tb_invalidate_phys_page_fast(ram_addr, 4);
2563 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2564 #endif
2565 }
2566 stl_p(phys_ram_base + ram_addr, val);
2567 #ifdef USE_KQEMU
2568 if (cpu_single_env->kqemu_enabled &&
2569 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2570 kqemu_modify_page(cpu_single_env, ram_addr);
2571 #endif
2572 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2573 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2574 /* we remove the notdirty callback only if the code has been
2575 flushed */
2576 if (dirty_flags == 0xff)
2577 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2578 }
2579
2580 static CPUReadMemoryFunc *error_mem_read[3] = {
2581 NULL, /* never used */
2582 NULL, /* never used */
2583 NULL, /* never used */
2584 };
2585
2586 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2587 notdirty_mem_writeb,
2588 notdirty_mem_writew,
2589 notdirty_mem_writel,
2590 };
2591
2592 /* Generate a debug exception if a watchpoint has been hit. */
2593 static void check_watchpoint(int offset, int len_mask, int flags)
2594 {
2595 CPUState *env = cpu_single_env;
2596 target_ulong pc, cs_base;
2597 TranslationBlock *tb;
2598 target_ulong vaddr;
2599 CPUWatchpoint *wp;
2600 int cpu_flags;
2601
2602 if (env->watchpoint_hit) {
2603 /* We re-entered the check after replacing the TB. Now raise
2604 * the debug interrupt so that it will trigger after the
2605 * current instruction. */
2606 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2607 return;
2608 }
2609 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2610 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2611 if ((vaddr == (wp->vaddr & len_mask) ||
2612 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2613 wp->flags |= BP_WATCHPOINT_HIT;
2614 if (!env->watchpoint_hit) {
2615 env->watchpoint_hit = wp;
2616 tb = tb_find_pc(env->mem_io_pc);
2617 if (!tb) {
2618 cpu_abort(env, "check_watchpoint: could not find TB for "
2619 "pc=%p", (void *)env->mem_io_pc);
2620 }
2621 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2622 tb_phys_invalidate(tb, -1);
2623 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2624 env->exception_index = EXCP_DEBUG;
2625 } else {
2626 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2627 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2628 }
2629 cpu_resume_from_signal(env, NULL);
2630 }
2631 } else {
2632 wp->flags &= ~BP_WATCHPOINT_HIT;
2633 }
2634 }
2635 }
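/* Illustrative sketch (assumption): watchpoints reach this path after
   being installed with the generic helper, e.g. from the gdbstub:

       CPUWatchpoint *wp;
       cpu_watchpoint_insert(env, addr, len, BP_MEM_WRITE | BP_GDB, &wp);

   tlb_set_page_exec() then routes accesses to the page through
   io_mem_watch, so the check above runs on every hit. */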
2636
2637 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2638 so these check for a hit then pass through to the normal out-of-line
2639 phys routines. */
2640 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2641 {
2642 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2643 return ldub_phys(addr);
2644 }
2645
2646 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2647 {
2648 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2649 return lduw_phys(addr);
2650 }
2651
2652 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2653 {
2654 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2655 return ldl_phys(addr);
2656 }
2657
2658 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2659 uint32_t val)
2660 {
2661 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2662 stb_phys(addr, val);
2663 }
2664
2665 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2666 uint32_t val)
2667 {
2668 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2669 stw_phys(addr, val);
2670 }
2671
2672 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2673 uint32_t val)
2674 {
2675 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2676 stl_phys(addr, val);
2677 }
2678
2679 static CPUReadMemoryFunc *watch_mem_read[3] = {
2680 watch_mem_readb,
2681 watch_mem_readw,
2682 watch_mem_readl,
2683 };
2684
2685 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2686 watch_mem_writeb,
2687 watch_mem_writew,
2688 watch_mem_writel,
2689 };
2690
2691 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2692 unsigned int len)
2693 {
2694 uint32_t ret;
2695 unsigned int idx;
2696
2697 idx = SUBPAGE_IDX(addr);
2698 #if defined(DEBUG_SUBPAGE)
2699 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2700 mmio, len, addr, idx);
2701 #endif
2702 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2703 addr + mmio->region_offset[idx][0][len]);
2704
2705 return ret;
2706 }
2707
2708 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2709 uint32_t value, unsigned int len)
2710 {
2711 unsigned int idx;
2712
2713 idx = SUBPAGE_IDX(addr);
2714 #if defined(DEBUG_SUBPAGE)
2715 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2716 mmio, len, addr, idx, value);
2717 #endif
2718 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2719 addr + mmio->region_offset[idx][1][len],
2720 value);
2721 }
2722
2723 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2724 {
2725 #if defined(DEBUG_SUBPAGE)
2726 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2727 #endif
2728
2729 return subpage_readlen(opaque, addr, 0);
2730 }
2731
2732 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2733 uint32_t value)
2734 {
2735 #if defined(DEBUG_SUBPAGE)
2736 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2737 #endif
2738 subpage_writelen(opaque, addr, value, 0);
2739 }
2740
2741 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2742 {
2743 #if defined(DEBUG_SUBPAGE)
2744 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2745 #endif
2746
2747 return subpage_readlen(opaque, addr, 1);
2748 }
2749
2750 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2751 uint32_t value)
2752 {
2753 #if defined(DEBUG_SUBPAGE)
2754 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2755 #endif
2756 subpage_writelen(opaque, addr, value, 1);
2757 }
2758
2759 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2760 {
2761 #if defined(DEBUG_SUBPAGE)
2762 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2763 #endif
2764
2765 return subpage_readlen(opaque, addr, 2);
2766 }
2767
2768 static void subpage_writel (void *opaque,
2769 target_phys_addr_t addr, uint32_t value)
2770 {
2771 #if defined(DEBUG_SUBPAGE)
2772 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2773 #endif
2774 subpage_writelen(opaque, addr, value, 2);
2775 }
2776
2777 static CPUReadMemoryFunc *subpage_read[] = {
2778 &subpage_readb,
2779 &subpage_readw,
2780 &subpage_readl,
2781 };
2782
2783 static CPUWriteMemoryFunc *subpage_write[] = {
2784 &subpage_writeb,
2785 &subpage_writew,
2786 &subpage_writel,
2787 };
2788
2789 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2790 ram_addr_t memory, ram_addr_t region_offset)
2791 {
2792 int idx, eidx;
2793 unsigned int i;
2794
2795 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2796 return -1;
2797 idx = SUBPAGE_IDX(start);
2798 eidx = SUBPAGE_IDX(end);
2799 #if defined(DEBUG_SUBPAGE)
2800 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2801 mmio, start, end, idx, eidx, memory);
2802 #endif
2803 memory >>= IO_MEM_SHIFT;
2804 for (; idx <= eidx; idx++) {
2805 for (i = 0; i < 4; i++) {
2806 if (io_mem_read[memory][i]) {
2807 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2808 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2809 mmio->region_offset[idx][0][i] = region_offset;
2810 }
2811 if (io_mem_write[memory][i]) {
2812 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2813 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2814 mmio->region_offset[idx][1][i] = region_offset;
2815 }
2816 }
2817 }
2818
2819 return 0;
2820 }
2821
2822 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2823 ram_addr_t orig_memory, ram_addr_t region_offset)
2824 {
2825 subpage_t *mmio;
2826 int subpage_memory;
2827
2828 mmio = qemu_mallocz(sizeof(subpage_t));
2829
2830 mmio->base = base;
2831 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2832 #if defined(DEBUG_SUBPAGE)
2833 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2834 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2835 #endif
2836 *phys = subpage_memory | IO_MEM_SUBPAGE;
2837 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2838 region_offset);
2839
2840 return mmio;
2841 }
2842
2843 static int get_free_io_mem_idx(void)
2844 {
2845 int i;
2846
2847 for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
2848 if (!io_mem_used[i]) {
2849 io_mem_used[i] = 1;
2850 return i;
2851 }
2852
2853 return -1;
2854 }
2855
2856 static void io_mem_init(void)
2857 {
2858 int i;
2859
2860 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2861 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2862 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2863 for (i = 0; i < 5; i++)
2864 io_mem_used[i] = 1;
2865
2866 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2867 watch_mem_write, NULL);
2868 /* alloc dirty bits array */
2869 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2870 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2871 }
2872
2873 /* mem_read and mem_write are arrays of functions containing the
2874 function to access byte (index 0), word (index 1) and dword (index
2875 2). Functions can be omitted with a NULL function pointer. The
2876 registered functions may be modified dynamically later.
2877 If io_index is non-zero, the corresponding io zone is
2878 modified. If it is zero, a new io zone is allocated. The return
2879 value can be used with cpu_register_physical_memory(); -1 is
2880 returned on error. */
2881 int cpu_register_io_memory(int io_index,
2882 CPUReadMemoryFunc **mem_read,
2883 CPUWriteMemoryFunc **mem_write,
2884 void *opaque)
2885 {
2886 int i, subwidth = 0;
2887
2888 if (io_index <= 0) {
2889 io_index = get_free_io_mem_idx();
2890 if (io_index == -1)
2891 return io_index;
2892 } else {
2893 if (io_index >= IO_MEM_NB_ENTRIES)
2894 return -1;
2895 }
2896
2897 for(i = 0; i < 3; i++) {
2898 if (!mem_read[i] || !mem_write[i])
2899 subwidth = IO_MEM_SUBWIDTH;
2900 io_mem_read[io_index][i] = mem_read[i];
2901 io_mem_write[io_index][i] = mem_write[i];
2902 }
2903 io_mem_opaque[io_index] = opaque;
2904 return (io_index << IO_MEM_SHIFT) | subwidth;
2905 }
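/* Illustrative sketch (assumption): a typical device registers its
   MMIO handlers like

       static CPUReadMemoryFunc *my_read[3] = {
           my_readb, my_readw, my_readl,    // hypothetical callbacks
       };
       static CPUWriteMemoryFunc *my_write[3] = {
           my_writeb, my_writew, my_writel,
       };
       ...
       io = cpu_register_io_memory(0, my_read, my_write, opaque);
       cpu_register_physical_memory(base, size, io);

   Leaving an entry NULL sets IO_MEM_SUBWIDTH in the returned handle,
   which forces the subpage path when the region is mapped. */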
2906
2907 void cpu_unregister_io_memory(int io_table_address)
2908 {
2909 int i;
2910 int io_index = io_table_address >> IO_MEM_SHIFT;
2911
2912 for (i = 0; i < 3; i++) {
2913 io_mem_read[io_index][i] = unassigned_mem_read[i];
2914 io_mem_write[io_index][i] = unassigned_mem_write[i];
2915 }
2916 io_mem_opaque[io_index] = NULL;
2917 io_mem_used[io_index] = 0;
2918 }
2919
2920 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2921 {
2922 return io_mem_write[io_index >> IO_MEM_SHIFT];
2923 }
2924
2925 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2926 {
2927 return io_mem_read[io_index >> IO_MEM_SHIFT];
2928 }
2929
2930 #endif /* !defined(CONFIG_USER_ONLY) */
2931
2932 /* physical memory access (slow version, mainly for debug) */
2933 #if defined(CONFIG_USER_ONLY)
2934 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2935 int len, int is_write)
2936 {
2937 int l, flags;
2938 target_ulong page;
2939 void * p;
2940
2941 while (len > 0) {
2942 page = addr & TARGET_PAGE_MASK;
2943 l = (page + TARGET_PAGE_SIZE) - addr;
2944 if (l > len)
2945 l = len;
2946 flags = page_get_flags(page);
2947 if (!(flags & PAGE_VALID))
2948 return;
2949 if (is_write) {
2950 if (!(flags & PAGE_WRITE))
2951 return;
2952 /* XXX: this code should not depend on lock_user */
2953 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2954 /* FIXME - should this return an error rather than just fail? */
2955 return;
2956 memcpy(p, buf, l);
2957 unlock_user(p, addr, l);
2958 } else {
2959 if (!(flags & PAGE_READ))
2960 return;
2961 /* XXX: this code should not depend on lock_user */
2962 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2963 /* FIXME - should this return an error rather than just fail? */
2964 return;
2965 memcpy(buf, p, l);
2966 unlock_user(p, addr, 0);
2967 }
2968 len -= l;
2969 buf += l;
2970 addr += l;
2971 }
2972 }
2973
2974 #else
2975 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2976 int len, int is_write)
2977 {
2978 int l, io_index;
2979 uint8_t *ptr;
2980 uint32_t val;
2981 target_phys_addr_t page;
2982 unsigned long pd;
2983 PhysPageDesc *p;
2984
2985 while (len > 0) {
2986 page = addr & TARGET_PAGE_MASK;
2987 l = (page + TARGET_PAGE_SIZE) - addr;
2988 if (l > len)
2989 l = len;
2990 p = phys_page_find(page >> TARGET_PAGE_BITS);
2991 if (!p) {
2992 pd = IO_MEM_UNASSIGNED;
2993 } else {
2994 pd = p->phys_offset;
2995 }
2996
2997 if (is_write) {
2998 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2999 target_phys_addr_t addr1 = addr;
3000 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3001 if (p)
3002 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3003 /* XXX: could force cpu_single_env to NULL to avoid
3004 potential bugs */
3005 if (l >= 4 && ((addr1 & 3) == 0)) {
3006 /* 32 bit write access */
3007 val = ldl_p(buf);
3008 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3009 l = 4;
3010 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3011 /* 16 bit write access */
3012 val = lduw_p(buf);
3013 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3014 l = 2;
3015 } else {
3016 /* 8 bit write access */
3017 val = ldub_p(buf);
3018 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3019 l = 1;
3020 }
3021 } else {
3022 unsigned long addr1;
3023 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3024 /* RAM case */
3025 ptr = phys_ram_base + addr1;
3026 memcpy(ptr, buf, l);
3027 if (!cpu_physical_memory_is_dirty(addr1)) {
3028 /* invalidate code */
3029 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3030 /* set dirty bit */
3031 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3032 (0xff & ~CODE_DIRTY_FLAG);
3033 }
3034 }
3035 } else {
3036 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3037 !(pd & IO_MEM_ROMD)) {
3038 target_phys_addr_t addr1 = addr;
3039 /* I/O case */
3040 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3041 if (p)
3042 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3043 if (l >= 4 && ((addr1 & 3) == 0)) {
3044 /* 32 bit read access */
3045 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3046 stl_p(buf, val);
3047 l = 4;
3048 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3049 /* 16 bit read access */
3050 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3051 stw_p(buf, val);
3052 l = 2;
3053 } else {
3054 /* 8 bit read access */
3055 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3056 stb_p(buf, val);
3057 l = 1;
3058 }
3059 } else {
3060 /* RAM case */
3061 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3062 (addr & ~TARGET_PAGE_MASK);
3063 memcpy(buf, ptr, l);
3064 }
3065 }
3066 len -= l;
3067 buf += l;
3068 addr += l;
3069 }
3070 }
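/* Illustrative sketch (assumption): device DMA uses the convenience
   wrappers from cpu-all.h built on this function, e.g.

       cpu_physical_memory_read(desc_addr, (uint8_t *)&desc, sizeof(desc));
       ...
       cpu_physical_memory_write(buf_addr, data, data_len);

   where desc_addr, buf_addr and friends are hypothetical
   guest-physical addresses taken from device registers. */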
3071
3072 /* used for ROM loading : can write in RAM and ROM */
3073 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3074 const uint8_t *buf, int len)
3075 {
3076 int l;
3077 uint8_t *ptr;
3078 target_phys_addr_t page;
3079 unsigned long pd;
3080 PhysPageDesc *p;
3081
3082 while (len > 0) {
3083 page = addr & TARGET_PAGE_MASK;
3084 l = (page + TARGET_PAGE_SIZE) - addr;
3085 if (l > len)
3086 l = len;
3087 p = phys_page_find(page >> TARGET_PAGE_BITS);
3088 if (!p) {
3089 pd = IO_MEM_UNASSIGNED;
3090 } else {
3091 pd = p->phys_offset;
3092 }
3093
3094 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3095 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3096 !(pd & IO_MEM_ROMD)) {
3097 /* do nothing */
3098 } else {
3099 unsigned long addr1;
3100 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3101 /* ROM/RAM case */
3102 ptr = phys_ram_base + addr1;
3103 memcpy(ptr, buf, l);
3104 }
3105 len -= l;
3106 buf += l;
3107 addr += l;
3108 }
3109 }
3110
3111 typedef struct {
3112 void *buffer;
3113 target_phys_addr_t addr;
3114 target_phys_addr_t len;
3115 } BounceBuffer;
3116
3117 static BounceBuffer bounce;
3118
3119 typedef struct MapClient {
3120 void *opaque;
3121 void (*callback)(void *opaque);
3122 LIST_ENTRY(MapClient) link;
3123 } MapClient;
3124
3125 static LIST_HEAD(map_client_list, MapClient) map_client_list
3126 = LIST_HEAD_INITIALIZER(map_client_list);
3127
3128 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3129 {
3130 MapClient *client = qemu_malloc(sizeof(*client));
3131
3132 client->opaque = opaque;
3133 client->callback = callback;
3134 LIST_INSERT_HEAD(&map_client_list, client, link);
3135 return client;
3136 }
3137
3138 void cpu_unregister_map_client(void *_client)
3139 {
3140 MapClient *client = (MapClient *)_client;
3141
3142 LIST_REMOVE(client, link);
3143 }
3144
3145 static void cpu_notify_map_clients(void)
3146 {
3147 MapClient *client;
3148
3149 while (!LIST_EMPTY(&map_client_list)) {
3150 client = LIST_FIRST(&map_client_list);
3151 client->callback(client->opaque);
3152 LIST_REMOVE(client, link);
3153 }
3154 }
3155
3156 /* Map a physical memory region into a host virtual address.
3157 * May map a subset of the requested range, given by and returned in *plen.
3158 * May return NULL if resources needed to perform the mapping are exhausted.
3159 * Use only for reads OR writes - not for read-modify-write operations.
3160 * Use cpu_register_map_client() to know when retrying the map operation is
3161 * likely to succeed.
3162 */
3163 void *cpu_physical_memory_map(target_phys_addr_t addr,
3164 target_phys_addr_t *plen,
3165 int is_write)
3166 {
3167 target_phys_addr_t len = *plen;
3168 target_phys_addr_t done = 0;
3169 int l;
3170 uint8_t *ret = NULL;
3171 uint8_t *ptr;
3172 target_phys_addr_t page;
3173 unsigned long pd;
3174 PhysPageDesc *p;
3175 unsigned long addr1;
3176
3177 while (len > 0) {
3178 page = addr & TARGET_PAGE_MASK;
3179 l = (page + TARGET_PAGE_SIZE) - addr;
3180 if (l > len)
3181 l = len;
3182 p = phys_page_find(page >> TARGET_PAGE_BITS);
3183 if (!p) {
3184 pd = IO_MEM_UNASSIGNED;
3185 } else {
3186 pd = p->phys_offset;
3187 }
3188
3189 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3190 if (done || bounce.buffer) {
3191 break;
3192 }
3193 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3194 bounce.addr = addr;
3195 bounce.len = l;
3196 if (!is_write) {
3197 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3198 }
3199 ptr = bounce.buffer;
3200 } else {
3201 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3202 ptr = phys_ram_base + addr1;
3203 }
3204 if (!done) {
3205 ret = ptr;
3206 } else if (ret + done != ptr) {
3207 break;
3208 }
3209
3210 len -= l;
3211 addr += l;
3212 done += l;
3213 }
3214 *plen = done;
3215 return ret;
3216 }
3217
3218 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3219 * Will also mark the memory as dirty if is_write == 1. access_len gives
3220 * the amount of memory that was actually read or written by the caller.
3221 */
3222 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3223 int is_write, target_phys_addr_t access_len)
3224 {
3225 if (buffer != bounce.buffer) {
3226 if (is_write) {
3227 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
3228 while (access_len) {
3229 unsigned l;
3230 l = TARGET_PAGE_SIZE;
3231 if (l > access_len)
3232 l = access_len;
3233 if (!cpu_physical_memory_is_dirty(addr1)) {
3234 /* invalidate code */
3235 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3236 /* set dirty bit */
3237 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3238 (0xff & ~CODE_DIRTY_FLAG);
3239 }
3240 addr1 += l;
3241 access_len -= l;
3242 }
3243 }
3244 return;
3245 }
3246 if (is_write) {
3247 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3248 }
3249 qemu_free(bounce.buffer);
3250 bounce.buffer = NULL;
3251 cpu_notify_map_clients();
3252 }
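/* Illustrative sketch (assumption): a zero-copy user pairs map and
   unmap like

       target_phys_addr_t plen = size;
       void *buf = cpu_physical_memory_map(addr, &plen, is_write);
       if (!buf) {
           // resources exhausted: register a map client and retry later
       }
       ... read or write at most plen bytes through buf ...
       cpu_physical_memory_unmap(buf, plen, is_write, plen);

   remembering that plen may come back smaller than requested and that
   MMIO regions are serviced through the single bounce buffer. */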
3253
3254 /* warning: addr must be aligned */
3255 uint32_t ldl_phys(target_phys_addr_t addr)
3256 {
3257 int io_index;
3258 uint8_t *ptr;
3259 uint32_t val;
3260 unsigned long pd;
3261 PhysPageDesc *p;
3262
3263 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3264 if (!p) {
3265 pd = IO_MEM_UNASSIGNED;
3266 } else {
3267 pd = p->phys_offset;
3268 }
3269
3270 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3271 !(pd & IO_MEM_ROMD)) {
3272 /* I/O case */
3273 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3274 if (p)
3275 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3276 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3277 } else {
3278 /* RAM case */
3279 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3280 (addr & ~TARGET_PAGE_MASK);
3281 val = ldl_p(ptr);
3282 }
3283 return val;
3284 }
3285
3286 /* warning: addr must be aligned */
3287 uint64_t ldq_phys(target_phys_addr_t addr)
3288 {
3289 int io_index;
3290 uint8_t *ptr;
3291 uint64_t val;
3292 unsigned long pd;
3293 PhysPageDesc *p;
3294
3295 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3296 if (!p) {
3297 pd = IO_MEM_UNASSIGNED;
3298 } else {
3299 pd = p->phys_offset;
3300 }
3301
3302 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3303 !(pd & IO_MEM_ROMD)) {
3304 /* I/O case */
3305 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3306 if (p)
3307 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3308 #ifdef TARGET_WORDS_BIGENDIAN
3309 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3310 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3311 #else
3312 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3313 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3314 #endif
3315 } else {
3316 /* RAM case */
3317 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3318 (addr & ~TARGET_PAGE_MASK);
3319 val = ldq_p(ptr);
3320 }
3321 return val;
3322 }
3323
3324 /* XXX: optimize */
3325 uint32_t ldub_phys(target_phys_addr_t addr)
3326 {
3327 uint8_t val;
3328 cpu_physical_memory_read(addr, &val, 1);
3329 return val;
3330 }
3331
3332 /* XXX: optimize */
3333 uint32_t lduw_phys(target_phys_addr_t addr)
3334 {
3335 uint16_t val;
3336 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3337 return tswap16(val);
3338 }
3339
3340 /* warning: addr must be aligned. The ram page is not marked as dirty
3341 and the code inside is not invalidated. It is useful if the dirty
3342 bits are used to track modified PTEs */
3343 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3344 {
3345 int io_index;
3346 uint8_t *ptr;
3347 unsigned long pd;
3348 PhysPageDesc *p;
3349
3350 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3351 if (!p) {
3352 pd = IO_MEM_UNASSIGNED;
3353 } else {
3354 pd = p->phys_offset;
3355 }
3356
3357 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3358 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3359 if (p)
3360 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3361 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3362 } else {
3363 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3364 ptr = phys_ram_base + addr1;
3365 stl_p(ptr, val);
3366
3367 if (unlikely(in_migration)) {
3368 if (!cpu_physical_memory_is_dirty(addr1)) {
3369 /* invalidate code */
3370 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3371 /* set dirty bit */
3372 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3373 (0xff & ~CODE_DIRTY_FLAG);
3374 }
3375 }
3376 }
3377 }
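/* Illustrative sketch (assumption): target MMU helpers use this to set
   accessed/dirty bits in a guest PTE during a page walk, e.g.

       pte |= PG_ACCESSED_MASK;   // hypothetical target flag
       stl_phys_notdirty(pte_addr, pte);

   so hardware-style PTE updates are not themselves counted as guest
   modifications when the dirty bits are used to track modified PTEs. */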
3378
3379 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3380 {
3381 int io_index;
3382 uint8_t *ptr;
3383 unsigned long pd;
3384 PhysPageDesc *p;
3385
3386 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3387 if (!p) {
3388 pd = IO_MEM_UNASSIGNED;
3389 } else {
3390 pd = p->phys_offset;
3391 }
3392
3393 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3394 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3395 if (p)
3396 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3397 #ifdef TARGET_WORDS_BIGENDIAN
3398 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3399 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3400 #else
3401 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3402 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3403 #endif
3404 } else {
3405 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3406 (addr & ~TARGET_PAGE_MASK);
3407 stq_p(ptr, val);
3408 }
3409 }
3410
3411 /* warning: addr must be aligned */
3412 void stl_phys(target_phys_addr_t addr, uint32_t val)
3413 {
3414 int io_index;
3415 uint8_t *ptr;
3416 unsigned long pd;
3417 PhysPageDesc *p;
3418
3419 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3420 if (!p) {
3421 pd = IO_MEM_UNASSIGNED;
3422 } else {
3423 pd = p->phys_offset;
3424 }
3425
3426 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3427 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3428 if (p)
3429 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3430 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3431 } else {
3432 unsigned long addr1;
3433 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3434 /* RAM case */
3435 ptr = phys_ram_base + addr1;
3436 stl_p(ptr, val);
3437 if (!cpu_physical_memory_is_dirty(addr1)) {
3438 /* invalidate code */
3439 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3440 /* set dirty bit */
3441 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3442 (0xff & ~CODE_DIRTY_FLAG);
3443 }
3444 }
3445 }
3446
3447 /* XXX: optimize */
3448 void stb_phys(target_phys_addr_t addr, uint32_t val)
3449 {
3450 uint8_t v = val;
3451 cpu_physical_memory_write(addr, &v, 1);
3452 }
3453
3454 /* XXX: optimize */
3455 void stw_phys(target_phys_addr_t addr, uint32_t val)
3456 {
3457 uint16_t v = tswap16(val);
3458 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3459 }
3460
3461 /* XXX: optimize */
3462 void stq_phys(target_phys_addr_t addr, uint64_t val)
3463 {
3464 val = tswap64(val);
3465 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3466 }
3467
3468 #endif
3469
3470 /* virtual memory access for debug (includes writing to ROM) */
3471 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3472 uint8_t *buf, int len, int is_write)
3473 {
3474 int l;
3475 target_phys_addr_t phys_addr;
3476 target_ulong page;
3477
3478 while (len > 0) {
3479 page = addr & TARGET_PAGE_MASK;
3480 phys_addr = cpu_get_phys_page_debug(env, page);
3481 /* if no physical page mapped, return an error */
3482 if (phys_addr == -1)
3483 return -1;
3484 l = (page + TARGET_PAGE_SIZE) - addr;
3485 if (l > len)
3486 l = len;
3487 phys_addr += (addr & ~TARGET_PAGE_MASK);
3488 #if !defined(CONFIG_USER_ONLY)
3489 if (is_write)
3490 cpu_physical_memory_write_rom(phys_addr, buf, l);
3491 else
3492 #endif
3493 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3494 len -= l;
3495 buf += l;
3496 addr += l;
3497 }
3498 return 0;
3499 }
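/* Illustrative sketch (assumption): the gdbstub memory packets are
   served through this helper, e.g. a read is roughly

       if (cpu_memory_rw_debug(env, addr, mem_buf, len, 0) != 0)
           put_packet(s, "E14");  // hypothetical error reply

   which works on virtual addresses and can patch ROM when writing
   breakpoints. */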
3500
3501 /* in deterministic execution mode, instructions doing device I/Os
3502 must be at the end of the TB */
3503 void cpu_io_recompile(CPUState *env, void *retaddr)
3504 {
3505 TranslationBlock *tb;
3506 uint32_t n, cflags;
3507 target_ulong pc, cs_base;
3508 uint64_t flags;
3509
3510 tb = tb_find_pc((unsigned long)retaddr);
3511 if (!tb) {
3512 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3513 retaddr);
3514 }
3515 n = env->icount_decr.u16.low + tb->icount;
3516 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3517 /* Calculate how many instructions had been executed before the fault
3518 occurred. */
3519 n = n - env->icount_decr.u16.low;
3520 /* Generate a new TB ending on the I/O insn. */
3521 n++;
3522 /* On MIPS and SH, delay slot instructions can only be restarted if
3523 they were already the first instruction in the TB. If this is not
3524 the first instruction in a TB then re-execute the preceding
3525 branch. */
3526 #if defined(TARGET_MIPS)
3527 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3528 env->active_tc.PC -= 4;
3529 env->icount_decr.u16.low++;
3530 env->hflags &= ~MIPS_HFLAG_BMASK;
3531 }
3532 #elif defined(TARGET_SH4)
3533 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3534 && n > 1) {
3535 env->pc -= 2;
3536 env->icount_decr.u16.low++;
3537 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3538 }
3539 #endif
3540 /* This should never happen. */
3541 if (n > CF_COUNT_MASK)
3542 cpu_abort(env, "TB too big during recompile");
3543
3544 cflags = n | CF_LAST_IO;
3545 pc = tb->pc;
3546 cs_base = tb->cs_base;
3547 flags = tb->flags;
3548 tb_phys_invalidate(tb, -1);
3549 /* FIXME: In theory this could raise an exception. In practice
3550 we have already translated the block once so it's probably ok. */
3551 tb_gen_code(env, pc, cs_base, flags, cflags);
3552 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3553 the first in the TB) then we end up generating a whole new TB and
3554 repeating the fault, which is horribly inefficient.
3555 Better would be to execute just this insn uncached, or generate a
3556 second new TB. */
3557 cpu_resume_from_signal(env, NULL);
3558 }
3559
3560 void dump_exec_info(FILE *f,
3561 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3562 {
3563 int i, target_code_size, max_target_code_size;
3564 int direct_jmp_count, direct_jmp2_count, cross_page;
3565 TranslationBlock *tb;
3566
3567 target_code_size = 0;
3568 max_target_code_size = 0;
3569 cross_page = 0;
3570 direct_jmp_count = 0;
3571 direct_jmp2_count = 0;
3572 for(i = 0; i < nb_tbs; i++) {
3573 tb = &tbs[i];
3574 target_code_size += tb->size;
3575 if (tb->size > max_target_code_size)
3576 max_target_code_size = tb->size;
3577 if (tb->page_addr[1] != -1)
3578 cross_page++;
3579 if (tb->tb_next_offset[0] != 0xffff) {
3580 direct_jmp_count++;
3581 if (tb->tb_next_offset[1] != 0xffff) {
3582 direct_jmp2_count++;
3583 }
3584 }
3585 }
3586 /* XXX: avoid using doubles? */
3587 cpu_fprintf(f, "Translation buffer state:\n");
3588 cpu_fprintf(f, "gen code size %ld/%ld\n",
3589 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3590 cpu_fprintf(f, "TB count %d/%d\n",
3591 nb_tbs, code_gen_max_blocks);
3592 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3593 nb_tbs ? target_code_size / nb_tbs : 0,
3594 max_target_code_size);
3595 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3596 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3597 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3598 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3599 cross_page,
3600 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3601 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3602 direct_jmp_count,
3603 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3604 direct_jmp2_count,
3605 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3606 cpu_fprintf(f, "\nStatistics:\n");
3607 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3608 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3609 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3610 tcg_dump_info(f, cpu_fprintf);
3611 }
3612
3613 #if !defined(CONFIG_USER_ONLY)
3614
3615 #define MMUSUFFIX _cmmu
3616 #define GETPC() NULL
3617 #define env cpu_single_env
3618 #define SOFTMMU_CODE_ACCESS
3619
3620 #define SHIFT 0
3621 #include "softmmu_template.h"
3622
3623 #define SHIFT 1
3624 #include "softmmu_template.h"
3625
3626 #define SHIFT 2
3627 #include "softmmu_template.h"
3628
3629 #define SHIFT 3
3630 #include "softmmu_template.h"
3631
3632 #undef env
3633
3634 #endif