1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
35
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
39 #include "tcg.h"
40 #include "hw/hw.h"
41 #include "osdep.h"
42 #include "kvm.h"
43 #if defined(CONFIG_USER_ONLY)
44 #include <qemu.h>
45 #endif
46
47 //#define DEBUG_TB_INVALIDATE
48 //#define DEBUG_FLUSH
49 //#define DEBUG_TLB
50 //#define DEBUG_UNASSIGNED
51
52 /* make various TB consistency checks */
53 //#define DEBUG_TB_CHECK
54 //#define DEBUG_TLB_CHECK
55
56 //#define DEBUG_IOPORT
57 //#define DEBUG_SUBPAGE
58
59 #if !defined(CONFIG_USER_ONLY)
60 /* TB consistency checks only implemented for usermode emulation. */
61 #undef DEBUG_TB_CHECK
62 #endif
63
64 #define SMC_BITMAP_USE_THRESHOLD 10
65
66 #define MMAP_AREA_START 0x00000000
67 #define MMAP_AREA_END 0xa8000000
68
69 #if defined(TARGET_SPARC64)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 41
71 #elif defined(TARGET_SPARC)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73 #elif defined(TARGET_ALPHA)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #define TARGET_VIRT_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_PPC64)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 36
82 #else
83 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84 #define TARGET_PHYS_ADDR_SPACE_BITS 32
85 #endif
86
87 static TranslationBlock *tbs;
88 int code_gen_max_blocks;
89 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
90 static int nb_tbs;
91 /* any access to the tbs or the page table must use this lock */
92 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
93
94 #if defined(__arm__) || defined(__sparc_v9__)
95 /* The prologue must be reachable with a direct jump. ARM and Sparc64
96    have limited branch ranges (possibly also PPC), so place it in a
97    section close to the code segment. */
98 #define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
101 #else
102 #define code_gen_section \
103 __attribute__((aligned (32)))
104 #endif
105
106 uint8_t code_gen_prologue[1024] code_gen_section;
107 static uint8_t *code_gen_buffer;
108 static unsigned long code_gen_buffer_size;
109 /* threshold to flush the translated code buffer */
110 static unsigned long code_gen_buffer_max_size;
111 uint8_t *code_gen_ptr;
112
113 #if !defined(CONFIG_USER_ONLY)
114 ram_addr_t phys_ram_size;
115 int phys_ram_fd;
116 uint8_t *phys_ram_base;
117 uint8_t *phys_ram_dirty;
118 static int in_migration;
119 static ram_addr_t phys_ram_alloc_offset = 0;
120 #endif
121
122 CPUState *first_cpu;
123 /* current CPU in the current thread. It is only valid inside
124 cpu_exec() */
125 CPUState *cpu_single_env;
126 /* 0 = Do not count executed instructions.
127 1 = Precise instruction counting.
128 2 = Adaptive rate instruction counting. */
129 int use_icount = 0;
130 /* Current instruction counter. While executing translated code this may
131 include some instructions that have not yet been executed. */
132 int64_t qemu_icount;
133
134 typedef struct PageDesc {
135 /* list of TBs intersecting this ram page */
136 TranslationBlock *first_tb;
137     /* in order to optimize self-modifying code, we count the number of
138        write accesses to a given page; past a threshold a bitmap is used */
139 unsigned int code_write_count;
140 uint8_t *code_bitmap;
141 #if defined(CONFIG_USER_ONLY)
142 unsigned long flags;
143 #endif
144 } PageDesc;
145
146 typedef struct PhysPageDesc {
147 /* offset in host memory of the page + io_index in the low bits */
148 ram_addr_t phys_offset;
149 } PhysPageDesc;
150
151 #define L2_BITS 10
152 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
153 /* XXX: this is a temporary hack for alpha target.
154 * In the future, this is to be replaced by a multi-level table
155  * to actually be able to handle the complete 64-bit address space.
156 */
157 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
158 #else
159 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
160 #endif
161
162 #define L1_SIZE (1 << L1_BITS)
163 #define L2_SIZE (1 << L2_BITS)
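/* Example: on a 32-bit target with TARGET_PAGE_BITS == 12 (4 KiB pages),
   L1_BITS is 32 - 10 - 12 = 10, so each level holds 1024 entries and a
   page index splits as:
     l1 entry: index >> L2_BITS        (top 10 bits)
     l2 entry: index & (L2_SIZE - 1)   (next 10 bits)
   which covers the whole 4 GiB virtual address space. */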
164
165 unsigned long qemu_real_host_page_size;
166 unsigned long qemu_host_page_bits;
167 unsigned long qemu_host_page_size;
168 unsigned long qemu_host_page_mask;
169
170 /* XXX: for system emulation, it could just be an array */
171 static PageDesc *l1_map[L1_SIZE];
172 static PhysPageDesc **l1_phys_map;
173
174 #if !defined(CONFIG_USER_ONLY)
175 static void io_mem_init(void);
176
177 /* io memory support */
178 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
179 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
180 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
181 static int io_mem_nb;
182 static int io_mem_watch;
183 #endif
184
185 /* log support */
186 static const char *logfilename = "/tmp/qemu.log";
187 FILE *logfile;
188 int loglevel;
189 static int log_append = 0;
190
191 /* statistics */
192 static int tlb_flush_count;
193 static int tb_flush_count;
194 static int tb_phys_invalidate_count;
195
196 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
197 typedef struct subpage_t {
198 target_phys_addr_t base;
199 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
200 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
201 void *opaque[TARGET_PAGE_SIZE][2][4];
202 } subpage_t;
203
204 #ifdef _WIN32
205 static void map_exec(void *addr, long size)
206 {
207 DWORD old_protect;
208 VirtualProtect(addr, size,
209 PAGE_EXECUTE_READWRITE, &old_protect);
210
211 }
212 #else
213 static void map_exec(void *addr, long size)
214 {
215 unsigned long start, end, page_size;
216
217 page_size = getpagesize();
218 start = (unsigned long)addr;
219 start &= ~(page_size - 1);
220
221 end = (unsigned long)addr + size;
222 end += page_size - 1;
223 end &= ~(page_size - 1);
224
225 mprotect((void *)start, end - start,
226 PROT_READ | PROT_WRITE | PROT_EXEC);
227 }
228 #endif
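/* For illustration of the rounding above: with 4096-byte host pages,
   map_exec((void *)0x12345, 100) yields start = 0x12000 and
   end = (0x12345 + 100 + 0xfff) & ~0xfff = 0x13000, so every page
   touched by the buffer becomes readable, writable and executable. */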
229
230 static void page_init(void)
231 {
232 /* NOTE: we can always suppose that qemu_host_page_size >=
233 TARGET_PAGE_SIZE */
234 #ifdef _WIN32
235 {
236 SYSTEM_INFO system_info;
237
238 GetSystemInfo(&system_info);
239 qemu_real_host_page_size = system_info.dwPageSize;
240 }
241 #else
242 qemu_real_host_page_size = getpagesize();
243 #endif
244 if (qemu_host_page_size == 0)
245 qemu_host_page_size = qemu_real_host_page_size;
246 if (qemu_host_page_size < TARGET_PAGE_SIZE)
247 qemu_host_page_size = TARGET_PAGE_SIZE;
248 qemu_host_page_bits = 0;
249 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
250 qemu_host_page_bits++;
251 qemu_host_page_mask = ~(qemu_host_page_size - 1);
252 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
253 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
254
255 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
256 {
257 long long startaddr, endaddr;
258 FILE *f;
259 int n;
260
261 mmap_lock();
262 last_brk = (unsigned long)sbrk(0);
263 f = fopen("/proc/self/maps", "r");
264 if (f) {
265 do {
266 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
267 if (n == 2) {
268 startaddr = MIN(startaddr,
269 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
270 endaddr = MIN(endaddr,
271 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
272 page_set_flags(startaddr & TARGET_PAGE_MASK,
273 TARGET_PAGE_ALIGN(endaddr),
274 PAGE_RESERVED);
275 }
276 } while (!feof(f));
277 fclose(f);
278 }
279 mmap_unlock();
280 }
281 #endif
282 }
283
284 static inline PageDesc **page_l1_map(target_ulong index)
285 {
286 #if TARGET_LONG_BITS > 32
287 /* Host memory outside guest VM. For 32-bit targets we have already
288 excluded high addresses. */
289 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
290 return NULL;
291 #endif
292 return &l1_map[index >> L2_BITS];
293 }
294
295 static inline PageDesc *page_find_alloc(target_ulong index)
296 {
297 PageDesc **lp, *p;
298 lp = page_l1_map(index);
299 if (!lp)
300 return NULL;
301
302 p = *lp;
303 if (!p) {
304 /* allocate if not found */
305 #if defined(CONFIG_USER_ONLY)
306 unsigned long addr;
307 size_t len = sizeof(PageDesc) * L2_SIZE;
308 /* Don't use qemu_malloc because it may recurse. */
309 p = mmap(0, len, PROT_READ | PROT_WRITE,
310 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
311 *lp = p;
312 addr = h2g(p);
313 if (addr == (target_ulong)addr) {
314 page_set_flags(addr & TARGET_PAGE_MASK,
315 TARGET_PAGE_ALIGN(addr + len),
316 PAGE_RESERVED);
317 }
318 #else
319 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
320 *lp = p;
321 #endif
322 }
323 return p + (index & (L2_SIZE - 1));
324 }
325
326 static inline PageDesc *page_find(target_ulong index)
327 {
328 PageDesc **lp, *p;
329 lp = page_l1_map(index);
330 if (!lp)
331 return NULL;
332
333 p = *lp;
334 if (!p)
335         return NULL;
336 return p + (index & (L2_SIZE - 1));
337 }
338
339 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
340 {
341 void **lp, **p;
342 PhysPageDesc *pd;
343
344 p = (void **)l1_phys_map;
345 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
346
347 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
348 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
349 #endif
350 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
351 p = *lp;
352 if (!p) {
353 /* allocate if not found */
354 if (!alloc)
355 return NULL;
356 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
357 memset(p, 0, sizeof(void *) * L1_SIZE);
358 *lp = p;
359 }
360 #endif
361 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
362 pd = *lp;
363 if (!pd) {
364 int i;
365 /* allocate if not found */
366 if (!alloc)
367 return NULL;
368 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
369 *lp = pd;
370 for (i = 0; i < L2_SIZE; i++)
371 pd[i].phys_offset = IO_MEM_UNASSIGNED;
372 }
373 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
374 }
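/* Example of the lookup above when TARGET_PHYS_ADDR_SPACE_BITS > 32
   (assuming the usual L1_BITS == L2_BITS == 10, TARGET_PAGE_BITS == 12):
   the caller passes index = phys_addr >> TARGET_PAGE_BITS, which is
   split as
     top level: (index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1)
     mid level: (index >> L2_BITS) & (L1_SIZE - 1)
     leaf:      index & (L2_SIZE - 1)
   giving 10 + 10 + 10 + 12 = 42 addressable bits, which matches the
   32 + L1_BITS limit enforced by the #error above. */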
375
376 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
377 {
378 return phys_page_find_alloc(index, 0);
379 }
380
381 #if !defined(CONFIG_USER_ONLY)
382 static void tlb_protect_code(ram_addr_t ram_addr);
383 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
384 target_ulong vaddr);
385 #define mmap_lock() do { } while(0)
386 #define mmap_unlock() do { } while(0)
387 #endif
388
389 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
390
391 #if defined(CONFIG_USER_ONLY)
392 /* Currently it is not recommended to allocate big chunks of data in
393    user mode. It will change when a dedicated libc is used. */
394 #define USE_STATIC_CODE_GEN_BUFFER
395 #endif
396
397 #ifdef USE_STATIC_CODE_GEN_BUFFER
398 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
399 #endif
400
401 static void code_gen_alloc(unsigned long tb_size)
402 {
403 #ifdef USE_STATIC_CODE_GEN_BUFFER
404 code_gen_buffer = static_code_gen_buffer;
405 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
406 map_exec(code_gen_buffer, code_gen_buffer_size);
407 #else
408 code_gen_buffer_size = tb_size;
409 if (code_gen_buffer_size == 0) {
410 #if defined(CONFIG_USER_ONLY)
411 /* in user mode, phys_ram_size is not meaningful */
412 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
413 #else
414         /* XXX: needs adjustments */
415 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
416 #endif
417 }
418 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
419 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
420 /* The code gen buffer location may have constraints depending on
421 the host cpu and OS */
422 #if defined(__linux__)
423 {
424 int flags;
425 void *start = NULL;
426
427 flags = MAP_PRIVATE | MAP_ANONYMOUS;
428 #if defined(__x86_64__)
429 flags |= MAP_32BIT;
430 /* Cannot map more than that */
431 if (code_gen_buffer_size > (800 * 1024 * 1024))
432 code_gen_buffer_size = (800 * 1024 * 1024);
433 #elif defined(__sparc_v9__)
434 // Map the buffer below 2G, so we can use direct calls and branches
435 flags |= MAP_FIXED;
436 start = (void *) 0x60000000UL;
437 if (code_gen_buffer_size > (512 * 1024 * 1024))
438 code_gen_buffer_size = (512 * 1024 * 1024);
439 #endif
440 code_gen_buffer = mmap(start, code_gen_buffer_size,
441 PROT_WRITE | PROT_READ | PROT_EXEC,
442 flags, -1, 0);
443 if (code_gen_buffer == MAP_FAILED) {
444 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
445 exit(1);
446 }
447 }
448 #elif defined(__FreeBSD__)
449 {
450 int flags;
451 void *addr = NULL;
452 flags = MAP_PRIVATE | MAP_ANONYMOUS;
453 #if defined(__x86_64__)
454 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
455 * 0x40000000 is free */
456 flags |= MAP_FIXED;
457 addr = (void *)0x40000000;
458 /* Cannot map more than that */
459 if (code_gen_buffer_size > (800 * 1024 * 1024))
460 code_gen_buffer_size = (800 * 1024 * 1024);
461 #endif
462 code_gen_buffer = mmap(addr, code_gen_buffer_size,
463 PROT_WRITE | PROT_READ | PROT_EXEC,
464 flags, -1, 0);
465 if (code_gen_buffer == MAP_FAILED) {
466 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
467 exit(1);
468 }
469 }
470 #else
471 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
472 if (!code_gen_buffer) {
473 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
474 exit(1);
475 }
476 map_exec(code_gen_buffer, code_gen_buffer_size);
477 #endif
478 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
479 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
480 code_gen_buffer_max_size = code_gen_buffer_size -
481 code_gen_max_block_size();
482 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
483 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
484 }
485
486 /* Must be called before using the QEMU cpus. 'tb_size' is the size
487 (in bytes) allocated to the translation buffer. Zero means default
488 size. */
489 void cpu_exec_init_all(unsigned long tb_size)
490 {
491 cpu_gen_init();
492 code_gen_alloc(tb_size);
493 code_gen_ptr = code_gen_buffer;
494 page_init();
495 #if !defined(CONFIG_USER_ONLY)
496 io_mem_init();
497 #endif
498 }
499
500 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
501
502 #define CPU_COMMON_SAVE_VERSION 1
503
504 static void cpu_common_save(QEMUFile *f, void *opaque)
505 {
506 CPUState *env = opaque;
507
508 qemu_put_be32s(f, &env->halted);
509 qemu_put_be32s(f, &env->interrupt_request);
510 }
511
512 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
513 {
514 CPUState *env = opaque;
515
516 if (version_id != CPU_COMMON_SAVE_VERSION)
517 return -EINVAL;
518
519 qemu_get_be32s(f, &env->halted);
520 qemu_get_be32s(f, &env->interrupt_request);
521 tlb_flush(env, 1);
522
523 return 0;
524 }
525 #endif
526
527 void cpu_exec_init(CPUState *env)
528 {
529 CPUState **penv;
530 int cpu_index;
531
532 env->next_cpu = NULL;
533 penv = &first_cpu;
534 cpu_index = 0;
535 while (*penv != NULL) {
536 penv = (CPUState **)&(*penv)->next_cpu;
537 cpu_index++;
538 }
539 env->cpu_index = cpu_index;
540 *penv = env;
541 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
542 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
543 cpu_common_save, cpu_common_load, env);
544 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
545 cpu_save, cpu_load, env);
546 #endif
547 }
548
549 static inline void invalidate_page_bitmap(PageDesc *p)
550 {
551 if (p->code_bitmap) {
552 qemu_free(p->code_bitmap);
553 p->code_bitmap = NULL;
554 }
555 p->code_write_count = 0;
556 }
557
558 /* set to NULL all the 'first_tb' fields in all PageDescs */
559 static void page_flush_tb(void)
560 {
561 int i, j;
562 PageDesc *p;
563
564 for(i = 0; i < L1_SIZE; i++) {
565 p = l1_map[i];
566 if (p) {
567 for(j = 0; j < L2_SIZE; j++) {
568 p->first_tb = NULL;
569 invalidate_page_bitmap(p);
570 p++;
571 }
572 }
573 }
574 }
575
576 /* flush all the translation blocks */
577 /* XXX: tb_flush is currently not thread safe */
578 void tb_flush(CPUState *env1)
579 {
580 CPUState *env;
581 #if defined(DEBUG_FLUSH)
582 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
583 (unsigned long)(code_gen_ptr - code_gen_buffer),
584 nb_tbs, nb_tbs > 0 ?
585 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
586 #endif
587 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
588 cpu_abort(env1, "Internal error: code buffer overflow\n");
589
590 nb_tbs = 0;
591
592 for(env = first_cpu; env != NULL; env = env->next_cpu) {
593 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
594 }
595
596 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
597 page_flush_tb();
598
599 code_gen_ptr = code_gen_buffer;
600 /* XXX: flush processor icache at this point if cache flush is
601 expensive */
602 tb_flush_count++;
603 }
604
605 #ifdef DEBUG_TB_CHECK
606
607 static void tb_invalidate_check(target_ulong address)
608 {
609 TranslationBlock *tb;
610 int i;
611 address &= TARGET_PAGE_MASK;
612 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
613 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
614 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
615 address >= tb->pc + tb->size)) {
616 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
617 address, (long)tb->pc, tb->size);
618 }
619 }
620 }
621 }
622
623 /* verify that all the pages have correct rights for code */
624 static void tb_page_check(void)
625 {
626 TranslationBlock *tb;
627 int i, flags1, flags2;
628
629 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
630 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
631 flags1 = page_get_flags(tb->pc);
632 flags2 = page_get_flags(tb->pc + tb->size - 1);
633 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
634 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
635 (long)tb->pc, tb->size, flags1, flags2);
636 }
637 }
638 }
639 }
640
641 static void tb_jmp_check(TranslationBlock *tb)
642 {
643 TranslationBlock *tb1;
644 unsigned int n1;
645
646 /* suppress any remaining jumps to this TB */
647 tb1 = tb->jmp_first;
648 for(;;) {
649 n1 = (long)tb1 & 3;
650 tb1 = (TranslationBlock *)((long)tb1 & ~3);
651 if (n1 == 2)
652 break;
653 tb1 = tb1->jmp_next[n1];
654 }
655 /* check end of list */
656 if (tb1 != tb) {
657 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
658 }
659 }
660
661 #endif
662
663 /* invalidate one TB */
664 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
665 int next_offset)
666 {
667 TranslationBlock *tb1;
668 for(;;) {
669 tb1 = *ptb;
670 if (tb1 == tb) {
671 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
672 break;
673 }
674 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
675 }
676 }
677
678 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
679 {
680 TranslationBlock *tb1;
681 unsigned int n1;
682
683 for(;;) {
684 tb1 = *ptb;
685 n1 = (long)tb1 & 3;
686 tb1 = (TranslationBlock *)((long)tb1 & ~3);
687 if (tb1 == tb) {
688 *ptb = tb1->page_next[n1];
689 break;
690 }
691 ptb = &tb1->page_next[n1];
692 }
693 }
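/* The lists walked above keep a 2-bit tag in the low bits of each
   TranslationBlock pointer (TB pointers are at least 4-byte aligned,
   so those bits are free): tags 0 and 1 say which of the TB's two
   physical pages the link belongs to, and tag 2 marks the end of the
   circular jump list. The encoding, as used throughout this file:

     tagged = (TranslationBlock *)((long)tb | n);        pack tag n
     n      = (long)tagged & 3;                          recover tag
     tb     = (TranslationBlock *)((long)tagged & ~3);   recover pointer
*/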
694
695 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
696 {
697 TranslationBlock *tb1, **ptb;
698 unsigned int n1;
699
700 ptb = &tb->jmp_next[n];
701 tb1 = *ptb;
702 if (tb1) {
703 /* find tb(n) in circular list */
704 for(;;) {
705 tb1 = *ptb;
706 n1 = (long)tb1 & 3;
707 tb1 = (TranslationBlock *)((long)tb1 & ~3);
708 if (n1 == n && tb1 == tb)
709 break;
710 if (n1 == 2) {
711 ptb = &tb1->jmp_first;
712 } else {
713 ptb = &tb1->jmp_next[n1];
714 }
715 }
716 /* now we can suppress tb(n) from the list */
717 *ptb = tb->jmp_next[n];
718
719 tb->jmp_next[n] = NULL;
720 }
721 }
722
723 /* reset the jump entry 'n' of a TB so that it is not chained to
724 another TB */
725 static inline void tb_reset_jump(TranslationBlock *tb, int n)
726 {
727 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
728 }
729
730 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
731 {
732 CPUState *env;
733 PageDesc *p;
734 unsigned int h, n1;
735 target_phys_addr_t phys_pc;
736 TranslationBlock *tb1, *tb2;
737
738 /* remove the TB from the hash list */
739 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
740 h = tb_phys_hash_func(phys_pc);
741 tb_remove(&tb_phys_hash[h], tb,
742 offsetof(TranslationBlock, phys_hash_next));
743
744 /* remove the TB from the page list */
745 if (tb->page_addr[0] != page_addr) {
746 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
747 tb_page_remove(&p->first_tb, tb);
748 invalidate_page_bitmap(p);
749 }
750 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
751 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
752 tb_page_remove(&p->first_tb, tb);
753 invalidate_page_bitmap(p);
754 }
755
756 tb_invalidated_flag = 1;
757
758 /* remove the TB from the hash list */
759 h = tb_jmp_cache_hash_func(tb->pc);
760 for(env = first_cpu; env != NULL; env = env->next_cpu) {
761 if (env->tb_jmp_cache[h] == tb)
762 env->tb_jmp_cache[h] = NULL;
763 }
764
765 /* suppress this TB from the two jump lists */
766 tb_jmp_remove(tb, 0);
767 tb_jmp_remove(tb, 1);
768
769 /* suppress any remaining jumps to this TB */
770 tb1 = tb->jmp_first;
771 for(;;) {
772 n1 = (long)tb1 & 3;
773 if (n1 == 2)
774 break;
775 tb1 = (TranslationBlock *)((long)tb1 & ~3);
776 tb2 = tb1->jmp_next[n1];
777 tb_reset_jump(tb1, n1);
778 tb1->jmp_next[n1] = NULL;
779 tb1 = tb2;
780 }
781 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
782
783 tb_phys_invalidate_count++;
784 }
785
786 static inline void set_bits(uint8_t *tab, int start, int len)
787 {
788 int end, mask, end1;
789
790 end = start + len;
791 tab += start >> 3;
792 mask = 0xff << (start & 7);
793 if ((start & ~7) == (end & ~7)) {
794 if (start < end) {
795 mask &= ~(0xff << (end & 7));
796 *tab |= mask;
797 }
798 } else {
799 *tab++ |= mask;
800 start = (start + 8) & ~7;
801 end1 = end & ~7;
802 while (start < end1) {
803 *tab++ = 0xff;
804 start += 8;
805 }
806 if (start < end) {
807 mask = ~(0xff << (end & 7));
808 *tab |= mask;
809 }
810 }
811 }
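/* Example: set_bits(tab, 5, 10) marks bits 5..14, i.e. it ORs 0xe0
   into tab[0] (bits 5-7) and 0x7f into tab[1] (bits 8-14); the first
   branch above handles the case where start and end land in the same
   byte. */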
812
813 static void build_page_bitmap(PageDesc *p)
814 {
815 int n, tb_start, tb_end;
816 TranslationBlock *tb;
817
818 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
819 if (!p->code_bitmap)
820 return;
821
822 tb = p->first_tb;
823 while (tb != NULL) {
824 n = (long)tb & 3;
825 tb = (TranslationBlock *)((long)tb & ~3);
826 /* NOTE: this is subtle as a TB may span two physical pages */
827 if (n == 0) {
828 /* NOTE: tb_end may be after the end of the page, but
829 it is not a problem */
830 tb_start = tb->pc & ~TARGET_PAGE_MASK;
831 tb_end = tb_start + tb->size;
832 if (tb_end > TARGET_PAGE_SIZE)
833 tb_end = TARGET_PAGE_SIZE;
834 } else {
835 tb_start = 0;
836 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
837 }
838 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
839 tb = tb->page_next[n];
840 }
841 }
842
843 TranslationBlock *tb_gen_code(CPUState *env,
844 target_ulong pc, target_ulong cs_base,
845 int flags, int cflags)
846 {
847 TranslationBlock *tb;
848 uint8_t *tc_ptr;
849 target_ulong phys_pc, phys_page2, virt_page2;
850 int code_gen_size;
851
852 phys_pc = get_phys_addr_code(env, pc);
853 tb = tb_alloc(pc);
854 if (!tb) {
855 /* flush must be done */
856 tb_flush(env);
857 /* cannot fail at this point */
858 tb = tb_alloc(pc);
859 /* Don't forget to invalidate previous TB info. */
860 tb_invalidated_flag = 1;
861 }
862 tc_ptr = code_gen_ptr;
863 tb->tc_ptr = tc_ptr;
864 tb->cs_base = cs_base;
865 tb->flags = flags;
866 tb->cflags = cflags;
867 cpu_gen_code(env, tb, &code_gen_size);
868 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
869
870 /* check next page if needed */
871 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
872 phys_page2 = -1;
873 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
874 phys_page2 = get_phys_addr_code(env, virt_page2);
875 }
876 tb_link_phys(tb, phys_pc, phys_page2);
877 return tb;
878 }
879
880 /* invalidate all TBs which intersect with the target physical page
881 starting in range [start;end[. NOTE: start and end must refer to
882 the same physical page. 'is_cpu_write_access' should be true if called
883 from a real cpu write access: the virtual CPU will exit the current
884 TB if code is modified inside this TB. */
885 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
886 int is_cpu_write_access)
887 {
888 TranslationBlock *tb, *tb_next, *saved_tb;
889 CPUState *env = cpu_single_env;
890 target_ulong tb_start, tb_end;
891 PageDesc *p;
892 int n;
893 #ifdef TARGET_HAS_PRECISE_SMC
894 int current_tb_not_found = is_cpu_write_access;
895 TranslationBlock *current_tb = NULL;
896 int current_tb_modified = 0;
897 target_ulong current_pc = 0;
898 target_ulong current_cs_base = 0;
899 int current_flags = 0;
900 #endif /* TARGET_HAS_PRECISE_SMC */
901
902 p = page_find(start >> TARGET_PAGE_BITS);
903 if (!p)
904 return;
905 if (!p->code_bitmap &&
906 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
907 is_cpu_write_access) {
908 /* build code bitmap */
909 build_page_bitmap(p);
910 }
911
912 /* we remove all the TBs in the range [start, end[ */
913 /* XXX: see if in some cases it could be faster to invalidate all the code */
914 tb = p->first_tb;
915 while (tb != NULL) {
916 n = (long)tb & 3;
917 tb = (TranslationBlock *)((long)tb & ~3);
918 tb_next = tb->page_next[n];
919 /* NOTE: this is subtle as a TB may span two physical pages */
920 if (n == 0) {
921 /* NOTE: tb_end may be after the end of the page, but
922 it is not a problem */
923 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
924 tb_end = tb_start + tb->size;
925 } else {
926 tb_start = tb->page_addr[1];
927 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
928 }
929 if (!(tb_end <= start || tb_start >= end)) {
930 #ifdef TARGET_HAS_PRECISE_SMC
931 if (current_tb_not_found) {
932 current_tb_not_found = 0;
933 current_tb = NULL;
934 if (env->mem_io_pc) {
935 /* now we have a real cpu fault */
936 current_tb = tb_find_pc(env->mem_io_pc);
937 }
938 }
939 if (current_tb == tb &&
940 (current_tb->cflags & CF_COUNT_MASK) != 1) {
941 /* If we are modifying the current TB, we must stop
942 its execution. We could be more precise by checking
943 that the modification is after the current PC, but it
944 would require a specialized function to partially
945 restore the CPU state */
946
947 current_tb_modified = 1;
948 cpu_restore_state(current_tb, env,
949 env->mem_io_pc, NULL);
950 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
951 &current_flags);
952 }
953 #endif /* TARGET_HAS_PRECISE_SMC */
954 /* we need to do that to handle the case where a signal
955 occurs while doing tb_phys_invalidate() */
956 saved_tb = NULL;
957 if (env) {
958 saved_tb = env->current_tb;
959 env->current_tb = NULL;
960 }
961 tb_phys_invalidate(tb, -1);
962 if (env) {
963 env->current_tb = saved_tb;
964 if (env->interrupt_request && env->current_tb)
965 cpu_interrupt(env, env->interrupt_request);
966 }
967 }
968 tb = tb_next;
969 }
970 #if !defined(CONFIG_USER_ONLY)
971 /* if no code remaining, no need to continue to use slow writes */
972 if (!p->first_tb) {
973 invalidate_page_bitmap(p);
974 if (is_cpu_write_access) {
975 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
976 }
977 }
978 #endif
979 #ifdef TARGET_HAS_PRECISE_SMC
980 if (current_tb_modified) {
981 /* we generate a block containing just the instruction
982 modifying the memory. It will ensure that it cannot modify
983 itself */
984 env->current_tb = NULL;
985 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
986 cpu_resume_from_signal(env, NULL);
987 }
988 #endif
989 }
990
991 /* len must be <= 8 and start must be a multiple of len */
992 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
993 {
994 PageDesc *p;
995 int offset, b;
996 #if 0
997 if (1) {
998 if (loglevel) {
999 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1000 cpu_single_env->mem_io_vaddr, len,
1001 cpu_single_env->eip,
1002 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1003 }
1004 }
1005 #endif
1006 p = page_find(start >> TARGET_PAGE_BITS);
1007 if (!p)
1008 return;
1009 if (p->code_bitmap) {
1010 offset = start & ~TARGET_PAGE_MASK;
1011 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1012 if (b & ((1 << len) - 1))
1013 goto do_invalidate;
1014 } else {
1015 do_invalidate:
1016 tb_invalidate_phys_page_range(start, start + len, 1);
1017 }
1018 }
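/* Note on the bitmap test above: in practice callers pass power-of-2
   access sizes (1, 2, 4 or 8) with start aligned to len, so
   (offset & 7) + len never exceeds 8 and the len bits of interest all
   sit in the single byte fetched from code_bitmap. For example, a
   4-byte write at page offset 0x44 tests bits 4-7 of
   code_bitmap[0x44 >> 3]. */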
1019
1020 #if !defined(CONFIG_SOFTMMU)
1021 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1022 unsigned long pc, void *puc)
1023 {
1024 TranslationBlock *tb;
1025 PageDesc *p;
1026 int n;
1027 #ifdef TARGET_HAS_PRECISE_SMC
1028 TranslationBlock *current_tb = NULL;
1029 CPUState *env = cpu_single_env;
1030 int current_tb_modified = 0;
1031 target_ulong current_pc = 0;
1032 target_ulong current_cs_base = 0;
1033 int current_flags = 0;
1034 #endif
1035
1036 addr &= TARGET_PAGE_MASK;
1037 p = page_find(addr >> TARGET_PAGE_BITS);
1038 if (!p)
1039 return;
1040 tb = p->first_tb;
1041 #ifdef TARGET_HAS_PRECISE_SMC
1042 if (tb && pc != 0) {
1043 current_tb = tb_find_pc(pc);
1044 }
1045 #endif
1046 while (tb != NULL) {
1047 n = (long)tb & 3;
1048 tb = (TranslationBlock *)((long)tb & ~3);
1049 #ifdef TARGET_HAS_PRECISE_SMC
1050 if (current_tb == tb &&
1051 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1052 /* If we are modifying the current TB, we must stop
1053 its execution. We could be more precise by checking
1054 that the modification is after the current PC, but it
1055 would require a specialized function to partially
1056 restore the CPU state */
1057
1058 current_tb_modified = 1;
1059 cpu_restore_state(current_tb, env, pc, puc);
1060 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1061 &current_flags);
1062 }
1063 #endif /* TARGET_HAS_PRECISE_SMC */
1064 tb_phys_invalidate(tb, addr);
1065 tb = tb->page_next[n];
1066 }
1067 p->first_tb = NULL;
1068 #ifdef TARGET_HAS_PRECISE_SMC
1069 if (current_tb_modified) {
1070 /* we generate a block containing just the instruction
1071 modifying the memory. It will ensure that it cannot modify
1072 itself */
1073 env->current_tb = NULL;
1074 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1075 cpu_resume_from_signal(env, puc);
1076 }
1077 #endif
1078 }
1079 #endif
1080
1081 /* add the tb to the target page and protect it if necessary */
1082 static inline void tb_alloc_page(TranslationBlock *tb,
1083 unsigned int n, target_ulong page_addr)
1084 {
1085 PageDesc *p;
1086 TranslationBlock *last_first_tb;
1087
1088 tb->page_addr[n] = page_addr;
1089 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1090 tb->page_next[n] = p->first_tb;
1091 last_first_tb = p->first_tb;
1092 p->first_tb = (TranslationBlock *)((long)tb | n);
1093 invalidate_page_bitmap(p);
1094
1095 #if defined(TARGET_HAS_SMC) || 1
1096
1097 #if defined(CONFIG_USER_ONLY)
1098 if (p->flags & PAGE_WRITE) {
1099 target_ulong addr;
1100 PageDesc *p2;
1101 int prot;
1102
1103 /* force the host page as non writable (writes will have a
1104 page fault + mprotect overhead) */
1105 page_addr &= qemu_host_page_mask;
1106 prot = 0;
1107 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1108 addr += TARGET_PAGE_SIZE) {
1109
1110 p2 = page_find (addr >> TARGET_PAGE_BITS);
1111 if (!p2)
1112 continue;
1113 prot |= p2->flags;
1114 p2->flags &= ~PAGE_WRITE;
1115 page_get_flags(addr);
1116 }
1117 mprotect(g2h(page_addr), qemu_host_page_size,
1118 (prot & PAGE_BITS) & ~PAGE_WRITE);
1119 #ifdef DEBUG_TB_INVALIDATE
1120 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1121 page_addr);
1122 #endif
1123 }
1124 #else
1125 /* if some code is already present, then the pages are already
1126 protected. So we handle the case where only the first TB is
1127 allocated in a physical page */
1128 if (!last_first_tb) {
1129 tlb_protect_code(page_addr);
1130 }
1131 #endif
1132
1133 #endif /* TARGET_HAS_SMC */
1134 }
1135
1136 /* Allocate a new translation block. Flush the translation buffer if
1137 too many translation blocks or too much generated code. */
1138 TranslationBlock *tb_alloc(target_ulong pc)
1139 {
1140 TranslationBlock *tb;
1141
1142 if (nb_tbs >= code_gen_max_blocks ||
1143 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1144 return NULL;
1145 tb = &tbs[nb_tbs++];
1146 tb->pc = pc;
1147 tb->cflags = 0;
1148 return tb;
1149 }
1150
1151 void tb_free(TranslationBlock *tb)
1152 {
1153     /* In practice this is mostly used for single-use temporary TBs.
1154 Ignore the hard cases and just back up if this TB happens to
1155 be the last one generated. */
1156 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1157 code_gen_ptr = tb->tc_ptr;
1158 nb_tbs--;
1159 }
1160 }
1161
1162 /* add a new TB and link it to the physical page tables. phys_page2 is
1163 (-1) to indicate that only one page contains the TB. */
1164 void tb_link_phys(TranslationBlock *tb,
1165 target_ulong phys_pc, target_ulong phys_page2)
1166 {
1167 unsigned int h;
1168 TranslationBlock **ptb;
1169
1170 /* Grab the mmap lock to stop another thread invalidating this TB
1171 before we are done. */
1172 mmap_lock();
1173 /* add in the physical hash table */
1174 h = tb_phys_hash_func(phys_pc);
1175 ptb = &tb_phys_hash[h];
1176 tb->phys_hash_next = *ptb;
1177 *ptb = tb;
1178
1179 /* add in the page list */
1180 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1181 if (phys_page2 != -1)
1182 tb_alloc_page(tb, 1, phys_page2);
1183 else
1184 tb->page_addr[1] = -1;
1185
1186 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1187 tb->jmp_next[0] = NULL;
1188 tb->jmp_next[1] = NULL;
1189
1190 /* init original jump addresses */
1191 if (tb->tb_next_offset[0] != 0xffff)
1192 tb_reset_jump(tb, 0);
1193 if (tb->tb_next_offset[1] != 0xffff)
1194 tb_reset_jump(tb, 1);
1195
1196 #ifdef DEBUG_TB_CHECK
1197 tb_page_check();
1198 #endif
1199 mmap_unlock();
1200 }
1201
1202 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1203 tb[1].tc_ptr. Return NULL if not found */
1204 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1205 {
1206 int m_min, m_max, m;
1207 unsigned long v;
1208 TranslationBlock *tb;
1209
1210 if (nb_tbs <= 0)
1211 return NULL;
1212 if (tc_ptr < (unsigned long)code_gen_buffer ||
1213 tc_ptr >= (unsigned long)code_gen_ptr)
1214 return NULL;
1215 /* binary search (cf Knuth) */
1216 m_min = 0;
1217 m_max = nb_tbs - 1;
1218 while (m_min <= m_max) {
1219 m = (m_min + m_max) >> 1;
1220 tb = &tbs[m];
1221 v = (unsigned long)tb->tc_ptr;
1222 if (v == tc_ptr)
1223 return tb;
1224 else if (tc_ptr < v) {
1225 m_max = m - 1;
1226 } else {
1227 m_min = m + 1;
1228 }
1229 }
1230 return &tbs[m_max];
1231 }
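/* Usage sketch: given a host PC inside generated code (for example one
   captured in a signal handler), the binary search above returns the
   TB whose tc_ptr is the greatest value <= that PC, i.e. the block
   containing it; host_pc below is illustrative:

     TranslationBlock *tb = tb_find_pc((unsigned long)host_pc);
     if (tb)
         cpu_restore_state(tb, env, (unsigned long)host_pc, NULL);
*/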
1232
1233 static void tb_reset_jump_recursive(TranslationBlock *tb);
1234
1235 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1236 {
1237 TranslationBlock *tb1, *tb_next, **ptb;
1238 unsigned int n1;
1239
1240 tb1 = tb->jmp_next[n];
1241 if (tb1 != NULL) {
1242 /* find head of list */
1243 for(;;) {
1244 n1 = (long)tb1 & 3;
1245 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1246 if (n1 == 2)
1247 break;
1248 tb1 = tb1->jmp_next[n1];
1249 }
1250         /* we are now sure that tb jumps to tb1 */
1251 tb_next = tb1;
1252
1253 /* remove tb from the jmp_first list */
1254 ptb = &tb_next->jmp_first;
1255 for(;;) {
1256 tb1 = *ptb;
1257 n1 = (long)tb1 & 3;
1258 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1259 if (n1 == n && tb1 == tb)
1260 break;
1261 ptb = &tb1->jmp_next[n1];
1262 }
1263 *ptb = tb->jmp_next[n];
1264 tb->jmp_next[n] = NULL;
1265
1266 /* suppress the jump to next tb in generated code */
1267 tb_reset_jump(tb, n);
1268
1269 /* suppress jumps in the tb on which we could have jumped */
1270 tb_reset_jump_recursive(tb_next);
1271 }
1272 }
1273
1274 static void tb_reset_jump_recursive(TranslationBlock *tb)
1275 {
1276 tb_reset_jump_recursive2(tb, 0);
1277 tb_reset_jump_recursive2(tb, 1);
1278 }
1279
1280 #if defined(TARGET_HAS_ICE)
1281 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1282 {
1283 target_phys_addr_t addr;
1284 target_ulong pd;
1285 ram_addr_t ram_addr;
1286 PhysPageDesc *p;
1287
1288 addr = cpu_get_phys_page_debug(env, pc);
1289 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1290 if (!p) {
1291 pd = IO_MEM_UNASSIGNED;
1292 } else {
1293 pd = p->phys_offset;
1294 }
1295 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1296 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1297 }
1298 #endif
1299
1300 /* Add a watchpoint. */
1301 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1302 int flags, CPUWatchpoint **watchpoint)
1303 {
1304 target_ulong len_mask = ~(len - 1);
1305 CPUWatchpoint *wp, *prev_wp;
1306
1307 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1308 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1309 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1310 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1311 return -EINVAL;
1312 }
1313 wp = qemu_malloc(sizeof(*wp));
1314 if (!wp)
1315 return -ENOBUFS;
1316
1317 wp->vaddr = addr;
1318 wp->len_mask = len_mask;
1319 wp->flags = flags;
1320
1321 /* keep all GDB-injected watchpoints in front */
1322 if (!(flags & BP_GDB) && env->watchpoints) {
1323 prev_wp = env->watchpoints;
1324 while (prev_wp->next != NULL && (prev_wp->next->flags & BP_GDB))
1325 prev_wp = prev_wp->next;
1326 } else {
1327 prev_wp = NULL;
1328 }
1329
1330 /* Insert new watchpoint */
1331 if (prev_wp) {
1332 wp->next = prev_wp->next;
1333 prev_wp->next = wp;
1334 } else {
1335 wp->next = env->watchpoints;
1336 env->watchpoints = wp;
1337 }
1338 if (wp->next)
1339 wp->next->prev = wp;
1340 wp->prev = prev_wp;
1341
1342 tlb_flush_page(env, addr);
1343
1344 if (watchpoint)
1345 *watchpoint = wp;
1346 return 0;
1347 }
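/* Usage sketch with hypothetical values: insert an aligned 4-byte GDB
   watchpoint and later drop it by reference; on failure the call
   returns -EINVAL (bad length/alignment) or -ENOBUFS:

     CPUWatchpoint *wp;
     if (cpu_watchpoint_insert(env, 0x1000, 4, BP_GDB, &wp) == 0)
         cpu_watchpoint_remove_by_ref(env, wp);
*/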
1348
1349 /* Remove a specific watchpoint. */
1350 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1351 int flags)
1352 {
1353 target_ulong len_mask = ~(len - 1);
1354 CPUWatchpoint *wp;
1355
1356 for (wp = env->watchpoints; wp != NULL; wp = wp->next) {
1357 if (addr == wp->vaddr && len_mask == wp->len_mask
1358 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1359 cpu_watchpoint_remove_by_ref(env, wp);
1360 return 0;
1361 }
1362 }
1363 return -ENOENT;
1364 }
1365
1366 /* Remove a specific watchpoint by reference. */
1367 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1368 {
1369 if (watchpoint->next)
1370 watchpoint->next->prev = watchpoint->prev;
1371 if (watchpoint->prev)
1372 watchpoint->prev->next = watchpoint->next;
1373 else
1374 env->watchpoints = watchpoint->next;
1375
1376 tlb_flush_page(env, watchpoint->vaddr);
1377
1378 qemu_free(watchpoint);
1379 }
1380
1381 /* Remove all matching watchpoints. */
1382 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1383 {
1384 CPUWatchpoint *wp;
1385
1386 for (wp = env->watchpoints; wp != NULL; wp = wp->next)
1387 if (wp->flags & mask)
1388 cpu_watchpoint_remove_by_ref(env, wp);
1389 }
1390
1391 /* Add a breakpoint. */
1392 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1393 CPUBreakpoint **breakpoint)
1394 {
1395 #if defined(TARGET_HAS_ICE)
1396 CPUBreakpoint *bp, *prev_bp;
1397
1398 bp = qemu_malloc(sizeof(*bp));
1399 if (!bp)
1400 return -ENOBUFS;
1401
1402 bp->pc = pc;
1403 bp->flags = flags;
1404
1405 /* keep all GDB-injected breakpoints in front */
1406 if (!(flags & BP_GDB) && env->breakpoints) {
1407 prev_bp = env->breakpoints;
1408 while (prev_bp->next != NULL && (prev_bp->next->flags & BP_GDB))
1409 prev_bp = prev_bp->next;
1410 } else {
1411 prev_bp = NULL;
1412 }
1413
1414 /* Insert new breakpoint */
1415 if (prev_bp) {
1416 bp->next = prev_bp->next;
1417 prev_bp->next = bp;
1418 } else {
1419 bp->next = env->breakpoints;
1420 env->breakpoints = bp;
1421 }
1422 if (bp->next)
1423 bp->next->prev = bp;
1424 bp->prev = prev_bp;
1425
1426 breakpoint_invalidate(env, pc);
1427
1428 if (breakpoint)
1429 *breakpoint = bp;
1430 return 0;
1431 #else
1432 return -ENOSYS;
1433 #endif
1434 }
1435
1436 /* Remove a specific breakpoint. */
1437 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1438 {
1439 #if defined(TARGET_HAS_ICE)
1440 CPUBreakpoint *bp;
1441
1442 for (bp = env->breakpoints; bp != NULL; bp = bp->next) {
1443 if (bp->pc == pc && bp->flags == flags) {
1444 cpu_breakpoint_remove_by_ref(env, bp);
1445 return 0;
1446 }
1447 }
1448 return -ENOENT;
1449 #else
1450 return -ENOSYS;
1451 #endif
1452 }
1453
1454 /* Remove a specific breakpoint by reference. */
1455 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1456 {
1457 #if defined(TARGET_HAS_ICE)
1458 if (breakpoint->next)
1459 breakpoint->next->prev = breakpoint->prev;
1460 if (breakpoint->prev)
1461 breakpoint->prev->next = breakpoint->next;
1462 else
1463 env->breakpoints = breakpoint->next;
1464
1465 breakpoint_invalidate(env, breakpoint->pc);
1466
1467 qemu_free(breakpoint);
1468 #endif
1469 }
1470
1471 /* Remove all matching breakpoints. */
1472 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1473 {
1474 #if defined(TARGET_HAS_ICE)
1475 CPUBreakpoint *bp;
1476
1477 for (bp = env->breakpoints; bp != NULL; bp = bp->next)
1478 if (bp->flags & mask)
1479 cpu_breakpoint_remove_by_ref(env, bp);
1480 #endif
1481 }
1482
1483 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1484 CPU loop after each instruction */
1485 void cpu_single_step(CPUState *env, int enabled)
1486 {
1487 #if defined(TARGET_HAS_ICE)
1488 if (env->singlestep_enabled != enabled) {
1489 env->singlestep_enabled = enabled;
1490         /* must flush all the translated code to avoid inconsistencies */
1491 /* XXX: only flush what is necessary */
1492 tb_flush(env);
1493 }
1494 #endif
1495 }
1496
1497 /* enable or disable low-level logging */
1498 void cpu_set_log(int log_flags)
1499 {
1500 loglevel = log_flags;
1501 if (loglevel && !logfile) {
1502 logfile = fopen(logfilename, log_append ? "a" : "w");
1503 if (!logfile) {
1504 perror(logfilename);
1505 _exit(1);
1506 }
1507 #if !defined(CONFIG_SOFTMMU)
1508 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1509 {
1510 static char logfile_buf[4096];
1511 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1512 }
1513 #else
1514 setvbuf(logfile, NULL, _IOLBF, 0);
1515 #endif
1516 log_append = 1;
1517 }
1518 if (!loglevel && logfile) {
1519 fclose(logfile);
1520 logfile = NULL;
1521 }
1522 }
1523
1524 void cpu_set_log_filename(const char *filename)
1525 {
1526 logfilename = strdup(filename);
1527 if (logfile) {
1528 fclose(logfile);
1529 logfile = NULL;
1530 }
1531 cpu_set_log(loglevel);
1532 }
1533
1534 /* mask must never be zero, except for A20 change call */
1535 void cpu_interrupt(CPUState *env, int mask)
1536 {
1537 #if !defined(USE_NPTL)
1538 TranslationBlock *tb;
1539 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1540 #endif
1541 int old_mask;
1542
1543 old_mask = env->interrupt_request;
1544 /* FIXME: This is probably not threadsafe. A different thread could
1545 be in the middle of a read-modify-write operation. */
1546 env->interrupt_request |= mask;
1547 #if defined(USE_NPTL)
1548 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1549 problem and hope the cpu will stop of its own accord. For userspace
1550 emulation this often isn't actually as bad as it sounds. Often
1551 signals are used primarily to interrupt blocking syscalls. */
1552 #else
1553 if (use_icount) {
1554 env->icount_decr.u16.high = 0xffff;
1555 #ifndef CONFIG_USER_ONLY
1556 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1557 an async event happened and we need to process it. */
1558 if (!can_do_io(env)
1559 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1560 cpu_abort(env, "Raised interrupt while not in I/O function");
1561 }
1562 #endif
1563 } else {
1564 tb = env->current_tb;
1565         /* if the cpu is currently executing code, we must unlink it and
1566            all the potentially executing TBs */
1567 if (tb && !testandset(&interrupt_lock)) {
1568 env->current_tb = NULL;
1569 tb_reset_jump_recursive(tb);
1570 resetlock(&interrupt_lock);
1571 }
1572 }
1573 #endif
1574 }
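/* Note on the icount path above: writing 0xffff into
   icount_decr.u16.high drives the combined 32-bit counter negative, so
   the next icount check in generated code fails and the CPU leaves the
   TB to service the interrupt, without having to unlink any TBs. */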
1575
1576 void cpu_reset_interrupt(CPUState *env, int mask)
1577 {
1578 env->interrupt_request &= ~mask;
1579 }
1580
1581 const CPULogItem cpu_log_items[] = {
1582 { CPU_LOG_TB_OUT_ASM, "out_asm",
1583 "show generated host assembly code for each compiled TB" },
1584 { CPU_LOG_TB_IN_ASM, "in_asm",
1585 "show target assembly code for each compiled TB" },
1586 { CPU_LOG_TB_OP, "op",
1587 "show micro ops for each compiled TB" },
1588 { CPU_LOG_TB_OP_OPT, "op_opt",
1589 "show micro ops "
1590 #ifdef TARGET_I386
1591 "before eflags optimization and "
1592 #endif
1593 "after liveness analysis" },
1594 { CPU_LOG_INT, "int",
1595 "show interrupts/exceptions in short format" },
1596 { CPU_LOG_EXEC, "exec",
1597 "show trace before each executed TB (lots of logs)" },
1598 { CPU_LOG_TB_CPU, "cpu",
1599 "show CPU state before block translation" },
1600 #ifdef TARGET_I386
1601 { CPU_LOG_PCALL, "pcall",
1602 "show protected mode far calls/returns/exceptions" },
1603 #endif
1604 #ifdef DEBUG_IOPORT
1605 { CPU_LOG_IOPORT, "ioport",
1606 "show all i/o ports accesses" },
1607 #endif
1608 { 0, NULL, NULL },
1609 };
1610
1611 static int cmp1(const char *s1, int n, const char *s2)
1612 {
1613 if (strlen(s2) != n)
1614 return 0;
1615 return memcmp(s1, s2, n) == 0;
1616 }
1617
1618 /* takes a comma-separated list of log masks. Returns 0 on error. */
1619 int cpu_str_to_log_mask(const char *str)
1620 {
1621 const CPULogItem *item;
1622 int mask;
1623 const char *p, *p1;
1624
1625 p = str;
1626 mask = 0;
1627 for(;;) {
1628 p1 = strchr(p, ',');
1629 if (!p1)
1630 p1 = p + strlen(p);
1631 if(cmp1(p,p1-p,"all")) {
1632 for(item = cpu_log_items; item->mask != 0; item++) {
1633 mask |= item->mask;
1634 }
1635 } else {
1636 for(item = cpu_log_items; item->mask != 0; item++) {
1637 if (cmp1(p, p1 - p, item->name))
1638 goto found;
1639 }
1640 return 0;
1641 }
1642 found:
1643 mask |= item->mask;
1644 if (*p1 != ',')
1645 break;
1646 p = p1 + 1;
1647 }
1648 return mask;
1649 }
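/* Example: cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, "all" accumulates every entry of
   cpu_log_items, and any unknown name makes the whole call return 0. */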
1650
1651 void cpu_abort(CPUState *env, const char *fmt, ...)
1652 {
1653 va_list ap;
1654 va_list ap2;
1655
1656 va_start(ap, fmt);
1657 va_copy(ap2, ap);
1658 fprintf(stderr, "qemu: fatal: ");
1659 vfprintf(stderr, fmt, ap);
1660 fprintf(stderr, "\n");
1661 #ifdef TARGET_I386
1662 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1663 #else
1664 cpu_dump_state(env, stderr, fprintf, 0);
1665 #endif
1666 if (logfile) {
1667 fprintf(logfile, "qemu: fatal: ");
1668 vfprintf(logfile, fmt, ap2);
1669 fprintf(logfile, "\n");
1670 #ifdef TARGET_I386
1671 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1672 #else
1673 cpu_dump_state(env, logfile, fprintf, 0);
1674 #endif
1675 fflush(logfile);
1676 fclose(logfile);
1677 }
1678 va_end(ap2);
1679 va_end(ap);
1680 abort();
1681 }
1682
1683 CPUState *cpu_copy(CPUState *env)
1684 {
1685 CPUState *new_env = cpu_init(env->cpu_model_str);
1686 /* preserve chaining and index */
1687 CPUState *next_cpu = new_env->next_cpu;
1688 int cpu_index = new_env->cpu_index;
1689 memcpy(new_env, env, sizeof(CPUState));
1690 new_env->next_cpu = next_cpu;
1691 new_env->cpu_index = cpu_index;
1692 return new_env;
1693 }
1694
1695 #if !defined(CONFIG_USER_ONLY)
1696
1697 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1698 {
1699 unsigned int i;
1700
1701 /* Discard jump cache entries for any tb which might potentially
1702 overlap the flushed page. */
1703 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1704 memset (&env->tb_jmp_cache[i], 0,
1705 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1706
1707 i = tb_jmp_cache_hash_page(addr);
1708 memset (&env->tb_jmp_cache[i], 0,
1709 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1710 }
1711
1712 /* NOTE: if flush_global is true, also flush global entries (not
1713 implemented yet) */
1714 void tlb_flush(CPUState *env, int flush_global)
1715 {
1716 int i;
1717
1718 #if defined(DEBUG_TLB)
1719 printf("tlb_flush:\n");
1720 #endif
1721 /* must reset current TB so that interrupts cannot modify the
1722 links while we are modifying them */
1723 env->current_tb = NULL;
1724
1725 for(i = 0; i < CPU_TLB_SIZE; i++) {
1726 env->tlb_table[0][i].addr_read = -1;
1727 env->tlb_table[0][i].addr_write = -1;
1728 env->tlb_table[0][i].addr_code = -1;
1729 env->tlb_table[1][i].addr_read = -1;
1730 env->tlb_table[1][i].addr_write = -1;
1731 env->tlb_table[1][i].addr_code = -1;
1732 #if (NB_MMU_MODES >= 3)
1733 env->tlb_table[2][i].addr_read = -1;
1734 env->tlb_table[2][i].addr_write = -1;
1735 env->tlb_table[2][i].addr_code = -1;
1736 #if (NB_MMU_MODES == 4)
1737 env->tlb_table[3][i].addr_read = -1;
1738 env->tlb_table[3][i].addr_write = -1;
1739 env->tlb_table[3][i].addr_code = -1;
1740 #endif
1741 #endif
1742 }
1743
1744 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1745
1746 #ifdef USE_KQEMU
1747 if (env->kqemu_enabled) {
1748 kqemu_flush(env, flush_global);
1749 }
1750 #endif
1751 tlb_flush_count++;
1752 }
1753
1754 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1755 {
1756 if (addr == (tlb_entry->addr_read &
1757 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1758 addr == (tlb_entry->addr_write &
1759 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1760 addr == (tlb_entry->addr_code &
1761 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1762 tlb_entry->addr_read = -1;
1763 tlb_entry->addr_write = -1;
1764 tlb_entry->addr_code = -1;
1765 }
1766 }
1767
1768 void tlb_flush_page(CPUState *env, target_ulong addr)
1769 {
1770 int i;
1771
1772 #if defined(DEBUG_TLB)
1773 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1774 #endif
1775 /* must reset current TB so that interrupts cannot modify the
1776 links while we are modifying them */
1777 env->current_tb = NULL;
1778
1779 addr &= TARGET_PAGE_MASK;
1780 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1781 tlb_flush_entry(&env->tlb_table[0][i], addr);
1782 tlb_flush_entry(&env->tlb_table[1][i], addr);
1783 #if (NB_MMU_MODES >= 3)
1784 tlb_flush_entry(&env->tlb_table[2][i], addr);
1785 #if (NB_MMU_MODES == 4)
1786 tlb_flush_entry(&env->tlb_table[3][i], addr);
1787 #endif
1788 #endif
1789
1790 tlb_flush_jmp_cache(env, addr);
1791
1792 #ifdef USE_KQEMU
1793 if (env->kqemu_enabled) {
1794 kqemu_flush_page(env, addr);
1795 }
1796 #endif
1797 }
1798
1799 /* update the TLBs so that writes to code in the virtual page 'addr'
1800 can be detected */
1801 static void tlb_protect_code(ram_addr_t ram_addr)
1802 {
1803 cpu_physical_memory_reset_dirty(ram_addr,
1804 ram_addr + TARGET_PAGE_SIZE,
1805 CODE_DIRTY_FLAG);
1806 }
1807
1808 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1809 tested for self modifying code */
1810 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1811 target_ulong vaddr)
1812 {
1813 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1814 }
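/* Together these two helpers implement self-modifying-code detection:
   clearing CODE_DIRTY_FLAG makes subsequent guest writes to the page
   take the slow TLB_NOTDIRTY path, where the write handler can
   invalidate the overlapping TBs before the flag is set again via
   tlb_unprotect_code_phys(). */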
1815
1816 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1817 unsigned long start, unsigned long length)
1818 {
1819 unsigned long addr;
1820 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1821 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1822 if ((addr - start) < length) {
1823 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1824 }
1825 }
1826 }
1827
1828 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1829 int dirty_flags)
1830 {
1831 CPUState *env;
1832 unsigned long length, start1;
1833 int i, mask, len;
1834 uint8_t *p;
1835
1836 start &= TARGET_PAGE_MASK;
1837 end = TARGET_PAGE_ALIGN(end);
1838
1839 length = end - start;
1840 if (length == 0)
1841 return;
1842 len = length >> TARGET_PAGE_BITS;
1843 #ifdef USE_KQEMU
1844 /* XXX: should not depend on cpu context */
1845 env = first_cpu;
1846 if (env->kqemu_enabled) {
1847 ram_addr_t addr;
1848 addr = start;
1849 for(i = 0; i < len; i++) {
1850 kqemu_set_notdirty(env, addr);
1851 addr += TARGET_PAGE_SIZE;
1852 }
1853 }
1854 #endif
1855 mask = ~dirty_flags;
1856 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1857 for(i = 0; i < len; i++)
1858 p[i] &= mask;
1859
1860 /* we modify the TLB cache so that the dirty bit will be set again
1861 when accessing the range */
1862 start1 = start + (unsigned long)phys_ram_base;
1863 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1864 for(i = 0; i < CPU_TLB_SIZE; i++)
1865 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1866 for(i = 0; i < CPU_TLB_SIZE; i++)
1867 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1868 #if (NB_MMU_MODES >= 3)
1869 for(i = 0; i < CPU_TLB_SIZE; i++)
1870 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1871 #if (NB_MMU_MODES == 4)
1872 for(i = 0; i < CPU_TLB_SIZE; i++)
1873 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1874 #endif
1875 #endif
1876 }
1877 }
1878
1879 int cpu_physical_memory_set_dirty_tracking(int enable)
1880 {
1881 in_migration = enable;
1882 return 0;
1883 }
1884
1885 int cpu_physical_memory_get_dirty_tracking(void)
1886 {
1887 return in_migration;
1888 }
1889
1890 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1891 {
1892 ram_addr_t ram_addr;
1893
1894 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1895 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1896 tlb_entry->addend - (unsigned long)phys_ram_base;
1897 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1898 tlb_entry->addr_write |= TLB_NOTDIRTY;
1899 }
1900 }
1901 }
1902
1903 /* update the TLB according to the current state of the dirty bits */
1904 void cpu_tlb_update_dirty(CPUState *env)
1905 {
1906 int i;
1907 for(i = 0; i < CPU_TLB_SIZE; i++)
1908 tlb_update_dirty(&env->tlb_table[0][i]);
1909 for(i = 0; i < CPU_TLB_SIZE; i++)
1910 tlb_update_dirty(&env->tlb_table[1][i]);
1911 #if (NB_MMU_MODES >= 3)
1912 for(i = 0; i < CPU_TLB_SIZE; i++)
1913 tlb_update_dirty(&env->tlb_table[2][i]);
1914 #if (NB_MMU_MODES == 4)
1915 for(i = 0; i < CPU_TLB_SIZE; i++)
1916 tlb_update_dirty(&env->tlb_table[3][i]);
1917 #endif
1918 #endif
1919 }
1920
1921 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1922 {
1923 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1924 tlb_entry->addr_write = vaddr;
1925 }
1926
1927 /* update the TLB corresponding to virtual page vaddr
1928 so that it is no longer dirty */
1929 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1930 {
1931 int i;
1932
1933 vaddr &= TARGET_PAGE_MASK;
1934 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1935 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1936 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1937 #if (NB_MMU_MODES >= 3)
1938 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1939 #if (NB_MMU_MODES == 4)
1940 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1941 #endif
1942 #endif
1943 }
1944
1945 /* add a new TLB entry. At most one entry for a given virtual address
1946 is permitted. Return 0 if OK or 2 if the page could not be mapped
1947 (can only happen in non SOFTMMU mode for I/O pages or pages
1948 conflicting with the host address space). */
1949 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1950 target_phys_addr_t paddr, int prot,
1951 int mmu_idx, int is_softmmu)
1952 {
1953 PhysPageDesc *p;
1954 unsigned long pd;
1955 unsigned int index;
1956 target_ulong address;
1957 target_ulong code_address;
1958 target_phys_addr_t addend;
1959 int ret;
1960 CPUTLBEntry *te;
1961 CPUWatchpoint *wp;
1962 target_phys_addr_t iotlb;
1963
1964 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1965 if (!p) {
1966 pd = IO_MEM_UNASSIGNED;
1967 } else {
1968 pd = p->phys_offset;
1969 }
1970 #if defined(DEBUG_TLB)
1971 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1972 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1973 #endif
1974
1975 ret = 0;
1976 address = vaddr;
1977 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1978 /* IO memory case (romd handled later) */
1979 address |= TLB_MMIO;
1980 }
1981 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1982 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1983 /* Normal RAM. */
1984 iotlb = pd & TARGET_PAGE_MASK;
1985 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1986 iotlb |= IO_MEM_NOTDIRTY;
1987 else
1988 iotlb |= IO_MEM_ROM;
1989 } else {
1990 /* IO handlers are currently passed a physical address.
1991 It would be nice to pass an offset from the base address
1992 of that region. This would avoid having to special case RAM,
1993 and avoid full address decoding in every device.
1994 We can't use the high bits of pd for this because
1995 IO_MEM_ROMD uses these as a ram address. */
1996 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1997 }
1998
1999 code_address = address;
2000 /* Make accesses to pages with watchpoints go via the
2001 watchpoint trap routines. */
2002 for (wp = env->watchpoints; wp != NULL; wp = wp->next) {
2003 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2004 iotlb = io_mem_watch + paddr;
2005 /* TODO: The memory case can be optimized by not trapping
2006 reads of pages with a write breakpoint. */
2007 address |= TLB_MMIO;
2008 }
2009 }
2010
2011 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2012 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2013 te = &env->tlb_table[mmu_idx][index];
2014 te->addend = addend - vaddr;
2015 if (prot & PAGE_READ) {
2016 te->addr_read = address;
2017 } else {
2018 te->addr_read = -1;
2019 }
2020
2021 if (prot & PAGE_EXEC) {
2022 te->addr_code = code_address;
2023 } else {
2024 te->addr_code = -1;
2025 }
2026 if (prot & PAGE_WRITE) {
2027 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2028 (pd & IO_MEM_ROMD)) {
2029 /* Write access calls the I/O callback. */
2030 te->addr_write = address | TLB_MMIO;
2031 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2032 !cpu_physical_memory_is_dirty(pd)) {
2033 te->addr_write = address | TLB_NOTDIRTY;
2034 } else {
2035 te->addr_write = address;
2036 }
2037 } else {
2038 te->addr_write = -1;
2039 }
2040 return ret;
2041 }
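
#if 0 /* Illustrative sketch (not part of this file): the fast path in
         softmmu_template.h that consumes the entry filled in above.
         On a plain RAM hit the cached addend turns the guest virtual
         address into a host pointer; any flag bit (TLB_MMIO,
         TLB_NOTDIRTY) or a -1 entry breaks the equality and forces
         the slow path, which refills via tlb_set_page_exec(). */
static inline uint32_t example_ldl(CPUState *env1, target_ulong vaddr)
{
    int idx = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *e = &env1->tlb_table[0][idx];    /* mmu_idx 0 assumed */

    if ((vaddr & TARGET_PAGE_MASK) == e->addr_read) {
        /* RAM hit: host address = guest vaddr + cached addend */
        return ldl_p((void *)(unsigned long)(vaddr + e->addend));
    }
    return 0;   /* slow path omitted in this sketch */
}
#endif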
2042
2043 #else
2044
2045 void tlb_flush(CPUState *env, int flush_global)
2046 {
2047 }
2048
2049 void tlb_flush_page(CPUState *env, target_ulong addr)
2050 {
2051 }
2052
2053 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2054 target_phys_addr_t paddr, int prot,
2055 int mmu_idx, int is_softmmu)
2056 {
2057 return 0;
2058 }
2059
2060 /* dump memory mappings */
2061 void page_dump(FILE *f)
2062 {
2063 unsigned long start, end;
2064 int i, j, prot, prot1;
2065 PageDesc *p;
2066
2067 fprintf(f, "%-8s %-8s %-8s %s\n",
2068 "start", "end", "size", "prot");
2069 start = -1;
2070 end = -1;
2071 prot = 0;
2072 for(i = 0; i <= L1_SIZE; i++) {
2073 if (i < L1_SIZE)
2074 p = l1_map[i];
2075 else
2076 p = NULL;
2077 for(j = 0; j < L2_SIZE; j++) {
2078 if (!p)
2079 prot1 = 0;
2080 else
2081 prot1 = p[j].flags;
2082 if (prot1 != prot) {
2083 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2084 if (start != -1) {
2085 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2086 start, end, end - start,
2087 prot & PAGE_READ ? 'r' : '-',
2088 prot & PAGE_WRITE ? 'w' : '-',
2089 prot & PAGE_EXEC ? 'x' : '-');
2090 }
2091 if (prot1 != 0)
2092 start = end;
2093 else
2094 start = -1;
2095 prot = prot1;
2096 }
2097 if (!p)
2098 break;
2099 }
2100 }
2101 }
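
/* Example output (illustrative values, derived from the format strings
 * above), one line per contiguous mapping:
 *
 *   start    end      size     prot
 *   00010000-000a0000 00090000 r-x
 *   000a0000-000c0000 00020000 rw-
 */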
2102
2103 int page_get_flags(target_ulong address)
2104 {
2105 PageDesc *p;
2106
2107 p = page_find(address >> TARGET_PAGE_BITS);
2108 if (!p)
2109 return 0;
2110 return p->flags;
2111 }
2112
2113 /* modify the flags of a page and invalidate the code if
2114 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2115 depending on PAGE_WRITE */
2116 void page_set_flags(target_ulong start, target_ulong end, int flags)
2117 {
2118 PageDesc *p;
2119 target_ulong addr;
2120
2121 /* mmap_lock should already be held. */
2122 start = start & TARGET_PAGE_MASK;
2123 end = TARGET_PAGE_ALIGN(end);
2124 if (flags & PAGE_WRITE)
2125 flags |= PAGE_WRITE_ORG;
2126 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2127 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2128 /* We may be called for host regions that are outside guest
2129 address space. */
2130 if (!p)
2131 return;
2132 /* if the write protection is set, then we invalidate the code
2133 inside */
2134 if (!(p->flags & PAGE_WRITE) &&
2135 (flags & PAGE_WRITE) &&
2136 p->first_tb) {
2137 tb_invalidate_phys_page(addr, 0, NULL);
2138 }
2139 p->flags = flags;
2140 }
2141 }
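
#if 0 /* Usage sketch (illustrative): the user-mode mmap emulation
         publishes a fresh mapping this way (cf. linux-user/mmap.c) */
page_set_flags(start, start + len, prot | PAGE_VALID);
#endif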
2142
2143 int page_check_range(target_ulong start, target_ulong len, int flags)
2144 {
2145 PageDesc *p;
2146 target_ulong end;
2147 target_ulong addr;
2148
2149 if (start + len < start)
2150 /* we've wrapped around */
2151 return -1;
2152
2153 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2154 start = start & TARGET_PAGE_MASK;
2155
2156 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2157 p = page_find(addr >> TARGET_PAGE_BITS);
2158 if (!p)
2159 return -1;
2160 if (!(p->flags & PAGE_VALID))
2161 return -1;
2162
2163 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2164 return -1;
2165 if (flags & PAGE_WRITE) {
2166 if (!(p->flags & PAGE_WRITE_ORG))
2167 return -1;
2168 /* unprotect the page if it was put read-only because it
2169 contains translated code */
2170 if (!(p->flags & PAGE_WRITE)) {
2171 if (!page_unprotect(addr, 0, NULL))
2172 return -1;
2173 }
2174 return 0;
2175 }
2176 }
2177 return 0;
2178 }
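
#if 0 /* Usage sketch (illustrative): syscall emulation validating a
         guest buffer before reading through it; linux-user's
         access_ok() is a thin wrapper around this. TARGET_EFAULT is
         assumed from the linux-user headers. */
if (page_check_range(guest_addr, size, PAGE_READ) < 0)
    return -TARGET_EFAULT;
#endif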
2179
2180 /* called from signal handler: invalidate the code and unprotect the
2181 page. Return TRUE if the fault was successfully handled. */
2182 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2183 {
2184 unsigned int page_index, prot, pindex;
2185 PageDesc *p, *p1;
2186 target_ulong host_start, host_end, addr;
2187
2188 /* Technically this isn't safe inside a signal handler. However we
2189 know this only ever happens in a synchronous SEGV handler, so in
2190 practice it seems to be ok. */
2191 mmap_lock();
2192
2193 host_start = address & qemu_host_page_mask;
2194 page_index = host_start >> TARGET_PAGE_BITS;
2195 p1 = page_find(page_index);
2196 if (!p1) {
2197 mmap_unlock();
2198 return 0;
2199 }
2200 host_end = host_start + qemu_host_page_size;
2201 p = p1;
2202 prot = 0;
2203 for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2204 prot |= p->flags;
2205 p++;
2206 }
2207 /* if the page was really writable, then we change its
2208 protection back to writable */
2209 if (prot & PAGE_WRITE_ORG) {
2210 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2211 if (!(p1[pindex].flags & PAGE_WRITE)) {
2212 mprotect((void *)g2h(host_start), qemu_host_page_size,
2213 (prot & PAGE_BITS) | PAGE_WRITE);
2214 p1[pindex].flags |= PAGE_WRITE;
2215 /* and since the content will be modified, we must invalidate
2216 the corresponding translated code. */
2217 tb_invalidate_phys_page(address, pc, puc);
2218 #ifdef DEBUG_TB_CHECK
2219 tb_invalidate_check(address);
2220 #endif
2221 mmap_unlock();
2222 return 1;
2223 }
2224 }
2225 mmap_unlock();
2226 return 0;
2227 }
2228
2229 static inline void tlb_set_dirty(CPUState *env,
2230 unsigned long addr, target_ulong vaddr)
2231 {
2232 }
2233 #endif /* defined(CONFIG_USER_ONLY) */
2234
2235 #if !defined(CONFIG_USER_ONLY)
2236 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2237 ram_addr_t memory);
2238 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2239 ram_addr_t orig_memory);
2240 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2241 need_subpage) \
2242 do { \
2243 if (addr > start_addr) \
2244 start_addr2 = 0; \
2245 else { \
2246 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2247 if (start_addr2 > 0) \
2248 need_subpage = 1; \
2249 } \
2250 \
2251 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2252 end_addr2 = TARGET_PAGE_SIZE - 1; \
2253 else { \
2254 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2255 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2256 need_subpage = 1; \
2257 } \
2258 } while (0)
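
/* Worked example (illustrative, assuming 4 KiB target pages): mapping
   start_addr = 0x10000400 with orig_size = 0x200 into the page at
   addr = 0x10000000 yields start_addr2 = 0x400, end_addr2 = 0x5ff and
   need_subpage = 1, i.e. only bytes 0x400..0x5ff of that page belong
   to the new region, so a subpage must mediate the rest. */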
2259
2260 /* Register physical memory. 'size' must be a multiple of the target
2261 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2262 I/O memory page. */
2263 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2264 ram_addr_t size,
2265 ram_addr_t phys_offset)
2266 {
2267 target_phys_addr_t addr, end_addr;
2268 PhysPageDesc *p;
2269 CPUState *env;
2270 ram_addr_t orig_size = size;
2271 void *subpage;
2272
2273 #ifdef USE_KQEMU
2274 /* XXX: should not depend on cpu context */
2275 env = first_cpu;
2276 if (env->kqemu_enabled) {
2277 kqemu_set_phys_mem(start_addr, size, phys_offset);
2278 }
2279 #endif
2280 if (kvm_enabled())
2281 kvm_set_phys_mem(start_addr, size, phys_offset);
2282
2283 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2284 end_addr = start_addr + (target_phys_addr_t)size;
2285 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2286 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2287 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2288 ram_addr_t orig_memory = p->phys_offset;
2289 target_phys_addr_t start_addr2, end_addr2;
2290 int need_subpage = 0;
2291
2292 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2293 need_subpage);
2294 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2295 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2296 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2297 &p->phys_offset, orig_memory);
2298 } else {
2299 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2300 >> IO_MEM_SHIFT];
2301 }
2302 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2303 } else {
2304 p->phys_offset = phys_offset;
2305 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2306 (phys_offset & IO_MEM_ROMD))
2307 phys_offset += TARGET_PAGE_SIZE;
2308 }
2309 } else {
2310 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2311 p->phys_offset = phys_offset;
2312 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2313 (phys_offset & IO_MEM_ROMD))
2314 phys_offset += TARGET_PAGE_SIZE;
2315 else {
2316 target_phys_addr_t start_addr2, end_addr2;
2317 int need_subpage = 0;
2318
2319 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2320 end_addr2, need_subpage);
2321
2322 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2323 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2324 &p->phys_offset, IO_MEM_UNASSIGNED);
2325 subpage_register(subpage, start_addr2, end_addr2,
2326 phys_offset);
2327 }
2328 }
2329 }
2330 }
2331
2332 /* since each CPU stores ram addresses in its TLB cache, we must
2333 reset the modified entries */
2334 /* XXX: slow ! */
2335 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2336 tlb_flush(env, 1);
2337 }
2338 }
2339
2340 /* XXX: temporary until new memory mapping API */
2341 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2342 {
2343 PhysPageDesc *p;
2344
2345 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2346 if (!p)
2347 return IO_MEM_UNASSIGNED;
2348 return p->phys_offset;
2349 }
2350
2351 /* XXX: better than nothing */
2352 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2353 {
2354 ram_addr_t addr;
2355 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2356 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2357 (uint64_t)size, (uint64_t)phys_ram_size);
2358 abort();
2359 }
2360 addr = phys_ram_alloc_offset;
2361 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2362 return addr;
2363 }
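
#if 0 /* Usage sketch (illustrative): board code pairs qemu_ram_alloc()
         with cpu_register_physical_memory() to map guest RAM; the RAM
         offset goes in phys_offset with IO_MEM_RAM (0) in the low bits */
{
    ram_addr_t ram_offset = qemu_ram_alloc(0x100000);         /* 1 MB */

    cpu_register_physical_memory(0x00000000, 0x100000,
                                 ram_offset | IO_MEM_RAM);
}
#endif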
2364
2365 void qemu_ram_free(ram_addr_t addr)
2366 {
2367 }
2368
2369 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2370 {
2371 #ifdef DEBUG_UNASSIGNED
2372 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2373 #endif
2374 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2375 do_unassigned_access(addr, 0, 0, 0, 1);
2376 #endif
2377 return 0;
2378 }
2379
2380 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2381 {
2382 #ifdef DEBUG_UNASSIGNED
2383 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2384 #endif
2385 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2386 do_unassigned_access(addr, 0, 0, 0, 2);
2387 #endif
2388 return 0;
2389 }
2390
2391 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2392 {
2393 #ifdef DEBUG_UNASSIGNED
2394 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2395 #endif
2396 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2397 do_unassigned_access(addr, 0, 0, 0, 4);
2398 #endif
2399 return 0;
2400 }
2401
2402 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2403 {
2404 #ifdef DEBUG_UNASSIGNED
2405 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2406 #endif
2407 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2408 do_unassigned_access(addr, 1, 0, 0, 1);
2409 #endif
2410 }
2411
2412 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2413 {
2414 #ifdef DEBUG_UNASSIGNED
2415 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2416 #endif
2417 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2418 do_unassigned_access(addr, 1, 0, 0, 2);
2419 #endif
2420 }
2421
2422 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2423 {
2424 #ifdef DEBUG_UNASSIGNED
2425 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2426 #endif
2427 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2428 do_unassigned_access(addr, 1, 0, 0, 4);
2429 #endif
2430 }
2431
2432 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2433 unassigned_mem_readb,
2434 unassigned_mem_readw,
2435 unassigned_mem_readl,
2436 };
2437
2438 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2439 unassigned_mem_writeb,
2440 unassigned_mem_writew,
2441 unassigned_mem_writel,
2442 };
2443
2444 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2445 uint32_t val)
2446 {
2447 int dirty_flags;
2448 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2449 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2450 #if !defined(CONFIG_USER_ONLY)
2451 tb_invalidate_phys_page_fast(ram_addr, 1);
2452 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2453 #endif
2454 }
2455 stb_p(phys_ram_base + ram_addr, val);
2456 #ifdef USE_KQEMU
2457 if (cpu_single_env->kqemu_enabled &&
2458 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2459 kqemu_modify_page(cpu_single_env, ram_addr);
2460 #endif
2461 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2462 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2463 /* we remove the notdirty callback only if the code has been
2464 flushed */
2465 if (dirty_flags == 0xff)
2466 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2467 }
2468
2469 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2470 uint32_t val)
2471 {
2472 int dirty_flags;
2473 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2474 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2475 #if !defined(CONFIG_USER_ONLY)
2476 tb_invalidate_phys_page_fast(ram_addr, 2);
2477 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2478 #endif
2479 }
2480 stw_p(phys_ram_base + ram_addr, val);
2481 #ifdef USE_KQEMU
2482 if (cpu_single_env->kqemu_enabled &&
2483 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2484 kqemu_modify_page(cpu_single_env, ram_addr);
2485 #endif
2486 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2487 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2488 /* we remove the notdirty callback only if the code has been
2489 flushed */
2490 if (dirty_flags == 0xff)
2491 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2492 }
2493
2494 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2495 uint32_t val)
2496 {
2497 int dirty_flags;
2498 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2499 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2500 #if !defined(CONFIG_USER_ONLY)
2501 tb_invalidate_phys_page_fast(ram_addr, 4);
2502 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2503 #endif
2504 }
2505 stl_p(phys_ram_base + ram_addr, val);
2506 #ifdef USE_KQEMU
2507 if (cpu_single_env->kqemu_enabled &&
2508 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2509 kqemu_modify_page(cpu_single_env, ram_addr);
2510 #endif
2511 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2512 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2513 /* we remove the notdirty callback only if the code has been
2514 flushed */
2515 if (dirty_flags == 0xff)
2516 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2517 }
2518
2519 static CPUReadMemoryFunc *error_mem_read[3] = {
2520 NULL, /* never used */
2521 NULL, /* never used */
2522 NULL, /* never used */
2523 };
2524
2525 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2526 notdirty_mem_writeb,
2527 notdirty_mem_writew,
2528 notdirty_mem_writel,
2529 };
2530
2531 /* Generate a debug exception if a watchpoint has been hit. */
2532 static void check_watchpoint(int offset, int len_mask, int flags)
2533 {
2534 CPUState *env = cpu_single_env;
2535 target_ulong pc, cs_base;
2536 TranslationBlock *tb;
2537 target_ulong vaddr;
2538 CPUWatchpoint *wp;
2539 int cpu_flags;
2540
2541 if (env->watchpoint_hit) {
2542 /* We re-entered the check after replacing the TB. Now raise
2543 * the debug interrupt so that it will trigger after the
2544 * current instruction. */
2545 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2546 return;
2547 }
2548 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2549 for (wp = env->watchpoints; wp != NULL; wp = wp->next) {
2550 if ((vaddr == (wp->vaddr & len_mask) ||
2551 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2552 wp->flags |= BP_WATCHPOINT_HIT;
2553 if (!env->watchpoint_hit) {
2554 env->watchpoint_hit = wp;
2555 tb = tb_find_pc(env->mem_io_pc);
2556 if (!tb) {
2557 cpu_abort(env, "check_watchpoint: could not find TB for "
2558 "pc=%p", (void *)env->mem_io_pc);
2559 }
2560 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2561 tb_phys_invalidate(tb, -1);
2562 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2563 env->exception_index = EXCP_DEBUG;
2564 } else {
2565 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2566 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2567 }
2568 cpu_resume_from_signal(env, NULL);
2569 }
2570 } else {
2571 wp->flags &= ~BP_WATCHPOINT_HIT;
2572 }
2573 }
2574 }
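
/* Worked example (illustrative): a 4-byte write watchpoint at 0x1000
   has vaddr = 0x1000 and len_mask = ~0x3. A one-byte guest store to
   0x1002 enters check_watchpoint() with vaddr = 0x1002 and a len_mask
   argument of ~0x0; the first clause fails (0x1002 != 0x1000) but the
   second matches, since (0x1002 & ~0x3) == 0x1000, so the watchpoint
   fires. The first clause covers the converse case of a wide access
   overlapping a narrow watchpoint. */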
2575
2576 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2577 so these check for a hit then pass through to the normal out-of-line
2578 phys routines. */
2579 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2580 {
2581 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2582 return ldub_phys(addr);
2583 }
2584
2585 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2586 {
2587 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2588 return lduw_phys(addr);
2589 }
2590
2591 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2592 {
2593 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2594 return ldl_phys(addr);
2595 }
2596
2597 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2598 uint32_t val)
2599 {
2600 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2601 stb_phys(addr, val);
2602 }
2603
2604 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2605 uint32_t val)
2606 {
2607 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2608 stw_phys(addr, val);
2609 }
2610
2611 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2612 uint32_t val)
2613 {
2614 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2615 stl_phys(addr, val);
2616 }
2617
2618 static CPUReadMemoryFunc *watch_mem_read[3] = {
2619 watch_mem_readb,
2620 watch_mem_readw,
2621 watch_mem_readl,
2622 };
2623
2624 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2625 watch_mem_writeb,
2626 watch_mem_writew,
2627 watch_mem_writel,
2628 };
2629
2630 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2631 unsigned int len)
2632 {
2633 uint32_t ret;
2634 unsigned int idx;
2635
2636 idx = SUBPAGE_IDX(addr - mmio->base);
2637 #if defined(DEBUG_SUBPAGE)
2638 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2639 mmio, len, addr, idx);
2640 #endif
2641 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2642
2643 return ret;
2644 }
2645
2646 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2647 uint32_t value, unsigned int len)
2648 {
2649 unsigned int idx;
2650
2651 idx = SUBPAGE_IDX(addr - mmio->base);
2652 #if defined(DEBUG_SUBPAGE)
2653 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2654 mmio, len, addr, idx, value);
2655 #endif
2656 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2657 }
2658
2659 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2660 {
2661 #if defined(DEBUG_SUBPAGE)
2662 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2663 #endif
2664
2665 return subpage_readlen(opaque, addr, 0);
2666 }
2667
2668 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2669 uint32_t value)
2670 {
2671 #if defined(DEBUG_SUBPAGE)
2672 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2673 #endif
2674 subpage_writelen(opaque, addr, value, 0);
2675 }
2676
2677 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2678 {
2679 #if defined(DEBUG_SUBPAGE)
2680 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2681 #endif
2682
2683 return subpage_readlen(opaque, addr, 1);
2684 }
2685
2686 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2687 uint32_t value)
2688 {
2689 #if defined(DEBUG_SUBPAGE)
2690 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2691 #endif
2692 subpage_writelen(opaque, addr, value, 1);
2693 }
2694
2695 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2696 {
2697 #if defined(DEBUG_SUBPAGE)
2698 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2699 #endif
2700
2701 return subpage_readlen(opaque, addr, 2);
2702 }
2703
2704 static void subpage_writel (void *opaque,
2705 target_phys_addr_t addr, uint32_t value)
2706 {
2707 #if defined(DEBUG_SUBPAGE)
2708 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2709 #endif
2710 subpage_writelen(opaque, addr, value, 2);
2711 }
2712
2713 static CPUReadMemoryFunc *subpage_read[] = {
2714 &subpage_readb,
2715 &subpage_readw,
2716 &subpage_readl,
2717 };
2718
2719 static CPUWriteMemoryFunc *subpage_write[] = {
2720 &subpage_writeb,
2721 &subpage_writew,
2722 &subpage_writel,
2723 };
2724
2725 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2726 ram_addr_t memory)
2727 {
2728 int idx, eidx;
2729 unsigned int i;
2730
2731 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2732 return -1;
2733 idx = SUBPAGE_IDX(start);
2734 eidx = SUBPAGE_IDX(end);
2735 #if defined(DEBUG_SUBPAGE)
2736 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2737 mmio, start, end, idx, eidx, memory);
2738 #endif
2739 memory >>= IO_MEM_SHIFT;
2740 for (; idx <= eidx; idx++) {
2741 for (i = 0; i < 4; i++) {
2742 if (io_mem_read[memory][i]) {
2743 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2744 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2745 }
2746 if (io_mem_write[memory][i]) {
2747 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2748 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2749 }
2750 }
2751 }
2752
2753 return 0;
2754 }
2755
2756 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2757 ram_addr_t orig_memory)
2758 {
2759 subpage_t *mmio;
2760 int subpage_memory;
2761
2762 mmio = qemu_mallocz(sizeof(subpage_t));
2763 if (mmio != NULL) {
2764 mmio->base = base;
2765 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2766 #if defined(DEBUG_SUBPAGE)
2767 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2768 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2769 #endif
2770 *phys = subpage_memory | IO_MEM_SUBPAGE;
2771 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2772 }
2773
2774 return mmio;
2775 }
2776
2777 static void io_mem_init(void)
2778 {
2779 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2780 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2781 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2782 io_mem_nb = 5;
2783
2784 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2785 watch_mem_write, NULL);
2786 /* alloc dirty bits array */
2787 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2788 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2789 }
2790
2791 /* mem_read and mem_write are arrays of functions containing the
2792 function to access byte (index 0), word (index 1) and dword (index
2793 2). Functions can be omitted with a NULL function pointer. The
2794 registered functions may be modified dynamically later.
2795 If io_index is non-zero, the corresponding I/O zone is
2796 modified. If it is zero, a new I/O zone is allocated. The return
2797 value can be used with cpu_register_physical_memory(). (-1) is
2798 returned on error. */
2799 int cpu_register_io_memory(int io_index,
2800 CPUReadMemoryFunc **mem_read,
2801 CPUWriteMemoryFunc **mem_write,
2802 void *opaque)
2803 {
2804 int i, subwidth = 0;
2805
2806 if (io_index <= 0) {
2807 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2808 return -1;
2809 io_index = io_mem_nb++;
2810 } else {
2811 if (io_index >= IO_MEM_NB_ENTRIES)
2812 return -1;
2813 }
2814
2815 for(i = 0; i < 3; i++) {
2816 if (!mem_read[i] || !mem_write[i])
2817 subwidth = IO_MEM_SUBWIDTH;
2818 io_mem_read[io_index][i] = mem_read[i];
2819 io_mem_write[io_index][i] = mem_write[i];
2820 }
2821 io_mem_opaque[io_index] = opaque;
2822 return (io_index << IO_MEM_SHIFT) | subwidth;
2823 }
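
#if 0 /* Usage sketch (illustrative): a device model registering an
         MMIO region and mapping it. All mydev_* names are
         hypothetical. Leaving the byte/word slots NULL flags the
         region IO_MEM_SUBWIDTH in the returned value, per the loop
         above. */
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;                     /* read a device register here */
}

static void mydev_writel(void *opaque, target_phys_addr_t addr,
                         uint32_t val)
{
    /* write a device register here */
}

static CPUReadMemoryFunc *mydev_read[3] = {
    NULL, NULL, mydev_readl,
};

static CPUWriteMemoryFunc *mydev_write[3] = {
    NULL, NULL, mydev_writel,
};

static void mydev_map(target_phys_addr_t base, void *opaque)
{
    int io = cpu_register_io_memory(0, mydev_read, mydev_write, opaque);

    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif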
2824
2825 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2826 {
2827 return io_mem_write[io_index >> IO_MEM_SHIFT];
2828 }
2829
2830 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2831 {
2832 return io_mem_read[io_index >> IO_MEM_SHIFT];
2833 }
2834
2835 #endif /* !defined(CONFIG_USER_ONLY) */
2836
2837 /* physical memory access (slow version, mainly for debug) */
2838 #if defined(CONFIG_USER_ONLY)
2839 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2840 int len, int is_write)
2841 {
2842 int l, flags;
2843 target_ulong page;
2844 void * p;
2845
2846 while (len > 0) {
2847 page = addr & TARGET_PAGE_MASK;
2848 l = (page + TARGET_PAGE_SIZE) - addr;
2849 if (l > len)
2850 l = len;
2851 flags = page_get_flags(page);
2852 if (!(flags & PAGE_VALID))
2853 return;
2854 if (is_write) {
2855 if (!(flags & PAGE_WRITE))
2856 return;
2857 /* XXX: this code should not depend on lock_user */
2858 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2859 /* FIXME - should this return an error rather than just fail? */
2860 return;
2861 memcpy(p, buf, l);
2862 unlock_user(p, addr, l);
2863 } else {
2864 if (!(flags & PAGE_READ))
2865 return;
2866 /* XXX: this code should not depend on lock_user */
2867 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2868 /* FIXME - should this return an error rather than just fail? */
2869 return;
2870 memcpy(buf, p, l);
2871 unlock_user(p, addr, 0);
2872 }
2873 len -= l;
2874 buf += l;
2875 addr += l;
2876 }
2877 }
2878
2879 #else
2880 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2881 int len, int is_write)
2882 {
2883 int l, io_index;
2884 uint8_t *ptr;
2885 uint32_t val;
2886 target_phys_addr_t page;
2887 unsigned long pd;
2888 PhysPageDesc *p;
2889
2890 while (len > 0) {
2891 page = addr & TARGET_PAGE_MASK;
2892 l = (page + TARGET_PAGE_SIZE) - addr;
2893 if (l > len)
2894 l = len;
2895 p = phys_page_find(page >> TARGET_PAGE_BITS);
2896 if (!p) {
2897 pd = IO_MEM_UNASSIGNED;
2898 } else {
2899 pd = p->phys_offset;
2900 }
2901
2902 if (is_write) {
2903 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2904 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2905 /* XXX: could force cpu_single_env to NULL to avoid
2906 potential bugs */
2907 if (l >= 4 && ((addr & 3) == 0)) {
2908 /* 32 bit write access */
2909 val = ldl_p(buf);
2910 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2911 l = 4;
2912 } else if (l >= 2 && ((addr & 1) == 0)) {
2913 /* 16 bit write access */
2914 val = lduw_p(buf);
2915 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2916 l = 2;
2917 } else {
2918 /* 8 bit write access */
2919 val = ldub_p(buf);
2920 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2921 l = 1;
2922 }
2923 } else {
2924 unsigned long addr1;
2925 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2926 /* RAM case */
2927 ptr = phys_ram_base + addr1;
2928 memcpy(ptr, buf, l);
2929 if (!cpu_physical_memory_is_dirty(addr1)) {
2930 /* invalidate code */
2931 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2932 /* set dirty bit */
2933 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2934 (0xff & ~CODE_DIRTY_FLAG);
2935 }
2936 }
2937 } else {
2938 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2939 !(pd & IO_MEM_ROMD)) {
2940 /* I/O case */
2941 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2942 if (l >= 4 && ((addr & 3) == 0)) {
2943 /* 32 bit read access */
2944 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2945 stl_p(buf, val);
2946 l = 4;
2947 } else if (l >= 2 && ((addr & 1) == 0)) {
2948 /* 16 bit read access */
2949 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2950 stw_p(buf, val);
2951 l = 2;
2952 } else {
2953 /* 8 bit read access */
2954 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2955 stb_p(buf, val);
2956 l = 1;
2957 }
2958 } else {
2959 /* RAM case */
2960 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2961 (addr & ~TARGET_PAGE_MASK);
2962 memcpy(buf, ptr, l);
2963 }
2964 }
2965 len -= l;
2966 buf += l;
2967 addr += l;
2968 }
2969 }
2970
2971 /* used for ROM loading : can write in RAM and ROM */
2972 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2973 const uint8_t *buf, int len)
2974 {
2975 int l;
2976 uint8_t *ptr;
2977 target_phys_addr_t page;
2978 unsigned long pd;
2979 PhysPageDesc *p;
2980
2981 while (len > 0) {
2982 page = addr & TARGET_PAGE_MASK;
2983 l = (page + TARGET_PAGE_SIZE) - addr;
2984 if (l > len)
2985 l = len;
2986 p = phys_page_find(page >> TARGET_PAGE_BITS);
2987 if (!p) {
2988 pd = IO_MEM_UNASSIGNED;
2989 } else {
2990 pd = p->phys_offset;
2991 }
2992
2993 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2994 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2995 !(pd & IO_MEM_ROMD)) {
2996 /* do nothing */
2997 } else {
2998 unsigned long addr1;
2999 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3000 /* ROM/RAM case */
3001 ptr = phys_ram_base + addr1;
3002 memcpy(ptr, buf, l);
3003 }
3004 len -= l;
3005 buf += l;
3006 addr += l;
3007 }
3008 }
3009
3010
3011 /* warning: addr must be aligned */
3012 uint32_t ldl_phys(target_phys_addr_t addr)
3013 {
3014 int io_index;
3015 uint8_t *ptr;
3016 uint32_t val;
3017 unsigned long pd;
3018 PhysPageDesc *p;
3019
3020 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3021 if (!p) {
3022 pd = IO_MEM_UNASSIGNED;
3023 } else {
3024 pd = p->phys_offset;
3025 }
3026
3027 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3028 !(pd & IO_MEM_ROMD)) {
3029 /* I/O case */
3030 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3031 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3032 } else {
3033 /* RAM case */
3034 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3035 (addr & ~TARGET_PAGE_MASK);
3036 val = ldl_p(ptr);
3037 }
3038 return val;
3039 }
3040
3041 /* warning: addr must be aligned */
3042 uint64_t ldq_phys(target_phys_addr_t addr)
3043 {
3044 int io_index;
3045 uint8_t *ptr;
3046 uint64_t val;
3047 unsigned long pd;
3048 PhysPageDesc *p;
3049
3050 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3051 if (!p) {
3052 pd = IO_MEM_UNASSIGNED;
3053 } else {
3054 pd = p->phys_offset;
3055 }
3056
3057 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3058 !(pd & IO_MEM_ROMD)) {
3059 /* I/O case */
3060 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3061 #ifdef TARGET_WORDS_BIGENDIAN
3062 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3063 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3064 #else
3065 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3066 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3067 #endif
3068 } else {
3069 /* RAM case */
3070 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3071 (addr & ~TARGET_PAGE_MASK);
3072 val = ldq_p(ptr);
3073 }
3074 return val;
3075 }
3076
3077 /* XXX: optimize */
3078 uint32_t ldub_phys(target_phys_addr_t addr)
3079 {
3080 uint8_t val;
3081 cpu_physical_memory_read(addr, &val, 1);
3082 return val;
3083 }
3084
3085 /* XXX: optimize */
3086 uint32_t lduw_phys(target_phys_addr_t addr)
3087 {
3088 uint16_t val;
3089 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3090 return tswap16(val);
3091 }
3092
3093 /* warning: addr must be aligned. The ram page is not marked as dirty
3094 and the code inside is not invalidated. It is useful if the dirty
3095 bits are used to track modified PTEs */
3096 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3097 {
3098 int io_index;
3099 uint8_t *ptr;
3100 unsigned long pd;
3101 PhysPageDesc *p;
3102
3103 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3104 if (!p) {
3105 pd = IO_MEM_UNASSIGNED;
3106 } else {
3107 pd = p->phys_offset;
3108 }
3109
3110 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3111 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3112 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3113 } else {
3114 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3115 ptr = phys_ram_base + addr1;
3116 stl_p(ptr, val);
3117
3118 if (unlikely(in_migration)) {
3119 if (!cpu_physical_memory_is_dirty(addr1)) {
3120 /* invalidate code */
3121 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3122 /* set dirty bit */
3123 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3124 (0xff & ~CODE_DIRTY_FLAG);
3125 }
3126 }
3127 }
3128 }
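
#if 0 /* Usage sketch (illustrative): a target MMU helper sets a PTE's
         accessed bit without dirtying the RAM page, so the dirty
         bitmap keeps tracking guest-initiated writes only. pte_addr
         is hypothetical; PG_ACCESSED_MASK is the x86 flag, named here
         purely as an example. */
{
    target_phys_addr_t pte_addr = /* physical address of the PTE */ 0;
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & PG_ACCESSED_MASK))
        stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
}
#endif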
3129
3130 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3131 {
3132 int io_index;
3133 uint8_t *ptr;
3134 unsigned long pd;
3135 PhysPageDesc *p;
3136
3137 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3138 if (!p) {
3139 pd = IO_MEM_UNASSIGNED;
3140 } else {
3141 pd = p->phys_offset;
3142 }
3143
3144 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3145 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3146 #ifdef TARGET_WORDS_BIGENDIAN
3147 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3148 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3149 #else
3150 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3151 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3152 #endif
3153 } else {
3154 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3155 (addr & ~TARGET_PAGE_MASK);
3156 stq_p(ptr, val);
3157 }
3158 }
3159
3160 /* warning: addr must be aligned */
3161 void stl_phys(target_phys_addr_t addr, uint32_t val)
3162 {
3163 int io_index;
3164 uint8_t *ptr;
3165 unsigned long pd;
3166 PhysPageDesc *p;
3167
3168 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3169 if (!p) {
3170 pd = IO_MEM_UNASSIGNED;
3171 } else {
3172 pd = p->phys_offset;
3173 }
3174
3175 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3176 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3177 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3178 } else {
3179 unsigned long addr1;
3180 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3181 /* RAM case */
3182 ptr = phys_ram_base + addr1;
3183 stl_p(ptr, val);
3184 if (!cpu_physical_memory_is_dirty(addr1)) {
3185 /* invalidate code */
3186 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3187 /* set dirty bit */
3188 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3189 (0xff & ~CODE_DIRTY_FLAG);
3190 }
3191 }
3192 }
3193
3194 /* XXX: optimize */
3195 void stb_phys(target_phys_addr_t addr, uint32_t val)
3196 {
3197 uint8_t v = val;
3198 cpu_physical_memory_write(addr, &v, 1);
3199 }
3200
3201 /* XXX: optimize */
3202 void stw_phys(target_phys_addr_t addr, uint32_t val)
3203 {
3204 uint16_t v = tswap16(val);
3205 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3206 }
3207
3208 /* XXX: optimize */
3209 void stq_phys(target_phys_addr_t addr, uint64_t val)
3210 {
3211 val = tswap64(val);
3212 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3213 }
3214
3215 #endif
3216
3217 /* virtual memory access for debug */
3218 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3219 uint8_t *buf, int len, int is_write)
3220 {
3221 int l;
3222 target_phys_addr_t phys_addr;
3223 target_ulong page;
3224
3225 while (len > 0) {
3226 page = addr & TARGET_PAGE_MASK;
3227 phys_addr = cpu_get_phys_page_debug(env, page);
3228 /* if no physical page mapped, return an error */
3229 if (phys_addr == -1)
3230 return -1;
3231 l = (page + TARGET_PAGE_SIZE) - addr;
3232 if (l > len)
3233 l = len;
3234 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3235 buf, l, is_write);
3236 len -= l;
3237 buf += l;
3238 addr += l;
3239 }
3240 return 0;
3241 }
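
#if 0 /* Usage sketch (illustrative): how a debugger front end such as
         the gdb stub reads guest virtual memory; 'vaddr' stands in for
         a hypothetical guest address */
{
    uint8_t buf[16];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        /* no physical page is mapped at vaddr */
    }
}
#endif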
3242
3243 /* in deterministic execution mode, instructions doing device I/Os
3244 must be at the end of the TB */
3245 void cpu_io_recompile(CPUState *env, void *retaddr)
3246 {
3247 TranslationBlock *tb;
3248 uint32_t n, cflags;
3249 target_ulong pc, cs_base;
3250 uint64_t flags;
3251
3252 tb = tb_find_pc((unsigned long)retaddr);
3253 if (!tb) {
3254 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3255 retaddr);
3256 }
3257 n = env->icount_decr.u16.low + tb->icount;
3258 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3259 /* Calculate how many instructions had been executed before the fault
3260 occurred. */
3261 n = n - env->icount_decr.u16.low;
3262 /* Generate a new TB ending on the I/O insn. */
3263 n++;
3264 /* On MIPS and SH, delay slot instructions can only be restarted if
3265 they were already the first instruction in the TB. If this is not
3266 the first instruction in a TB then re-execute the preceding
3267 branch. */
3268 #if defined(TARGET_MIPS)
3269 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3270 env->active_tc.PC -= 4;
3271 env->icount_decr.u16.low++;
3272 env->hflags &= ~MIPS_HFLAG_BMASK;
3273 }
3274 #elif defined(TARGET_SH4)
3275 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3276 && n > 1) {
3277 env->pc -= 2;
3278 env->icount_decr.u16.low++;
3279 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3280 }
3281 #endif
3282 /* This should never happen. */
3283 if (n > CF_COUNT_MASK)
3284 cpu_abort(env, "TB too big during recompile");
3285
3286 cflags = n | CF_LAST_IO;
3287 pc = tb->pc;
3288 cs_base = tb->cs_base;
3289 flags = tb->flags;
3290 tb_phys_invalidate(tb, -1);
3291 /* FIXME: In theory this could raise an exception. In practice
3292 we have already translated the block once so it's probably ok. */
3293 tb_gen_code(env, pc, cs_base, flags, cflags);
3294 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3295 the first in the TB) then we end up generating a whole new TB and
3296 repeating the fault, which is horribly inefficient.
3297 Better would be to execute just this insn uncached, or generate a
3298 second new TB. */
3299 cpu_resume_from_signal(env, NULL);
3300 }
3301
3302 void dump_exec_info(FILE *f,
3303 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3304 {
3305 int i, target_code_size, max_target_code_size;
3306 int direct_jmp_count, direct_jmp2_count, cross_page;
3307 TranslationBlock *tb;
3308
3309 target_code_size = 0;
3310 max_target_code_size = 0;
3311 cross_page = 0;
3312 direct_jmp_count = 0;
3313 direct_jmp2_count = 0;
3314 for(i = 0; i < nb_tbs; i++) {
3315 tb = &tbs[i];
3316 target_code_size += tb->size;
3317 if (tb->size > max_target_code_size)
3318 max_target_code_size = tb->size;
3319 if (tb->page_addr[1] != -1)
3320 cross_page++;
3321 if (tb->tb_next_offset[0] != 0xffff) {
3322 direct_jmp_count++;
3323 if (tb->tb_next_offset[1] != 0xffff) {
3324 direct_jmp2_count++;
3325 }
3326 }
3327 }
3328 /* XXX: avoid using doubles ? */
3329 cpu_fprintf(f, "Translation buffer state:\n");
3330 cpu_fprintf(f, "gen code size %ld/%ld\n",
3331 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3332 cpu_fprintf(f, "TB count %d/%d\n",
3333 nb_tbs, code_gen_max_blocks);
3334 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3335 nb_tbs ? target_code_size / nb_tbs : 0,
3336 max_target_code_size);
3337 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3338 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3339 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3340 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3341 cross_page,
3342 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3343 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3344 direct_jmp_count,
3345 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3346 direct_jmp2_count,
3347 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3348 cpu_fprintf(f, "\nStatistics:\n");
3349 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3350 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3351 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3352 tcg_dump_info(f, cpu_fprintf);
3353 }
3354
3355 #if !defined(CONFIG_USER_ONLY)
3356
3357 #define MMUSUFFIX _cmmu
3358 #define GETPC() NULL
3359 #define env cpu_single_env
3360 #define SOFTMMU_CODE_ACCESS
3361
3362 #define SHIFT 0
3363 #include "softmmu_template.h"
3364
3365 #define SHIFT 1
3366 #include "softmmu_template.h"
3367
3368 #define SHIFT 2
3369 #include "softmmu_template.h"
3370
3371 #define SHIFT 3
3372 #include "softmmu_template.h"
3373
3374 #undef env
3375
3376 #endif