1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include "config.h"
21#ifdef _WIN32
22#define WIN32_LEAN_AND_MEAN
23#include <windows.h>
24#else
25#include <sys/types.h>
26#include <sys/mman.h>
27#endif
28#include <stdlib.h>
29#include <stdio.h>
30#include <stdarg.h>
31#include <string.h>
32#include <errno.h>
33#include <unistd.h>
34#include <inttypes.h>
35
36#include "cpu.h"
37#include "exec-all.h"
38#include "qemu-common.h"
39#include "tcg.h"
40#include "hw/hw.h"
41#include "osdep.h"
42#include "kvm.h"
43#if defined(CONFIG_USER_ONLY)
44#include <qemu.h>
45#endif
46
47//#define DEBUG_TB_INVALIDATE
48//#define DEBUG_FLUSH
49//#define DEBUG_TLB
50//#define DEBUG_UNASSIGNED
51
52/* make various TB consistency checks */
53//#define DEBUG_TB_CHECK
54//#define DEBUG_TLB_CHECK
55
56//#define DEBUG_IOPORT
57//#define DEBUG_SUBPAGE
58
59#if !defined(CONFIG_USER_ONLY)
60/* TB consistency checks only implemented for usermode emulation. */
61#undef DEBUG_TB_CHECK
62#endif
63
64#define SMC_BITMAP_USE_THRESHOLD 10
65
66#define MMAP_AREA_START 0x00000000
67#define MMAP_AREA_END 0xa8000000
68
69#if defined(TARGET_SPARC64)
70#define TARGET_PHYS_ADDR_SPACE_BITS 41
71#elif defined(TARGET_SPARC)
72#define TARGET_PHYS_ADDR_SPACE_BITS 36
73#elif defined(TARGET_ALPHA)
74#define TARGET_PHYS_ADDR_SPACE_BITS 42
75#define TARGET_VIRT_ADDR_SPACE_BITS 42
76#elif defined(TARGET_PPC64)
77#define TARGET_PHYS_ADDR_SPACE_BITS 42
78#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79#define TARGET_PHYS_ADDR_SPACE_BITS 42
80#elif defined(TARGET_I386) && !defined(USE_KQEMU)
81#define TARGET_PHYS_ADDR_SPACE_BITS 36
82#else
83/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84#define TARGET_PHYS_ADDR_SPACE_BITS 32
85#endif
86
87static TranslationBlock *tbs;
88int code_gen_max_blocks;
89TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
90static int nb_tbs;
91/* any access to the tbs or the page table must use this lock */
92spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
93
94#if defined(__arm__) || defined(__sparc_v9__)
95/* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
97 section close to code segment. */
98#define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
101#else
102#define code_gen_section \
103 __attribute__((aligned (32)))
104#endif
105
106uint8_t code_gen_prologue[1024] code_gen_section;
107static uint8_t *code_gen_buffer;
108static unsigned long code_gen_buffer_size;
109/* threshold to flush the translated code buffer */
110static unsigned long code_gen_buffer_max_size;
111uint8_t *code_gen_ptr;
112
113#if !defined(CONFIG_USER_ONLY)
114ram_addr_t phys_ram_size;
115int phys_ram_fd;
116uint8_t *phys_ram_base;
117uint8_t *phys_ram_dirty;
118static int in_migration;
119static ram_addr_t phys_ram_alloc_offset = 0;
120#endif
121
122CPUState *first_cpu;
123/* current CPU in the current thread. It is only valid inside
124 cpu_exec() */
125CPUState *cpu_single_env;
126/* 0 = Do not count executed instructions.
127 1 = Precise instruction counting.
128 2 = Adaptive rate instruction counting. */
129int use_icount = 0;
130/* Current instruction counter. While executing translated code this may
131 include some instructions that have not yet been executed. */
132int64_t qemu_icount;
133
134typedef struct PageDesc {
135 /* list of TBs intersecting this ram page */
136 TranslationBlock *first_tb;
 137 /* in order to optimize self modifying code handling, we count the
 138 number of write accesses to this page; past a threshold a code bitmap is used */
139 unsigned int code_write_count;
140 uint8_t *code_bitmap;
141#if defined(CONFIG_USER_ONLY)
142 unsigned long flags;
143#endif
144} PageDesc;
145
146typedef struct PhysPageDesc {
147 /* offset in host memory of the page + io_index in the low bits */
148 ram_addr_t phys_offset;
149} PhysPageDesc;
150
151#define L2_BITS 10
152#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
153/* XXX: this is a temporary hack for alpha target.
154 * In the future, this is to be replaced by a multi-level table
 155 * to actually be able to handle the complete 64-bit address space.
156 */
157#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
158#else
159#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
160#endif
161
162#define L1_SIZE (1 << L1_BITS)
163#define L2_SIZE (1 << L2_BITS)
164
165unsigned long qemu_real_host_page_size;
166unsigned long qemu_host_page_bits;
167unsigned long qemu_host_page_size;
168unsigned long qemu_host_page_mask;
169
170/* XXX: for system emulation, it could just be an array */
171static PageDesc *l1_map[L1_SIZE];
172static PhysPageDesc **l1_phys_map;
173
174#if !defined(CONFIG_USER_ONLY)
175static void io_mem_init(void);
176
177/* io memory support */
178CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
179CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
180void *io_mem_opaque[IO_MEM_NB_ENTRIES];
181static int io_mem_nb;
182static int io_mem_watch;
183#endif
184
185/* log support */
186static const char *logfilename = "/tmp/qemu.log";
187FILE *logfile;
188int loglevel;
189static int log_append = 0;
190
191/* statistics */
192static int tlb_flush_count;
193static int tb_flush_count;
194static int tb_phys_invalidate_count;
195
196#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
197typedef struct subpage_t {
198 target_phys_addr_t base;
199 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
200 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
201 void *opaque[TARGET_PAGE_SIZE][2][4];
202} subpage_t;
203
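/* make the host pages containing [addr, addr + size) executable (and
   writable) so that generated code can be stored and run there */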
204#ifdef _WIN32
205static void map_exec(void *addr, long size)
206{
207 DWORD old_protect;
208 VirtualProtect(addr, size,
209 PAGE_EXECUTE_READWRITE, &old_protect);
210
211}
212#else
213static void map_exec(void *addr, long size)
214{
215 unsigned long start, end, page_size;
216
217 page_size = getpagesize();
218 start = (unsigned long)addr;
219 start &= ~(page_size - 1);
220
221 end = (unsigned long)addr + size;
222 end += page_size - 1;
223 end &= ~(page_size - 1);
224
225 mprotect((void *)start, end - start,
226 PROT_READ | PROT_WRITE | PROT_EXEC);
227}
228#endif
229
230static void page_init(void)
231{
232 /* NOTE: we can always suppose that qemu_host_page_size >=
233 TARGET_PAGE_SIZE */
234#ifdef _WIN32
235 {
236 SYSTEM_INFO system_info;
237
238 GetSystemInfo(&system_info);
239 qemu_real_host_page_size = system_info.dwPageSize;
240 }
241#else
242 qemu_real_host_page_size = getpagesize();
243#endif
244 if (qemu_host_page_size == 0)
245 qemu_host_page_size = qemu_real_host_page_size;
246 if (qemu_host_page_size < TARGET_PAGE_SIZE)
247 qemu_host_page_size = TARGET_PAGE_SIZE;
248 qemu_host_page_bits = 0;
249 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
250 qemu_host_page_bits++;
251 qemu_host_page_mask = ~(qemu_host_page_size - 1);
252 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
253 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
254
255#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
256 {
257 long long startaddr, endaddr;
258 FILE *f;
259 int n;
260
261 mmap_lock();
262 last_brk = (unsigned long)sbrk(0);
263 f = fopen("/proc/self/maps", "r");
264 if (f) {
265 do {
266 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
267 if (n == 2) {
268 startaddr = MIN(startaddr,
269 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
270 endaddr = MIN(endaddr,
271 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
272 page_set_flags(startaddr & TARGET_PAGE_MASK,
273 TARGET_PAGE_ALIGN(endaddr),
274 PAGE_RESERVED);
275 }
276 } while (!feof(f));
277 fclose(f);
278 }
279 mmap_unlock();
280 }
281#endif
282}
283
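/* the (virtual) page table is two-level: l1_map[] holds pointers to
   arrays of L2_SIZE PageDesc entries, indexed by the top bits of the
   target page index */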
284static inline PageDesc **page_l1_map(target_ulong index)
285{
286#if TARGET_LONG_BITS > 32
287 /* Host memory outside guest VM. For 32-bit targets we have already
288 excluded high addresses. */
289 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
290 return NULL;
291#endif
292 return &l1_map[index >> L2_BITS];
293}
294
295static inline PageDesc *page_find_alloc(target_ulong index)
296{
297 PageDesc **lp, *p;
298 lp = page_l1_map(index);
299 if (!lp)
300 return NULL;
301
302 p = *lp;
303 if (!p) {
304 /* allocate if not found */
305#if defined(CONFIG_USER_ONLY)
306 unsigned long addr;
307 size_t len = sizeof(PageDesc) * L2_SIZE;
308 /* Don't use qemu_malloc because it may recurse. */
309 p = mmap(0, len, PROT_READ | PROT_WRITE,
310 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
311 *lp = p;
312 addr = h2g(p);
313 if (addr == (target_ulong)addr) {
314 page_set_flags(addr & TARGET_PAGE_MASK,
315 TARGET_PAGE_ALIGN(addr + len),
316 PAGE_RESERVED);
317 }
318#else
319 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
320 *lp = p;
321#endif
322 }
323 return p + (index & (L2_SIZE - 1));
324}
325
326static inline PageDesc *page_find(target_ulong index)
327{
328 PageDesc **lp, *p;
329 lp = page_l1_map(index);
330 if (!lp)
331 return NULL;
332
333 p = *lp;
334 if (!p)
335 return 0;
336 return p + (index & (L2_SIZE - 1));
337}
338
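/* return the PhysPageDesc for physical page 'index', allocating the
   intermediate tables if 'alloc' is set; an extra table level is used
   when TARGET_PHYS_ADDR_SPACE_BITS > 32 */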
339static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
340{
341 void **lp, **p;
342 PhysPageDesc *pd;
343
344 p = (void **)l1_phys_map;
345#if TARGET_PHYS_ADDR_SPACE_BITS > 32
346
347#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
348#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
349#endif
350 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
351 p = *lp;
352 if (!p) {
353 /* allocate if not found */
354 if (!alloc)
355 return NULL;
356 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
357 memset(p, 0, sizeof(void *) * L1_SIZE);
358 *lp = p;
359 }
360#endif
361 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
362 pd = *lp;
363 if (!pd) {
364 int i;
365 /* allocate if not found */
366 if (!alloc)
367 return NULL;
368 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
369 *lp = pd;
370 for (i = 0; i < L2_SIZE; i++)
371 pd[i].phys_offset = IO_MEM_UNASSIGNED;
372 }
373 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
374}
375
376static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
377{
378 return phys_page_find_alloc(index, 0);
379}
380
381#if !defined(CONFIG_USER_ONLY)
382static void tlb_protect_code(ram_addr_t ram_addr);
383static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
384 target_ulong vaddr);
385#define mmap_lock() do { } while(0)
386#define mmap_unlock() do { } while(0)
387#endif
388
389#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
390
391#if defined(CONFIG_USER_ONLY)
392/* Currently it is not recommended to allocate big chunks of data in
 393 user mode. This will change when a dedicated libc is used. */
394#define USE_STATIC_CODE_GEN_BUFFER
395#endif
396
397#ifdef USE_STATIC_CODE_GEN_BUFFER
398static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
399#endif
400
401static void code_gen_alloc(unsigned long tb_size)
402{
403#ifdef USE_STATIC_CODE_GEN_BUFFER
404 code_gen_buffer = static_code_gen_buffer;
405 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
406 map_exec(code_gen_buffer, code_gen_buffer_size);
407#else
408 code_gen_buffer_size = tb_size;
409 if (code_gen_buffer_size == 0) {
410#if defined(CONFIG_USER_ONLY)
411 /* in user mode, phys_ram_size is not meaningful */
412 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
413#else
 414 /* XXX: needs adjustments */
415 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
416#endif
417 }
418 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
419 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
420 /* The code gen buffer location may have constraints depending on
421 the host cpu and OS */
422#if defined(__linux__)
423 {
424 int flags;
425 void *start = NULL;
426
427 flags = MAP_PRIVATE | MAP_ANONYMOUS;
428#if defined(__x86_64__)
429 flags |= MAP_32BIT;
430 /* Cannot map more than that */
431 if (code_gen_buffer_size > (800 * 1024 * 1024))
432 code_gen_buffer_size = (800 * 1024 * 1024);
433#elif defined(__sparc_v9__)
434 // Map the buffer below 2G, so we can use direct calls and branches
435 flags |= MAP_FIXED;
436 start = (void *) 0x60000000UL;
437 if (code_gen_buffer_size > (512 * 1024 * 1024))
438 code_gen_buffer_size = (512 * 1024 * 1024);
439#elif defined(__arm__)
440 /* Map the buffer below 32M, so we can use direct calls and branches */
441 flags |= MAP_FIXED;
442 start = (void *) 0x01000000UL;
443 if (code_gen_buffer_size > 16 * 1024 * 1024)
444 code_gen_buffer_size = 16 * 1024 * 1024;
445#endif
446 code_gen_buffer = mmap(start, code_gen_buffer_size,
447 PROT_WRITE | PROT_READ | PROT_EXEC,
448 flags, -1, 0);
449 if (code_gen_buffer == MAP_FAILED) {
450 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
451 exit(1);
452 }
453 }
454#elif defined(__FreeBSD__)
455 {
456 int flags;
457 void *addr = NULL;
458 flags = MAP_PRIVATE | MAP_ANONYMOUS;
459#if defined(__x86_64__)
460 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
461 * 0x40000000 is free */
462 flags |= MAP_FIXED;
463 addr = (void *)0x40000000;
464 /* Cannot map more than that */
465 if (code_gen_buffer_size > (800 * 1024 * 1024))
466 code_gen_buffer_size = (800 * 1024 * 1024);
467#endif
468 code_gen_buffer = mmap(addr, code_gen_buffer_size,
469 PROT_WRITE | PROT_READ | PROT_EXEC,
470 flags, -1, 0);
471 if (code_gen_buffer == MAP_FAILED) {
472 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
473 exit(1);
474 }
475 }
476#else
477 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
478 if (!code_gen_buffer) {
479 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
480 exit(1);
481 }
482 map_exec(code_gen_buffer, code_gen_buffer_size);
483#endif
484#endif /* !USE_STATIC_CODE_GEN_BUFFER */
485 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
486 code_gen_buffer_max_size = code_gen_buffer_size -
487 code_gen_max_block_size();
488 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
489 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
490}
491
492/* Must be called before using the QEMU cpus. 'tb_size' is the size
493 (in bytes) allocated to the translation buffer. Zero means default
494 size. */
495void cpu_exec_init_all(unsigned long tb_size)
496{
497 cpu_gen_init();
498 code_gen_alloc(tb_size);
499 code_gen_ptr = code_gen_buffer;
500 page_init();
501#if !defined(CONFIG_USER_ONLY)
502 io_mem_init();
503#endif
504}
505
506#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
507
508#define CPU_COMMON_SAVE_VERSION 1
509
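/* save/load the CPU state fields common to all targets */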
510static void cpu_common_save(QEMUFile *f, void *opaque)
511{
512 CPUState *env = opaque;
513
514 qemu_put_be32s(f, &env->halted);
515 qemu_put_be32s(f, &env->interrupt_request);
516}
517
518static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
519{
520 CPUState *env = opaque;
521
522 if (version_id != CPU_COMMON_SAVE_VERSION)
523 return -EINVAL;
524
525 qemu_get_be32s(f, &env->halted);
526 qemu_get_be32s(f, &env->interrupt_request);
527 tlb_flush(env, 1);
528
529 return 0;
530}
531#endif
532
533void cpu_exec_init(CPUState *env)
534{
535 CPUState **penv;
536 int cpu_index;
537
538 env->next_cpu = NULL;
539 penv = &first_cpu;
540 cpu_index = 0;
541 while (*penv != NULL) {
542 penv = (CPUState **)&(*penv)->next_cpu;
543 cpu_index++;
544 }
545 env->cpu_index = cpu_index;
546 TAILQ_INIT(&env->breakpoints);
547 TAILQ_INIT(&env->watchpoints);
548 *penv = env;
549#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
550 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
551 cpu_common_save, cpu_common_load, env);
552 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
553 cpu_save, cpu_load, env);
554#endif
555}
556
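/* free the SMC code bitmap of a page and reset its write access counter */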
557static inline void invalidate_page_bitmap(PageDesc *p)
558{
559 if (p->code_bitmap) {
560 qemu_free(p->code_bitmap);
561 p->code_bitmap = NULL;
562 }
563 p->code_write_count = 0;
564}
565
566/* set to NULL all the 'first_tb' fields in all PageDescs */
567static void page_flush_tb(void)
568{
569 int i, j;
570 PageDesc *p;
571
572 for(i = 0; i < L1_SIZE; i++) {
573 p = l1_map[i];
574 if (p) {
575 for(j = 0; j < L2_SIZE; j++) {
576 p->first_tb = NULL;
577 invalidate_page_bitmap(p);
578 p++;
579 }
580 }
581 }
582}
583
584/* flush all the translation blocks */
585/* XXX: tb_flush is currently not thread safe */
586void tb_flush(CPUState *env1)
587{
588 CPUState *env;
589#if defined(DEBUG_FLUSH)
590 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
591 (unsigned long)(code_gen_ptr - code_gen_buffer),
592 nb_tbs, nb_tbs > 0 ?
593 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
594#endif
595 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
596 cpu_abort(env1, "Internal error: code buffer overflow\n");
597
598 nb_tbs = 0;
599
600 for(env = first_cpu; env != NULL; env = env->next_cpu) {
601 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
602 }
603
604 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
605 page_flush_tb();
606
607 code_gen_ptr = code_gen_buffer;
608 /* XXX: flush processor icache at this point if cache flush is
609 expensive */
610 tb_flush_count++;
611}
612
613#ifdef DEBUG_TB_CHECK
614
615static void tb_invalidate_check(target_ulong address)
616{
617 TranslationBlock *tb;
618 int i;
619 address &= TARGET_PAGE_MASK;
620 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
621 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
622 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
623 address >= tb->pc + tb->size)) {
624 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
625 address, (long)tb->pc, tb->size);
626 }
627 }
628 }
629}
630
631/* verify that all the pages have correct rights for code */
632static void tb_page_check(void)
633{
634 TranslationBlock *tb;
635 int i, flags1, flags2;
636
637 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
638 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
639 flags1 = page_get_flags(tb->pc);
640 flags2 = page_get_flags(tb->pc + tb->size - 1);
641 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
642 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
643 (long)tb->pc, tb->size, flags1, flags2);
644 }
645 }
646 }
647}
648
649static void tb_jmp_check(TranslationBlock *tb)
650{
651 TranslationBlock *tb1;
652 unsigned int n1;
653
654 /* suppress any remaining jumps to this TB */
655 tb1 = tb->jmp_first;
656 for(;;) {
657 n1 = (long)tb1 & 3;
658 tb1 = (TranslationBlock *)((long)tb1 & ~3);
659 if (n1 == 2)
660 break;
661 tb1 = tb1->jmp_next[n1];
662 }
663 /* check end of list */
664 if (tb1 != tb) {
665 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
666 }
667}
668
669#endif
670
671/* invalidate one TB */
672static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
673 int next_offset)
674{
675 TranslationBlock *tb1;
676 for(;;) {
677 tb1 = *ptb;
678 if (tb1 == tb) {
679 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
680 break;
681 }
682 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
683 }
684}
685
686static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
687{
688 TranslationBlock *tb1;
689 unsigned int n1;
690
691 for(;;) {
692 tb1 = *ptb;
693 n1 = (long)tb1 & 3;
694 tb1 = (TranslationBlock *)((long)tb1 & ~3);
695 if (tb1 == tb) {
696 *ptb = tb1->page_next[n1];
697 break;
698 }
699 ptb = &tb1->page_next[n1];
700 }
701}
702
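/* TBs that jump to a given TB are kept in a circular list rooted at the
   target's jmp_first and threaded through jmp_next[]: the low 2 bits of
   each pointer give the jump slot (0 or 1) of the TB it points to, and
   the value 2 marks the target TB itself (end of the list). This
   function unlinks jump 'n' of 'tb' from the list of its target. */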
703static inline void tb_jmp_remove(TranslationBlock *tb, int n)
704{
705 TranslationBlock *tb1, **ptb;
706 unsigned int n1;
707
708 ptb = &tb->jmp_next[n];
709 tb1 = *ptb;
710 if (tb1) {
711 /* find tb(n) in circular list */
712 for(;;) {
713 tb1 = *ptb;
714 n1 = (long)tb1 & 3;
715 tb1 = (TranslationBlock *)((long)tb1 & ~3);
716 if (n1 == n && tb1 == tb)
717 break;
718 if (n1 == 2) {
719 ptb = &tb1->jmp_first;
720 } else {
721 ptb = &tb1->jmp_next[n1];
722 }
723 }
724 /* now we can suppress tb(n) from the list */
725 *ptb = tb->jmp_next[n];
726
727 tb->jmp_next[n] = NULL;
728 }
729}
730
731/* reset the jump entry 'n' of a TB so that it is not chained to
732 another TB */
733static inline void tb_reset_jump(TranslationBlock *tb, int n)
734{
735 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
736}
737
738void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
739{
740 CPUState *env;
741 PageDesc *p;
742 unsigned int h, n1;
743 target_phys_addr_t phys_pc;
744 TranslationBlock *tb1, *tb2;
745
746 /* remove the TB from the hash list */
747 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
748 h = tb_phys_hash_func(phys_pc);
749 tb_remove(&tb_phys_hash[h], tb,
750 offsetof(TranslationBlock, phys_hash_next));
751
752 /* remove the TB from the page list */
753 if (tb->page_addr[0] != page_addr) {
754 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
755 tb_page_remove(&p->first_tb, tb);
756 invalidate_page_bitmap(p);
757 }
758 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
759 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
760 tb_page_remove(&p->first_tb, tb);
761 invalidate_page_bitmap(p);
762 }
763
764 tb_invalidated_flag = 1;
765
 766 /* remove the TB from each CPU's jump cache */
767 h = tb_jmp_cache_hash_func(tb->pc);
768 for(env = first_cpu; env != NULL; env = env->next_cpu) {
769 if (env->tb_jmp_cache[h] == tb)
770 env->tb_jmp_cache[h] = NULL;
771 }
772
773 /* suppress this TB from the two jump lists */
774 tb_jmp_remove(tb, 0);
775 tb_jmp_remove(tb, 1);
776
777 /* suppress any remaining jumps to this TB */
778 tb1 = tb->jmp_first;
779 for(;;) {
780 n1 = (long)tb1 & 3;
781 if (n1 == 2)
782 break;
783 tb1 = (TranslationBlock *)((long)tb1 & ~3);
784 tb2 = tb1->jmp_next[n1];
785 tb_reset_jump(tb1, n1);
786 tb1->jmp_next[n1] = NULL;
787 tb1 = tb2;
788 }
789 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
790
791 tb_phys_invalidate_count++;
792}
793
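/* set bits [start, start + len) in the bit array 'tab' */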
794static inline void set_bits(uint8_t *tab, int start, int len)
795{
796 int end, mask, end1;
797
798 end = start + len;
799 tab += start >> 3;
800 mask = 0xff << (start & 7);
801 if ((start & ~7) == (end & ~7)) {
802 if (start < end) {
803 mask &= ~(0xff << (end & 7));
804 *tab |= mask;
805 }
806 } else {
807 *tab++ |= mask;
808 start = (start + 8) & ~7;
809 end1 = end & ~7;
810 while (start < end1) {
811 *tab++ = 0xff;
812 start += 8;
813 }
814 if (start < end) {
815 mask = ~(0xff << (end & 7));
816 *tab |= mask;
817 }
818 }
819}
820
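/* build a bitmap marking which bytes of the page are covered by
   translated code, so that writes outside any TB can be filtered out
   cheaply in tb_invalidate_phys_page_fast() */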
821static void build_page_bitmap(PageDesc *p)
822{
823 int n, tb_start, tb_end;
824 TranslationBlock *tb;
825
826 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
827 if (!p->code_bitmap)
828 return;
829
830 tb = p->first_tb;
831 while (tb != NULL) {
832 n = (long)tb & 3;
833 tb = (TranslationBlock *)((long)tb & ~3);
834 /* NOTE: this is subtle as a TB may span two physical pages */
835 if (n == 0) {
836 /* NOTE: tb_end may be after the end of the page, but
837 it is not a problem */
838 tb_start = tb->pc & ~TARGET_PAGE_MASK;
839 tb_end = tb_start + tb->size;
840 if (tb_end > TARGET_PAGE_SIZE)
841 tb_end = TARGET_PAGE_SIZE;
842 } else {
843 tb_start = 0;
844 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
845 }
846 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
847 tb = tb->page_next[n];
848 }
849}
850
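/* generate a new TB for the code at (pc, cs_base, flags); if the TB or
   code buffer limits are reached, everything is flushed first */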
851TranslationBlock *tb_gen_code(CPUState *env,
852 target_ulong pc, target_ulong cs_base,
853 int flags, int cflags)
854{
855 TranslationBlock *tb;
856 uint8_t *tc_ptr;
857 target_ulong phys_pc, phys_page2, virt_page2;
858 int code_gen_size;
859
860 phys_pc = get_phys_addr_code(env, pc);
861 tb = tb_alloc(pc);
862 if (!tb) {
863 /* flush must be done */
864 tb_flush(env);
865 /* cannot fail at this point */
866 tb = tb_alloc(pc);
867 /* Don't forget to invalidate previous TB info. */
868 tb_invalidated_flag = 1;
869 }
870 tc_ptr = code_gen_ptr;
871 tb->tc_ptr = tc_ptr;
872 tb->cs_base = cs_base;
873 tb->flags = flags;
874 tb->cflags = cflags;
875 cpu_gen_code(env, tb, &code_gen_size);
876 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
877
878 /* check next page if needed */
879 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
880 phys_page2 = -1;
881 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
882 phys_page2 = get_phys_addr_code(env, virt_page2);
883 }
884 tb_link_phys(tb, phys_pc, phys_page2);
885 return tb;
886}
887
888/* invalidate all TBs which intersect with the target physical page
 889 in the range [start, end). NOTE: start and end must refer to
890 the same physical page. 'is_cpu_write_access' should be true if called
891 from a real cpu write access: the virtual CPU will exit the current
892 TB if code is modified inside this TB. */
893void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
894 int is_cpu_write_access)
895{
896 TranslationBlock *tb, *tb_next, *saved_tb;
897 CPUState *env = cpu_single_env;
898 target_ulong tb_start, tb_end;
899 PageDesc *p;
900 int n;
901#ifdef TARGET_HAS_PRECISE_SMC
902 int current_tb_not_found = is_cpu_write_access;
903 TranslationBlock *current_tb = NULL;
904 int current_tb_modified = 0;
905 target_ulong current_pc = 0;
906 target_ulong current_cs_base = 0;
907 int current_flags = 0;
908#endif /* TARGET_HAS_PRECISE_SMC */
909
910 p = page_find(start >> TARGET_PAGE_BITS);
911 if (!p)
912 return;
913 if (!p->code_bitmap &&
914 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
915 is_cpu_write_access) {
916 /* build code bitmap */
917 build_page_bitmap(p);
918 }
919
 920 /* we remove all the TBs in the range [start, end) */
921 /* XXX: see if in some cases it could be faster to invalidate all the code */
922 tb = p->first_tb;
923 while (tb != NULL) {
924 n = (long)tb & 3;
925 tb = (TranslationBlock *)((long)tb & ~3);
926 tb_next = tb->page_next[n];
927 /* NOTE: this is subtle as a TB may span two physical pages */
928 if (n == 0) {
929 /* NOTE: tb_end may be after the end of the page, but
930 it is not a problem */
931 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
932 tb_end = tb_start + tb->size;
933 } else {
934 tb_start = tb->page_addr[1];
935 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
936 }
937 if (!(tb_end <= start || tb_start >= end)) {
938#ifdef TARGET_HAS_PRECISE_SMC
939 if (current_tb_not_found) {
940 current_tb_not_found = 0;
941 current_tb = NULL;
942 if (env->mem_io_pc) {
943 /* now we have a real cpu fault */
944 current_tb = tb_find_pc(env->mem_io_pc);
945 }
946 }
947 if (current_tb == tb &&
948 (current_tb->cflags & CF_COUNT_MASK) != 1) {
949 /* If we are modifying the current TB, we must stop
950 its execution. We could be more precise by checking
951 that the modification is after the current PC, but it
952 would require a specialized function to partially
953 restore the CPU state */
954
955 current_tb_modified = 1;
956 cpu_restore_state(current_tb, env,
957 env->mem_io_pc, NULL);
958 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
959 &current_flags);
960 }
961#endif /* TARGET_HAS_PRECISE_SMC */
962 /* we need to do that to handle the case where a signal
963 occurs while doing tb_phys_invalidate() */
964 saved_tb = NULL;
965 if (env) {
966 saved_tb = env->current_tb;
967 env->current_tb = NULL;
968 }
969 tb_phys_invalidate(tb, -1);
970 if (env) {
971 env->current_tb = saved_tb;
972 if (env->interrupt_request && env->current_tb)
973 cpu_interrupt(env, env->interrupt_request);
974 }
975 }
976 tb = tb_next;
977 }
978#if !defined(CONFIG_USER_ONLY)
979 /* if no code remaining, no need to continue to use slow writes */
980 if (!p->first_tb) {
981 invalidate_page_bitmap(p);
982 if (is_cpu_write_access) {
983 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
984 }
985 }
986#endif
987#ifdef TARGET_HAS_PRECISE_SMC
988 if (current_tb_modified) {
989 /* we generate a block containing just the instruction
990 modifying the memory. It will ensure that it cannot modify
991 itself */
992 env->current_tb = NULL;
993 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
994 cpu_resume_from_signal(env, NULL);
995 }
996#endif
997}
998
999/* len must be <= 8 and start must be a multiple of len */
1000static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1001{
1002 PageDesc *p;
1003 int offset, b;
1004#if 0
1005 if (1) {
1006 if (loglevel) {
1007 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1008 cpu_single_env->mem_io_vaddr, len,
1009 cpu_single_env->eip,
1010 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1011 }
1012 }
1013#endif
1014 p = page_find(start >> TARGET_PAGE_BITS);
1015 if (!p)
1016 return;
1017 if (p->code_bitmap) {
1018 offset = start & ~TARGET_PAGE_MASK;
1019 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1020 if (b & ((1 << len) - 1))
1021 goto do_invalidate;
1022 } else {
1023 do_invalidate:
1024 tb_invalidate_phys_page_range(start, start + len, 1);
1025 }
1026}
1027
1028#if !defined(CONFIG_SOFTMMU)
1029static void tb_invalidate_phys_page(target_phys_addr_t addr,
1030 unsigned long pc, void *puc)
1031{
1032 TranslationBlock *tb;
1033 PageDesc *p;
1034 int n;
1035#ifdef TARGET_HAS_PRECISE_SMC
1036 TranslationBlock *current_tb = NULL;
1037 CPUState *env = cpu_single_env;
1038 int current_tb_modified = 0;
1039 target_ulong current_pc = 0;
1040 target_ulong current_cs_base = 0;
1041 int current_flags = 0;
1042#endif
1043
1044 addr &= TARGET_PAGE_MASK;
1045 p = page_find(addr >> TARGET_PAGE_BITS);
1046 if (!p)
1047 return;
1048 tb = p->first_tb;
1049#ifdef TARGET_HAS_PRECISE_SMC
1050 if (tb && pc != 0) {
1051 current_tb = tb_find_pc(pc);
1052 }
1053#endif
1054 while (tb != NULL) {
1055 n = (long)tb & 3;
1056 tb = (TranslationBlock *)((long)tb & ~3);
1057#ifdef TARGET_HAS_PRECISE_SMC
1058 if (current_tb == tb &&
1059 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1060 /* If we are modifying the current TB, we must stop
1061 its execution. We could be more precise by checking
1062 that the modification is after the current PC, but it
1063 would require a specialized function to partially
1064 restore the CPU state */
1065
1066 current_tb_modified = 1;
1067 cpu_restore_state(current_tb, env, pc, puc);
1068 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1069 &current_flags);
1070 }
1071#endif /* TARGET_HAS_PRECISE_SMC */
1072 tb_phys_invalidate(tb, addr);
1073 tb = tb->page_next[n];
1074 }
1075 p->first_tb = NULL;
1076#ifdef TARGET_HAS_PRECISE_SMC
1077 if (current_tb_modified) {
1078 /* we generate a block containing just the instruction
1079 modifying the memory. It will ensure that it cannot modify
1080 itself */
1081 env->current_tb = NULL;
1082 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1083 cpu_resume_from_signal(env, puc);
1084 }
1085#endif
1086}
1087#endif
1088
1089/* add the tb to the target page and protect it if necessary */
1090static inline void tb_alloc_page(TranslationBlock *tb,
1091 unsigned int n, target_ulong page_addr)
1092{
1093 PageDesc *p;
1094 TranslationBlock *last_first_tb;
1095
1096 tb->page_addr[n] = page_addr;
1097 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1098 tb->page_next[n] = p->first_tb;
1099 last_first_tb = p->first_tb;
1100 p->first_tb = (TranslationBlock *)((long)tb | n);
1101 invalidate_page_bitmap(p);
1102
1103#if defined(TARGET_HAS_SMC) || 1
1104
1105#if defined(CONFIG_USER_ONLY)
1106 if (p->flags & PAGE_WRITE) {
1107 target_ulong addr;
1108 PageDesc *p2;
1109 int prot;
1110
 1111 /* force the host page to be non-writable (writes will incur a
1112 page fault + mprotect overhead) */
1113 page_addr &= qemu_host_page_mask;
1114 prot = 0;
1115 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1116 addr += TARGET_PAGE_SIZE) {
1117
1118 p2 = page_find (addr >> TARGET_PAGE_BITS);
1119 if (!p2)
1120 continue;
1121 prot |= p2->flags;
1122 p2->flags &= ~PAGE_WRITE;
1123 page_get_flags(addr);
1124 }
1125 mprotect(g2h(page_addr), qemu_host_page_size,
1126 (prot & PAGE_BITS) & ~PAGE_WRITE);
1127#ifdef DEBUG_TB_INVALIDATE
1128 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1129 page_addr);
1130#endif
1131 }
1132#else
1133 /* if some code is already present, then the pages are already
1134 protected. So we handle the case where only the first TB is
1135 allocated in a physical page */
1136 if (!last_first_tb) {
1137 tlb_protect_code(page_addr);
1138 }
1139#endif
1140
1141#endif /* TARGET_HAS_SMC */
1142}
1143
1144/* Allocate a new translation block. Flush the translation buffer if
1145 too many translation blocks or too much generated code. */
1146TranslationBlock *tb_alloc(target_ulong pc)
1147{
1148 TranslationBlock *tb;
1149
1150 if (nb_tbs >= code_gen_max_blocks ||
1151 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1152 return NULL;
1153 tb = &tbs[nb_tbs++];
1154 tb->pc = pc;
1155 tb->cflags = 0;
1156 return tb;
1157}
1158
1159void tb_free(TranslationBlock *tb)
1160{
 1161 /* In practice this is mostly used for single-use temporary TBs.
1162 Ignore the hard cases and just back up if this TB happens to
1163 be the last one generated. */
1164 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1165 code_gen_ptr = tb->tc_ptr;
1166 nb_tbs--;
1167 }
1168}
1169
1170/* add a new TB and link it to the physical page tables. phys_page2 is
1171 (-1) to indicate that only one page contains the TB. */
1172void tb_link_phys(TranslationBlock *tb,
1173 target_ulong phys_pc, target_ulong phys_page2)
1174{
1175 unsigned int h;
1176 TranslationBlock **ptb;
1177
1178 /* Grab the mmap lock to stop another thread invalidating this TB
1179 before we are done. */
1180 mmap_lock();
1181 /* add in the physical hash table */
1182 h = tb_phys_hash_func(phys_pc);
1183 ptb = &tb_phys_hash[h];
1184 tb->phys_hash_next = *ptb;
1185 *ptb = tb;
1186
1187 /* add in the page list */
1188 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1189 if (phys_page2 != -1)
1190 tb_alloc_page(tb, 1, phys_page2);
1191 else
1192 tb->page_addr[1] = -1;
1193
1194 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1195 tb->jmp_next[0] = NULL;
1196 tb->jmp_next[1] = NULL;
1197
1198 /* init original jump addresses */
1199 if (tb->tb_next_offset[0] != 0xffff)
1200 tb_reset_jump(tb, 0);
1201 if (tb->tb_next_offset[1] != 0xffff)
1202 tb_reset_jump(tb, 1);
1203
1204#ifdef DEBUG_TB_CHECK
1205 tb_page_check();
1206#endif
1207 mmap_unlock();
1208}
1209
1210/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1211 tb[1].tc_ptr. Return NULL if not found */
1212TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1213{
1214 int m_min, m_max, m;
1215 unsigned long v;
1216 TranslationBlock *tb;
1217
1218 if (nb_tbs <= 0)
1219 return NULL;
1220 if (tc_ptr < (unsigned long)code_gen_buffer ||
1221 tc_ptr >= (unsigned long)code_gen_ptr)
1222 return NULL;
1223 /* binary search (cf Knuth) */
1224 m_min = 0;
1225 m_max = nb_tbs - 1;
1226 while (m_min <= m_max) {
1227 m = (m_min + m_max) >> 1;
1228 tb = &tbs[m];
1229 v = (unsigned long)tb->tc_ptr;
1230 if (v == tc_ptr)
1231 return tb;
1232 else if (tc_ptr < v) {
1233 m_max = m - 1;
1234 } else {
1235 m_min = m + 1;
1236 }
1237 }
1238 return &tbs[m_max];
1239}
1240
1241static void tb_reset_jump_recursive(TranslationBlock *tb);
1242
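/* unchain jump 'n' of 'tb' and recursively unchain the TB it jumped to */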
1243static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1244{
1245 TranslationBlock *tb1, *tb_next, **ptb;
1246 unsigned int n1;
1247
1248 tb1 = tb->jmp_next[n];
1249 if (tb1 != NULL) {
1250 /* find head of list */
1251 for(;;) {
1252 n1 = (long)tb1 & 3;
1253 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1254 if (n1 == 2)
1255 break;
1256 tb1 = tb1->jmp_next[n1];
1257 }
 1258 /* we are now sure that tb jumps to tb1 */
1259 tb_next = tb1;
1260
1261 /* remove tb from the jmp_first list */
1262 ptb = &tb_next->jmp_first;
1263 for(;;) {
1264 tb1 = *ptb;
1265 n1 = (long)tb1 & 3;
1266 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1267 if (n1 == n && tb1 == tb)
1268 break;
1269 ptb = &tb1->jmp_next[n1];
1270 }
1271 *ptb = tb->jmp_next[n];
1272 tb->jmp_next[n] = NULL;
1273
1274 /* suppress the jump to next tb in generated code */
1275 tb_reset_jump(tb, n);
1276
1277 /* suppress jumps in the tb on which we could have jumped */
1278 tb_reset_jump_recursive(tb_next);
1279 }
1280}
1281
1282static void tb_reset_jump_recursive(TranslationBlock *tb)
1283{
1284 tb_reset_jump_recursive2(tb, 0);
1285 tb_reset_jump_recursive2(tb, 1);
1286}
1287
1288#if defined(TARGET_HAS_ICE)
1289static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1290{
1291 target_phys_addr_t addr;
1292 target_ulong pd;
1293 ram_addr_t ram_addr;
1294 PhysPageDesc *p;
1295
1296 addr = cpu_get_phys_page_debug(env, pc);
1297 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1298 if (!p) {
1299 pd = IO_MEM_UNASSIGNED;
1300 } else {
1301 pd = p->phys_offset;
1302 }
1303 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1304 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1305}
1306#endif
1307
1308/* Add a watchpoint. */
1309int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1310 int flags, CPUWatchpoint **watchpoint)
1311{
1312 target_ulong len_mask = ~(len - 1);
1313 CPUWatchpoint *wp;
1314
1315 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1316 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1317 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1318 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1319 return -EINVAL;
1320 }
1321 wp = qemu_malloc(sizeof(*wp));
1322 if (!wp)
1323 return -ENOMEM;
1324
1325 wp->vaddr = addr;
1326 wp->len_mask = len_mask;
1327 wp->flags = flags;
1328
1329 /* keep all GDB-injected watchpoints in front */
1330 if (flags & BP_GDB)
1331 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1332 else
1333 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1334
1335 tlb_flush_page(env, addr);
1336
1337 if (watchpoint)
1338 *watchpoint = wp;
1339 return 0;
1340}
1341
1342/* Remove a specific watchpoint. */
1343int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1344 int flags)
1345{
1346 target_ulong len_mask = ~(len - 1);
1347 CPUWatchpoint *wp;
1348
1349 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1350 if (addr == wp->vaddr && len_mask == wp->len_mask
1351 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1352 cpu_watchpoint_remove_by_ref(env, wp);
1353 return 0;
1354 }
1355 }
1356 return -ENOENT;
1357}
1358
1359/* Remove a specific watchpoint by reference. */
1360void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1361{
1362 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1363
1364 tlb_flush_page(env, watchpoint->vaddr);
1365
1366 qemu_free(watchpoint);
1367}
1368
1369/* Remove all matching watchpoints. */
1370void cpu_watchpoint_remove_all(CPUState *env, int mask)
1371{
1372 CPUWatchpoint *wp, *next;
1373
1374 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1375 if (wp->flags & mask)
1376 cpu_watchpoint_remove_by_ref(env, wp);
1377 }
1378}
1379
1380/* Add a breakpoint. */
1381int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1382 CPUBreakpoint **breakpoint)
1383{
1384#if defined(TARGET_HAS_ICE)
1385 CPUBreakpoint *bp;
1386
1387 bp = qemu_malloc(sizeof(*bp));
1388 if (!bp)
1389 return -ENOMEM;
1390
1391 bp->pc = pc;
1392 bp->flags = flags;
1393
1394 /* keep all GDB-injected breakpoints in front */
1395 if (flags & BP_GDB)
1396 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1397 else
1398 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1399
1400 breakpoint_invalidate(env, pc);
1401
1402 if (breakpoint)
1403 *breakpoint = bp;
1404 return 0;
1405#else
1406 return -ENOSYS;
1407#endif
1408}
1409
1410/* Remove a specific breakpoint. */
1411int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1412{
1413#if defined(TARGET_HAS_ICE)
1414 CPUBreakpoint *bp;
1415
1416 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1417 if (bp->pc == pc && bp->flags == flags) {
1418 cpu_breakpoint_remove_by_ref(env, bp);
1419 return 0;
1420 }
1421 }
1422 return -ENOENT;
1423#else
1424 return -ENOSYS;
1425#endif
1426}
1427
1428/* Remove a specific breakpoint by reference. */
1429void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1430{
1431#if defined(TARGET_HAS_ICE)
1432 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1433
1434 breakpoint_invalidate(env, breakpoint->pc);
1435
1436 qemu_free(breakpoint);
1437#endif
1438}
1439
1440/* Remove all matching breakpoints. */
1441void cpu_breakpoint_remove_all(CPUState *env, int mask)
1442{
1443#if defined(TARGET_HAS_ICE)
1444 CPUBreakpoint *bp, *next;
1445
1446 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1447 if (bp->flags & mask)
1448 cpu_breakpoint_remove_by_ref(env, bp);
1449 }
1450#endif
1451}
1452
1453/* enable or disable single step mode. EXCP_DEBUG is returned by the
1454 CPU loop after each instruction */
1455void cpu_single_step(CPUState *env, int enabled)
1456{
1457#if defined(TARGET_HAS_ICE)
1458 if (env->singlestep_enabled != enabled) {
1459 env->singlestep_enabled = enabled;
 1460 /* must flush all the translated code to avoid inconsistencies */
1461 /* XXX: only flush what is necessary */
1462 tb_flush(env);
1463 }
1464#endif
1465}
1466
1467/* enable or disable low-level logging */
1468void cpu_set_log(int log_flags)
1469{
1470 loglevel = log_flags;
1471 if (loglevel && !logfile) {
1472 logfile = fopen(logfilename, log_append ? "a" : "w");
1473 if (!logfile) {
1474 perror(logfilename);
1475 _exit(1);
1476 }
1477#if !defined(CONFIG_SOFTMMU)
1478 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1479 {
1480 static char logfile_buf[4096];
1481 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1482 }
1483#else
1484 setvbuf(logfile, NULL, _IOLBF, 0);
1485#endif
1486 log_append = 1;
1487 }
1488 if (!loglevel && logfile) {
1489 fclose(logfile);
1490 logfile = NULL;
1491 }
1492}
1493
1494void cpu_set_log_filename(const char *filename)
1495{
1496 logfilename = strdup(filename);
1497 if (logfile) {
1498 fclose(logfile);
1499 logfile = NULL;
1500 }
1501 cpu_set_log(loglevel);
1502}
1503
1504/* mask must never be zero, except for A20 change call */
1505void cpu_interrupt(CPUState *env, int mask)
1506{
1507#if !defined(USE_NPTL)
1508 TranslationBlock *tb;
1509 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1510#endif
1511 int old_mask;
1512
1513 old_mask = env->interrupt_request;
1514 /* FIXME: This is probably not threadsafe. A different thread could
1515 be in the middle of a read-modify-write operation. */
1516 env->interrupt_request |= mask;
1517#if defined(USE_NPTL)
1518 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1519 problem and hope the cpu will stop of its own accord. For userspace
1520 emulation this often isn't actually as bad as it sounds. Often
1521 signals are used primarily to interrupt blocking syscalls. */
1522#else
1523 if (use_icount) {
1524 env->icount_decr.u16.high = 0xffff;
1525#ifndef CONFIG_USER_ONLY
1526 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1527 an async event happened and we need to process it. */
1528 if (!can_do_io(env)
1529 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1530 cpu_abort(env, "Raised interrupt while not in I/O function");
1531 }
1532#endif
1533 } else {
1534 tb = env->current_tb;
1535 /* if the cpu is currently executing code, we must unlink it and
 1536 all the potentially executing TBs */
1537 if (tb && !testandset(&interrupt_lock)) {
1538 env->current_tb = NULL;
1539 tb_reset_jump_recursive(tb);
1540 resetlock(&interrupt_lock);
1541 }
1542 }
1543#endif
1544}
1545
1546void cpu_reset_interrupt(CPUState *env, int mask)
1547{
1548 env->interrupt_request &= ~mask;
1549}
1550
1551const CPULogItem cpu_log_items[] = {
1552 { CPU_LOG_TB_OUT_ASM, "out_asm",
1553 "show generated host assembly code for each compiled TB" },
1554 { CPU_LOG_TB_IN_ASM, "in_asm",
1555 "show target assembly code for each compiled TB" },
1556 { CPU_LOG_TB_OP, "op",
1557 "show micro ops for each compiled TB" },
1558 { CPU_LOG_TB_OP_OPT, "op_opt",
1559 "show micro ops "
1560#ifdef TARGET_I386
1561 "before eflags optimization and "
1562#endif
1563 "after liveness analysis" },
1564 { CPU_LOG_INT, "int",
1565 "show interrupts/exceptions in short format" },
1566 { CPU_LOG_EXEC, "exec",
1567 "show trace before each executed TB (lots of logs)" },
1568 { CPU_LOG_TB_CPU, "cpu",
1569 "show CPU state before block translation" },
1570#ifdef TARGET_I386
1571 { CPU_LOG_PCALL, "pcall",
1572 "show protected mode far calls/returns/exceptions" },
1573#endif
1574#ifdef DEBUG_IOPORT
1575 { CPU_LOG_IOPORT, "ioport",
1576 "show all i/o ports accesses" },
1577#endif
1578 { 0, NULL, NULL },
1579};
1580
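/* return true if the first 'n' characters of 's1' match the
   zero-terminated string 's2' */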
1581static int cmp1(const char *s1, int n, const char *s2)
1582{
1583 if (strlen(s2) != n)
1584 return 0;
1585 return memcmp(s1, s2, n) == 0;
1586}
1587
1588/* take a comma separated list of log masks and return the combined mask, or 0 on error */
1589int cpu_str_to_log_mask(const char *str)
1590{
1591 const CPULogItem *item;
1592 int mask;
1593 const char *p, *p1;
1594
1595 p = str;
1596 mask = 0;
1597 for(;;) {
1598 p1 = strchr(p, ',');
1599 if (!p1)
1600 p1 = p + strlen(p);
1601 if(cmp1(p,p1-p,"all")) {
1602 for(item = cpu_log_items; item->mask != 0; item++) {
1603 mask |= item->mask;
1604 }
1605 } else {
1606 for(item = cpu_log_items; item->mask != 0; item++) {
1607 if (cmp1(p, p1 - p, item->name))
1608 goto found;
1609 }
1610 return 0;
1611 }
1612 found:
1613 mask |= item->mask;
1614 if (*p1 != ',')
1615 break;
1616 p = p1 + 1;
1617 }
1618 return mask;
1619}
1620
1621void cpu_abort(CPUState *env, const char *fmt, ...)
1622{
1623 va_list ap;
1624 va_list ap2;
1625
1626 va_start(ap, fmt);
1627 va_copy(ap2, ap);
1628 fprintf(stderr, "qemu: fatal: ");
1629 vfprintf(stderr, fmt, ap);
1630 fprintf(stderr, "\n");
1631#ifdef TARGET_I386
1632 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1633#else
1634 cpu_dump_state(env, stderr, fprintf, 0);
1635#endif
1636 if (logfile) {
1637 fprintf(logfile, "qemu: fatal: ");
1638 vfprintf(logfile, fmt, ap2);
1639 fprintf(logfile, "\n");
1640#ifdef TARGET_I386
1641 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1642#else
1643 cpu_dump_state(env, logfile, fprintf, 0);
1644#endif
1645 fflush(logfile);
1646 fclose(logfile);
1647 }
1648 va_end(ap2);
1649 va_end(ap);
1650 abort();
1651}
1652
1653CPUState *cpu_copy(CPUState *env)
1654{
1655 CPUState *new_env = cpu_init(env->cpu_model_str);
1656 /* preserve chaining and index */
1657 CPUState *next_cpu = new_env->next_cpu;
1658 int cpu_index = new_env->cpu_index;
1659 memcpy(new_env, env, sizeof(CPUState));
1660 new_env->next_cpu = next_cpu;
1661 new_env->cpu_index = cpu_index;
1662 return new_env;
1663}
1664
1665#if !defined(CONFIG_USER_ONLY)
1666
1667static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1668{
1669 unsigned int i;
1670
1671 /* Discard jump cache entries for any tb which might potentially
1672 overlap the flushed page. */
1673 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1674 memset (&env->tb_jmp_cache[i], 0,
1675 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1676
1677 i = tb_jmp_cache_hash_page(addr);
1678 memset (&env->tb_jmp_cache[i], 0,
1679 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1680}
1681
1682/* NOTE: if flush_global is true, also flush global entries (not
1683 implemented yet) */
1684void tlb_flush(CPUState *env, int flush_global)
1685{
1686 int i;
1687
1688#if defined(DEBUG_TLB)
1689 printf("tlb_flush:\n");
1690#endif
1691 /* must reset current TB so that interrupts cannot modify the
1692 links while we are modifying them */
1693 env->current_tb = NULL;
1694
1695 for(i = 0; i < CPU_TLB_SIZE; i++) {
1696 env->tlb_table[0][i].addr_read = -1;
1697 env->tlb_table[0][i].addr_write = -1;
1698 env->tlb_table[0][i].addr_code = -1;
1699 env->tlb_table[1][i].addr_read = -1;
1700 env->tlb_table[1][i].addr_write = -1;
1701 env->tlb_table[1][i].addr_code = -1;
1702#if (NB_MMU_MODES >= 3)
1703 env->tlb_table[2][i].addr_read = -1;
1704 env->tlb_table[2][i].addr_write = -1;
1705 env->tlb_table[2][i].addr_code = -1;
1706#if (NB_MMU_MODES == 4)
1707 env->tlb_table[3][i].addr_read = -1;
1708 env->tlb_table[3][i].addr_write = -1;
1709 env->tlb_table[3][i].addr_code = -1;
1710#endif
1711#endif
1712 }
1713
1714 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1715
1716#ifdef USE_KQEMU
1717 if (env->kqemu_enabled) {
1718 kqemu_flush(env, flush_global);
1719 }
1720#endif
1721 tlb_flush_count++;
1722}
1723
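/* invalidate a single TLB entry if any of its addresses matches 'addr' */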
1724static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1725{
1726 if (addr == (tlb_entry->addr_read &
1727 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1728 addr == (tlb_entry->addr_write &
1729 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1730 addr == (tlb_entry->addr_code &
1731 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1732 tlb_entry->addr_read = -1;
1733 tlb_entry->addr_write = -1;
1734 tlb_entry->addr_code = -1;
1735 }
1736}
1737
1738void tlb_flush_page(CPUState *env, target_ulong addr)
1739{
1740 int i;
1741
1742#if defined(DEBUG_TLB)
1743 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1744#endif
1745 /* must reset current TB so that interrupts cannot modify the
1746 links while we are modifying them */
1747 env->current_tb = NULL;
1748
1749 addr &= TARGET_PAGE_MASK;
1750 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1751 tlb_flush_entry(&env->tlb_table[0][i], addr);
1752 tlb_flush_entry(&env->tlb_table[1][i], addr);
1753#if (NB_MMU_MODES >= 3)
1754 tlb_flush_entry(&env->tlb_table[2][i], addr);
1755#if (NB_MMU_MODES == 4)
1756 tlb_flush_entry(&env->tlb_table[3][i], addr);
1757#endif
1758#endif
1759
1760 tlb_flush_jmp_cache(env, addr);
1761
1762#ifdef USE_KQEMU
1763 if (env->kqemu_enabled) {
1764 kqemu_flush_page(env, addr);
1765 }
1766#endif
1767}
1768
1769/* update the TLBs so that writes to code in the virtual page 'addr'
1770 can be detected */
1771static void tlb_protect_code(ram_addr_t ram_addr)
1772{
1773 cpu_physical_memory_reset_dirty(ram_addr,
1774 ram_addr + TARGET_PAGE_SIZE,
1775 CODE_DIRTY_FLAG);
1776}
1777
1778/* update the TLB so that writes in physical page 'phys_addr' are no longer
1779 tested for self modifying code */
1780static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1781 target_ulong vaddr)
1782{
1783 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1784}
1785
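/* if the entry maps RAM inside the host address range
   [start, start + length), flag it TLB_NOTDIRTY so that the next write
   takes the slow path and updates the dirty bits again */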
1786static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1787 unsigned long start, unsigned long length)
1788{
1789 unsigned long addr;
1790 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1791 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1792 if ((addr - start) < length) {
1793 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1794 }
1795 }
1796}
1797
1798void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1799 int dirty_flags)
1800{
1801 CPUState *env;
1802 unsigned long length, start1;
1803 int i, mask, len;
1804 uint8_t *p;
1805
1806 start &= TARGET_PAGE_MASK;
1807 end = TARGET_PAGE_ALIGN(end);
1808
1809 length = end - start;
1810 if (length == 0)
1811 return;
1812 len = length >> TARGET_PAGE_BITS;
1813#ifdef USE_KQEMU
1814 /* XXX: should not depend on cpu context */
1815 env = first_cpu;
1816 if (env->kqemu_enabled) {
1817 ram_addr_t addr;
1818 addr = start;
1819 for(i = 0; i < len; i++) {
1820 kqemu_set_notdirty(env, addr);
1821 addr += TARGET_PAGE_SIZE;
1822 }
1823 }
1824#endif
1825 mask = ~dirty_flags;
1826 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1827 for(i = 0; i < len; i++)
1828 p[i] &= mask;
1829
1830 /* we modify the TLB cache so that the dirty bit will be set again
1831 when accessing the range */
1832 start1 = start + (unsigned long)phys_ram_base;
1833 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1834 for(i = 0; i < CPU_TLB_SIZE; i++)
1835 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1836 for(i = 0; i < CPU_TLB_SIZE; i++)
1837 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1838#if (NB_MMU_MODES >= 3)
1839 for(i = 0; i < CPU_TLB_SIZE; i++)
1840 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1841#if (NB_MMU_MODES == 4)
1842 for(i = 0; i < CPU_TLB_SIZE; i++)
1843 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1844#endif
1845#endif
1846 }
1847}
1848
1849int cpu_physical_memory_set_dirty_tracking(int enable)
1850{
1851 in_migration = enable;
1852 return 0;
1853}
1854
1855int cpu_physical_memory_get_dirty_tracking(void)
1856{
1857 return in_migration;
1858}
1859
1860void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1861{
1862 if (kvm_enabled())
1863 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1864}
1865
1866static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1867{
1868 ram_addr_t ram_addr;
1869
1870 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1871 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1872 tlb_entry->addend - (unsigned long)phys_ram_base;
1873 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1874 tlb_entry->addr_write |= TLB_NOTDIRTY;
1875 }
1876 }
1877}
1878
1879/* update the TLB according to the current state of the dirty bits */
1880void cpu_tlb_update_dirty(CPUState *env)
1881{
1882 int i;
1883 for(i = 0; i < CPU_TLB_SIZE; i++)
1884 tlb_update_dirty(&env->tlb_table[0][i]);
1885 for(i = 0; i < CPU_TLB_SIZE; i++)
1886 tlb_update_dirty(&env->tlb_table[1][i]);
1887#if (NB_MMU_MODES >= 3)
1888 for(i = 0; i < CPU_TLB_SIZE; i++)
1889 tlb_update_dirty(&env->tlb_table[2][i]);
1890#if (NB_MMU_MODES == 4)
1891 for(i = 0; i < CPU_TLB_SIZE; i++)
1892 tlb_update_dirty(&env->tlb_table[3][i]);
1893#endif
1894#endif
1895}
1896
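/* clear the TLB_NOTDIRTY flag of an entry matching 'vaddr' so that
   subsequent writes use the fast path again */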
1897static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1898{
1899 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1900 tlb_entry->addr_write = vaddr;
1901}
1902
1903/* update the TLB corresponding to virtual page vaddr
1904 so that it is no longer dirty */
1905static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1906{
1907 int i;
1908
1909 vaddr &= TARGET_PAGE_MASK;
1910 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1911 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1912 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1913#if (NB_MMU_MODES >= 3)
1914 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1915#if (NB_MMU_MODES == 4)
1916 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1917#endif
1918#endif
1919}
1920
1921/* add a new TLB entry. At most one entry for a given virtual address
1922 is permitted. Return 0 if OK or 2 if the page could not be mapped
1923 (can only happen in non SOFTMMU mode for I/O pages or pages
1924 conflicting with the host address space). */
1925int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1926 target_phys_addr_t paddr, int prot,
1927 int mmu_idx, int is_softmmu)
1928{
1929 PhysPageDesc *p;
1930 unsigned long pd;
1931 unsigned int index;
1932 target_ulong address;
1933 target_ulong code_address;
1934 target_phys_addr_t addend;
1935 int ret;
1936 CPUTLBEntry *te;
1937 CPUWatchpoint *wp;
1938 target_phys_addr_t iotlb;
1939
1940 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1941 if (!p) {
1942 pd = IO_MEM_UNASSIGNED;
1943 } else {
1944 pd = p->phys_offset;
1945 }
1946#if defined(DEBUG_TLB)
1947 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1948 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1949#endif
1950
1951 ret = 0;
1952 address = vaddr;
1953 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1954 /* IO memory case (romd handled later) */
1955 address |= TLB_MMIO;
1956 }
1957 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1958 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1959 /* Normal RAM. */
1960 iotlb = pd & TARGET_PAGE_MASK;
1961 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1962 iotlb |= IO_MEM_NOTDIRTY;
1963 else
1964 iotlb |= IO_MEM_ROM;
1965 } else {
1966 /* IO handlers are currently passed a physical address.
1967 It would be nice to pass an offset from the base address
1968 of that region. This would avoid having to special case RAM,
1969 and avoid full address decoding in every device.
1970 We can't use the high bits of pd for this because
1971 IO_MEM_ROMD uses these as a ram address. */
1972 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1973 }
1974
1975 code_address = address;
1976 /* Make accesses to pages with watchpoints go via the
1977 watchpoint trap routines. */
1978 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1979 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1980 iotlb = io_mem_watch + paddr;
1981 /* TODO: The memory case can be optimized by not trapping
1982 reads of pages with a write breakpoint. */
1983 address |= TLB_MMIO;
1984 }
1985 }
1986
1987 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1988 env->iotlb[mmu_idx][index] = iotlb - vaddr;
1989 te = &env->tlb_table[mmu_idx][index];
1990 te->addend = addend - vaddr;
1991 if (prot & PAGE_READ) {
1992 te->addr_read = address;
1993 } else {
1994 te->addr_read = -1;
1995 }
1996
1997 if (prot & PAGE_EXEC) {
1998 te->addr_code = code_address;
1999 } else {
2000 te->addr_code = -1;
2001 }
2002 if (prot & PAGE_WRITE) {
2003 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2004 (pd & IO_MEM_ROMD)) {
2005 /* Write access calls the I/O callback. */
2006 te->addr_write = address | TLB_MMIO;
2007 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2008 !cpu_physical_memory_is_dirty(pd)) {
2009 te->addr_write = address | TLB_NOTDIRTY;
2010 } else {
2011 te->addr_write = address;
2012 }
2013 } else {
2014 te->addr_write = -1;
2015 }
2016 return ret;
2017}
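
/* Illustrative usage sketch, not part of the original file: a target's MMU
   fault handler (reached via tlb_fill) resolves the faulting virtual address
   to a physical address plus protection bits and installs the mapping with
   the function above.  vaddr, paddr, prot and mmu_idx are hypothetical
   locals of that handler.

       tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                         paddr & TARGET_PAGE_MASK,
                         prot, mmu_idx, 1);
*/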
2018
2019#else
2020
2021void tlb_flush(CPUState *env, int flush_global)
2022{
2023}
2024
2025void tlb_flush_page(CPUState *env, target_ulong addr)
2026{
2027}
2028
2029int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2030 target_phys_addr_t paddr, int prot,
2031 int mmu_idx, int is_softmmu)
2032{
2033 return 0;
2034}
2035
2036/* dump memory mappings */
2037void page_dump(FILE *f)
2038{
2039 unsigned long start, end;
2040 int i, j, prot, prot1;
2041 PageDesc *p;
2042
2043 fprintf(f, "%-8s %-8s %-8s %s\n",
2044 "start", "end", "size", "prot");
2045 start = -1;
2046 end = -1;
2047 prot = 0;
2048 for(i = 0; i <= L1_SIZE; i++) {
2049 if (i < L1_SIZE)
2050 p = l1_map[i];
2051 else
2052 p = NULL;
2053 for(j = 0;j < L2_SIZE; j++) {
2054 if (!p)
2055 prot1 = 0;
2056 else
2057 prot1 = p[j].flags;
2058 if (prot1 != prot) {
2059 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2060 if (start != -1) {
2061 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2062 start, end, end - start,
2063 prot & PAGE_READ ? 'r' : '-',
2064 prot & PAGE_WRITE ? 'w' : '-',
2065 prot & PAGE_EXEC ? 'x' : '-');
2066 }
2067 if (prot1 != 0)
2068 start = end;
2069 else
2070 start = -1;
2071 prot = prot1;
2072 }
2073 if (!p)
2074 break;
2075 }
2076 }
2077}
2078
2079int page_get_flags(target_ulong address)
2080{
2081 PageDesc *p;
2082
2083 p = page_find(address >> TARGET_PAGE_BITS);
2084 if (!p)
2085 return 0;
2086 return p->flags;
2087}
2088
2089/* modify the flags of a page and invalidate the code if
2090 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2091 depending on PAGE_WRITE */
2092void page_set_flags(target_ulong start, target_ulong end, int flags)
2093{
2094 PageDesc *p;
2095 target_ulong addr;
2096
2097 /* mmap_lock should already be held. */
2098 start = start & TARGET_PAGE_MASK;
2099 end = TARGET_PAGE_ALIGN(end);
2100 if (flags & PAGE_WRITE)
2101 flags |= PAGE_WRITE_ORG;
2102 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2103 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2104 /* We may be called for host regions that are outside guest
2105 address space. */
2106 if (!p)
2107 return;
2108 /* if the write protection is set, then we invalidate the code
2109 inside */
2110 if (!(p->flags & PAGE_WRITE) &&
2111 (flags & PAGE_WRITE) &&
2112 p->first_tb) {
2113 tb_invalidate_phys_page(addr, 0, NULL);
2114 }
2115 p->flags = flags;
2116 }
2117}
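
/* Illustrative usage sketch, not part of the original file: linux-user
   emulation records a successful target mmap() in this page table, e.g.

       page_set_flags(start, start + len,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE);
*/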
2118
2119int page_check_range(target_ulong start, target_ulong len, int flags)
2120{
2121 PageDesc *p;
2122 target_ulong end;
2123 target_ulong addr;
2124
2125 if (start + len < start)
2126 /* we've wrapped around */
2127 return -1;
2128
2129 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2130 start = start & TARGET_PAGE_MASK;
2131
2132 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2133 p = page_find(addr >> TARGET_PAGE_BITS);
2134 if( !p )
2135 return -1;
2136 if( !(p->flags & PAGE_VALID) )
2137 return -1;
2138
2139 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2140 return -1;
2141 if (flags & PAGE_WRITE) {
2142 if (!(p->flags & PAGE_WRITE_ORG))
2143 return -1;
2144 /* unprotect the page if it was put read-only because it
2145 contains translated code */
2146 if (!(p->flags & PAGE_WRITE)) {
2147 if (!page_unprotect(addr, 0, NULL))
2148 return -1;
2149 }
2150 return 0;
2151 }
2152 }
2153 return 0;
2154}
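
/* Illustrative usage sketch, not part of the original file: syscall
   emulation can validate a guest buffer before copying to or from it;
   guest_addr and size are hypothetical, and -TARGET_EFAULT follows the
   linux-user errno convention.

       if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) < 0)
           return -TARGET_EFAULT;
*/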
2155
2156/* called from signal handler: invalidate the code and unprotect the
2157 page. Return TRUE if the fault was successfully handled. */
2158int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2159{
2160 unsigned int page_index, prot, pindex;
2161 PageDesc *p, *p1;
2162 target_ulong host_start, host_end, addr;
2163
2164 /* Technically this isn't safe inside a signal handler. However we
2165 know this only ever happens in a synchronous SEGV handler, so in
2166 practice it seems to be ok. */
2167 mmap_lock();
2168
2169 host_start = address & qemu_host_page_mask;
2170 page_index = host_start >> TARGET_PAGE_BITS;
2171 p1 = page_find(page_index);
2172 if (!p1) {
2173 mmap_unlock();
2174 return 0;
2175 }
2176 host_end = host_start + qemu_host_page_size;
2177 p = p1;
2178 prot = 0;
2179 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2180 prot |= p->flags;
2181 p++;
2182 }
2183 /* if the page was really writable, then we change its
2184 protection back to writable */
2185 if (prot & PAGE_WRITE_ORG) {
2186 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2187 if (!(p1[pindex].flags & PAGE_WRITE)) {
2188 mprotect((void *)g2h(host_start), qemu_host_page_size,
2189 (prot & PAGE_BITS) | PAGE_WRITE);
2190 p1[pindex].flags |= PAGE_WRITE;
2191 /* and since the content will be modified, we must invalidate
2192 the corresponding translated code. */
2193 tb_invalidate_phys_page(address, pc, puc);
2194#ifdef DEBUG_TB_CHECK
2195 tb_invalidate_check(address);
2196#endif
2197 mmap_unlock();
2198 return 1;
2199 }
2200 }
2201 mmap_unlock();
2202 return 0;
2203}
2204
2205static inline void tlb_set_dirty(CPUState *env,
2206 unsigned long addr, target_ulong vaddr)
2207{
2208}
2209#endif /* defined(CONFIG_USER_ONLY) */
2210
2211#if !defined(CONFIG_USER_ONLY)
2212static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2213 ram_addr_t memory);
2214static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2215 ram_addr_t orig_memory);
2216#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2217 need_subpage) \
2218 do { \
2219 if (addr > start_addr) \
2220 start_addr2 = 0; \
2221 else { \
2222 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2223 if (start_addr2 > 0) \
2224 need_subpage = 1; \
2225 } \
2226 \
2227 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2228 end_addr2 = TARGET_PAGE_SIZE - 1; \
2229 else { \
2230 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2231 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2232 need_subpage = 1; \
2233 } \
2234 } while (0)
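
/* Worked example for the macro above (illustrative, assuming a 4 KiB target
   page): registering start_addr = 0x1100 with orig_size = 0x200 makes the
   loop below visit the page at addr = 0x1000.  Since addr < start_addr,
   start_addr2 = 0x1100 & 0xfff = 0x100 (> 0); since the region ends inside
   the page, end_addr2 = 0x12ff & 0xfff = 0x2ff (< 0xfff).  Both branches set
   need_subpage, so only offsets 0x100..0x2ff of that page are routed to the
   new handler by subpage_register(). */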
2235
2236/* register physical memory. 'size' must be a multiple of the target
2237 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2238 io memory page */
2239void cpu_register_physical_memory(target_phys_addr_t start_addr,
2240 ram_addr_t size,
2241 ram_addr_t phys_offset)
2242{
2243 target_phys_addr_t addr, end_addr;
2244 PhysPageDesc *p;
2245 CPUState *env;
2246 ram_addr_t orig_size = size;
2247 void *subpage;
2248
2249#ifdef USE_KQEMU
2250 /* XXX: should not depend on cpu context */
2251 env = first_cpu;
2252 if (env->kqemu_enabled) {
2253 kqemu_set_phys_mem(start_addr, size, phys_offset);
2254 }
2255#endif
2256 if (kvm_enabled())
2257 kvm_set_phys_mem(start_addr, size, phys_offset);
2258
2259 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2260 end_addr = start_addr + (target_phys_addr_t)size;
2261 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2262 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2263 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2264 ram_addr_t orig_memory = p->phys_offset;
2265 target_phys_addr_t start_addr2, end_addr2;
2266 int need_subpage = 0;
2267
2268 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2269 need_subpage);
2270 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2271 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2272 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2273 &p->phys_offset, orig_memory);
2274 } else {
2275 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2276 >> IO_MEM_SHIFT];
2277 }
2278 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2279 } else {
2280 p->phys_offset = phys_offset;
2281 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2282 (phys_offset & IO_MEM_ROMD))
2283 phys_offset += TARGET_PAGE_SIZE;
2284 }
2285 } else {
2286 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2287 p->phys_offset = phys_offset;
2288 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2289 (phys_offset & IO_MEM_ROMD))
2290 phys_offset += TARGET_PAGE_SIZE;
2291 else {
2292 target_phys_addr_t start_addr2, end_addr2;
2293 int need_subpage = 0;
2294
2295 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2296 end_addr2, need_subpage);
2297
2298 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2299 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2300 &p->phys_offset, IO_MEM_UNASSIGNED);
2301 subpage_register(subpage, start_addr2, end_addr2,
2302 phys_offset);
2303 }
2304 }
2305 }
2306 }
2307
2308 /* since each CPU stores ram addresses in its TLB cache, we must
2309 reset the modified entries */
2310 /* XXX: slow ! */
2311 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2312 tlb_flush(env, 1);
2313 }
2314}
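
/* Illustrative usage sketch, not part of the original file: board code of
   this era allocates guest RAM with qemu_ram_alloc() and maps it with the
   function above.  my_ram_size and my_ram_offset are hypothetical.

       ram_addr_t my_ram_size = 64 * 1024 * 1024;
       ram_addr_t my_ram_offset = qemu_ram_alloc(my_ram_size);
       cpu_register_physical_memory(0x00000000, my_ram_size,
                                    my_ram_offset | IO_MEM_RAM);
*/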
2315
2316/* XXX: temporary until new memory mapping API */
2317ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2318{
2319 PhysPageDesc *p;
2320
2321 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2322 if (!p)
2323 return IO_MEM_UNASSIGNED;
2324 return p->phys_offset;
2325}
2326
2327/* XXX: better than nothing */
2328ram_addr_t qemu_ram_alloc(ram_addr_t size)
2329{
2330 ram_addr_t addr;
2331 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2332 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2333 (uint64_t)size, (uint64_t)phys_ram_size);
2334 abort();
2335 }
2336 addr = phys_ram_alloc_offset;
2337 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2338 return addr;
2339}
2340
2341void qemu_ram_free(ram_addr_t addr)
2342{
2343}
2344
2345static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2346{
2347#ifdef DEBUG_UNASSIGNED
2348 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2349#endif
2350#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2351 do_unassigned_access(addr, 0, 0, 0, 1);
2352#endif
2353 return 0;
2354}
2355
2356static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2357{
2358#ifdef DEBUG_UNASSIGNED
2359 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2360#endif
2361#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2362 do_unassigned_access(addr, 0, 0, 0, 2);
2363#endif
2364 return 0;
2365}
2366
2367static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2368{
2369#ifdef DEBUG_UNASSIGNED
2370 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2371#endif
2372#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2373 do_unassigned_access(addr, 0, 0, 0, 4);
2374#endif
2375 return 0;
2376}
2377
2378static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2379{
2380#ifdef DEBUG_UNASSIGNED
2381 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2382#endif
2383#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2384 do_unassigned_access(addr, 1, 0, 0, 1);
2385#endif
2386}
2387
2388static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2389{
2390#ifdef DEBUG_UNASSIGNED
2391 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2392#endif
2393#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2394 do_unassigned_access(addr, 1, 0, 0, 2);
2395#endif
2396}
2397
2398static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2399{
2400#ifdef DEBUG_UNASSIGNED
2401 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2402#endif
2403#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2404 do_unassigned_access(addr, 1, 0, 0, 4);
2405#endif
2406}
2407
2408static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2409 unassigned_mem_readb,
2410 unassigned_mem_readw,
2411 unassigned_mem_readl,
2412};
2413
2414static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2415 unassigned_mem_writeb,
2416 unassigned_mem_writew,
2417 unassigned_mem_writel,
2418};
2419
2420static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2421 uint32_t val)
2422{
2423 int dirty_flags;
2424 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2425 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2426#if !defined(CONFIG_USER_ONLY)
2427 tb_invalidate_phys_page_fast(ram_addr, 1);
2428 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2429#endif
2430 }
2431 stb_p(phys_ram_base + ram_addr, val);
2432#ifdef USE_KQEMU
2433 if (cpu_single_env->kqemu_enabled &&
2434 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2435 kqemu_modify_page(cpu_single_env, ram_addr);
2436#endif
2437 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2438 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2439 /* we remove the notdirty callback only if the code has been
2440 flushed */
2441 if (dirty_flags == 0xff)
2442 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2443}
2444
2445static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2446 uint32_t val)
2447{
2448 int dirty_flags;
2449 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2450 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2451#if !defined(CONFIG_USER_ONLY)
2452 tb_invalidate_phys_page_fast(ram_addr, 2);
2453 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2454#endif
2455 }
2456 stw_p(phys_ram_base + ram_addr, val);
2457#ifdef USE_KQEMU
2458 if (cpu_single_env->kqemu_enabled &&
2459 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2460 kqemu_modify_page(cpu_single_env, ram_addr);
2461#endif
2462 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2463 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2464 /* we remove the notdirty callback only if the code has been
2465 flushed */
2466 if (dirty_flags == 0xff)
2467 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2468}
2469
2470static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2471 uint32_t val)
2472{
2473 int dirty_flags;
2474 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2475 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2476#if !defined(CONFIG_USER_ONLY)
2477 tb_invalidate_phys_page_fast(ram_addr, 4);
2478 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2479#endif
2480 }
2481 stl_p(phys_ram_base + ram_addr, val);
2482#ifdef USE_KQEMU
2483 if (cpu_single_env->kqemu_enabled &&
2484 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2485 kqemu_modify_page(cpu_single_env, ram_addr);
2486#endif
2487 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2488 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2489 /* we remove the notdirty callback only if the code has been
2490 flushed */
2491 if (dirty_flags == 0xff)
2492 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2493}
2494
2495static CPUReadMemoryFunc *error_mem_read[3] = {
2496 NULL, /* never used */
2497 NULL, /* never used */
2498 NULL, /* never used */
2499};
2500
2501static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2502 notdirty_mem_writeb,
2503 notdirty_mem_writew,
2504 notdirty_mem_writel,
2505};
2506
2507/* Generate a debug exception if a watchpoint has been hit. */
2508static void check_watchpoint(int offset, int len_mask, int flags)
2509{
2510 CPUState *env = cpu_single_env;
2511 target_ulong pc, cs_base;
2512 TranslationBlock *tb;
2513 target_ulong vaddr;
2514 CPUWatchpoint *wp;
2515 int cpu_flags;
2516
2517 if (env->watchpoint_hit) {
2518 /* We re-entered the check after replacing the TB. Now raise
2519 * the debug interrupt so that it will trigger after the
2520 * current instruction. */
2521 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2522 return;
2523 }
2524 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2525 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2526 if ((vaddr == (wp->vaddr & len_mask) ||
2527 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2528 wp->flags |= BP_WATCHPOINT_HIT;
2529 if (!env->watchpoint_hit) {
2530 env->watchpoint_hit = wp;
2531 tb = tb_find_pc(env->mem_io_pc);
2532 if (!tb) {
2533 cpu_abort(env, "check_watchpoint: could not find TB for "
2534 "pc=%p", (void *)env->mem_io_pc);
2535 }
2536 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2537 tb_phys_invalidate(tb, -1);
2538 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2539 env->exception_index = EXCP_DEBUG;
2540 } else {
2541 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2542 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2543 }
2544 cpu_resume_from_signal(env, NULL);
2545 }
2546 } else {
2547 wp->flags &= ~BP_WATCHPOINT_HIT;
2548 }
2549 }
2550}
2551
2552/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2553 so these check for a hit then pass through to the normal out-of-line
2554 phys routines. */
2555static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2556{
2557 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2558 return ldub_phys(addr);
2559}
2560
2561static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2562{
2563 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2564 return lduw_phys(addr);
2565}
2566
2567static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2568{
2569 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2570 return ldl_phys(addr);
2571}
2572
2573static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2574 uint32_t val)
2575{
2576 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2577 stb_phys(addr, val);
2578}
2579
2580static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2581 uint32_t val)
2582{
2583 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2584 stw_phys(addr, val);
2585}
2586
2587static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2588 uint32_t val)
2589{
2590 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2591 stl_phys(addr, val);
2592}
2593
2594static CPUReadMemoryFunc *watch_mem_read[3] = {
2595 watch_mem_readb,
2596 watch_mem_readw,
2597 watch_mem_readl,
2598};
2599
2600static CPUWriteMemoryFunc *watch_mem_write[3] = {
2601 watch_mem_writeb,
2602 watch_mem_writew,
2603 watch_mem_writel,
2604};
2605
2606static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2607 unsigned int len)
2608{
2609 uint32_t ret;
2610 unsigned int idx;
2611
2612 idx = SUBPAGE_IDX(addr - mmio->base);
2613#if defined(DEBUG_SUBPAGE)
2614 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2615 mmio, len, addr, idx);
2616#endif
2617 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2618
2619 return ret;
2620}
2621
2622static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2623 uint32_t value, unsigned int len)
2624{
2625 unsigned int idx;
2626
2627 idx = SUBPAGE_IDX(addr - mmio->base);
2628#if defined(DEBUG_SUBPAGE)
2629 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2630 mmio, len, addr, idx, value);
2631#endif
2632 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2633}
2634
2635static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2636{
2637#if defined(DEBUG_SUBPAGE)
2638 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2639#endif
2640
2641 return subpage_readlen(opaque, addr, 0);
2642}
2643
2644static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2645 uint32_t value)
2646{
2647#if defined(DEBUG_SUBPAGE)
2648 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2649#endif
2650 subpage_writelen(opaque, addr, value, 0);
2651}
2652
2653static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2654{
2655#if defined(DEBUG_SUBPAGE)
2656 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2657#endif
2658
2659 return subpage_readlen(opaque, addr, 1);
2660}
2661
2662static void subpage_writew (void *opaque, target_phys_addr_t addr,
2663 uint32_t value)
2664{
2665#if defined(DEBUG_SUBPAGE)
2666 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2667#endif
2668 subpage_writelen(opaque, addr, value, 1);
2669}
2670
2671static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2672{
2673#if defined(DEBUG_SUBPAGE)
2674 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2675#endif
2676
2677 return subpage_readlen(opaque, addr, 2);
2678}
2679
2680static void subpage_writel (void *opaque,
2681 target_phys_addr_t addr, uint32_t value)
2682{
2683#if defined(DEBUG_SUBPAGE)
2684 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2685#endif
2686 subpage_writelen(opaque, addr, value, 2);
2687}
2688
2689static CPUReadMemoryFunc *subpage_read[] = {
2690 &subpage_readb,
2691 &subpage_readw,
2692 &subpage_readl,
2693};
2694
2695static CPUWriteMemoryFunc *subpage_write[] = {
2696 &subpage_writeb,
2697 &subpage_writew,
2698 &subpage_writel,
2699};
2700
2701static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2702 ram_addr_t memory)
2703{
2704 int idx, eidx;
2705 unsigned int i;
2706
2707 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2708 return -1;
2709 idx = SUBPAGE_IDX(start);
2710 eidx = SUBPAGE_IDX(end);
2711#if defined(DEBUG_SUBPAGE)
2712 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2713 mmio, start, end, idx, eidx, memory);
2714#endif
2715 memory >>= IO_MEM_SHIFT;
2716 for (; idx <= eidx; idx++) {
2717 for (i = 0; i < 4; i++) {
2718 if (io_mem_read[memory][i]) {
2719 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2720 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2721 }
2722 if (io_mem_write[memory][i]) {
2723 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2724 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2725 }
2726 }
2727 }
2728
2729 return 0;
2730}
2731
2732static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2733 ram_addr_t orig_memory)
2734{
2735 subpage_t *mmio;
2736 int subpage_memory;
2737
2738 mmio = qemu_mallocz(sizeof(subpage_t));
2739 if (mmio != NULL) {
2740 mmio->base = base;
2741 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2742#if defined(DEBUG_SUBPAGE)
2743 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2744 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2745#endif
2746 *phys = subpage_memory | IO_MEM_SUBPAGE;
2747 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2748 }
2749
2750 return mmio;
2751}
2752
2753static void io_mem_init(void)
2754{
2755 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2756 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2757 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2758 io_mem_nb = 5;
2759
2760 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2761 watch_mem_write, NULL);
2762 /* alloc dirty bits array */
2763 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2764 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2765}
2766
2767/* mem_read and mem_write are arrays of functions that access a byte
2768 (index 0), a word (index 1) and a dword (index 2). Functions can be
2769 omitted with a NULL function pointer. The registered functions may be
2770 modified dynamically later.
2771 If io_index is non-zero, the corresponding io zone is
2772 modified. If it is zero, a new io zone is allocated. The return
2773 value can be used with cpu_register_physical_memory(). (-1) is
2774 returned on error. */
2775int cpu_register_io_memory(int io_index,
2776 CPUReadMemoryFunc **mem_read,
2777 CPUWriteMemoryFunc **mem_write,
2778 void *opaque)
2779{
2780 int i, subwidth = 0;
2781
2782 if (io_index <= 0) {
2783 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2784 return -1;
2785 io_index = io_mem_nb++;
2786 } else {
2787 if (io_index >= IO_MEM_NB_ENTRIES)
2788 return -1;
2789 }
2790
2791 for(i = 0;i < 3; i++) {
2792 if (!mem_read[i] || !mem_write[i])
2793 subwidth = IO_MEM_SUBWIDTH;
2794 io_mem_read[io_index][i] = mem_read[i];
2795 io_mem_write[io_index][i] = mem_write[i];
2796 }
2797 io_mem_opaque[io_index] = opaque;
2798 return (io_index << IO_MEM_SHIFT) | subwidth;
2799}
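
/* Illustrative usage sketch, not part of the original file: a device model
   registers byte/word/long callbacks and maps the returned index at a
   physical address.  The my_dev_* callbacks, the opaque pointer s and the
   0x10000000 base are hypothetical.

       static CPUReadMemoryFunc *my_dev_read[3] = {
           my_dev_readb, my_dev_readw, my_dev_readl,
       };
       static CPUWriteMemoryFunc *my_dev_write[3] = {
           my_dev_writeb, my_dev_writew, my_dev_writel,
       };

       int io = cpu_register_io_memory(0, my_dev_read, my_dev_write, s);
       cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, io);
*/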
2800
2801CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2802{
2803 return io_mem_write[io_index >> IO_MEM_SHIFT];
2804}
2805
2806CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2807{
2808 return io_mem_read[io_index >> IO_MEM_SHIFT];
2809}
2810
2811#endif /* !defined(CONFIG_USER_ONLY) */
2812
2813/* physical memory access (slow version, mainly for debug) */
2814#if defined(CONFIG_USER_ONLY)
2815void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2816 int len, int is_write)
2817{
2818 int l, flags;
2819 target_ulong page;
2820 void * p;
2821
2822 while (len > 0) {
2823 page = addr & TARGET_PAGE_MASK;
2824 l = (page + TARGET_PAGE_SIZE) - addr;
2825 if (l > len)
2826 l = len;
2827 flags = page_get_flags(page);
2828 if (!(flags & PAGE_VALID))
2829 return;
2830 if (is_write) {
2831 if (!(flags & PAGE_WRITE))
2832 return;
2833 /* XXX: this code should not depend on lock_user */
2834 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2835 /* FIXME - should this return an error rather than just fail? */
2836 return;
2837 memcpy(p, buf, l);
2838 unlock_user(p, addr, l);
2839 } else {
2840 if (!(flags & PAGE_READ))
2841 return;
2842 /* XXX: this code should not depend on lock_user */
2843 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2844 /* FIXME - should this return an error rather than just fail? */
2845 return;
2846 memcpy(buf, p, l);
2847 unlock_user(p, addr, 0);
2848 }
2849 len -= l;
2850 buf += l;
2851 addr += l;
2852 }
2853}
2854
2855#else
2856void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2857 int len, int is_write)
2858{
2859 int l, io_index;
2860 uint8_t *ptr;
2861 uint32_t val;
2862 target_phys_addr_t page;
2863 unsigned long pd;
2864 PhysPageDesc *p;
2865
2866 while (len > 0) {
2867 page = addr & TARGET_PAGE_MASK;
2868 l = (page + TARGET_PAGE_SIZE) - addr;
2869 if (l > len)
2870 l = len;
2871 p = phys_page_find(page >> TARGET_PAGE_BITS);
2872 if (!p) {
2873 pd = IO_MEM_UNASSIGNED;
2874 } else {
2875 pd = p->phys_offset;
2876 }
2877
2878 if (is_write) {
2879 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2880 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2881 /* XXX: could force cpu_single_env to NULL to avoid
2882 potential bugs */
2883 if (l >= 4 && ((addr & 3) == 0)) {
2884 /* 32 bit write access */
2885 val = ldl_p(buf);
2886 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2887 l = 4;
2888 } else if (l >= 2 && ((addr & 1) == 0)) {
2889 /* 16 bit write access */
2890 val = lduw_p(buf);
2891 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2892 l = 2;
2893 } else {
2894 /* 8 bit write access */
2895 val = ldub_p(buf);
2896 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2897 l = 1;
2898 }
2899 } else {
2900 unsigned long addr1;
2901 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2902 /* RAM case */
2903 ptr = phys_ram_base + addr1;
2904 memcpy(ptr, buf, l);
2905 if (!cpu_physical_memory_is_dirty(addr1)) {
2906 /* invalidate code */
2907 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2908 /* set dirty bit */
2909 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2910 (0xff & ~CODE_DIRTY_FLAG);
2911 }
2912 }
2913 } else {
2914 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2915 !(pd & IO_MEM_ROMD)) {
2916 /* I/O case */
2917 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2918 if (l >= 4 && ((addr & 3) == 0)) {
2919 /* 32 bit read access */
2920 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2921 stl_p(buf, val);
2922 l = 4;
2923 } else if (l >= 2 && ((addr & 1) == 0)) {
2924 /* 16 bit read access */
2925 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2926 stw_p(buf, val);
2927 l = 2;
2928 } else {
2929 /* 8 bit read access */
2930 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2931 stb_p(buf, val);
2932 l = 1;
2933 }
2934 } else {
2935 /* RAM case */
2936 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2937 (addr & ~TARGET_PAGE_MASK);
2938 memcpy(buf, ptr, l);
2939 }
2940 }
2941 len -= l;
2942 buf += l;
2943 addr += l;
2944 }
2945}
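
/* Illustrative usage sketch, not part of the original file: callers normally
   use the cpu_physical_memory_read()/cpu_physical_memory_write() wrappers
   from cpu-all.h, which expand to the function above.  dma_addr is
   hypothetical.

       uint8_t buf[4];
       cpu_physical_memory_read(dma_addr, buf, sizeof(buf));
       cpu_physical_memory_write(dma_addr, buf, sizeof(buf));
*/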
2946
2947/* used for ROM loading: can write in RAM and ROM */
2948void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2949 const uint8_t *buf, int len)
2950{
2951 int l;
2952 uint8_t *ptr;
2953 target_phys_addr_t page;
2954 unsigned long pd;
2955 PhysPageDesc *p;
2956
2957 while (len > 0) {
2958 page = addr & TARGET_PAGE_MASK;
2959 l = (page + TARGET_PAGE_SIZE) - addr;
2960 if (l > len)
2961 l = len;
2962 p = phys_page_find(page >> TARGET_PAGE_BITS);
2963 if (!p) {
2964 pd = IO_MEM_UNASSIGNED;
2965 } else {
2966 pd = p->phys_offset;
2967 }
2968
2969 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2970 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2971 !(pd & IO_MEM_ROMD)) {
2972 /* do nothing */
2973 } else {
2974 unsigned long addr1;
2975 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2976 /* ROM/RAM case */
2977 ptr = phys_ram_base + addr1;
2978 memcpy(ptr, buf, l);
2979 }
2980 len -= l;
2981 buf += l;
2982 addr += l;
2983 }
2984}
2985
2986
2987/* warning: addr must be aligned */
2988uint32_t ldl_phys(target_phys_addr_t addr)
2989{
2990 int io_index;
2991 uint8_t *ptr;
2992 uint32_t val;
2993 unsigned long pd;
2994 PhysPageDesc *p;
2995
2996 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2997 if (!p) {
2998 pd = IO_MEM_UNASSIGNED;
2999 } else {
3000 pd = p->phys_offset;
3001 }
3002
3003 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3004 !(pd & IO_MEM_ROMD)) {
3005 /* I/O case */
3006 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3007 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3008 } else {
3009 /* RAM case */
3010 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3011 (addr & ~TARGET_PAGE_MASK);
3012 val = ldl_p(ptr);
3013 }
3014 return val;
3015}
3016
3017/* warning: addr must be aligned */
3018uint64_t ldq_phys(target_phys_addr_t addr)
3019{
3020 int io_index;
3021 uint8_t *ptr;
3022 uint64_t val;
3023 unsigned long pd;
3024 PhysPageDesc *p;
3025
3026 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3027 if (!p) {
3028 pd = IO_MEM_UNASSIGNED;
3029 } else {
3030 pd = p->phys_offset;
3031 }
3032
3033 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3034 !(pd & IO_MEM_ROMD)) {
3035 /* I/O case */
3036 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3037#ifdef TARGET_WORDS_BIGENDIAN
3038 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3039 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3040#else
3041 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3042 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3043#endif
3044 } else {
3045 /* RAM case */
3046 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3047 (addr & ~TARGET_PAGE_MASK);
3048 val = ldq_p(ptr);
3049 }
3050 return val;
3051}
3052
3053/* XXX: optimize */
3054uint32_t ldub_phys(target_phys_addr_t addr)
3055{
3056 uint8_t val;
3057 cpu_physical_memory_read(addr, &val, 1);
3058 return val;
3059}
3060
3061/* XXX: optimize */
3062uint32_t lduw_phys(target_phys_addr_t addr)
3063{
3064 uint16_t val;
3065 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3066 return tswap16(val);
3067}
3068
3069/* warning: addr must be aligned. The ram page is not marked as dirty
3070 and the code inside is not invalidated. It is useful if the dirty
3071 bits are used to track modified PTEs */
3072void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3073{
3074 int io_index;
3075 uint8_t *ptr;
3076 unsigned long pd;
3077 PhysPageDesc *p;
3078
3079 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3080 if (!p) {
3081 pd = IO_MEM_UNASSIGNED;
3082 } else {
3083 pd = p->phys_offset;
3084 }
3085
3086 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3087 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3088 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3089 } else {
3090 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3091 ptr = phys_ram_base + addr1;
3092 stl_p(ptr, val);
3093
3094 if (unlikely(in_migration)) {
3095 if (!cpu_physical_memory_is_dirty(addr1)) {
3096 /* invalidate code */
3097 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3098 /* set dirty bit */
3099 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3100 (0xff & ~CODE_DIRTY_FLAG);
3101 }
3102 }
3103 }
3104}
3105
3106void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3107{
3108 int io_index;
3109 uint8_t *ptr;
3110 unsigned long pd;
3111 PhysPageDesc *p;
3112
3113 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3114 if (!p) {
3115 pd = IO_MEM_UNASSIGNED;
3116 } else {
3117 pd = p->phys_offset;
3118 }
3119
3120 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3121 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3122#ifdef TARGET_WORDS_BIGENDIAN
3123 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3124 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3125#else
3126 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3127 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3128#endif
3129 } else {
3130 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3131 (addr & ~TARGET_PAGE_MASK);
3132 stq_p(ptr, val);
3133 }
3134}
3135
3136/* warning: addr must be aligned */
3137void stl_phys(target_phys_addr_t addr, uint32_t val)
3138{
3139 int io_index;
3140 uint8_t *ptr;
3141 unsigned long pd;
3142 PhysPageDesc *p;
3143
3144 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3145 if (!p) {
3146 pd = IO_MEM_UNASSIGNED;
3147 } else {
3148 pd = p->phys_offset;
3149 }
3150
3151 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3152 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3153 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3154 } else {
3155 unsigned long addr1;
3156 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3157 /* RAM case */
3158 ptr = phys_ram_base + addr1;
3159 stl_p(ptr, val);
3160 if (!cpu_physical_memory_is_dirty(addr1)) {
3161 /* invalidate code */
3162 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3163 /* set dirty bit */
3164 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3165 (0xff & ~CODE_DIRTY_FLAG);
3166 }
3167 }
3168}
3169
3170/* XXX: optimize */
3171void stb_phys(target_phys_addr_t addr, uint32_t val)
3172{
3173 uint8_t v = val;
3174 cpu_physical_memory_write(addr, &v, 1);
3175}
3176
3177/* XXX: optimize */
3178void stw_phys(target_phys_addr_t addr, uint32_t val)
3179{
3180 uint16_t v = tswap16(val);
3181 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3182}
3183
3184/* XXX: optimize */
3185void stq_phys(target_phys_addr_t addr, uint64_t val)
3186{
3187 val = tswap64(val);
3188 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3189}
3190
3191#endif
3192
3193/* virtual memory access for debug */
3194int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3195 uint8_t *buf, int len, int is_write)
3196{
3197 int l;
3198 target_phys_addr_t phys_addr;
3199 target_ulong page;
3200
3201 while (len > 0) {
3202 page = addr & TARGET_PAGE_MASK;
3203 phys_addr = cpu_get_phys_page_debug(env, page);
3204 /* if no physical page mapped, return an error */
3205 if (phys_addr == -1)
3206 return -1;
3207 l = (page + TARGET_PAGE_SIZE) - addr;
3208 if (l > len)
3209 l = len;
3210 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3211 buf, l, is_write);
3212 len -= l;
3213 buf += l;
3214 addr += l;
3215 }
3216 return 0;
3217}
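
/* Illustrative usage sketch, not part of the original file: the gdb stub
   reads guest virtual memory through this helper and reports a failure when
   no physical page is mapped.  some_vaddr and report_read_error() are
   hypothetical.

       uint8_t word[4];
       if (cpu_memory_rw_debug(env, some_vaddr, word, sizeof(word), 0) != 0)
           report_read_error();
*/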
3218
3219/* in deterministic execution mode, instructions doing device I/Os
3220 must be at the end of the TB */
3221void cpu_io_recompile(CPUState *env, void *retaddr)
3222{
3223 TranslationBlock *tb;
3224 uint32_t n, cflags;
3225 target_ulong pc, cs_base;
3226 uint64_t flags;
3227
3228 tb = tb_find_pc((unsigned long)retaddr);
3229 if (!tb) {
3230 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3231 retaddr);
3232 }
3233 n = env->icount_decr.u16.low + tb->icount;
3234 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3235 /* Calculate how many instructions had been executed before the fault
3236 occurred. */
3237 n = n - env->icount_decr.u16.low;
3238 /* Generate a new TB ending on the I/O insn. */
3239 n++;
3240 /* On MIPS and SH, delay slot instructions can only be restarted if
3241 they were already the first instruction in the TB. If this is not
3242 the first instruction in a TB then re-execute the preceding
3243 branch. */
3244#if defined(TARGET_MIPS)
3245 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3246 env->active_tc.PC -= 4;
3247 env->icount_decr.u16.low++;
3248 env->hflags &= ~MIPS_HFLAG_BMASK;
3249 }
3250#elif defined(TARGET_SH4)
3251 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3252 && n > 1) {
3253 env->pc -= 2;
3254 env->icount_decr.u16.low++;
3255 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3256 }
3257#endif
3258 /* This should never happen. */
3259 if (n > CF_COUNT_MASK)
3260 cpu_abort(env, "TB too big during recompile");
3261
3262 cflags = n | CF_LAST_IO;
3263 pc = tb->pc;
3264 cs_base = tb->cs_base;
3265 flags = tb->flags;
3266 tb_phys_invalidate(tb, -1);
3267 /* FIXME: In theory this could raise an exception. In practice
3268 we have already translated the block once so it's probably ok. */
3269 tb_gen_code(env, pc, cs_base, flags, cflags);
3270 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3271 the first in the TB) then we end up generating a whole new TB and
3272 repeating the fault, which is horribly inefficient.
3273 Better would be to execute just this insn uncached, or generate a
3274 second new TB. */
3275 cpu_resume_from_signal(env, NULL);
3276}
3277
3278void dump_exec_info(FILE *f,
3279 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3280{
3281 int i, target_code_size, max_target_code_size;
3282 int direct_jmp_count, direct_jmp2_count, cross_page;
3283 TranslationBlock *tb;
3284
3285 target_code_size = 0;
3286 max_target_code_size = 0;
3287 cross_page = 0;
3288 direct_jmp_count = 0;
3289 direct_jmp2_count = 0;
3290 for(i = 0; i < nb_tbs; i++) {
3291 tb = &tbs[i];
3292 target_code_size += tb->size;
3293 if (tb->size > max_target_code_size)
3294 max_target_code_size = tb->size;
3295 if (tb->page_addr[1] != -1)
3296 cross_page++;
3297 if (tb->tb_next_offset[0] != 0xffff) {
3298 direct_jmp_count++;
3299 if (tb->tb_next_offset[1] != 0xffff) {
3300 direct_jmp2_count++;
3301 }
3302 }
3303 }
3304 /* XXX: avoid using doubles ? */
3305 cpu_fprintf(f, "Translation buffer state:\n");
3306 cpu_fprintf(f, "gen code size %ld/%ld\n",
3307 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3308 cpu_fprintf(f, "TB count %d/%d\n",
3309 nb_tbs, code_gen_max_blocks);
3310 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3311 nb_tbs ? target_code_size / nb_tbs : 0,
3312 max_target_code_size);
3313 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3314 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3315 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3316 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3317 cross_page,
3318 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3319 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3320 direct_jmp_count,
3321 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3322 direct_jmp2_count,
3323 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3324 cpu_fprintf(f, "\nStatistics:\n");
3325 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3326 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3327 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3328 tcg_dump_info(f, cpu_fprintf);
3329}
3330
3331#if !defined(CONFIG_USER_ONLY)
3332
3333#define MMUSUFFIX _cmmu
3334#define GETPC() NULL
3335#define env cpu_single_env
3336#define SOFTMMU_CODE_ACCESS
3337
3338#define SHIFT 0
3339#include "softmmu_template.h"
3340
3341#define SHIFT 1
3342#include "softmmu_template.h"
3343
3344#define SHIFT 2
3345#include "softmmu_template.h"
3346
3347#define SHIFT 3
3348#include "softmmu_template.h"
3349
3350#undef env
3351
3352#endif