/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
uint8_t *code_gen_buffer;
unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for the alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64-bit address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

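/* For illustration, assuming a 4 KB target page (TARGET_PAGE_BITS = 12,
   a target-dependent value): the 20-bit page index of a 32-bit virtual
   address is split as

       l1_map[index >> L2_BITS]          top L1_BITS (10) bits
             [index & (L2_SIZE - 1)]     low L2_BITS (10) bits

   so each second-level table covers 1024 pages, i.e. 4 MB of guest
   address space. */
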
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

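/* Example (assuming a 4 KB host page, queried at run time): a call such
   as map_exec((void *)0x804a123, 0x100) rounds outward to the enclosing
   page range and performs
       mprotect((void *)0x804a000, 0x1000,
                PROT_READ | PROT_WRITE | PROT_EXEC);
   so the whole page containing the buffer becomes executable. */
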
static void page_init(void)
{
    /* NOTE: we can always assume that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf(f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

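/* For illustration: with a 4 KB host page the loop above yields
   qemu_host_page_bits = 12 and qemu_host_page_mask = ~0xfff; the real
   page size is whatever the OS reports at run time. */
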
static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

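/* Note: with TARGET_PHYS_ADDR_SPACE_BITS <= 32 the #if above compiles
   out and l1_phys_map is a single-level table; wider physical address
   spaces (up to 32 + L1_BITS bits) get one extra level of indirection.
   Unlike page_find_alloc(), fresh entries are filled with
   IO_MEM_UNASSIGNED rather than zero, so accesses to unassigned
   physical memory can be detected. */
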
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change once a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (int)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(NULL, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

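/* The TB lists manipulated below store extra state in the two
   low-order bits of each pointer, relying on TranslationBlock having
   at least 4-byte alignment: in the page_next[] chains the tag is the
   page slot (0 or 1) occupied by the next TB, and in the circular
   jmp_first/jmp_next[] lists the tag value 2 marks the list head.
   Hence the recurring idiom:

       n1  = (long)tb1 & 3;                          // extract the tag
       tb1 = (TranslationBlock *)((long)tb1 & ~3);   // recover the pointer
*/
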
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can remove tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

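/* Worked example: set_bits(tab, 3, 7) marks bits [3, 10) and touches
   two bytes: tab[0] |= 0xf8 (bits 3..7), then tab[1] |= 0x03 (bits
   8..9). When start and end fall within the same byte, only the
   single-byte branch runs. */
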
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

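/* Note: the code_gen_ptr update in tb_gen_code() is the usual align-up
   idiom (x + align - 1) & ~(align - 1). For example, with an alignment
   of 16 (CODE_GEN_ALIGN is host dependent), a 100-byte block advances
   the pointer by 112 so the next block starts aligned. */
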
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

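/* For illustration: a 4-byte write at page offset 0x10 checks
   b = code_bitmap[2] >> 0 and takes the invalidate path only if one of
   the low 4 bits is set, i.e. only if translated code actually overlaps
   the bytes being written; otherwise no TB scan is needed. */
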
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

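/* The binary search above is valid because tb_alloc() hands out tbs[]
   entries sequentially while code_gen_ptr only grows between flushes,
   so tbs[] is sorted by tc_ptr. When tc_ptr points into the middle of
   a block, the loop exits with tbs[m_max] being the last TB starting
   at or before tc_ptr, which is the enclosing one. */
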
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    env->watchpoint[i].type = type;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB. It can be removed once the proper IO trap and
       re-execute bits are in. */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* Remove all watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        tlb_flush_page(env, env->watchpoint[i].vaddr);
    }
    env->nb_watchpoints = 0;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    }
    env->nb_breakpoints = 0;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
#endif

    /* FIXME: This is probably not threadsafe. A different thread could
       be in the middle of a read-modify-write operation. */
    env->interrupt_request |= mask;
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
       problem and hope the cpu will stop of its own accord. For userspace
       emulation this often isn't actually as bad as it sounds. Often
       signals are used primarily to interrupt blocking syscalls. */
#else
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
#endif
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

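/* Usage example: cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, and "all" selects every entry of
   cpu_log_items[]; any unknown name makes the whole call return 0. */
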
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

1625
3a7d929e 1626void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1627 int dirty_flags)
1ccde1cb
FB
1628{
1629 CPUState *env;
4f2ac237 1630 unsigned long length, start1;
0a962c02
FB
1631 int i, mask, len;
1632 uint8_t *p;
1ccde1cb
FB
1633
1634 start &= TARGET_PAGE_MASK;
1635 end = TARGET_PAGE_ALIGN(end);
1636
1637 length = end - start;
1638 if (length == 0)
1639 return;
0a962c02 1640 len = length >> TARGET_PAGE_BITS;
3a7d929e 1641#ifdef USE_KQEMU
6a00d601
FB
1642 /* XXX: should not depend on cpu context */
1643 env = first_cpu;
3a7d929e 1644 if (env->kqemu_enabled) {
f23db169
FB
1645 ram_addr_t addr;
1646 addr = start;
1647 for(i = 0; i < len; i++) {
1648 kqemu_set_notdirty(env, addr);
1649 addr += TARGET_PAGE_SIZE;
1650 }
3a7d929e
FB
1651 }
1652#endif
f23db169
FB
1653 mask = ~dirty_flags;
1654 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1655 for(i = 0; i < len; i++)
1656 p[i] &= mask;
1657
1ccde1cb
FB
1658 /* we modify the TLB cache so that the dirty bit will be set again
1659 when accessing the range */
59817ccb 1660 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1661 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1662 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1663 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1664 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1665 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1666#if (NB_MMU_MODES >= 3)
1667 for(i = 0; i < CPU_TLB_SIZE; i++)
1668 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1669#if (NB_MMU_MODES == 4)
1670 for(i = 0; i < CPU_TLB_SIZE; i++)
1671 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1672#endif
1673#endif
6a00d601 1674 }
1ccde1cb
FB
1675}
1676
3a7d929e
FB
1677static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1678{
1679 ram_addr_t ram_addr;
1680
84b7b8e7 1681 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1682 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1683 tlb_entry->addend - (unsigned long)phys_ram_base;
1684 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 1685 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
1686 }
1687 }
1688}
1689
1690/* update the TLB according to the current state of the dirty bits */
1691void cpu_tlb_update_dirty(CPUState *env)
1692{
1693 int i;
1694 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1695 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1696 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1697 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1698#if (NB_MMU_MODES >= 3)
1699 for(i = 0; i < CPU_TLB_SIZE; i++)
1700 tlb_update_dirty(&env->tlb_table[2][i]);
1701#if (NB_MMU_MODES == 4)
1702 for(i = 0; i < CPU_TLB_SIZE; i++)
1703 tlb_update_dirty(&env->tlb_table[3][i]);
1704#endif
1705#endif
3a7d929e
FB
1706}
1707
0f459d16 1708static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 1709{
0f459d16
PB
1710 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1711 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
1712}
1713
0f459d16
PB
1714/* update the TLB corresponding to virtual page vaddr
1715 so that it is no longer dirty */
1716static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 1717{
1ccde1cb
FB
1718 int i;
1719
0f459d16 1720 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 1721 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
0f459d16
PB
1722 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1723 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
6fa4cea9 1724#if (NB_MMU_MODES >= 3)
0f459d16 1725 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
6fa4cea9 1726#if (NB_MMU_MODES == 4)
0f459d16 1727 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
6fa4cea9
JM
1728#endif
1729#endif
9fa3e853
FB
1730}
1731
59817ccb
FB
1732/* add a new TLB entry. At most one entry for a given virtual address
1733 is permitted. Return 0 if OK or 2 if the page could not be mapped
1734 (can only happen in non SOFTMMU mode for I/O pages or pages
1735 conflicting with the host address space). */
5fafdf24
TS
1736int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1737 target_phys_addr_t paddr, int prot,
6ebbf390 1738 int mmu_idx, int is_softmmu)
9fa3e853 1739{
92e873b9 1740 PhysPageDesc *p;
4f2ac237 1741 unsigned long pd;
9fa3e853 1742 unsigned int index;
4f2ac237 1743 target_ulong address;
0f459d16 1744 target_ulong code_address;
108c49b8 1745 target_phys_addr_t addend;
9fa3e853 1746 int ret;
84b7b8e7 1747 CPUTLBEntry *te;
6658ffb8 1748 int i;
0f459d16 1749 target_phys_addr_t iotlb;
9fa3e853 1750
92e873b9 1751 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1752 if (!p) {
1753 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1754 } else {
1755 pd = p->phys_offset;
9fa3e853
FB
1756 }
1757#if defined(DEBUG_TLB)
6ebbf390
JM
1758 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1759 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1760#endif
1761
1762 ret = 0;
0f459d16
PB
1763 address = vaddr;
1764 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1765 /* IO memory case (romd handled later) */
1766 address |= TLB_MMIO;
1767 }
1768 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1769 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1770 /* Normal RAM. */
1771 iotlb = pd & TARGET_PAGE_MASK;
1772 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1773 iotlb |= IO_MEM_NOTDIRTY;
1774 else
1775 iotlb |= IO_MEM_ROM;
1776 } else {
1777 /* IO handlers are currently passed a physical address.
1778 It would be nice to pass an offset from the base address
1779 of that region. This would avoid having to special case RAM,
1780 and avoid full address decoding in every device.
1781 We can't use the high bits of pd for this because
1782 IO_MEM_ROMD uses these as a ram address. */
1783 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1784 }
1785
1786 code_address = address;
1787 /* Make accesses to pages with watchpoints go via the
1788 watchpoint trap routines. */
1789 for (i = 0; i < env->nb_watchpoints; i++) {
1790 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1791 iotlb = io_mem_watch + paddr;
1792 /* TODO: The memory case can be optimized by not trapping
1793 reads of pages with a write breakpoint. */
1794 address |= TLB_MMIO;
6658ffb8 1795 }
0f459d16 1796 }
d79acba4 1797
0f459d16
PB
1798 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1799 env->iotlb[mmu_idx][index] = iotlb - vaddr;
1800 te = &env->tlb_table[mmu_idx][index];
1801 te->addend = addend - vaddr;
1802 if (prot & PAGE_READ) {
1803 te->addr_read = address;
1804 } else {
1805 te->addr_read = -1;
1806 }
5c751e99 1807
0f459d16
PB
1808 if (prot & PAGE_EXEC) {
1809 te->addr_code = code_address;
1810 } else {
1811 te->addr_code = -1;
1812 }
1813 if (prot & PAGE_WRITE) {
1814 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1815 (pd & IO_MEM_ROMD)) {
1816 /* Write access calls the I/O callback. */
1817 te->addr_write = address | TLB_MMIO;
1818 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1819 !cpu_physical_memory_is_dirty(pd)) {
1820 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 1821 } else {
0f459d16 1822 te->addr_write = address;
9fa3e853 1823 }
0f459d16
PB
1824 } else {
1825 te->addr_write = -1;
9fa3e853 1826 }
9fa3e853
FB
1827 return ret;
1828}
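/* Illustrative sketch, not part of the original file: a target's MMU
   fault handler typically resolves vaddr -> paddr and then installs
   the mapping with tlb_set_page_exec(). The identity mapping and the
   fixed RWX protection below are assumptions made for the example. */
#if 0
static int example_handle_mmu_fault(CPUState *env, target_ulong addr,
                                    int mmu_idx, int is_softmmu)
{
    target_ulong vaddr = addr & TARGET_PAGE_MASK;
    target_phys_addr_t paddr = vaddr;   /* assumed flat/identity MMU */
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* at most one TLB entry per virtual page; returns 0 on success */
    return tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
}
#endif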
1829
0124311e
FB
1830#else
1831
ee8b7021 1832void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1833{
1834}
1835
2e12669a 1836void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1837{
1838}
1839
5fafdf24
TS
1840int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1841 target_phys_addr_t paddr, int prot,
6ebbf390 1842 int mmu_idx, int is_softmmu)
9fa3e853
FB
1843{
1844 return 0;
1845}
0124311e 1846
9fa3e853
FB
1847/* dump memory mappings */
1848void page_dump(FILE *f)
33417e70 1849{
9fa3e853
FB
1850 unsigned long start, end;
1851 int i, j, prot, prot1;
1852 PageDesc *p;
33417e70 1853
9fa3e853
FB
1854 fprintf(f, "%-8s %-8s %-8s %s\n",
1855 "start", "end", "size", "prot");
1856 start = -1;
1857 end = -1;
1858 prot = 0;
1859 for(i = 0; i <= L1_SIZE; i++) {
1860 if (i < L1_SIZE)
1861 p = l1_map[i];
1862 else
1863 p = NULL;
1864 for(j = 0;j < L2_SIZE; j++) {
1865 if (!p)
1866 prot1 = 0;
1867 else
1868 prot1 = p[j].flags;
1869 if (prot1 != prot) {
1870 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1871 if (start != -1) {
1872 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 1873 start, end, end - start,
9fa3e853
FB
1874 prot & PAGE_READ ? 'r' : '-',
1875 prot & PAGE_WRITE ? 'w' : '-',
1876 prot & PAGE_EXEC ? 'x' : '-');
1877 }
1878 if (prot1 != 0)
1879 start = end;
1880 else
1881 start = -1;
1882 prot = prot1;
1883 }
1884 if (!p)
1885 break;
1886 }
33417e70 1887 }
33417e70
FB
1888}
1889
53a5960a 1890int page_get_flags(target_ulong address)
33417e70 1891{
9fa3e853
FB
1892 PageDesc *p;
1893
1894 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1895 if (!p)
9fa3e853
FB
1896 return 0;
1897 return p->flags;
1898}
1899
1900/* modify the flags of a page and invalidate the code if
1901 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1902 depending on PAGE_WRITE */
53a5960a 1903void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1904{
1905 PageDesc *p;
53a5960a 1906 target_ulong addr;
9fa3e853 1907
c8a706fe 1908 /* mmap_lock should already be held. */
9fa3e853
FB
1909 start = start & TARGET_PAGE_MASK;
1910 end = TARGET_PAGE_ALIGN(end);
1911 if (flags & PAGE_WRITE)
1912 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
1913 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1914 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1915 /* if the write protection is set, then we invalidate the code
1916 inside */
5fafdf24 1917 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
1918 (flags & PAGE_WRITE) &&
1919 p->first_tb) {
d720b93d 1920 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1921 }
1922 p->flags = flags;
1923 }
33417e70
FB
1924}
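/* Sketch only, not from the source: after a successful target mmap(),
   user-mode emulation records the new protection like this. mmap_lock
   is assumed to be held by the caller, as required above; the flag
   combination is an example value. */
#if 0
static void example_after_target_mmap(target_ulong start, target_ulong len)
{
    /* PAGE_WRITE_ORG is added automatically when PAGE_WRITE is set */
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC);
}
#endif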
1925
3d97b40b
TS
1926int page_check_range(target_ulong start, target_ulong len, int flags)
1927{
1928 PageDesc *p;
1929 target_ulong end;
1930 target_ulong addr;
1931
1932 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1933 start = start & TARGET_PAGE_MASK;
1934
1935 if( end < start )
1936 /* we've wrapped around */
1937 return -1;
1938 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1939 p = page_find(addr >> TARGET_PAGE_BITS);
1940 if( !p )
1941 return -1;
1942 if( !(p->flags & PAGE_VALID) )
1943 return -1;
1944
dae3270c 1945 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 1946 return -1;
dae3270c
FB
1947 if (flags & PAGE_WRITE) {
1948 if (!(p->flags & PAGE_WRITE_ORG))
1949 return -1;
1950 /* unprotect the page if it was put read-only because it
1951 contains translated code */
1952 if (!(p->flags & PAGE_WRITE)) {
1953 if (!page_unprotect(addr, 0, NULL))
1954 return -1;
1955 }
1956 return 0;
1957 }
3d97b40b
TS
1958 }
1959 return 0;
1960}
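/* Sketch only: a syscall helper in user-mode emulation can validate a
   guest buffer before touching it, in the spirit of access_ok(). The
   helper name and its argument convention are hypothetical. */
#if 0
static int example_buffer_ok(target_ulong guest_addr, target_ulong size,
                             int is_write)
{
    int flags = is_write ? (PAGE_READ | PAGE_WRITE) : PAGE_READ;

    /* returns 0 if every page in the range allows the requested access */
    return page_check_range(guest_addr, size, flags) == 0;
}
#endif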
1961
9fa3e853
FB
1962/* called from signal handler: invalidate the code and unprotect the
1963 page. Return TRUE if the fault was successfully handled. */
53a5960a 1964int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
1965{
1966 unsigned int page_index, prot, pindex;
1967 PageDesc *p, *p1;
53a5960a 1968 target_ulong host_start, host_end, addr;
9fa3e853 1969
c8a706fe
PB
1970 /* Technically this isn't safe inside a signal handler. However we
1971 know this only ever happens in a synchronous SEGV handler, so in
1972 practice it seems to be ok. */
1973 mmap_lock();
1974
83fb7adf 1975 host_start = address & qemu_host_page_mask;
9fa3e853
FB
1976 page_index = host_start >> TARGET_PAGE_BITS;
1977 p1 = page_find(page_index);
c8a706fe
PB
1978 if (!p1) {
1979 mmap_unlock();
9fa3e853 1980 return 0;
c8a706fe 1981 }
83fb7adf 1982 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
1983 p = p1;
1984 prot = 0;
1985 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1986 prot |= p->flags;
1987 p++;
1988 }
1989 /* if the page was really writable, then we change its
1990 protection back to writable */
1991 if (prot & PAGE_WRITE_ORG) {
1992 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1993 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 1994 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
1995 (prot & PAGE_BITS) | PAGE_WRITE);
1996 p1[pindex].flags |= PAGE_WRITE;
1997 /* and since the content will be modified, we must invalidate
1998 the corresponding translated code. */
d720b93d 1999 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2000#ifdef DEBUG_TB_CHECK
2001 tb_invalidate_check(address);
2002#endif
c8a706fe 2003 mmap_unlock();
9fa3e853
FB
2004 return 1;
2005 }
2006 }
c8a706fe 2007 mmap_unlock();
9fa3e853
FB
2008 return 0;
2009}
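/* Sketch only: the host SEGV handler for user-mode emulation first gives
   page_unprotect() a chance to handle faults on pages that were
   write-protected because they contain translated code. The handler
   name and its arguments are illustrative. */
#if 0
static int example_handle_host_segv(unsigned long host_pc, void *fault_addr,
                                    void *puc)
{
    target_ulong guest_addr = h2g((unsigned long)fault_addr);

    if (page_unprotect(guest_addr, host_pc, puc))
        return 1;   /* handled: page writable again, stale TBs invalidated */
    return 0;       /* a real guest fault: deliver it to the target CPU */
}
#endif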
2010
6a00d601
FB
2011static inline void tlb_set_dirty(CPUState *env,
2012 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2013{
2014}
9fa3e853
FB
2015#endif /* defined(CONFIG_USER_ONLY) */
2016
e2eef170 2017#if !defined(CONFIG_USER_ONLY)
db7b5426 2018static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a
AJ
2019 ram_addr_t memory);
2020static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2021 ram_addr_t orig_memory);
db7b5426
BS
2022#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2023 need_subpage) \
2024 do { \
2025 if (addr > start_addr) \
2026 start_addr2 = 0; \
2027 else { \
2028 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2029 if (start_addr2 > 0) \
2030 need_subpage = 1; \
2031 } \
2032 \
49e9fba2 2033 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2034 end_addr2 = TARGET_PAGE_SIZE - 1; \
2035 else { \
2036 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2037 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2038 need_subpage = 1; \
2039 } \
2040 } while (0)
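/* Worked example (illustrative, assuming 4 KiB target pages): registering
   a 1 KiB region at start_addr = 0x1800 with orig_size = 0x400. For the
   page at addr = 0x1000, addr <= start_addr, so
   start_addr2 = 0x1800 & 0xfff = 0x800 and need_subpage is set; and
   (start_addr + orig_size) - addr = 0xc00 < TARGET_PAGE_SIZE, so
   end_addr2 = (0x1800 + 0x400 - 1) & 0xfff = 0xbff, again setting
   need_subpage. Only bytes [0x800, 0xbff] of that page are then routed
   to the new handlers via the subpage machinery. */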
2041
33417e70
FB
2042/* register physical memory. 'size' must be a multiple of the target
2043 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2044 io memory page */
5fafdf24 2045void cpu_register_physical_memory(target_phys_addr_t start_addr,
00f82b8a
AJ
2046 ram_addr_t size,
2047 ram_addr_t phys_offset)
33417e70 2048{
108c49b8 2049 target_phys_addr_t addr, end_addr;
92e873b9 2050 PhysPageDesc *p;
9d42037b 2051 CPUState *env;
00f82b8a 2052 ram_addr_t orig_size = size;
db7b5426 2053 void *subpage;
33417e70 2054
da260249
FB
2055#ifdef USE_KQEMU
2056 /* XXX: should not depend on cpu context */
2057 env = first_cpu;
2058 if (env->kqemu_enabled) {
2059 kqemu_set_phys_mem(start_addr, size, phys_offset);
2060 }
2061#endif
5fd386f6 2062 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2063 end_addr = start_addr + (target_phys_addr_t)size;
2064 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2065 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2066 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2067 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2068 target_phys_addr_t start_addr2, end_addr2;
2069 int need_subpage = 0;
2070
2071 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2072 need_subpage);
4254fab8 2073 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2074 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2075 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2076 &p->phys_offset, orig_memory);
2077 } else {
2078 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2079 >> IO_MEM_SHIFT];
2080 }
2081 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2082 } else {
2083 p->phys_offset = phys_offset;
2084 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2085 (phys_offset & IO_MEM_ROMD))
2086 phys_offset += TARGET_PAGE_SIZE;
2087 }
2088 } else {
2089 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2090 p->phys_offset = phys_offset;
2091 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2092 (phys_offset & IO_MEM_ROMD))
2093 phys_offset += TARGET_PAGE_SIZE;
2094 else {
2095 target_phys_addr_t start_addr2, end_addr2;
2096 int need_subpage = 0;
2097
2098 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2099 end_addr2, need_subpage);
2100
4254fab8 2101 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2102 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2103 &p->phys_offset, IO_MEM_UNASSIGNED);
2104 subpage_register(subpage, start_addr2, end_addr2,
2105 phys_offset);
2106 }
2107 }
2108 }
33417e70 2109 }
3b46e624 2110
9d42037b
FB
2111 /* since each CPU stores ram addresses in its TLB cache, we must
2112 reset the modified entries */
2113 /* XXX: slow ! */
2114 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2115 tlb_flush(env, 1);
2116 }
33417e70
FB
2117}
2118
ba863458 2119/* XXX: temporary until new memory mapping API */
00f82b8a 2120ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2121{
2122 PhysPageDesc *p;
2123
2124 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2125 if (!p)
2126 return IO_MEM_UNASSIGNED;
2127 return p->phys_offset;
2128}
2129
e9a1ab19 2130/* XXX: better than nothing */
00f82b8a 2131ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2132{
2133 ram_addr_t addr;
7fb4fdcf 2134 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
ed441467
FB
2135 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2136 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2137 abort();
2138 }
2139 addr = phys_ram_alloc_offset;
2140 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2141 return addr;
2142}
2143
2144void qemu_ram_free(ram_addr_t addr)
2145{
2146}
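/* Sketch only: the usual pairing of the two calls above, as done by
   board-level code of this era. The function name and the 8 MB size
   are example values, not taken from the source. */
#if 0
static void example_map_main_ram(void)
{
    ram_addr_t ram_size = 8 * 1024 * 1024;
    ram_addr_t ram_offset = qemu_ram_alloc(ram_size);

    /* phys_offset has no IO bits set, so this range is plain RAM */
    cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);
}
#endif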
2147
a4193c8a 2148static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2149{
67d3b957 2150#ifdef DEBUG_UNASSIGNED
ab3d1727 2151 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316
BS
2152#endif
2153#ifdef TARGET_SPARC
6c36d3fa 2154 do_unassigned_access(addr, 0, 0, 0);
f1ccf904
TS
2155#elif defined(TARGET_CRIS)
2156 do_unassigned_access(addr, 0, 0, 0);
67d3b957 2157#endif
33417e70
FB
2158 return 0;
2159}
2160
a4193c8a 2161static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2162{
67d3b957 2163#ifdef DEBUG_UNASSIGNED
ab3d1727 2164 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2165#endif
b4f0a316 2166#ifdef TARGET_SPARC
6c36d3fa 2167 do_unassigned_access(addr, 1, 0, 0);
f1ccf904
TS
2168#elif defined(TARGET_CRIS)
2169 do_unassigned_access(addr, 1, 0, 0);
b4f0a316 2170#endif
33417e70
FB
2171}
2172
2173static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2174 unassigned_mem_readb,
2175 unassigned_mem_readb,
2176 unassigned_mem_readb,
2177};
2178
2179static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2180 unassigned_mem_writeb,
2181 unassigned_mem_writeb,
2182 unassigned_mem_writeb,
2183};
2184
0f459d16
PB
2185static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2186 uint32_t val)
9fa3e853 2187{
3a7d929e 2188 int dirty_flags;
3a7d929e
FB
2189 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2190 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2191#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2192 tb_invalidate_phys_page_fast(ram_addr, 1);
2193 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2194#endif
3a7d929e 2195 }
0f459d16 2196 stb_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2197#ifdef USE_KQEMU
2198 if (cpu_single_env->kqemu_enabled &&
2199 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2200 kqemu_modify_page(cpu_single_env, ram_addr);
2201#endif
f23db169
FB
2202 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2203 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2204 /* we remove the notdirty callback only if the code has been
2205 flushed */
2206 if (dirty_flags == 0xff)
0f459d16 2207 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2208}
2209
0f459d16
PB
2210static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2211 uint32_t val)
9fa3e853 2212{
3a7d929e 2213 int dirty_flags;
3a7d929e
FB
2214 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2215 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2216#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2217 tb_invalidate_phys_page_fast(ram_addr, 2);
2218 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2219#endif
3a7d929e 2220 }
0f459d16 2221 stw_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2222#ifdef USE_KQEMU
2223 if (cpu_single_env->kqemu_enabled &&
2224 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2225 kqemu_modify_page(cpu_single_env, ram_addr);
2226#endif
f23db169
FB
2227 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2228 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2229 /* we remove the notdirty callback only if the code has been
2230 flushed */
2231 if (dirty_flags == 0xff)
0f459d16 2232 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2233}
2234
0f459d16
PB
2235static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2236 uint32_t val)
9fa3e853 2237{
3a7d929e 2238 int dirty_flags;
3a7d929e
FB
2239 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2240 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2241#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2242 tb_invalidate_phys_page_fast(ram_addr, 4);
2243 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2244#endif
3a7d929e 2245 }
0f459d16 2246 stl_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2247#ifdef USE_KQEMU
2248 if (cpu_single_env->kqemu_enabled &&
2249 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2250 kqemu_modify_page(cpu_single_env, ram_addr);
2251#endif
f23db169
FB
2252 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2253 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2254 /* we remove the notdirty callback only if the code has been
2255 flushed */
2256 if (dirty_flags == 0xff)
0f459d16 2257 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2258}
2259
3a7d929e 2260static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2261 NULL, /* never used */
2262 NULL, /* never used */
2263 NULL, /* never used */
2264};
2265
1ccde1cb
FB
2266static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2267 notdirty_mem_writeb,
2268 notdirty_mem_writew,
2269 notdirty_mem_writel,
2270};
2271
0f459d16
PB
2272/* Generate a debug exception if a watchpoint has been hit. */
2273static void check_watchpoint(int offset, int flags)
2274{
2275 CPUState *env = cpu_single_env;
2276 target_ulong vaddr;
2277 int i;
2278
2279 vaddr = (env->mem_write_vaddr & TARGET_PAGE_MASK) + offset;
2280 for (i = 0; i < env->nb_watchpoints; i++) {
2281 if (vaddr == env->watchpoint[i].vaddr
2282 && (env->watchpoint[i].type & flags)) {
2283 env->watchpoint_hit = i + 1;
2284 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2285 break;
2286 }
2287 }
2288}
2289
6658ffb8
PB
2290/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2291 so these check for a hit then pass through to the normal out-of-line
2292 phys routines. */
2293static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2294{
0f459d16 2295 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2296 return ldub_phys(addr);
2297}
2298
2299static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2300{
0f459d16 2301 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2302 return lduw_phys(addr);
2303}
2304
2305static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2306{
0f459d16 2307 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2308 return ldl_phys(addr);
2309}
2310
6658ffb8
PB
2311static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2312 uint32_t val)
2313{
0f459d16 2314 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2315 stb_phys(addr, val);
2316}
2317
2318static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2319 uint32_t val)
2320{
0f459d16 2321 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2322 stw_phys(addr, val);
2323}
2324
2325static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2326 uint32_t val)
2327{
0f459d16 2328 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2329 stl_phys(addr, val);
2330}
2331
2332static CPUReadMemoryFunc *watch_mem_read[3] = {
2333 watch_mem_readb,
2334 watch_mem_readw,
2335 watch_mem_readl,
2336};
2337
2338static CPUWriteMemoryFunc *watch_mem_write[3] = {
2339 watch_mem_writeb,
2340 watch_mem_writew,
2341 watch_mem_writel,
2342};
6658ffb8 2343
db7b5426
BS
2344static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2345 unsigned int len)
2346{
db7b5426
BS
2347 uint32_t ret;
2348 unsigned int idx;
2349
2350 idx = SUBPAGE_IDX(addr - mmio->base);
2351#if defined(DEBUG_SUBPAGE)
2352 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2353 mmio, len, addr, idx);
2354#endif
3ee89922 2355 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
db7b5426
BS
2356
2357 return ret;
2358}
2359
2360static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2361 uint32_t value, unsigned int len)
2362{
db7b5426
BS
2363 unsigned int idx;
2364
2365 idx = SUBPAGE_IDX(addr - mmio->base);
2366#if defined(DEBUG_SUBPAGE)
2367 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2368 mmio, len, addr, idx, value);
2369#endif
3ee89922 2370 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
db7b5426
BS
2371}
2372
2373static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2374{
2375#if defined(DEBUG_SUBPAGE)
2376 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2377#endif
2378
2379 return subpage_readlen(opaque, addr, 0);
2380}
2381
2382static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2383 uint32_t value)
2384{
2385#if defined(DEBUG_SUBPAGE)
2386 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2387#endif
2388 subpage_writelen(opaque, addr, value, 0);
2389}
2390
2391static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2392{
2393#if defined(DEBUG_SUBPAGE)
2394 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2395#endif
2396
2397 return subpage_readlen(opaque, addr, 1);
2398}
2399
2400static void subpage_writew (void *opaque, target_phys_addr_t addr,
2401 uint32_t value)
2402{
2403#if defined(DEBUG_SUBPAGE)
2404 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2405#endif
2406 subpage_writelen(opaque, addr, value, 1);
2407}
2408
2409static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2410{
2411#if defined(DEBUG_SUBPAGE)
2412 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2413#endif
2414
2415 return subpage_readlen(opaque, addr, 2);
2416}
2417
2418static void subpage_writel (void *opaque,
2419 target_phys_addr_t addr, uint32_t value)
2420{
2421#if defined(DEBUG_SUBPAGE)
2422 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2423#endif
2424 subpage_writelen(opaque, addr, value, 2);
2425}
2426
2427static CPUReadMemoryFunc *subpage_read[] = {
2428 &subpage_readb,
2429 &subpage_readw,
2430 &subpage_readl,
2431};
2432
2433static CPUWriteMemoryFunc *subpage_write[] = {
2434 &subpage_writeb,
2435 &subpage_writew,
2436 &subpage_writel,
2437};
2438
2439static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a 2440 ram_addr_t memory)
db7b5426
BS
2441{
2442 int idx, eidx;
4254fab8 2443 unsigned int i;
db7b5426
BS
2444
2445 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2446 return -1;
2447 idx = SUBPAGE_IDX(start);
2448 eidx = SUBPAGE_IDX(end);
2449#if defined(DEBUG_SUBPAGE)
2450 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2451 mmio, start, end, idx, eidx, memory);
2452#endif
2453 memory >>= IO_MEM_SHIFT;
2454 for (; idx <= eidx; idx++) {
4254fab8 2455 for (i = 0; i < 4; i++) {
3ee89922
BS
2456 if (io_mem_read[memory][i]) {
2457 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2458 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2459 }
2460 if (io_mem_write[memory][i]) {
2461 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2462 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2463 }
4254fab8 2464 }
db7b5426
BS
2465 }
2466
2467 return 0;
2468}
2469
00f82b8a
AJ
2470static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2471 ram_addr_t orig_memory)
db7b5426
BS
2472{
2473 subpage_t *mmio;
2474 int subpage_memory;
2475
2476 mmio = qemu_mallocz(sizeof(subpage_t));
2477 if (mmio != NULL) {
2478 mmio->base = base;
2479 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2480#if defined(DEBUG_SUBPAGE)
2481 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2482 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2483#endif
2484 *phys = subpage_memory | IO_MEM_SUBPAGE;
2485 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2486 }
2487
2488 return mmio;
2489}
2490
33417e70
FB
2491static void io_mem_init(void)
2492{
3a7d929e 2493 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2494 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2495 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2496 io_mem_nb = 5;
2497
0f459d16 2498 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
6658ffb8 2499 watch_mem_write, NULL);
1ccde1cb 2500 /* alloc dirty bits array */
0a962c02 2501 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2502 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2503}
2504
2505/* mem_read and mem_write are arrays of functions containing the
2506 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2507 2). Functions can be omitted with a NULL function pointer. The
2508 registered functions may be modified dynamically later.
2509 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
2510 modified. If it is zero, a new io zone is allocated. The return
2511 value can be used with cpu_register_physical_memory(); -1 is
2512 returned on error. */
33417e70
FB
2513int cpu_register_io_memory(int io_index,
2514 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2515 CPUWriteMemoryFunc **mem_write,
2516 void *opaque)
33417e70 2517{
4254fab8 2518 int i, subwidth = 0;
33417e70
FB
2519
2520 if (io_index <= 0) {
b5ff1b31 2521 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2522 return -1;
2523 io_index = io_mem_nb++;
2524 } else {
2525 if (io_index >= IO_MEM_NB_ENTRIES)
2526 return -1;
2527 }
b5ff1b31 2528
33417e70 2529 for(i = 0;i < 3; i++) {
4254fab8
BS
2530 if (!mem_read[i] || !mem_write[i])
2531 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2532 io_mem_read[io_index][i] = mem_read[i];
2533 io_mem_write[io_index][i] = mem_write[i];
2534 }
a4193c8a 2535 io_mem_opaque[io_index] = opaque;
4254fab8 2536 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2537}
61382a50 2538
8926b517
FB
2539CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2540{
2541 return io_mem_write[io_index >> IO_MEM_SHIFT];
2542}
2543
2544CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2545{
2546 return io_mem_read[io_index >> IO_MEM_SHIFT];
2547}
2548
e2eef170
PB
2549#endif /* !defined(CONFIG_USER_ONLY) */
2550
13eb76e0
FB
2551/* physical memory access (slow version, mainly for debug) */
2552#if defined(CONFIG_USER_ONLY)
5fafdf24 2553void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2554 int len, int is_write)
2555{
2556 int l, flags;
2557 target_ulong page;
53a5960a 2558 void * p;
13eb76e0
FB
2559
2560 while (len > 0) {
2561 page = addr & TARGET_PAGE_MASK;
2562 l = (page + TARGET_PAGE_SIZE) - addr;
2563 if (l > len)
2564 l = len;
2565 flags = page_get_flags(page);
2566 if (!(flags & PAGE_VALID))
2567 return;
2568 if (is_write) {
2569 if (!(flags & PAGE_WRITE))
2570 return;
579a97f7 2571 /* XXX: this code should not depend on lock_user */
72fb7daa 2572 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2573 /* FIXME - should this return an error rather than just fail? */
2574 return;
72fb7daa
AJ
2575 memcpy(p, buf, l);
2576 unlock_user(p, addr, l);
13eb76e0
FB
2577 } else {
2578 if (!(flags & PAGE_READ))
2579 return;
579a97f7 2580 /* XXX: this code should not depend on lock_user */
72fb7daa 2581 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2582 /* FIXME - should this return an error rather than just fail? */
2583 return;
72fb7daa 2584 memcpy(buf, p, l);
5b257578 2585 unlock_user(p, addr, 0);
13eb76e0
FB
2586 }
2587 len -= l;
2588 buf += l;
2589 addr += l;
2590 }
2591}
8df1cd07 2592
13eb76e0 2593#else
5fafdf24 2594void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2595 int len, int is_write)
2596{
2597 int l, io_index;
2598 uint8_t *ptr;
2599 uint32_t val;
2e12669a
FB
2600 target_phys_addr_t page;
2601 unsigned long pd;
92e873b9 2602 PhysPageDesc *p;
3b46e624 2603
13eb76e0
FB
2604 while (len > 0) {
2605 page = addr & TARGET_PAGE_MASK;
2606 l = (page + TARGET_PAGE_SIZE) - addr;
2607 if (l > len)
2608 l = len;
92e873b9 2609 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2610 if (!p) {
2611 pd = IO_MEM_UNASSIGNED;
2612 } else {
2613 pd = p->phys_offset;
2614 }
3b46e624 2615
13eb76e0 2616 if (is_write) {
3a7d929e 2617 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2618 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2619 /* XXX: could force cpu_single_env to NULL to avoid
2620 potential bugs */
13eb76e0 2621 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2622 /* 32 bit write access */
c27004ec 2623 val = ldl_p(buf);
a4193c8a 2624 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2625 l = 4;
2626 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2627 /* 16 bit write access */
c27004ec 2628 val = lduw_p(buf);
a4193c8a 2629 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2630 l = 2;
2631 } else {
1c213d19 2632 /* 8 bit write access */
c27004ec 2633 val = ldub_p(buf);
a4193c8a 2634 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2635 l = 1;
2636 }
2637 } else {
b448f2f3
FB
2638 unsigned long addr1;
2639 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2640 /* RAM case */
b448f2f3 2641 ptr = phys_ram_base + addr1;
13eb76e0 2642 memcpy(ptr, buf, l);
3a7d929e
FB
2643 if (!cpu_physical_memory_is_dirty(addr1)) {
2644 /* invalidate code */
2645 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2646 /* set dirty bit */
5fafdf24 2647 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2648 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2649 }
13eb76e0
FB
2650 }
2651 } else {
5fafdf24 2652 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2653 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2654 /* I/O case */
2655 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2656 if (l >= 4 && ((addr & 3) == 0)) {
2657 /* 32 bit read access */
a4193c8a 2658 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2659 stl_p(buf, val);
13eb76e0
FB
2660 l = 4;
2661 } else if (l >= 2 && ((addr & 1) == 0)) {
2662 /* 16 bit read access */
a4193c8a 2663 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2664 stw_p(buf, val);
13eb76e0
FB
2665 l = 2;
2666 } else {
1c213d19 2667 /* 8 bit read access */
a4193c8a 2668 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2669 stb_p(buf, val);
13eb76e0
FB
2670 l = 1;
2671 }
2672 } else {
2673 /* RAM case */
5fafdf24 2674 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
2675 (addr & ~TARGET_PAGE_MASK);
2676 memcpy(buf, ptr, l);
2677 }
2678 }
2679 len -= l;
2680 buf += l;
2681 addr += l;
2682 }
2683}
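/* Sketch only: a typical debug/DMA-style use of the routine above. The
   helper name is illustrative; is_write = 1 takes the write path, which
   updates the dirty bits and invalidates any overlapping TBs. */
#if 0
static void example_copy_to_guest(target_phys_addr_t dst,
                                  const uint8_t *buf, int len)
{
    cpu_physical_memory_rw(dst, (uint8_t *)buf, len, 1);
}
#endif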
8df1cd07 2684
d0ecd2aa 2685/* used for ROM loading : can write in RAM and ROM */
5fafdf24 2686void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
2687 const uint8_t *buf, int len)
2688{
2689 int l;
2690 uint8_t *ptr;
2691 target_phys_addr_t page;
2692 unsigned long pd;
2693 PhysPageDesc *p;
3b46e624 2694
d0ecd2aa
FB
2695 while (len > 0) {
2696 page = addr & TARGET_PAGE_MASK;
2697 l = (page + TARGET_PAGE_SIZE) - addr;
2698 if (l > len)
2699 l = len;
2700 p = phys_page_find(page >> TARGET_PAGE_BITS);
2701 if (!p) {
2702 pd = IO_MEM_UNASSIGNED;
2703 } else {
2704 pd = p->phys_offset;
2705 }
3b46e624 2706
d0ecd2aa 2707 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2708 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2709 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2710 /* do nothing */
2711 } else {
2712 unsigned long addr1;
2713 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2714 /* ROM/RAM case */
2715 ptr = phys_ram_base + addr1;
2716 memcpy(ptr, buf, l);
2717 }
2718 len -= l;
2719 buf += l;
2720 addr += l;
2721 }
2722}
2723
2724
8df1cd07
FB
2725/* warning: addr must be aligned */
2726uint32_t ldl_phys(target_phys_addr_t addr)
2727{
2728 int io_index;
2729 uint8_t *ptr;
2730 uint32_t val;
2731 unsigned long pd;
2732 PhysPageDesc *p;
2733
2734 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2735 if (!p) {
2736 pd = IO_MEM_UNASSIGNED;
2737 } else {
2738 pd = p->phys_offset;
2739 }
3b46e624 2740
5fafdf24 2741 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2742 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2743 /* I/O case */
2744 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2745 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2746 } else {
2747 /* RAM case */
5fafdf24 2748 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2749 (addr & ~TARGET_PAGE_MASK);
2750 val = ldl_p(ptr);
2751 }
2752 return val;
2753}
2754
84b7b8e7
FB
2755/* warning: addr must be aligned */
2756uint64_t ldq_phys(target_phys_addr_t addr)
2757{
2758 int io_index;
2759 uint8_t *ptr;
2760 uint64_t val;
2761 unsigned long pd;
2762 PhysPageDesc *p;
2763
2764 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2765 if (!p) {
2766 pd = IO_MEM_UNASSIGNED;
2767 } else {
2768 pd = p->phys_offset;
2769 }
3b46e624 2770
2a4188a3
FB
2771 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2772 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2773 /* I/O case */
2774 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2775#ifdef TARGET_WORDS_BIGENDIAN
2776 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2777 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2778#else
2779 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2780 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2781#endif
2782 } else {
2783 /* RAM case */
5fafdf24 2784 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
2785 (addr & ~TARGET_PAGE_MASK);
2786 val = ldq_p(ptr);
2787 }
2788 return val;
2789}
2790
aab33094
FB
2791/* XXX: optimize */
2792uint32_t ldub_phys(target_phys_addr_t addr)
2793{
2794 uint8_t val;
2795 cpu_physical_memory_read(addr, &val, 1);
2796 return val;
2797}
2798
2799/* XXX: optimize */
2800uint32_t lduw_phys(target_phys_addr_t addr)
2801{
2802 uint16_t val;
2803 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2804 return tswap16(val);
2805}
2806
8df1cd07
FB
2807/* warning: addr must be aligned. The ram page is not masked as dirty
2808 and the code inside is not invalidated. It is useful if the dirty
2809 bits are used to track modified PTEs */
2810void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2811{
2812 int io_index;
2813 uint8_t *ptr;
2814 unsigned long pd;
2815 PhysPageDesc *p;
2816
2817 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2818 if (!p) {
2819 pd = IO_MEM_UNASSIGNED;
2820 } else {
2821 pd = p->phys_offset;
2822 }
3b46e624 2823
3a7d929e 2824 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2825 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2826 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2827 } else {
5fafdf24 2828 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2829 (addr & ~TARGET_PAGE_MASK);
2830 stl_p(ptr, val);
2831 }
2832}
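/* Sketch only: the notdirty variant suits target MMU code that sets
   accessed/dirty bits in page table entries. Writing the PTE with
   stl_phys_notdirty() leaves the page's dirty bitmap untouched, so the
   bitmap keeps tracking guest-initiated PTE modifications only. The
   helper name and the bit argument are illustrative. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr,
                                     uint32_t accessed_bit)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | accessed_bit);
}
#endif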
2833
bc98a7ef
JM
2834void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2835{
2836 int io_index;
2837 uint8_t *ptr;
2838 unsigned long pd;
2839 PhysPageDesc *p;
2840
2841 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2842 if (!p) {
2843 pd = IO_MEM_UNASSIGNED;
2844 } else {
2845 pd = p->phys_offset;
2846 }
3b46e624 2847
bc98a7ef
JM
2848 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2849 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2850#ifdef TARGET_WORDS_BIGENDIAN
2851 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2852 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2853#else
2854 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2855 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2856#endif
2857 } else {
5fafdf24 2858 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
2859 (addr & ~TARGET_PAGE_MASK);
2860 stq_p(ptr, val);
2861 }
2862}
2863
8df1cd07 2864/* warning: addr must be aligned */
8df1cd07
FB
2865void stl_phys(target_phys_addr_t addr, uint32_t val)
2866{
2867 int io_index;
2868 uint8_t *ptr;
2869 unsigned long pd;
2870 PhysPageDesc *p;
2871
2872 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2873 if (!p) {
2874 pd = IO_MEM_UNASSIGNED;
2875 } else {
2876 pd = p->phys_offset;
2877 }
3b46e624 2878
3a7d929e 2879 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2880 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2881 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2882 } else {
2883 unsigned long addr1;
2884 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2885 /* RAM case */
2886 ptr = phys_ram_base + addr1;
2887 stl_p(ptr, val);
3a7d929e
FB
2888 if (!cpu_physical_memory_is_dirty(addr1)) {
2889 /* invalidate code */
2890 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2891 /* set dirty bit */
f23db169
FB
2892 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2893 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2894 }
8df1cd07
FB
2895 }
2896}
2897
aab33094
FB
2898/* XXX: optimize */
2899void stb_phys(target_phys_addr_t addr, uint32_t val)
2900{
2901 uint8_t v = val;
2902 cpu_physical_memory_write(addr, &v, 1);
2903}
2904
2905/* XXX: optimize */
2906void stw_phys(target_phys_addr_t addr, uint32_t val)
2907{
2908 uint16_t v = tswap16(val);
2909 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2910}
2911
2912/* XXX: optimize */
2913void stq_phys(target_phys_addr_t addr, uint64_t val)
2914{
2915 val = tswap64(val);
2916 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2917}
2918
13eb76e0
FB
2919#endif
2920
2921/* virtual memory access for debug */
5fafdf24 2922int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 2923 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2924{
2925 int l;
9b3c35e0
JM
2926 target_phys_addr_t phys_addr;
2927 target_ulong page;
13eb76e0
FB
2928
2929 while (len > 0) {
2930 page = addr & TARGET_PAGE_MASK;
2931 phys_addr = cpu_get_phys_page_debug(env, page);
2932 /* if no physical page mapped, return an error */
2933 if (phys_addr == -1)
2934 return -1;
2935 l = (page + TARGET_PAGE_SIZE) - addr;
2936 if (l > len)
2937 l = len;
5fafdf24 2938 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 2939 buf, l, is_write);
13eb76e0
FB
2940 len -= l;
2941 buf += l;
2942 addr += l;
2943 }
2944 return 0;
2945}
2946
e3db7226
FB
2947void dump_exec_info(FILE *f,
2948 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2949{
2950 int i, target_code_size, max_target_code_size;
2951 int direct_jmp_count, direct_jmp2_count, cross_page;
2952 TranslationBlock *tb;
3b46e624 2953
e3db7226
FB
2954 target_code_size = 0;
2955 max_target_code_size = 0;
2956 cross_page = 0;
2957 direct_jmp_count = 0;
2958 direct_jmp2_count = 0;
2959 for(i = 0; i < nb_tbs; i++) {
2960 tb = &tbs[i];
2961 target_code_size += tb->size;
2962 if (tb->size > max_target_code_size)
2963 max_target_code_size = tb->size;
2964 if (tb->page_addr[1] != -1)
2965 cross_page++;
2966 if (tb->tb_next_offset[0] != 0xffff) {
2967 direct_jmp_count++;
2968 if (tb->tb_next_offset[1] != 0xffff) {
2969 direct_jmp2_count++;
2970 }
2971 }
2972 }
2973 /* XXX: avoid using doubles ? */
57fec1fe 2974 cpu_fprintf(f, "Translation buffer state:\n");
26a5f13b
FB
2975 cpu_fprintf(f, "gen code size %ld/%ld\n",
2976 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
2977 cpu_fprintf(f, "TB count %d/%d\n",
2978 nb_tbs, code_gen_max_blocks);
5fafdf24 2979 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
2980 nb_tbs ? target_code_size / nb_tbs : 0,
2981 max_target_code_size);
5fafdf24 2982 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
2983 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2984 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
2985 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2986 cross_page,
e3db7226
FB
2987 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2988 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 2989 direct_jmp_count,
e3db7226
FB
2990 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2991 direct_jmp2_count,
2992 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 2993 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
2994 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2995 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2996 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 2997 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
2998}
2999
5fafdf24 3000#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3001
3002#define MMUSUFFIX _cmmu
3003#define GETPC() NULL
3004#define env cpu_single_env
b769d8fe 3005#define SOFTMMU_CODE_ACCESS
61382a50
FB
3006
3007#define SHIFT 0
3008#include "softmmu_template.h"
3009
3010#define SHIFT 1
3011#include "softmmu_template.h"
3012
3013#define SHIFT 2
3014#include "softmmu_template.h"
3015
3016#define SHIFT 3
3017#include "softmmu_template.h"
3018
3019#undef env
3020
3021#endif