54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
67b915a5 20#include "config.h"
d5a8f07c 21#ifdef _WIN32
4fddf62a 22#define WIN32_LEAN_AND_MEAN
d5a8f07c
FB
23#include <windows.h>
24#else
a98d49b1 25#include <sys/types.h>
d5a8f07c
FB
26#include <sys/mman.h>
27#endif
54936004
FB
28#include <stdlib.h>
29#include <stdio.h>
30#include <stdarg.h>
31#include <string.h>
32#include <errno.h>
33#include <unistd.h>
34#include <inttypes.h>
35
6180a181
FB
36#include "cpu.h"
37#include "exec-all.h"
ca10f867 38#include "qemu-common.h"
b67d9a52 39#include "tcg.h"
53a5960a
PB
40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
42#endif
54936004 43
fd6ce8f6 44//#define DEBUG_TB_INVALIDATE
66e85a21 45//#define DEBUG_FLUSH
9fa3e853 46//#define DEBUG_TLB
67d3b957 47//#define DEBUG_UNASSIGNED
fd6ce8f6
FB
48
49/* make various TB consistency checks */
5fafdf24
TS
50//#define DEBUG_TB_CHECK
51//#define DEBUG_TLB_CHECK
fd6ce8f6 52
1196be37 53//#define DEBUG_IOPORT
db7b5426 54//#define DEBUG_SUBPAGE
1196be37 55
99773bd4
PB
56#if !defined(CONFIG_USER_ONLY)
57/* TB consistency checks only implemented for usermode emulation. */
58#undef DEBUG_TB_CHECK
59#endif
60
9fa3e853
FB
61#define SMC_BITMAP_USE_THRESHOLD 10
62
63#define MMAP_AREA_START 0x00000000
64#define MMAP_AREA_END 0xa8000000
fd6ce8f6 65
108c49b8
FB
66#if defined(TARGET_SPARC64)
67#define TARGET_PHYS_ADDR_SPACE_BITS 41
5dcb6b91
BS
68#elif defined(TARGET_SPARC)
69#define TARGET_PHYS_ADDR_SPACE_BITS 36
bedb69ea
JM
70#elif defined(TARGET_ALPHA)
71#define TARGET_PHYS_ADDR_SPACE_BITS 42
72#define TARGET_VIRT_ADDR_SPACE_BITS 42
108c49b8
FB
73#elif defined(TARGET_PPC64)
74#define TARGET_PHYS_ADDR_SPACE_BITS 42
00f82b8a
AJ
75#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
76#define TARGET_PHYS_ADDR_SPACE_BITS 42
77#elif defined(TARGET_I386) && !defined(USE_KQEMU)
78#define TARGET_PHYS_ADDR_SPACE_BITS 36
108c49b8
FB
79#else
80/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
81#define TARGET_PHYS_ADDR_SPACE_BITS 32
82#endif
83
fab94c0e 84TranslationBlock *tbs;
26a5f13b 85int code_gen_max_blocks;
9fa3e853 86TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
fd6ce8f6 87int nb_tbs;
eb51d102
FB
88/* any access to the tbs or the page table must use this lock */
89spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 90
7cb69cae 91uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
26a5f13b
FB
92uint8_t *code_gen_buffer;
93unsigned long code_gen_buffer_size;
94/* threshold to flush the translated code buffer */
95unsigned long code_gen_buffer_max_size;
fd6ce8f6
FB
96uint8_t *code_gen_ptr;
97
e2eef170 98#if !defined(CONFIG_USER_ONLY)
00f82b8a 99ram_addr_t phys_ram_size;
9fa3e853
FB
100int phys_ram_fd;
101uint8_t *phys_ram_base;
1ccde1cb 102uint8_t *phys_ram_dirty;
e9a1ab19 103static ram_addr_t phys_ram_alloc_offset = 0;
e2eef170 104#endif
9fa3e853 105
6a00d601
FB
106CPUState *first_cpu;
107/* current CPU in the current thread. It is only valid inside
108 cpu_exec() */
5fafdf24 109CPUState *cpu_single_env;
6a00d601 110
54936004 111typedef struct PageDesc {
92e873b9 112 /* list of TBs intersecting this ram page */
fd6ce8f6 113 TranslationBlock *first_tb;
9fa3e853
FB
114 /* in order to optimize self modifying code, we count the number
115 of lookups we do to a given page to use a bitmap */
116 unsigned int code_write_count;
117 uint8_t *code_bitmap;
118#if defined(CONFIG_USER_ONLY)
119 unsigned long flags;
120#endif
54936004
FB
121} PageDesc;
122
92e873b9 123typedef struct PhysPageDesc {
0f459d16 124 /* offset in host memory of the page + io_index in the low bits */
00f82b8a 125 ram_addr_t phys_offset;
92e873b9
FB
126} PhysPageDesc;
127
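/* Illustrative sketch, not part of the original file: phys_offset packs a
   page-aligned offset into host RAM together with an io_index in the low
   bits (below TARGET_PAGE_SIZE).  The TLB code later in this file splits
   it roughly like this to tell RAM/ROM pages apart from MMIO pages: */
static void __attribute__((unused))
example_split_phys_offset(ram_addr_t phys_offset,
                          ram_addr_t *page_offset, int *io_index)
{
    *page_offset = phys_offset & TARGET_PAGE_MASK;  /* host RAM page offset */
    *io_index = phys_offset & ~TARGET_PAGE_MASK;    /* IO_MEM_* index bits */
}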
54936004 128#define L2_BITS 10
bedb69ea
JM
129#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
130/* XXX: this is a temporary hack for alpha target.
131 * In the future, this is to be replaced by a multi-level table
 132 * to actually be able to handle the complete 64-bit address space.
133 */
134#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
135#else
03875444 136#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
bedb69ea 137#endif
54936004
FB
138
139#define L1_SIZE (1 << L1_BITS)
140#define L2_SIZE (1 << L2_BITS)
141
83fb7adf
FB
142unsigned long qemu_real_host_page_size;
143unsigned long qemu_host_page_bits;
144unsigned long qemu_host_page_size;
145unsigned long qemu_host_page_mask;
54936004 146
92e873b9 147/* XXX: for system emulation, it could just be an array */
54936004 148static PageDesc *l1_map[L1_SIZE];
0a962c02 149PhysPageDesc **l1_phys_map;
54936004 150
e2eef170
PB
151#if !defined(CONFIG_USER_ONLY)
152static void io_mem_init(void);
153
33417e70 154/* io memory support */
33417e70
FB
155CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
156CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 157void *io_mem_opaque[IO_MEM_NB_ENTRIES];
33417e70 158static int io_mem_nb;
6658ffb8
PB
159static int io_mem_watch;
160#endif
33417e70 161
34865134
FB
162/* log support */
163char *logfilename = "/tmp/qemu.log";
164FILE *logfile;
165int loglevel;
e735b91c 166static int log_append = 0;
34865134 167
e3db7226
FB
168/* statistics */
169static int tlb_flush_count;
170static int tb_flush_count;
171static int tb_phys_invalidate_count;
172
db7b5426
BS
173#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
174typedef struct subpage_t {
175 target_phys_addr_t base;
3ee89922
BS
176 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
177 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
178 void *opaque[TARGET_PAGE_SIZE][2][4];
db7b5426
BS
179} subpage_t;
180
7cb69cae
FB
181#ifdef _WIN32
182static void map_exec(void *addr, long size)
183{
184 DWORD old_protect;
185 VirtualProtect(addr, size,
186 PAGE_EXECUTE_READWRITE, &old_protect);
187
188}
189#else
190static void map_exec(void *addr, long size)
191{
4369415f 192 unsigned long start, end, page_size;
7cb69cae 193
4369415f 194 page_size = getpagesize();
7cb69cae 195 start = (unsigned long)addr;
4369415f 196 start &= ~(page_size - 1);
7cb69cae
FB
197
198 end = (unsigned long)addr + size;
4369415f
FB
199 end += page_size - 1;
200 end &= ~(page_size - 1);
7cb69cae
FB
201
202 mprotect((void *)start, end - start,
203 PROT_READ | PROT_WRITE | PROT_EXEC);
204}
205#endif
206
b346ff46 207static void page_init(void)
54936004 208{
83fb7adf 209 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 210 TARGET_PAGE_SIZE */
67b915a5 211#ifdef _WIN32
d5a8f07c
FB
212 {
213 SYSTEM_INFO system_info;
214 DWORD old_protect;
3b46e624 215
d5a8f07c
FB
216 GetSystemInfo(&system_info);
217 qemu_real_host_page_size = system_info.dwPageSize;
d5a8f07c 218 }
67b915a5 219#else
83fb7adf 220 qemu_real_host_page_size = getpagesize();
67b915a5 221#endif
83fb7adf
FB
222 if (qemu_host_page_size == 0)
223 qemu_host_page_size = qemu_real_host_page_size;
224 if (qemu_host_page_size < TARGET_PAGE_SIZE)
225 qemu_host_page_size = TARGET_PAGE_SIZE;
226 qemu_host_page_bits = 0;
227 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
228 qemu_host_page_bits++;
229 qemu_host_page_mask = ~(qemu_host_page_size - 1);
108c49b8
FB
230 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
231 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
50a9569b
AZ
232
233#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
234 {
235 long long startaddr, endaddr;
236 FILE *f;
237 int n;
238
c8a706fe 239 mmap_lock();
0776590d 240 last_brk = (unsigned long)sbrk(0);
50a9569b
AZ
241 f = fopen("/proc/self/maps", "r");
242 if (f) {
243 do {
244 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
245 if (n == 2) {
e0b8d65a
BS
246 startaddr = MIN(startaddr,
247 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
248 endaddr = MIN(endaddr,
249 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
b5fc909e 250 page_set_flags(startaddr & TARGET_PAGE_MASK,
50a9569b
AZ
251 TARGET_PAGE_ALIGN(endaddr),
252 PAGE_RESERVED);
253 }
254 } while (!feof(f));
255 fclose(f);
256 }
c8a706fe 257 mmap_unlock();
50a9569b
AZ
258 }
259#endif
54936004
FB
260}
261
00f82b8a 262static inline PageDesc *page_find_alloc(target_ulong index)
54936004 263{
54936004
FB
264 PageDesc **lp, *p;
265
17e2377a
PB
266#if TARGET_LONG_BITS > 32
267 /* Host memory outside guest VM. For 32-bit targets we have already
268 excluded high addresses. */
269 if (index > ((target_ulong)L2_SIZE * L1_SIZE * TARGET_PAGE_SIZE))
270 return NULL;
271#endif
54936004
FB
272 lp = &l1_map[index >> L2_BITS];
273 p = *lp;
274 if (!p) {
275 /* allocate if not found */
17e2377a
PB
276#if defined(CONFIG_USER_ONLY)
277 unsigned long addr;
278 size_t len = sizeof(PageDesc) * L2_SIZE;
279 /* Don't use qemu_malloc because it may recurse. */
280 p = mmap(0, len, PROT_READ | PROT_WRITE,
281 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
54936004 282 *lp = p;
17e2377a
PB
283 addr = h2g(p);
284 if (addr == (target_ulong)addr) {
285 page_set_flags(addr & TARGET_PAGE_MASK,
286 TARGET_PAGE_ALIGN(addr + len),
287 PAGE_RESERVED);
288 }
289#else
290 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
291 *lp = p;
292#endif
54936004
FB
293 }
294 return p + (index & (L2_SIZE - 1));
295}
296
00f82b8a 297static inline PageDesc *page_find(target_ulong index)
54936004 298{
54936004
FB
299 PageDesc *p;
300
54936004
FB
301 p = l1_map[index >> L2_BITS];
302 if (!p)
303 return 0;
fd6ce8f6
FB
304 return p + (index & (L2_SIZE - 1));
305}
306
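/* Illustrative sketch, not part of the original file: page_find() and
   page_find_alloc() above treat the target page index as a two-level key:
   the upper bits select an l1_map[] slot, the low L2_BITS bits select the
   PageDesc inside that slot's array of L2_SIZE entries. */
static void __attribute__((unused))
example_page_index_split(target_ulong index,
                         unsigned long *l1_slot, unsigned long *l2_slot)
{
    *l1_slot = index >> L2_BITS;        /* index into l1_map[] */
    *l2_slot = index & (L2_SIZE - 1);   /* index into the PageDesc block */
}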
108c49b8 307static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 308{
108c49b8 309 void **lp, **p;
e3f4e2a4 310 PhysPageDesc *pd;
92e873b9 311
108c49b8
FB
312 p = (void **)l1_phys_map;
313#if TARGET_PHYS_ADDR_SPACE_BITS > 32
314
315#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
316#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
317#endif
318 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
92e873b9
FB
319 p = *lp;
320 if (!p) {
321 /* allocate if not found */
108c49b8
FB
322 if (!alloc)
323 return NULL;
324 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
325 memset(p, 0, sizeof(void *) * L1_SIZE);
326 *lp = p;
327 }
328#endif
329 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
e3f4e2a4
PB
330 pd = *lp;
331 if (!pd) {
332 int i;
108c49b8
FB
333 /* allocate if not found */
334 if (!alloc)
335 return NULL;
e3f4e2a4
PB
336 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
337 *lp = pd;
338 for (i = 0; i < L2_SIZE; i++)
339 pd[i].phys_offset = IO_MEM_UNASSIGNED;
92e873b9 340 }
e3f4e2a4 341 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
92e873b9
FB
342}
343
108c49b8 344static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 345{
108c49b8 346 return phys_page_find_alloc(index, 0);
92e873b9
FB
347}
348
9fa3e853 349#if !defined(CONFIG_USER_ONLY)
6a00d601 350static void tlb_protect_code(ram_addr_t ram_addr);
5fafdf24 351static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 352 target_ulong vaddr);
c8a706fe
PB
353#define mmap_lock() do { } while(0)
354#define mmap_unlock() do { } while(0)
9fa3e853 355#endif
fd6ce8f6 356
4369415f
FB
357#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
358
359#if defined(CONFIG_USER_ONLY)
 360/* Currently it is not recommended to allocate big chunks of data in
 361 user mode. It will change when a dedicated libc is used */
362#define USE_STATIC_CODE_GEN_BUFFER
363#endif
364
365#ifdef USE_STATIC_CODE_GEN_BUFFER
366static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
367#endif
368
26a5f13b
FB
369void code_gen_alloc(unsigned long tb_size)
370{
4369415f
FB
371#ifdef USE_STATIC_CODE_GEN_BUFFER
372 code_gen_buffer = static_code_gen_buffer;
373 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
374 map_exec(code_gen_buffer, code_gen_buffer_size);
375#else
26a5f13b
FB
376 code_gen_buffer_size = tb_size;
377 if (code_gen_buffer_size == 0) {
4369415f
FB
378#if defined(CONFIG_USER_ONLY)
379 /* in user mode, phys_ram_size is not meaningful */
380 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
381#else
26a5f13b
FB
 382 /* XXX: needs adjustments */
383 code_gen_buffer_size = (int)(phys_ram_size / 4);
4369415f 384#endif
26a5f13b
FB
385 }
386 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
387 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
388 /* The code gen buffer location may have constraints depending on
389 the host cpu and OS */
390#if defined(__linux__)
391 {
392 int flags;
393 flags = MAP_PRIVATE | MAP_ANONYMOUS;
394#if defined(__x86_64__)
395 flags |= MAP_32BIT;
396 /* Cannot map more than that */
397 if (code_gen_buffer_size > (800 * 1024 * 1024))
398 code_gen_buffer_size = (800 * 1024 * 1024);
399#endif
400 code_gen_buffer = mmap(NULL, code_gen_buffer_size,
401 PROT_WRITE | PROT_READ | PROT_EXEC,
402 flags, -1, 0);
403 if (code_gen_buffer == MAP_FAILED) {
404 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
405 exit(1);
406 }
407 }
408#else
409 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
410 if (!code_gen_buffer) {
411 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
412 exit(1);
413 }
414 map_exec(code_gen_buffer, code_gen_buffer_size);
415#endif
4369415f 416#endif /* !USE_STATIC_CODE_GEN_BUFFER */
26a5f13b
FB
417 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
418 code_gen_buffer_max_size = code_gen_buffer_size -
419 code_gen_max_block_size();
420 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
421 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
422}
423
424/* Must be called before using the QEMU cpus. 'tb_size' is the size
425 (in bytes) allocated to the translation buffer. Zero means default
426 size. */
427void cpu_exec_init_all(unsigned long tb_size)
428{
26a5f13b
FB
429 cpu_gen_init();
430 code_gen_alloc(tb_size);
431 code_gen_ptr = code_gen_buffer;
4369415f 432 page_init();
e2eef170 433#if !defined(CONFIG_USER_ONLY)
26a5f13b 434 io_mem_init();
e2eef170 435#endif
26a5f13b
FB
436}
437
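/* Illustrative usage sketch, not part of the original file: a frontend is
   expected to call cpu_exec_init_all() exactly once, before any CPU is
   created; the per-CPU cpu_exec_init() below is then reached through the
   target's cpu_init() routine. */
static void __attribute__((unused)) example_startup(void)
{
    cpu_exec_init_all(0);   /* 0 selects the default code buffer size */
}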
6a00d601 438void cpu_exec_init(CPUState *env)
fd6ce8f6 439{
6a00d601
FB
440 CPUState **penv;
441 int cpu_index;
442
6a00d601
FB
443 env->next_cpu = NULL;
444 penv = &first_cpu;
445 cpu_index = 0;
446 while (*penv != NULL) {
447 penv = (CPUState **)&(*penv)->next_cpu;
448 cpu_index++;
449 }
450 env->cpu_index = cpu_index;
6658ffb8 451 env->nb_watchpoints = 0;
6a00d601 452 *penv = env;
fd6ce8f6
FB
453}
454
9fa3e853
FB
455static inline void invalidate_page_bitmap(PageDesc *p)
456{
457 if (p->code_bitmap) {
59817ccb 458 qemu_free(p->code_bitmap);
9fa3e853
FB
459 p->code_bitmap = NULL;
460 }
461 p->code_write_count = 0;
462}
463
fd6ce8f6
FB
464/* set to NULL all the 'first_tb' fields in all PageDescs */
465static void page_flush_tb(void)
466{
467 int i, j;
468 PageDesc *p;
469
470 for(i = 0; i < L1_SIZE; i++) {
471 p = l1_map[i];
472 if (p) {
9fa3e853
FB
473 for(j = 0; j < L2_SIZE; j++) {
474 p->first_tb = NULL;
475 invalidate_page_bitmap(p);
476 p++;
477 }
fd6ce8f6
FB
478 }
479 }
480}
481
482/* flush all the translation blocks */
d4e8164f 483/* XXX: tb_flush is currently not thread safe */
6a00d601 484void tb_flush(CPUState *env1)
fd6ce8f6 485{
6a00d601 486 CPUState *env;
0124311e 487#if defined(DEBUG_FLUSH)
ab3d1727
BS
488 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
489 (unsigned long)(code_gen_ptr - code_gen_buffer),
490 nb_tbs, nb_tbs > 0 ?
491 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 492#endif
26a5f13b 493 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
494 cpu_abort(env1, "Internal error: code buffer overflow\n");
495
fd6ce8f6 496 nb_tbs = 0;
3b46e624 497
6a00d601
FB
498 for(env = first_cpu; env != NULL; env = env->next_cpu) {
499 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
500 }
9fa3e853 501
8a8a608f 502 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 503 page_flush_tb();
9fa3e853 504
fd6ce8f6 505 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
506 /* XXX: flush processor icache at this point if cache flush is
507 expensive */
e3db7226 508 tb_flush_count++;
fd6ce8f6
FB
509}
510
511#ifdef DEBUG_TB_CHECK
512
bc98a7ef 513static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
514{
515 TranslationBlock *tb;
516 int i;
517 address &= TARGET_PAGE_MASK;
99773bd4
PB
518 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
519 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
520 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
521 address >= tb->pc + tb->size)) {
522 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 523 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
524 }
525 }
526 }
527}
528
529/* verify that all the pages have correct rights for code */
530static void tb_page_check(void)
531{
532 TranslationBlock *tb;
533 int i, flags1, flags2;
3b46e624 534
99773bd4
PB
535 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
536 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
537 flags1 = page_get_flags(tb->pc);
538 flags2 = page_get_flags(tb->pc + tb->size - 1);
539 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
540 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 541 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
542 }
543 }
544 }
545}
546
d4e8164f
FB
547void tb_jmp_check(TranslationBlock *tb)
548{
549 TranslationBlock *tb1;
550 unsigned int n1;
551
552 /* suppress any remaining jumps to this TB */
553 tb1 = tb->jmp_first;
554 for(;;) {
555 n1 = (long)tb1 & 3;
556 tb1 = (TranslationBlock *)((long)tb1 & ~3);
557 if (n1 == 2)
558 break;
559 tb1 = tb1->jmp_next[n1];
560 }
561 /* check end of list */
562 if (tb1 != tb) {
563 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
564 }
565}
566
fd6ce8f6
FB
567#endif
568
569/* invalidate one TB */
570static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
571 int next_offset)
572{
573 TranslationBlock *tb1;
574 for(;;) {
575 tb1 = *ptb;
576 if (tb1 == tb) {
577 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
578 break;
579 }
580 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
581 }
582}
583
9fa3e853
FB
584static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
585{
586 TranslationBlock *tb1;
587 unsigned int n1;
588
589 for(;;) {
590 tb1 = *ptb;
591 n1 = (long)tb1 & 3;
592 tb1 = (TranslationBlock *)((long)tb1 & ~3);
593 if (tb1 == tb) {
594 *ptb = tb1->page_next[n1];
595 break;
596 }
597 ptb = &tb1->page_next[n1];
598 }
599}
600
d4e8164f
FB
601static inline void tb_jmp_remove(TranslationBlock *tb, int n)
602{
603 TranslationBlock *tb1, **ptb;
604 unsigned int n1;
605
606 ptb = &tb->jmp_next[n];
607 tb1 = *ptb;
608 if (tb1) {
609 /* find tb(n) in circular list */
610 for(;;) {
611 tb1 = *ptb;
612 n1 = (long)tb1 & 3;
613 tb1 = (TranslationBlock *)((long)tb1 & ~3);
614 if (n1 == n && tb1 == tb)
615 break;
616 if (n1 == 2) {
617 ptb = &tb1->jmp_first;
618 } else {
619 ptb = &tb1->jmp_next[n1];
620 }
621 }
622 /* now we can suppress tb(n) from the list */
623 *ptb = tb->jmp_next[n];
624
625 tb->jmp_next[n] = NULL;
626 }
627}
628
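/* Illustrative sketch, not part of the original file: the page_next[],
   jmp_next[] and jmp_first links handled above carry a tag in the two low
   bits of each TranslationBlock pointer.  Tag 0 or 1 names which of the
   TB's two pages (or jump slots) the link belongs to; tag 2 marks the end
   of the circular list.  Decoding a link therefore looks like this: */
static void __attribute__((unused))
example_decode_tb_link(TranslationBlock *link,
                       TranslationBlock **tb, unsigned int *tag)
{
    *tag = (long)link & 3;                        /* 0, 1 or 2 */
    *tb = (TranslationBlock *)((long)link & ~3);  /* untagged pointer */
}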
629/* reset the jump entry 'n' of a TB so that it is not chained to
630 another TB */
631static inline void tb_reset_jump(TranslationBlock *tb, int n)
632{
633 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
634}
635
00f82b8a 636static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
fd6ce8f6 637{
6a00d601 638 CPUState *env;
8a40a180 639 PageDesc *p;
d4e8164f 640 unsigned int h, n1;
00f82b8a 641 target_phys_addr_t phys_pc;
8a40a180 642 TranslationBlock *tb1, *tb2;
3b46e624 643
8a40a180
FB
644 /* remove the TB from the hash list */
645 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
646 h = tb_phys_hash_func(phys_pc);
5fafdf24 647 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
648 offsetof(TranslationBlock, phys_hash_next));
649
650 /* remove the TB from the page list */
651 if (tb->page_addr[0] != page_addr) {
652 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
653 tb_page_remove(&p->first_tb, tb);
654 invalidate_page_bitmap(p);
655 }
656 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
657 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
658 tb_page_remove(&p->first_tb, tb);
659 invalidate_page_bitmap(p);
660 }
661
36bdbe54 662 tb_invalidated_flag = 1;
59817ccb 663
fd6ce8f6 664 /* remove the TB from the hash list */
8a40a180 665 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
666 for(env = first_cpu; env != NULL; env = env->next_cpu) {
667 if (env->tb_jmp_cache[h] == tb)
668 env->tb_jmp_cache[h] = NULL;
669 }
d4e8164f
FB
670
671 /* suppress this TB from the two jump lists */
672 tb_jmp_remove(tb, 0);
673 tb_jmp_remove(tb, 1);
674
675 /* suppress any remaining jumps to this TB */
676 tb1 = tb->jmp_first;
677 for(;;) {
678 n1 = (long)tb1 & 3;
679 if (n1 == 2)
680 break;
681 tb1 = (TranslationBlock *)((long)tb1 & ~3);
682 tb2 = tb1->jmp_next[n1];
683 tb_reset_jump(tb1, n1);
684 tb1->jmp_next[n1] = NULL;
685 tb1 = tb2;
686 }
687 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 688
e3db7226 689 tb_phys_invalidate_count++;
9fa3e853
FB
690}
691
692static inline void set_bits(uint8_t *tab, int start, int len)
693{
694 int end, mask, end1;
695
696 end = start + len;
697 tab += start >> 3;
698 mask = 0xff << (start & 7);
699 if ((start & ~7) == (end & ~7)) {
700 if (start < end) {
701 mask &= ~(0xff << (end & 7));
702 *tab |= mask;
703 }
704 } else {
705 *tab++ |= mask;
706 start = (start + 8) & ~7;
707 end1 = end & ~7;
708 while (start < end1) {
709 *tab++ = 0xff;
710 start += 8;
711 }
712 if (start < end) {
713 mask = ~(0xff << (end & 7));
714 *tab |= mask;
715 }
716 }
717}
718
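/* Illustrative usage sketch, not part of the original file: set_bits()
   above marks the bit range [start, start + len) in a byte array, least
   significant bit first within each byte.  For example, marking eight
   bits starting at bit 3 leaves tab[0] == 0xf8 and tab[1] == 0x07: */
static void __attribute__((unused)) example_set_bits_usage(void)
{
    uint8_t tab[2] = { 0, 0 };
    set_bits(tab, 3, 8);    /* sets bits 3..10, i.e. [3, 11) */
}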
719static void build_page_bitmap(PageDesc *p)
720{
721 int n, tb_start, tb_end;
722 TranslationBlock *tb;
3b46e624 723
b2a7081a 724 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
725 if (!p->code_bitmap)
726 return;
9fa3e853
FB
727
728 tb = p->first_tb;
729 while (tb != NULL) {
730 n = (long)tb & 3;
731 tb = (TranslationBlock *)((long)tb & ~3);
732 /* NOTE: this is subtle as a TB may span two physical pages */
733 if (n == 0) {
734 /* NOTE: tb_end may be after the end of the page, but
735 it is not a problem */
736 tb_start = tb->pc & ~TARGET_PAGE_MASK;
737 tb_end = tb_start + tb->size;
738 if (tb_end > TARGET_PAGE_SIZE)
739 tb_end = TARGET_PAGE_SIZE;
740 } else {
741 tb_start = 0;
742 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
743 }
744 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
745 tb = tb->page_next[n];
746 }
747}
748
d720b93d
FB
749#ifdef TARGET_HAS_PRECISE_SMC
750
5fafdf24 751static void tb_gen_code(CPUState *env,
d720b93d
FB
752 target_ulong pc, target_ulong cs_base, int flags,
753 int cflags)
754{
755 TranslationBlock *tb;
756 uint8_t *tc_ptr;
757 target_ulong phys_pc, phys_page2, virt_page2;
758 int code_gen_size;
759
c27004ec
FB
760 phys_pc = get_phys_addr_code(env, pc);
761 tb = tb_alloc(pc);
d720b93d
FB
762 if (!tb) {
763 /* flush must be done */
764 tb_flush(env);
765 /* cannot fail at this point */
c27004ec 766 tb = tb_alloc(pc);
d720b93d
FB
767 }
768 tc_ptr = code_gen_ptr;
769 tb->tc_ptr = tc_ptr;
770 tb->cs_base = cs_base;
771 tb->flags = flags;
772 tb->cflags = cflags;
d07bde88 773 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 774 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 775
d720b93d 776 /* check next page if needed */
c27004ec 777 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 778 phys_page2 = -1;
c27004ec 779 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
780 phys_page2 = get_phys_addr_code(env, virt_page2);
781 }
782 tb_link_phys(tb, phys_pc, phys_page2);
783}
784#endif
3b46e624 785
9fa3e853
FB
786/* invalidate all TBs which intersect with the target physical page
787 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
788 the same physical page. 'is_cpu_write_access' should be true if called
789 from a real cpu write access: the virtual CPU will exit the current
790 TB if code is modified inside this TB. */
00f82b8a 791void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
d720b93d
FB
792 int is_cpu_write_access)
793{
794 int n, current_tb_modified, current_tb_not_found, current_flags;
d720b93d 795 CPUState *env = cpu_single_env;
9fa3e853 796 PageDesc *p;
ea1c1802 797 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
9fa3e853 798 target_ulong tb_start, tb_end;
d720b93d 799 target_ulong current_pc, current_cs_base;
9fa3e853
FB
800
801 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 802 if (!p)
9fa3e853 803 return;
5fafdf24 804 if (!p->code_bitmap &&
d720b93d
FB
805 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
806 is_cpu_write_access) {
9fa3e853
FB
807 /* build code bitmap */
808 build_page_bitmap(p);
809 }
810
811 /* we remove all the TBs in the range [start, end[ */
812 /* XXX: see if in some cases it could be faster to invalidate all the code */
d720b93d
FB
813 current_tb_not_found = is_cpu_write_access;
814 current_tb_modified = 0;
815 current_tb = NULL; /* avoid warning */
816 current_pc = 0; /* avoid warning */
817 current_cs_base = 0; /* avoid warning */
818 current_flags = 0; /* avoid warning */
9fa3e853
FB
819 tb = p->first_tb;
820 while (tb != NULL) {
821 n = (long)tb & 3;
822 tb = (TranslationBlock *)((long)tb & ~3);
823 tb_next = tb->page_next[n];
824 /* NOTE: this is subtle as a TB may span two physical pages */
825 if (n == 0) {
826 /* NOTE: tb_end may be after the end of the page, but
827 it is not a problem */
828 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
829 tb_end = tb_start + tb->size;
830 } else {
831 tb_start = tb->page_addr[1];
832 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
833 }
834 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
835#ifdef TARGET_HAS_PRECISE_SMC
836 if (current_tb_not_found) {
837 current_tb_not_found = 0;
838 current_tb = NULL;
839 if (env->mem_write_pc) {
840 /* now we have a real cpu fault */
841 current_tb = tb_find_pc(env->mem_write_pc);
842 }
843 }
844 if (current_tb == tb &&
845 !(current_tb->cflags & CF_SINGLE_INSN)) {
846 /* If we are modifying the current TB, we must stop
847 its execution. We could be more precise by checking
848 that the modification is after the current PC, but it
849 would require a specialized function to partially
850 restore the CPU state */
3b46e624 851
d720b93d 852 current_tb_modified = 1;
5fafdf24 853 cpu_restore_state(current_tb, env,
d720b93d
FB
854 env->mem_write_pc, NULL);
855#if defined(TARGET_I386)
856 current_flags = env->hflags;
857 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
858 current_cs_base = (target_ulong)env->segs[R_CS].base;
859 current_pc = current_cs_base + env->eip;
860#else
861#error unsupported CPU
862#endif
863 }
864#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
865 /* we need to do that to handle the case where a signal
866 occurs while doing tb_phys_invalidate() */
867 saved_tb = NULL;
868 if (env) {
869 saved_tb = env->current_tb;
870 env->current_tb = NULL;
871 }
9fa3e853 872 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
873 if (env) {
874 env->current_tb = saved_tb;
875 if (env->interrupt_request && env->current_tb)
876 cpu_interrupt(env, env->interrupt_request);
877 }
9fa3e853
FB
878 }
879 tb = tb_next;
880 }
881#if !defined(CONFIG_USER_ONLY)
882 /* if no code remaining, no need to continue to use slow writes */
883 if (!p->first_tb) {
884 invalidate_page_bitmap(p);
d720b93d
FB
885 if (is_cpu_write_access) {
886 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
887 }
888 }
889#endif
890#ifdef TARGET_HAS_PRECISE_SMC
891 if (current_tb_modified) {
892 /* we generate a block containing just the instruction
893 modifying the memory. It will ensure that it cannot modify
894 itself */
ea1c1802 895 env->current_tb = NULL;
5fafdf24 896 tb_gen_code(env, current_pc, current_cs_base, current_flags,
d720b93d
FB
897 CF_SINGLE_INSN);
898 cpu_resume_from_signal(env, NULL);
9fa3e853 899 }
fd6ce8f6 900#endif
9fa3e853 901}
fd6ce8f6 902
9fa3e853 903/* len must be <= 8 and start must be a multiple of len */
00f82b8a 904static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
9fa3e853
FB
905{
906 PageDesc *p;
907 int offset, b;
59817ccb 908#if 0
a4193c8a
FB
909 if (1) {
910 if (loglevel) {
5fafdf24
TS
911 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
912 cpu_single_env->mem_write_vaddr, len,
913 cpu_single_env->eip,
a4193c8a
FB
914 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
915 }
59817ccb
FB
916 }
917#endif
9fa3e853 918 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 919 if (!p)
9fa3e853
FB
920 return;
921 if (p->code_bitmap) {
922 offset = start & ~TARGET_PAGE_MASK;
923 b = p->code_bitmap[offset >> 3] >> (offset & 7);
924 if (b & ((1 << len) - 1))
925 goto do_invalidate;
926 } else {
927 do_invalidate:
d720b93d 928 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
929 }
930}
931
9fa3e853 932#if !defined(CONFIG_SOFTMMU)
00f82b8a 933static void tb_invalidate_phys_page(target_phys_addr_t addr,
d720b93d 934 unsigned long pc, void *puc)
9fa3e853 935{
d720b93d
FB
936 int n, current_flags, current_tb_modified;
937 target_ulong current_pc, current_cs_base;
9fa3e853 938 PageDesc *p;
d720b93d
FB
939 TranslationBlock *tb, *current_tb;
940#ifdef TARGET_HAS_PRECISE_SMC
941 CPUState *env = cpu_single_env;
942#endif
9fa3e853
FB
943
944 addr &= TARGET_PAGE_MASK;
945 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 946 if (!p)
9fa3e853
FB
947 return;
948 tb = p->first_tb;
d720b93d
FB
949 current_tb_modified = 0;
950 current_tb = NULL;
951 current_pc = 0; /* avoid warning */
952 current_cs_base = 0; /* avoid warning */
953 current_flags = 0; /* avoid warning */
954#ifdef TARGET_HAS_PRECISE_SMC
955 if (tb && pc != 0) {
956 current_tb = tb_find_pc(pc);
957 }
958#endif
9fa3e853
FB
959 while (tb != NULL) {
960 n = (long)tb & 3;
961 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
962#ifdef TARGET_HAS_PRECISE_SMC
963 if (current_tb == tb &&
964 !(current_tb->cflags & CF_SINGLE_INSN)) {
965 /* If we are modifying the current TB, we must stop
966 its execution. We could be more precise by checking
967 that the modification is after the current PC, but it
968 would require a specialized function to partially
969 restore the CPU state */
3b46e624 970
d720b93d
FB
971 current_tb_modified = 1;
972 cpu_restore_state(current_tb, env, pc, puc);
973#if defined(TARGET_I386)
974 current_flags = env->hflags;
975 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
976 current_cs_base = (target_ulong)env->segs[R_CS].base;
977 current_pc = current_cs_base + env->eip;
978#else
979#error unsupported CPU
980#endif
981 }
982#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
983 tb_phys_invalidate(tb, addr);
984 tb = tb->page_next[n];
985 }
fd6ce8f6 986 p->first_tb = NULL;
d720b93d
FB
987#ifdef TARGET_HAS_PRECISE_SMC
988 if (current_tb_modified) {
989 /* we generate a block containing just the instruction
990 modifying the memory. It will ensure that it cannot modify
991 itself */
ea1c1802 992 env->current_tb = NULL;
5fafdf24 993 tb_gen_code(env, current_pc, current_cs_base, current_flags,
d720b93d
FB
994 CF_SINGLE_INSN);
995 cpu_resume_from_signal(env, puc);
996 }
997#endif
fd6ce8f6 998}
9fa3e853 999#endif
fd6ce8f6
FB
1000
1001/* add the tb in the target page and protect it if necessary */
5fafdf24 1002static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 1003 unsigned int n, target_ulong page_addr)
fd6ce8f6
FB
1004{
1005 PageDesc *p;
9fa3e853
FB
1006 TranslationBlock *last_first_tb;
1007
1008 tb->page_addr[n] = page_addr;
3a7d929e 1009 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
9fa3e853
FB
1010 tb->page_next[n] = p->first_tb;
1011 last_first_tb = p->first_tb;
1012 p->first_tb = (TranslationBlock *)((long)tb | n);
1013 invalidate_page_bitmap(p);
fd6ce8f6 1014
107db443 1015#if defined(TARGET_HAS_SMC) || 1
d720b93d 1016
9fa3e853 1017#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1018 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1019 target_ulong addr;
1020 PageDesc *p2;
9fa3e853
FB
1021 int prot;
1022
fd6ce8f6
FB
1023 /* force the host page as non writable (writes will have a
1024 page fault + mprotect overhead) */
53a5960a 1025 page_addr &= qemu_host_page_mask;
fd6ce8f6 1026 prot = 0;
53a5960a
PB
1027 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1028 addr += TARGET_PAGE_SIZE) {
1029
1030 p2 = page_find (addr >> TARGET_PAGE_BITS);
1031 if (!p2)
1032 continue;
1033 prot |= p2->flags;
1034 p2->flags &= ~PAGE_WRITE;
1035 page_get_flags(addr);
1036 }
5fafdf24 1037 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1038 (prot & PAGE_BITS) & ~PAGE_WRITE);
1039#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1040 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1041 page_addr);
fd6ce8f6 1042#endif
fd6ce8f6 1043 }
9fa3e853
FB
1044#else
1045 /* if some code is already present, then the pages are already
1046 protected. So we handle the case where only the first TB is
1047 allocated in a physical page */
1048 if (!last_first_tb) {
6a00d601 1049 tlb_protect_code(page_addr);
9fa3e853
FB
1050 }
1051#endif
d720b93d
FB
1052
1053#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1054}
1055
1056/* Allocate a new translation block. Flush the translation buffer if
1057 too many translation blocks or too much generated code. */
c27004ec 1058TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
1059{
1060 TranslationBlock *tb;
fd6ce8f6 1061
26a5f13b
FB
1062 if (nb_tbs >= code_gen_max_blocks ||
1063 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
d4e8164f 1064 return NULL;
fd6ce8f6
FB
1065 tb = &tbs[nb_tbs++];
1066 tb->pc = pc;
b448f2f3 1067 tb->cflags = 0;
d4e8164f
FB
1068 return tb;
1069}
1070
9fa3e853
FB
1071/* add a new TB and link it to the physical page tables. phys_page2 is
1072 (-1) to indicate that only one page contains the TB. */
5fafdf24 1073void tb_link_phys(TranslationBlock *tb,
9fa3e853 1074 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 1075{
9fa3e853
FB
1076 unsigned int h;
1077 TranslationBlock **ptb;
1078
c8a706fe
PB
1079 /* Grab the mmap lock to stop another thread invalidating this TB
1080 before we are done. */
1081 mmap_lock();
9fa3e853
FB
1082 /* add in the physical hash table */
1083 h = tb_phys_hash_func(phys_pc);
1084 ptb = &tb_phys_hash[h];
1085 tb->phys_hash_next = *ptb;
1086 *ptb = tb;
fd6ce8f6
FB
1087
1088 /* add in the page list */
9fa3e853
FB
1089 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1090 if (phys_page2 != -1)
1091 tb_alloc_page(tb, 1, phys_page2);
1092 else
1093 tb->page_addr[1] = -1;
9fa3e853 1094
d4e8164f
FB
1095 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1096 tb->jmp_next[0] = NULL;
1097 tb->jmp_next[1] = NULL;
1098
1099 /* init original jump addresses */
1100 if (tb->tb_next_offset[0] != 0xffff)
1101 tb_reset_jump(tb, 0);
1102 if (tb->tb_next_offset[1] != 0xffff)
1103 tb_reset_jump(tb, 1);
8a40a180
FB
1104
1105#ifdef DEBUG_TB_CHECK
1106 tb_page_check();
1107#endif
c8a706fe 1108 mmap_unlock();
fd6ce8f6
FB
1109}
1110
9fa3e853
FB
1111/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1112 tb[1].tc_ptr. Return NULL if not found */
1113TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1114{
9fa3e853
FB
1115 int m_min, m_max, m;
1116 unsigned long v;
1117 TranslationBlock *tb;
a513fe19
FB
1118
1119 if (nb_tbs <= 0)
1120 return NULL;
1121 if (tc_ptr < (unsigned long)code_gen_buffer ||
1122 tc_ptr >= (unsigned long)code_gen_ptr)
1123 return NULL;
1124 /* binary search (cf Knuth) */
1125 m_min = 0;
1126 m_max = nb_tbs - 1;
1127 while (m_min <= m_max) {
1128 m = (m_min + m_max) >> 1;
1129 tb = &tbs[m];
1130 v = (unsigned long)tb->tc_ptr;
1131 if (v == tc_ptr)
1132 return tb;
1133 else if (tc_ptr < v) {
1134 m_max = m - 1;
1135 } else {
1136 m_min = m + 1;
1137 }
5fafdf24 1138 }
a513fe19
FB
1139 return &tbs[m_max];
1140}
7501267e 1141
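/* Illustrative note, not part of the original file: tb_find_pc() relies on
   tbs[] being filled in allocation order while code_gen_ptr only grows, so
   tc_ptr increases monotonically across tbs[0..nb_tbs-1] and the binary
   search above is valid.  Callers typically use it to map a host PC inside
   the code buffer back to its TB, e.g.
       tb = tb_find_pc(searched_pc);
       if (tb)
           cpu_restore_state(tb, env, searched_pc, NULL);
   as done in tb_invalidate_phys_page_range() earlier in this file. */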
ea041c0e
FB
1142static void tb_reset_jump_recursive(TranslationBlock *tb);
1143
1144static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1145{
1146 TranslationBlock *tb1, *tb_next, **ptb;
1147 unsigned int n1;
1148
1149 tb1 = tb->jmp_next[n];
1150 if (tb1 != NULL) {
1151 /* find head of list */
1152 for(;;) {
1153 n1 = (long)tb1 & 3;
1154 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1155 if (n1 == 2)
1156 break;
1157 tb1 = tb1->jmp_next[n1];
1158 }
1159 /* we are now sure now that tb jumps to tb1 */
1160 tb_next = tb1;
1161
1162 /* remove tb from the jmp_first list */
1163 ptb = &tb_next->jmp_first;
1164 for(;;) {
1165 tb1 = *ptb;
1166 n1 = (long)tb1 & 3;
1167 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1168 if (n1 == n && tb1 == tb)
1169 break;
1170 ptb = &tb1->jmp_next[n1];
1171 }
1172 *ptb = tb->jmp_next[n];
1173 tb->jmp_next[n] = NULL;
3b46e624 1174
ea041c0e
FB
1175 /* suppress the jump to next tb in generated code */
1176 tb_reset_jump(tb, n);
1177
0124311e 1178 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1179 tb_reset_jump_recursive(tb_next);
1180 }
1181}
1182
1183static void tb_reset_jump_recursive(TranslationBlock *tb)
1184{
1185 tb_reset_jump_recursive2(tb, 0);
1186 tb_reset_jump_recursive2(tb, 1);
1187}
1188
1fddef4b 1189#if defined(TARGET_HAS_ICE)
d720b93d
FB
1190static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1191{
9b3c35e0
JM
1192 target_phys_addr_t addr;
1193 target_ulong pd;
c2f07f81
PB
1194 ram_addr_t ram_addr;
1195 PhysPageDesc *p;
d720b93d 1196
c2f07f81
PB
1197 addr = cpu_get_phys_page_debug(env, pc);
1198 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1199 if (!p) {
1200 pd = IO_MEM_UNASSIGNED;
1201 } else {
1202 pd = p->phys_offset;
1203 }
1204 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1205 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1206}
c27004ec 1207#endif
d720b93d 1208
6658ffb8 1209/* Add a watchpoint. */
0f459d16 1210int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
6658ffb8
PB
1211{
1212 int i;
1213
1214 for (i = 0; i < env->nb_watchpoints; i++) {
1215 if (addr == env->watchpoint[i].vaddr)
1216 return 0;
1217 }
1218 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1219 return -1;
1220
1221 i = env->nb_watchpoints++;
1222 env->watchpoint[i].vaddr = addr;
0f459d16 1223 env->watchpoint[i].type = type;
6658ffb8
PB
1224 tlb_flush_page(env, addr);
1225 /* FIXME: This flush is needed because of the hack to make memory ops
1226 terminate the TB. It can be removed once the proper IO trap and
1227 re-execute bits are in. */
1228 tb_flush(env);
1229 return i;
1230}
1231
1232/* Remove a watchpoint. */
1233int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1234{
1235 int i;
1236
1237 for (i = 0; i < env->nb_watchpoints; i++) {
1238 if (addr == env->watchpoint[i].vaddr) {
1239 env->nb_watchpoints--;
1240 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1241 tlb_flush_page(env, addr);
1242 return 0;
1243 }
1244 }
1245 return -1;
1246}
1247
7d03f82f
EI
1248/* Remove all watchpoints. */
1249void cpu_watchpoint_remove_all(CPUState *env) {
1250 int i;
1251
1252 for (i = 0; i < env->nb_watchpoints; i++) {
1253 tlb_flush_page(env, env->watchpoint[i].vaddr);
1254 }
1255 env->nb_watchpoints = 0;
1256}
1257
c33a346e
FB
1258/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1259 breakpoint is reached */
2e12669a 1260int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
4c3a88a2 1261{
1fddef4b 1262#if defined(TARGET_HAS_ICE)
4c3a88a2 1263 int i;
3b46e624 1264
4c3a88a2
FB
1265 for(i = 0; i < env->nb_breakpoints; i++) {
1266 if (env->breakpoints[i] == pc)
1267 return 0;
1268 }
1269
1270 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1271 return -1;
1272 env->breakpoints[env->nb_breakpoints++] = pc;
3b46e624 1273
d720b93d 1274 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1275 return 0;
1276#else
1277 return -1;
1278#endif
1279}
1280
7d03f82f
EI
1281/* remove all breakpoints */
1282void cpu_breakpoint_remove_all(CPUState *env) {
1283#if defined(TARGET_HAS_ICE)
1284 int i;
1285 for(i = 0; i < env->nb_breakpoints; i++) {
1286 breakpoint_invalidate(env, env->breakpoints[i]);
1287 }
1288 env->nb_breakpoints = 0;
1289#endif
1290}
1291
4c3a88a2 1292/* remove a breakpoint */
2e12669a 1293int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
4c3a88a2 1294{
1fddef4b 1295#if defined(TARGET_HAS_ICE)
4c3a88a2
FB
1296 int i;
1297 for(i = 0; i < env->nb_breakpoints; i++) {
1298 if (env->breakpoints[i] == pc)
1299 goto found;
1300 }
1301 return -1;
1302 found:
4c3a88a2 1303 env->nb_breakpoints--;
1fddef4b
FB
1304 if (i < env->nb_breakpoints)
1305 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
d720b93d
FB
1306
1307 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1308 return 0;
1309#else
1310 return -1;
1311#endif
1312}
1313
c33a346e
FB
1314/* enable or disable single step mode. EXCP_DEBUG is returned by the
1315 CPU loop after each instruction */
1316void cpu_single_step(CPUState *env, int enabled)
1317{
1fddef4b 1318#if defined(TARGET_HAS_ICE)
c33a346e
FB
1319 if (env->singlestep_enabled != enabled) {
1320 env->singlestep_enabled = enabled;
 1321 /* must flush all the translated code to avoid inconsistencies */
9fa3e853 1322 /* XXX: only flush what is necessary */
0124311e 1323 tb_flush(env);
c33a346e
FB
1324 }
1325#endif
1326}
1327
34865134
FB
1328/* enable or disable low levels log */
1329void cpu_set_log(int log_flags)
1330{
1331 loglevel = log_flags;
1332 if (loglevel && !logfile) {
11fcfab4 1333 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1334 if (!logfile) {
1335 perror(logfilename);
1336 _exit(1);
1337 }
9fa3e853
FB
1338#if !defined(CONFIG_SOFTMMU)
1339 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1340 {
1341 static uint8_t logfile_buf[4096];
1342 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1343 }
1344#else
34865134 1345 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1346#endif
e735b91c
PB
1347 log_append = 1;
1348 }
1349 if (!loglevel && logfile) {
1350 fclose(logfile);
1351 logfile = NULL;
34865134
FB
1352 }
1353}
1354
1355void cpu_set_log_filename(const char *filename)
1356{
1357 logfilename = strdup(filename);
e735b91c
PB
1358 if (logfile) {
1359 fclose(logfile);
1360 logfile = NULL;
1361 }
1362 cpu_set_log(loglevel);
34865134 1363}
c33a346e 1364
0124311e 1365/* mask must never be zero, except for A20 change call */
68a79315 1366void cpu_interrupt(CPUState *env, int mask)
ea041c0e 1367{
d5975363 1368#if !defined(USE_NPTL)
ea041c0e 1369 TranslationBlock *tb;
15a51156 1370 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
d5975363 1371#endif
59817ccb 1372
d5975363
PB
1373 /* FIXME: This is probably not threadsafe. A different thread could
 1374 be in the middle of a read-modify-write operation. */
68a79315 1375 env->interrupt_request |= mask;
d5975363
PB
1376#if defined(USE_NPTL)
1377 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1378 problem and hope the cpu will stop of its own accord. For userspace
1379 emulation this often isn't actually as bad as it sounds. Often
1380 signals are used primarily to interrupt blocking syscalls. */
1381#else
ea041c0e
FB
1382 /* if the cpu is currently executing code, we must unlink it and
1383 all the potentially executing TB */
1384 tb = env->current_tb;
ee8b7021
FB
1385 if (tb && !testandset(&interrupt_lock)) {
1386 env->current_tb = NULL;
ea041c0e 1387 tb_reset_jump_recursive(tb);
15a51156 1388 resetlock(&interrupt_lock);
ea041c0e 1389 }
d5975363 1390#endif
ea041c0e
FB
1391}
1392
b54ad049
FB
1393void cpu_reset_interrupt(CPUState *env, int mask)
1394{
1395 env->interrupt_request &= ~mask;
1396}
1397
f193c797 1398CPULogItem cpu_log_items[] = {
5fafdf24 1399 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1400 "show generated host assembly code for each compiled TB" },
1401 { CPU_LOG_TB_IN_ASM, "in_asm",
1402 "show target assembly code for each compiled TB" },
5fafdf24 1403 { CPU_LOG_TB_OP, "op",
57fec1fe 1404 "show micro ops for each compiled TB" },
f193c797 1405 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1406 "show micro ops "
1407#ifdef TARGET_I386
1408 "before eflags optimization and "
f193c797 1409#endif
e01a1157 1410 "after liveness analysis" },
f193c797
FB
1411 { CPU_LOG_INT, "int",
1412 "show interrupts/exceptions in short format" },
1413 { CPU_LOG_EXEC, "exec",
1414 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1415 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1416 "show CPU state before block translation" },
f193c797
FB
1417#ifdef TARGET_I386
1418 { CPU_LOG_PCALL, "pcall",
1419 "show protected mode far calls/returns/exceptions" },
1420#endif
8e3a9fd2 1421#ifdef DEBUG_IOPORT
fd872598
FB
1422 { CPU_LOG_IOPORT, "ioport",
1423 "show all i/o ports accesses" },
8e3a9fd2 1424#endif
f193c797
FB
1425 { 0, NULL, NULL },
1426};
1427
1428static int cmp1(const char *s1, int n, const char *s2)
1429{
1430 if (strlen(s2) != n)
1431 return 0;
1432 return memcmp(s1, s2, n) == 0;
1433}
3b46e624 1434
f193c797
FB
1435/* takes a comma separated list of log masks. Return 0 if error. */
1436int cpu_str_to_log_mask(const char *str)
1437{
1438 CPULogItem *item;
1439 int mask;
1440 const char *p, *p1;
1441
1442 p = str;
1443 mask = 0;
1444 for(;;) {
1445 p1 = strchr(p, ',');
1446 if (!p1)
1447 p1 = p + strlen(p);
8e3a9fd2
FB
1448 if(cmp1(p,p1-p,"all")) {
1449 for(item = cpu_log_items; item->mask != 0; item++) {
1450 mask |= item->mask;
1451 }
1452 } else {
f193c797
FB
1453 for(item = cpu_log_items; item->mask != 0; item++) {
1454 if (cmp1(p, p1 - p, item->name))
1455 goto found;
1456 }
1457 return 0;
8e3a9fd2 1458 }
f193c797
FB
1459 found:
1460 mask |= item->mask;
1461 if (*p1 != ',')
1462 break;
1463 p = p1 + 1;
1464 }
1465 return mask;
1466}
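/* Illustrative usage sketch, not part of the original file: the parser
   above accepts a comma separated list of the names in cpu_log_items[]
   (or "all") and returns 0 on error, so a typical command line handler
   might look like this: */
static void __attribute__((unused)) example_set_log_from_option(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);    /* e.g. "in_asm,exec" */
    if (mask == 0) {
        fprintf(stderr, "unknown log item in '%s'\n", arg);
        return;
    }
    cpu_set_log(mask);
}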
ea041c0e 1467
7501267e
FB
1468void cpu_abort(CPUState *env, const char *fmt, ...)
1469{
1470 va_list ap;
493ae1f0 1471 va_list ap2;
7501267e
FB
1472
1473 va_start(ap, fmt);
493ae1f0 1474 va_copy(ap2, ap);
7501267e
FB
1475 fprintf(stderr, "qemu: fatal: ");
1476 vfprintf(stderr, fmt, ap);
1477 fprintf(stderr, "\n");
1478#ifdef TARGET_I386
7fe48483
FB
1479 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1480#else
1481 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1482#endif
924edcae 1483 if (logfile) {
f9373291 1484 fprintf(logfile, "qemu: fatal: ");
493ae1f0 1485 vfprintf(logfile, fmt, ap2);
f9373291
JM
1486 fprintf(logfile, "\n");
1487#ifdef TARGET_I386
1488 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1489#else
1490 cpu_dump_state(env, logfile, fprintf, 0);
1491#endif
924edcae
AZ
1492 fflush(logfile);
1493 fclose(logfile);
1494 }
493ae1f0 1495 va_end(ap2);
f9373291 1496 va_end(ap);
7501267e
FB
1497 abort();
1498}
1499
c5be9f08
TS
1500CPUState *cpu_copy(CPUState *env)
1501{
01ba9816 1502 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1503 /* preserve chaining and index */
1504 CPUState *next_cpu = new_env->next_cpu;
1505 int cpu_index = new_env->cpu_index;
1506 memcpy(new_env, env, sizeof(CPUState));
1507 new_env->next_cpu = next_cpu;
1508 new_env->cpu_index = cpu_index;
1509 return new_env;
1510}
1511
0124311e
FB
1512#if !defined(CONFIG_USER_ONLY)
1513
5c751e99
EI
1514static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1515{
1516 unsigned int i;
1517
1518 /* Discard jump cache entries for any tb which might potentially
1519 overlap the flushed page. */
1520 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1521 memset (&env->tb_jmp_cache[i], 0,
1522 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1523
1524 i = tb_jmp_cache_hash_page(addr);
1525 memset (&env->tb_jmp_cache[i], 0,
1526 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1527}
1528
ee8b7021
FB
1529/* NOTE: if flush_global is true, also flush global entries (not
1530 implemented yet) */
1531void tlb_flush(CPUState *env, int flush_global)
33417e70 1532{
33417e70 1533 int i;
0124311e 1534
9fa3e853
FB
1535#if defined(DEBUG_TLB)
1536 printf("tlb_flush:\n");
1537#endif
0124311e
FB
1538 /* must reset current TB so that interrupts cannot modify the
1539 links while we are modifying them */
1540 env->current_tb = NULL;
1541
33417e70 1542 for(i = 0; i < CPU_TLB_SIZE; i++) {
84b7b8e7
FB
1543 env->tlb_table[0][i].addr_read = -1;
1544 env->tlb_table[0][i].addr_write = -1;
1545 env->tlb_table[0][i].addr_code = -1;
1546 env->tlb_table[1][i].addr_read = -1;
1547 env->tlb_table[1][i].addr_write = -1;
1548 env->tlb_table[1][i].addr_code = -1;
6fa4cea9
JM
1549#if (NB_MMU_MODES >= 3)
1550 env->tlb_table[2][i].addr_read = -1;
1551 env->tlb_table[2][i].addr_write = -1;
1552 env->tlb_table[2][i].addr_code = -1;
1553#if (NB_MMU_MODES == 4)
1554 env->tlb_table[3][i].addr_read = -1;
1555 env->tlb_table[3][i].addr_write = -1;
1556 env->tlb_table[3][i].addr_code = -1;
1557#endif
1558#endif
33417e70 1559 }
9fa3e853 1560
8a40a180 1561 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1562
0a962c02
FB
1563#ifdef USE_KQEMU
1564 if (env->kqemu_enabled) {
1565 kqemu_flush(env, flush_global);
1566 }
9fa3e853 1567#endif
e3db7226 1568 tlb_flush_count++;
33417e70
FB
1569}
1570
274da6b2 1571static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1572{
5fafdf24 1573 if (addr == (tlb_entry->addr_read &
84b7b8e7 1574 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1575 addr == (tlb_entry->addr_write &
84b7b8e7 1576 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1577 addr == (tlb_entry->addr_code &
84b7b8e7
FB
1578 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1579 tlb_entry->addr_read = -1;
1580 tlb_entry->addr_write = -1;
1581 tlb_entry->addr_code = -1;
1582 }
61382a50
FB
1583}
1584
2e12669a 1585void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1586{
8a40a180 1587 int i;
0124311e 1588
9fa3e853 1589#if defined(DEBUG_TLB)
108c49b8 1590 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1591#endif
0124311e
FB
1592 /* must reset current TB so that interrupts cannot modify the
1593 links while we are modifying them */
1594 env->current_tb = NULL;
61382a50
FB
1595
1596 addr &= TARGET_PAGE_MASK;
1597 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1598 tlb_flush_entry(&env->tlb_table[0][i], addr);
1599 tlb_flush_entry(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1600#if (NB_MMU_MODES >= 3)
1601 tlb_flush_entry(&env->tlb_table[2][i], addr);
1602#if (NB_MMU_MODES == 4)
1603 tlb_flush_entry(&env->tlb_table[3][i], addr);
1604#endif
1605#endif
0124311e 1606
5c751e99 1607 tlb_flush_jmp_cache(env, addr);
9fa3e853 1608
0a962c02
FB
1609#ifdef USE_KQEMU
1610 if (env->kqemu_enabled) {
1611 kqemu_flush_page(env, addr);
1612 }
1613#endif
9fa3e853
FB
1614}
1615
9fa3e853
FB
1616/* update the TLBs so that writes to code in the virtual page 'addr'
1617 can be detected */
6a00d601 1618static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1619{
5fafdf24 1620 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1621 ram_addr + TARGET_PAGE_SIZE,
1622 CODE_DIRTY_FLAG);
9fa3e853
FB
1623}
1624
9fa3e853 1625/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1626 tested for self modifying code */
5fafdf24 1627static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1628 target_ulong vaddr)
9fa3e853 1629{
3a7d929e 1630 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1631}
1632
5fafdf24 1633static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1634 unsigned long start, unsigned long length)
1635{
1636 unsigned long addr;
84b7b8e7
FB
1637 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1638 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1639 if ((addr - start) < length) {
0f459d16 1640 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1641 }
1642 }
1643}
1644
3a7d929e 1645void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1646 int dirty_flags)
1ccde1cb
FB
1647{
1648 CPUState *env;
4f2ac237 1649 unsigned long length, start1;
0a962c02
FB
1650 int i, mask, len;
1651 uint8_t *p;
1ccde1cb
FB
1652
1653 start &= TARGET_PAGE_MASK;
1654 end = TARGET_PAGE_ALIGN(end);
1655
1656 length = end - start;
1657 if (length == 0)
1658 return;
0a962c02 1659 len = length >> TARGET_PAGE_BITS;
3a7d929e 1660#ifdef USE_KQEMU
6a00d601
FB
1661 /* XXX: should not depend on cpu context */
1662 env = first_cpu;
3a7d929e 1663 if (env->kqemu_enabled) {
f23db169
FB
1664 ram_addr_t addr;
1665 addr = start;
1666 for(i = 0; i < len; i++) {
1667 kqemu_set_notdirty(env, addr);
1668 addr += TARGET_PAGE_SIZE;
1669 }
3a7d929e
FB
1670 }
1671#endif
f23db169
FB
1672 mask = ~dirty_flags;
1673 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1674 for(i = 0; i < len; i++)
1675 p[i] &= mask;
1676
1ccde1cb
FB
1677 /* we modify the TLB cache so that the dirty bit will be set again
1678 when accessing the range */
59817ccb 1679 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1680 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1681 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1682 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1683 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1684 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1685#if (NB_MMU_MODES >= 3)
1686 for(i = 0; i < CPU_TLB_SIZE; i++)
1687 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1688#if (NB_MMU_MODES == 4)
1689 for(i = 0; i < CPU_TLB_SIZE; i++)
1690 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1691#endif
1692#endif
6a00d601 1693 }
1ccde1cb
FB
1694}
1695
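/* Illustrative sketch, not part of the original file: phys_ram_dirty keeps
   one byte of dirty flags per target page, indexed by the page number
   (ram_addr >> TARGET_PAGE_BITS); the loop above clears flags over a
   range, and a single page is marked the same way tlb_unprotect_code_phys()
   does for CODE_DIRTY_FLAG: */
static void __attribute__((unused))
example_mark_page_dirty(ram_addr_t ram_addr, int dirty_flags)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= dirty_flags;
}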
3a7d929e
FB
1696static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1697{
1698 ram_addr_t ram_addr;
1699
84b7b8e7 1700 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1701 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1702 tlb_entry->addend - (unsigned long)phys_ram_base;
1703 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 1704 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
1705 }
1706 }
1707}
1708
1709/* update the TLB according to the current state of the dirty bits */
1710void cpu_tlb_update_dirty(CPUState *env)
1711{
1712 int i;
1713 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1714 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1715 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1716 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1717#if (NB_MMU_MODES >= 3)
1718 for(i = 0; i < CPU_TLB_SIZE; i++)
1719 tlb_update_dirty(&env->tlb_table[2][i]);
1720#if (NB_MMU_MODES == 4)
1721 for(i = 0; i < CPU_TLB_SIZE; i++)
1722 tlb_update_dirty(&env->tlb_table[3][i]);
1723#endif
1724#endif
3a7d929e
FB
1725}
1726
0f459d16 1727static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 1728{
0f459d16
PB
1729 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1730 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
1731}
1732
0f459d16
PB
1733/* update the TLB corresponding to virtual page vaddr
1734 so that it is no longer dirty */
1735static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 1736{
1ccde1cb
FB
1737 int i;
1738
0f459d16 1739 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 1740 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
0f459d16
PB
1741 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1742 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
6fa4cea9 1743#if (NB_MMU_MODES >= 3)
0f459d16 1744 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
6fa4cea9 1745#if (NB_MMU_MODES == 4)
0f459d16 1746 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
6fa4cea9
JM
1747#endif
1748#endif
9fa3e853
FB
1749}
1750
59817ccb
FB
1751/* add a new TLB entry. At most one entry for a given virtual address
1752 is permitted. Return 0 if OK or 2 if the page could not be mapped
1753 (can only happen in non SOFTMMU mode for I/O pages or pages
1754 conflicting with the host address space). */
5fafdf24
TS
1755int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1756 target_phys_addr_t paddr, int prot,
6ebbf390 1757 int mmu_idx, int is_softmmu)
9fa3e853 1758{
92e873b9 1759 PhysPageDesc *p;
4f2ac237 1760 unsigned long pd;
9fa3e853 1761 unsigned int index;
4f2ac237 1762 target_ulong address;
0f459d16 1763 target_ulong code_address;
108c49b8 1764 target_phys_addr_t addend;
9fa3e853 1765 int ret;
84b7b8e7 1766 CPUTLBEntry *te;
6658ffb8 1767 int i;
0f459d16 1768 target_phys_addr_t iotlb;
9fa3e853 1769
92e873b9 1770 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1771 if (!p) {
1772 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1773 } else {
1774 pd = p->phys_offset;
9fa3e853
FB
1775 }
1776#if defined(DEBUG_TLB)
6ebbf390
JM
1777 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1778 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1779#endif
1780
1781 ret = 0;
0f459d16
PB
1782 address = vaddr;
1783 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1784 /* IO memory case (romd handled later) */
1785 address |= TLB_MMIO;
1786 }
1787 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1788 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1789 /* Normal RAM. */
1790 iotlb = pd & TARGET_PAGE_MASK;
1791 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1792 iotlb |= IO_MEM_NOTDIRTY;
1793 else
1794 iotlb |= IO_MEM_ROM;
1795 } else {
1796 /* IO handlers are currently passed a physical address.
1797 It would be nice to pass an offset from the base address
1798 of that region. This would avoid having to special case RAM,
1799 and avoid full address decoding in every device.
1800 We can't use the high bits of pd for this because
1801 IO_MEM_ROMD uses these as a ram address. */
1802 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1803 }
1804
1805 code_address = address;
1806 /* Make accesses to pages with watchpoints go via the
1807 watchpoint trap routines. */
1808 for (i = 0; i < env->nb_watchpoints; i++) {
1809 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1810 iotlb = io_mem_watch + paddr;
1811 /* TODO: The memory case can be optimized by not trapping
1812 reads of pages with a write breakpoint. */
1813 address |= TLB_MMIO;
6658ffb8 1814 }
0f459d16 1815 }
d79acba4 1816
0f459d16
PB
1817 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1818 env->iotlb[mmu_idx][index] = iotlb - vaddr;
1819 te = &env->tlb_table[mmu_idx][index];
1820 te->addend = addend - vaddr;
1821 if (prot & PAGE_READ) {
1822 te->addr_read = address;
1823 } else {
1824 te->addr_read = -1;
1825 }
5c751e99 1826
0f459d16
PB
1827 if (prot & PAGE_EXEC) {
1828 te->addr_code = code_address;
1829 } else {
1830 te->addr_code = -1;
1831 }
1832 if (prot & PAGE_WRITE) {
1833 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1834 (pd & IO_MEM_ROMD)) {
1835 /* Write access calls the I/O callback. */
1836 te->addr_write = address | TLB_MMIO;
1837 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1838 !cpu_physical_memory_is_dirty(pd)) {
1839 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 1840 } else {
0f459d16 1841 te->addr_write = address;
9fa3e853 1842 }
0f459d16
PB
1843 } else {
1844 te->addr_write = -1;
9fa3e853 1845 }
9fa3e853
FB
1846 return ret;
1847}
1848
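What the fields filled in by tlb_set_page_exec() buy at access time, as a hedged standalone sketch: when the tag stored for the page matches and carries no flag bits, the host pointer is simply the guest virtual address plus the per-entry addend; any flag bit (MMIO, not-dirty) makes the comparison fail and sends the access to a slow path. The structure and helpers below are simplified stand-ins, not the real softmmu template:

/* Illustrative fast/slow path split; not the actual softmmu template. */
#include <stdint.h>
#include <stdio.h>

#define SK_PAGE_BITS 12
#define SK_PAGE_MASK (~((uintptr_t)(1u << SK_PAGE_BITS) - 1))
#define SK_TLB_SIZE  256

struct sk_entry {
    uintptr_t addr_read;   /* page tag, possibly with low flag bits set */
    uintptr_t addend;      /* host base minus guest vaddr for this page */
};

static struct sk_entry tlb[SK_TLB_SIZE];
static uint8_t guest_ram[1 << SK_PAGE_BITS];

static uint8_t ldub_slow(uintptr_t vaddr)
{
    /* stand-in for the I/O and refill path */
    printf("slow path for 0x%lx\n", (unsigned long)vaddr);
    return 0;
}

static uint8_t sk_ldub(uintptr_t vaddr)
{
    unsigned idx = (vaddr >> SK_PAGE_BITS) & (SK_TLB_SIZE - 1);
    struct sk_entry *e = &tlb[idx];

    if (e->addr_read == (vaddr & SK_PAGE_MASK))   /* tag hit, no flag bits */
        return *(uint8_t *)(vaddr + e->addend);   /* direct host access */
    return ldub_slow(vaddr);
}

int main(void)
{
    uintptr_t vaddr = 0x4000;                     /* one guest page */
    unsigned idx = (vaddr >> SK_PAGE_BITS) & (SK_TLB_SIZE - 1);

    guest_ram[0x10] = 0xab;
    tlb[idx].addr_read = vaddr & SK_PAGE_MASK;
    tlb[idx].addend = (uintptr_t)guest_ram - vaddr;   /* as in te->addend */
    printf("0x%02x\n", sk_ldub(vaddr + 0x10));        /* fast path: 0xab */
    printf("0x%02x\n", sk_ldub(0x9000));              /* miss: slow path */
    return 0;
}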
0124311e
FB
1849#else
1850
ee8b7021 1851void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1852{
1853}
1854
2e12669a 1855void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1856{
1857}
1858
5fafdf24
TS
1859int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1860 target_phys_addr_t paddr, int prot,
6ebbf390 1861 int mmu_idx, int is_softmmu)
9fa3e853
FB
1862{
1863 return 0;
1864}
0124311e 1865
9fa3e853
FB
1866/* dump memory mappings */
1867void page_dump(FILE *f)
33417e70 1868{
9fa3e853
FB
1869 unsigned long start, end;
1870 int i, j, prot, prot1;
1871 PageDesc *p;
33417e70 1872
9fa3e853
FB
1873 fprintf(f, "%-8s %-8s %-8s %s\n",
1874 "start", "end", "size", "prot");
1875 start = -1;
1876 end = -1;
1877 prot = 0;
1878 for(i = 0; i <= L1_SIZE; i++) {
1879 if (i < L1_SIZE)
1880 p = l1_map[i];
1881 else
1882 p = NULL;
1883 for(j = 0;j < L2_SIZE; j++) {
1884 if (!p)
1885 prot1 = 0;
1886 else
1887 prot1 = p[j].flags;
1888 if (prot1 != prot) {
1889 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1890 if (start != -1) {
1891 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 1892 start, end, end - start,
9fa3e853
FB
1893 prot & PAGE_READ ? 'r' : '-',
1894 prot & PAGE_WRITE ? 'w' : '-',
1895 prot & PAGE_EXEC ? 'x' : '-');
1896 }
1897 if (prot1 != 0)
1898 start = end;
1899 else
1900 start = -1;
1901 prot = prot1;
1902 }
1903 if (!p)
1904 break;
1905 }
33417e70 1906 }
33417e70
FB
1907}
1908
53a5960a 1909int page_get_flags(target_ulong address)
33417e70 1910{
9fa3e853
FB
1911 PageDesc *p;
1912
1913 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1914 if (!p)
9fa3e853
FB
1915 return 0;
1916 return p->flags;
1917}
1918
1919/* modify the flags of a page and invalidate the code if
1920 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1921 depending on PAGE_WRITE */
53a5960a 1922void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1923{
1924 PageDesc *p;
53a5960a 1925 target_ulong addr;
9fa3e853 1926
c8a706fe 1927 /* mmap_lock should already be held. */
9fa3e853
FB
1928 start = start & TARGET_PAGE_MASK;
1929 end = TARGET_PAGE_ALIGN(end);
1930 if (flags & PAGE_WRITE)
1931 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
1932 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1933 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
17e2377a
PB
1934 /* We may be called for host regions that are outside guest
1935 address space. */
1936 if (!p)
1937 return;
9fa3e853
FB
1938 /* if the write protection is set, then we invalidate the code
1939 inside */
5fafdf24 1940 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
1941 (flags & PAGE_WRITE) &&
1942 p->first_tb) {
d720b93d 1943 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1944 }
1945 p->flags = flags;
1946 }
33417e70
FB
1947}
1948
3d97b40b
TS
1949int page_check_range(target_ulong start, target_ulong len, int flags)
1950{
1951 PageDesc *p;
1952 target_ulong end;
1953 target_ulong addr;
1954
1955 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1956 start = start & TARGET_PAGE_MASK;
1957
1958 if( end < start )
1959 /* we've wrapped around */
1960 return -1;
1961 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1962 p = page_find(addr >> TARGET_PAGE_BITS);
1963 if( !p )
1964 return -1;
1965 if( !(p->flags & PAGE_VALID) )
1966 return -1;
1967
dae3270c 1968 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 1969 return -1;
dae3270c
FB
1970 if (flags & PAGE_WRITE) {
1971 if (!(p->flags & PAGE_WRITE_ORG))
1972 return -1;
1973 /* unprotect the page if it was put read-only because it
1974 contains translated code */
1975 if (!(p->flags & PAGE_WRITE)) {
1976 if (!page_unprotect(addr, 0, NULL))
1977 return -1;
1978 }
1979 return 0;
1980 }
3d97b40b
TS
1981 }
1982 return 0;
1983}
1984
9fa3e853
FB
1985/* called from signal handler: invalidate the code and unprotect the
1986 page. Return TRUE if the fault was successfully handled. */
53a5960a 1987int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
1988{
1989 unsigned int page_index, prot, pindex;
1990 PageDesc *p, *p1;
53a5960a 1991 target_ulong host_start, host_end, addr;
9fa3e853 1992
c8a706fe
PB
1993 /* Technically this isn't safe inside a signal handler. However we
1994 know this only ever happens in a synchronous SEGV handler, so in
1995 practice it seems to be ok. */
1996 mmap_lock();
1997
83fb7adf 1998 host_start = address & qemu_host_page_mask;
9fa3e853
FB
1999 page_index = host_start >> TARGET_PAGE_BITS;
2000 p1 = page_find(page_index);
c8a706fe
PB
2001 if (!p1) {
2002 mmap_unlock();
9fa3e853 2003 return 0;
c8a706fe 2004 }
83fb7adf 2005 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2006 p = p1;
2007 prot = 0;
2008 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2009 prot |= p->flags;
2010 p++;
2011 }
2012 /* if the page was really writable, then we change its
2013 protection back to writable */
2014 if (prot & PAGE_WRITE_ORG) {
2015 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2016 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2017 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2018 (prot & PAGE_BITS) | PAGE_WRITE);
2019 p1[pindex].flags |= PAGE_WRITE;
2020 /* and since the content will be modified, we must invalidate
2021 the corresponding translated code. */
d720b93d 2022 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2023#ifdef DEBUG_TB_CHECK
2024 tb_invalidate_check(address);
2025#endif
c8a706fe 2026 mmap_unlock();
9fa3e853
FB
2027 return 1;
2028 }
2029 }
c8a706fe 2030 mmap_unlock();
9fa3e853
FB
2031 return 0;
2032}
2033
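The user-mode path above rests on a standard POSIX pattern: pages holding translated code are write-protected, the synchronous SIGSEGV is caught, the page is made writable again and the stale translations are discarded. A minimal standalone sketch of just the protect/fault/recover part, with a print standing in for the TB invalidation:

/* Standalone POSIX sketch of write-protect plus SIGSEGV recovery.
   The real code additionally invalidates translated blocks here. */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static char *page;
static size_t page_size;

static void segv_handler(int sig, siginfo_t *info, void *ctx)
{
    char *fault = (char *)info->si_addr;
    (void)sig; (void)ctx;

    if (fault >= page && fault < page + page_size) {
        /* "page_unprotect": make it writable again and retry the write */
        mprotect(page, page_size, PROT_READ | PROT_WRITE);
        return;
    }
    _exit(1);                                /* unrelated fault: give up */
}

int main(void)
{
    struct sigaction sa;

    page_size = (size_t)sysconf(_SC_PAGESIZE);
    page = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction = segv_handler;
    sa.sa_flags = SA_SIGINFO;
    sigaction(SIGSEGV, &sa, NULL);

    mprotect(page, page_size, PROT_READ);    /* protect the "code" page */
    page[0] = 42;                            /* faults; handler unprotects */
    printf("byte after fault recovery: %d\n", page[0]);
    return 0;
}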
6a00d601
FB
2034static inline void tlb_set_dirty(CPUState *env,
2035 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2036{
2037}
9fa3e853
FB
2038#endif /* defined(CONFIG_USER_ONLY) */
2039
e2eef170 2040#if !defined(CONFIG_USER_ONLY)
db7b5426 2041static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a
AJ
2042 ram_addr_t memory);
2043static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2044 ram_addr_t orig_memory);
db7b5426
BS
2045#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2046 need_subpage) \
2047 do { \
2048 if (addr > start_addr) \
2049 start_addr2 = 0; \
2050 else { \
2051 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2052 if (start_addr2 > 0) \
2053 need_subpage = 1; \
2054 } \
2055 \
49e9fba2 2056 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2057 end_addr2 = TARGET_PAGE_SIZE - 1; \
2058 else { \
2059 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2060 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2061 need_subpage = 1; \
2062 } \
2063 } while (0)
2064
33417e70
FB
2065/* register physical memory. 'size' must be a multiple of the target
2066 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2067 io memory page */
5fafdf24 2068void cpu_register_physical_memory(target_phys_addr_t start_addr,
00f82b8a
AJ
2069 ram_addr_t size,
2070 ram_addr_t phys_offset)
33417e70 2071{
108c49b8 2072 target_phys_addr_t addr, end_addr;
92e873b9 2073 PhysPageDesc *p;
9d42037b 2074 CPUState *env;
00f82b8a 2075 ram_addr_t orig_size = size;
db7b5426 2076 void *subpage;
33417e70 2077
da260249
FB
2078#ifdef USE_KQEMU
2079 /* XXX: should not depend on cpu context */
2080 env = first_cpu;
2081 if (env->kqemu_enabled) {
2082 kqemu_set_phys_mem(start_addr, size, phys_offset);
2083 }
2084#endif
5fd386f6 2085 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2086 end_addr = start_addr + (target_phys_addr_t)size;
2087 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2088 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2089 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2090 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2091 target_phys_addr_t start_addr2, end_addr2;
2092 int need_subpage = 0;
2093
2094 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2095 need_subpage);
4254fab8 2096 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2097 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2098 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2099 &p->phys_offset, orig_memory);
2100 } else {
2101 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2102 >> IO_MEM_SHIFT];
2103 }
2104 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2105 } else {
2106 p->phys_offset = phys_offset;
2107 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2108 (phys_offset & IO_MEM_ROMD))
2109 phys_offset += TARGET_PAGE_SIZE;
2110 }
2111 } else {
2112 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2113 p->phys_offset = phys_offset;
2114 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2115 (phys_offset & IO_MEM_ROMD))
2116 phys_offset += TARGET_PAGE_SIZE;
2117 else {
2118 target_phys_addr_t start_addr2, end_addr2;
2119 int need_subpage = 0;
2120
2121 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2122 end_addr2, need_subpage);
2123
4254fab8 2124 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2125 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2126 &p->phys_offset, IO_MEM_UNASSIGNED);
2127 subpage_register(subpage, start_addr2, end_addr2,
2128 phys_offset);
2129 }
2130 }
2131 }
33417e70 2132 }
3b46e624 2133
9d42037b
FB
2134 /* since each CPU stores ram addresses in its TLB cache, we must
2135 reset the modified entries */
2136 /* XXX: slow ! */
2137 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2138 tlb_flush(env, 1);
2139 }
33417e70
FB
2140}
2141
ba863458 2142/* XXX: temporary until new memory mapping API */
00f82b8a 2143ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2144{
2145 PhysPageDesc *p;
2146
2147 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2148 if (!p)
2149 return IO_MEM_UNASSIGNED;
2150 return p->phys_offset;
2151}
2152
e9a1ab19 2153/* XXX: better than nothing */
00f82b8a 2154ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2155{
2156 ram_addr_t addr;
7fb4fdcf 2157 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
ed441467
FB
2158 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2159 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2160 abort();
2161 }
2162 addr = phys_ram_alloc_offset;
2163 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2164 return addr;
2165}
2166
2167void qemu_ram_free(ram_addr_t addr)
2168{
2169}
2170
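A hedged usage sketch of how board code of this era combines qemu_ram_alloc() with cpu_register_physical_memory(): the returned RAM offset, optionally or'ed with IO_MEM_RAM or IO_MEM_ROM, becomes the phys_offset of the mapping. The function, addresses and sizes below are hypothetical, and the fragment only compiles inside the QEMU tree:

/* Hypothetical board init fragment (QEMU-internal APIs; illustration only). */
static void sketch_board_ram_init(ram_addr_t ram_size)
{
    ram_addr_t ram_offset, rom_offset;

    /* carve RAM out of phys_ram_base and map it at guest address 0 */
    ram_offset = qemu_ram_alloc(ram_size);
    cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);

    /* a made-up 64 KiB ROM mapped high; guest writes to it are ignored */
    rom_offset = qemu_ram_alloc(0x10000);
    cpu_register_physical_memory(0xfff00000, 0x10000, rom_offset | IO_MEM_ROM);
}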
a4193c8a 2171static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2172{
67d3b957 2173#ifdef DEBUG_UNASSIGNED
ab3d1727 2174 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316
BS
2175#endif
2176#ifdef TARGET_SPARC
6c36d3fa 2177 do_unassigned_access(addr, 0, 0, 0);
f1ccf904
TS
2178#elif TARGET_CRIS
2179 do_unassigned_access(addr, 0, 0, 0);
67d3b957 2180#endif
33417e70
FB
2181 return 0;
2182}
2183
a4193c8a 2184static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2185{
67d3b957 2186#ifdef DEBUG_UNASSIGNED
ab3d1727 2187 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2188#endif
b4f0a316 2189#ifdef TARGET_SPARC
6c36d3fa 2190 do_unassigned_access(addr, 1, 0, 0);
f1ccf904
TS
2191#elif TARGET_CRIS
2192 do_unassigned_access(addr, 1, 0, 0);
b4f0a316 2193#endif
33417e70
FB
2194}
2195
2196static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2197 unassigned_mem_readb,
2198 unassigned_mem_readb,
2199 unassigned_mem_readb,
2200};
2201
2202static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2203 unassigned_mem_writeb,
2204 unassigned_mem_writeb,
2205 unassigned_mem_writeb,
2206};
2207
0f459d16
PB
2208static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2209 uint32_t val)
9fa3e853 2210{
3a7d929e 2211 int dirty_flags;
3a7d929e
FB
2212 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2213 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2214#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2215 tb_invalidate_phys_page_fast(ram_addr, 1);
2216 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2217#endif
3a7d929e 2218 }
0f459d16 2219 stb_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2220#ifdef USE_KQEMU
2221 if (cpu_single_env->kqemu_enabled &&
2222 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2223 kqemu_modify_page(cpu_single_env, ram_addr);
2224#endif
f23db169
FB
2225 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2226 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2227 /* we remove the notdirty callback only if the code has been
2228 flushed */
2229 if (dirty_flags == 0xff)
0f459d16 2230 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2231}
2232
0f459d16
PB
2233static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2234 uint32_t val)
9fa3e853 2235{
3a7d929e 2236 int dirty_flags;
3a7d929e
FB
2237 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2238 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2239#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2240 tb_invalidate_phys_page_fast(ram_addr, 2);
2241 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2242#endif
3a7d929e 2243 }
0f459d16 2244 stw_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2245#ifdef USE_KQEMU
2246 if (cpu_single_env->kqemu_enabled &&
2247 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2248 kqemu_modify_page(cpu_single_env, ram_addr);
2249#endif
f23db169
FB
2250 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2251 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2252 /* we remove the notdirty callback only if the code has been
2253 flushed */
2254 if (dirty_flags == 0xff)
0f459d16 2255 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2256}
2257
0f459d16
PB
2258static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2259 uint32_t val)
9fa3e853 2260{
3a7d929e 2261 int dirty_flags;
3a7d929e
FB
2262 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2263 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2264#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2265 tb_invalidate_phys_page_fast(ram_addr, 4);
2266 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2267#endif
3a7d929e 2268 }
0f459d16 2269 stl_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2270#ifdef USE_KQEMU
2271 if (cpu_single_env->kqemu_enabled &&
2272 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2273 kqemu_modify_page(cpu_single_env, ram_addr);
2274#endif
f23db169
FB
2275 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2276 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2277 /* we remove the notdirty callback only if the code has been
2278 flushed */
2279 if (dirty_flags == 0xff)
0f459d16 2280 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2281}
2282
3a7d929e 2283static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2284 NULL, /* never used */
2285 NULL, /* never used */
2286 NULL, /* never used */
2287};
2288
1ccde1cb
FB
2289static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2290 notdirty_mem_writeb,
2291 notdirty_mem_writew,
2292 notdirty_mem_writel,
2293};
2294
0f459d16
PB
2295/* Generate a debug exception if a watchpoint has been hit. */
2296static void check_watchpoint(int offset, int flags)
2297{
2298 CPUState *env = cpu_single_env;
2299 target_ulong vaddr;
2300 int i;
2301
2302 vaddr = (env->mem_write_vaddr & TARGET_PAGE_MASK) + offset;
2303 for (i = 0; i < env->nb_watchpoints; i++) {
2304 if (vaddr == env->watchpoint[i].vaddr
2305 && (env->watchpoint[i].type & flags)) {
2306 env->watchpoint_hit = i + 1;
2307 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2308 break;
2309 }
2310 }
2311}
2312
6658ffb8
PB
2313/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2314 so these check for a hit then pass through to the normal out-of-line
2315 phys routines. */
2316static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2317{
0f459d16 2318 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2319 return ldub_phys(addr);
2320}
2321
2322static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2323{
0f459d16 2324 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2325 return lduw_phys(addr);
2326}
2327
2328static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2329{
0f459d16 2330 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2331 return ldl_phys(addr);
2332}
2333
6658ffb8
PB
2334static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2335 uint32_t val)
2336{
0f459d16 2337 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2338 stb_phys(addr, val);
2339}
2340
2341static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2342 uint32_t val)
2343{
0f459d16 2344 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2345 stw_phys(addr, val);
2346}
2347
2348static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2349 uint32_t val)
2350{
0f459d16 2351 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2352 stl_phys(addr, val);
2353}
2354
2355static CPUReadMemoryFunc *watch_mem_read[3] = {
2356 watch_mem_readb,
2357 watch_mem_readw,
2358 watch_mem_readl,
2359};
2360
2361static CPUWriteMemoryFunc *watch_mem_write[3] = {
2362 watch_mem_writeb,
2363 watch_mem_writew,
2364 watch_mem_writel,
2365};
6658ffb8 2366
db7b5426
BS
2367static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2368 unsigned int len)
2369{
db7b5426
BS
2370 uint32_t ret;
2371 unsigned int idx;
2372
2373 idx = SUBPAGE_IDX(addr - mmio->base);
2374#if defined(DEBUG_SUBPAGE)
2375 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2376 mmio, len, addr, idx);
2377#endif
3ee89922 2378 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
db7b5426
BS
2379
2380 return ret;
2381}
2382
2383static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2384 uint32_t value, unsigned int len)
2385{
db7b5426
BS
2386 unsigned int idx;
2387
2388 idx = SUBPAGE_IDX(addr - mmio->base);
2389#if defined(DEBUG_SUBPAGE)
2390 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2391 mmio, len, addr, idx, value);
2392#endif
3ee89922 2393 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
db7b5426
BS
2394}
2395
2396static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2397{
2398#if defined(DEBUG_SUBPAGE)
2399 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2400#endif
2401
2402 return subpage_readlen(opaque, addr, 0);
2403}
2404
2405static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2406 uint32_t value)
2407{
2408#if defined(DEBUG_SUBPAGE)
2409 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2410#endif
2411 subpage_writelen(opaque, addr, value, 0);
2412}
2413
2414static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2415{
2416#if defined(DEBUG_SUBPAGE)
2417 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2418#endif
2419
2420 return subpage_readlen(opaque, addr, 1);
2421}
2422
2423static void subpage_writew (void *opaque, target_phys_addr_t addr,
2424 uint32_t value)
2425{
2426#if defined(DEBUG_SUBPAGE)
2427 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2428#endif
2429 subpage_writelen(opaque, addr, value, 1);
2430}
2431
2432static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2433{
2434#if defined(DEBUG_SUBPAGE)
2435 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2436#endif
2437
2438 return subpage_readlen(opaque, addr, 2);
2439}
2440
2441static void subpage_writel (void *opaque,
2442 target_phys_addr_t addr, uint32_t value)
2443{
2444#if defined(DEBUG_SUBPAGE)
2445 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2446#endif
2447 subpage_writelen(opaque, addr, value, 2);
2448}
2449
2450static CPUReadMemoryFunc *subpage_read[] = {
2451 &subpage_readb,
2452 &subpage_readw,
2453 &subpage_readl,
2454};
2455
2456static CPUWriteMemoryFunc *subpage_write[] = {
2457 &subpage_writeb,
2458 &subpage_writew,
2459 &subpage_writel,
2460};
2461
2462static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a 2463 ram_addr_t memory)
db7b5426
BS
2464{
2465 int idx, eidx;
4254fab8 2466 unsigned int i;
db7b5426
BS
2467
2468 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2469 return -1;
2470 idx = SUBPAGE_IDX(start);
2471 eidx = SUBPAGE_IDX(end);
2472#if defined(DEBUG_SUBPAGE)
2473 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2474 mmio, start, end, idx, eidx, memory);
2475#endif
2476 memory >>= IO_MEM_SHIFT;
2477 for (; idx <= eidx; idx++) {
4254fab8 2478 for (i = 0; i < 4; i++) {
3ee89922
BS
2479 if (io_mem_read[memory][i]) {
2480 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2481 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2482 }
2483 if (io_mem_write[memory][i]) {
2484 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2485 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2486 }
4254fab8 2487 }
db7b5426
BS
2488 }
2489
2490 return 0;
2491}
2492
00f82b8a
AJ
2493static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2494 ram_addr_t orig_memory)
db7b5426
BS
2495{
2496 subpage_t *mmio;
2497 int subpage_memory;
2498
2499 mmio = qemu_mallocz(sizeof(subpage_t));
2500 if (mmio != NULL) {
2501 mmio->base = base;
2502 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2503#if defined(DEBUG_SUBPAGE)
2504 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2505 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2506#endif
2507 *phys = subpage_memory | IO_MEM_SUBPAGE;
2508 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2509 }
2510
2511 return mmio;
2512}
2513
33417e70
FB
2514static void io_mem_init(void)
2515{
3a7d929e 2516 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2517 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2518 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2519 io_mem_nb = 5;
2520
0f459d16 2521 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
6658ffb8 2522 watch_mem_write, NULL);
1ccde1cb 2523 /* alloc dirty bits array */
0a962c02 2524 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2525 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2526}
2527
2528/* mem_read and mem_write are arrays of functions containing the
2529 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2530 2). Functions can be omitted with a NULL function pointer. The
2531 registered functions may be modified dynamically later.
2532 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
2533 modified. If it is zero, a new io zone is allocated. The return
2534 value can be used with cpu_register_physical_memory(). (-1) is
2535 returned on error. */
33417e70
FB
2536int cpu_register_io_memory(int io_index,
2537 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2538 CPUWriteMemoryFunc **mem_write,
2539 void *opaque)
33417e70 2540{
4254fab8 2541 int i, subwidth = 0;
33417e70
FB
2542
2543 if (io_index <= 0) {
b5ff1b31 2544 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2545 return -1;
2546 io_index = io_mem_nb++;
2547 } else {
2548 if (io_index >= IO_MEM_NB_ENTRIES)
2549 return -1;
2550 }
b5ff1b31 2551
33417e70 2552 for(i = 0;i < 3; i++) {
4254fab8
BS
2553 if (!mem_read[i] || !mem_write[i])
2554 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2555 io_mem_read[io_index][i] = mem_read[i];
2556 io_mem_write[io_index][i] = mem_write[i];
2557 }
a4193c8a 2558 io_mem_opaque[io_index] = opaque;
4254fab8 2559 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2560}
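A hedged usage sketch of the registration interface above, in the shape device models of this era take: three read callbacks, three write callbacks, an opaque pointer, and the returned token passed on to cpu_register_physical_memory(). All mydev_* names are made up, and the fragment assumes the QEMU-internal types declared elsewhere in the tree:

/* Hypothetical device model fragment (QEMU-internal APIs; illustration only). */
typedef struct MyDevState { uint32_t reg; } MyDevState;

static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    MyDevState *s = opaque;
    return s->reg;                 /* same value for every offset, for brevity */
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    MyDevState *s = opaque;
    s->reg = val;
}

/* byte/word accesses reuse the long handlers here just to keep it short */
static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readl, mydev_readl, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writel, mydev_writel, mydev_writel,
};

static void sketch_mydev_init(target_phys_addr_t base, MyDevState *s)
{
    int io_index = cpu_register_io_memory(0, mydev_read, mydev_write, s);
    /* io_index would carry the subwidth flag if any handler were NULL */
    cpu_register_physical_memory(base, 0x1000, io_index);
}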
61382a50 2561
8926b517
FB
2562CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2563{
2564 return io_mem_write[io_index >> IO_MEM_SHIFT];
2565}
2566
2567CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2568{
2569 return io_mem_read[io_index >> IO_MEM_SHIFT];
2570}
2571
e2eef170
PB
2572#endif /* !defined(CONFIG_USER_ONLY) */
2573
13eb76e0
FB
2574/* physical memory access (slow version, mainly for debug) */
2575#if defined(CONFIG_USER_ONLY)
5fafdf24 2576void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2577 int len, int is_write)
2578{
2579 int l, flags;
2580 target_ulong page;
53a5960a 2581 void * p;
13eb76e0
FB
2582
2583 while (len > 0) {
2584 page = addr & TARGET_PAGE_MASK;
2585 l = (page + TARGET_PAGE_SIZE) - addr;
2586 if (l > len)
2587 l = len;
2588 flags = page_get_flags(page);
2589 if (!(flags & PAGE_VALID))
2590 return;
2591 if (is_write) {
2592 if (!(flags & PAGE_WRITE))
2593 return;
579a97f7 2594 /* XXX: this code should not depend on lock_user */
72fb7daa 2595 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2596 /* FIXME - should this return an error rather than just fail? */
2597 return;
72fb7daa
AJ
2598 memcpy(p, buf, l);
2599 unlock_user(p, addr, l);
13eb76e0
FB
2600 } else {
2601 if (!(flags & PAGE_READ))
2602 return;
579a97f7 2603 /* XXX: this code should not depend on lock_user */
72fb7daa 2604 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2605 /* FIXME - should this return an error rather than just fail? */
2606 return;
72fb7daa 2607 memcpy(buf, p, l);
5b257578 2608 unlock_user(p, addr, 0);
13eb76e0
FB
2609 }
2610 len -= l;
2611 buf += l;
2612 addr += l;
2613 }
2614}
8df1cd07 2615
13eb76e0 2616#else
5fafdf24 2617void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2618 int len, int is_write)
2619{
2620 int l, io_index;
2621 uint8_t *ptr;
2622 uint32_t val;
2e12669a
FB
2623 target_phys_addr_t page;
2624 unsigned long pd;
92e873b9 2625 PhysPageDesc *p;
3b46e624 2626
13eb76e0
FB
2627 while (len > 0) {
2628 page = addr & TARGET_PAGE_MASK;
2629 l = (page + TARGET_PAGE_SIZE) - addr;
2630 if (l > len)
2631 l = len;
92e873b9 2632 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2633 if (!p) {
2634 pd = IO_MEM_UNASSIGNED;
2635 } else {
2636 pd = p->phys_offset;
2637 }
3b46e624 2638
13eb76e0 2639 if (is_write) {
3a7d929e 2640 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2641 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2642 /* XXX: could force cpu_single_env to NULL to avoid
2643 potential bugs */
13eb76e0 2644 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2645 /* 32 bit write access */
c27004ec 2646 val = ldl_p(buf);
a4193c8a 2647 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2648 l = 4;
2649 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2650 /* 16 bit write access */
c27004ec 2651 val = lduw_p(buf);
a4193c8a 2652 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2653 l = 2;
2654 } else {
1c213d19 2655 /* 8 bit write access */
c27004ec 2656 val = ldub_p(buf);
a4193c8a 2657 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2658 l = 1;
2659 }
2660 } else {
b448f2f3
FB
2661 unsigned long addr1;
2662 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2663 /* RAM case */
b448f2f3 2664 ptr = phys_ram_base + addr1;
13eb76e0 2665 memcpy(ptr, buf, l);
3a7d929e
FB
2666 if (!cpu_physical_memory_is_dirty(addr1)) {
2667 /* invalidate code */
2668 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2669 /* set dirty bit */
5fafdf24 2670 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2671 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2672 }
13eb76e0
FB
2673 }
2674 } else {
5fafdf24 2675 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2676 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2677 /* I/O case */
2678 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2679 if (l >= 4 && ((addr & 3) == 0)) {
2680 /* 32 bit read access */
a4193c8a 2681 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2682 stl_p(buf, val);
13eb76e0
FB
2683 l = 4;
2684 } else if (l >= 2 && ((addr & 1) == 0)) {
2685 /* 16 bit read access */
a4193c8a 2686 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2687 stw_p(buf, val);
13eb76e0
FB
2688 l = 2;
2689 } else {
1c213d19 2690 /* 8 bit read access */
a4193c8a 2691 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2692 stb_p(buf, val);
13eb76e0
FB
2693 l = 1;
2694 }
2695 } else {
2696 /* RAM case */
5fafdf24 2697 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
2698 (addr & ~TARGET_PAGE_MASK);
2699 memcpy(buf, ptr, l);
2700 }
2701 }
2702 len -= l;
2703 buf += l;
2704 addr += l;
2705 }
2706}
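The loop skeleton above, clamping each step to the end of the current target page before dispatching to RAM or I/O, is worth seeing on its own. A standalone sketch with plain memcpy standing in for both cases:

/* Standalone sketch of splitting a transfer at page boundaries. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SK_PAGE_SIZE 4096u
#define SK_PAGE_MASK (~(SK_PAGE_SIZE - 1))

static uint8_t backing[4 * SK_PAGE_SIZE];

static void sk_memory_rw(uint32_t addr, uint8_t *buf, int len, int is_write)
{
    while (len > 0) {
        uint32_t page = addr & SK_PAGE_MASK;
        int l = (int)((page + SK_PAGE_SIZE) - addr);   /* bytes left in page */

        if (l > len)
            l = len;
        /* the real code picks RAM vs. I/O per page at this point */
        if (is_write)
            memcpy(backing + addr, buf, l);
        else
            memcpy(buf, backing + addr, l);
        len -= l;
        buf += l;
        addr += l;
    }
}

int main(void)
{
    uint8_t out[8], in[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

    sk_memory_rw(SK_PAGE_SIZE - 3, in, 8, 1);   /* straddles a page boundary */
    sk_memory_rw(SK_PAGE_SIZE - 3, out, 8, 0);
    printf("%d %d\n", out[0], out[7]);          /* prints: 1 8 */
    return 0;
}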
8df1cd07 2707
d0ecd2aa 2708/* used for ROM loading : can write in RAM and ROM */
5fafdf24 2709void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
2710 const uint8_t *buf, int len)
2711{
2712 int l;
2713 uint8_t *ptr;
2714 target_phys_addr_t page;
2715 unsigned long pd;
2716 PhysPageDesc *p;
3b46e624 2717
d0ecd2aa
FB
2718 while (len > 0) {
2719 page = addr & TARGET_PAGE_MASK;
2720 l = (page + TARGET_PAGE_SIZE) - addr;
2721 if (l > len)
2722 l = len;
2723 p = phys_page_find(page >> TARGET_PAGE_BITS);
2724 if (!p) {
2725 pd = IO_MEM_UNASSIGNED;
2726 } else {
2727 pd = p->phys_offset;
2728 }
3b46e624 2729
d0ecd2aa 2730 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2731 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2732 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2733 /* do nothing */
2734 } else {
2735 unsigned long addr1;
2736 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2737 /* ROM/RAM case */
2738 ptr = phys_ram_base + addr1;
2739 memcpy(ptr, buf, l);
2740 }
2741 len -= l;
2742 buf += l;
2743 addr += l;
2744 }
2745}
2746
2747
8df1cd07
FB
2748/* warning: addr must be aligned */
2749uint32_t ldl_phys(target_phys_addr_t addr)
2750{
2751 int io_index;
2752 uint8_t *ptr;
2753 uint32_t val;
2754 unsigned long pd;
2755 PhysPageDesc *p;
2756
2757 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2758 if (!p) {
2759 pd = IO_MEM_UNASSIGNED;
2760 } else {
2761 pd = p->phys_offset;
2762 }
3b46e624 2763
5fafdf24 2764 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2765 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2766 /* I/O case */
2767 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2768 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2769 } else {
2770 /* RAM case */
5fafdf24 2771 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2772 (addr & ~TARGET_PAGE_MASK);
2773 val = ldl_p(ptr);
2774 }
2775 return val;
2776}
2777
84b7b8e7
FB
2778/* warning: addr must be aligned */
2779uint64_t ldq_phys(target_phys_addr_t addr)
2780{
2781 int io_index;
2782 uint8_t *ptr;
2783 uint64_t val;
2784 unsigned long pd;
2785 PhysPageDesc *p;
2786
2787 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2788 if (!p) {
2789 pd = IO_MEM_UNASSIGNED;
2790 } else {
2791 pd = p->phys_offset;
2792 }
3b46e624 2793
2a4188a3
FB
2794 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2795 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2796 /* I/O case */
2797 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2798#ifdef TARGET_WORDS_BIGENDIAN
2799 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2800 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2801#else
2802 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2803 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2804#endif
2805 } else {
2806 /* RAM case */
5fafdf24 2807 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
2808 (addr & ~TARGET_PAGE_MASK);
2809 val = ldq_p(ptr);
2810 }
2811 return val;
2812}
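ldq_phys() composes a 64-bit value from two 32-bit device reads, with the order of the halves chosen by target endianness. A standalone sketch of just that composition, using a fake 32-bit read callback:

/* Standalone sketch: composing a 64-bit read from two 32-bit accesses. */
#include <stdint.h>
#include <stdio.h>

/* pretend device: returns the low 32 bits of the address as data */
static uint32_t fake_readl(uint64_t addr)
{
    return (uint32_t)addr;
}

static uint64_t ldq_from_two_readl(uint64_t addr, int big_endian)
{
    uint64_t val;

    if (big_endian) {
        val = (uint64_t)fake_readl(addr) << 32;
        val |= fake_readl(addr + 4);
    } else {
        val = fake_readl(addr);
        val |= (uint64_t)fake_readl(addr + 4) << 32;
    }
    return val;
}

int main(void)
{
    printf("%016llx\n", (unsigned long long)ldq_from_two_readl(0x1000, 0));
    printf("%016llx\n", (unsigned long long)ldq_from_two_readl(0x1000, 1));
    return 0;
}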
2813
aab33094
FB
2814/* XXX: optimize */
2815uint32_t ldub_phys(target_phys_addr_t addr)
2816{
2817 uint8_t val;
2818 cpu_physical_memory_read(addr, &val, 1);
2819 return val;
2820}
2821
2822/* XXX: optimize */
2823uint32_t lduw_phys(target_phys_addr_t addr)
2824{
2825 uint16_t val;
2826 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2827 return tswap16(val);
2828}
2829
8df1cd07
FB
2830/* warning: addr must be aligned. The ram page is not masked as dirty
2831 and the code inside is not invalidated. It is useful if the dirty
2832 bits are used to track modified PTEs */
2833void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2834{
2835 int io_index;
2836 uint8_t *ptr;
2837 unsigned long pd;
2838 PhysPageDesc *p;
2839
2840 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2841 if (!p) {
2842 pd = IO_MEM_UNASSIGNED;
2843 } else {
2844 pd = p->phys_offset;
2845 }
3b46e624 2846
3a7d929e 2847 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2848 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2849 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2850 } else {
5fafdf24 2851 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2852 (addr & ~TARGET_PAGE_MASK);
2853 stl_p(ptr, val);
2854 }
2855}
2856
bc98a7ef
JM
2857void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2858{
2859 int io_index;
2860 uint8_t *ptr;
2861 unsigned long pd;
2862 PhysPageDesc *p;
2863
2864 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2865 if (!p) {
2866 pd = IO_MEM_UNASSIGNED;
2867 } else {
2868 pd = p->phys_offset;
2869 }
3b46e624 2870
bc98a7ef
JM
2871 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2872 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2873#ifdef TARGET_WORDS_BIGENDIAN
2874 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2875 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2876#else
2877 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2878 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2879#endif
2880 } else {
5fafdf24 2881 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
2882 (addr & ~TARGET_PAGE_MASK);
2883 stq_p(ptr, val);
2884 }
2885}
2886
8df1cd07 2887/* warning: addr must be aligned */
8df1cd07
FB
2888void stl_phys(target_phys_addr_t addr, uint32_t val)
2889{
2890 int io_index;
2891 uint8_t *ptr;
2892 unsigned long pd;
2893 PhysPageDesc *p;
2894
2895 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2896 if (!p) {
2897 pd = IO_MEM_UNASSIGNED;
2898 } else {
2899 pd = p->phys_offset;
2900 }
3b46e624 2901
3a7d929e 2902 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2903 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2904 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2905 } else {
2906 unsigned long addr1;
2907 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2908 /* RAM case */
2909 ptr = phys_ram_base + addr1;
2910 stl_p(ptr, val);
3a7d929e
FB
2911 if (!cpu_physical_memory_is_dirty(addr1)) {
2912 /* invalidate code */
2913 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2914 /* set dirty bit */
f23db169
FB
2915 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2916 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2917 }
8df1cd07
FB
2918 }
2919}
2920
aab33094
FB
2921/* XXX: optimize */
2922void stb_phys(target_phys_addr_t addr, uint32_t val)
2923{
2924 uint8_t v = val;
2925 cpu_physical_memory_write(addr, &v, 1);
2926}
2927
2928/* XXX: optimize */
2929void stw_phys(target_phys_addr_t addr, uint32_t val)
2930{
2931 uint16_t v = tswap16(val);
2932 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2933}
2934
2935/* XXX: optimize */
2936void stq_phys(target_phys_addr_t addr, uint64_t val)
2937{
2938 val = tswap64(val);
2939 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2940}
2941
13eb76e0
FB
2942#endif
2943
2944/* virtual memory access for debug */
5fafdf24 2945int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 2946 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2947{
2948 int l;
9b3c35e0
JM
2949 target_phys_addr_t phys_addr;
2950 target_ulong page;
13eb76e0
FB
2951
2952 while (len > 0) {
2953 page = addr & TARGET_PAGE_MASK;
2954 phys_addr = cpu_get_phys_page_debug(env, page);
2955 /* if no physical page mapped, return an error */
2956 if (phys_addr == -1)
2957 return -1;
2958 l = (page + TARGET_PAGE_SIZE) - addr;
2959 if (l > len)
2960 l = len;
5fafdf24 2961 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 2962 buf, l, is_write);
13eb76e0
FB
2963 len -= l;
2964 buf += l;
2965 addr += l;
2966 }
2967 return 0;
2968}
2969
e3db7226
FB
2970void dump_exec_info(FILE *f,
2971 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2972{
2973 int i, target_code_size, max_target_code_size;
2974 int direct_jmp_count, direct_jmp2_count, cross_page;
2975 TranslationBlock *tb;
3b46e624 2976
e3db7226
FB
2977 target_code_size = 0;
2978 max_target_code_size = 0;
2979 cross_page = 0;
2980 direct_jmp_count = 0;
2981 direct_jmp2_count = 0;
2982 for(i = 0; i < nb_tbs; i++) {
2983 tb = &tbs[i];
2984 target_code_size += tb->size;
2985 if (tb->size > max_target_code_size)
2986 max_target_code_size = tb->size;
2987 if (tb->page_addr[1] != -1)
2988 cross_page++;
2989 if (tb->tb_next_offset[0] != 0xffff) {
2990 direct_jmp_count++;
2991 if (tb->tb_next_offset[1] != 0xffff) {
2992 direct_jmp2_count++;
2993 }
2994 }
2995 }
2996 /* XXX: avoid using doubles ? */
57fec1fe 2997 cpu_fprintf(f, "Translation buffer state:\n");
26a5f13b
FB
2998 cpu_fprintf(f, "gen code size %ld/%ld\n",
2999 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3000 cpu_fprintf(f, "TB count %d/%d\n",
3001 nb_tbs, code_gen_max_blocks);
5fafdf24 3002 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
3003 nb_tbs ? target_code_size / nb_tbs : 0,
3004 max_target_code_size);
5fafdf24 3005 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
3006 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3007 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
3008 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3009 cross_page,
e3db7226
FB
3010 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3011 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3012 direct_jmp_count,
e3db7226
FB
3013 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3014 direct_jmp2_count,
3015 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3016 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
3017 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3018 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3019 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3020 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
3021}
3022
5fafdf24 3023#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3024
3025#define MMUSUFFIX _cmmu
3026#define GETPC() NULL
3027#define env cpu_single_env
b769d8fe 3028#define SOFTMMU_CODE_ACCESS
61382a50
FB
3029
3030#define SHIFT 0
3031#include "softmmu_template.h"
3032
3033#define SHIFT 1
3034#include "softmmu_template.h"
3035
3036#define SHIFT 2
3037#include "softmmu_template.h"
3038
3039#define SHIFT 3
3040#include "softmmu_template.h"
3041
3042#undef env
3043
3044#endif