54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
67b915a5 20#include "config.h"
d5a8f07c 21#ifdef _WIN32
4fddf62a 22#define WIN32_LEAN_AND_MEAN
23#include <windows.h>
24#else
a98d49b1 25#include <sys/types.h>
26#include <sys/mman.h>
27#endif
28#include <stdlib.h>
29#include <stdio.h>
30#include <stdarg.h>
31#include <string.h>
32#include <errno.h>
33#include <unistd.h>
34#include <inttypes.h>
35
36#include "cpu.h"
37#include "exec-all.h"
ca10f867 38#include "qemu-common.h"
b67d9a52 39#include "tcg.h"
40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
42#endif
54936004 43
fd6ce8f6 44//#define DEBUG_TB_INVALIDATE
66e85a21 45//#define DEBUG_FLUSH
9fa3e853 46//#define DEBUG_TLB
67d3b957 47//#define DEBUG_UNASSIGNED
48
49/* make various TB consistency checks */
50//#define DEBUG_TB_CHECK
51//#define DEBUG_TLB_CHECK
fd6ce8f6 52
1196be37 53//#define DEBUG_IOPORT
db7b5426 54//#define DEBUG_SUBPAGE
1196be37 55
56#if !defined(CONFIG_USER_ONLY)
57/* TB consistency checks only implemented for usermode emulation. */
58#undef DEBUG_TB_CHECK
59#endif
60
61#define SMC_BITMAP_USE_THRESHOLD 10
62
63#define MMAP_AREA_START 0x00000000
64#define MMAP_AREA_END 0xa8000000
fd6ce8f6 65
66#if defined(TARGET_SPARC64)
67#define TARGET_PHYS_ADDR_SPACE_BITS 41
68#elif defined(TARGET_SPARC)
69#define TARGET_PHYS_ADDR_SPACE_BITS 36
70#elif defined(TARGET_ALPHA)
71#define TARGET_PHYS_ADDR_SPACE_BITS 42
72#define TARGET_VIRT_ADDR_SPACE_BITS 42
73#elif defined(TARGET_PPC64)
74#define TARGET_PHYS_ADDR_SPACE_BITS 42
75#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
76#define TARGET_PHYS_ADDR_SPACE_BITS 42
77#elif defined(TARGET_I386) && !defined(USE_KQEMU)
78#define TARGET_PHYS_ADDR_SPACE_BITS 36
79#else
80/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
81#define TARGET_PHYS_ADDR_SPACE_BITS 32
82#endif
83
fab94c0e 84TranslationBlock *tbs;
26a5f13b 85int code_gen_max_blocks;
9fa3e853 86TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
fd6ce8f6 87int nb_tbs;
88/* any access to the tbs or the page table must use this lock */
89spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 90
7cb69cae 91uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
92uint8_t *code_gen_buffer;
93unsigned long code_gen_buffer_size;
94/* threshold to flush the translated code buffer */
95unsigned long code_gen_buffer_max_size;
96uint8_t *code_gen_ptr;
97
e2eef170 98#if !defined(CONFIG_USER_ONLY)
00f82b8a 99ram_addr_t phys_ram_size;
100int phys_ram_fd;
101uint8_t *phys_ram_base;
1ccde1cb 102uint8_t *phys_ram_dirty;
e9a1ab19 103static ram_addr_t phys_ram_alloc_offset = 0;
e2eef170 104#endif
9fa3e853 105
106CPUState *first_cpu;
107/* current CPU in the current thread. It is only valid inside
108 cpu_exec() */
5fafdf24 109CPUState *cpu_single_env;
6a00d601 110
54936004 111typedef struct PageDesc {
92e873b9 112 /* list of TBs intersecting this ram page */
fd6ce8f6 113 TranslationBlock *first_tb;
114 /* in order to optimize self modifying code, we count the number
115 of lookups we do to a given page to use a bitmap */
116 unsigned int code_write_count;
117 uint8_t *code_bitmap;
118#if defined(CONFIG_USER_ONLY)
119 unsigned long flags;
120#endif
121} PageDesc;
122
92e873b9 123typedef struct PhysPageDesc {
0f459d16 124 /* offset in host memory of the page + io_index in the low bits */
00f82b8a 125 ram_addr_t phys_offset;
126} PhysPageDesc;
127
54936004 128#define L2_BITS 10
129#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
130/* XXX: this is a temporary hack for alpha target.
131 * In the future, this is to be replaced by a multi-level table
132 * to actually be able to handle the complete 64 bits address space.
133 */
134#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
135#else
03875444 136#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
bedb69ea 137#endif
138
139#define L1_SIZE (1 << L1_BITS)
140#define L2_SIZE (1 << L2_BITS)
141
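/* Added illustrative note (not part of the original source): with the usual
 * 4 KB target pages (TARGET_PAGE_BITS = 12) and L2_BITS = 10 on a 32-bit
 * target, page_find_alloc() below walks the two-level table roughly as:
 *
 *     index = address >> TARGET_PAGE_BITS;      // linear page number
 *     p     = l1_map[index >> L2_BITS];         // 1024-entry first level
 *     desc  = p + (index & (L2_SIZE - 1));      // 1024-entry second level
 */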
142unsigned long qemu_real_host_page_size;
143unsigned long qemu_host_page_bits;
144unsigned long qemu_host_page_size;
145unsigned long qemu_host_page_mask;
54936004 146
92e873b9 147/* XXX: for system emulation, it could just be an array */
54936004 148static PageDesc *l1_map[L1_SIZE];
0a962c02 149PhysPageDesc **l1_phys_map;
54936004 150
151#if !defined(CONFIG_USER_ONLY)
152static void io_mem_init(void);
153
33417e70 154/* io memory support */
155CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
156CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 157void *io_mem_opaque[IO_MEM_NB_ENTRIES];
33417e70 158static int io_mem_nb;
159static int io_mem_watch;
160#endif
33417e70 161
162/* log support */
163char *logfilename = "/tmp/qemu.log";
164FILE *logfile;
165int loglevel;
e735b91c 166static int log_append = 0;
34865134 167
168/* statistics */
169static int tlb_flush_count;
170static int tb_flush_count;
171static int tb_phys_invalidate_count;
172
173#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
174typedef struct subpage_t {
175 target_phys_addr_t base;
176 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
177 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
178 void *opaque[TARGET_PAGE_SIZE][2][4];
179} subpage_t;
180
181#ifdef _WIN32
182static void map_exec(void *addr, long size)
183{
184 DWORD old_protect;
185 VirtualProtect(addr, size,
186 PAGE_EXECUTE_READWRITE, &old_protect);
187
188}
189#else
190static void map_exec(void *addr, long size)
191{
4369415f 192 unsigned long start, end, page_size;
7cb69cae 193
4369415f 194 page_size = getpagesize();
7cb69cae 195 start = (unsigned long)addr;
4369415f 196 start &= ~(page_size - 1);
197
198 end = (unsigned long)addr + size;
199 end += page_size - 1;
200 end &= ~(page_size - 1);
201
202 mprotect((void *)start, end - start,
203 PROT_READ | PROT_WRITE | PROT_EXEC);
204}
205#endif
206
b346ff46 207static void page_init(void)
54936004 208{
83fb7adf 209 /* NOTE: we can always assume that qemu_host_page_size >=
54936004 210 TARGET_PAGE_SIZE */
67b915a5 211#ifdef _WIN32
212 {
213 SYSTEM_INFO system_info;
214 DWORD old_protect;
3b46e624 215
216 GetSystemInfo(&system_info);
217 qemu_real_host_page_size = system_info.dwPageSize;
d5a8f07c 218 }
67b915a5 219#else
83fb7adf 220 qemu_real_host_page_size = getpagesize();
67b915a5 221#endif
222 if (qemu_host_page_size == 0)
223 qemu_host_page_size = qemu_real_host_page_size;
224 if (qemu_host_page_size < TARGET_PAGE_SIZE)
225 qemu_host_page_size = TARGET_PAGE_SIZE;
226 qemu_host_page_bits = 0;
227 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
228 qemu_host_page_bits++;
229 qemu_host_page_mask = ~(qemu_host_page_size - 1);
230 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
231 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
232
233#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
234 {
235 long long startaddr, endaddr;
236 FILE *f;
237 int n;
238
c8a706fe 239 mmap_lock();
0776590d 240 last_brk = (unsigned long)sbrk(0);
241 f = fopen("/proc/self/maps", "r");
242 if (f) {
243 do {
244 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
245 if (n == 2) {
246 startaddr = MIN(startaddr,
247 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
248 endaddr = MIN(endaddr,
249 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
b5fc909e 250 page_set_flags(startaddr & TARGET_PAGE_MASK,
251 TARGET_PAGE_ALIGN(endaddr),
252 PAGE_RESERVED);
253 }
254 } while (!feof(f));
255 fclose(f);
256 }
c8a706fe 257 mmap_unlock();
258 }
259#endif
260}
261
00f82b8a 262static inline PageDesc *page_find_alloc(target_ulong index)
54936004 263{
264 PageDesc **lp, *p;
265
266#if TARGET_LONG_BITS > 32
267 /* Host memory outside guest VM. For 32-bit targets we have already
268 excluded high addresses. */
269 if (index > ((target_ulong)L2_SIZE * L1_SIZE * TARGET_PAGE_SIZE))
270 return NULL;
271#endif
272 lp = &l1_map[index >> L2_BITS];
273 p = *lp;
274 if (!p) {
275 /* allocate if not found */
276#if defined(CONFIG_USER_ONLY)
277 unsigned long addr;
278 size_t len = sizeof(PageDesc) * L2_SIZE;
279 /* Don't use qemu_malloc because it may recurse. */
280 p = mmap(0, len, PROT_READ | PROT_WRITE,
281 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
54936004 282 *lp = p;
283 addr = h2g(p);
284 if (addr == (target_ulong)addr) {
285 page_set_flags(addr & TARGET_PAGE_MASK,
286 TARGET_PAGE_ALIGN(addr + len),
287 PAGE_RESERVED);
288 }
289#else
290 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
291 *lp = p;
292#endif
293 }
294 return p + (index & (L2_SIZE - 1));
295}
296
00f82b8a 297static inline PageDesc *page_find(target_ulong index)
54936004 298{
299 PageDesc *p;
300
301 p = l1_map[index >> L2_BITS];
302 if (!p)
303 return 0;
304 return p + (index & (L2_SIZE - 1));
305}
306
108c49b8 307static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 308{
108c49b8 309 void **lp, **p;
e3f4e2a4 310 PhysPageDesc *pd;
92e873b9 311
312 p = (void **)l1_phys_map;
313#if TARGET_PHYS_ADDR_SPACE_BITS > 32
314
315#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
316#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
317#endif
318 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
319 p = *lp;
320 if (!p) {
321 /* allocate if not found */
322 if (!alloc)
323 return NULL;
324 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
325 memset(p, 0, sizeof(void *) * L1_SIZE);
326 *lp = p;
327 }
328#endif
329 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
330 pd = *lp;
331 if (!pd) {
332 int i;
333 /* allocate if not found */
334 if (!alloc)
335 return NULL;
336 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
337 *lp = pd;
338 for (i = 0; i < L2_SIZE; i++)
339 pd[i].phys_offset = IO_MEM_UNASSIGNED;
92e873b9 340 }
e3f4e2a4 341 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
342}
343
108c49b8 344static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 345{
108c49b8 346 return phys_page_find_alloc(index, 0);
347}
348
9fa3e853 349#if !defined(CONFIG_USER_ONLY)
6a00d601 350static void tlb_protect_code(ram_addr_t ram_addr);
5fafdf24 351static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 352 target_ulong vaddr);
353#define mmap_lock() do { } while(0)
354#define mmap_unlock() do { } while(0)
9fa3e853 355#endif
fd6ce8f6 356
357#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
358
359#if defined(CONFIG_USER_ONLY)
360/* Currently it is not recommended to allocate big chunks of data in
 361 user mode. It will change when a dedicated libc is used */
362#define USE_STATIC_CODE_GEN_BUFFER
363#endif
364
365#ifdef USE_STATIC_CODE_GEN_BUFFER
366static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
367#endif
368
369void code_gen_alloc(unsigned long tb_size)
370{
371#ifdef USE_STATIC_CODE_GEN_BUFFER
372 code_gen_buffer = static_code_gen_buffer;
373 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
374 map_exec(code_gen_buffer, code_gen_buffer_size);
375#else
376 code_gen_buffer_size = tb_size;
377 if (code_gen_buffer_size == 0) {
378#if defined(CONFIG_USER_ONLY)
379 /* in user mode, phys_ram_size is not meaningful */
380 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
381#else
382 /* XXX: needs adjustments */
383 code_gen_buffer_size = (int)(phys_ram_size / 4);
4369415f 384#endif
385 }
386 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
387 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
388 /* The code gen buffer location may have constraints depending on
389 the host cpu and OS */
390#if defined(__linux__)
391 {
392 int flags;
393 flags = MAP_PRIVATE | MAP_ANONYMOUS;
394#if defined(__x86_64__)
395 flags |= MAP_32BIT;
396 /* Cannot map more than that */
397 if (code_gen_buffer_size > (800 * 1024 * 1024))
398 code_gen_buffer_size = (800 * 1024 * 1024);
399#endif
400 code_gen_buffer = mmap(NULL, code_gen_buffer_size,
401 PROT_WRITE | PROT_READ | PROT_EXEC,
402 flags, -1, 0);
403 if (code_gen_buffer == MAP_FAILED) {
404 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
405 exit(1);
406 }
407 }
408#else
409 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
410 if (!code_gen_buffer) {
411 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
412 exit(1);
413 }
414 map_exec(code_gen_buffer, code_gen_buffer_size);
415#endif
4369415f 416#endif /* !USE_STATIC_CODE_GEN_BUFFER */
417 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
418 code_gen_buffer_max_size = code_gen_buffer_size -
419 code_gen_max_block_size();
420 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
421 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
422}
423
424/* Must be called before using the QEMU cpus. 'tb_size' is the size
425 (in bytes) allocated to the translation buffer. Zero means default
426 size. */
427void cpu_exec_init_all(unsigned long tb_size)
428{
429 cpu_gen_init();
430 code_gen_alloc(tb_size);
431 code_gen_ptr = code_gen_buffer;
4369415f 432 page_init();
e2eef170 433#if !defined(CONFIG_USER_ONLY)
26a5f13b 434 io_mem_init();
e2eef170 435#endif
436}
437
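/* Illustrative usage sketch (added, not from this file): a front end is
 * expected to initialize translation once before creating any CPU, roughly:
 *
 *     cpu_exec_init_all(0);          // 0 selects the default buffer size
 *     env = cpu_init(cpu_model);     // the target's cpu_init() calls cpu_exec_init(env)
 *
 * The actual call sites live in the system/user front ends, not in exec.c. */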
6a00d601 438void cpu_exec_init(CPUState *env)
fd6ce8f6 439{
440 CPUState **penv;
441 int cpu_index;
442
443 env->next_cpu = NULL;
444 penv = &first_cpu;
445 cpu_index = 0;
446 while (*penv != NULL) {
447 penv = (CPUState **)&(*penv)->next_cpu;
448 cpu_index++;
449 }
450 env->cpu_index = cpu_index;
6658ffb8 451 env->nb_watchpoints = 0;
6a00d601 452 *penv = env;
453}
454
455static inline void invalidate_page_bitmap(PageDesc *p)
456{
457 if (p->code_bitmap) {
59817ccb 458 qemu_free(p->code_bitmap);
459 p->code_bitmap = NULL;
460 }
461 p->code_write_count = 0;
462}
463
464/* set to NULL all the 'first_tb' fields in all PageDescs */
465static void page_flush_tb(void)
466{
467 int i, j;
468 PageDesc *p;
469
470 for(i = 0; i < L1_SIZE; i++) {
471 p = l1_map[i];
472 if (p) {
473 for(j = 0; j < L2_SIZE; j++) {
474 p->first_tb = NULL;
475 invalidate_page_bitmap(p);
476 p++;
477 }
478 }
479 }
480}
481
482/* flush all the translation blocks */
d4e8164f 483/* XXX: tb_flush is currently not thread safe */
6a00d601 484void tb_flush(CPUState *env1)
fd6ce8f6 485{
6a00d601 486 CPUState *env;
0124311e 487#if defined(DEBUG_FLUSH)
488 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
489 (unsigned long)(code_gen_ptr - code_gen_buffer),
490 nb_tbs, nb_tbs > 0 ?
491 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 492#endif
26a5f13b 493 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
494 cpu_abort(env1, "Internal error: code buffer overflow\n");
495
fd6ce8f6 496 nb_tbs = 0;
3b46e624 497
498 for(env = first_cpu; env != NULL; env = env->next_cpu) {
499 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
500 }
9fa3e853 501
8a8a608f 502 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 503 page_flush_tb();
9fa3e853 504
fd6ce8f6 505 code_gen_ptr = code_gen_buffer;
506 /* XXX: flush processor icache at this point if cache flush is
507 expensive */
e3db7226 508 tb_flush_count++;
509}
510
511#ifdef DEBUG_TB_CHECK
512
bc98a7ef 513static void tb_invalidate_check(target_ulong address)
514{
515 TranslationBlock *tb;
516 int i;
517 address &= TARGET_PAGE_MASK;
518 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
519 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
520 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
521 address >= tb->pc + tb->size)) {
522 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 523 address, (long)tb->pc, tb->size);
524 }
525 }
526 }
527}
528
529/* verify that all the pages have correct rights for code */
530static void tb_page_check(void)
531{
532 TranslationBlock *tb;
533 int i, flags1, flags2;
3b46e624 534
535 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
536 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
537 flags1 = page_get_flags(tb->pc);
538 flags2 = page_get_flags(tb->pc + tb->size - 1);
539 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
540 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 541 (long)tb->pc, tb->size, flags1, flags2);
542 }
543 }
544 }
545}
546
547void tb_jmp_check(TranslationBlock *tb)
548{
549 TranslationBlock *tb1;
550 unsigned int n1;
551
552 /* suppress any remaining jumps to this TB */
553 tb1 = tb->jmp_first;
554 for(;;) {
555 n1 = (long)tb1 & 3;
556 tb1 = (TranslationBlock *)((long)tb1 & ~3);
557 if (n1 == 2)
558 break;
559 tb1 = tb1->jmp_next[n1];
560 }
561 /* check end of list */
562 if (tb1 != tb) {
563 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
564 }
565}
566
567#endif
568
569/* invalidate one TB */
570static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
571 int next_offset)
572{
573 TranslationBlock *tb1;
574 for(;;) {
575 tb1 = *ptb;
576 if (tb1 == tb) {
577 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
578 break;
579 }
580 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
581 }
582}
583
584static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
585{
586 TranslationBlock *tb1;
587 unsigned int n1;
588
589 for(;;) {
590 tb1 = *ptb;
591 n1 = (long)tb1 & 3;
592 tb1 = (TranslationBlock *)((long)tb1 & ~3);
593 if (tb1 == tb) {
594 *ptb = tb1->page_next[n1];
595 break;
596 }
597 ptb = &tb1->page_next[n1];
598 }
599}
600
601static inline void tb_jmp_remove(TranslationBlock *tb, int n)
602{
603 TranslationBlock *tb1, **ptb;
604 unsigned int n1;
605
606 ptb = &tb->jmp_next[n];
607 tb1 = *ptb;
608 if (tb1) {
609 /* find tb(n) in circular list */
610 for(;;) {
611 tb1 = *ptb;
612 n1 = (long)tb1 & 3;
613 tb1 = (TranslationBlock *)((long)tb1 & ~3);
614 if (n1 == n && tb1 == tb)
615 break;
616 if (n1 == 2) {
617 ptb = &tb1->jmp_first;
618 } else {
619 ptb = &tb1->jmp_next[n1];
620 }
621 }
622 /* now we can suppress tb(n) from the list */
623 *ptb = tb->jmp_next[n];
624
625 tb->jmp_next[n] = NULL;
626 }
627}
628
629/* reset the jump entry 'n' of a TB so that it is not chained to
630 another TB */
631static inline void tb_reset_jump(TranslationBlock *tb, int n)
632{
633 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
634}
635
00f82b8a 636static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
fd6ce8f6 637{
6a00d601 638 CPUState *env;
8a40a180 639 PageDesc *p;
d4e8164f 640 unsigned int h, n1;
00f82b8a 641 target_phys_addr_t phys_pc;
8a40a180 642 TranslationBlock *tb1, *tb2;
3b46e624 643
644 /* remove the TB from the hash list */
645 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
646 h = tb_phys_hash_func(phys_pc);
5fafdf24 647 tb_remove(&tb_phys_hash[h], tb,
648 offsetof(TranslationBlock, phys_hash_next));
649
650 /* remove the TB from the page list */
651 if (tb->page_addr[0] != page_addr) {
652 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
653 tb_page_remove(&p->first_tb, tb);
654 invalidate_page_bitmap(p);
655 }
656 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
657 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
658 tb_page_remove(&p->first_tb, tb);
659 invalidate_page_bitmap(p);
660 }
661
36bdbe54 662 tb_invalidated_flag = 1;
59817ccb 663
fd6ce8f6 664 /* remove the TB from the hash list */
8a40a180 665 h = tb_jmp_cache_hash_func(tb->pc);
666 for(env = first_cpu; env != NULL; env = env->next_cpu) {
667 if (env->tb_jmp_cache[h] == tb)
668 env->tb_jmp_cache[h] = NULL;
669 }
670
671 /* suppress this TB from the two jump lists */
672 tb_jmp_remove(tb, 0);
673 tb_jmp_remove(tb, 1);
674
675 /* suppress any remaining jumps to this TB */
676 tb1 = tb->jmp_first;
677 for(;;) {
678 n1 = (long)tb1 & 3;
679 if (n1 == 2)
680 break;
681 tb1 = (TranslationBlock *)((long)tb1 & ~3);
682 tb2 = tb1->jmp_next[n1];
683 tb_reset_jump(tb1, n1);
684 tb1->jmp_next[n1] = NULL;
685 tb1 = tb2;
686 }
687 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 688
e3db7226 689 tb_phys_invalidate_count++;
690}
691
692static inline void set_bits(uint8_t *tab, int start, int len)
693{
694 int end, mask, end1;
695
696 end = start + len;
697 tab += start >> 3;
698 mask = 0xff << (start & 7);
699 if ((start & ~7) == (end & ~7)) {
700 if (start < end) {
701 mask &= ~(0xff << (end & 7));
702 *tab |= mask;
703 }
704 } else {
705 *tab++ |= mask;
706 start = (start + 8) & ~7;
707 end1 = end & ~7;
708 while (start < end1) {
709 *tab++ = 0xff;
710 start += 8;
711 }
712 if (start < end) {
713 mask = ~(0xff << (end & 7));
714 *tab |= mask;
715 }
716 }
717}
718
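/* Worked example (added, illustrative only): set_bits(tab, 3, 6) marks
 * bits 3..8 of the bitmap: tab[0] |= 0xf8 (bits 3-7), no full 0xff bytes
 * in between, then tab[1] |= 0x01 (bit 8). */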
719static void build_page_bitmap(PageDesc *p)
720{
721 int n, tb_start, tb_end;
722 TranslationBlock *tb;
3b46e624 723
59817ccb 724 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
725 if (!p->code_bitmap)
726 return;
727 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
728
729 tb = p->first_tb;
730 while (tb != NULL) {
731 n = (long)tb & 3;
732 tb = (TranslationBlock *)((long)tb & ~3);
733 /* NOTE: this is subtle as a TB may span two physical pages */
734 if (n == 0) {
735 /* NOTE: tb_end may be after the end of the page, but
736 it is not a problem */
737 tb_start = tb->pc & ~TARGET_PAGE_MASK;
738 tb_end = tb_start + tb->size;
739 if (tb_end > TARGET_PAGE_SIZE)
740 tb_end = TARGET_PAGE_SIZE;
741 } else {
742 tb_start = 0;
743 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
744 }
745 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
746 tb = tb->page_next[n];
747 }
748}
749
750#ifdef TARGET_HAS_PRECISE_SMC
751
5fafdf24 752static void tb_gen_code(CPUState *env,
753 target_ulong pc, target_ulong cs_base, int flags,
754 int cflags)
755{
756 TranslationBlock *tb;
757 uint8_t *tc_ptr;
758 target_ulong phys_pc, phys_page2, virt_page2;
759 int code_gen_size;
760
761 phys_pc = get_phys_addr_code(env, pc);
762 tb = tb_alloc(pc);
763 if (!tb) {
764 /* flush must be done */
765 tb_flush(env);
766 /* cannot fail at this point */
c27004ec 767 tb = tb_alloc(pc);
768 }
769 tc_ptr = code_gen_ptr;
770 tb->tc_ptr = tc_ptr;
771 tb->cs_base = cs_base;
772 tb->flags = flags;
773 tb->cflags = cflags;
d07bde88 774 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 775 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 776
d720b93d 777 /* check next page if needed */
c27004ec 778 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 779 phys_page2 = -1;
c27004ec 780 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
781 phys_page2 = get_phys_addr_code(env, virt_page2);
782 }
783 tb_link_phys(tb, phys_pc, phys_page2);
784}
785#endif
3b46e624 786
787/* invalidate all TBs which intersect with the target physical page
788 starting in range [start;end[. NOTE: start and end must refer to
789 the same physical page. 'is_cpu_write_access' should be true if called
790 from a real cpu write access: the virtual CPU will exit the current
791 TB if code is modified inside this TB. */
00f82b8a 792void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
793 int is_cpu_write_access)
794{
795 int n, current_tb_modified, current_tb_not_found, current_flags;
d720b93d 796 CPUState *env = cpu_single_env;
9fa3e853 797 PageDesc *p;
ea1c1802 798 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
9fa3e853 799 target_ulong tb_start, tb_end;
d720b93d 800 target_ulong current_pc, current_cs_base;
801
802 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 803 if (!p)
9fa3e853 804 return;
5fafdf24 805 if (!p->code_bitmap &&
806 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
807 is_cpu_write_access) {
808 /* build code bitmap */
809 build_page_bitmap(p);
810 }
811
812 /* we remove all the TBs in the range [start, end[ */
813 /* XXX: see if in some cases it could be faster to invalidate all the code */
814 current_tb_not_found = is_cpu_write_access;
815 current_tb_modified = 0;
816 current_tb = NULL; /* avoid warning */
817 current_pc = 0; /* avoid warning */
818 current_cs_base = 0; /* avoid warning */
819 current_flags = 0; /* avoid warning */
820 tb = p->first_tb;
821 while (tb != NULL) {
822 n = (long)tb & 3;
823 tb = (TranslationBlock *)((long)tb & ~3);
824 tb_next = tb->page_next[n];
825 /* NOTE: this is subtle as a TB may span two physical pages */
826 if (n == 0) {
827 /* NOTE: tb_end may be after the end of the page, but
828 it is not a problem */
829 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
830 tb_end = tb_start + tb->size;
831 } else {
832 tb_start = tb->page_addr[1];
833 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
834 }
835 if (!(tb_end <= start || tb_start >= end)) {
836#ifdef TARGET_HAS_PRECISE_SMC
837 if (current_tb_not_found) {
838 current_tb_not_found = 0;
839 current_tb = NULL;
840 if (env->mem_write_pc) {
841 /* now we have a real cpu fault */
842 current_tb = tb_find_pc(env->mem_write_pc);
843 }
844 }
845 if (current_tb == tb &&
846 !(current_tb->cflags & CF_SINGLE_INSN)) {
847 /* If we are modifying the current TB, we must stop
848 its execution. We could be more precise by checking
849 that the modification is after the current PC, but it
850 would require a specialized function to partially
851 restore the CPU state */
3b46e624 852
d720b93d 853 current_tb_modified = 1;
5fafdf24 854 cpu_restore_state(current_tb, env,
855 env->mem_write_pc, NULL);
856#if defined(TARGET_I386)
857 current_flags = env->hflags;
858 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
859 current_cs_base = (target_ulong)env->segs[R_CS].base;
860 current_pc = current_cs_base + env->eip;
861#else
862#error unsupported CPU
863#endif
864 }
865#endif /* TARGET_HAS_PRECISE_SMC */
866 /* we need to do that to handle the case where a signal
867 occurs while doing tb_phys_invalidate() */
868 saved_tb = NULL;
869 if (env) {
870 saved_tb = env->current_tb;
871 env->current_tb = NULL;
872 }
9fa3e853 873 tb_phys_invalidate(tb, -1);
874 if (env) {
875 env->current_tb = saved_tb;
876 if (env->interrupt_request && env->current_tb)
877 cpu_interrupt(env, env->interrupt_request);
878 }
879 }
880 tb = tb_next;
881 }
882#if !defined(CONFIG_USER_ONLY)
883 /* if no code remaining, no need to continue to use slow writes */
884 if (!p->first_tb) {
885 invalidate_page_bitmap(p);
886 if (is_cpu_write_access) {
887 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
888 }
889 }
890#endif
891#ifdef TARGET_HAS_PRECISE_SMC
892 if (current_tb_modified) {
893 /* we generate a block containing just the instruction
894 modifying the memory. It will ensure that it cannot modify
895 itself */
ea1c1802 896 env->current_tb = NULL;
5fafdf24 897 tb_gen_code(env, current_pc, current_cs_base, current_flags,
898 CF_SINGLE_INSN);
899 cpu_resume_from_signal(env, NULL);
9fa3e853 900 }
fd6ce8f6 901#endif
9fa3e853 902}
fd6ce8f6 903
9fa3e853 904/* len must be <= 8 and start must be a multiple of len */
00f82b8a 905static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
906{
907 PageDesc *p;
908 int offset, b;
59817ccb 909#if 0
910 if (1) {
911 if (loglevel) {
912 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
913 cpu_single_env->mem_write_vaddr, len,
914 cpu_single_env->eip,
915 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
916 }
917 }
918#endif
9fa3e853 919 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 920 if (!p)
921 return;
922 if (p->code_bitmap) {
923 offset = start & ~TARGET_PAGE_MASK;
924 b = p->code_bitmap[offset >> 3] >> (offset & 7);
925 if (b & ((1 << len) - 1))
926 goto do_invalidate;
927 } else {
928 do_invalidate:
d720b93d 929 tb_invalidate_phys_page_range(start, start + len, 1);
930 }
931}
932
9fa3e853 933#if !defined(CONFIG_SOFTMMU)
00f82b8a 934static void tb_invalidate_phys_page(target_phys_addr_t addr,
d720b93d 935 unsigned long pc, void *puc)
9fa3e853 936{
937 int n, current_flags, current_tb_modified;
938 target_ulong current_pc, current_cs_base;
9fa3e853 939 PageDesc *p;
940 TranslationBlock *tb, *current_tb;
941#ifdef TARGET_HAS_PRECISE_SMC
942 CPUState *env = cpu_single_env;
943#endif
944
945 addr &= TARGET_PAGE_MASK;
946 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 947 if (!p)
948 return;
949 tb = p->first_tb;
950 current_tb_modified = 0;
951 current_tb = NULL;
952 current_pc = 0; /* avoid warning */
953 current_cs_base = 0; /* avoid warning */
954 current_flags = 0; /* avoid warning */
955#ifdef TARGET_HAS_PRECISE_SMC
956 if (tb && pc != 0) {
957 current_tb = tb_find_pc(pc);
958 }
959#endif
960 while (tb != NULL) {
961 n = (long)tb & 3;
962 tb = (TranslationBlock *)((long)tb & ~3);
963#ifdef TARGET_HAS_PRECISE_SMC
964 if (current_tb == tb &&
965 !(current_tb->cflags & CF_SINGLE_INSN)) {
966 /* If we are modifying the current TB, we must stop
967 its execution. We could be more precise by checking
968 that the modification is after the current PC, but it
969 would require a specialized function to partially
970 restore the CPU state */
3b46e624 971
972 current_tb_modified = 1;
973 cpu_restore_state(current_tb, env, pc, puc);
974#if defined(TARGET_I386)
975 current_flags = env->hflags;
976 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
977 current_cs_base = (target_ulong)env->segs[R_CS].base;
978 current_pc = current_cs_base + env->eip;
979#else
980#error unsupported CPU
981#endif
982 }
983#endif /* TARGET_HAS_PRECISE_SMC */
984 tb_phys_invalidate(tb, addr);
985 tb = tb->page_next[n];
986 }
fd6ce8f6 987 p->first_tb = NULL;
988#ifdef TARGET_HAS_PRECISE_SMC
989 if (current_tb_modified) {
990 /* we generate a block containing just the instruction
991 modifying the memory. It will ensure that it cannot modify
992 itself */
ea1c1802 993 env->current_tb = NULL;
5fafdf24 994 tb_gen_code(env, current_pc, current_cs_base, current_flags,
995 CF_SINGLE_INSN);
996 cpu_resume_from_signal(env, puc);
997 }
998#endif
fd6ce8f6 999}
9fa3e853 1000#endif
1001
1002/* add the tb in the target page and protect it if necessary */
5fafdf24 1003static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 1004 unsigned int n, target_ulong page_addr)
1005{
1006 PageDesc *p;
1007 TranslationBlock *last_first_tb;
1008
1009 tb->page_addr[n] = page_addr;
3a7d929e 1010 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1011 tb->page_next[n] = p->first_tb;
1012 last_first_tb = p->first_tb;
1013 p->first_tb = (TranslationBlock *)((long)tb | n);
1014 invalidate_page_bitmap(p);
fd6ce8f6 1015
107db443 1016#if defined(TARGET_HAS_SMC) || 1
d720b93d 1017
9fa3e853 1018#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1019 if (p->flags & PAGE_WRITE) {
1020 target_ulong addr;
1021 PageDesc *p2;
1022 int prot;
1023
1024 /* force the host page as non writable (writes will have a
1025 page fault + mprotect overhead) */
53a5960a 1026 page_addr &= qemu_host_page_mask;
fd6ce8f6 1027 prot = 0;
1028 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1029 addr += TARGET_PAGE_SIZE) {
1030
1031 p2 = page_find (addr >> TARGET_PAGE_BITS);
1032 if (!p2)
1033 continue;
1034 prot |= p2->flags;
1035 p2->flags &= ~PAGE_WRITE;
1036 page_get_flags(addr);
1037 }
5fafdf24 1038 mprotect(g2h(page_addr), qemu_host_page_size,
1039 (prot & PAGE_BITS) & ~PAGE_WRITE);
1040#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1041 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1042 page_addr);
fd6ce8f6 1043#endif
fd6ce8f6 1044 }
1045#else
1046 /* if some code is already present, then the pages are already
1047 protected. So we handle the case where only the first TB is
1048 allocated in a physical page */
1049 if (!last_first_tb) {
6a00d601 1050 tlb_protect_code(page_addr);
1051 }
1052#endif
1053
1054#endif /* TARGET_HAS_SMC */
1055}
1056
1057/* Allocate a new translation block. Flush the translation buffer if
1058 too many translation blocks or too much generated code. */
c27004ec 1059TranslationBlock *tb_alloc(target_ulong pc)
1060{
1061 TranslationBlock *tb;
fd6ce8f6 1062
1063 if (nb_tbs >= code_gen_max_blocks ||
1064 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
d4e8164f 1065 return NULL;
1066 tb = &tbs[nb_tbs++];
1067 tb->pc = pc;
b448f2f3 1068 tb->cflags = 0;
1069 return tb;
1070}
1071
1072/* add a new TB and link it to the physical page tables. phys_page2 is
1073 (-1) to indicate that only one page contains the TB. */
5fafdf24 1074void tb_link_phys(TranslationBlock *tb,
9fa3e853 1075 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 1076{
1077 unsigned int h;
1078 TranslationBlock **ptb;
1079
1080 /* Grab the mmap lock to stop another thread invalidating this TB
1081 before we are done. */
1082 mmap_lock();
1083 /* add in the physical hash table */
1084 h = tb_phys_hash_func(phys_pc);
1085 ptb = &tb_phys_hash[h];
1086 tb->phys_hash_next = *ptb;
1087 *ptb = tb;
1088
1089 /* add in the page list */
1090 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1091 if (phys_page2 != -1)
1092 tb_alloc_page(tb, 1, phys_page2);
1093 else
1094 tb->page_addr[1] = -1;
9fa3e853 1095
1096 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1097 tb->jmp_next[0] = NULL;
1098 tb->jmp_next[1] = NULL;
1099
1100 /* init original jump addresses */
1101 if (tb->tb_next_offset[0] != 0xffff)
1102 tb_reset_jump(tb, 0);
1103 if (tb->tb_next_offset[1] != 0xffff)
1104 tb_reset_jump(tb, 1);
1105
1106#ifdef DEBUG_TB_CHECK
1107 tb_page_check();
1108#endif
c8a706fe 1109 mmap_unlock();
1110}
1111
1112/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1113 tb[1].tc_ptr. Return NULL if not found */
1114TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1115{
1116 int m_min, m_max, m;
1117 unsigned long v;
1118 TranslationBlock *tb;
1119
1120 if (nb_tbs <= 0)
1121 return NULL;
1122 if (tc_ptr < (unsigned long)code_gen_buffer ||
1123 tc_ptr >= (unsigned long)code_gen_ptr)
1124 return NULL;
1125 /* binary search (cf Knuth) */
1126 m_min = 0;
1127 m_max = nb_tbs - 1;
1128 while (m_min <= m_max) {
1129 m = (m_min + m_max) >> 1;
1130 tb = &tbs[m];
1131 v = (unsigned long)tb->tc_ptr;
1132 if (v == tc_ptr)
1133 return tb;
1134 else if (tc_ptr < v) {
1135 m_max = m - 1;
1136 } else {
1137 m_min = m + 1;
1138 }
5fafdf24 1139 }
1140 return &tbs[m_max];
1141}
7501267e 1142
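/* Added descriptive note: callers such as tb_invalidate_phys_page() above
 * pass a faulting host PC to tb_find_pc() and then to cpu_restore_state().
 * The binary search is valid because TBs are handed out from tbs[] in the
 * same order their code is emitted into code_gen_buffer, so tc_ptr grows
 * monotonically with the array index. */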
1143static void tb_reset_jump_recursive(TranslationBlock *tb);
1144
1145static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1146{
1147 TranslationBlock *tb1, *tb_next, **ptb;
1148 unsigned int n1;
1149
1150 tb1 = tb->jmp_next[n];
1151 if (tb1 != NULL) {
1152 /* find head of list */
1153 for(;;) {
1154 n1 = (long)tb1 & 3;
1155 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1156 if (n1 == 2)
1157 break;
1158 tb1 = tb1->jmp_next[n1];
1159 }
1160 /* we are now sure that tb jumps to tb1 */
1161 tb_next = tb1;
1162
1163 /* remove tb from the jmp_first list */
1164 ptb = &tb_next->jmp_first;
1165 for(;;) {
1166 tb1 = *ptb;
1167 n1 = (long)tb1 & 3;
1168 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1169 if (n1 == n && tb1 == tb)
1170 break;
1171 ptb = &tb1->jmp_next[n1];
1172 }
1173 *ptb = tb->jmp_next[n];
1174 tb->jmp_next[n] = NULL;
3b46e624 1175
1176 /* suppress the jump to next tb in generated code */
1177 tb_reset_jump(tb, n);
1178
0124311e 1179 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1180 tb_reset_jump_recursive(tb_next);
1181 }
1182}
1183
1184static void tb_reset_jump_recursive(TranslationBlock *tb)
1185{
1186 tb_reset_jump_recursive2(tb, 0);
1187 tb_reset_jump_recursive2(tb, 1);
1188}
1189
1fddef4b 1190#if defined(TARGET_HAS_ICE)
1191static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1192{
1193 target_phys_addr_t addr;
1194 target_ulong pd;
1195 ram_addr_t ram_addr;
1196 PhysPageDesc *p;
d720b93d 1197
1198 addr = cpu_get_phys_page_debug(env, pc);
1199 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1200 if (!p) {
1201 pd = IO_MEM_UNASSIGNED;
1202 } else {
1203 pd = p->phys_offset;
1204 }
1205 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1206 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1207}
c27004ec 1208#endif
d720b93d 1209
6658ffb8 1210/* Add a watchpoint. */
0f459d16 1211int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1212{
1213 int i;
1214
1215 for (i = 0; i < env->nb_watchpoints; i++) {
1216 if (addr == env->watchpoint[i].vaddr)
1217 return 0;
1218 }
1219 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1220 return -1;
1221
1222 i = env->nb_watchpoints++;
1223 env->watchpoint[i].vaddr = addr;
0f459d16 1224 env->watchpoint[i].type = type;
1225 tlb_flush_page(env, addr);
1226 /* FIXME: This flush is needed because of the hack to make memory ops
1227 terminate the TB. It can be removed once the proper IO trap and
1228 re-execute bits are in. */
1229 tb_flush(env);
1230 return i;
1231}
1232
1233/* Remove a watchpoint. */
1234int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1235{
1236 int i;
1237
1238 for (i = 0; i < env->nb_watchpoints; i++) {
1239 if (addr == env->watchpoint[i].vaddr) {
1240 env->nb_watchpoints--;
1241 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1242 tlb_flush_page(env, addr);
1243 return 0;
1244 }
1245 }
1246 return -1;
1247}
1248
1249/* Remove all watchpoints. */
1250void cpu_watchpoint_remove_all(CPUState *env) {
1251 int i;
1252
1253 for (i = 0; i < env->nb_watchpoints; i++) {
1254 tlb_flush_page(env, env->watchpoint[i].vaddr);
1255 }
1256 env->nb_watchpoints = 0;
1257}
1258
1259/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1260 breakpoint is reached */
2e12669a 1261int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
4c3a88a2 1262{
1fddef4b 1263#if defined(TARGET_HAS_ICE)
4c3a88a2 1264 int i;
3b46e624 1265
1266 for(i = 0; i < env->nb_breakpoints; i++) {
1267 if (env->breakpoints[i] == pc)
1268 return 0;
1269 }
1270
1271 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1272 return -1;
1273 env->breakpoints[env->nb_breakpoints++] = pc;
3b46e624 1274
d720b93d 1275 breakpoint_invalidate(env, pc);
1276 return 0;
1277#else
1278 return -1;
1279#endif
1280}
1281
1282/* remove all breakpoints */
1283void cpu_breakpoint_remove_all(CPUState *env) {
1284#if defined(TARGET_HAS_ICE)
1285 int i;
1286 for(i = 0; i < env->nb_breakpoints; i++) {
1287 breakpoint_invalidate(env, env->breakpoints[i]);
1288 }
1289 env->nb_breakpoints = 0;
1290#endif
1291}
1292
4c3a88a2 1293/* remove a breakpoint */
2e12669a 1294int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
4c3a88a2 1295{
1fddef4b 1296#if defined(TARGET_HAS_ICE)
1297 int i;
1298 for(i = 0; i < env->nb_breakpoints; i++) {
1299 if (env->breakpoints[i] == pc)
1300 goto found;
1301 }
1302 return -1;
1303 found:
4c3a88a2 1304 env->nb_breakpoints--;
1305 if (i < env->nb_breakpoints)
1306 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1307
1308 breakpoint_invalidate(env, pc);
1309 return 0;
1310#else
1311 return -1;
1312#endif
1313}
1314
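/* Illustrative usage (added, not from this file): a debugger front end such
 * as the gdb stub typically drives these helpers:
 *
 *     cpu_breakpoint_insert(env, pc);    // cpu_exec() then returns EXCP_DEBUG at pc
 *     cpu_breakpoint_remove(env, pc);    // both calls invalidate the TB containing pc
 */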
1315/* enable or disable single step mode. EXCP_DEBUG is returned by the
1316 CPU loop after each instruction */
1317void cpu_single_step(CPUState *env, int enabled)
1318{
1fddef4b 1319#if defined(TARGET_HAS_ICE)
1320 if (env->singlestep_enabled != enabled) {
1321 env->singlestep_enabled = enabled;
1322 /* must flush all the translated code to avoid inconsistencies */
9fa3e853 1323 /* XXX: only flush what is necessary */
0124311e 1324 tb_flush(env);
1325 }
1326#endif
1327}
1328
1329/* enable or disable low levels log */
1330void cpu_set_log(int log_flags)
1331{
1332 loglevel = log_flags;
1333 if (loglevel && !logfile) {
11fcfab4 1334 logfile = fopen(logfilename, log_append ? "a" : "w");
1335 if (!logfile) {
1336 perror(logfilename);
1337 _exit(1);
1338 }
1339#if !defined(CONFIG_SOFTMMU)
1340 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1341 {
1342 static uint8_t logfile_buf[4096];
1343 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1344 }
1345#else
34865134 1346 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1347#endif
1348 log_append = 1;
1349 }
1350 if (!loglevel && logfile) {
1351 fclose(logfile);
1352 logfile = NULL;
1353 }
1354}
1355
1356void cpu_set_log_filename(const char *filename)
1357{
1358 logfilename = strdup(filename);
1359 if (logfile) {
1360 fclose(logfile);
1361 logfile = NULL;
1362 }
1363 cpu_set_log(loglevel);
34865134 1364}
c33a346e 1365
0124311e 1366/* mask must never be zero, except for A20 change call */
68a79315 1367void cpu_interrupt(CPUState *env, int mask)
ea041c0e 1368{
d5975363 1369#if !defined(USE_NPTL)
ea041c0e 1370 TranslationBlock *tb;
15a51156 1371 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
d5975363 1372#endif
59817ccb 1373
1374 /* FIXME: This is probably not thread-safe. A different thread could
 1375 be in the middle of a read-modify-write operation. */
68a79315 1376 env->interrupt_request |= mask;
1377#if defined(USE_NPTL)
1378 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1379 problem and hope the cpu will stop of its own accord. For userspace
1380 emulation this often isn't actually as bad as it sounds. Often
1381 signals are used primarily to interrupt blocking syscalls. */
1382#else
1383 /* if the cpu is currently executing code, we must unlink it and
1384 all the potentially executing TB */
1385 tb = env->current_tb;
1386 if (tb && !testandset(&interrupt_lock)) {
1387 env->current_tb = NULL;
ea041c0e 1388 tb_reset_jump_recursive(tb);
15a51156 1389 resetlock(&interrupt_lock);
ea041c0e 1390 }
d5975363 1391#endif
1392}
1393
1394void cpu_reset_interrupt(CPUState *env, int mask)
1395{
1396 env->interrupt_request &= ~mask;
1397}
1398
f193c797 1399CPULogItem cpu_log_items[] = {
5fafdf24 1400 { CPU_LOG_TB_OUT_ASM, "out_asm",
1401 "show generated host assembly code for each compiled TB" },
1402 { CPU_LOG_TB_IN_ASM, "in_asm",
1403 "show target assembly code for each compiled TB" },
5fafdf24 1404 { CPU_LOG_TB_OP, "op",
57fec1fe 1405 "show micro ops for each compiled TB" },
f193c797 1406 { CPU_LOG_TB_OP_OPT, "op_opt",
1407 "show micro ops "
1408#ifdef TARGET_I386
1409 "before eflags optimization and "
f193c797 1410#endif
e01a1157 1411 "after liveness analysis" },
1412 { CPU_LOG_INT, "int",
1413 "show interrupts/exceptions in short format" },
1414 { CPU_LOG_EXEC, "exec",
1415 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1416 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1417 "show CPU state before block translation" },
1418#ifdef TARGET_I386
1419 { CPU_LOG_PCALL, "pcall",
1420 "show protected mode far calls/returns/exceptions" },
1421#endif
8e3a9fd2 1422#ifdef DEBUG_IOPORT
1423 { CPU_LOG_IOPORT, "ioport",
1424 "show all i/o ports accesses" },
8e3a9fd2 1425#endif
1426 { 0, NULL, NULL },
1427};
1428
1429static int cmp1(const char *s1, int n, const char *s2)
1430{
1431 if (strlen(s2) != n)
1432 return 0;
1433 return memcmp(s1, s2, n) == 0;
1434}
3b46e624 1435
1436/* takes a comma separated list of log masks. Return 0 if error. */
1437int cpu_str_to_log_mask(const char *str)
1438{
1439 CPULogItem *item;
1440 int mask;
1441 const char *p, *p1;
1442
1443 p = str;
1444 mask = 0;
1445 for(;;) {
1446 p1 = strchr(p, ',');
1447 if (!p1)
1448 p1 = p + strlen(p);
1449 if(cmp1(p,p1-p,"all")) {
1450 for(item = cpu_log_items; item->mask != 0; item++) {
1451 mask |= item->mask;
1452 }
1453 } else {
1454 for(item = cpu_log_items; item->mask != 0; item++) {
1455 if (cmp1(p, p1 - p, item->name))
1456 goto found;
1457 }
1458 return 0;
8e3a9fd2 1459 }
1460 found:
1461 mask |= item->mask;
1462 if (*p1 != ',')
1463 break;
1464 p = p1 + 1;
1465 }
1466 return mask;
1467}
ea041c0e 1468
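/* Illustrative usage (added, not from this file): the log-selection command
 * line option is typically handled with these two helpers, e.g.
 *
 *     int mask = cpu_str_to_log_mask("in_asm,op_opt");
 *     if (mask)
 *         cpu_set_log(mask);            // opens /tmp/qemu.log by default
 *
 * An unknown item name makes cpu_str_to_log_mask() return 0. */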
1469void cpu_abort(CPUState *env, const char *fmt, ...)
1470{
1471 va_list ap;
493ae1f0 1472 va_list ap2;
1473
1474 va_start(ap, fmt);
493ae1f0 1475 va_copy(ap2, ap);
1476 fprintf(stderr, "qemu: fatal: ");
1477 vfprintf(stderr, fmt, ap);
1478 fprintf(stderr, "\n");
1479#ifdef TARGET_I386
1480 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1481#else
1482 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1483#endif
924edcae 1484 if (logfile) {
f9373291 1485 fprintf(logfile, "qemu: fatal: ");
493ae1f0 1486 vfprintf(logfile, fmt, ap2);
1487 fprintf(logfile, "\n");
1488#ifdef TARGET_I386
1489 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1490#else
1491 cpu_dump_state(env, logfile, fprintf, 0);
1492#endif
1493 fflush(logfile);
1494 fclose(logfile);
1495 }
493ae1f0 1496 va_end(ap2);
f9373291 1497 va_end(ap);
1498 abort();
1499}
1500
1501CPUState *cpu_copy(CPUState *env)
1502{
01ba9816 1503 CPUState *new_env = cpu_init(env->cpu_model_str);
1504 /* preserve chaining and index */
1505 CPUState *next_cpu = new_env->next_cpu;
1506 int cpu_index = new_env->cpu_index;
1507 memcpy(new_env, env, sizeof(CPUState));
1508 new_env->next_cpu = next_cpu;
1509 new_env->cpu_index = cpu_index;
1510 return new_env;
1511}
1512
1513#if !defined(CONFIG_USER_ONLY)
1514
1515static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1516{
1517 unsigned int i;
1518
1519 /* Discard jump cache entries for any tb which might potentially
1520 overlap the flushed page. */
1521 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1522 memset (&env->tb_jmp_cache[i], 0,
1523 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1524
1525 i = tb_jmp_cache_hash_page(addr);
1526 memset (&env->tb_jmp_cache[i], 0,
1527 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1528}
1529
1530/* NOTE: if flush_global is true, also flush global entries (not
1531 implemented yet) */
1532void tlb_flush(CPUState *env, int flush_global)
33417e70 1533{
33417e70 1534 int i;
0124311e 1535
1536#if defined(DEBUG_TLB)
1537 printf("tlb_flush:\n");
1538#endif
1539 /* must reset current TB so that interrupts cannot modify the
1540 links while we are modifying them */
1541 env->current_tb = NULL;
1542
33417e70 1543 for(i = 0; i < CPU_TLB_SIZE; i++) {
1544 env->tlb_table[0][i].addr_read = -1;
1545 env->tlb_table[0][i].addr_write = -1;
1546 env->tlb_table[0][i].addr_code = -1;
1547 env->tlb_table[1][i].addr_read = -1;
1548 env->tlb_table[1][i].addr_write = -1;
1549 env->tlb_table[1][i].addr_code = -1;
1550#if (NB_MMU_MODES >= 3)
1551 env->tlb_table[2][i].addr_read = -1;
1552 env->tlb_table[2][i].addr_write = -1;
1553 env->tlb_table[2][i].addr_code = -1;
1554#if (NB_MMU_MODES == 4)
1555 env->tlb_table[3][i].addr_read = -1;
1556 env->tlb_table[3][i].addr_write = -1;
1557 env->tlb_table[3][i].addr_code = -1;
1558#endif
1559#endif
33417e70 1560 }
9fa3e853 1561
8a40a180 1562 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1563
1564#ifdef USE_KQEMU
1565 if (env->kqemu_enabled) {
1566 kqemu_flush(env, flush_global);
1567 }
9fa3e853 1568#endif
e3db7226 1569 tlb_flush_count++;
1570}
1571
274da6b2 1572static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1573{
5fafdf24 1574 if (addr == (tlb_entry->addr_read &
84b7b8e7 1575 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1576 addr == (tlb_entry->addr_write &
84b7b8e7 1577 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1578 addr == (tlb_entry->addr_code &
1579 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1580 tlb_entry->addr_read = -1;
1581 tlb_entry->addr_write = -1;
1582 tlb_entry->addr_code = -1;
1583 }
1584}
1585
2e12669a 1586void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1587{
8a40a180 1588 int i;
0124311e 1589
9fa3e853 1590#if defined(DEBUG_TLB)
108c49b8 1591 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1592#endif
1593 /* must reset current TB so that interrupts cannot modify the
1594 links while we are modifying them */
1595 env->current_tb = NULL;
1596
1597 addr &= TARGET_PAGE_MASK;
1598 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1599 tlb_flush_entry(&env->tlb_table[0][i], addr);
1600 tlb_flush_entry(&env->tlb_table[1][i], addr);
1601#if (NB_MMU_MODES >= 3)
1602 tlb_flush_entry(&env->tlb_table[2][i], addr);
1603#if (NB_MMU_MODES == 4)
1604 tlb_flush_entry(&env->tlb_table[3][i], addr);
1605#endif
1606#endif
0124311e 1607
5c751e99 1608 tlb_flush_jmp_cache(env, addr);
9fa3e853 1609
1610#ifdef USE_KQEMU
1611 if (env->kqemu_enabled) {
1612 kqemu_flush_page(env, addr);
1613 }
1614#endif
1615}
1616
1617/* update the TLBs so that writes to code in the virtual page 'addr'
1618 can be detected */
6a00d601 1619static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1620{
5fafdf24 1621 cpu_physical_memory_reset_dirty(ram_addr,
1622 ram_addr + TARGET_PAGE_SIZE,
1623 CODE_DIRTY_FLAG);
1624}
1625
9fa3e853 1626/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1627 tested for self modifying code */
5fafdf24 1628static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1629 target_ulong vaddr)
9fa3e853 1630{
3a7d929e 1631 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1632}
1633
5fafdf24 1634static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1635 unsigned long start, unsigned long length)
1636{
1637 unsigned long addr;
1638 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1639 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1640 if ((addr - start) < length) {
0f459d16 1641 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1642 }
1643 }
1644}
1645
3a7d929e 1646void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1647 int dirty_flags)
1648{
1649 CPUState *env;
4f2ac237 1650 unsigned long length, start1;
1651 int i, mask, len;
1652 uint8_t *p;
1653
1654 start &= TARGET_PAGE_MASK;
1655 end = TARGET_PAGE_ALIGN(end);
1656
1657 length = end - start;
1658 if (length == 0)
1659 return;
0a962c02 1660 len = length >> TARGET_PAGE_BITS;
3a7d929e 1661#ifdef USE_KQEMU
1662 /* XXX: should not depend on cpu context */
1663 env = first_cpu;
3a7d929e 1664 if (env->kqemu_enabled) {
1665 ram_addr_t addr;
1666 addr = start;
1667 for(i = 0; i < len; i++) {
1668 kqemu_set_notdirty(env, addr);
1669 addr += TARGET_PAGE_SIZE;
1670 }
1671 }
1672#endif
1673 mask = ~dirty_flags;
1674 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1675 for(i = 0; i < len; i++)
1676 p[i] &= mask;
1677
1678 /* we modify the TLB cache so that the dirty bit will be set again
1679 when accessing the range */
59817ccb 1680 start1 = start + (unsigned long)phys_ram_base;
1681 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1682 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1683 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1684 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1685 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1686#if (NB_MMU_MODES >= 3)
1687 for(i = 0; i < CPU_TLB_SIZE; i++)
1688 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1689#if (NB_MMU_MODES == 4)
1690 for(i = 0; i < CPU_TLB_SIZE; i++)
1691 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1692#endif
1693#endif
6a00d601 1694 }
1695}
1696
1697static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1698{
1699 ram_addr_t ram_addr;
1700
84b7b8e7 1701 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1702 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1703 tlb_entry->addend - (unsigned long)phys_ram_base;
1704 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 1705 tlb_entry->addr_write |= TLB_NOTDIRTY;
1706 }
1707 }
1708}
1709
1710/* update the TLB according to the current state of the dirty bits */
1711void cpu_tlb_update_dirty(CPUState *env)
1712{
1713 int i;
1714 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1715 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1716 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1717 tlb_update_dirty(&env->tlb_table[1][i]);
1718#if (NB_MMU_MODES >= 3)
1719 for(i = 0; i < CPU_TLB_SIZE; i++)
1720 tlb_update_dirty(&env->tlb_table[2][i]);
1721#if (NB_MMU_MODES == 4)
1722 for(i = 0; i < CPU_TLB_SIZE; i++)
1723 tlb_update_dirty(&env->tlb_table[3][i]);
1724#endif
1725#endif
1726}
1727
0f459d16 1728static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 1729{
1730 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1731 tlb_entry->addr_write = vaddr;
1732}
1733
1734/* update the TLB corresponding to virtual page vaddr
1735 so that it is no longer dirty */
1736static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 1737{
1738 int i;
1739
0f459d16 1740 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 1741 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1742 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1743 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
6fa4cea9 1744#if (NB_MMU_MODES >= 3)
0f459d16 1745 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
6fa4cea9 1746#if (NB_MMU_MODES == 4)
0f459d16 1747 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1748#endif
1749#endif
1750}
1751
1752/* add a new TLB entry. At most one entry for a given virtual address
1753 is permitted. Return 0 if OK or 2 if the page could not be mapped
1754 (can only happen in non SOFTMMU mode for I/O pages or pages
1755 conflicting with the host address space). */
5fafdf24
TS
1756int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1757 target_phys_addr_t paddr, int prot,
6ebbf390 1758 int mmu_idx, int is_softmmu)
9fa3e853 1759{
92e873b9 1760 PhysPageDesc *p;
4f2ac237 1761 unsigned long pd;
9fa3e853 1762 unsigned int index;
4f2ac237 1763 target_ulong address;
0f459d16 1764 target_ulong code_address;
108c49b8 1765 target_phys_addr_t addend;
9fa3e853 1766 int ret;
84b7b8e7 1767 CPUTLBEntry *te;
6658ffb8 1768 int i;
0f459d16 1769 target_phys_addr_t iotlb;
9fa3e853 1770
92e873b9 1771 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1772 if (!p) {
1773 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1774 } else {
1775 pd = p->phys_offset;
9fa3e853
FB
1776 }
1777#if defined(DEBUG_TLB)
6ebbf390
JM
1778 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1779 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1780#endif
1781
1782 ret = 0;
0f459d16
PB
1783 address = vaddr;
1784 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1785 /* IO memory case (romd handled later) */
1786 address |= TLB_MMIO;
1787 }
1788 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1789 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1790 /* Normal RAM. */
1791 iotlb = pd & TARGET_PAGE_MASK;
1792 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1793 iotlb |= IO_MEM_NOTDIRTY;
1794 else
1795 iotlb |= IO_MEM_ROM;
1796 } else {
 1797 /* IO handlers are currently passed a physical address.
1798 It would be nice to pass an offset from the base address
1799 of that region. This would avoid having to special case RAM,
1800 and avoid full address decoding in every device.
1801 We can't use the high bits of pd for this because
1802 IO_MEM_ROMD uses these as a ram address. */
1803 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1804 }
1805
1806 code_address = address;
1807 /* Make accesses to pages with watchpoints go via the
1808 watchpoint trap routines. */
1809 for (i = 0; i < env->nb_watchpoints; i++) {
1810 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1811 iotlb = io_mem_watch + paddr;
1812 /* TODO: The memory case can be optimized by not trapping
1813 reads of pages with a write breakpoint. */
1814 address |= TLB_MMIO;
6658ffb8 1815 }
0f459d16 1816 }
d79acba4 1817
0f459d16
PB
1818 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1819 env->iotlb[mmu_idx][index] = iotlb - vaddr;
1820 te = &env->tlb_table[mmu_idx][index];
1821 te->addend = addend - vaddr;
1822 if (prot & PAGE_READ) {
1823 te->addr_read = address;
1824 } else {
1825 te->addr_read = -1;
1826 }
5c751e99 1827
0f459d16
PB
1828 if (prot & PAGE_EXEC) {
1829 te->addr_code = code_address;
1830 } else {
1831 te->addr_code = -1;
1832 }
1833 if (prot & PAGE_WRITE) {
1834 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1835 (pd & IO_MEM_ROMD)) {
1836 /* Write access calls the I/O callback. */
1837 te->addr_write = address | TLB_MMIO;
1838 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1839 !cpu_physical_memory_is_dirty(pd)) {
1840 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 1841 } else {
0f459d16 1842 te->addr_write = address;
9fa3e853 1843 }
0f459d16
PB
1844 } else {
1845 te->addr_write = -1;
9fa3e853 1846 }
9fa3e853
FB
1847 return ret;
1848}
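/* Usage sketch (not part of exec.c): a target's MMU fault handler typically
   translates the faulting virtual address to a guest physical address and then
   installs the mapping with tlb_set_page_exec().  The identity mapping and the
   full-access protection below are hypothetical placeholders for a real page
   table walk. */
static int mmu_fault_sketch(CPUState *env, target_ulong vaddr, int mmu_idx)
{
    target_phys_addr_t paddr;
    int prot;

    /* ... target specific page table walk would go here ... */
    paddr = vaddr & TARGET_PAGE_MASK;            /* pretend identity mapping */
    prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;   /* pretend full access */

    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK, paddr, prot,
                             mmu_idx, 1 /* is_softmmu */);
}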
1849
0124311e
FB
1850#else
1851
ee8b7021 1852void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1853{
1854}
1855
2e12669a 1856void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1857{
1858}
1859
5fafdf24
TS
1860int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1861 target_phys_addr_t paddr, int prot,
6ebbf390 1862 int mmu_idx, int is_softmmu)
9fa3e853
FB
1863{
1864 return 0;
1865}
0124311e 1866
9fa3e853
FB
1867/* dump memory mappings */
1868void page_dump(FILE *f)
33417e70 1869{
9fa3e853
FB
1870 unsigned long start, end;
1871 int i, j, prot, prot1;
1872 PageDesc *p;
33417e70 1873
9fa3e853
FB
1874 fprintf(f, "%-8s %-8s %-8s %s\n",
1875 "start", "end", "size", "prot");
1876 start = -1;
1877 end = -1;
1878 prot = 0;
1879 for(i = 0; i <= L1_SIZE; i++) {
1880 if (i < L1_SIZE)
1881 p = l1_map[i];
1882 else
1883 p = NULL;
1884 for(j = 0;j < L2_SIZE; j++) {
1885 if (!p)
1886 prot1 = 0;
1887 else
1888 prot1 = p[j].flags;
1889 if (prot1 != prot) {
1890 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1891 if (start != -1) {
1892 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 1893 start, end, end - start,
9fa3e853
FB
1894 prot & PAGE_READ ? 'r' : '-',
1895 prot & PAGE_WRITE ? 'w' : '-',
1896 prot & PAGE_EXEC ? 'x' : '-');
1897 }
1898 if (prot1 != 0)
1899 start = end;
1900 else
1901 start = -1;
1902 prot = prot1;
1903 }
1904 if (!p)
1905 break;
1906 }
33417e70 1907 }
33417e70
FB
1908}
1909
53a5960a 1910int page_get_flags(target_ulong address)
33417e70 1911{
9fa3e853
FB
1912 PageDesc *p;
1913
1914 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1915 if (!p)
9fa3e853
FB
1916 return 0;
1917 return p->flags;
1918}
1919
1920/* modify the flags of a page and invalidate the code if
 1921 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1922 depending on PAGE_WRITE */
53a5960a 1923void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1924{
1925 PageDesc *p;
53a5960a 1926 target_ulong addr;
9fa3e853 1927
c8a706fe 1928 /* mmap_lock should already be held. */
9fa3e853
FB
1929 start = start & TARGET_PAGE_MASK;
1930 end = TARGET_PAGE_ALIGN(end);
1931 if (flags & PAGE_WRITE)
1932 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
1933 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1934 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
17e2377a
PB
1935 /* We may be called for host regions that are outside guest
1936 address space. */
1937 if (!p)
1938 return;
9fa3e853
FB
1939 /* if the write protection is set, then we invalidate the code
1940 inside */
5fafdf24 1941 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
1942 (flags & PAGE_WRITE) &&
1943 p->first_tb) {
d720b93d 1944 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1945 }
1946 p->flags = flags;
1947 }
33417e70
FB
1948}
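/* Usage sketch (not part of exec.c): in user-mode emulation the guest mmap
   path records the protection of a freshly mapped range so that
   page_get_flags()/page_check_range() can see it later.  start, len and
   host_prot are hypothetical, and mmap_lock() is assumed to be held as the
   comment above requires. */
static void record_guest_mapping_sketch(target_ulong start, target_ulong len,
                                        int host_prot)
{
    int flags = PAGE_VALID;

    if (host_prot & PROT_READ)
        flags |= PAGE_READ;
    if (host_prot & PROT_WRITE)
        flags |= PAGE_WRITE;
    if (host_prot & PROT_EXEC)
        flags |= PAGE_EXEC;
    page_set_flags(start, start + len, flags);
}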
1949
3d97b40b
TS
1950int page_check_range(target_ulong start, target_ulong len, int flags)
1951{
1952 PageDesc *p;
1953 target_ulong end;
1954 target_ulong addr;
1955
 1956 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
1957 start = start & TARGET_PAGE_MASK;
1958
1959 if( end < start )
1960 /* we've wrapped around */
1961 return -1;
1962 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1963 p = page_find(addr >> TARGET_PAGE_BITS);
1964 if( !p )
1965 return -1;
1966 if( !(p->flags & PAGE_VALID) )
1967 return -1;
1968
dae3270c 1969 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 1970 return -1;
dae3270c
FB
1971 if (flags & PAGE_WRITE) {
1972 if (!(p->flags & PAGE_WRITE_ORG))
1973 return -1;
1974 /* unprotect the page if it was put read-only because it
1975 contains translated code */
1976 if (!(p->flags & PAGE_WRITE)) {
1977 if (!page_unprotect(addr, 0, NULL))
1978 return -1;
1979 }
1980 return 0;
1981 }
3d97b40b
TS
1982 }
1983 return 0;
1984}
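/* Usage sketch (not part of exec.c): syscall emulation can validate a guest
   buffer with page_check_range() before touching it.  The helper name and the
   direct g2h()+memcpy() access are hypothetical simplifications (real callers
   typically go through lock_user()). */
static int copy_from_guest_sketch(void *dst, target_ulong guest_addr,
                                  target_ulong size)
{
    if (page_check_range(guest_addr, size, PAGE_READ) < 0)
        return -1;                        /* the access would fault */
    memcpy(dst, g2h(guest_addr), size);   /* g2h() maps guest to host addresses */
    return 0;
}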
1985
9fa3e853
FB
1986/* called from signal handler: invalidate the code and unprotect the
 1987 page. Return TRUE if the fault was successfully handled. */
53a5960a 1988int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
1989{
1990 unsigned int page_index, prot, pindex;
1991 PageDesc *p, *p1;
53a5960a 1992 target_ulong host_start, host_end, addr;
9fa3e853 1993
c8a706fe
PB
1994 /* Technically this isn't safe inside a signal handler. However we
1995 know this only ever happens in a synchronous SEGV handler, so in
1996 practice it seems to be ok. */
1997 mmap_lock();
1998
83fb7adf 1999 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2000 page_index = host_start >> TARGET_PAGE_BITS;
2001 p1 = page_find(page_index);
c8a706fe
PB
2002 if (!p1) {
2003 mmap_unlock();
9fa3e853 2004 return 0;
c8a706fe 2005 }
83fb7adf 2006 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2007 p = p1;
2008 prot = 0;
2009 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2010 prot |= p->flags;
2011 p++;
2012 }
2013 /* if the page was really writable, then we change its
2014 protection back to writable */
2015 if (prot & PAGE_WRITE_ORG) {
2016 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2017 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2018 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2019 (prot & PAGE_BITS) | PAGE_WRITE);
2020 p1[pindex].flags |= PAGE_WRITE;
2021 /* and since the content will be modified, we must invalidate
2022 the corresponding translated code. */
d720b93d 2023 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2024#ifdef DEBUG_TB_CHECK
2025 tb_invalidate_check(address);
2026#endif
c8a706fe 2027 mmap_unlock();
9fa3e853
FB
2028 return 1;
2029 }
2030 }
c8a706fe 2031 mmap_unlock();
9fa3e853
FB
2032 return 0;
2033}
2034
6a00d601
FB
2035static inline void tlb_set_dirty(CPUState *env,
2036 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2037{
2038}
9fa3e853
FB
2039#endif /* defined(CONFIG_USER_ONLY) */
2040
e2eef170 2041#if !defined(CONFIG_USER_ONLY)
db7b5426 2042static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a
AJ
2043 ram_addr_t memory);
2044static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2045 ram_addr_t orig_memory);
db7b5426
BS
2046#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2047 need_subpage) \
2048 do { \
2049 if (addr > start_addr) \
2050 start_addr2 = 0; \
2051 else { \
2052 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2053 if (start_addr2 > 0) \
2054 need_subpage = 1; \
2055 } \
2056 \
49e9fba2 2057 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2058 end_addr2 = TARGET_PAGE_SIZE - 1; \
2059 else { \
2060 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2061 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2062 need_subpage = 1; \
2063 } \
2064 } while (0)
2065
33417e70
FB
2066/* register physical memory. 'size' must be a multiple of the target
2067 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2068 io memory page */
5fafdf24 2069void cpu_register_physical_memory(target_phys_addr_t start_addr,
00f82b8a
AJ
2070 ram_addr_t size,
2071 ram_addr_t phys_offset)
33417e70 2072{
108c49b8 2073 target_phys_addr_t addr, end_addr;
92e873b9 2074 PhysPageDesc *p;
9d42037b 2075 CPUState *env;
00f82b8a 2076 ram_addr_t orig_size = size;
db7b5426 2077 void *subpage;
33417e70 2078
da260249
FB
2079#ifdef USE_KQEMU
2080 /* XXX: should not depend on cpu context */
2081 env = first_cpu;
2082 if (env->kqemu_enabled) {
2083 kqemu_set_phys_mem(start_addr, size, phys_offset);
2084 }
2085#endif
5fd386f6 2086 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2087 end_addr = start_addr + (target_phys_addr_t)size;
2088 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2089 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2090 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2091 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2092 target_phys_addr_t start_addr2, end_addr2;
2093 int need_subpage = 0;
2094
2095 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2096 need_subpage);
4254fab8 2097 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2098 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2099 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2100 &p->phys_offset, orig_memory);
2101 } else {
2102 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2103 >> IO_MEM_SHIFT];
2104 }
2105 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2106 } else {
2107 p->phys_offset = phys_offset;
2108 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2109 (phys_offset & IO_MEM_ROMD))
2110 phys_offset += TARGET_PAGE_SIZE;
2111 }
2112 } else {
2113 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2114 p->phys_offset = phys_offset;
2115 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2116 (phys_offset & IO_MEM_ROMD))
2117 phys_offset += TARGET_PAGE_SIZE;
2118 else {
2119 target_phys_addr_t start_addr2, end_addr2;
2120 int need_subpage = 0;
2121
2122 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2123 end_addr2, need_subpage);
2124
4254fab8 2125 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2126 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2127 &p->phys_offset, IO_MEM_UNASSIGNED);
2128 subpage_register(subpage, start_addr2, end_addr2,
2129 phys_offset);
2130 }
2131 }
2132 }
33417e70 2133 }
3b46e624 2134
9d42037b
FB
2135 /* since each CPU stores ram addresses in its TLB cache, we must
2136 reset the modified entries */
2137 /* XXX: slow ! */
2138 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2139 tlb_flush(env, 1);
2140 }
33417e70
FB
2141}
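/* Usage sketch (not part of exec.c): board initialisation code typically
   allocates guest RAM with qemu_ram_alloc() (defined below) and registers it,
   optionally marking a region as ROM.  The addresses and sizes below are
   hypothetical. */
static void board_map_memory_sketch(void)
{
    /* 16 MB of RAM at guest physical address 0 */
    cpu_register_physical_memory(0x00000000, 16 * 1024 * 1024,
                                 qemu_ram_alloc(16 * 1024 * 1024) | IO_MEM_RAM);

    /* 128 KB of ROM: backed by the same RAM allocator, but guest writes are
       discarded because of the IO_MEM_ROM marker */
    cpu_register_physical_memory(0xfffe0000, 0x20000,
                                 qemu_ram_alloc(0x20000) | IO_MEM_ROM);
}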
2142
ba863458 2143/* XXX: temporary until new memory mapping API */
00f82b8a 2144ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2145{
2146 PhysPageDesc *p;
2147
2148 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2149 if (!p)
2150 return IO_MEM_UNASSIGNED;
2151 return p->phys_offset;
2152}
2153
e9a1ab19 2154/* XXX: better than nothing */
00f82b8a 2155ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2156{
2157 ram_addr_t addr;
7fb4fdcf 2158 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
ed441467
FB
2159 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 "\n",
2160 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2161 abort();
2162 }
2163 addr = phys_ram_alloc_offset;
2164 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2165 return addr;
2166}
2167
2168void qemu_ram_free(ram_addr_t addr)
2169{
2170}
2171
a4193c8a 2172static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2173{
67d3b957 2174#ifdef DEBUG_UNASSIGNED
ab3d1727 2175 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316
BS
2176#endif
2177#ifdef TARGET_SPARC
6c36d3fa 2178 do_unassigned_access(addr, 0, 0, 0);
f1ccf904
TS
2179#elif TARGET_CRIS
2180 do_unassigned_access(addr, 0, 0, 0);
67d3b957 2181#endif
33417e70
FB
2182 return 0;
2183}
2184
a4193c8a 2185static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2186{
67d3b957 2187#ifdef DEBUG_UNASSIGNED
ab3d1727 2188 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2189#endif
b4f0a316 2190#ifdef TARGET_SPARC
6c36d3fa 2191 do_unassigned_access(addr, 1, 0, 0);
f1ccf904
TS
2192#elif TARGET_CRIS
2193 do_unassigned_access(addr, 1, 0, 0);
b4f0a316 2194#endif
33417e70
FB
2195}
2196
2197static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2198 unassigned_mem_readb,
2199 unassigned_mem_readb,
2200 unassigned_mem_readb,
2201};
2202
2203static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2204 unassigned_mem_writeb,
2205 unassigned_mem_writeb,
2206 unassigned_mem_writeb,
2207};
2208
0f459d16
PB
2209static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2210 uint32_t val)
9fa3e853 2211{
3a7d929e 2212 int dirty_flags;
3a7d929e
FB
2213 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2214 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2215#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2216 tb_invalidate_phys_page_fast(ram_addr, 1);
2217 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2218#endif
3a7d929e 2219 }
0f459d16 2220 stb_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2221#ifdef USE_KQEMU
2222 if (cpu_single_env->kqemu_enabled &&
2223 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2224 kqemu_modify_page(cpu_single_env, ram_addr);
2225#endif
f23db169
FB
2226 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2227 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2228 /* we remove the notdirty callback only if the code has been
2229 flushed */
2230 if (dirty_flags == 0xff)
0f459d16 2231 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2232}
2233
0f459d16
PB
2234static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2235 uint32_t val)
9fa3e853 2236{
3a7d929e 2237 int dirty_flags;
3a7d929e
FB
2238 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2239 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2240#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2241 tb_invalidate_phys_page_fast(ram_addr, 2);
2242 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2243#endif
3a7d929e 2244 }
0f459d16 2245 stw_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2246#ifdef USE_KQEMU
2247 if (cpu_single_env->kqemu_enabled &&
2248 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2249 kqemu_modify_page(cpu_single_env, ram_addr);
2250#endif
f23db169
FB
2251 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2252 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2253 /* we remove the notdirty callback only if the code has been
2254 flushed */
2255 if (dirty_flags == 0xff)
0f459d16 2256 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2257}
2258
0f459d16
PB
2259static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2260 uint32_t val)
9fa3e853 2261{
3a7d929e 2262 int dirty_flags;
3a7d929e
FB
2263 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2264 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2265#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2266 tb_invalidate_phys_page_fast(ram_addr, 4);
2267 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2268#endif
3a7d929e 2269 }
0f459d16 2270 stl_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2271#ifdef USE_KQEMU
2272 if (cpu_single_env->kqemu_enabled &&
2273 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2274 kqemu_modify_page(cpu_single_env, ram_addr);
2275#endif
f23db169
FB
2276 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2277 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2278 /* we remove the notdirty callback only if the code has been
2279 flushed */
2280 if (dirty_flags == 0xff)
0f459d16 2281 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2282}
2283
3a7d929e 2284static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2285 NULL, /* never used */
2286 NULL, /* never used */
2287 NULL, /* never used */
2288};
2289
1ccde1cb
FB
2290static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2291 notdirty_mem_writeb,
2292 notdirty_mem_writew,
2293 notdirty_mem_writel,
2294};
2295
0f459d16
PB
2296/* Generate a debug exception if a watchpoint has been hit. */
2297static void check_watchpoint(int offset, int flags)
2298{
2299 CPUState *env = cpu_single_env;
2300 target_ulong vaddr;
2301 int i;
2302
2303 vaddr = (env->mem_write_vaddr & TARGET_PAGE_MASK) + offset;
2304 for (i = 0; i < env->nb_watchpoints; i++) {
2305 if (vaddr == env->watchpoint[i].vaddr
2306 && (env->watchpoint[i].type & flags)) {
2307 env->watchpoint_hit = i + 1;
2308 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2309 break;
2310 }
2311 }
2312}
2313
6658ffb8
PB
2314/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2315 so these check for a hit then pass through to the normal out-of-line
2316 phys routines. */
2317static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2318{
0f459d16 2319 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2320 return ldub_phys(addr);
2321}
2322
2323static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2324{
0f459d16 2325 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2326 return lduw_phys(addr);
2327}
2328
2329static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2330{
0f459d16 2331 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2332 return ldl_phys(addr);
2333}
2334
6658ffb8
PB
2335static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2336 uint32_t val)
2337{
0f459d16 2338 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2339 stb_phys(addr, val);
2340}
2341
2342static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2343 uint32_t val)
2344{
0f459d16 2345 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2346 stw_phys(addr, val);
2347}
2348
2349static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2350 uint32_t val)
2351{
0f459d16 2352 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2353 stl_phys(addr, val);
2354}
2355
2356static CPUReadMemoryFunc *watch_mem_read[3] = {
2357 watch_mem_readb,
2358 watch_mem_readw,
2359 watch_mem_readl,
2360};
2361
2362static CPUWriteMemoryFunc *watch_mem_write[3] = {
2363 watch_mem_writeb,
2364 watch_mem_writew,
2365 watch_mem_writel,
2366};
6658ffb8 2367
db7b5426
BS
2368static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2369 unsigned int len)
2370{
db7b5426
BS
2371 uint32_t ret;
2372 unsigned int idx;
2373
2374 idx = SUBPAGE_IDX(addr - mmio->base);
2375#if defined(DEBUG_SUBPAGE)
2376 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2377 mmio, len, addr, idx);
2378#endif
3ee89922 2379 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
db7b5426
BS
2380
2381 return ret;
2382}
2383
2384static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2385 uint32_t value, unsigned int len)
2386{
db7b5426
BS
2387 unsigned int idx;
2388
2389 idx = SUBPAGE_IDX(addr - mmio->base);
2390#if defined(DEBUG_SUBPAGE)
2391 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2392 mmio, len, addr, idx, value);
2393#endif
3ee89922 2394 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
db7b5426
BS
2395}
2396
2397static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2398{
2399#if defined(DEBUG_SUBPAGE)
2400 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2401#endif
2402
2403 return subpage_readlen(opaque, addr, 0);
2404}
2405
2406static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2407 uint32_t value)
2408{
2409#if defined(DEBUG_SUBPAGE)
2410 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2411#endif
2412 subpage_writelen(opaque, addr, value, 0);
2413}
2414
2415static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2416{
2417#if defined(DEBUG_SUBPAGE)
2418 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2419#endif
2420
2421 return subpage_readlen(opaque, addr, 1);
2422}
2423
2424static void subpage_writew (void *opaque, target_phys_addr_t addr,
2425 uint32_t value)
2426{
2427#if defined(DEBUG_SUBPAGE)
2428 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2429#endif
2430 subpage_writelen(opaque, addr, value, 1);
2431}
2432
2433static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2434{
2435#if defined(DEBUG_SUBPAGE)
2436 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2437#endif
2438
2439 return subpage_readlen(opaque, addr, 2);
2440}
2441
2442static void subpage_writel (void *opaque,
2443 target_phys_addr_t addr, uint32_t value)
2444{
2445#if defined(DEBUG_SUBPAGE)
2446 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2447#endif
2448 subpage_writelen(opaque, addr, value, 2);
2449}
2450
2451static CPUReadMemoryFunc *subpage_read[] = {
2452 &subpage_readb,
2453 &subpage_readw,
2454 &subpage_readl,
2455};
2456
2457static CPUWriteMemoryFunc *subpage_write[] = {
2458 &subpage_writeb,
2459 &subpage_writew,
2460 &subpage_writel,
2461};
2462
2463static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a 2464 ram_addr_t memory)
db7b5426
BS
2465{
2466 int idx, eidx;
4254fab8 2467 unsigned int i;
db7b5426
BS
2468
2469 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2470 return -1;
2471 idx = SUBPAGE_IDX(start);
2472 eidx = SUBPAGE_IDX(end);
2473#if defined(DEBUG_SUBPAGE)
2474 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2475 mmio, start, end, idx, eidx, memory);
2476#endif
2477 memory >>= IO_MEM_SHIFT;
2478 for (; idx <= eidx; idx++) {
4254fab8 2479 for (i = 0; i < 4; i++) {
3ee89922
BS
2480 if (io_mem_read[memory][i]) {
2481 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2482 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2483 }
2484 if (io_mem_write[memory][i]) {
2485 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2486 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2487 }
4254fab8 2488 }
db7b5426
BS
2489 }
2490
2491 return 0;
2492}
2493
00f82b8a
AJ
2494static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2495 ram_addr_t orig_memory)
db7b5426
BS
2496{
2497 subpage_t *mmio;
2498 int subpage_memory;
2499
2500 mmio = qemu_mallocz(sizeof(subpage_t));
2501 if (mmio != NULL) {
2502 mmio->base = base;
2503 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2504#if defined(DEBUG_SUBPAGE)
2505 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2506 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2507#endif
2508 *phys = subpage_memory | IO_MEM_SUBPAGE;
2509 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2510 }
2511
2512 return mmio;
2513}
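/* Usage sketch (not part of exec.c): registrations smaller than
   TARGET_PAGE_SIZE go through the subpage machinery above automatically.  Here
   two hypothetical 1 KB devices share a single target page; dev_a_io and
   dev_b_io would be values previously returned by cpu_register_io_memory(). */
static void map_shared_page_sketch(int dev_a_io, int dev_b_io)
{
    cpu_register_physical_memory(0x20000000, 0x400, dev_a_io);
    cpu_register_physical_memory(0x20000400, 0x400, dev_b_io);
    /* both registrations end up behind one subpage_t created by subpage_init() */
}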
2514
33417e70
FB
2515static void io_mem_init(void)
2516{
3a7d929e 2517 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2518 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2519 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2520 io_mem_nb = 5;
2521
0f459d16 2522 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
6658ffb8 2523 watch_mem_write, NULL);
1ccde1cb 2524 /* alloc dirty bits array */
0a962c02 2525 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2526 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2527}
2528
2529/* mem_read and mem_write are arrays of functions containing the
2530 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2531 2). Functions can be omitted with a NULL function pointer. The
2532 registered functions may be modified dynamically later.
 2533 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
2534 modified. If it is zero, a new io zone is allocated. The return
 2535 value can be used with cpu_register_physical_memory(). -1 is
 2536 returned on error. A usage sketch follows this function. */
33417e70
FB
2537int cpu_register_io_memory(int io_index,
2538 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2539 CPUWriteMemoryFunc **mem_write,
2540 void *opaque)
33417e70 2541{
4254fab8 2542 int i, subwidth = 0;
33417e70
FB
2543
2544 if (io_index <= 0) {
b5ff1b31 2545 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2546 return -1;
2547 io_index = io_mem_nb++;
2548 } else {
2549 if (io_index >= IO_MEM_NB_ENTRIES)
2550 return -1;
2551 }
b5ff1b31 2552
33417e70 2553 for(i = 0;i < 3; i++) {
4254fab8
BS
2554 if (!mem_read[i] || !mem_write[i])
2555 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2556 io_mem_read[io_index][i] = mem_read[i];
2557 io_mem_write[io_index][i] = mem_write[i];
2558 }
a4193c8a 2559 io_mem_opaque[io_index] = opaque;
4254fab8 2560 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2561}
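/* Usage sketch (not part of exec.c): a device registers byte/word/dword
   handlers and then maps the returned index with cpu_register_physical_memory().
   The device state, its handlers and the base address are all hypothetical. */
typedef struct {
    uint32_t reg;
} sketch_state;

static uint32_t sketch_readl(void *opaque, target_phys_addr_t addr)
{
    sketch_state *s = opaque;
    return s->reg;
}

static void sketch_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    sketch_state *s = opaque;
    s->reg = val;
}

/* index 0 = byte, 1 = word, 2 = dword; NULL entries mark unsupported widths
   and make the region an IO_MEM_SUBWIDTH one, as coded above */
static CPUReadMemoryFunc *sketch_read[3] = { NULL, NULL, sketch_readl };
static CPUWriteMemoryFunc *sketch_write[3] = { NULL, NULL, sketch_writel };

static void sketch_device_init(sketch_state *s)
{
    int io_index = cpu_register_io_memory(0, sketch_read, sketch_write, s);
    cpu_register_physical_memory(0x30000000, TARGET_PAGE_SIZE, io_index);
}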
61382a50 2562
8926b517
FB
2563CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2564{
2565 return io_mem_write[io_index >> IO_MEM_SHIFT];
2566}
2567
2568CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2569{
2570 return io_mem_read[io_index >> IO_MEM_SHIFT];
2571}
2572
e2eef170
PB
2573#endif /* !defined(CONFIG_USER_ONLY) */
2574
13eb76e0
FB
2575/* physical memory access (slow version, mainly for debug) */
2576#if defined(CONFIG_USER_ONLY)
5fafdf24 2577void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2578 int len, int is_write)
2579{
2580 int l, flags;
2581 target_ulong page;
53a5960a 2582 void * p;
13eb76e0
FB
2583
2584 while (len > 0) {
2585 page = addr & TARGET_PAGE_MASK;
2586 l = (page + TARGET_PAGE_SIZE) - addr;
2587 if (l > len)
2588 l = len;
2589 flags = page_get_flags(page);
2590 if (!(flags & PAGE_VALID))
2591 return;
2592 if (is_write) {
2593 if (!(flags & PAGE_WRITE))
2594 return;
579a97f7 2595 /* XXX: this code should not depend on lock_user */
72fb7daa 2596 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2597 /* FIXME - should this return an error rather than just fail? */
2598 return;
72fb7daa
AJ
2599 memcpy(p, buf, l);
2600 unlock_user(p, addr, l);
13eb76e0
FB
2601 } else {
2602 if (!(flags & PAGE_READ))
2603 return;
579a97f7 2604 /* XXX: this code should not depend on lock_user */
72fb7daa 2605 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2606 /* FIXME - should this return an error rather than just fail? */
2607 return;
72fb7daa 2608 memcpy(buf, p, l);
5b257578 2609 unlock_user(p, addr, 0);
13eb76e0
FB
2610 }
2611 len -= l;
2612 buf += l;
2613 addr += l;
2614 }
2615}
8df1cd07 2616
13eb76e0 2617#else
5fafdf24 2618void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2619 int len, int is_write)
2620{
2621 int l, io_index;
2622 uint8_t *ptr;
2623 uint32_t val;
2e12669a
FB
2624 target_phys_addr_t page;
2625 unsigned long pd;
92e873b9 2626 PhysPageDesc *p;
3b46e624 2627
13eb76e0
FB
2628 while (len > 0) {
2629 page = addr & TARGET_PAGE_MASK;
2630 l = (page + TARGET_PAGE_SIZE) - addr;
2631 if (l > len)
2632 l = len;
92e873b9 2633 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2634 if (!p) {
2635 pd = IO_MEM_UNASSIGNED;
2636 } else {
2637 pd = p->phys_offset;
2638 }
3b46e624 2639
13eb76e0 2640 if (is_write) {
3a7d929e 2641 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2642 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2643 /* XXX: could force cpu_single_env to NULL to avoid
2644 potential bugs */
13eb76e0 2645 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2646 /* 32 bit write access */
c27004ec 2647 val = ldl_p(buf);
a4193c8a 2648 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2649 l = 4;
2650 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2651 /* 16 bit write access */
c27004ec 2652 val = lduw_p(buf);
a4193c8a 2653 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2654 l = 2;
2655 } else {
1c213d19 2656 /* 8 bit write access */
c27004ec 2657 val = ldub_p(buf);
a4193c8a 2658 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2659 l = 1;
2660 }
2661 } else {
b448f2f3
FB
2662 unsigned long addr1;
2663 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2664 /* RAM case */
b448f2f3 2665 ptr = phys_ram_base + addr1;
13eb76e0 2666 memcpy(ptr, buf, l);
3a7d929e
FB
2667 if (!cpu_physical_memory_is_dirty(addr1)) {
2668 /* invalidate code */
2669 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2670 /* set dirty bit */
5fafdf24 2671 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2672 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2673 }
13eb76e0
FB
2674 }
2675 } else {
5fafdf24 2676 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2677 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2678 /* I/O case */
2679 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2680 if (l >= 4 && ((addr & 3) == 0)) {
2681 /* 32 bit read access */
a4193c8a 2682 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2683 stl_p(buf, val);
13eb76e0
FB
2684 l = 4;
2685 } else if (l >= 2 && ((addr & 1) == 0)) {
2686 /* 16 bit read access */
a4193c8a 2687 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2688 stw_p(buf, val);
13eb76e0
FB
2689 l = 2;
2690 } else {
1c213d19 2691 /* 8 bit read access */
a4193c8a 2692 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2693 stb_p(buf, val);
13eb76e0
FB
2694 l = 1;
2695 }
2696 } else {
2697 /* RAM case */
5fafdf24 2698 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
2699 (addr & ~TARGET_PAGE_MASK);
2700 memcpy(buf, ptr, l);
2701 }
2702 }
2703 len -= l;
2704 buf += l;
2705 addr += l;
2706 }
2707}
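/* Usage sketch (not part of exec.c): DMA-style device code goes through the
   cpu_physical_memory_read()/write() wrappers so that both RAM and MMIO
   destinations are handled by the routine above.  The helper name and
   addresses are hypothetical. */
static void dma_copy_sketch(target_phys_addr_t src, target_phys_addr_t dst, int len)
{
    uint8_t buf[64];
    int l;

    while (len > 0) {
        l = len > (int)sizeof(buf) ? (int)sizeof(buf) : len;
        cpu_physical_memory_read(src, buf, l);
        cpu_physical_memory_write(dst, buf, l);
        src += l;
        dst += l;
        len -= l;
    }
}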
8df1cd07 2708
d0ecd2aa 2709/* used for ROM loading : can write in RAM and ROM */
5fafdf24 2710void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
2711 const uint8_t *buf, int len)
2712{
2713 int l;
2714 uint8_t *ptr;
2715 target_phys_addr_t page;
2716 unsigned long pd;
2717 PhysPageDesc *p;
3b46e624 2718
d0ecd2aa
FB
2719 while (len > 0) {
2720 page = addr & TARGET_PAGE_MASK;
2721 l = (page + TARGET_PAGE_SIZE) - addr;
2722 if (l > len)
2723 l = len;
2724 p = phys_page_find(page >> TARGET_PAGE_BITS);
2725 if (!p) {
2726 pd = IO_MEM_UNASSIGNED;
2727 } else {
2728 pd = p->phys_offset;
2729 }
3b46e624 2730
d0ecd2aa 2731 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2732 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2733 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2734 /* do nothing */
2735 } else {
2736 unsigned long addr1;
2737 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2738 /* ROM/RAM case */
2739 ptr = phys_ram_base + addr1;
2740 memcpy(ptr, buf, l);
2741 }
2742 len -= l;
2743 buf += l;
2744 addr += l;
2745 }
2746}
2747
2748
8df1cd07
FB
2749/* warning: addr must be aligned */
2750uint32_t ldl_phys(target_phys_addr_t addr)
2751{
2752 int io_index;
2753 uint8_t *ptr;
2754 uint32_t val;
2755 unsigned long pd;
2756 PhysPageDesc *p;
2757
2758 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2759 if (!p) {
2760 pd = IO_MEM_UNASSIGNED;
2761 } else {
2762 pd = p->phys_offset;
2763 }
3b46e624 2764
5fafdf24 2765 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2766 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2767 /* I/O case */
2768 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2769 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2770 } else {
2771 /* RAM case */
5fafdf24 2772 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2773 (addr & ~TARGET_PAGE_MASK);
2774 val = ldl_p(ptr);
2775 }
2776 return val;
2777}
2778
84b7b8e7
FB
2779/* warning: addr must be aligned */
2780uint64_t ldq_phys(target_phys_addr_t addr)
2781{
2782 int io_index;
2783 uint8_t *ptr;
2784 uint64_t val;
2785 unsigned long pd;
2786 PhysPageDesc *p;
2787
2788 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2789 if (!p) {
2790 pd = IO_MEM_UNASSIGNED;
2791 } else {
2792 pd = p->phys_offset;
2793 }
3b46e624 2794
2a4188a3
FB
2795 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2796 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2797 /* I/O case */
2798 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2799#ifdef TARGET_WORDS_BIGENDIAN
2800 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2801 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2802#else
2803 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2804 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2805#endif
2806 } else {
2807 /* RAM case */
5fafdf24 2808 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
2809 (addr & ~TARGET_PAGE_MASK);
2810 val = ldq_p(ptr);
2811 }
2812 return val;
2813}
2814
aab33094
FB
2815/* XXX: optimize */
2816uint32_t ldub_phys(target_phys_addr_t addr)
2817{
2818 uint8_t val;
2819 cpu_physical_memory_read(addr, &val, 1);
2820 return val;
2821}
2822
2823/* XXX: optimize */
2824uint32_t lduw_phys(target_phys_addr_t addr)
2825{
2826 uint16_t val;
2827 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2828 return tswap16(val);
2829}
2830
8df1cd07
FB
 2831/* warning: addr must be aligned. The ram page is not marked as dirty
2832 and the code inside is not invalidated. It is useful if the dirty
2833 bits are used to track modified PTEs */
2834void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2835{
2836 int io_index;
2837 uint8_t *ptr;
2838 unsigned long pd;
2839 PhysPageDesc *p;
2840
2841 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2842 if (!p) {
2843 pd = IO_MEM_UNASSIGNED;
2844 } else {
2845 pd = p->phys_offset;
2846 }
3b46e624 2847
3a7d929e 2848 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2849 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2850 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2851 } else {
5fafdf24 2852 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2853 (addr & ~TARGET_PAGE_MASK);
2854 stl_p(ptr, val);
2855 }
2856}
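/* Usage sketch (not part of exec.c): target MMU code can set accessed/dirty
   bits in a guest page table entry with stl_phys_notdirty() so that the write
   does not itself disturb the dirty-bit tracking described above.  pte_addr
   and the bit layout are hypothetical. */
static void set_pte_accessed_sketch(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical ACCESSED bit */);
}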
2857
bc98a7ef
JM
2858void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2859{
2860 int io_index;
2861 uint8_t *ptr;
2862 unsigned long pd;
2863 PhysPageDesc *p;
2864
2865 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2866 if (!p) {
2867 pd = IO_MEM_UNASSIGNED;
2868 } else {
2869 pd = p->phys_offset;
2870 }
3b46e624 2871
bc98a7ef
JM
2872 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2873 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2874#ifdef TARGET_WORDS_BIGENDIAN
2875 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2876 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2877#else
2878 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2879 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2880#endif
2881 } else {
5fafdf24 2882 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
2883 (addr & ~TARGET_PAGE_MASK);
2884 stq_p(ptr, val);
2885 }
2886}
2887
8df1cd07 2888/* warning: addr must be aligned */
8df1cd07
FB
2889void stl_phys(target_phys_addr_t addr, uint32_t val)
2890{
2891 int io_index;
2892 uint8_t *ptr;
2893 unsigned long pd;
2894 PhysPageDesc *p;
2895
2896 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2897 if (!p) {
2898 pd = IO_MEM_UNASSIGNED;
2899 } else {
2900 pd = p->phys_offset;
2901 }
3b46e624 2902
3a7d929e 2903 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2904 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2905 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2906 } else {
2907 unsigned long addr1;
2908 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2909 /* RAM case */
2910 ptr = phys_ram_base + addr1;
2911 stl_p(ptr, val);
3a7d929e
FB
2912 if (!cpu_physical_memory_is_dirty(addr1)) {
2913 /* invalidate code */
2914 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2915 /* set dirty bit */
f23db169
FB
2916 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2917 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2918 }
8df1cd07
FB
2919 }
2920}
2921
aab33094
FB
2922/* XXX: optimize */
2923void stb_phys(target_phys_addr_t addr, uint32_t val)
2924{
2925 uint8_t v = val;
2926 cpu_physical_memory_write(addr, &v, 1);
2927}
2928
2929/* XXX: optimize */
2930void stw_phys(target_phys_addr_t addr, uint32_t val)
2931{
2932 uint16_t v = tswap16(val);
2933 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2934}
2935
2936/* XXX: optimize */
2937void stq_phys(target_phys_addr_t addr, uint64_t val)
2938{
2939 val = tswap64(val);
2940 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2941}
2942
13eb76e0
FB
2943#endif
2944
2945/* virtual memory access for debug */
5fafdf24 2946int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 2947 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2948{
2949 int l;
9b3c35e0
JM
2950 target_phys_addr_t phys_addr;
2951 target_ulong page;
13eb76e0
FB
2952
2953 while (len > 0) {
2954 page = addr & TARGET_PAGE_MASK;
2955 phys_addr = cpu_get_phys_page_debug(env, page);
2956 /* if no physical page mapped, return an error */
2957 if (phys_addr == -1)
2958 return -1;
2959 l = (page + TARGET_PAGE_SIZE) - addr;
2960 if (l > len)
2961 l = len;
5fafdf24 2962 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 2963 buf, l, is_write);
13eb76e0
FB
2964 len -= l;
2965 buf += l;
2966 addr += l;
2967 }
2968 return 0;
2969}
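/* Usage sketch (not part of exec.c): a debugger stub reads guest virtual
   memory through cpu_memory_rw_debug(), which resolves each page with
   cpu_get_phys_page_debug() as shown above.  The helper name is hypothetical. */
static int debugger_read_sketch(CPUState *env, target_ulong vaddr,
                                uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* is_write */);
}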
2970
e3db7226
FB
2971void dump_exec_info(FILE *f,
2972 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2973{
2974 int i, target_code_size, max_target_code_size;
2975 int direct_jmp_count, direct_jmp2_count, cross_page;
2976 TranslationBlock *tb;
3b46e624 2977
e3db7226
FB
2978 target_code_size = 0;
2979 max_target_code_size = 0;
2980 cross_page = 0;
2981 direct_jmp_count = 0;
2982 direct_jmp2_count = 0;
2983 for(i = 0; i < nb_tbs; i++) {
2984 tb = &tbs[i];
2985 target_code_size += tb->size;
2986 if (tb->size > max_target_code_size)
2987 max_target_code_size = tb->size;
2988 if (tb->page_addr[1] != -1)
2989 cross_page++;
2990 if (tb->tb_next_offset[0] != 0xffff) {
2991 direct_jmp_count++;
2992 if (tb->tb_next_offset[1] != 0xffff) {
2993 direct_jmp2_count++;
2994 }
2995 }
2996 }
2997 /* XXX: avoid using doubles ? */
57fec1fe 2998 cpu_fprintf(f, "Translation buffer state:\n");
26a5f13b
FB
2999 cpu_fprintf(f, "gen code size %ld/%ld\n",
3000 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3001 cpu_fprintf(f, "TB count %d/%d\n",
3002 nb_tbs, code_gen_max_blocks);
5fafdf24 3003 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
3004 nb_tbs ? target_code_size / nb_tbs : 0,
3005 max_target_code_size);
5fafdf24 3006 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
3007 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3008 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
3009 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3010 cross_page,
e3db7226
FB
3011 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3012 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3013 direct_jmp_count,
e3db7226
FB
3014 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3015 direct_jmp2_count,
3016 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3017 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
3018 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3019 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3020 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3021 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
3022}
3023
5fafdf24 3024#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3025
3026#define MMUSUFFIX _cmmu
3027#define GETPC() NULL
3028#define env cpu_single_env
b769d8fe 3029#define SOFTMMU_CODE_ACCESS
61382a50
FB
3030
3031#define SHIFT 0
3032#include "softmmu_template.h"
3033
3034#define SHIFT 1
3035#include "softmmu_template.h"
3036
3037#define SHIFT 2
3038#include "softmmu_template.h"
3039
3040#define SHIFT 3
3041#include "softmmu_template.h"
3042
3043#undef env
3044
3045#endif