/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
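/* Once a page has taken this many write faults from guest code, a bitmap of
   the bytes occupied by translated code is built for it (build_page_bitmap),
   so that later writes which do not hit translated code can skip the TB
   invalidation path (see tb_invalidate_phys_page_fast). */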

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END 0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
uint8_t *code_gen_buffer;
unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
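/* The page tables below are two-level: a page index (address right-shifted by
   TARGET_PAGE_BITS) is split into an L1 slot (upper L1_BITS) and an L2 slot
   (lower L2_BITS); L2 arrays are only allocated on first use, so a sparse
   address space stays cheap. */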

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

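/* The physical page table mirrors l1_map but is indexed by physical page
   number; when TARGET_PHYS_ADDR_SPACE_BITS > 32 an extra top level is added.
   Pages that were never registered read back as IO_MEM_UNASSIGNED. */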
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode; this will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (int)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(NULL, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
    io_mem_init();
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

9fa3e853
FB
431static inline void invalidate_page_bitmap(PageDesc *p)
432{
433 if (p->code_bitmap) {
59817ccb 434 qemu_free(p->code_bitmap);
9fa3e853
FB
435 p->code_bitmap = NULL;
436 }
437 p->code_write_count = 0;
438}
439
fd6ce8f6
FB
440/* set to NULL all the 'first_tb' fields in all PageDescs */
441static void page_flush_tb(void)
442{
443 int i, j;
444 PageDesc *p;
445
446 for(i = 0; i < L1_SIZE; i++) {
447 p = l1_map[i];
448 if (p) {
9fa3e853
FB
449 for(j = 0; j < L2_SIZE; j++) {
450 p->first_tb = NULL;
451 invalidate_page_bitmap(p);
452 p++;
453 }
fd6ce8f6
FB
454 }
455 }
456}
457
458/* flush all the translation blocks */
d4e8164f 459/* XXX: tb_flush is currently not thread safe */
6a00d601 460void tb_flush(CPUState *env1)
fd6ce8f6 461{
6a00d601 462 CPUState *env;
0124311e 463#if defined(DEBUG_FLUSH)
ab3d1727
BS
464 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
465 (unsigned long)(code_gen_ptr - code_gen_buffer),
466 nb_tbs, nb_tbs > 0 ?
467 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 468#endif
26a5f13b 469 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
470 cpu_abort(env1, "Internal error: code buffer overflow\n");
471
fd6ce8f6 472 nb_tbs = 0;
3b46e624 473
6a00d601
FB
474 for(env = first_cpu; env != NULL; env = env->next_cpu) {
475 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
476 }
9fa3e853 477
8a8a608f 478 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 479 page_flush_tb();
9fa3e853 480
fd6ce8f6 481 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
482 /* XXX: flush processor icache at this point if cache flush is
483 expensive */
e3db7226 484 tb_flush_count++;
fd6ce8f6
FB
485}
486
487#ifdef DEBUG_TB_CHECK
488
bc98a7ef 489static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
490{
491 TranslationBlock *tb;
492 int i;
493 address &= TARGET_PAGE_MASK;
99773bd4
PB
494 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
495 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
496 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
497 address >= tb->pc + tb->size)) {
498 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 499 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
500 }
501 }
502 }
503}
504
505/* verify that all the pages have correct rights for code */
506static void tb_page_check(void)
507{
508 TranslationBlock *tb;
509 int i, flags1, flags2;
3b46e624 510
99773bd4
PB
511 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
512 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
513 flags1 = page_get_flags(tb->pc);
514 flags2 = page_get_flags(tb->pc + tb->size - 1);
515 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
516 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 517 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
518 }
519 }
520 }
521}
522
d4e8164f
FB
523void tb_jmp_check(TranslationBlock *tb)
524{
525 TranslationBlock *tb1;
526 unsigned int n1;
527
528 /* suppress any remaining jumps to this TB */
529 tb1 = tb->jmp_first;
530 for(;;) {
531 n1 = (long)tb1 & 3;
532 tb1 = (TranslationBlock *)((long)tb1 & ~3);
533 if (n1 == 2)
534 break;
535 tb1 = tb1->jmp_next[n1];
536 }
537 /* check end of list */
538 if (tb1 != tb) {
539 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
540 }
541}
542
fd6ce8f6
FB
543#endif
544
545/* invalidate one TB */
546static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
547 int next_offset)
548{
549 TranslationBlock *tb1;
550 for(;;) {
551 tb1 = *ptb;
552 if (tb1 == tb) {
553 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
554 break;
555 }
556 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
557 }
558}
559
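/* TB list pointers below carry a tag in their low two bits: 'ptr & 3' gives
   the slot (page_next[] or jmp_next[] index) to follow in the next TB, and
   the value 2 terminates the circular jump list (cf. jmp_first, which is set
   to (long)tb | 2 in tb_link_phys). */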
560static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
561{
562 TranslationBlock *tb1;
563 unsigned int n1;
564
565 for(;;) {
566 tb1 = *ptb;
567 n1 = (long)tb1 & 3;
568 tb1 = (TranslationBlock *)((long)tb1 & ~3);
569 if (tb1 == tb) {
570 *ptb = tb1->page_next[n1];
571 break;
572 }
573 ptb = &tb1->page_next[n1];
574 }
575}
576
d4e8164f
FB
577static inline void tb_jmp_remove(TranslationBlock *tb, int n)
578{
579 TranslationBlock *tb1, **ptb;
580 unsigned int n1;
581
582 ptb = &tb->jmp_next[n];
583 tb1 = *ptb;
584 if (tb1) {
585 /* find tb(n) in circular list */
586 for(;;) {
587 tb1 = *ptb;
588 n1 = (long)tb1 & 3;
589 tb1 = (TranslationBlock *)((long)tb1 & ~3);
590 if (n1 == n && tb1 == tb)
591 break;
592 if (n1 == 2) {
593 ptb = &tb1->jmp_first;
594 } else {
595 ptb = &tb1->jmp_next[n1];
596 }
597 }
598 /* now we can suppress tb(n) from the list */
599 *ptb = tb->jmp_next[n];
600
601 tb->jmp_next[n] = NULL;
602 }
603}
604
605/* reset the jump entry 'n' of a TB so that it is not chained to
606 another TB */
607static inline void tb_reset_jump(TranslationBlock *tb, int n)
608{
609 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
610}
611
00f82b8a 612static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
fd6ce8f6 613{
6a00d601 614 CPUState *env;
8a40a180 615 PageDesc *p;
d4e8164f 616 unsigned int h, n1;
00f82b8a 617 target_phys_addr_t phys_pc;
8a40a180 618 TranslationBlock *tb1, *tb2;
3b46e624 619
8a40a180
FB
620 /* remove the TB from the hash list */
621 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
622 h = tb_phys_hash_func(phys_pc);
5fafdf24 623 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
624 offsetof(TranslationBlock, phys_hash_next));
625
626 /* remove the TB from the page list */
627 if (tb->page_addr[0] != page_addr) {
628 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
629 tb_page_remove(&p->first_tb, tb);
630 invalidate_page_bitmap(p);
631 }
632 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
633 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
634 tb_page_remove(&p->first_tb, tb);
635 invalidate_page_bitmap(p);
636 }
637
36bdbe54 638 tb_invalidated_flag = 1;
59817ccb 639
fd6ce8f6 640 /* remove the TB from the hash list */
8a40a180 641 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
642 for(env = first_cpu; env != NULL; env = env->next_cpu) {
643 if (env->tb_jmp_cache[h] == tb)
644 env->tb_jmp_cache[h] = NULL;
645 }
d4e8164f
FB
646
647 /* suppress this TB from the two jump lists */
648 tb_jmp_remove(tb, 0);
649 tb_jmp_remove(tb, 1);
650
651 /* suppress any remaining jumps to this TB */
652 tb1 = tb->jmp_first;
653 for(;;) {
654 n1 = (long)tb1 & 3;
655 if (n1 == 2)
656 break;
657 tb1 = (TranslationBlock *)((long)tb1 & ~3);
658 tb2 = tb1->jmp_next[n1];
659 tb_reset_jump(tb1, n1);
660 tb1->jmp_next[n1] = NULL;
661 tb1 = tb2;
662 }
663 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 664
e3db7226 665 tb_phys_invalidate_count++;
9fa3e853
FB
666}
667
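/* Set bits [start, start + len) in the bitmap 'tab'.  For example,
   set_bits(tab, 3, 7) sets bits 3..9, i.e. the top five bits of tab[0]
   and the low two bits of tab[1]. */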
668static inline void set_bits(uint8_t *tab, int start, int len)
669{
670 int end, mask, end1;
671
672 end = start + len;
673 tab += start >> 3;
674 mask = 0xff << (start & 7);
675 if ((start & ~7) == (end & ~7)) {
676 if (start < end) {
677 mask &= ~(0xff << (end & 7));
678 *tab |= mask;
679 }
680 } else {
681 *tab++ |= mask;
682 start = (start + 8) & ~7;
683 end1 = end & ~7;
684 while (start < end1) {
685 *tab++ = 0xff;
686 start += 8;
687 }
688 if (start < end) {
689 mask = ~(0xff << (end & 7));
690 *tab |= mask;
691 }
692 }
693}
694
695static void build_page_bitmap(PageDesc *p)
696{
697 int n, tb_start, tb_end;
698 TranslationBlock *tb;
3b46e624 699
59817ccb 700 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
701 if (!p->code_bitmap)
702 return;
703 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
704
705 tb = p->first_tb;
706 while (tb != NULL) {
707 n = (long)tb & 3;
708 tb = (TranslationBlock *)((long)tb & ~3);
709 /* NOTE: this is subtle as a TB may span two physical pages */
710 if (n == 0) {
711 /* NOTE: tb_end may be after the end of the page, but
712 it is not a problem */
713 tb_start = tb->pc & ~TARGET_PAGE_MASK;
714 tb_end = tb_start + tb->size;
715 if (tb_end > TARGET_PAGE_SIZE)
716 tb_end = TARGET_PAGE_SIZE;
717 } else {
718 tb_start = 0;
719 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
720 }
721 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
722 tb = tb->page_next[n];
723 }
724}
725
d720b93d
FB
726#ifdef TARGET_HAS_PRECISE_SMC
727
5fafdf24 728static void tb_gen_code(CPUState *env,
d720b93d
FB
729 target_ulong pc, target_ulong cs_base, int flags,
730 int cflags)
731{
732 TranslationBlock *tb;
733 uint8_t *tc_ptr;
734 target_ulong phys_pc, phys_page2, virt_page2;
735 int code_gen_size;
736
c27004ec
FB
737 phys_pc = get_phys_addr_code(env, pc);
738 tb = tb_alloc(pc);
d720b93d
FB
739 if (!tb) {
740 /* flush must be done */
741 tb_flush(env);
742 /* cannot fail at this point */
c27004ec 743 tb = tb_alloc(pc);
d720b93d
FB
744 }
745 tc_ptr = code_gen_ptr;
746 tb->tc_ptr = tc_ptr;
747 tb->cs_base = cs_base;
748 tb->flags = flags;
749 tb->cflags = cflags;
d07bde88 750 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 751 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 752
d720b93d 753 /* check next page if needed */
c27004ec 754 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 755 phys_page2 = -1;
c27004ec 756 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
757 phys_page2 = get_phys_addr_code(env, virt_page2);
758 }
759 tb_link_phys(tb, phys_pc, phys_page2);
760}
761#endif
3b46e624 762
9fa3e853
FB
763/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end). NOTE: start and end must refer to
d720b93d
FB
765 the same physical page. 'is_cpu_write_access' should be true if called
766 from a real cpu write access: the virtual CPU will exit the current
767 TB if code is modified inside this TB. */
00f82b8a 768void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
d720b93d
FB
769 int is_cpu_write_access)
770{
771 int n, current_tb_modified, current_tb_not_found, current_flags;
d720b93d 772 CPUState *env = cpu_single_env;
9fa3e853 773 PageDesc *p;
ea1c1802 774 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
9fa3e853 775 target_ulong tb_start, tb_end;
d720b93d 776 target_ulong current_pc, current_cs_base;
9fa3e853
FB
777
778 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 779 if (!p)
9fa3e853 780 return;
5fafdf24 781 if (!p->code_bitmap &&
d720b93d
FB
782 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
783 is_cpu_write_access) {
9fa3e853
FB
784 /* build code bitmap */
785 build_page_bitmap(p);
786 }
787
    /* we remove all the TBs in the range [start, end) */
789 /* XXX: see if in some cases it could be faster to invalidate all the code */
d720b93d
FB
790 current_tb_not_found = is_cpu_write_access;
791 current_tb_modified = 0;
792 current_tb = NULL; /* avoid warning */
793 current_pc = 0; /* avoid warning */
794 current_cs_base = 0; /* avoid warning */
795 current_flags = 0; /* avoid warning */
9fa3e853
FB
796 tb = p->first_tb;
797 while (tb != NULL) {
798 n = (long)tb & 3;
799 tb = (TranslationBlock *)((long)tb & ~3);
800 tb_next = tb->page_next[n];
801 /* NOTE: this is subtle as a TB may span two physical pages */
802 if (n == 0) {
803 /* NOTE: tb_end may be after the end of the page, but
804 it is not a problem */
805 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
806 tb_end = tb_start + tb->size;
807 } else {
808 tb_start = tb->page_addr[1];
809 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
810 }
811 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
812#ifdef TARGET_HAS_PRECISE_SMC
813 if (current_tb_not_found) {
814 current_tb_not_found = 0;
815 current_tb = NULL;
816 if (env->mem_write_pc) {
817 /* now we have a real cpu fault */
818 current_tb = tb_find_pc(env->mem_write_pc);
819 }
820 }
821 if (current_tb == tb &&
822 !(current_tb->cflags & CF_SINGLE_INSN)) {
823 /* If we are modifying the current TB, we must stop
824 its execution. We could be more precise by checking
825 that the modification is after the current PC, but it
826 would require a specialized function to partially
827 restore the CPU state */
3b46e624 828
d720b93d 829 current_tb_modified = 1;
5fafdf24 830 cpu_restore_state(current_tb, env,
d720b93d
FB
831 env->mem_write_pc, NULL);
832#if defined(TARGET_I386)
833 current_flags = env->hflags;
834 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
835 current_cs_base = (target_ulong)env->segs[R_CS].base;
836 current_pc = current_cs_base + env->eip;
837#else
838#error unsupported CPU
839#endif
840 }
841#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
842 /* we need to do that to handle the case where a signal
843 occurs while doing tb_phys_invalidate() */
844 saved_tb = NULL;
845 if (env) {
846 saved_tb = env->current_tb;
847 env->current_tb = NULL;
848 }
9fa3e853 849 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
850 if (env) {
851 env->current_tb = saved_tb;
852 if (env->interrupt_request && env->current_tb)
853 cpu_interrupt(env, env->interrupt_request);
854 }
9fa3e853
FB
855 }
856 tb = tb_next;
857 }
858#if !defined(CONFIG_USER_ONLY)
859 /* if no code remaining, no need to continue to use slow writes */
860 if (!p->first_tb) {
861 invalidate_page_bitmap(p);
d720b93d
FB
862 if (is_cpu_write_access) {
863 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
864 }
865 }
866#endif
867#ifdef TARGET_HAS_PRECISE_SMC
868 if (current_tb_modified) {
869 /* we generate a block containing just the instruction
870 modifying the memory. It will ensure that it cannot modify
871 itself */
ea1c1802 872 env->current_tb = NULL;
5fafdf24 873 tb_gen_code(env, current_pc, current_cs_base, current_flags,
d720b93d
FB
874 CF_SINGLE_INSN);
875 cpu_resume_from_signal(env, NULL);
9fa3e853 876 }
fd6ce8f6 877#endif
9fa3e853 878}
fd6ce8f6 879
9fa3e853 880/* len must be <= 8 and start must be a multiple of len */
00f82b8a 881static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
9fa3e853
FB
882{
883 PageDesc *p;
884 int offset, b;
59817ccb 885#if 0
a4193c8a
FB
886 if (1) {
887 if (loglevel) {
5fafdf24
TS
888 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
889 cpu_single_env->mem_write_vaddr, len,
890 cpu_single_env->eip,
a4193c8a
FB
891 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
892 }
59817ccb
FB
893 }
894#endif
9fa3e853 895 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 896 if (!p)
9fa3e853
FB
897 return;
898 if (p->code_bitmap) {
899 offset = start & ~TARGET_PAGE_MASK;
900 b = p->code_bitmap[offset >> 3] >> (offset & 7);
901 if (b & ((1 << len) - 1))
902 goto do_invalidate;
903 } else {
904 do_invalidate:
d720b93d 905 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
906 }
907}
908
9fa3e853 909#if !defined(CONFIG_SOFTMMU)
00f82b8a 910static void tb_invalidate_phys_page(target_phys_addr_t addr,
d720b93d 911 unsigned long pc, void *puc)
9fa3e853 912{
d720b93d
FB
913 int n, current_flags, current_tb_modified;
914 target_ulong current_pc, current_cs_base;
9fa3e853 915 PageDesc *p;
d720b93d
FB
916 TranslationBlock *tb, *current_tb;
917#ifdef TARGET_HAS_PRECISE_SMC
918 CPUState *env = cpu_single_env;
919#endif
9fa3e853
FB
920
921 addr &= TARGET_PAGE_MASK;
922 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 923 if (!p)
9fa3e853
FB
924 return;
925 tb = p->first_tb;
d720b93d
FB
926 current_tb_modified = 0;
927 current_tb = NULL;
928 current_pc = 0; /* avoid warning */
929 current_cs_base = 0; /* avoid warning */
930 current_flags = 0; /* avoid warning */
931#ifdef TARGET_HAS_PRECISE_SMC
932 if (tb && pc != 0) {
933 current_tb = tb_find_pc(pc);
934 }
935#endif
9fa3e853
FB
936 while (tb != NULL) {
937 n = (long)tb & 3;
938 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
939#ifdef TARGET_HAS_PRECISE_SMC
940 if (current_tb == tb &&
941 !(current_tb->cflags & CF_SINGLE_INSN)) {
942 /* If we are modifying the current TB, we must stop
943 its execution. We could be more precise by checking
944 that the modification is after the current PC, but it
945 would require a specialized function to partially
946 restore the CPU state */
3b46e624 947
d720b93d
FB
948 current_tb_modified = 1;
949 cpu_restore_state(current_tb, env, pc, puc);
950#if defined(TARGET_I386)
951 current_flags = env->hflags;
952 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
953 current_cs_base = (target_ulong)env->segs[R_CS].base;
954 current_pc = current_cs_base + env->eip;
955#else
956#error unsupported CPU
957#endif
958 }
959#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
960 tb_phys_invalidate(tb, addr);
961 tb = tb->page_next[n];
962 }
fd6ce8f6 963 p->first_tb = NULL;
d720b93d
FB
964#ifdef TARGET_HAS_PRECISE_SMC
965 if (current_tb_modified) {
966 /* we generate a block containing just the instruction
967 modifying the memory. It will ensure that it cannot modify
968 itself */
ea1c1802 969 env->current_tb = NULL;
5fafdf24 970 tb_gen_code(env, current_pc, current_cs_base, current_flags,
d720b93d
FB
971 CF_SINGLE_INSN);
972 cpu_resume_from_signal(env, puc);
973 }
974#endif
fd6ce8f6 975}
9fa3e853 976#endif
fd6ce8f6
FB
977
978/* add the tb in the target page and protect it if necessary */
5fafdf24 979static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 980 unsigned int n, target_ulong page_addr)
fd6ce8f6
FB
981{
982 PageDesc *p;
9fa3e853
FB
983 TranslationBlock *last_first_tb;
984
985 tb->page_addr[n] = page_addr;
3a7d929e 986 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
9fa3e853
FB
987 tb->page_next[n] = p->first_tb;
988 last_first_tb = p->first_tb;
989 p->first_tb = (TranslationBlock *)((long)tb | n);
990 invalidate_page_bitmap(p);
fd6ce8f6 991
107db443 992#if defined(TARGET_HAS_SMC) || 1
d720b93d 993
9fa3e853 994#if defined(CONFIG_USER_ONLY)
fd6ce8f6 995 if (p->flags & PAGE_WRITE) {
53a5960a
PB
996 target_ulong addr;
997 PageDesc *p2;
9fa3e853
FB
998 int prot;
999
fd6ce8f6
FB
1000 /* force the host page as non writable (writes will have a
1001 page fault + mprotect overhead) */
53a5960a 1002 page_addr &= qemu_host_page_mask;
fd6ce8f6 1003 prot = 0;
53a5960a
PB
1004 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1005 addr += TARGET_PAGE_SIZE) {
1006
1007 p2 = page_find (addr >> TARGET_PAGE_BITS);
1008 if (!p2)
1009 continue;
1010 prot |= p2->flags;
1011 p2->flags &= ~PAGE_WRITE;
1012 page_get_flags(addr);
1013 }
5fafdf24 1014 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1015 (prot & PAGE_BITS) & ~PAGE_WRITE);
1016#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1017 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1018 page_addr);
fd6ce8f6 1019#endif
fd6ce8f6 1020 }
9fa3e853
FB
1021#else
1022 /* if some code is already present, then the pages are already
1023 protected. So we handle the case where only the first TB is
1024 allocated in a physical page */
1025 if (!last_first_tb) {
6a00d601 1026 tlb_protect_code(page_addr);
9fa3e853
FB
1027 }
1028#endif
d720b93d
FB
1029
1030#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1031}
1032
1033/* Allocate a new translation block. Flush the translation buffer if
1034 too many translation blocks or too much generated code. */
c27004ec 1035TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
1036{
1037 TranslationBlock *tb;
fd6ce8f6 1038
26a5f13b
FB
1039 if (nb_tbs >= code_gen_max_blocks ||
1040 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
d4e8164f 1041 return NULL;
fd6ce8f6
FB
1042 tb = &tbs[nb_tbs++];
1043 tb->pc = pc;
b448f2f3 1044 tb->cflags = 0;
d4e8164f
FB
1045 return tb;
1046}
1047
9fa3e853
FB
1048/* add a new TB and link it to the physical page tables. phys_page2 is
1049 (-1) to indicate that only one page contains the TB. */
5fafdf24 1050void tb_link_phys(TranslationBlock *tb,
9fa3e853 1051 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 1052{
9fa3e853
FB
1053 unsigned int h;
1054 TranslationBlock **ptb;
1055
c8a706fe
PB
1056 /* Grab the mmap lock to stop another thread invalidating this TB
1057 before we are done. */
1058 mmap_lock();
9fa3e853
FB
1059 /* add in the physical hash table */
1060 h = tb_phys_hash_func(phys_pc);
1061 ptb = &tb_phys_hash[h];
1062 tb->phys_hash_next = *ptb;
1063 *ptb = tb;
fd6ce8f6
FB
1064
1065 /* add in the page list */
9fa3e853
FB
1066 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1067 if (phys_page2 != -1)
1068 tb_alloc_page(tb, 1, phys_page2);
1069 else
1070 tb->page_addr[1] = -1;
9fa3e853 1071
d4e8164f
FB
1072 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1073 tb->jmp_next[0] = NULL;
1074 tb->jmp_next[1] = NULL;
1075
1076 /* init original jump addresses */
1077 if (tb->tb_next_offset[0] != 0xffff)
1078 tb_reset_jump(tb, 0);
1079 if (tb->tb_next_offset[1] != 0xffff)
1080 tb_reset_jump(tb, 1);
8a40a180
FB
1081
1082#ifdef DEBUG_TB_CHECK
1083 tb_page_check();
1084#endif
c8a706fe 1085 mmap_unlock();
fd6ce8f6
FB
1086}
1087
9fa3e853
FB
1088/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1089 tb[1].tc_ptr. Return NULL if not found */
1090TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1091{
9fa3e853
FB
1092 int m_min, m_max, m;
1093 unsigned long v;
1094 TranslationBlock *tb;
a513fe19
FB
1095
1096 if (nb_tbs <= 0)
1097 return NULL;
1098 if (tc_ptr < (unsigned long)code_gen_buffer ||
1099 tc_ptr >= (unsigned long)code_gen_ptr)
1100 return NULL;
1101 /* binary search (cf Knuth) */
1102 m_min = 0;
1103 m_max = nb_tbs - 1;
1104 while (m_min <= m_max) {
1105 m = (m_min + m_max) >> 1;
1106 tb = &tbs[m];
1107 v = (unsigned long)tb->tc_ptr;
1108 if (v == tc_ptr)
1109 return tb;
1110 else if (tc_ptr < v) {
1111 m_max = m - 1;
1112 } else {
1113 m_min = m + 1;
1114 }
5fafdf24 1115 }
a513fe19
FB
1116 return &tbs[m_max];
1117}
7501267e 1118
ea041c0e
FB
1119static void tb_reset_jump_recursive(TranslationBlock *tb);
1120
1121static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1122{
1123 TranslationBlock *tb1, *tb_next, **ptb;
1124 unsigned int n1;
1125
1126 tb1 = tb->jmp_next[n];
1127 if (tb1 != NULL) {
1128 /* find head of list */
1129 for(;;) {
1130 n1 = (long)tb1 & 3;
1131 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1132 if (n1 == 2)
1133 break;
1134 tb1 = tb1->jmp_next[n1];
1135 }
        /* we are now sure that tb jumps to tb1 */
1137 tb_next = tb1;
1138
1139 /* remove tb from the jmp_first list */
1140 ptb = &tb_next->jmp_first;
1141 for(;;) {
1142 tb1 = *ptb;
1143 n1 = (long)tb1 & 3;
1144 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1145 if (n1 == n && tb1 == tb)
1146 break;
1147 ptb = &tb1->jmp_next[n1];
1148 }
1149 *ptb = tb->jmp_next[n];
1150 tb->jmp_next[n] = NULL;
3b46e624 1151
ea041c0e
FB
1152 /* suppress the jump to next tb in generated code */
1153 tb_reset_jump(tb, n);
1154
0124311e 1155 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1156 tb_reset_jump_recursive(tb_next);
1157 }
1158}
1159
1160static void tb_reset_jump_recursive(TranslationBlock *tb)
1161{
1162 tb_reset_jump_recursive2(tb, 0);
1163 tb_reset_jump_recursive2(tb, 1);
1164}
1165
1fddef4b 1166#if defined(TARGET_HAS_ICE)
d720b93d
FB
1167static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1168{
9b3c35e0
JM
1169 target_phys_addr_t addr;
1170 target_ulong pd;
c2f07f81
PB
1171 ram_addr_t ram_addr;
1172 PhysPageDesc *p;
d720b93d 1173
c2f07f81
PB
1174 addr = cpu_get_phys_page_debug(env, pc);
1175 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1176 if (!p) {
1177 pd = IO_MEM_UNASSIGNED;
1178 } else {
1179 pd = p->phys_offset;
1180 }
1181 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1182 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1183}
c27004ec 1184#endif
d720b93d 1185
6658ffb8
PB
1186/* Add a watchpoint. */
1187int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1188{
1189 int i;
1190
1191 for (i = 0; i < env->nb_watchpoints; i++) {
1192 if (addr == env->watchpoint[i].vaddr)
1193 return 0;
1194 }
1195 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1196 return -1;
1197
1198 i = env->nb_watchpoints++;
1199 env->watchpoint[i].vaddr = addr;
1200 tlb_flush_page(env, addr);
1201 /* FIXME: This flush is needed because of the hack to make memory ops
1202 terminate the TB. It can be removed once the proper IO trap and
1203 re-execute bits are in. */
1204 tb_flush(env);
1205 return i;
1206}
1207
1208/* Remove a watchpoint. */
1209int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1210{
1211 int i;
1212
1213 for (i = 0; i < env->nb_watchpoints; i++) {
1214 if (addr == env->watchpoint[i].vaddr) {
1215 env->nb_watchpoints--;
1216 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1217 tlb_flush_page(env, addr);
1218 return 0;
1219 }
1220 }
1221 return -1;
1222}
1223
7d03f82f
EI
1224/* Remove all watchpoints. */
1225void cpu_watchpoint_remove_all(CPUState *env) {
1226 int i;
1227
1228 for (i = 0; i < env->nb_watchpoints; i++) {
1229 tlb_flush_page(env, env->watchpoint[i].vaddr);
1230 }
1231 env->nb_watchpoints = 0;
1232}
1233
c33a346e
FB
1234/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1235 breakpoint is reached */
2e12669a 1236int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
4c3a88a2 1237{
1fddef4b 1238#if defined(TARGET_HAS_ICE)
4c3a88a2 1239 int i;
3b46e624 1240
4c3a88a2
FB
1241 for(i = 0; i < env->nb_breakpoints; i++) {
1242 if (env->breakpoints[i] == pc)
1243 return 0;
1244 }
1245
1246 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1247 return -1;
1248 env->breakpoints[env->nb_breakpoints++] = pc;
3b46e624 1249
d720b93d 1250 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1251 return 0;
1252#else
1253 return -1;
1254#endif
1255}
1256
7d03f82f
EI
1257/* remove all breakpoints */
1258void cpu_breakpoint_remove_all(CPUState *env) {
1259#if defined(TARGET_HAS_ICE)
1260 int i;
1261 for(i = 0; i < env->nb_breakpoints; i++) {
1262 breakpoint_invalidate(env, env->breakpoints[i]);
1263 }
1264 env->nb_breakpoints = 0;
1265#endif
1266}
1267
4c3a88a2 1268/* remove a breakpoint */
2e12669a 1269int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
4c3a88a2 1270{
1fddef4b 1271#if defined(TARGET_HAS_ICE)
4c3a88a2
FB
1272 int i;
1273 for(i = 0; i < env->nb_breakpoints; i++) {
1274 if (env->breakpoints[i] == pc)
1275 goto found;
1276 }
1277 return -1;
1278 found:
4c3a88a2 1279 env->nb_breakpoints--;
1fddef4b
FB
1280 if (i < env->nb_breakpoints)
1281 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
d720b93d
FB
1282
1283 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1284 return 0;
1285#else
1286 return -1;
1287#endif
1288}
1289
c33a346e
FB
1290/* enable or disable single step mode. EXCP_DEBUG is returned by the
1291 CPU loop after each instruction */
1292void cpu_single_step(CPUState *env, int enabled)
1293{
1fddef4b 1294#if defined(TARGET_HAS_ICE)
c33a346e
FB
1295 if (env->singlestep_enabled != enabled) {
1296 env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
9fa3e853 1298 /* XXX: only flush what is necessary */
0124311e 1299 tb_flush(env);
c33a346e
FB
1300 }
1301#endif
1302}
1303
34865134
FB
1304/* enable or disable low levels log */
1305void cpu_set_log(int log_flags)
1306{
1307 loglevel = log_flags;
1308 if (loglevel && !logfile) {
11fcfab4 1309 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1310 if (!logfile) {
1311 perror(logfilename);
1312 _exit(1);
1313 }
9fa3e853
FB
1314#if !defined(CONFIG_SOFTMMU)
1315 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1316 {
1317 static uint8_t logfile_buf[4096];
1318 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1319 }
1320#else
34865134 1321 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1322#endif
e735b91c
PB
1323 log_append = 1;
1324 }
1325 if (!loglevel && logfile) {
1326 fclose(logfile);
1327 logfile = NULL;
34865134
FB
1328 }
1329}
1330
1331void cpu_set_log_filename(const char *filename)
1332{
1333 logfilename = strdup(filename);
e735b91c
PB
1334 if (logfile) {
1335 fclose(logfile);
1336 logfile = NULL;
1337 }
1338 cpu_set_log(loglevel);
34865134 1339}
c33a346e 1340
0124311e 1341/* mask must never be zero, except for A20 change call */
68a79315 1342void cpu_interrupt(CPUState *env, int mask)
ea041c0e
FB
1343{
1344 TranslationBlock *tb;
15a51156 1345 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1346
68a79315 1347 env->interrupt_request |= mask;
ea041c0e
FB
1348 /* if the cpu is currently executing code, we must unlink it and
1349 all the potentially executing TB */
1350 tb = env->current_tb;
ee8b7021
FB
1351 if (tb && !testandset(&interrupt_lock)) {
1352 env->current_tb = NULL;
ea041c0e 1353 tb_reset_jump_recursive(tb);
15a51156 1354 resetlock(&interrupt_lock);
ea041c0e
FB
1355 }
1356}
1357
b54ad049
FB
1358void cpu_reset_interrupt(CPUState *env, int mask)
1359{
1360 env->interrupt_request &= ~mask;
1361}
1362
f193c797 1363CPULogItem cpu_log_items[] = {
5fafdf24 1364 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1365 "show generated host assembly code for each compiled TB" },
1366 { CPU_LOG_TB_IN_ASM, "in_asm",
1367 "show target assembly code for each compiled TB" },
5fafdf24 1368 { CPU_LOG_TB_OP, "op",
57fec1fe 1369 "show micro ops for each compiled TB" },
f193c797 1370 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1371 "show micro ops "
1372#ifdef TARGET_I386
1373 "before eflags optimization and "
f193c797 1374#endif
e01a1157 1375 "after liveness analysis" },
f193c797
FB
1376 { CPU_LOG_INT, "int",
1377 "show interrupts/exceptions in short format" },
1378 { CPU_LOG_EXEC, "exec",
1379 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1380 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1381 "show CPU state before block translation" },
f193c797
FB
1382#ifdef TARGET_I386
1383 { CPU_LOG_PCALL, "pcall",
1384 "show protected mode far calls/returns/exceptions" },
1385#endif
8e3a9fd2 1386#ifdef DEBUG_IOPORT
fd872598
FB
1387 { CPU_LOG_IOPORT, "ioport",
1388 "show all i/o ports accesses" },
8e3a9fd2 1389#endif
f193c797
FB
1390 { 0, NULL, NULL },
1391};
1392
1393static int cmp1(const char *s1, int n, const char *s2)
1394{
1395 if (strlen(s2) != n)
1396 return 0;
1397 return memcmp(s1, s2, n) == 0;
1398}
3b46e624 1399
f193c797
FB
1400/* takes a comma separated list of log masks. Return 0 if error. */
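/* e.g. "in_asm,op_opt" yields CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP_OPT, and
   "all" enables every entry of cpu_log_items. */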
1401int cpu_str_to_log_mask(const char *str)
1402{
1403 CPULogItem *item;
1404 int mask;
1405 const char *p, *p1;
1406
1407 p = str;
1408 mask = 0;
1409 for(;;) {
1410 p1 = strchr(p, ',');
1411 if (!p1)
1412 p1 = p + strlen(p);
8e3a9fd2
FB
1413 if(cmp1(p,p1-p,"all")) {
1414 for(item = cpu_log_items; item->mask != 0; item++) {
1415 mask |= item->mask;
1416 }
1417 } else {
f193c797
FB
1418 for(item = cpu_log_items; item->mask != 0; item++) {
1419 if (cmp1(p, p1 - p, item->name))
1420 goto found;
1421 }
1422 return 0;
8e3a9fd2 1423 }
f193c797
FB
1424 found:
1425 mask |= item->mask;
1426 if (*p1 != ',')
1427 break;
1428 p = p1 + 1;
1429 }
1430 return mask;
1431}
ea041c0e 1432
7501267e
FB
1433void cpu_abort(CPUState *env, const char *fmt, ...)
1434{
1435 va_list ap;
493ae1f0 1436 va_list ap2;
7501267e
FB
1437
1438 va_start(ap, fmt);
493ae1f0 1439 va_copy(ap2, ap);
7501267e
FB
1440 fprintf(stderr, "qemu: fatal: ");
1441 vfprintf(stderr, fmt, ap);
1442 fprintf(stderr, "\n");
1443#ifdef TARGET_I386
7fe48483
FB
1444 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1445#else
1446 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1447#endif
924edcae 1448 if (logfile) {
f9373291 1449 fprintf(logfile, "qemu: fatal: ");
493ae1f0 1450 vfprintf(logfile, fmt, ap2);
f9373291
JM
1451 fprintf(logfile, "\n");
1452#ifdef TARGET_I386
1453 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1454#else
1455 cpu_dump_state(env, logfile, fprintf, 0);
1456#endif
924edcae
AZ
1457 fflush(logfile);
1458 fclose(logfile);
1459 }
493ae1f0 1460 va_end(ap2);
f9373291 1461 va_end(ap);
7501267e
FB
1462 abort();
1463}
1464
c5be9f08
TS
1465CPUState *cpu_copy(CPUState *env)
1466{
01ba9816 1467 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1468 /* preserve chaining and index */
1469 CPUState *next_cpu = new_env->next_cpu;
1470 int cpu_index = new_env->cpu_index;
1471 memcpy(new_env, env, sizeof(CPUState));
1472 new_env->next_cpu = next_cpu;
1473 new_env->cpu_index = cpu_index;
1474 return new_env;
1475}
1476
0124311e
FB
1477#if !defined(CONFIG_USER_ONLY)
1478
5c751e99
EI
1479static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1480{
1481 unsigned int i;
1482
1483 /* Discard jump cache entries for any tb which might potentially
1484 overlap the flushed page. */
1485 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1486 memset (&env->tb_jmp_cache[i], 0,
1487 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1488
1489 i = tb_jmp_cache_hash_page(addr);
1490 memset (&env->tb_jmp_cache[i], 0,
1491 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1492}
1493
ee8b7021
FB
1494/* NOTE: if flush_global is true, also flush global entries (not
1495 implemented yet) */
1496void tlb_flush(CPUState *env, int flush_global)
33417e70 1497{
33417e70 1498 int i;
0124311e 1499
9fa3e853
FB
1500#if defined(DEBUG_TLB)
1501 printf("tlb_flush:\n");
1502#endif
0124311e
FB
1503 /* must reset current TB so that interrupts cannot modify the
1504 links while we are modifying them */
1505 env->current_tb = NULL;
1506
33417e70 1507 for(i = 0; i < CPU_TLB_SIZE; i++) {
84b7b8e7
FB
1508 env->tlb_table[0][i].addr_read = -1;
1509 env->tlb_table[0][i].addr_write = -1;
1510 env->tlb_table[0][i].addr_code = -1;
1511 env->tlb_table[1][i].addr_read = -1;
1512 env->tlb_table[1][i].addr_write = -1;
1513 env->tlb_table[1][i].addr_code = -1;
6fa4cea9
JM
1514#if (NB_MMU_MODES >= 3)
1515 env->tlb_table[2][i].addr_read = -1;
1516 env->tlb_table[2][i].addr_write = -1;
1517 env->tlb_table[2][i].addr_code = -1;
1518#if (NB_MMU_MODES == 4)
1519 env->tlb_table[3][i].addr_read = -1;
1520 env->tlb_table[3][i].addr_write = -1;
1521 env->tlb_table[3][i].addr_code = -1;
1522#endif
1523#endif
33417e70 1524 }
9fa3e853 1525
8a40a180 1526 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853
FB
1527
1528#if !defined(CONFIG_SOFTMMU)
1529 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
0a962c02
FB
1530#endif
1531#ifdef USE_KQEMU
1532 if (env->kqemu_enabled) {
1533 kqemu_flush(env, flush_global);
1534 }
9fa3e853 1535#endif
e3db7226 1536 tlb_flush_count++;
33417e70
FB
1537}
1538
274da6b2 1539static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1540{
5fafdf24 1541 if (addr == (tlb_entry->addr_read &
84b7b8e7 1542 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1543 addr == (tlb_entry->addr_write &
84b7b8e7 1544 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1545 addr == (tlb_entry->addr_code &
84b7b8e7
FB
1546 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1547 tlb_entry->addr_read = -1;
1548 tlb_entry->addr_write = -1;
1549 tlb_entry->addr_code = -1;
1550 }
61382a50
FB
1551}
1552
2e12669a 1553void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1554{
8a40a180 1555 int i;
0124311e 1556
9fa3e853 1557#if defined(DEBUG_TLB)
108c49b8 1558 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1559#endif
0124311e
FB
1560 /* must reset current TB so that interrupts cannot modify the
1561 links while we are modifying them */
1562 env->current_tb = NULL;
61382a50
FB
1563
1564 addr &= TARGET_PAGE_MASK;
1565 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1566 tlb_flush_entry(&env->tlb_table[0][i], addr);
1567 tlb_flush_entry(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1568#if (NB_MMU_MODES >= 3)
1569 tlb_flush_entry(&env->tlb_table[2][i], addr);
1570#if (NB_MMU_MODES == 4)
1571 tlb_flush_entry(&env->tlb_table[3][i], addr);
1572#endif
1573#endif
0124311e 1574
5c751e99 1575 tlb_flush_jmp_cache(env, addr);
9fa3e853 1576
0124311e 1577#if !defined(CONFIG_SOFTMMU)
9fa3e853 1578 if (addr < MMAP_AREA_END)
0124311e 1579 munmap((void *)addr, TARGET_PAGE_SIZE);
61382a50 1580#endif
0a962c02
FB
1581#ifdef USE_KQEMU
1582 if (env->kqemu_enabled) {
1583 kqemu_flush_page(env, addr);
1584 }
1585#endif
9fa3e853
FB
1586}
1587
9fa3e853
FB
1588/* update the TLBs so that writes to code in the virtual page 'addr'
1589 can be detected */
6a00d601 1590static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1591{
5fafdf24 1592 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1593 ram_addr + TARGET_PAGE_SIZE,
1594 CODE_DIRTY_FLAG);
9fa3e853
FB
1595}
1596
9fa3e853 1597/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1598 tested for self modifying code */
5fafdf24 1599static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1600 target_ulong vaddr)
9fa3e853 1601{
3a7d929e 1602 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1603}
1604
5fafdf24 1605static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1606 unsigned long start, unsigned long length)
1607{
1608 unsigned long addr;
84b7b8e7
FB
1609 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1610 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1611 if ((addr - start) < length) {
84b7b8e7 1612 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1ccde1cb
FB
1613 }
1614 }
1615}
1616
3a7d929e 1617void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1618 int dirty_flags)
1ccde1cb
FB
1619{
1620 CPUState *env;
4f2ac237 1621 unsigned long length, start1;
0a962c02
FB
1622 int i, mask, len;
1623 uint8_t *p;
1ccde1cb
FB
1624
1625 start &= TARGET_PAGE_MASK;
1626 end = TARGET_PAGE_ALIGN(end);
1627
1628 length = end - start;
1629 if (length == 0)
1630 return;
0a962c02 1631 len = length >> TARGET_PAGE_BITS;
3a7d929e 1632#ifdef USE_KQEMU
6a00d601
FB
1633 /* XXX: should not depend on cpu context */
1634 env = first_cpu;
3a7d929e 1635 if (env->kqemu_enabled) {
f23db169
FB
1636 ram_addr_t addr;
1637 addr = start;
1638 for(i = 0; i < len; i++) {
1639 kqemu_set_notdirty(env, addr);
1640 addr += TARGET_PAGE_SIZE;
1641 }
3a7d929e
FB
1642 }
1643#endif
f23db169
FB
1644 mask = ~dirty_flags;
1645 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1646 for(i = 0; i < len; i++)
1647 p[i] &= mask;
1648
1ccde1cb
FB
1649 /* we modify the TLB cache so that the dirty bit will be set again
1650 when accessing the range */
59817ccb 1651 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1652 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1653 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1654 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1655 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1656 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1657#if (NB_MMU_MODES >= 3)
1658 for(i = 0; i < CPU_TLB_SIZE; i++)
1659 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1660#if (NB_MMU_MODES == 4)
1661 for(i = 0; i < CPU_TLB_SIZE; i++)
1662 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1663#endif
1664#endif
6a00d601 1665 }
59817ccb
FB
1666
1667#if !defined(CONFIG_SOFTMMU)
1668 /* XXX: this is expensive */
1669 {
1670 VirtPageDesc *p;
1671 int j;
1672 target_ulong addr;
1673
1674 for(i = 0; i < L1_SIZE; i++) {
1675 p = l1_virt_map[i];
1676 if (p) {
1677 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1678 for(j = 0; j < L2_SIZE; j++) {
1679 if (p->valid_tag == virt_valid_tag &&
1680 p->phys_addr >= start && p->phys_addr < end &&
1681 (p->prot & PROT_WRITE)) {
1682 if (addr < MMAP_AREA_END) {
5fafdf24 1683 mprotect((void *)addr, TARGET_PAGE_SIZE,
59817ccb
FB
1684 p->prot & ~PROT_WRITE);
1685 }
1686 }
1687 addr += TARGET_PAGE_SIZE;
1688 p++;
1689 }
1690 }
1691 }
1692 }
1693#endif
1ccde1cb
FB
1694}
1695
3a7d929e
FB
1696static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1697{
1698 ram_addr_t ram_addr;
1699
84b7b8e7 1700 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1701 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1702 tlb_entry->addend - (unsigned long)phys_ram_base;
1703 if (!cpu_physical_memory_is_dirty(ram_addr)) {
84b7b8e7 1704 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
3a7d929e
FB
1705 }
1706 }
1707}
1708
1709/* update the TLB according to the current state of the dirty bits */
1710void cpu_tlb_update_dirty(CPUState *env)
1711{
1712 int i;
1713 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1714 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1715 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1716 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1717#if (NB_MMU_MODES >= 3)
1718 for(i = 0; i < CPU_TLB_SIZE; i++)
1719 tlb_update_dirty(&env->tlb_table[2][i]);
1720#if (NB_MMU_MODES == 4)
1721 for(i = 0; i < CPU_TLB_SIZE; i++)
1722 tlb_update_dirty(&env->tlb_table[3][i]);
1723#endif
1724#endif
3a7d929e
FB
1725}
1726
5fafdf24 1727static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
108c49b8 1728 unsigned long start)
1ccde1cb
FB
1729{
1730 unsigned long addr;
84b7b8e7
FB
1731 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1732 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1733 if (addr == start) {
84b7b8e7 1734 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1ccde1cb
FB
1735 }
1736 }
1737}
1738
1739/* update the TLB corresponding to virtual page vaddr and phys addr
1740 addr so that it is no longer dirty */
6a00d601
FB
1741static inline void tlb_set_dirty(CPUState *env,
1742 unsigned long addr, target_ulong vaddr)
1ccde1cb 1743{
1ccde1cb
FB
1744 int i;
1745
1ccde1cb
FB
1746 addr &= TARGET_PAGE_MASK;
1747 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1748 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1749 tlb_set_dirty1(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1750#if (NB_MMU_MODES >= 3)
1751 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1752#if (NB_MMU_MODES == 4)
1753 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1754#endif
1755#endif
9fa3e853
FB
1756}
1757
59817ccb
FB
1758/* add a new TLB entry. At most one entry for a given virtual address
1759 is permitted. Return 0 if OK or 2 if the page could not be mapped
1760 (can only happen in non SOFTMMU mode for I/O pages or pages
1761 conflicting with the host address space). */
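/* A CPUTLBEntry keeps the page-aligned guest virtual address in
   addr_read/addr_write/addr_code; non-zero low bits select an io_mem handler
   (IO_MEM_NOTDIRTY, watchpoints, real I/O) for the slow path, while 'addend'
   is what must be added to the guest virtual address to reach the host
   address of the backing RAM on the fast path. */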
1762int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1763 target_phys_addr_t paddr, int prot,
6ebbf390 1764 int mmu_idx, int is_softmmu)
9fa3e853 1765{
92e873b9 1766 PhysPageDesc *p;
4f2ac237 1767 unsigned long pd;
9fa3e853 1768 unsigned int index;
4f2ac237 1769 target_ulong address;
108c49b8 1770 target_phys_addr_t addend;
9fa3e853 1771 int ret;
84b7b8e7 1772 CPUTLBEntry *te;
6658ffb8 1773 int i;
9fa3e853 1774
92e873b9 1775 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1776 if (!p) {
1777 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1778 } else {
1779 pd = p->phys_offset;
9fa3e853
FB
1780 }
1781#if defined(DEBUG_TLB)
6ebbf390
JM
1782 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1783 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1784#endif
1785
1786 ret = 0;
1787#if !defined(CONFIG_SOFTMMU)
5fafdf24 1788 if (is_softmmu)
9fa3e853
FB
1789#endif
1790 {
2a4188a3 1791 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
9fa3e853
FB
1792 /* IO memory case */
1793 address = vaddr | pd;
1794 addend = paddr;
1795 } else {
1796 /* standard memory */
1797 address = vaddr;
1798 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1799 }
6658ffb8
PB
1800
1801 /* Make accesses to pages with watchpoints go via the
1802 watchpoint trap routines. */
1803 for (i = 0; i < env->nb_watchpoints; i++) {
1804 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1805 if (address & ~TARGET_PAGE_MASK) {
d79acba4 1806 env->watchpoint[i].addend = 0;
6658ffb8
PB
1807 address = vaddr | io_mem_watch;
1808 } else {
d79acba4
AZ
1809 env->watchpoint[i].addend = pd - paddr +
1810 (unsigned long) phys_ram_base;
6658ffb8
PB
1811 /* TODO: Figure out how to make read watchpoints coexist
1812 with code. */
1813 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1814 }
1815 }
1816 }
d79acba4 1817
90f18422 1818 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
9fa3e853 1819 addend -= vaddr;
6ebbf390 1820 te = &env->tlb_table[mmu_idx][index];
84b7b8e7 1821 te->addend = addend;
67b915a5 1822 if (prot & PAGE_READ) {
84b7b8e7
FB
1823 te->addr_read = address;
1824 } else {
1825 te->addr_read = -1;
1826 }
5c751e99 1827
84b7b8e7
FB
1828 if (prot & PAGE_EXEC) {
1829 te->addr_code = address;
9fa3e853 1830 } else {
84b7b8e7 1831 te->addr_code = -1;
9fa3e853 1832 }
67b915a5 1833 if (prot & PAGE_WRITE) {
5fafdf24 1834 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
856074ec
FB
1835 (pd & IO_MEM_ROMD)) {
1836 /* write access calls the I/O callback */
5fafdf24 1837 te->addr_write = vaddr |
856074ec 1838 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
5fafdf24 1839 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1ccde1cb 1840 !cpu_physical_memory_is_dirty(pd)) {
84b7b8e7 1841 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
9fa3e853 1842 } else {
84b7b8e7 1843 te->addr_write = address;
9fa3e853
FB
1844 }
1845 } else {
84b7b8e7 1846 te->addr_write = -1;
9fa3e853
FB
1847 }
1848 }
1849#if !defined(CONFIG_SOFTMMU)
1850 else {
1851 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1852 /* IO access: no mapping is done as it will be handled by the
1853 soft MMU */
1854 if (!(env->hflags & HF_SOFTMMU_MASK))
1855 ret = 2;
1856 } else {
1857 void *map_addr;
59817ccb
FB
1858
1859 if (vaddr >= MMAP_AREA_END) {
1860 ret = 2;
1861 } else {
1862 if (prot & PROT_WRITE) {
5fafdf24 1863 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
d720b93d 1864#if defined(TARGET_HAS_SMC) || 1
59817ccb 1865 first_tb ||
d720b93d 1866#endif
5fafdf24 1867 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
59817ccb
FB
1868 !cpu_physical_memory_is_dirty(pd))) {
 1869 /* ROM: we behave as if code was inside */
 1870 /* if code is present, we only map it read-only and save the
 1871 original mapping */
1872 VirtPageDesc *vp;
3b46e624 1873
90f18422 1874 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
59817ccb
FB
1875 vp->phys_addr = pd;
1876 vp->prot = prot;
1877 vp->valid_tag = virt_valid_tag;
1878 prot &= ~PAGE_WRITE;
1879 }
1880 }
5fafdf24 1881 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
59817ccb
FB
1882 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1883 if (map_addr == MAP_FAILED) {
 1884 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1885 paddr, vaddr);
9fa3e853 1886 }
9fa3e853
FB
1887 }
1888 }
1889 }
1890#endif
1891 return ret;
1892}
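/* Illustrative sketch (not part of the original file): how a target's TLB
   fill path might hand a translated page to tlb_set_page_exec().  The
   example_get_physical_address() helper and its return convention are
   hypothetical placeholders, not real QEMU interfaces. */
#if 0
static int example_tlb_fill(CPUState *env, target_ulong vaddr, int mmu_idx,
                            int is_softmmu)
{
    target_phys_addr_t paddr;
    int prot;

    /* hypothetical target-specific page table walk for vaddr */
    if (example_get_physical_address(env, vaddr, &paddr, &prot) < 0)
        return 1; /* no mapping: the caller raises a guest page fault */

    /* at most one TLB entry per virtual page, so both addresses are
       truncated to their page base before insertion */
    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                             paddr & TARGET_PAGE_MASK,
                             prot, mmu_idx, is_softmmu);
}
#endif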
1893
1894/* called from signal handler: invalidate the code and unprotect the
 1895 page. Return TRUE if the fault was successfully handled. */
53a5960a 1896int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
9fa3e853
FB
1897{
1898#if !defined(CONFIG_SOFTMMU)
1899 VirtPageDesc *vp;
1900
1901#if defined(DEBUG_TLB)
1902 printf("page_unprotect: addr=0x%08x\n", addr);
1903#endif
1904 addr &= TARGET_PAGE_MASK;
59817ccb
FB
1905
1906 /* if it is not mapped, no need to worry here */
1907 if (addr >= MMAP_AREA_END)
1908 return 0;
9fa3e853
FB
1909 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1910 if (!vp)
1911 return 0;
1912 /* NOTE: in this case, validate_tag is _not_ tested as it
1913 validates only the code TLB */
1914 if (vp->valid_tag != virt_valid_tag)
1915 return 0;
1916 if (!(vp->prot & PAGE_WRITE))
1917 return 0;
1918#if defined(DEBUG_TLB)
5fafdf24 1919 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
9fa3e853
FB
1920 addr, vp->phys_addr, vp->prot);
1921#endif
59817ccb
FB
1922 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1923 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1924 (unsigned long)addr, vp->prot);
d720b93d 1925 /* set the dirty bit */
0a962c02 1926 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
d720b93d
FB
1927 /* flush the code inside */
1928 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
9fa3e853
FB
1929 return 1;
1930#else
1931 return 0;
1932#endif
33417e70
FB
1933}
1934
0124311e
FB
1935#else
1936
ee8b7021 1937void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1938{
1939}
1940
2e12669a 1941void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1942{
1943}
1944
5fafdf24
TS
1945int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1946 target_phys_addr_t paddr, int prot,
6ebbf390 1947 int mmu_idx, int is_softmmu)
9fa3e853
FB
1948{
1949 return 0;
1950}
0124311e 1951
9fa3e853
FB
1952/* dump memory mappings */
1953void page_dump(FILE *f)
33417e70 1954{
9fa3e853
FB
1955 unsigned long start, end;
1956 int i, j, prot, prot1;
1957 PageDesc *p;
33417e70 1958
9fa3e853
FB
1959 fprintf(f, "%-8s %-8s %-8s %s\n",
1960 "start", "end", "size", "prot");
1961 start = -1;
1962 end = -1;
1963 prot = 0;
1964 for(i = 0; i <= L1_SIZE; i++) {
1965 if (i < L1_SIZE)
1966 p = l1_map[i];
1967 else
1968 p = NULL;
1969 for(j = 0;j < L2_SIZE; j++) {
1970 if (!p)
1971 prot1 = 0;
1972 else
1973 prot1 = p[j].flags;
1974 if (prot1 != prot) {
1975 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1976 if (start != -1) {
1977 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 1978 start, end, end - start,
9fa3e853
FB
1979 prot & PAGE_READ ? 'r' : '-',
1980 prot & PAGE_WRITE ? 'w' : '-',
1981 prot & PAGE_EXEC ? 'x' : '-');
1982 }
1983 if (prot1 != 0)
1984 start = end;
1985 else
1986 start = -1;
1987 prot = prot1;
1988 }
1989 if (!p)
1990 break;
1991 }
33417e70 1992 }
33417e70
FB
1993}
1994
53a5960a 1995int page_get_flags(target_ulong address)
33417e70 1996{
9fa3e853
FB
1997 PageDesc *p;
1998
1999 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2000 if (!p)
9fa3e853
FB
2001 return 0;
2002 return p->flags;
2003}
2004
2005/* modify the flags of a page and invalidate the code if
 2006 necessary. The flag PAGE_WRITE_ORG is set automatically
2007 depending on PAGE_WRITE */
53a5960a 2008void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
2009{
2010 PageDesc *p;
53a5960a 2011 target_ulong addr;
9fa3e853 2012
c8a706fe 2013 /* mmap_lock should already be held. */
9fa3e853
FB
2014 start = start & TARGET_PAGE_MASK;
2015 end = TARGET_PAGE_ALIGN(end);
2016 if (flags & PAGE_WRITE)
2017 flags |= PAGE_WRITE_ORG;
2018 spin_lock(&tb_lock);
2019 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2020 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2021 /* if the write protection is set, then we invalidate the code
2022 inside */
5fafdf24 2023 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2024 (flags & PAGE_WRITE) &&
2025 p->first_tb) {
d720b93d 2026 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2027 }
2028 p->flags = flags;
2029 }
2030 spin_unlock(&tb_lock);
33417e70
FB
2031}
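/* Illustrative sketch (not part of the original file): how a user-mode
   mmap()/loader path might flag a freshly mapped guest region.  The
   function name and addresses are hypothetical; as noted above, mmap_lock
   must be held around the update. */
static void example_map_guest_data(target_ulong guest_addr, target_ulong len)
{
    mmap_lock();
    /* asking for PAGE_WRITE also sets PAGE_WRITE_ORG and invalidates any
       translated code that lived in the range */
    page_set_flags(guest_addr, guest_addr + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
    mmap_unlock();
}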
2032
3d97b40b
TS
2033int page_check_range(target_ulong start, target_ulong len, int flags)
2034{
2035 PageDesc *p;
2036 target_ulong end;
2037 target_ulong addr;
2038
 2039 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2040 start = start & TARGET_PAGE_MASK;
2041
2042 if( end < start )
2043 /* we've wrapped around */
2044 return -1;
2045 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2046 p = page_find(addr >> TARGET_PAGE_BITS);
2047 if( !p )
2048 return -1;
2049 if( !(p->flags & PAGE_VALID) )
2050 return -1;
2051
dae3270c 2052 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2053 return -1;
dae3270c
FB
2054 if (flags & PAGE_WRITE) {
2055 if (!(p->flags & PAGE_WRITE_ORG))
2056 return -1;
2057 /* unprotect the page if it was put read-only because it
2058 contains translated code */
2059 if (!(p->flags & PAGE_WRITE)) {
2060 if (!page_unprotect(addr, 0, NULL))
2061 return -1;
2062 }
2063 return 0;
2064 }
3d97b40b
TS
2065 }
2066 return 0;
2067}
2068
9fa3e853
FB
2069/* called from signal handler: invalidate the code and unprotect the
 2070 page. Return TRUE if the fault was successfully handled. */
53a5960a 2071int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2072{
2073 unsigned int page_index, prot, pindex;
2074 PageDesc *p, *p1;
53a5960a 2075 target_ulong host_start, host_end, addr;
9fa3e853 2076
c8a706fe
PB
2077 /* Technically this isn't safe inside a signal handler. However we
2078 know this only ever happens in a synchronous SEGV handler, so in
2079 practice it seems to be ok. */
2080 mmap_lock();
2081
83fb7adf 2082 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2083 page_index = host_start >> TARGET_PAGE_BITS;
2084 p1 = page_find(page_index);
c8a706fe
PB
2085 if (!p1) {
2086 mmap_unlock();
9fa3e853 2087 return 0;
c8a706fe 2088 }
83fb7adf 2089 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2090 p = p1;
2091 prot = 0;
2092 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2093 prot |= p->flags;
2094 p++;
2095 }
2096 /* if the page was really writable, then we change its
2097 protection back to writable */
2098 if (prot & PAGE_WRITE_ORG) {
2099 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2100 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2101 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2102 (prot & PAGE_BITS) | PAGE_WRITE);
2103 p1[pindex].flags |= PAGE_WRITE;
2104 /* and since the content will be modified, we must invalidate
2105 the corresponding translated code. */
d720b93d 2106 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2107#ifdef DEBUG_TB_CHECK
2108 tb_invalidate_check(address);
2109#endif
c8a706fe 2110 mmap_unlock();
9fa3e853
FB
2111 return 1;
2112 }
2113 }
c8a706fe 2114 mmap_unlock();
9fa3e853
FB
2115 return 0;
2116}
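/* Illustrative sketch (not part of the original file): a host SEGV handler
   calling page_unprotect() when a write hits a page that was made
   read-only to protect translated code.  How the faulting host address and
   PC are extracted from the signal context is host specific and only
   hinted at here; h2g() converts the host address back to a guest one. */
#if 0
static int example_handle_write_fault(unsigned long host_addr,
                                      unsigned long host_pc, void *puc)
{
    if (page_unprotect(h2g(host_addr), host_pc, puc)) {
        return 1; /* fault handled: the write can simply be restarted */
    }
    return 0; /* not one of ours, let the default handler deal with it */
}
#endif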
2117
6a00d601
FB
2118static inline void tlb_set_dirty(CPUState *env,
2119 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2120{
2121}
9fa3e853
FB
2122#endif /* defined(CONFIG_USER_ONLY) */
2123
db7b5426 2124static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a
AJ
2125 ram_addr_t memory);
2126static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2127 ram_addr_t orig_memory);
db7b5426
BS
2128#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2129 need_subpage) \
2130 do { \
2131 if (addr > start_addr) \
2132 start_addr2 = 0; \
2133 else { \
2134 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2135 if (start_addr2 > 0) \
2136 need_subpage = 1; \
2137 } \
2138 \
49e9fba2 2139 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2140 end_addr2 = TARGET_PAGE_SIZE - 1; \
2141 else { \
2142 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2143 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2144 need_subpage = 1; \
2145 } \
2146 } while (0)
2147
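/* Worked example for CHECK_SUBPAGE (not part of the original file),
   assuming a 4 KiB target page (TARGET_PAGE_SIZE = 0x1000) and a
   hypothetical registration with start_addr = 0x10000200 and
   orig_size = 0x400: on the first loop iteration addr == start_addr, so
       start_addr2 = 0x10000200 & ~TARGET_PAGE_MASK = 0x200   -> need_subpage
       end_addr2   = (0x10000200 + 0x400 - 1) & ~TARGET_PAGE_MASK = 0x5ff
   i.e. only offsets 0x200..0x5ff of that page get the new handler via
   subpage_register(); the rest of the page keeps its previous mapping. */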
33417e70
FB
2148/* register physical memory. 'size' must be a multiple of the target
2149 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
 2150 I/O memory page */
5fafdf24 2151void cpu_register_physical_memory(target_phys_addr_t start_addr,
00f82b8a
AJ
2152 ram_addr_t size,
2153 ram_addr_t phys_offset)
33417e70 2154{
108c49b8 2155 target_phys_addr_t addr, end_addr;
92e873b9 2156 PhysPageDesc *p;
9d42037b 2157 CPUState *env;
00f82b8a 2158 ram_addr_t orig_size = size;
db7b5426 2159 void *subpage;
33417e70 2160
da260249
FB
2161#ifdef USE_KQEMU
2162 /* XXX: should not depend on cpu context */
2163 env = first_cpu;
2164 if (env->kqemu_enabled) {
2165 kqemu_set_phys_mem(start_addr, size, phys_offset);
2166 }
2167#endif
5fd386f6 2168 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2169 end_addr = start_addr + (target_phys_addr_t)size;
2170 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2171 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2172 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2173 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2174 target_phys_addr_t start_addr2, end_addr2;
2175 int need_subpage = 0;
2176
2177 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2178 need_subpage);
4254fab8 2179 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2180 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2181 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2182 &p->phys_offset, orig_memory);
2183 } else {
2184 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2185 >> IO_MEM_SHIFT];
2186 }
2187 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2188 } else {
2189 p->phys_offset = phys_offset;
2190 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2191 (phys_offset & IO_MEM_ROMD))
2192 phys_offset += TARGET_PAGE_SIZE;
2193 }
2194 } else {
2195 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2196 p->phys_offset = phys_offset;
2197 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2198 (phys_offset & IO_MEM_ROMD))
2199 phys_offset += TARGET_PAGE_SIZE;
2200 else {
2201 target_phys_addr_t start_addr2, end_addr2;
2202 int need_subpage = 0;
2203
2204 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2205 end_addr2, need_subpage);
2206
4254fab8 2207 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2208 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2209 &p->phys_offset, IO_MEM_UNASSIGNED);
2210 subpage_register(subpage, start_addr2, end_addr2,
2211 phys_offset);
2212 }
2213 }
2214 }
33417e70 2215 }
3b46e624 2216
9d42037b
FB
2217 /* since each CPU stores ram addresses in its TLB cache, we must
2218 reset the modified entries */
2219 /* XXX: slow ! */
2220 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2221 tlb_flush(env, 1);
2222 }
33417e70
FB
2223}
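/* Illustrative sketch (not part of the original file): a board init
   registering RAM and a ROM page.  The sizes, the 0xfffff000 address and
   the function name are hypothetical; the phys_offset values follow the
   usual idiom of OR'ing the qemu_ram_alloc() offset with IO_MEM_RAM or
   IO_MEM_ROM. */
static void example_register_board_memory(void)
{
    ram_addr_t ram_offset, rom_offset;

    /* 16 MB of ordinary RAM at guest physical address 0 */
    ram_offset = qemu_ram_alloc(16 * 1024 * 1024);
    cpu_register_physical_memory(0x00000000, 16 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);

    /* one page of ROM: backed by allocated RAM but write-protected
       because of the IO_MEM_ROM flag in phys_offset */
    rom_offset = qemu_ram_alloc(TARGET_PAGE_SIZE);
    cpu_register_physical_memory(0xfffff000, TARGET_PAGE_SIZE,
                                 rom_offset | IO_MEM_ROM);
}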
2224
ba863458 2225/* XXX: temporary until new memory mapping API */
00f82b8a 2226ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2227{
2228 PhysPageDesc *p;
2229
2230 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2231 if (!p)
2232 return IO_MEM_UNASSIGNED;
2233 return p->phys_offset;
2234}
2235
e9a1ab19 2236/* XXX: better than nothing */
00f82b8a 2237ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2238{
2239 ram_addr_t addr;
7fb4fdcf 2240 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
ed441467
FB
 2241 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2242 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2243 abort();
2244 }
2245 addr = phys_ram_alloc_offset;
2246 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2247 return addr;
2248}
2249
2250void qemu_ram_free(ram_addr_t addr)
2251{
2252}
2253
a4193c8a 2254static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2255{
67d3b957 2256#ifdef DEBUG_UNASSIGNED
ab3d1727 2257 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316
BS
2258#endif
2259#ifdef TARGET_SPARC
6c36d3fa 2260 do_unassigned_access(addr, 0, 0, 0);
f1ccf904
TS
2261#elif TARGET_CRIS
2262 do_unassigned_access(addr, 0, 0, 0);
67d3b957 2263#endif
33417e70
FB
2264 return 0;
2265}
2266
a4193c8a 2267static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2268{
67d3b957 2269#ifdef DEBUG_UNASSIGNED
ab3d1727 2270 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2271#endif
b4f0a316 2272#ifdef TARGET_SPARC
6c36d3fa 2273 do_unassigned_access(addr, 1, 0, 0);
f1ccf904
TS
2274#elif TARGET_CRIS
2275 do_unassigned_access(addr, 1, 0, 0);
b4f0a316 2276#endif
33417e70
FB
2277}
2278
2279static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2280 unassigned_mem_readb,
2281 unassigned_mem_readb,
2282 unassigned_mem_readb,
2283};
2284
2285static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2286 unassigned_mem_writeb,
2287 unassigned_mem_writeb,
2288 unassigned_mem_writeb,
2289};
2290
3a7d929e 2291static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2292{
3a7d929e
FB
2293 unsigned long ram_addr;
2294 int dirty_flags;
2295 ram_addr = addr - (unsigned long)phys_ram_base;
2296 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2297 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2298#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2299 tb_invalidate_phys_page_fast(ram_addr, 1);
2300 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2301#endif
3a7d929e 2302 }
c27004ec 2303 stb_p((uint8_t *)(long)addr, val);
f32fc648
FB
2304#ifdef USE_KQEMU
2305 if (cpu_single_env->kqemu_enabled &&
2306 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2307 kqemu_modify_page(cpu_single_env, ram_addr);
2308#endif
f23db169
FB
2309 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2310 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2311 /* we remove the notdirty callback only if the code has been
2312 flushed */
2313 if (dirty_flags == 0xff)
6a00d601 2314 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2315}
2316
3a7d929e 2317static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2318{
3a7d929e
FB
2319 unsigned long ram_addr;
2320 int dirty_flags;
2321 ram_addr = addr - (unsigned long)phys_ram_base;
2322 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2323 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2324#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2325 tb_invalidate_phys_page_fast(ram_addr, 2);
2326 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2327#endif
3a7d929e 2328 }
c27004ec 2329 stw_p((uint8_t *)(long)addr, val);
f32fc648
FB
2330#ifdef USE_KQEMU
2331 if (cpu_single_env->kqemu_enabled &&
2332 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2333 kqemu_modify_page(cpu_single_env, ram_addr);
2334#endif
f23db169
FB
2335 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2336 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2337 /* we remove the notdirty callback only if the code has been
2338 flushed */
2339 if (dirty_flags == 0xff)
6a00d601 2340 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2341}
2342
3a7d929e 2343static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2344{
3a7d929e
FB
2345 unsigned long ram_addr;
2346 int dirty_flags;
2347 ram_addr = addr - (unsigned long)phys_ram_base;
2348 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2349 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2350#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2351 tb_invalidate_phys_page_fast(ram_addr, 4);
2352 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2353#endif
3a7d929e 2354 }
c27004ec 2355 stl_p((uint8_t *)(long)addr, val);
f32fc648
FB
2356#ifdef USE_KQEMU
2357 if (cpu_single_env->kqemu_enabled &&
2358 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2359 kqemu_modify_page(cpu_single_env, ram_addr);
2360#endif
f23db169
FB
2361 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2362 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2363 /* we remove the notdirty callback only if the code has been
2364 flushed */
2365 if (dirty_flags == 0xff)
6a00d601 2366 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2367}
2368
3a7d929e 2369static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2370 NULL, /* never used */
2371 NULL, /* never used */
2372 NULL, /* never used */
2373};
2374
1ccde1cb
FB
2375static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2376 notdirty_mem_writeb,
2377 notdirty_mem_writew,
2378 notdirty_mem_writel,
2379};
2380
6658ffb8
PB
2381#if defined(CONFIG_SOFTMMU)
2382/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2383 so these check for a hit then pass through to the normal out-of-line
2384 phys routines. */
2385static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2386{
2387 return ldub_phys(addr);
2388}
2389
2390static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2391{
2392 return lduw_phys(addr);
2393}
2394
2395static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2396{
2397 return ldl_phys(addr);
2398}
2399
2400/* Generate a debug exception if a watchpoint has been hit.
2401 Returns the real physical address of the access. addr will be a host
d79acba4 2402 address in case of a RAM location. */
6658ffb8
PB
2403static target_ulong check_watchpoint(target_phys_addr_t addr)
2404{
2405 CPUState *env = cpu_single_env;
2406 target_ulong watch;
2407 target_ulong retaddr;
2408 int i;
2409
2410 retaddr = addr;
2411 for (i = 0; i < env->nb_watchpoints; i++) {
2412 watch = env->watchpoint[i].vaddr;
2413 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
d79acba4 2414 retaddr = addr - env->watchpoint[i].addend;
6658ffb8
PB
2415 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2416 cpu_single_env->watchpoint_hit = i + 1;
2417 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2418 break;
2419 }
2420 }
2421 }
2422 return retaddr;
2423}
2424
2425static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2426 uint32_t val)
2427{
2428 addr = check_watchpoint(addr);
2429 stb_phys(addr, val);
2430}
2431
2432static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2433 uint32_t val)
2434{
2435 addr = check_watchpoint(addr);
2436 stw_phys(addr, val);
2437}
2438
2439static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2440 uint32_t val)
2441{
2442 addr = check_watchpoint(addr);
2443 stl_phys(addr, val);
2444}
2445
2446static CPUReadMemoryFunc *watch_mem_read[3] = {
2447 watch_mem_readb,
2448 watch_mem_readw,
2449 watch_mem_readl,
2450};
2451
2452static CPUWriteMemoryFunc *watch_mem_write[3] = {
2453 watch_mem_writeb,
2454 watch_mem_writew,
2455 watch_mem_writel,
2456};
2457#endif
2458
db7b5426
BS
2459static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2460 unsigned int len)
2461{
db7b5426
BS
2462 uint32_t ret;
2463 unsigned int idx;
2464
2465 idx = SUBPAGE_IDX(addr - mmio->base);
2466#if defined(DEBUG_SUBPAGE)
2467 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2468 mmio, len, addr, idx);
2469#endif
3ee89922 2470 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
db7b5426
BS
2471
2472 return ret;
2473}
2474
2475static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2476 uint32_t value, unsigned int len)
2477{
db7b5426
BS
2478 unsigned int idx;
2479
2480 idx = SUBPAGE_IDX(addr - mmio->base);
2481#if defined(DEBUG_SUBPAGE)
2482 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2483 mmio, len, addr, idx, value);
2484#endif
3ee89922 2485 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
db7b5426
BS
2486}
2487
2488static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2489{
2490#if defined(DEBUG_SUBPAGE)
2491 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2492#endif
2493
2494 return subpage_readlen(opaque, addr, 0);
2495}
2496
2497static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2498 uint32_t value)
2499{
2500#if defined(DEBUG_SUBPAGE)
2501 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2502#endif
2503 subpage_writelen(opaque, addr, value, 0);
2504}
2505
2506static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2507{
2508#if defined(DEBUG_SUBPAGE)
2509 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2510#endif
2511
2512 return subpage_readlen(opaque, addr, 1);
2513}
2514
2515static void subpage_writew (void *opaque, target_phys_addr_t addr,
2516 uint32_t value)
2517{
2518#if defined(DEBUG_SUBPAGE)
2519 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2520#endif
2521 subpage_writelen(opaque, addr, value, 1);
2522}
2523
2524static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2525{
2526#if defined(DEBUG_SUBPAGE)
2527 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2528#endif
2529
2530 return subpage_readlen(opaque, addr, 2);
2531}
2532
2533static void subpage_writel (void *opaque,
2534 target_phys_addr_t addr, uint32_t value)
2535{
2536#if defined(DEBUG_SUBPAGE)
2537 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2538#endif
2539 subpage_writelen(opaque, addr, value, 2);
2540}
2541
2542static CPUReadMemoryFunc *subpage_read[] = {
2543 &subpage_readb,
2544 &subpage_readw,
2545 &subpage_readl,
2546};
2547
2548static CPUWriteMemoryFunc *subpage_write[] = {
2549 &subpage_writeb,
2550 &subpage_writew,
2551 &subpage_writel,
2552};
2553
2554static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a 2555 ram_addr_t memory)
db7b5426
BS
2556{
2557 int idx, eidx;
4254fab8 2558 unsigned int i;
db7b5426
BS
2559
2560 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2561 return -1;
2562 idx = SUBPAGE_IDX(start);
2563 eidx = SUBPAGE_IDX(end);
2564#if defined(DEBUG_SUBPAGE)
2565 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2566 mmio, start, end, idx, eidx, memory);
2567#endif
2568 memory >>= IO_MEM_SHIFT;
2569 for (; idx <= eidx; idx++) {
4254fab8 2570 for (i = 0; i < 4; i++) {
3ee89922
BS
2571 if (io_mem_read[memory][i]) {
2572 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2573 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2574 }
2575 if (io_mem_write[memory][i]) {
2576 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2577 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2578 }
4254fab8 2579 }
db7b5426
BS
2580 }
2581
2582 return 0;
2583}
2584
00f82b8a
AJ
2585static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2586 ram_addr_t orig_memory)
db7b5426
BS
2587{
2588 subpage_t *mmio;
2589 int subpage_memory;
2590
2591 mmio = qemu_mallocz(sizeof(subpage_t));
2592 if (mmio != NULL) {
2593 mmio->base = base;
2594 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2595#if defined(DEBUG_SUBPAGE)
2596 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2597 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2598#endif
2599 *phys = subpage_memory | IO_MEM_SUBPAGE;
2600 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2601 }
2602
2603 return mmio;
2604}
2605
33417e70
FB
2606static void io_mem_init(void)
2607{
3a7d929e 2608 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2609 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2610 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2611 io_mem_nb = 5;
2612
6658ffb8
PB
2613#if defined(CONFIG_SOFTMMU)
2614 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2615 watch_mem_write, NULL);
2616#endif
1ccde1cb 2617 /* alloc dirty bits array */
0a962c02 2618 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2619 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2620}
2621
2622/* mem_read and mem_write are arrays of functions containing the
2623 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2624 2). Functions can be omitted with a NULL function pointer. The
2625 registered functions may be modified dynamically later.
 2626 If io_index is non-zero, the corresponding I/O zone is
4254fab8
BS
 2627 modified. If it is zero, a new I/O zone is allocated. The return
 2628 value can be used with cpu_register_physical_memory(). (-1) is
 2629 returned on error. */
33417e70
FB
2630int cpu_register_io_memory(int io_index,
2631 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2632 CPUWriteMemoryFunc **mem_write,
2633 void *opaque)
33417e70 2634{
4254fab8 2635 int i, subwidth = 0;
33417e70
FB
2636
2637 if (io_index <= 0) {
b5ff1b31 2638 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2639 return -1;
2640 io_index = io_mem_nb++;
2641 } else {
2642 if (io_index >= IO_MEM_NB_ENTRIES)
2643 return -1;
2644 }
b5ff1b31 2645
33417e70 2646 for(i = 0;i < 3; i++) {
4254fab8
BS
2647 if (!mem_read[i] || !mem_write[i])
2648 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2649 io_mem_read[io_index][i] = mem_read[i];
2650 io_mem_write[io_index][i] = mem_write[i];
2651 }
a4193c8a 2652 io_mem_opaque[io_index] = opaque;
4254fab8 2653 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2654}
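/* Illustrative sketch (not part of the original file): a minimal device
   registered through cpu_register_io_memory() and then mapped into the
   guest physical address space.  The names, the opaque pointer and the
   0x90000000 address are hypothetical. */
static uint32_t example_dev_readb(void *opaque, target_phys_addr_t addr)
{
    /* opaque is whatever was passed to cpu_register_io_memory() */
    return 0x42;
}

static void example_dev_writeb(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    /* writes are simply ignored in this sketch */
}

/* the same byte handler is reused for word and long accesses, as the
   unassigned_mem_read/write tables above do */
static CPUReadMemoryFunc *example_dev_read[3] = {
    example_dev_readb,
    example_dev_readb,
    example_dev_readb,
};

static CPUWriteMemoryFunc *example_dev_write[3] = {
    example_dev_writeb,
    example_dev_writeb,
    example_dev_writeb,
};

static void example_dev_map(void)
{
    int iomemtype;

    /* io_index == 0 asks for a newly allocated I/O zone */
    iomemtype = cpu_register_io_memory(0, example_dev_read,
                                       example_dev_write, NULL);
    /* the return value is directly usable as a phys_offset */
    cpu_register_physical_memory(0x90000000, TARGET_PAGE_SIZE, iomemtype);
}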
61382a50 2655
8926b517
FB
2656CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2657{
2658 return io_mem_write[io_index >> IO_MEM_SHIFT];
2659}
2660
2661CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2662{
2663 return io_mem_read[io_index >> IO_MEM_SHIFT];
2664}
2665
13eb76e0
FB
2666/* physical memory access (slow version, mainly for debug) */
2667#if defined(CONFIG_USER_ONLY)
5fafdf24 2668void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2669 int len, int is_write)
2670{
2671 int l, flags;
2672 target_ulong page;
53a5960a 2673 void * p;
13eb76e0
FB
2674
2675 while (len > 0) {
2676 page = addr & TARGET_PAGE_MASK;
2677 l = (page + TARGET_PAGE_SIZE) - addr;
2678 if (l > len)
2679 l = len;
2680 flags = page_get_flags(page);
2681 if (!(flags & PAGE_VALID))
2682 return;
2683 if (is_write) {
2684 if (!(flags & PAGE_WRITE))
2685 return;
579a97f7 2686 /* XXX: this code should not depend on lock_user */
72fb7daa 2687 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2688 /* FIXME - should this return an error rather than just fail? */
2689 return;
72fb7daa
AJ
2690 memcpy(p, buf, l);
2691 unlock_user(p, addr, l);
13eb76e0
FB
2692 } else {
2693 if (!(flags & PAGE_READ))
2694 return;
579a97f7 2695 /* XXX: this code should not depend on lock_user */
72fb7daa 2696 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2697 /* FIXME - should this return an error rather than just fail? */
2698 return;
72fb7daa 2699 memcpy(buf, p, l);
5b257578 2700 unlock_user(p, addr, 0);
13eb76e0
FB
2701 }
2702 len -= l;
2703 buf += l;
2704 addr += l;
2705 }
2706}
8df1cd07 2707
13eb76e0 2708#else
5fafdf24 2709void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2710 int len, int is_write)
2711{
2712 int l, io_index;
2713 uint8_t *ptr;
2714 uint32_t val;
2e12669a
FB
2715 target_phys_addr_t page;
2716 unsigned long pd;
92e873b9 2717 PhysPageDesc *p;
3b46e624 2718
13eb76e0
FB
2719 while (len > 0) {
2720 page = addr & TARGET_PAGE_MASK;
2721 l = (page + TARGET_PAGE_SIZE) - addr;
2722 if (l > len)
2723 l = len;
92e873b9 2724 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2725 if (!p) {
2726 pd = IO_MEM_UNASSIGNED;
2727 } else {
2728 pd = p->phys_offset;
2729 }
3b46e624 2730
13eb76e0 2731 if (is_write) {
3a7d929e 2732 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2733 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2734 /* XXX: could force cpu_single_env to NULL to avoid
2735 potential bugs */
13eb76e0 2736 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2737 /* 32 bit write access */
c27004ec 2738 val = ldl_p(buf);
a4193c8a 2739 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2740 l = 4;
2741 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2742 /* 16 bit write access */
c27004ec 2743 val = lduw_p(buf);
a4193c8a 2744 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2745 l = 2;
2746 } else {
1c213d19 2747 /* 8 bit write access */
c27004ec 2748 val = ldub_p(buf);
a4193c8a 2749 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2750 l = 1;
2751 }
2752 } else {
b448f2f3
FB
2753 unsigned long addr1;
2754 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2755 /* RAM case */
b448f2f3 2756 ptr = phys_ram_base + addr1;
13eb76e0 2757 memcpy(ptr, buf, l);
3a7d929e
FB
2758 if (!cpu_physical_memory_is_dirty(addr1)) {
2759 /* invalidate code */
2760 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2761 /* set dirty bit */
5fafdf24 2762 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2763 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2764 }
13eb76e0
FB
2765 }
2766 } else {
5fafdf24 2767 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2768 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2769 /* I/O case */
2770 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2771 if (l >= 4 && ((addr & 3) == 0)) {
2772 /* 32 bit read access */
a4193c8a 2773 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2774 stl_p(buf, val);
13eb76e0
FB
2775 l = 4;
2776 } else if (l >= 2 && ((addr & 1) == 0)) {
2777 /* 16 bit read access */
a4193c8a 2778 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2779 stw_p(buf, val);
13eb76e0
FB
2780 l = 2;
2781 } else {
1c213d19 2782 /* 8 bit read access */
a4193c8a 2783 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2784 stb_p(buf, val);
13eb76e0
FB
2785 l = 1;
2786 }
2787 } else {
2788 /* RAM case */
5fafdf24 2789 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
2790 (addr & ~TARGET_PAGE_MASK);
2791 memcpy(buf, ptr, l);
2792 }
2793 }
2794 len -= l;
2795 buf += l;
2796 addr += l;
2797 }
2798}
8df1cd07 2799
d0ecd2aa 2800/* used for ROM loading: can write to RAM and ROM */
5fafdf24 2801void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
2802 const uint8_t *buf, int len)
2803{
2804 int l;
2805 uint8_t *ptr;
2806 target_phys_addr_t page;
2807 unsigned long pd;
2808 PhysPageDesc *p;
3b46e624 2809
d0ecd2aa
FB
2810 while (len > 0) {
2811 page = addr & TARGET_PAGE_MASK;
2812 l = (page + TARGET_PAGE_SIZE) - addr;
2813 if (l > len)
2814 l = len;
2815 p = phys_page_find(page >> TARGET_PAGE_BITS);
2816 if (!p) {
2817 pd = IO_MEM_UNASSIGNED;
2818 } else {
2819 pd = p->phys_offset;
2820 }
3b46e624 2821
d0ecd2aa 2822 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2823 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2824 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2825 /* do nothing */
2826 } else {
2827 unsigned long addr1;
2828 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2829 /* ROM/RAM case */
2830 ptr = phys_ram_base + addr1;
2831 memcpy(ptr, buf, l);
2832 }
2833 len -= l;
2834 buf += l;
2835 addr += l;
2836 }
2837}
2838
2839
8df1cd07
FB
2840/* warning: addr must be aligned */
2841uint32_t ldl_phys(target_phys_addr_t addr)
2842{
2843 int io_index;
2844 uint8_t *ptr;
2845 uint32_t val;
2846 unsigned long pd;
2847 PhysPageDesc *p;
2848
2849 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2850 if (!p) {
2851 pd = IO_MEM_UNASSIGNED;
2852 } else {
2853 pd = p->phys_offset;
2854 }
3b46e624 2855
5fafdf24 2856 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2857 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2858 /* I/O case */
2859 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2860 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2861 } else {
2862 /* RAM case */
5fafdf24 2863 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2864 (addr & ~TARGET_PAGE_MASK);
2865 val = ldl_p(ptr);
2866 }
2867 return val;
2868}
2869
84b7b8e7
FB
2870/* warning: addr must be aligned */
2871uint64_t ldq_phys(target_phys_addr_t addr)
2872{
2873 int io_index;
2874 uint8_t *ptr;
2875 uint64_t val;
2876 unsigned long pd;
2877 PhysPageDesc *p;
2878
2879 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2880 if (!p) {
2881 pd = IO_MEM_UNASSIGNED;
2882 } else {
2883 pd = p->phys_offset;
2884 }
3b46e624 2885
2a4188a3
FB
2886 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2887 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2888 /* I/O case */
2889 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2890#ifdef TARGET_WORDS_BIGENDIAN
2891 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2892 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2893#else
2894 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2895 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2896#endif
2897 } else {
2898 /* RAM case */
5fafdf24 2899 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
2900 (addr & ~TARGET_PAGE_MASK);
2901 val = ldq_p(ptr);
2902 }
2903 return val;
2904}
2905
aab33094
FB
2906/* XXX: optimize */
2907uint32_t ldub_phys(target_phys_addr_t addr)
2908{
2909 uint8_t val;
2910 cpu_physical_memory_read(addr, &val, 1);
2911 return val;
2912}
2913
2914/* XXX: optimize */
2915uint32_t lduw_phys(target_phys_addr_t addr)
2916{
2917 uint16_t val;
2918 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2919 return tswap16(val);
2920}
2921
8df1cd07
FB
2922/* warning: addr must be aligned. The ram page is not marked as dirty
2923 and the code inside is not invalidated. It is useful if the dirty
2924 bits are used to track modified PTEs */
2925void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2926{
2927 int io_index;
2928 uint8_t *ptr;
2929 unsigned long pd;
2930 PhysPageDesc *p;
2931
2932 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2933 if (!p) {
2934 pd = IO_MEM_UNASSIGNED;
2935 } else {
2936 pd = p->phys_offset;
2937 }
3b46e624 2938
3a7d929e 2939 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2940 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2941 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2942 } else {
5fafdf24 2943 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2944 (addr & ~TARGET_PAGE_MASK);
2945 stl_p(ptr, val);
2946 }
2947}
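/* Illustrative sketch (not part of the original file): a target MMU helper
   setting the "accessed" bit in a guest page table entry.  Going through
   stl_phys_notdirty() updates the PTE without marking the RAM page dirty
   or invalidating translated code in it, per the warning above.  pte_addr
   and the 0x20 bit position are hypothetical. */
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | 0x20);
}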
2948
bc98a7ef
JM
2949void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2950{
2951 int io_index;
2952 uint8_t *ptr;
2953 unsigned long pd;
2954 PhysPageDesc *p;
2955
2956 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2957 if (!p) {
2958 pd = IO_MEM_UNASSIGNED;
2959 } else {
2960 pd = p->phys_offset;
2961 }
3b46e624 2962
bc98a7ef
JM
2963 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2964 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2965#ifdef TARGET_WORDS_BIGENDIAN
2966 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2967 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2968#else
2969 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2970 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2971#endif
2972 } else {
5fafdf24 2973 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
2974 (addr & ~TARGET_PAGE_MASK);
2975 stq_p(ptr, val);
2976 }
2977}
2978
8df1cd07 2979/* warning: addr must be aligned */
8df1cd07
FB
2980void stl_phys(target_phys_addr_t addr, uint32_t val)
2981{
2982 int io_index;
2983 uint8_t *ptr;
2984 unsigned long pd;
2985 PhysPageDesc *p;
2986
2987 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2988 if (!p) {
2989 pd = IO_MEM_UNASSIGNED;
2990 } else {
2991 pd = p->phys_offset;
2992 }
3b46e624 2993
3a7d929e 2994 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2995 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2996 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2997 } else {
2998 unsigned long addr1;
2999 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3000 /* RAM case */
3001 ptr = phys_ram_base + addr1;
3002 stl_p(ptr, val);
3a7d929e
FB
3003 if (!cpu_physical_memory_is_dirty(addr1)) {
3004 /* invalidate code */
3005 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3006 /* set dirty bit */
f23db169
FB
3007 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3008 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3009 }
8df1cd07
FB
3010 }
3011}
3012
aab33094
FB
3013/* XXX: optimize */
3014void stb_phys(target_phys_addr_t addr, uint32_t val)
3015{
3016 uint8_t v = val;
3017 cpu_physical_memory_write(addr, &v, 1);
3018}
3019
3020/* XXX: optimize */
3021void stw_phys(target_phys_addr_t addr, uint32_t val)
3022{
3023 uint16_t v = tswap16(val);
3024 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3025}
3026
3027/* XXX: optimize */
3028void stq_phys(target_phys_addr_t addr, uint64_t val)
3029{
3030 val = tswap64(val);
3031 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3032}
3033
13eb76e0
FB
3034#endif
3035
3036/* virtual memory access for debug */
5fafdf24 3037int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 3038 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3039{
3040 int l;
9b3c35e0
JM
3041 target_phys_addr_t phys_addr;
3042 target_ulong page;
13eb76e0
FB
3043
3044 while (len > 0) {
3045 page = addr & TARGET_PAGE_MASK;
3046 phys_addr = cpu_get_phys_page_debug(env, page);
3047 /* if no physical page mapped, return an error */
3048 if (phys_addr == -1)
3049 return -1;
3050 l = (page + TARGET_PAGE_SIZE) - addr;
3051 if (l > len)
3052 l = len;
5fafdf24 3053 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 3054 buf, l, is_write);
13eb76e0
FB
3055 len -= l;
3056 buf += l;
3057 addr += l;
3058 }
3059 return 0;
3060}
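/* Illustrative sketch (not part of the original file): how a debugger stub
   might read guest virtual memory through cpu_memory_rw_debug().  The
   function name and the fixed 16-byte length are hypothetical. */
static int example_debug_read16(CPUState *env, target_ulong vaddr,
                                uint8_t *buf)
{
    /* is_write == 0: copy 16 bytes out of guest memory into buf;
       returns -1 if some page in the range has no physical mapping */
    return cpu_memory_rw_debug(env, vaddr, buf, 16, 0);
}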
3061
e3db7226
FB
3062void dump_exec_info(FILE *f,
3063 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3064{
3065 int i, target_code_size, max_target_code_size;
3066 int direct_jmp_count, direct_jmp2_count, cross_page;
3067 TranslationBlock *tb;
3b46e624 3068
e3db7226
FB
3069 target_code_size = 0;
3070 max_target_code_size = 0;
3071 cross_page = 0;
3072 direct_jmp_count = 0;
3073 direct_jmp2_count = 0;
3074 for(i = 0; i < nb_tbs; i++) {
3075 tb = &tbs[i];
3076 target_code_size += tb->size;
3077 if (tb->size > max_target_code_size)
3078 max_target_code_size = tb->size;
3079 if (tb->page_addr[1] != -1)
3080 cross_page++;
3081 if (tb->tb_next_offset[0] != 0xffff) {
3082 direct_jmp_count++;
3083 if (tb->tb_next_offset[1] != 0xffff) {
3084 direct_jmp2_count++;
3085 }
3086 }
3087 }
3088 /* XXX: avoid using doubles ? */
57fec1fe 3089 cpu_fprintf(f, "Translation buffer state:\n");
26a5f13b
FB
3090 cpu_fprintf(f, "gen code size %ld/%ld\n",
3091 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3092 cpu_fprintf(f, "TB count %d/%d\n",
3093 nb_tbs, code_gen_max_blocks);
5fafdf24 3094 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
3095 nb_tbs ? target_code_size / nb_tbs : 0,
3096 max_target_code_size);
5fafdf24 3097 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
3098 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3099 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
3100 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3101 cross_page,
e3db7226
FB
3102 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3103 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3104 direct_jmp_count,
e3db7226
FB
3105 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3106 direct_jmp2_count,
3107 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3108 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
3109 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3110 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3111 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3112 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
3113}
3114
5fafdf24 3115#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3116
3117#define MMUSUFFIX _cmmu
3118#define GETPC() NULL
3119#define env cpu_single_env
b769d8fe 3120#define SOFTMMU_CODE_ACCESS
61382a50
FB
3121
3122#define SHIFT 0
3123#include "softmmu_template.h"
3124
3125#define SHIFT 1
3126#include "softmmu_template.h"
3127
3128#define SHIFT 2
3129#include "softmmu_template.h"
3130
3131#define SHIFT 3
3132#include "softmmu_template.h"
3133
3134#undef env
3135
3136#endif