/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__)
/* The prologue must be reachable with a direct jump. ARM has a
   limited branch range (possibly also PPC and SPARC?) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
uint8_t *code_gen_buffer;
unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

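/* The page descriptors are kept in a two-level table: the top L1_BITS of
   a page index select an entry of l1_map (or l1_phys_map), which points
   to a block of L2_SIZE descriptors indexed by the low L2_BITS of the
   page index.  For example, with TARGET_PAGE_BITS = 12 and L2_BITS = 10,
   a 32-bit address uses bits [31:22] for the first-level lookup and bits
   [21:12] for the second-level lookup.  Second-level blocks are
   allocated lazily on first use. */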
/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE * TARGET_PAGE_SIZE))
        return NULL;
#endif
    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        unsigned long addr;
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        addr = h2g(p);
        if (addr == (target_ulong)addr) {
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

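/* The physical page map mirrors the structure above: phys_page_find_alloc()
   maps a physical page index to its PhysPageDesc, inserting one extra
   indirection level above l1_phys_map when TARGET_PHYS_ADDR_SPACE_BITS is
   larger than 32.  Newly allocated second-level blocks have every entry
   initialized to IO_MEM_UNASSIGNED until a memory region is registered. */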
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

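/* Buffer sizing policy: with USE_STATIC_CODE_GEN_BUFFER (user mode) a
   fixed static buffer of DEFAULT_CODE_GEN_BUFFER_SIZE is used.  Otherwise
   the caller-supplied tb_size is honoured, defaulting to a quarter of the
   guest RAM size when zero.  On x86-64 Linux the buffer is mmap'ed with
   MAP_32BIT, presumably so that generated code stays within direct-branch
   range of the prologue, and is capped at 800 MB. */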
void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (int)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(NULL, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    tlb_flush(env, 1);

    return 0;
}
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

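/* Remove a TB from every structure that can still reach it: the physical
   PC hash table, the per-page TB lists, each CPU's tb_jmp_cache, its own
   two outgoing jump chains, and the incoming jumps of TBs that were
   chained to it (those are reset via tb_reset_jump() so they no longer
   branch into the invalidated code). */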
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

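/* Self-modifying-code support: once a page has taken
   SMC_BITMAP_USE_THRESHOLD write faults, build_page_bitmap() records,
   one bit per byte of the target page, which bytes are covered by
   translated code.  tb_invalidate_phys_page_fast() can then skip the
   costly invalidation when a write does not touch any translated code. */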
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_io_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

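/* Note on pointer tagging: the two low bits of the pointers stored in
   PageDesc.first_tb and tb->page_next[] encode which of the TB's (at most
   two) pages the link belongs to, and the value 2 in the low bits of
   tb->jmp_first marks the head of the circular list of incoming jumps.
   The real pointer is recovered by masking with ~3. */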
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    env->watchpoint[i].type = type;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* Remove all watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env) {
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        tlb_flush_page(env, env->watchpoint[i].vaddr);
    }
    env->nb_watchpoints = 0;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env) {
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    }
    env->nb_breakpoints = 0;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
#endif
    int old_mask;

    old_mask = env->interrupt_request;
    /* FIXME: This is probably not threadsafe.  A different thread could
       be in the middle of a read-modify-write operation.  */
    env->interrupt_request |= mask;
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
           an async event happened and we need to process it.  */
        if (!can_do_io(env)
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        tb = env->current_tb;
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        if (tb && !testandset(&interrupt_lock)) {
            env->current_tb = NULL;
            tb_reset_jump_recursive(tb);
            resetlock(&interrupt_lock);
        }
    }
#endif
}

1468
b54ad049
FB
1469void cpu_reset_interrupt(CPUState *env, int mask)
1470{
1471 env->interrupt_request &= ~mask;
1472}
1473
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

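/* Code pages are write-protected through the dirty-bit machinery:
   tlb_protect_code() clears CODE_DIRTY_FLAG for the page, so subsequent
   guest stores take the "not dirty" slow path, where the write can be
   checked against translated code before the flag is set again. */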
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

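/* Dirty-bit tracking for RAM: when the dirty flags of a range are reset,
   every TLB write entry mapping into the range gets TLB_NOTDIRTY set, so
   the next store goes through the slow path, which can mark the page
   dirty again (see tlb_set_dirty) and, for code pages, detect
   self-modifying code. */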
3a7d929e 1721void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1722 int dirty_flags)
1ccde1cb
FB
1723{
1724 CPUState *env;
4f2ac237 1725 unsigned long length, start1;
0a962c02
FB
1726 int i, mask, len;
1727 uint8_t *p;
1ccde1cb
FB
1728
1729 start &= TARGET_PAGE_MASK;
1730 end = TARGET_PAGE_ALIGN(end);
1731
1732 length = end - start;
1733 if (length == 0)
1734 return;
0a962c02 1735 len = length >> TARGET_PAGE_BITS;
3a7d929e 1736#ifdef USE_KQEMU
6a00d601
FB
1737 /* XXX: should not depend on cpu context */
1738 env = first_cpu;
3a7d929e 1739 if (env->kqemu_enabled) {
f23db169
FB
1740 ram_addr_t addr;
1741 addr = start;
1742 for(i = 0; i < len; i++) {
1743 kqemu_set_notdirty(env, addr);
1744 addr += TARGET_PAGE_SIZE;
1745 }
3a7d929e
FB
1746 }
1747#endif
f23db169
FB
1748 mask = ~dirty_flags;
1749 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1750 for(i = 0; i < len; i++)
1751 p[i] &= mask;
1752
1ccde1cb
FB
1753 /* we modify the TLB cache so that the dirty bit will be set again
1754 when accessing the range */
59817ccb 1755 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1756 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1757 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1758 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1759 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1760 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1761#if (NB_MMU_MODES >= 3)
1762 for(i = 0; i < CPU_TLB_SIZE; i++)
1763 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1764#if (NB_MMU_MODES == 4)
1765 for(i = 0; i < CPU_TLB_SIZE; i++)
1766 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1767#endif
1768#endif
6a00d601 1769 }
1ccde1cb
FB
1770}
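
/* Minimal sketch of a dirty-tracking consumer, assuming the
   cpu_physical_memory_get_dirty() helper and VGA_DIRTY_FLAG from cpu-all.h;
   fb_base, fb_size and the redraw step are illustrative. */
static void example_fb_update(ram_addr_t fb_base, ram_addr_t fb_size)
{
    ram_addr_t addr;

    for (addr = fb_base; addr < fb_base + fb_size; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG)) {
            /* redraw the host surface lines backed by this guest page */
        }
    }
    /* clear only our own flag so CODE_DIRTY_FLAG tracking is preserved */
    cpu_physical_memory_reset_dirty(fb_base, fb_base + fb_size, VGA_DIRTY_FLAG);
}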
1771
3a7d929e
FB
1772static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1773{
1774 ram_addr_t ram_addr;
1775
84b7b8e7 1776 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1777 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1778 tlb_entry->addend - (unsigned long)phys_ram_base;
1779 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 1780 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
1781 }
1782 }
1783}
1784
1785/* update the TLB according to the current state of the dirty bits */
1786void cpu_tlb_update_dirty(CPUState *env)
1787{
1788 int i;
1789 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1790 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1791 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1792 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1793#if (NB_MMU_MODES >= 3)
1794 for(i = 0; i < CPU_TLB_SIZE; i++)
1795 tlb_update_dirty(&env->tlb_table[2][i]);
1796#if (NB_MMU_MODES == 4)
1797 for(i = 0; i < CPU_TLB_SIZE; i++)
1798 tlb_update_dirty(&env->tlb_table[3][i]);
1799#endif
1800#endif
3a7d929e
FB
1801}
1802
0f459d16 1803static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 1804{
0f459d16
PB
1805 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1806 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
1807}
1808
0f459d16
PB
1809/* update the TLB corresponding to virtual page vaddr
1810 so that it is no longer dirty */
1811static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 1812{
1ccde1cb
FB
1813 int i;
1814
0f459d16 1815 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 1816 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
0f459d16
PB
1817 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1818 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
6fa4cea9 1819#if (NB_MMU_MODES >= 3)
0f459d16 1820 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
6fa4cea9 1821#if (NB_MMU_MODES == 4)
0f459d16 1822 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
6fa4cea9
JM
1823#endif
1824#endif
9fa3e853
FB
1825}
1826
59817ccb
FB
1827/* add a new TLB entry. At most one entry for a given virtual address
1828 is permitted. Return 0 if OK or 2 if the page could not be mapped
 1829 (can only happen in non-SOFTMMU mode for I/O pages or pages
1830 conflicting with the host address space). */
1831int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1832 target_phys_addr_t paddr, int prot,
6ebbf390 1833 int mmu_idx, int is_softmmu)
9fa3e853 1834{
92e873b9 1835 PhysPageDesc *p;
4f2ac237 1836 unsigned long pd;
9fa3e853 1837 unsigned int index;
4f2ac237 1838 target_ulong address;
0f459d16 1839 target_ulong code_address;
108c49b8 1840 target_phys_addr_t addend;
9fa3e853 1841 int ret;
84b7b8e7 1842 CPUTLBEntry *te;
6658ffb8 1843 int i;
0f459d16 1844 target_phys_addr_t iotlb;
9fa3e853 1845
92e873b9 1846 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1847 if (!p) {
1848 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1849 } else {
1850 pd = p->phys_offset;
9fa3e853
FB
1851 }
1852#if defined(DEBUG_TLB)
6ebbf390
JM
1853 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1854 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1855#endif
1856
1857 ret = 0;
0f459d16
PB
1858 address = vaddr;
1859 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1860 /* IO memory case (romd handled later) */
1861 address |= TLB_MMIO;
1862 }
1863 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1864 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1865 /* Normal RAM. */
1866 iotlb = pd & TARGET_PAGE_MASK;
1867 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1868 iotlb |= IO_MEM_NOTDIRTY;
1869 else
1870 iotlb |= IO_MEM_ROM;
1871 } else {
 1872 /* IO handlers are currently passed a physical address.
1873 It would be nice to pass an offset from the base address
1874 of that region. This would avoid having to special case RAM,
1875 and avoid full address decoding in every device.
1876 We can't use the high bits of pd for this because
1877 IO_MEM_ROMD uses these as a ram address. */
1878 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1879 }
1880
1881 code_address = address;
1882 /* Make accesses to pages with watchpoints go via the
1883 watchpoint trap routines. */
1884 for (i = 0; i < env->nb_watchpoints; i++) {
1885 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1886 iotlb = io_mem_watch + paddr;
1887 /* TODO: The memory case can be optimized by not trapping
1888 reads of pages with a write breakpoint. */
1889 address |= TLB_MMIO;
6658ffb8 1890 }
0f459d16 1891 }
d79acba4 1892
0f459d16
PB
1893 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1894 env->iotlb[mmu_idx][index] = iotlb - vaddr;
1895 te = &env->tlb_table[mmu_idx][index];
1896 te->addend = addend - vaddr;
1897 if (prot & PAGE_READ) {
1898 te->addr_read = address;
1899 } else {
1900 te->addr_read = -1;
1901 }
5c751e99 1902
0f459d16
PB
1903 if (prot & PAGE_EXEC) {
1904 te->addr_code = code_address;
1905 } else {
1906 te->addr_code = -1;
1907 }
1908 if (prot & PAGE_WRITE) {
1909 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1910 (pd & IO_MEM_ROMD)) {
1911 /* Write access calls the I/O callback. */
1912 te->addr_write = address | TLB_MMIO;
1913 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1914 !cpu_physical_memory_is_dirty(pd)) {
1915 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 1916 } else {
0f459d16 1917 te->addr_write = address;
9fa3e853 1918 }
0f459d16
PB
1919 } else {
1920 te->addr_write = -1;
9fa3e853 1921 }
9fa3e853
FB
1922 return ret;
1923}
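
/* Minimal sketch of the usual caller: a target's tlb_fill() walks the guest
   MMU and installs the resulting mapping with tlb_set_page_exec().  The
   identity mapping below is a hypothetical stand-in for a real MMU walk. */
static int example_tlb_fill(CPUState *env, target_ulong vaddr,
                            int is_write, int mmu_idx)
{
    target_phys_addr_t paddr;
    int prot;

    /* stand-in for the guest page-table walk: identity-map the page and
       allow all accesses */
    paddr = (target_phys_addr_t)vaddr;
    prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                             paddr & TARGET_PAGE_MASK, prot, mmu_idx, 1);
}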
1924
0124311e
FB
1925#else
1926
ee8b7021 1927void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1928{
1929}
1930
2e12669a 1931void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1932{
1933}
1934
5fafdf24
TS
1935int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1936 target_phys_addr_t paddr, int prot,
6ebbf390 1937 int mmu_idx, int is_softmmu)
9fa3e853
FB
1938{
1939 return 0;
1940}
0124311e 1941
9fa3e853
FB
1942/* dump memory mappings */
1943void page_dump(FILE *f)
33417e70 1944{
9fa3e853
FB
1945 unsigned long start, end;
1946 int i, j, prot, prot1;
1947 PageDesc *p;
33417e70 1948
9fa3e853
FB
1949 fprintf(f, "%-8s %-8s %-8s %s\n",
1950 "start", "end", "size", "prot");
1951 start = -1;
1952 end = -1;
1953 prot = 0;
1954 for(i = 0; i <= L1_SIZE; i++) {
1955 if (i < L1_SIZE)
1956 p = l1_map[i];
1957 else
1958 p = NULL;
1959 for(j = 0;j < L2_SIZE; j++) {
1960 if (!p)
1961 prot1 = 0;
1962 else
1963 prot1 = p[j].flags;
1964 if (prot1 != prot) {
1965 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1966 if (start != -1) {
1967 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 1968 start, end, end - start,
9fa3e853
FB
1969 prot & PAGE_READ ? 'r' : '-',
1970 prot & PAGE_WRITE ? 'w' : '-',
1971 prot & PAGE_EXEC ? 'x' : '-');
1972 }
1973 if (prot1 != 0)
1974 start = end;
1975 else
1976 start = -1;
1977 prot = prot1;
1978 }
1979 if (!p)
1980 break;
1981 }
33417e70 1982 }
33417e70
FB
1983}
1984
53a5960a 1985int page_get_flags(target_ulong address)
33417e70 1986{
9fa3e853
FB
1987 PageDesc *p;
1988
1989 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1990 if (!p)
9fa3e853
FB
1991 return 0;
1992 return p->flags;
1993}
1994
1995/* modify the flags of a page and invalidate the code if
 1996 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1997 depending on PAGE_WRITE */
53a5960a 1998void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1999{
2000 PageDesc *p;
53a5960a 2001 target_ulong addr;
9fa3e853 2002
c8a706fe 2003 /* mmap_lock should already be held. */
9fa3e853
FB
2004 start = start & TARGET_PAGE_MASK;
2005 end = TARGET_PAGE_ALIGN(end);
2006 if (flags & PAGE_WRITE)
2007 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
2008 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2009 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
17e2377a
PB
2010 /* We may be called for host regions that are outside guest
2011 address space. */
2012 if (!p)
2013 return;
9fa3e853
FB
2014 /* if the write protection is set, then we invalidate the code
2015 inside */
5fafdf24 2016 if (!(p->flags & PAGE_WRITE) &&
2017 (flags & PAGE_WRITE) &&
2018 p->first_tb) {
d720b93d 2019 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2020 }
2021 p->flags = flags;
2022 }
33417e70
FB
2023}
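
/* Minimal sketch of a page_set_flags() caller, e.g. after emulating a guest
   mmap(); the PROT_* to PAGE_* translation shown here is illustrative. */
static void example_after_mmap(target_ulong start, target_ulong len, int prot)
{
    int flags = PAGE_VALID;

    if (prot & PROT_READ)
        flags |= PAGE_READ;
    if (prot & PROT_WRITE)
        flags |= PAGE_WRITE;
    if (prot & PROT_EXEC)
        flags |= PAGE_EXEC;
    /* mmap_lock is expected to be held, as noted above */
    page_set_flags(start, start + len, flags);
}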
2024
3d97b40b
TS
2025int page_check_range(target_ulong start, target_ulong len, int flags)
2026{
2027 PageDesc *p;
2028 target_ulong end;
2029 target_ulong addr;
2030
 2031 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2032 start = start & TARGET_PAGE_MASK;
2033
2034 if( end < start )
2035 /* we've wrapped around */
2036 return -1;
2037 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2038 p = page_find(addr >> TARGET_PAGE_BITS);
2039 if( !p )
2040 return -1;
2041 if( !(p->flags & PAGE_VALID) )
2042 return -1;
2043
dae3270c 2044 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2045 return -1;
dae3270c
FB
2046 if (flags & PAGE_WRITE) {
2047 if (!(p->flags & PAGE_WRITE_ORG))
2048 return -1;
2049 /* unprotect the page if it was put read-only because it
2050 contains translated code */
2051 if (!(p->flags & PAGE_WRITE)) {
2052 if (!page_unprotect(addr, 0, NULL))
2053 return -1;
2054 }
2055 return 0;
2056 }
3d97b40b
TS
2057 }
2058 return 0;
2059}
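
/* Minimal sketch of validating a guest buffer before copying from it;
   example_copy_from_guest() is hypothetical, g2h() is the usual
   guest-to-host address translation available in user-mode emulation. */
static int example_copy_from_guest(void *host_buf, target_ulong guest_addr,
                                   target_ulong len)
{
    if (page_check_range(guest_addr, len, PAGE_READ) < 0)
        return -1;                  /* the caller would report EFAULT */
    memcpy(host_buf, g2h(guest_addr), len);
    return 0;
}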
2060
9fa3e853
FB
2061/* called from signal handler: invalidate the code and unprotect the
 2062 page. Return TRUE if the fault was successfully handled. */
53a5960a 2063int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2064{
2065 unsigned int page_index, prot, pindex;
2066 PageDesc *p, *p1;
53a5960a 2067 target_ulong host_start, host_end, addr;
9fa3e853 2068
c8a706fe
PB
2069 /* Technically this isn't safe inside a signal handler. However we
2070 know this only ever happens in a synchronous SEGV handler, so in
2071 practice it seems to be ok. */
2072 mmap_lock();
2073
83fb7adf 2074 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2075 page_index = host_start >> TARGET_PAGE_BITS;
2076 p1 = page_find(page_index);
c8a706fe
PB
2077 if (!p1) {
2078 mmap_unlock();
9fa3e853 2079 return 0;
c8a706fe 2080 }
83fb7adf 2081 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2082 p = p1;
2083 prot = 0;
2084 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2085 prot |= p->flags;
2086 p++;
2087 }
2088 /* if the page was really writable, then we change its
2089 protection back to writable */
2090 if (prot & PAGE_WRITE_ORG) {
2091 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2092 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2093 mprotect((void *)g2h(host_start), qemu_host_page_size,
2094 (prot & PAGE_BITS) | PAGE_WRITE);
2095 p1[pindex].flags |= PAGE_WRITE;
2096 /* and since the content will be modified, we must invalidate
2097 the corresponding translated code. */
d720b93d 2098 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2099#ifdef DEBUG_TB_CHECK
2100 tb_invalidate_check(address);
2101#endif
c8a706fe 2102 mmap_unlock();
9fa3e853
FB
2103 return 1;
2104 }
2105 }
c8a706fe 2106 mmap_unlock();
9fa3e853
FB
2107 return 0;
2108}
2109
6a00d601
FB
2110static inline void tlb_set_dirty(CPUState *env,
2111 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2112{
2113}
9fa3e853
FB
2114#endif /* defined(CONFIG_USER_ONLY) */
2115
e2eef170 2116#if !defined(CONFIG_USER_ONLY)
db7b5426 2117static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2118 ram_addr_t memory);
2119static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2120 ram_addr_t orig_memory);
db7b5426
BS
2121#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2122 need_subpage) \
2123 do { \
2124 if (addr > start_addr) \
2125 start_addr2 = 0; \
2126 else { \
2127 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2128 if (start_addr2 > 0) \
2129 need_subpage = 1; \
2130 } \
2131 \
49e9fba2 2132 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2133 end_addr2 = TARGET_PAGE_SIZE - 1; \
2134 else { \
2135 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2136 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2137 need_subpage = 1; \
2138 } \
2139 } while (0)
2140
33417e70
FB
2141/* register physical memory. 'size' must be a multiple of the target
2142 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2143 io memory page */
5fafdf24 2144void cpu_register_physical_memory(target_phys_addr_t start_addr,
2145 ram_addr_t size,
2146 ram_addr_t phys_offset)
33417e70 2147{
108c49b8 2148 target_phys_addr_t addr, end_addr;
92e873b9 2149 PhysPageDesc *p;
9d42037b 2150 CPUState *env;
00f82b8a 2151 ram_addr_t orig_size = size;
db7b5426 2152 void *subpage;
33417e70 2153
da260249
FB
2154#ifdef USE_KQEMU
2155 /* XXX: should not depend on cpu context */
2156 env = first_cpu;
2157 if (env->kqemu_enabled) {
2158 kqemu_set_phys_mem(start_addr, size, phys_offset);
2159 }
2160#endif
5fd386f6 2161 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2162 end_addr = start_addr + (target_phys_addr_t)size;
2163 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2164 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2165 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2166 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2167 target_phys_addr_t start_addr2, end_addr2;
2168 int need_subpage = 0;
2169
2170 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2171 need_subpage);
4254fab8 2172 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2173 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2174 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2175 &p->phys_offset, orig_memory);
2176 } else {
2177 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2178 >> IO_MEM_SHIFT];
2179 }
2180 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2181 } else {
2182 p->phys_offset = phys_offset;
2183 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2184 (phys_offset & IO_MEM_ROMD))
2185 phys_offset += TARGET_PAGE_SIZE;
2186 }
2187 } else {
2188 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2189 p->phys_offset = phys_offset;
2190 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2191 (phys_offset & IO_MEM_ROMD))
2192 phys_offset += TARGET_PAGE_SIZE;
2193 else {
2194 target_phys_addr_t start_addr2, end_addr2;
2195 int need_subpage = 0;
2196
2197 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2198 end_addr2, need_subpage);
2199
4254fab8 2200 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2201 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2202 &p->phys_offset, IO_MEM_UNASSIGNED);
2203 subpage_register(subpage, start_addr2, end_addr2,
2204 phys_offset);
2205 }
2206 }
2207 }
33417e70 2208 }
3b46e624 2209
9d42037b
FB
2210 /* since each CPU stores ram addresses in its TLB cache, we must
2211 reset the modified entries */
2212 /* XXX: slow ! */
2213 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2214 tlb_flush(env, 1);
2215 }
33417e70
FB
2216}
2217
ba863458 2218/* XXX: temporary until new memory mapping API */
00f82b8a 2219ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2220{
2221 PhysPageDesc *p;
2222
2223 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2224 if (!p)
2225 return IO_MEM_UNASSIGNED;
2226 return p->phys_offset;
2227}
2228
e9a1ab19 2229/* XXX: better than nothing */
00f82b8a 2230ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2231{
2232 ram_addr_t addr;
7fb4fdcf 2233 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
ed441467
FB
2234 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 "\n",
2235 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2236 abort();
2237 }
2238 addr = phys_ram_alloc_offset;
2239 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2240 return addr;
2241}
2242
2243void qemu_ram_free(ram_addr_t addr)
2244{
2245}
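
/* Minimal sketch of how board code pairs qemu_ram_alloc() with
   cpu_register_physical_memory(); the ROM base and size are illustrative. */
static void example_board_init(ram_addr_t ram_size)
{
    ram_addr_t ram_offset, rom_offset;

    /* main RAM at guest physical address 0 */
    ram_offset = qemu_ram_alloc(ram_size);
    cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);

    /* a 64 KB ROM: the low bits select IO_MEM_ROM so guest writes are ignored */
    rom_offset = qemu_ram_alloc(0x10000);
    cpu_register_physical_memory(0x100000, 0x10000, rom_offset | IO_MEM_ROM);
}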
2246
a4193c8a 2247static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2248{
67d3b957 2249#ifdef DEBUG_UNASSIGNED
ab3d1727 2250 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316
BS
2251#endif
2252#ifdef TARGET_SPARC
6c36d3fa 2253 do_unassigned_access(addr, 0, 0, 0);
f1ccf904
TS
2254#elif TARGET_CRIS
2255 do_unassigned_access(addr, 0, 0, 0);
67d3b957 2256#endif
33417e70
FB
2257 return 0;
2258}
2259
a4193c8a 2260static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2261{
67d3b957 2262#ifdef DEBUG_UNASSIGNED
ab3d1727 2263 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2264#endif
b4f0a316 2265#ifdef TARGET_SPARC
6c36d3fa 2266 do_unassigned_access(addr, 1, 0, 0);
f1ccf904
TS
2267#elif TARGET_CRIS
2268 do_unassigned_access(addr, 1, 0, 0);
b4f0a316 2269#endif
33417e70
FB
2270}
2271
2272static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2273 unassigned_mem_readb,
2274 unassigned_mem_readb,
2275 unassigned_mem_readb,
2276};
2277
2278static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2279 unassigned_mem_writeb,
2280 unassigned_mem_writeb,
2281 unassigned_mem_writeb,
2282};
2283
0f459d16
PB
2284static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2285 uint32_t val)
9fa3e853 2286{
3a7d929e 2287 int dirty_flags;
3a7d929e
FB
2288 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2289 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2290#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2291 tb_invalidate_phys_page_fast(ram_addr, 1);
2292 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2293#endif
3a7d929e 2294 }
0f459d16 2295 stb_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2296#ifdef USE_KQEMU
2297 if (cpu_single_env->kqemu_enabled &&
2298 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2299 kqemu_modify_page(cpu_single_env, ram_addr);
2300#endif
f23db169
FB
2301 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2302 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2303 /* we remove the notdirty callback only if the code has been
2304 flushed */
2305 if (dirty_flags == 0xff)
2e70f6ef 2306 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2307}
2308
0f459d16
PB
2309static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2310 uint32_t val)
9fa3e853 2311{
3a7d929e 2312 int dirty_flags;
3a7d929e
FB
2313 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2314 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2315#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2316 tb_invalidate_phys_page_fast(ram_addr, 2);
2317 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2318#endif
3a7d929e 2319 }
0f459d16 2320 stw_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2321#ifdef USE_KQEMU
2322 if (cpu_single_env->kqemu_enabled &&
2323 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2324 kqemu_modify_page(cpu_single_env, ram_addr);
2325#endif
f23db169
FB
2326 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2327 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2328 /* we remove the notdirty callback only if the code has been
2329 flushed */
2330 if (dirty_flags == 0xff)
2e70f6ef 2331 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2332}
2333
0f459d16
PB
2334static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2335 uint32_t val)
9fa3e853 2336{
3a7d929e 2337 int dirty_flags;
3a7d929e
FB
2338 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2339 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2340#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2341 tb_invalidate_phys_page_fast(ram_addr, 4);
2342 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2343#endif
3a7d929e 2344 }
0f459d16 2345 stl_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2346#ifdef USE_KQEMU
2347 if (cpu_single_env->kqemu_enabled &&
2348 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2349 kqemu_modify_page(cpu_single_env, ram_addr);
2350#endif
f23db169
FB
2351 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2352 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2353 /* we remove the notdirty callback only if the code has been
2354 flushed */
2355 if (dirty_flags == 0xff)
2e70f6ef 2356 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2357}
2358
3a7d929e 2359static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2360 NULL, /* never used */
2361 NULL, /* never used */
2362 NULL, /* never used */
2363};
2364
1ccde1cb
FB
2365static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2366 notdirty_mem_writeb,
2367 notdirty_mem_writew,
2368 notdirty_mem_writel,
2369};
2370
0f459d16
PB
2371/* Generate a debug exception if a watchpoint has been hit. */
2372static void check_watchpoint(int offset, int flags)
2373{
2374 CPUState *env = cpu_single_env;
2375 target_ulong vaddr;
2376 int i;
2377
2e70f6ef 2378 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
0f459d16
PB
2379 for (i = 0; i < env->nb_watchpoints; i++) {
2380 if (vaddr == env->watchpoint[i].vaddr
2381 && (env->watchpoint[i].type & flags)) {
2382 env->watchpoint_hit = i + 1;
2383 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2384 break;
2385 }
2386 }
2387}
2388
6658ffb8
PB
2389/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2390 so these check for a hit then pass through to the normal out-of-line
2391 phys routines. */
2392static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2393{
0f459d16 2394 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2395 return ldub_phys(addr);
2396}
2397
2398static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2399{
0f459d16 2400 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2401 return lduw_phys(addr);
2402}
2403
2404static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2405{
0f459d16 2406 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2407 return ldl_phys(addr);
2408}
2409
6658ffb8
PB
2410static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2411 uint32_t val)
2412{
0f459d16 2413 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2414 stb_phys(addr, val);
2415}
2416
2417static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2418 uint32_t val)
2419{
0f459d16 2420 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2421 stw_phys(addr, val);
2422}
2423
2424static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2425 uint32_t val)
2426{
0f459d16 2427 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2428 stl_phys(addr, val);
2429}
2430
2431static CPUReadMemoryFunc *watch_mem_read[3] = {
2432 watch_mem_readb,
2433 watch_mem_readw,
2434 watch_mem_readl,
2435};
2436
2437static CPUWriteMemoryFunc *watch_mem_write[3] = {
2438 watch_mem_writeb,
2439 watch_mem_writew,
2440 watch_mem_writel,
2441};
6658ffb8 2442
db7b5426
BS
2443static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2444 unsigned int len)
2445{
db7b5426
BS
2446 uint32_t ret;
2447 unsigned int idx;
2448
2449 idx = SUBPAGE_IDX(addr - mmio->base);
2450#if defined(DEBUG_SUBPAGE)
2451 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2452 mmio, len, addr, idx);
2453#endif
3ee89922 2454 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
db7b5426
BS
2455
2456 return ret;
2457}
2458
2459static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2460 uint32_t value, unsigned int len)
2461{
db7b5426
BS
2462 unsigned int idx;
2463
2464 idx = SUBPAGE_IDX(addr - mmio->base);
2465#if defined(DEBUG_SUBPAGE)
2466 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2467 mmio, len, addr, idx, value);
2468#endif
3ee89922 2469 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
db7b5426
BS
2470}
2471
2472static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2473{
2474#if defined(DEBUG_SUBPAGE)
2475 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2476#endif
2477
2478 return subpage_readlen(opaque, addr, 0);
2479}
2480
2481static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2482 uint32_t value)
2483{
2484#if defined(DEBUG_SUBPAGE)
2485 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2486#endif
2487 subpage_writelen(opaque, addr, value, 0);
2488}
2489
2490static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2491{
2492#if defined(DEBUG_SUBPAGE)
2493 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2494#endif
2495
2496 return subpage_readlen(opaque, addr, 1);
2497}
2498
2499static void subpage_writew (void *opaque, target_phys_addr_t addr,
2500 uint32_t value)
2501{
2502#if defined(DEBUG_SUBPAGE)
2503 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2504#endif
2505 subpage_writelen(opaque, addr, value, 1);
2506}
2507
2508static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2509{
2510#if defined(DEBUG_SUBPAGE)
2511 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2512#endif
2513
2514 return subpage_readlen(opaque, addr, 2);
2515}
2516
2517static void subpage_writel (void *opaque,
2518 target_phys_addr_t addr, uint32_t value)
2519{
2520#if defined(DEBUG_SUBPAGE)
2521 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2522#endif
2523 subpage_writelen(opaque, addr, value, 2);
2524}
2525
2526static CPUReadMemoryFunc *subpage_read[] = {
2527 &subpage_readb,
2528 &subpage_readw,
2529 &subpage_readl,
2530};
2531
2532static CPUWriteMemoryFunc *subpage_write[] = {
2533 &subpage_writeb,
2534 &subpage_writew,
2535 &subpage_writel,
2536};
2537
2538static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a 2539 ram_addr_t memory)
db7b5426
BS
2540{
2541 int idx, eidx;
4254fab8 2542 unsigned int i;
db7b5426
BS
2543
2544 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2545 return -1;
2546 idx = SUBPAGE_IDX(start);
2547 eidx = SUBPAGE_IDX(end);
2548#if defined(DEBUG_SUBPAGE)
2549 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2550 mmio, start, end, idx, eidx, memory);
2551#endif
2552 memory >>= IO_MEM_SHIFT;
2553 for (; idx <= eidx; idx++) {
4254fab8 2554 for (i = 0; i < 4; i++) {
3ee89922
BS
2555 if (io_mem_read[memory][i]) {
2556 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2557 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2558 }
2559 if (io_mem_write[memory][i]) {
2560 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2561 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2562 }
4254fab8 2563 }
db7b5426
BS
2564 }
2565
2566 return 0;
2567}
2568
00f82b8a
AJ
2569static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2570 ram_addr_t orig_memory)
db7b5426
BS
2571{
2572 subpage_t *mmio;
2573 int subpage_memory;
2574
2575 mmio = qemu_mallocz(sizeof(subpage_t));
2576 if (mmio != NULL) {
2577 mmio->base = base;
2578 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2579#if defined(DEBUG_SUBPAGE)
2580 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2581 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2582#endif
2583 *phys = subpage_memory | IO_MEM_SUBPAGE;
2584 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2585 }
2586
2587 return mmio;
2588}
2589
33417e70
FB
2590static void io_mem_init(void)
2591{
3a7d929e 2592 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2593 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2594 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2595 io_mem_nb = 5;
2596
0f459d16 2597 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
6658ffb8 2598 watch_mem_write, NULL);
1ccde1cb 2599 /* alloc dirty bits array */
0a962c02 2600 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2601 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2602}
2603
 2604/* mem_read and mem_write are arrays of functions containing the
 2605 function to access byte (index 0), word (index 1) and dword (index
 2606 2). Functions can be omitted with a NULL function pointer. The
 2607 registered functions may be modified dynamically later.
 2608 If io_index is non-zero, the corresponding io zone is
 2609 modified. If it is zero, a new io zone is allocated. The return
 2610 value can be used with cpu_register_physical_memory(). (-1) is
 2611 returned on error. */
2612int cpu_register_io_memory(int io_index,
2613 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2614 CPUWriteMemoryFunc **mem_write,
2615 void *opaque)
33417e70 2616{
4254fab8 2617 int i, subwidth = 0;
33417e70
FB
2618
2619 if (io_index <= 0) {
b5ff1b31 2620 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2621 return -1;
2622 io_index = io_mem_nb++;
2623 } else {
2624 if (io_index >= IO_MEM_NB_ENTRIES)
2625 return -1;
2626 }
b5ff1b31 2627
33417e70 2628 for(i = 0;i < 3; i++) {
4254fab8
BS
2629 if (!mem_read[i] || !mem_write[i])
2630 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2631 io_mem_read[io_index][i] = mem_read[i];
2632 io_mem_write[io_index][i] = mem_write[i];
2633 }
a4193c8a 2634 io_mem_opaque[io_index] = opaque;
4254fab8 2635 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2636}
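
/* Minimal sketch of registering an MMIO region with the API above; the
   device state and callbacks are hypothetical.  Leaving the byte/word slots
   NULL makes this a subwidth region handled through the subpage machinery. */
typedef struct {
    uint32_t reg;
} example_state;

static uint32_t example_readl(void *opaque, target_phys_addr_t addr)
{
    example_state *s = opaque;
    return s->reg;
}

static void example_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    example_state *s = opaque;
    s->reg = val;
}

static CPUReadMemoryFunc *example_read[3] = {
    NULL, NULL, example_readl,
};

static CPUWriteMemoryFunc *example_write[3] = {
    NULL, NULL, example_writel,
};

static void example_mmio_register(target_phys_addr_t base, example_state *s)
{
    int io;

    io = cpu_register_io_memory(0, example_read, example_write, s);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}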
61382a50 2637
8926b517
FB
2638CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2639{
2640 return io_mem_write[io_index >> IO_MEM_SHIFT];
2641}
2642
2643CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2644{
2645 return io_mem_read[io_index >> IO_MEM_SHIFT];
2646}
2647
e2eef170
PB
2648#endif /* !defined(CONFIG_USER_ONLY) */
2649
13eb76e0
FB
2650/* physical memory access (slow version, mainly for debug) */
2651#if defined(CONFIG_USER_ONLY)
5fafdf24 2652void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2653 int len, int is_write)
2654{
2655 int l, flags;
2656 target_ulong page;
53a5960a 2657 void * p;
13eb76e0
FB
2658
2659 while (len > 0) {
2660 page = addr & TARGET_PAGE_MASK;
2661 l = (page + TARGET_PAGE_SIZE) - addr;
2662 if (l > len)
2663 l = len;
2664 flags = page_get_flags(page);
2665 if (!(flags & PAGE_VALID))
2666 return;
2667 if (is_write) {
2668 if (!(flags & PAGE_WRITE))
2669 return;
579a97f7 2670 /* XXX: this code should not depend on lock_user */
72fb7daa 2671 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2672 /* FIXME - should this return an error rather than just fail? */
2673 return;
72fb7daa
AJ
2674 memcpy(p, buf, l);
2675 unlock_user(p, addr, l);
13eb76e0
FB
2676 } else {
2677 if (!(flags & PAGE_READ))
2678 return;
579a97f7 2679 /* XXX: this code should not depend on lock_user */
72fb7daa 2680 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2681 /* FIXME - should this return an error rather than just fail? */
2682 return;
72fb7daa 2683 memcpy(buf, p, l);
5b257578 2684 unlock_user(p, addr, 0);
13eb76e0
FB
2685 }
2686 len -= l;
2687 buf += l;
2688 addr += l;
2689 }
2690}
8df1cd07 2691
13eb76e0 2692#else
5fafdf24 2693void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2694 int len, int is_write)
2695{
2696 int l, io_index;
2697 uint8_t *ptr;
2698 uint32_t val;
2e12669a
FB
2699 target_phys_addr_t page;
2700 unsigned long pd;
92e873b9 2701 PhysPageDesc *p;
3b46e624 2702
13eb76e0
FB
2703 while (len > 0) {
2704 page = addr & TARGET_PAGE_MASK;
2705 l = (page + TARGET_PAGE_SIZE) - addr;
2706 if (l > len)
2707 l = len;
92e873b9 2708 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2709 if (!p) {
2710 pd = IO_MEM_UNASSIGNED;
2711 } else {
2712 pd = p->phys_offset;
2713 }
3b46e624 2714
13eb76e0 2715 if (is_write) {
3a7d929e 2716 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2717 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2718 /* XXX: could force cpu_single_env to NULL to avoid
2719 potential bugs */
13eb76e0 2720 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2721 /* 32 bit write access */
c27004ec 2722 val = ldl_p(buf);
a4193c8a 2723 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2724 l = 4;
2725 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2726 /* 16 bit write access */
c27004ec 2727 val = lduw_p(buf);
a4193c8a 2728 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2729 l = 2;
2730 } else {
1c213d19 2731 /* 8 bit write access */
c27004ec 2732 val = ldub_p(buf);
a4193c8a 2733 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2734 l = 1;
2735 }
2736 } else {
b448f2f3
FB
2737 unsigned long addr1;
2738 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2739 /* RAM case */
b448f2f3 2740 ptr = phys_ram_base + addr1;
13eb76e0 2741 memcpy(ptr, buf, l);
3a7d929e
FB
2742 if (!cpu_physical_memory_is_dirty(addr1)) {
2743 /* invalidate code */
2744 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2745 /* set dirty bit */
5fafdf24 2746 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2747 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2748 }
13eb76e0
FB
2749 }
2750 } else {
5fafdf24 2751 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2752 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2753 /* I/O case */
2754 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2755 if (l >= 4 && ((addr & 3) == 0)) {
2756 /* 32 bit read access */
a4193c8a 2757 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2758 stl_p(buf, val);
13eb76e0
FB
2759 l = 4;
2760 } else if (l >= 2 && ((addr & 1) == 0)) {
2761 /* 16 bit read access */
a4193c8a 2762 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2763 stw_p(buf, val);
13eb76e0
FB
2764 l = 2;
2765 } else {
1c213d19 2766 /* 8 bit read access */
a4193c8a 2767 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2768 stb_p(buf, val);
13eb76e0
FB
2769 l = 1;
2770 }
2771 } else {
2772 /* RAM case */
5fafdf24 2773 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
2774 (addr & ~TARGET_PAGE_MASK);
2775 memcpy(buf, ptr, l);
2776 }
2777 }
2778 len -= l;
2779 buf += l;
2780 addr += l;
2781 }
2782}
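
/* Minimal sketch of a device model doing DMA-style reads through this
   interface; the descriptor layout is made up, and a real device would
   byte-swap the fields it reads out of guest memory. */
struct example_desc {
    uint32_t buf_addr;
    uint32_t buf_len;
};

static void example_fetch_desc(target_phys_addr_t desc_pa,
                               struct example_desc *d)
{
    cpu_physical_memory_read(desc_pa, (uint8_t *)d, sizeof(*d));
}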
8df1cd07 2783
d0ecd2aa 2784/* used for ROM loading : can write in RAM and ROM */
5fafdf24 2785void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2786 const uint8_t *buf, int len)
2787{
2788 int l;
2789 uint8_t *ptr;
2790 target_phys_addr_t page;
2791 unsigned long pd;
2792 PhysPageDesc *p;
3b46e624 2793
d0ecd2aa
FB
2794 while (len > 0) {
2795 page = addr & TARGET_PAGE_MASK;
2796 l = (page + TARGET_PAGE_SIZE) - addr;
2797 if (l > len)
2798 l = len;
2799 p = phys_page_find(page >> TARGET_PAGE_BITS);
2800 if (!p) {
2801 pd = IO_MEM_UNASSIGNED;
2802 } else {
2803 pd = p->phys_offset;
2804 }
3b46e624 2805
d0ecd2aa 2806 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2807 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2808 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2809 /* do nothing */
2810 } else {
2811 unsigned long addr1;
2812 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2813 /* ROM/RAM case */
2814 ptr = phys_ram_base + addr1;
2815 memcpy(ptr, buf, l);
2816 }
2817 len -= l;
2818 buf += l;
2819 addr += l;
2820 }
2821}
2822
2823
8df1cd07
FB
2824/* warning: addr must be aligned */
2825uint32_t ldl_phys(target_phys_addr_t addr)
2826{
2827 int io_index;
2828 uint8_t *ptr;
2829 uint32_t val;
2830 unsigned long pd;
2831 PhysPageDesc *p;
2832
2833 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2834 if (!p) {
2835 pd = IO_MEM_UNASSIGNED;
2836 } else {
2837 pd = p->phys_offset;
2838 }
3b46e624 2839
5fafdf24 2840 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2841 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2842 /* I/O case */
2843 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2844 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2845 } else {
2846 /* RAM case */
5fafdf24 2847 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2848 (addr & ~TARGET_PAGE_MASK);
2849 val = ldl_p(ptr);
2850 }
2851 return val;
2852}
2853
84b7b8e7
FB
2854/* warning: addr must be aligned */
2855uint64_t ldq_phys(target_phys_addr_t addr)
2856{
2857 int io_index;
2858 uint8_t *ptr;
2859 uint64_t val;
2860 unsigned long pd;
2861 PhysPageDesc *p;
2862
2863 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2864 if (!p) {
2865 pd = IO_MEM_UNASSIGNED;
2866 } else {
2867 pd = p->phys_offset;
2868 }
3b46e624 2869
2a4188a3
FB
2870 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2871 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2872 /* I/O case */
2873 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2874#ifdef TARGET_WORDS_BIGENDIAN
2875 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2876 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2877#else
2878 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2879 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2880#endif
2881 } else {
2882 /* RAM case */
5fafdf24 2883 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
2884 (addr & ~TARGET_PAGE_MASK);
2885 val = ldq_p(ptr);
2886 }
2887 return val;
2888}
2889
aab33094
FB
2890/* XXX: optimize */
2891uint32_t ldub_phys(target_phys_addr_t addr)
2892{
2893 uint8_t val;
2894 cpu_physical_memory_read(addr, &val, 1);
2895 return val;
2896}
2897
2898/* XXX: optimize */
2899uint32_t lduw_phys(target_phys_addr_t addr)
2900{
2901 uint16_t val;
2902 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2903 return tswap16(val);
2904}
2905
8df1cd07
FB
 2906/* warning: addr must be aligned. The ram page is not marked as dirty
2907 and the code inside is not invalidated. It is useful if the dirty
2908 bits are used to track modified PTEs */
2909void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2910{
2911 int io_index;
2912 uint8_t *ptr;
2913 unsigned long pd;
2914 PhysPageDesc *p;
2915
2916 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2917 if (!p) {
2918 pd = IO_MEM_UNASSIGNED;
2919 } else {
2920 pd = p->phys_offset;
2921 }
3b46e624 2922
3a7d929e 2923 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2924 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2925 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2926 } else {
5fafdf24 2927 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2928 (addr & ~TARGET_PAGE_MASK);
2929 stl_p(ptr, val);
2930 }
2931}
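
/* Minimal sketch of the use case from the comment above: a guest MMU walk
   setting an accessed bit in a page-table entry without disturbing the
   dirty-bit bookkeeping.  EXAMPLE_PTE_ACCESSED and the PTE layout are
   illustrative. */
#define EXAMPLE_PTE_ACCESSED 0x20

static void example_mark_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & EXAMPLE_PTE_ACCESSED)) {
        stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_ACCESSED);
    }
}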
2932
bc98a7ef
JM
2933void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2934{
2935 int io_index;
2936 uint8_t *ptr;
2937 unsigned long pd;
2938 PhysPageDesc *p;
2939
2940 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2941 if (!p) {
2942 pd = IO_MEM_UNASSIGNED;
2943 } else {
2944 pd = p->phys_offset;
2945 }
3b46e624 2946
bc98a7ef
JM
2947 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2948 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2949#ifdef TARGET_WORDS_BIGENDIAN
2950 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2951 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2952#else
2953 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2954 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2955#endif
2956 } else {
5fafdf24 2957 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
2958 (addr & ~TARGET_PAGE_MASK);
2959 stq_p(ptr, val);
2960 }
2961}
2962
8df1cd07 2963/* warning: addr must be aligned */
8df1cd07
FB
2964void stl_phys(target_phys_addr_t addr, uint32_t val)
2965{
2966 int io_index;
2967 uint8_t *ptr;
2968 unsigned long pd;
2969 PhysPageDesc *p;
2970
2971 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2972 if (!p) {
2973 pd = IO_MEM_UNASSIGNED;
2974 } else {
2975 pd = p->phys_offset;
2976 }
3b46e624 2977
3a7d929e 2978 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2979 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2980 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2981 } else {
2982 unsigned long addr1;
2983 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2984 /* RAM case */
2985 ptr = phys_ram_base + addr1;
2986 stl_p(ptr, val);
3a7d929e
FB
2987 if (!cpu_physical_memory_is_dirty(addr1)) {
2988 /* invalidate code */
2989 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2990 /* set dirty bit */
f23db169
FB
2991 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2992 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2993 }
8df1cd07
FB
2994 }
2995}
2996
aab33094
FB
2997/* XXX: optimize */
2998void stb_phys(target_phys_addr_t addr, uint32_t val)
2999{
3000 uint8_t v = val;
3001 cpu_physical_memory_write(addr, &v, 1);
3002}
3003
3004/* XXX: optimize */
3005void stw_phys(target_phys_addr_t addr, uint32_t val)
3006{
3007 uint16_t v = tswap16(val);
3008 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3009}
3010
3011/* XXX: optimize */
3012void stq_phys(target_phys_addr_t addr, uint64_t val)
3013{
3014 val = tswap64(val);
3015 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3016}
3017
13eb76e0
FB
3018#endif
3019
3020/* virtual memory access for debug */
5fafdf24 3021int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 3022 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3023{
3024 int l;
9b3c35e0
JM
3025 target_phys_addr_t phys_addr;
3026 target_ulong page;
13eb76e0
FB
3027
3028 while (len > 0) {
3029 page = addr & TARGET_PAGE_MASK;
3030 phys_addr = cpu_get_phys_page_debug(env, page);
3031 /* if no physical page mapped, return an error */
3032 if (phys_addr == -1)
3033 return -1;
3034 l = (page + TARGET_PAGE_SIZE) - addr;
3035 if (l > len)
3036 l = len;
5fafdf24 3037 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 3038 buf, l, is_write);
13eb76e0
FB
3039 len -= l;
3040 buf += l;
3041 addr += l;
3042 }
3043 return 0;
3044}
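
/* Minimal sketch of a debugger-style caller inserting a software breakpoint
   through the virtual-memory debug interface; the 0xcc trap opcode is an
   x86-flavoured illustration. */
static int example_insert_sw_breakpoint(CPUState *env, target_ulong pc,
                                        uint8_t *saved_insn)
{
    uint8_t bp_insn = 0xcc;

    /* save the original byte, then patch in the trap instruction */
    if (cpu_memory_rw_debug(env, pc, saved_insn, 1, 0) != 0)
        return -1;
    return cpu_memory_rw_debug(env, pc, &bp_insn, 1, 1);
}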
3045
2e70f6ef
PB
3046/* in deterministic execution mode, instructions doing device I/Os
3047 must be at the end of the TB */
3048void cpu_io_recompile(CPUState *env, void *retaddr)
3049{
3050 TranslationBlock *tb;
3051 uint32_t n, cflags;
3052 target_ulong pc, cs_base;
3053 uint64_t flags;
3054
3055 tb = tb_find_pc((unsigned long)retaddr);
3056 if (!tb) {
3057 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3058 retaddr);
3059 }
3060 n = env->icount_decr.u16.low + tb->icount;
3061 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3062 /* Calculate how many instructions had been executed before the fault
bf20dc07 3063 occurred. */
2e70f6ef
PB
3064 n = n - env->icount_decr.u16.low;
3065 /* Generate a new TB ending on the I/O insn. */
3066 n++;
3067 /* On MIPS and SH, delay slot instructions can only be restarted if
3068 they were already the first instruction in the TB. If this is not
bf20dc07 3069 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
3070 branch. */
3071#if defined(TARGET_MIPS)
3072 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3073 env->active_tc.PC -= 4;
3074 env->icount_decr.u16.low++;
3075 env->hflags &= ~MIPS_HFLAG_BMASK;
3076 }
3077#elif defined(TARGET_SH4)
3078 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3079 && n > 1) {
3080 env->pc -= 2;
3081 env->icount_decr.u16.low++;
3082 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3083 }
3084#endif
3085 /* This should never happen. */
3086 if (n > CF_COUNT_MASK)
3087 cpu_abort(env, "TB too big during recompile");
3088
3089 cflags = n | CF_LAST_IO;
3090 pc = tb->pc;
3091 cs_base = tb->cs_base;
3092 flags = tb->flags;
3093 tb_phys_invalidate(tb, -1);
3094 /* FIXME: In theory this could raise an exception. In practice
3095 we have already translated the block once so it's probably ok. */
3096 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 3097 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
3098 the first in the TB) then we end up generating a whole new TB and
3099 repeating the fault, which is horribly inefficient.
3100 Better would be to execute just this insn uncached, or generate a
3101 second new TB. */
3102 cpu_resume_from_signal(env, NULL);
3103}
3104
e3db7226
FB
3105void dump_exec_info(FILE *f,
3106 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3107{
3108 int i, target_code_size, max_target_code_size;
3109 int direct_jmp_count, direct_jmp2_count, cross_page;
3110 TranslationBlock *tb;
3b46e624 3111
e3db7226
FB
3112 target_code_size = 0;
3113 max_target_code_size = 0;
3114 cross_page = 0;
3115 direct_jmp_count = 0;
3116 direct_jmp2_count = 0;
3117 for(i = 0; i < nb_tbs; i++) {
3118 tb = &tbs[i];
3119 target_code_size += tb->size;
3120 if (tb->size > max_target_code_size)
3121 max_target_code_size = tb->size;
3122 if (tb->page_addr[1] != -1)
3123 cross_page++;
3124 if (tb->tb_next_offset[0] != 0xffff) {
3125 direct_jmp_count++;
3126 if (tb->tb_next_offset[1] != 0xffff) {
3127 direct_jmp2_count++;
3128 }
3129 }
3130 }
3131 /* XXX: avoid using doubles ? */
57fec1fe 3132 cpu_fprintf(f, "Translation buffer state:\n");
26a5f13b
FB
3133 cpu_fprintf(f, "gen code size %ld/%ld\n",
3134 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3135 cpu_fprintf(f, "TB count %d/%d\n",
3136 nb_tbs, code_gen_max_blocks);
5fafdf24 3137 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3138 nb_tbs ? target_code_size / nb_tbs : 0,
3139 max_target_code_size);
5fafdf24 3140 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3141 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3142 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
3143 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3144 cross_page,
e3db7226
FB
3145 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3146 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3147 direct_jmp_count,
e3db7226
FB
3148 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3149 direct_jmp2_count,
3150 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3151 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
3152 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3153 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3154 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3155 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
3156}
3157
5fafdf24 3158#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3159
3160#define MMUSUFFIX _cmmu
3161#define GETPC() NULL
3162#define env cpu_single_env
b769d8fe 3163#define SOFTMMU_CODE_ACCESS
61382a50
FB
3164
3165#define SHIFT 0
3166#include "softmmu_template.h"
3167
3168#define SHIFT 1
3169#include "softmmu_template.h"
3170
3171#define SHIFT 2
3172#include "softmmu_template.h"
3173
3174#define SHIFT 3
3175#include "softmmu_template.h"
3176
3177#undef env
3178
3179#endif