54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
54936004 4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
67b915a5 20#include "config.h"
d5a8f07c 21#ifdef _WIN32
4fddf62a 22#define WIN32_LEAN_AND_MEAN
d5a8f07c 23#include <windows.h>
24#else
a98d49b1 25#include <sys/types.h>
d5a8f07c 26#include <sys/mman.h>
27#endif
54936004 28#include <stdlib.h>
29#include <stdio.h>
30#include <stdarg.h>
31#include <string.h>
32#include <errno.h>
33#include <unistd.h>
34#include <inttypes.h>
35
6180a181 36#include "cpu.h"
37#include "exec-all.h"
ca10f867 38#include "qemu-common.h"
b67d9a52 39#include "tcg.h"
b3c7724c 40#include "hw/hw.h"
53a5960a 41#if defined(CONFIG_USER_ONLY)
42#include <qemu.h>
43#endif
54936004 44
fd6ce8f6 45//#define DEBUG_TB_INVALIDATE
66e85a21 46//#define DEBUG_FLUSH
9fa3e853 47//#define DEBUG_TLB
67d3b957 48//#define DEBUG_UNASSIGNED
fd6ce8f6 49
50/* make various TB consistency checks */
5fafdf24 51//#define DEBUG_TB_CHECK
52//#define DEBUG_TLB_CHECK
fd6ce8f6 53
1196be37 54//#define DEBUG_IOPORT
db7b5426 55//#define DEBUG_SUBPAGE
1196be37 56
99773bd4 57#if !defined(CONFIG_USER_ONLY)
58/* TB consistency checks only implemented for usermode emulation. */
59#undef DEBUG_TB_CHECK
60#endif
61
9fa3e853 62#define SMC_BITMAP_USE_THRESHOLD 10
63
64#define MMAP_AREA_START 0x00000000
65#define MMAP_AREA_END 0xa8000000
fd6ce8f6 66
108c49b8 67#if defined(TARGET_SPARC64)
68#define TARGET_PHYS_ADDR_SPACE_BITS 41
5dcb6b91 69#elif defined(TARGET_SPARC)
70#define TARGET_PHYS_ADDR_SPACE_BITS 36
bedb69ea 71#elif defined(TARGET_ALPHA)
72#define TARGET_PHYS_ADDR_SPACE_BITS 42
73#define TARGET_VIRT_ADDR_SPACE_BITS 42
108c49b8 74#elif defined(TARGET_PPC64)
75#define TARGET_PHYS_ADDR_SPACE_BITS 42
00f82b8a 76#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
77#define TARGET_PHYS_ADDR_SPACE_BITS 42
78#elif defined(TARGET_I386) && !defined(USE_KQEMU)
79#define TARGET_PHYS_ADDR_SPACE_BITS 36
108c49b8 80#else
81/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
82#define TARGET_PHYS_ADDR_SPACE_BITS 32
83#endif
84
bdaf78e0 85static TranslationBlock *tbs;
26a5f13b 86int code_gen_max_blocks;
9fa3e853 87TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
bdaf78e0 88static int nb_tbs;
eb51d102 89/* any access to the tbs or the page table must use this lock */
90spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 91
141ac468 92#if defined(__arm__) || defined(__sparc_v9__)
93/* The prologue must be reachable with a direct jump. ARM and Sparc64
94 have limited branch ranges (possibly also PPC) so place it in a
d03d860b 95 section close to code segment. */
96#define code_gen_section \
97 __attribute__((__section__(".gen_code"))) \
98 __attribute__((aligned (32)))
99#else
100#define code_gen_section \
101 __attribute__((aligned (32)))
102#endif
103
104uint8_t code_gen_prologue[1024] code_gen_section;
bdaf78e0 105static uint8_t *code_gen_buffer;
106static unsigned long code_gen_buffer_size;
26a5f13b 107/* threshold to flush the translated code buffer */
bdaf78e0 108static unsigned long code_gen_buffer_max_size;
fd6ce8f6 109uint8_t *code_gen_ptr;
110
e2eef170 111#if !defined(CONFIG_USER_ONLY)
00f82b8a 112ram_addr_t phys_ram_size;
9fa3e853 113int phys_ram_fd;
114uint8_t *phys_ram_base;
1ccde1cb 115uint8_t *phys_ram_dirty;
e9a1ab19 116static ram_addr_t phys_ram_alloc_offset = 0;
e2eef170 117#endif
9fa3e853 118
6a00d601 119CPUState *first_cpu;
120/* current CPU in the current thread. It is only valid inside
121 cpu_exec() */
5fafdf24 122CPUState *cpu_single_env;
2e70f6ef 123/* 0 = Do not count executed instructions.
bf20dc07 124 1 = Precise instruction counting.
2e70f6ef 125 2 = Adaptive rate instruction counting. */
126int use_icount = 0;
127/* Current instruction counter. While executing translated code this may
128 include some instructions that have not yet been executed. */
129int64_t qemu_icount;
6a00d601 130
54936004 131typedef struct PageDesc {
92e873b9 132 /* list of TBs intersecting this ram page */
fd6ce8f6 133 TranslationBlock *first_tb;
9fa3e853 134 /* in order to optimize self modifying code, we count the number
135 of lookups we do to a given page to use a bitmap */
136 unsigned int code_write_count;
137 uint8_t *code_bitmap;
138#if defined(CONFIG_USER_ONLY)
139 unsigned long flags;
140#endif
54936004 141} PageDesc;
142
92e873b9 143typedef struct PhysPageDesc {
0f459d16 144 /* offset in host memory of the page + io_index in the low bits */
00f82b8a 145 ram_addr_t phys_offset;
92e873b9 146} PhysPageDesc;
147
54936004 148#define L2_BITS 10
bedb69ea 149#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
150/* XXX: this is a temporary hack for alpha target.
151 * In the future, this is to be replaced by a multi-level table
152 * to actually be able to handle the complete 64 bits address space.
153 */
154#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
155#else
03875444 156#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
bedb69ea 157#endif
54936004 158
159#define L1_SIZE (1 << L1_BITS)
160#define L2_SIZE (1 << L2_BITS)
161
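/* Sizing example (assuming 4 KB target pages, i.e. TARGET_PAGE_BITS == 12):
 * on a 32-bit target L1_BITS = 32 - 10 - 12 = 10, so l1_map has 1024 entries
 * and each second-level table holds 1024 PageDescs, covering the full 2^32
 * byte virtual address space one TARGET_PAGE_SIZE page at a time. */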
83fb7adf 162unsigned long qemu_real_host_page_size;
163unsigned long qemu_host_page_bits;
164unsigned long qemu_host_page_size;
165unsigned long qemu_host_page_mask;
54936004 166
92e873b9 167/* XXX: for system emulation, it could just be an array */
54936004 168static PageDesc *l1_map[L1_SIZE];
bdaf78e0 169static PhysPageDesc **l1_phys_map;
54936004 170
e2eef170 171#if !defined(CONFIG_USER_ONLY)
172static void io_mem_init(void);
173
33417e70 174/* io memory support */
33417e70 175CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
176CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 177void *io_mem_opaque[IO_MEM_NB_ENTRIES];
33417e70 178static int io_mem_nb;
6658ffb8 179static int io_mem_watch;
180#endif
33417e70 181
34865134 182/* log support */
7ccfb2eb 183const char *logfilename = "/tmp/qemu.log";
34865134 184FILE *logfile;
185int loglevel;
e735b91c 186static int log_append = 0;
34865134 187
e3db7226 188/* statistics */
189static int tlb_flush_count;
190static int tb_flush_count;
191static int tb_phys_invalidate_count;
192
db7b5426 193#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
194typedef struct subpage_t {
195 target_phys_addr_t base;
3ee89922 196 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
197 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
198 void *opaque[TARGET_PAGE_SIZE][2][4];
db7b5426 199} subpage_t;
200
7cb69cae 201#ifdef _WIN32
202static void map_exec(void *addr, long size)
203{
204 DWORD old_protect;
205 VirtualProtect(addr, size,
206 PAGE_EXECUTE_READWRITE, &old_protect);
207
208}
209#else
210static void map_exec(void *addr, long size)
211{
4369415f 212 unsigned long start, end, page_size;
7cb69cae 213
4369415f 214 page_size = getpagesize();
7cb69cae 215 start = (unsigned long)addr;
4369415f 216 start &= ~(page_size - 1);
7cb69cae
FB
217
218 end = (unsigned long)addr + size;
4369415f
FB
219 end += page_size - 1;
220 end &= ~(page_size - 1);
7cb69cae
FB
221
222 mprotect((void *)start, end - start,
223 PROT_READ | PROT_WRITE | PROT_EXEC);
224}
225#endif
226
b346ff46 227static void page_init(void)
54936004 228{
83fb7adf 229 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 230 TARGET_PAGE_SIZE */
67b915a5 231#ifdef _WIN32
d5a8f07c
FB
232 {
233 SYSTEM_INFO system_info;
234 DWORD old_protect;
3b46e624 235
d5a8f07c
FB
236 GetSystemInfo(&system_info);
237 qemu_real_host_page_size = system_info.dwPageSize;
d5a8f07c 238 }
67b915a5 239#else
83fb7adf 240 qemu_real_host_page_size = getpagesize();
67b915a5 241#endif
83fb7adf
FB
242 if (qemu_host_page_size == 0)
243 qemu_host_page_size = qemu_real_host_page_size;
244 if (qemu_host_page_size < TARGET_PAGE_SIZE)
245 qemu_host_page_size = TARGET_PAGE_SIZE;
246 qemu_host_page_bits = 0;
247 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
248 qemu_host_page_bits++;
249 qemu_host_page_mask = ~(qemu_host_page_size - 1);
108c49b8
FB
250 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
251 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
50a9569b
AZ
252
253#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
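/* For user-mode emulation on POSIX hosts, walk /proc/self/maps and mark
 * every range already mapped by the host process as PAGE_RESERVED, so that
 * later guest allocations are kept away from pages the emulator itself is
 * using. */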
254 {
255 long long startaddr, endaddr;
256 FILE *f;
257 int n;
258
c8a706fe 259 mmap_lock();
0776590d 260 last_brk = (unsigned long)sbrk(0);
50a9569b
AZ
261 f = fopen("/proc/self/maps", "r");
262 if (f) {
263 do {
264 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
265 if (n == 2) {
e0b8d65a 266 startaddr = MIN(startaddr,
267 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
268 endaddr = MIN(endaddr,
269 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
b5fc909e 270 page_set_flags(startaddr & TARGET_PAGE_MASK,
50a9569b
AZ
271 TARGET_PAGE_ALIGN(endaddr),
272 PAGE_RESERVED);
273 }
274 } while (!feof(f));
275 fclose(f);
276 }
c8a706fe 277 mmap_unlock();
50a9569b
AZ
278 }
279#endif
54936004
FB
280}
281
434929bf 282static inline PageDesc **page_l1_map(target_ulong index)
54936004 283{
17e2377a
PB
284#if TARGET_LONG_BITS > 32
285 /* Host memory outside guest VM. For 32-bit targets we have already
286 excluded high addresses. */
d8173e0f 287 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
17e2377a
PB
288 return NULL;
289#endif
434929bf
AL
290 return &l1_map[index >> L2_BITS];
291}
292
293static inline PageDesc *page_find_alloc(target_ulong index)
294{
295 PageDesc **lp, *p;
296 lp = page_l1_map(index);
297 if (!lp)
298 return NULL;
299
54936004
FB
300 p = *lp;
301 if (!p) {
302 /* allocate if not found */
17e2377a
PB
303#if defined(CONFIG_USER_ONLY)
304 unsigned long addr;
305 size_t len = sizeof(PageDesc) * L2_SIZE;
306 /* Don't use qemu_malloc because it may recurse. */
307 p = mmap(0, len, PROT_READ | PROT_WRITE,
308 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
54936004 309 *lp = p;
17e2377a
PB
310 addr = h2g(p);
311 if (addr == (target_ulong)addr) {
312 page_set_flags(addr & TARGET_PAGE_MASK,
313 TARGET_PAGE_ALIGN(addr + len),
314 PAGE_RESERVED);
315 }
316#else
317 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
318 *lp = p;
319#endif
54936004
FB
320 }
321 return p + (index & (L2_SIZE - 1));
322}
323
00f82b8a 324static inline PageDesc *page_find(target_ulong index)
54936004 325{
434929bf
AL
326 PageDesc **lp, *p;
327 lp = page_l1_map(index);
328 if (!lp)
329 return NULL;
54936004 330
434929bf 331 p = *lp;
54936004
FB
332 if (!p)
333 return 0;
fd6ce8f6
FB
334 return p + (index & (L2_SIZE - 1));
335}
336
108c49b8 337static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 338{
108c49b8 339 void **lp, **p;
e3f4e2a4 340 PhysPageDesc *pd;
92e873b9 341
108c49b8
FB
342 p = (void **)l1_phys_map;
343#if TARGET_PHYS_ADDR_SPACE_BITS > 32
344
345#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
346#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
347#endif
348 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
92e873b9
FB
349 p = *lp;
350 if (!p) {
351 /* allocate if not found */
108c49b8
FB
352 if (!alloc)
353 return NULL;
354 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
355 memset(p, 0, sizeof(void *) * L1_SIZE);
356 *lp = p;
357 }
358#endif
359 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
e3f4e2a4
PB
360 pd = *lp;
361 if (!pd) {
362 int i;
108c49b8
FB
363 /* allocate if not found */
364 if (!alloc)
365 return NULL;
e3f4e2a4
PB
366 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
367 *lp = pd;
368 for (i = 0; i < L2_SIZE; i++)
369 pd[i].phys_offset = IO_MEM_UNASSIGNED;
92e873b9 370 }
e3f4e2a4 371 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
92e873b9
FB
372}
373
108c49b8 374static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 375{
108c49b8 376 return phys_page_find_alloc(index, 0);
92e873b9
FB
377}
378
9fa3e853 379#if !defined(CONFIG_USER_ONLY)
6a00d601 380static void tlb_protect_code(ram_addr_t ram_addr);
5fafdf24 381static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 382 target_ulong vaddr);
c8a706fe
PB
383#define mmap_lock() do { } while(0)
384#define mmap_unlock() do { } while(0)
9fa3e853 385#endif
fd6ce8f6 386
4369415f
FB
387#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
388
389#if defined(CONFIG_USER_ONLY)
390/* Currently it is not recommended to allocate big chunks of data in
391 user mode. It will change when a dedicated libc will be used */
392#define USE_STATIC_CODE_GEN_BUFFER
393#endif
394
395#ifdef USE_STATIC_CODE_GEN_BUFFER
396static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
397#endif
398
8fcd3692 399static void code_gen_alloc(unsigned long tb_size)
26a5f13b 400{
4369415f
FB
401#ifdef USE_STATIC_CODE_GEN_BUFFER
402 code_gen_buffer = static_code_gen_buffer;
403 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
404 map_exec(code_gen_buffer, code_gen_buffer_size);
405#else
26a5f13b
FB
406 code_gen_buffer_size = tb_size;
407 if (code_gen_buffer_size == 0) {
4369415f
FB
408#if defined(CONFIG_USER_ONLY)
409 /* in user mode, phys_ram_size is not meaningful */
410 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
411#else
26a5f13b 412 /* XXX: needs adjustments */
174a9a1f 413 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
4369415f 414#endif
26a5f13b
FB
415 }
416 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
417 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
418 /* The code gen buffer location may have constraints depending on
419 the host cpu and OS */
420#if defined(__linux__)
421 {
422 int flags;
141ac468
BS
423 void *start = NULL;
424
26a5f13b
FB
425 flags = MAP_PRIVATE | MAP_ANONYMOUS;
426#if defined(__x86_64__)
427 flags |= MAP_32BIT;
428 /* Cannot map more than that */
429 if (code_gen_buffer_size > (800 * 1024 * 1024))
430 code_gen_buffer_size = (800 * 1024 * 1024);
141ac468
BS
431#elif defined(__sparc_v9__)
432 // Map the buffer below 2G, so we can use direct calls and branches
433 flags |= MAP_FIXED;
434 start = (void *) 0x60000000UL;
435 if (code_gen_buffer_size > (512 * 1024 * 1024))
436 code_gen_buffer_size = (512 * 1024 * 1024);
26a5f13b 437#endif
141ac468 438 code_gen_buffer = mmap(start, code_gen_buffer_size,
 439 PROT_WRITE | PROT_READ | PROT_EXEC,
26a5f13b 440 flags, -1, 0);
441 if (code_gen_buffer == MAP_FAILED) {
442 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
443 exit(1);
444 }
445 }
06e67a82
AL
446#elif defined(__FreeBSD__)
447 {
448 int flags;
449 void *addr = NULL;
450 flags = MAP_PRIVATE | MAP_ANONYMOUS;
451#if defined(__x86_64__)
452 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
453 * 0x40000000 is free */
454 flags |= MAP_FIXED;
455 addr = (void *)0x40000000;
456 /* Cannot map more than that */
457 if (code_gen_buffer_size > (800 * 1024 * 1024))
458 code_gen_buffer_size = (800 * 1024 * 1024);
459#endif
460 code_gen_buffer = mmap(addr, code_gen_buffer_size,
461 PROT_WRITE | PROT_READ | PROT_EXEC,
462 flags, -1, 0);
463 if (code_gen_buffer == MAP_FAILED) {
464 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
465 exit(1);
466 }
467 }
26a5f13b
FB
468#else
469 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
470 if (!code_gen_buffer) {
471 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
472 exit(1);
473 }
474 map_exec(code_gen_buffer, code_gen_buffer_size);
475#endif
4369415f 476#endif /* !USE_STATIC_CODE_GEN_BUFFER */
26a5f13b
FB
477 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
478 code_gen_buffer_max_size = code_gen_buffer_size -
479 code_gen_max_block_size();
480 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
481 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
482}
483
484/* Must be called before using the QEMU cpus. 'tb_size' is the size
485 (in bytes) allocated to the translation buffer. Zero means default
486 size. */
487void cpu_exec_init_all(unsigned long tb_size)
488{
26a5f13b
FB
489 cpu_gen_init();
490 code_gen_alloc(tb_size);
491 code_gen_ptr = code_gen_buffer;
4369415f 492 page_init();
e2eef170 493#if !defined(CONFIG_USER_ONLY)
26a5f13b 494 io_mem_init();
e2eef170 495#endif
26a5f13b
FB
496}
497
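/* Illustrative use (the real callers live outside this file, e.g. the
 * system emulator's main(); the names below are only an example):
 *
 *     cpu_exec_init_all(0);            // 0 => default translation buffer size
 *     env = cpu_init(cpu_model_str);   // allocates a CPUState and ends up
 *                                      // calling cpu_exec_init() on it
 */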
9656f324
PB
498#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
499
500#define CPU_COMMON_SAVE_VERSION 1
501
502static void cpu_common_save(QEMUFile *f, void *opaque)
503{
504 CPUState *env = opaque;
505
506 qemu_put_be32s(f, &env->halted);
507 qemu_put_be32s(f, &env->interrupt_request);
508}
509
510static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
511{
512 CPUState *env = opaque;
513
514 if (version_id != CPU_COMMON_SAVE_VERSION)
515 return -EINVAL;
516
517 qemu_get_be32s(f, &env->halted);
75f482ae 518 qemu_get_be32s(f, &env->interrupt_request);
9656f324
PB
519 tlb_flush(env, 1);
520
521 return 0;
522}
523#endif
524
6a00d601 525void cpu_exec_init(CPUState *env)
fd6ce8f6 526{
6a00d601
FB
527 CPUState **penv;
528 int cpu_index;
529
6a00d601
FB
530 env->next_cpu = NULL;
531 penv = &first_cpu;
532 cpu_index = 0;
533 while (*penv != NULL) {
534 penv = (CPUState **)&(*penv)->next_cpu;
535 cpu_index++;
536 }
537 env->cpu_index = cpu_index;
6658ffb8 538 env->nb_watchpoints = 0;
6a00d601 539 *penv = env;
b3c7724c 540#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
9656f324
PB
541 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
542 cpu_common_save, cpu_common_load, env);
b3c7724c
PB
543 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
544 cpu_save, cpu_load, env);
545#endif
fd6ce8f6
FB
546}
547
9fa3e853
FB
548static inline void invalidate_page_bitmap(PageDesc *p)
549{
550 if (p->code_bitmap) {
59817ccb 551 qemu_free(p->code_bitmap);
9fa3e853
FB
552 p->code_bitmap = NULL;
553 }
554 p->code_write_count = 0;
555}
556
fd6ce8f6
FB
557/* set to NULL all the 'first_tb' fields in all PageDescs */
558static void page_flush_tb(void)
559{
560 int i, j;
561 PageDesc *p;
562
563 for(i = 0; i < L1_SIZE; i++) {
564 p = l1_map[i];
565 if (p) {
9fa3e853
FB
566 for(j = 0; j < L2_SIZE; j++) {
567 p->first_tb = NULL;
568 invalidate_page_bitmap(p);
569 p++;
570 }
fd6ce8f6
FB
571 }
572 }
573}
574
575/* flush all the translation blocks */
d4e8164f 576/* XXX: tb_flush is currently not thread safe */
6a00d601 577void tb_flush(CPUState *env1)
fd6ce8f6 578{
6a00d601 579 CPUState *env;
0124311e 580#if defined(DEBUG_FLUSH)
ab3d1727
BS
581 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
582 (unsigned long)(code_gen_ptr - code_gen_buffer),
583 nb_tbs, nb_tbs > 0 ?
584 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 585#endif
26a5f13b 586 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
587 cpu_abort(env1, "Internal error: code buffer overflow\n");
588
fd6ce8f6 589 nb_tbs = 0;
3b46e624 590
6a00d601
FB
591 for(env = first_cpu; env != NULL; env = env->next_cpu) {
592 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
593 }
9fa3e853 594
8a8a608f 595 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 596 page_flush_tb();
9fa3e853 597
fd6ce8f6 598 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
599 /* XXX: flush processor icache at this point if cache flush is
600 expensive */
e3db7226 601 tb_flush_count++;
fd6ce8f6
FB
602}
603
604#ifdef DEBUG_TB_CHECK
605
bc98a7ef 606static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
607{
608 TranslationBlock *tb;
609 int i;
610 address &= TARGET_PAGE_MASK;
99773bd4
PB
611 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
612 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
613 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
614 address >= tb->pc + tb->size)) {
615 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 616 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
617 }
618 }
619 }
620}
621
622/* verify that all the pages have correct rights for code */
623static void tb_page_check(void)
624{
625 TranslationBlock *tb;
626 int i, flags1, flags2;
3b46e624 627
99773bd4
PB
628 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
629 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
630 flags1 = page_get_flags(tb->pc);
631 flags2 = page_get_flags(tb->pc + tb->size - 1);
632 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
633 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 634 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
635 }
636 }
637 }
638}
639
bdaf78e0 640static void tb_jmp_check(TranslationBlock *tb)
d4e8164f
FB
641{
642 TranslationBlock *tb1;
643 unsigned int n1;
644
645 /* suppress any remaining jumps to this TB */
646 tb1 = tb->jmp_first;
647 for(;;) {
648 n1 = (long)tb1 & 3;
649 tb1 = (TranslationBlock *)((long)tb1 & ~3);
650 if (n1 == 2)
651 break;
652 tb1 = tb1->jmp_next[n1];
653 }
654 /* check end of list */
655 if (tb1 != tb) {
656 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
657 }
658}
659
fd6ce8f6
FB
660#endif
661
662/* invalidate one TB */
663static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
664 int next_offset)
665{
666 TranslationBlock *tb1;
667 for(;;) {
668 tb1 = *ptb;
669 if (tb1 == tb) {
670 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
671 break;
672 }
673 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
674 }
675}
676
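/* The lists below tag the low two bits of each TranslationBlock pointer:
 * entries chained through page_next[] carry the page index (0 or 1) of the
 * TB within that page, and the circular jmp_first/jmp_next chains use the
 * value 2 as an end-of-list marker (see tb_link_phys()). The "& 3" / "& ~3"
 * operations in the helpers below extract and strip that tag. */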
9fa3e853
FB
677static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
678{
679 TranslationBlock *tb1;
680 unsigned int n1;
681
682 for(;;) {
683 tb1 = *ptb;
684 n1 = (long)tb1 & 3;
685 tb1 = (TranslationBlock *)((long)tb1 & ~3);
686 if (tb1 == tb) {
687 *ptb = tb1->page_next[n1];
688 break;
689 }
690 ptb = &tb1->page_next[n1];
691 }
692}
693
d4e8164f
FB
694static inline void tb_jmp_remove(TranslationBlock *tb, int n)
695{
696 TranslationBlock *tb1, **ptb;
697 unsigned int n1;
698
699 ptb = &tb->jmp_next[n];
700 tb1 = *ptb;
701 if (tb1) {
702 /* find tb(n) in circular list */
703 for(;;) {
704 tb1 = *ptb;
705 n1 = (long)tb1 & 3;
706 tb1 = (TranslationBlock *)((long)tb1 & ~3);
707 if (n1 == n && tb1 == tb)
708 break;
709 if (n1 == 2) {
710 ptb = &tb1->jmp_first;
711 } else {
712 ptb = &tb1->jmp_next[n1];
713 }
714 }
715 /* now we can suppress tb(n) from the list */
716 *ptb = tb->jmp_next[n];
717
718 tb->jmp_next[n] = NULL;
719 }
720}
721
722/* reset the jump entry 'n' of a TB so that it is not chained to
723 another TB */
724static inline void tb_reset_jump(TranslationBlock *tb, int n)
725{
726 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
727}
728
2e70f6ef 729void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
fd6ce8f6 730{
6a00d601 731 CPUState *env;
8a40a180 732 PageDesc *p;
d4e8164f 733 unsigned int h, n1;
00f82b8a 734 target_phys_addr_t phys_pc;
8a40a180 735 TranslationBlock *tb1, *tb2;
3b46e624 736
8a40a180
FB
737 /* remove the TB from the hash list */
738 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
739 h = tb_phys_hash_func(phys_pc);
5fafdf24 740 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
741 offsetof(TranslationBlock, phys_hash_next));
742
743 /* remove the TB from the page list */
744 if (tb->page_addr[0] != page_addr) {
745 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
746 tb_page_remove(&p->first_tb, tb);
747 invalidate_page_bitmap(p);
748 }
749 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
750 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
751 tb_page_remove(&p->first_tb, tb);
752 invalidate_page_bitmap(p);
753 }
754
36bdbe54 755 tb_invalidated_flag = 1;
59817ccb 756
fd6ce8f6 757 /* remove the TB from the hash list */
8a40a180 758 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
759 for(env = first_cpu; env != NULL; env = env->next_cpu) {
760 if (env->tb_jmp_cache[h] == tb)
761 env->tb_jmp_cache[h] = NULL;
762 }
d4e8164f
FB
763
764 /* suppress this TB from the two jump lists */
765 tb_jmp_remove(tb, 0);
766 tb_jmp_remove(tb, 1);
767
768 /* suppress any remaining jumps to this TB */
769 tb1 = tb->jmp_first;
770 for(;;) {
771 n1 = (long)tb1 & 3;
772 if (n1 == 2)
773 break;
774 tb1 = (TranslationBlock *)((long)tb1 & ~3);
775 tb2 = tb1->jmp_next[n1];
776 tb_reset_jump(tb1, n1);
777 tb1->jmp_next[n1] = NULL;
778 tb1 = tb2;
779 }
780 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 781
e3db7226 782 tb_phys_invalidate_count++;
9fa3e853
FB
783}
784
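/* Set bits [start, start + len) in the bitmap 'tab'. For example,
 * set_bits(tab, 3, 7) covers bits 3..9: it ORs 0xf8 into tab[0] and 0x03
 * into tab[1]. Used by build_page_bitmap() below. */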
785static inline void set_bits(uint8_t *tab, int start, int len)
786{
787 int end, mask, end1;
788
789 end = start + len;
790 tab += start >> 3;
791 mask = 0xff << (start & 7);
792 if ((start & ~7) == (end & ~7)) {
793 if (start < end) {
794 mask &= ~(0xff << (end & 7));
795 *tab |= mask;
796 }
797 } else {
798 *tab++ |= mask;
799 start = (start + 8) & ~7;
800 end1 = end & ~7;
801 while (start < end1) {
802 *tab++ = 0xff;
803 start += 8;
804 }
805 if (start < end) {
806 mask = ~(0xff << (end & 7));
807 *tab |= mask;
808 }
809 }
810}
811
812static void build_page_bitmap(PageDesc *p)
813{
814 int n, tb_start, tb_end;
815 TranslationBlock *tb;
3b46e624 816
b2a7081a 817 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
818 if (!p->code_bitmap)
819 return;
9fa3e853
FB
820
821 tb = p->first_tb;
822 while (tb != NULL) {
823 n = (long)tb & 3;
824 tb = (TranslationBlock *)((long)tb & ~3);
825 /* NOTE: this is subtle as a TB may span two physical pages */
826 if (n == 0) {
827 /* NOTE: tb_end may be after the end of the page, but
828 it is not a problem */
829 tb_start = tb->pc & ~TARGET_PAGE_MASK;
830 tb_end = tb_start + tb->size;
831 if (tb_end > TARGET_PAGE_SIZE)
832 tb_end = TARGET_PAGE_SIZE;
833 } else {
834 tb_start = 0;
835 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
836 }
837 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
838 tb = tb->page_next[n];
839 }
840}
841
2e70f6ef
PB
842TranslationBlock *tb_gen_code(CPUState *env,
843 target_ulong pc, target_ulong cs_base,
844 int flags, int cflags)
d720b93d
FB
845{
846 TranslationBlock *tb;
847 uint8_t *tc_ptr;
848 target_ulong phys_pc, phys_page2, virt_page2;
849 int code_gen_size;
850
c27004ec
FB
851 phys_pc = get_phys_addr_code(env, pc);
852 tb = tb_alloc(pc);
d720b93d
FB
853 if (!tb) {
854 /* flush must be done */
855 tb_flush(env);
856 /* cannot fail at this point */
c27004ec 857 tb = tb_alloc(pc);
2e70f6ef
PB
858 /* Don't forget to invalidate previous TB info. */
859 tb_invalidated_flag = 1;
d720b93d
FB
860 }
861 tc_ptr = code_gen_ptr;
862 tb->tc_ptr = tc_ptr;
863 tb->cs_base = cs_base;
864 tb->flags = flags;
865 tb->cflags = cflags;
d07bde88 866 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 867 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 868
d720b93d 869 /* check next page if needed */
c27004ec 870 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 871 phys_page2 = -1;
c27004ec 872 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
873 phys_page2 = get_phys_addr_code(env, virt_page2);
874 }
875 tb_link_phys(tb, phys_pc, phys_page2);
2e70f6ef 876 return tb;
d720b93d 877}
3b46e624 878
9fa3e853
FB
879/* invalidate all TBs which intersect with the target physical page
880 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
881 the same physical page. 'is_cpu_write_access' should be true if called
882 from a real cpu write access: the virtual CPU will exit the current
883 TB if code is modified inside this TB. */
00f82b8a 884void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
d720b93d
FB
885 int is_cpu_write_access)
886{
887 int n, current_tb_modified, current_tb_not_found, current_flags;
d720b93d 888 CPUState *env = cpu_single_env;
9fa3e853 889 PageDesc *p;
ea1c1802 890 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
9fa3e853 891 target_ulong tb_start, tb_end;
d720b93d 892 target_ulong current_pc, current_cs_base;
9fa3e853
FB
893
894 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 895 if (!p)
9fa3e853 896 return;
5fafdf24 897 if (!p->code_bitmap &&
d720b93d
FB
898 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
899 is_cpu_write_access) {
9fa3e853
FB
900 /* build code bitmap */
901 build_page_bitmap(p);
902 }
903
904 /* we remove all the TBs in the range [start, end[ */
905 /* XXX: see if in some cases it could be faster to invalidate all the code */
d720b93d
FB
906 current_tb_not_found = is_cpu_write_access;
907 current_tb_modified = 0;
908 current_tb = NULL; /* avoid warning */
909 current_pc = 0; /* avoid warning */
910 current_cs_base = 0; /* avoid warning */
911 current_flags = 0; /* avoid warning */
9fa3e853
FB
912 tb = p->first_tb;
913 while (tb != NULL) {
914 n = (long)tb & 3;
915 tb = (TranslationBlock *)((long)tb & ~3);
916 tb_next = tb->page_next[n];
917 /* NOTE: this is subtle as a TB may span two physical pages */
918 if (n == 0) {
919 /* NOTE: tb_end may be after the end of the page, but
920 it is not a problem */
921 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
922 tb_end = tb_start + tb->size;
923 } else {
924 tb_start = tb->page_addr[1];
925 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
926 }
927 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
928#ifdef TARGET_HAS_PRECISE_SMC
929 if (current_tb_not_found) {
930 current_tb_not_found = 0;
931 current_tb = NULL;
2e70f6ef 932 if (env->mem_io_pc) {
d720b93d 933 /* now we have a real cpu fault */
2e70f6ef 934 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
935 }
936 }
937 if (current_tb == tb &&
2e70f6ef 938 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
939 /* If we are modifying the current TB, we must stop
940 its execution. We could be more precise by checking
941 that the modification is after the current PC, but it
942 would require a specialized function to partially
943 restore the CPU state */
3b46e624 944
d720b93d 945 current_tb_modified = 1;
5fafdf24 946 cpu_restore_state(current_tb, env,
2e70f6ef 947 env->mem_io_pc, NULL);
d720b93d
FB
948#if defined(TARGET_I386)
949 current_flags = env->hflags;
950 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
951 current_cs_base = (target_ulong)env->segs[R_CS].base;
952 current_pc = current_cs_base + env->eip;
953#else
954#error unsupported CPU
955#endif
956 }
957#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
958 /* we need to do that to handle the case where a signal
959 occurs while doing tb_phys_invalidate() */
960 saved_tb = NULL;
961 if (env) {
962 saved_tb = env->current_tb;
963 env->current_tb = NULL;
964 }
9fa3e853 965 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
966 if (env) {
967 env->current_tb = saved_tb;
968 if (env->interrupt_request && env->current_tb)
969 cpu_interrupt(env, env->interrupt_request);
970 }
9fa3e853
FB
971 }
972 tb = tb_next;
973 }
974#if !defined(CONFIG_USER_ONLY)
975 /* if no code remaining, no need to continue to use slow writes */
976 if (!p->first_tb) {
977 invalidate_page_bitmap(p);
d720b93d 978 if (is_cpu_write_access) {
2e70f6ef 979 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
980 }
981 }
982#endif
983#ifdef TARGET_HAS_PRECISE_SMC
984 if (current_tb_modified) {
985 /* we generate a block containing just the instruction
986 modifying the memory. It will ensure that it cannot modify
987 itself */
ea1c1802 988 env->current_tb = NULL;
2e70f6ef 989 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 990 cpu_resume_from_signal(env, NULL);
9fa3e853 991 }
fd6ce8f6 992#endif
9fa3e853 993}
fd6ce8f6 994
9fa3e853 995/* len must be <= 8 and start must be a multiple of len */
00f82b8a 996static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
9fa3e853
FB
997{
998 PageDesc *p;
999 int offset, b;
59817ccb 1000#if 0
a4193c8a
FB
1001 if (1) {
1002 if (loglevel) {
5fafdf24 1003 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
2e70f6ef 1004 cpu_single_env->mem_io_vaddr, len,
5fafdf24 1005 cpu_single_env->eip,
a4193c8a 1006 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1007 }
59817ccb
FB
1008 }
1009#endif
9fa3e853 1010 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1011 if (!p)
9fa3e853
FB
1012 return;
1013 if (p->code_bitmap) {
1014 offset = start & ~TARGET_PAGE_MASK;
1015 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1016 if (b & ((1 << len) - 1))
1017 goto do_invalidate;
1018 } else {
1019 do_invalidate:
d720b93d 1020 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
1021 }
1022}
1023
9fa3e853 1024#if !defined(CONFIG_SOFTMMU)
00f82b8a 1025static void tb_invalidate_phys_page(target_phys_addr_t addr,
d720b93d 1026 unsigned long pc, void *puc)
9fa3e853 1027{
d720b93d
FB
1028 int n, current_flags, current_tb_modified;
1029 target_ulong current_pc, current_cs_base;
9fa3e853 1030 PageDesc *p;
d720b93d
FB
1031 TranslationBlock *tb, *current_tb;
1032#ifdef TARGET_HAS_PRECISE_SMC
1033 CPUState *env = cpu_single_env;
1034#endif
9fa3e853
FB
1035
1036 addr &= TARGET_PAGE_MASK;
1037 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1038 if (!p)
9fa3e853
FB
1039 return;
1040 tb = p->first_tb;
d720b93d
FB
1041 current_tb_modified = 0;
1042 current_tb = NULL;
1043 current_pc = 0; /* avoid warning */
1044 current_cs_base = 0; /* avoid warning */
1045 current_flags = 0; /* avoid warning */
1046#ifdef TARGET_HAS_PRECISE_SMC
1047 if (tb && pc != 0) {
1048 current_tb = tb_find_pc(pc);
1049 }
1050#endif
9fa3e853
FB
1051 while (tb != NULL) {
1052 n = (long)tb & 3;
1053 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
1054#ifdef TARGET_HAS_PRECISE_SMC
1055 if (current_tb == tb &&
2e70f6ef 1056 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1057 /* If we are modifying the current TB, we must stop
1058 its execution. We could be more precise by checking
1059 that the modification is after the current PC, but it
1060 would require a specialized function to partially
1061 restore the CPU state */
3b46e624 1062
d720b93d
FB
1063 current_tb_modified = 1;
1064 cpu_restore_state(current_tb, env, pc, puc);
1065#if defined(TARGET_I386)
1066 current_flags = env->hflags;
1067 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1068 current_cs_base = (target_ulong)env->segs[R_CS].base;
1069 current_pc = current_cs_base + env->eip;
1070#else
1071#error unsupported CPU
1072#endif
1073 }
1074#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1075 tb_phys_invalidate(tb, addr);
1076 tb = tb->page_next[n];
1077 }
fd6ce8f6 1078 p->first_tb = NULL;
d720b93d
FB
1079#ifdef TARGET_HAS_PRECISE_SMC
1080 if (current_tb_modified) {
1081 /* we generate a block containing just the instruction
1082 modifying the memory. It will ensure that it cannot modify
1083 itself */
ea1c1802 1084 env->current_tb = NULL;
2e70f6ef 1085 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d
FB
1086 cpu_resume_from_signal(env, puc);
1087 }
1088#endif
fd6ce8f6 1089}
9fa3e853 1090#endif
fd6ce8f6
FB
1091
1092/* add the tb in the target page and protect it if necessary */
5fafdf24 1093static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 1094 unsigned int n, target_ulong page_addr)
fd6ce8f6
FB
1095{
1096 PageDesc *p;
9fa3e853
FB
1097 TranslationBlock *last_first_tb;
1098
1099 tb->page_addr[n] = page_addr;
3a7d929e 1100 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
9fa3e853
FB
1101 tb->page_next[n] = p->first_tb;
1102 last_first_tb = p->first_tb;
1103 p->first_tb = (TranslationBlock *)((long)tb | n);
1104 invalidate_page_bitmap(p);
fd6ce8f6 1105
107db443 1106#if defined(TARGET_HAS_SMC) || 1
d720b93d 1107
9fa3e853 1108#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1109 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1110 target_ulong addr;
1111 PageDesc *p2;
9fa3e853
FB
1112 int prot;
1113
fd6ce8f6
FB
1114 /* force the host page as non writable (writes will have a
1115 page fault + mprotect overhead) */
53a5960a 1116 page_addr &= qemu_host_page_mask;
fd6ce8f6 1117 prot = 0;
53a5960a
PB
1118 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1119 addr += TARGET_PAGE_SIZE) {
1120
1121 p2 = page_find (addr >> TARGET_PAGE_BITS);
1122 if (!p2)
1123 continue;
1124 prot |= p2->flags;
1125 p2->flags &= ~PAGE_WRITE;
1126 page_get_flags(addr);
1127 }
5fafdf24 1128 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1129 (prot & PAGE_BITS) & ~PAGE_WRITE);
1130#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1131 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1132 page_addr);
fd6ce8f6 1133#endif
fd6ce8f6 1134 }
9fa3e853
FB
1135#else
1136 /* if some code is already present, then the pages are already
1137 protected. So we handle the case where only the first TB is
1138 allocated in a physical page */
1139 if (!last_first_tb) {
6a00d601 1140 tlb_protect_code(page_addr);
9fa3e853
FB
1141 }
1142#endif
d720b93d
FB
1143
1144#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1145}
1146
1147/* Allocate a new translation block. Flush the translation buffer if
1148 too many translation blocks or too much generated code. */
c27004ec 1149TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
1150{
1151 TranslationBlock *tb;
fd6ce8f6 1152
26a5f13b
FB
1153 if (nb_tbs >= code_gen_max_blocks ||
1154 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
d4e8164f 1155 return NULL;
fd6ce8f6
FB
1156 tb = &tbs[nb_tbs++];
1157 tb->pc = pc;
b448f2f3 1158 tb->cflags = 0;
d4e8164f
FB
1159 return tb;
1160}
1161
2e70f6ef
PB
1162void tb_free(TranslationBlock *tb)
1163{
bf20dc07 1164 /* In practice this is mostly used for single-use temporary TBs.
2e70f6ef
PB
1165 Ignore the hard cases and just back up if this TB happens to
1166 be the last one generated. */
1167 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1168 code_gen_ptr = tb->tc_ptr;
1169 nb_tbs--;
1170 }
1171}
1172
9fa3e853
FB
1173/* add a new TB and link it to the physical page tables. phys_page2 is
1174 (-1) to indicate that only one page contains the TB. */
5fafdf24 1175void tb_link_phys(TranslationBlock *tb,
9fa3e853 1176 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 1177{
9fa3e853
FB
1178 unsigned int h;
1179 TranslationBlock **ptb;
1180
c8a706fe
PB
1181 /* Grab the mmap lock to stop another thread invalidating this TB
1182 before we are done. */
1183 mmap_lock();
9fa3e853
FB
1184 /* add in the physical hash table */
1185 h = tb_phys_hash_func(phys_pc);
1186 ptb = &tb_phys_hash[h];
1187 tb->phys_hash_next = *ptb;
1188 *ptb = tb;
fd6ce8f6
FB
1189
1190 /* add in the page list */
9fa3e853
FB
1191 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1192 if (phys_page2 != -1)
1193 tb_alloc_page(tb, 1, phys_page2);
1194 else
1195 tb->page_addr[1] = -1;
9fa3e853 1196
d4e8164f
FB
1197 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1198 tb->jmp_next[0] = NULL;
1199 tb->jmp_next[1] = NULL;
1200
1201 /* init original jump addresses */
1202 if (tb->tb_next_offset[0] != 0xffff)
1203 tb_reset_jump(tb, 0);
1204 if (tb->tb_next_offset[1] != 0xffff)
1205 tb_reset_jump(tb, 1);
8a40a180
FB
1206
1207#ifdef DEBUG_TB_CHECK
1208 tb_page_check();
1209#endif
c8a706fe 1210 mmap_unlock();
fd6ce8f6
FB
1211}
1212
9fa3e853
FB
1213/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1214 tb[1].tc_ptr. Return NULL if not found */
1215TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1216{
9fa3e853
FB
1217 int m_min, m_max, m;
1218 unsigned long v;
1219 TranslationBlock *tb;
a513fe19
FB
1220
1221 if (nb_tbs <= 0)
1222 return NULL;
1223 if (tc_ptr < (unsigned long)code_gen_buffer ||
1224 tc_ptr >= (unsigned long)code_gen_ptr)
1225 return NULL;
1226 /* binary search (cf Knuth) */
1227 m_min = 0;
1228 m_max = nb_tbs - 1;
1229 while (m_min <= m_max) {
1230 m = (m_min + m_max) >> 1;
1231 tb = &tbs[m];
1232 v = (unsigned long)tb->tc_ptr;
1233 if (v == tc_ptr)
1234 return tb;
1235 else if (tc_ptr < v) {
1236 m_max = m - 1;
1237 } else {
1238 m_min = m + 1;
1239 }
5fafdf24 1240 }
a513fe19
FB
1241 return &tbs[m_max];
1242}
7501267e 1243
ea041c0e
FB
1244static void tb_reset_jump_recursive(TranslationBlock *tb);
1245
1246static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1247{
1248 TranslationBlock *tb1, *tb_next, **ptb;
1249 unsigned int n1;
1250
1251 tb1 = tb->jmp_next[n];
1252 if (tb1 != NULL) {
1253 /* find head of list */
1254 for(;;) {
1255 n1 = (long)tb1 & 3;
1256 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1257 if (n1 == 2)
1258 break;
1259 tb1 = tb1->jmp_next[n1];
1260 }
1261 /* we are now sure that tb jumps to tb1 */
1262 tb_next = tb1;
1263
1264 /* remove tb from the jmp_first list */
1265 ptb = &tb_next->jmp_first;
1266 for(;;) {
1267 tb1 = *ptb;
1268 n1 = (long)tb1 & 3;
1269 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1270 if (n1 == n && tb1 == tb)
1271 break;
1272 ptb = &tb1->jmp_next[n1];
1273 }
1274 *ptb = tb->jmp_next[n];
1275 tb->jmp_next[n] = NULL;
3b46e624 1276
ea041c0e
FB
1277 /* suppress the jump to next tb in generated code */
1278 tb_reset_jump(tb, n);
1279
0124311e 1280 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1281 tb_reset_jump_recursive(tb_next);
1282 }
1283}
1284
1285static void tb_reset_jump_recursive(TranslationBlock *tb)
1286{
1287 tb_reset_jump_recursive2(tb, 0);
1288 tb_reset_jump_recursive2(tb, 1);
1289}
1290
1fddef4b 1291#if defined(TARGET_HAS_ICE)
d720b93d
FB
1292static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1293{
9b3c35e0
JM
1294 target_phys_addr_t addr;
1295 target_ulong pd;
c2f07f81
PB
1296 ram_addr_t ram_addr;
1297 PhysPageDesc *p;
d720b93d 1298
c2f07f81
PB
1299 addr = cpu_get_phys_page_debug(env, pc);
1300 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1301 if (!p) {
1302 pd = IO_MEM_UNASSIGNED;
1303 } else {
1304 pd = p->phys_offset;
1305 }
1306 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1307 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1308}
c27004ec 1309#endif
d720b93d 1310
6658ffb8 1311/* Add a watchpoint. */
0f459d16 1312int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
6658ffb8
PB
1313{
1314 int i;
1315
1316 for (i = 0; i < env->nb_watchpoints; i++) {
1317 if (addr == env->watchpoint[i].vaddr)
1318 return 0;
1319 }
1320 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1321 return -1;
1322
1323 i = env->nb_watchpoints++;
1324 env->watchpoint[i].vaddr = addr;
0f459d16 1325 env->watchpoint[i].type = type;
6658ffb8
PB
1326 tlb_flush_page(env, addr);
1327 /* FIXME: This flush is needed because of the hack to make memory ops
1328 terminate the TB. It can be removed once the proper IO trap and
1329 re-execute bits are in. */
1330 tb_flush(env);
1331 return i;
1332}
1333
1334/* Remove a watchpoint. */
1335int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1336{
1337 int i;
1338
1339 for (i = 0; i < env->nb_watchpoints; i++) {
1340 if (addr == env->watchpoint[i].vaddr) {
1341 env->nb_watchpoints--;
1342 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1343 tlb_flush_page(env, addr);
1344 return 0;
1345 }
1346 }
1347 return -1;
1348}
1349
7d03f82f
EI
1350/* Remove all watchpoints. */
1351void cpu_watchpoint_remove_all(CPUState *env) {
1352 int i;
1353
1354 for (i = 0; i < env->nb_watchpoints; i++) {
1355 tlb_flush_page(env, env->watchpoint[i].vaddr);
1356 }
1357 env->nb_watchpoints = 0;
1358}
1359
c33a346e
FB
1360/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1361 breakpoint is reached */
2e12669a 1362int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
4c3a88a2 1363{
1fddef4b 1364#if defined(TARGET_HAS_ICE)
4c3a88a2 1365 int i;
3b46e624 1366
4c3a88a2
FB
1367 for(i = 0; i < env->nb_breakpoints; i++) {
1368 if (env->breakpoints[i] == pc)
1369 return 0;
1370 }
1371
1372 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1373 return -1;
1374 env->breakpoints[env->nb_breakpoints++] = pc;
3b46e624 1375
d720b93d 1376 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1377 return 0;
1378#else
1379 return -1;
1380#endif
1381}
1382
7d03f82f
EI
1383/* remove all breakpoints */
1384void cpu_breakpoint_remove_all(CPUState *env) {
1385#if defined(TARGET_HAS_ICE)
1386 int i;
1387 for(i = 0; i < env->nb_breakpoints; i++) {
1388 breakpoint_invalidate(env, env->breakpoints[i]);
1389 }
1390 env->nb_breakpoints = 0;
1391#endif
1392}
1393
4c3a88a2 1394/* remove a breakpoint */
2e12669a 1395int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
4c3a88a2 1396{
1fddef4b 1397#if defined(TARGET_HAS_ICE)
4c3a88a2
FB
1398 int i;
1399 for(i = 0; i < env->nb_breakpoints; i++) {
1400 if (env->breakpoints[i] == pc)
1401 goto found;
1402 }
1403 return -1;
1404 found:
4c3a88a2 1405 env->nb_breakpoints--;
1fddef4b
FB
1406 if (i < env->nb_breakpoints)
1407 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
d720b93d
FB
1408
1409 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1410 return 0;
1411#else
1412 return -1;
1413#endif
1414}
1415
c33a346e
FB
1416/* enable or disable single step mode. EXCP_DEBUG is returned by the
1417 CPU loop after each instruction */
1418void cpu_single_step(CPUState *env, int enabled)
1419{
1fddef4b 1420#if defined(TARGET_HAS_ICE)
c33a346e
FB
1421 if (env->singlestep_enabled != enabled) {
1422 env->singlestep_enabled = enabled;
1423 /* must flush all the translated code to avoid inconsistencies */
9fa3e853 1424 /* XXX: only flush what is necessary */
0124311e 1425 tb_flush(env);
c33a346e
FB
1426 }
1427#endif
1428}
1429
34865134
FB
1430/* enable or disable low levels log */
1431void cpu_set_log(int log_flags)
1432{
1433 loglevel = log_flags;
1434 if (loglevel && !logfile) {
11fcfab4 1435 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1436 if (!logfile) {
1437 perror(logfilename);
1438 _exit(1);
1439 }
9fa3e853
FB
1440#if !defined(CONFIG_SOFTMMU)
1441 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1442 {
b55266b5 1443 static char logfile_buf[4096];
9fa3e853
FB
1444 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1445 }
1446#else
34865134 1447 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1448#endif
e735b91c
PB
1449 log_append = 1;
1450 }
1451 if (!loglevel && logfile) {
1452 fclose(logfile);
1453 logfile = NULL;
34865134
FB
1454 }
1455}
1456
1457void cpu_set_log_filename(const char *filename)
1458{
1459 logfilename = strdup(filename);
e735b91c
PB
1460 if (logfile) {
1461 fclose(logfile);
1462 logfile = NULL;
1463 }
1464 cpu_set_log(loglevel);
34865134 1465}
c33a346e 1466
0124311e 1467/* mask must never be zero, except for A20 change call */
68a79315 1468void cpu_interrupt(CPUState *env, int mask)
ea041c0e 1469{
d5975363 1470#if !defined(USE_NPTL)
ea041c0e 1471 TranslationBlock *tb;
15a51156 1472 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
d5975363 1473#endif
2e70f6ef 1474 int old_mask;
59817ccb 1475
2e70f6ef 1476 old_mask = env->interrupt_request;
d5975363 1477 /* FIXME: This is probably not threadsafe. A different thread could
bf20dc07 1478 be in the middle of a read-modify-write operation. */
68a79315 1479 env->interrupt_request |= mask;
d5975363
PB
1480#if defined(USE_NPTL)
1481 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1482 problem and hope the cpu will stop of its own accord. For userspace
1483 emulation this often isn't actually as bad as it sounds. Often
1484 signals are used primarily to interrupt blocking syscalls. */
1485#else
2e70f6ef 1486 if (use_icount) {
266910c4 1487 env->icount_decr.u16.high = 0xffff;
2e70f6ef
PB
1488#ifndef CONFIG_USER_ONLY
1489 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1490 an async event happened and we need to process it. */
1491 if (!can_do_io(env)
1492 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1493 cpu_abort(env, "Raised interrupt while not in I/O function");
1494 }
1495#endif
1496 } else {
1497 tb = env->current_tb;
1498 /* if the cpu is currently executing code, we must unlink it and
1499 all the potentially executing TB */
1500 if (tb && !testandset(&interrupt_lock)) {
1501 env->current_tb = NULL;
1502 tb_reset_jump_recursive(tb);
1503 resetlock(&interrupt_lock);
1504 }
ea041c0e 1505 }
d5975363 1506#endif
ea041c0e
FB
1507}
1508
b54ad049
FB
1509void cpu_reset_interrupt(CPUState *env, int mask)
1510{
1511 env->interrupt_request &= ~mask;
1512}
1513
c7cd6a37 1514const CPULogItem cpu_log_items[] = {
5fafdf24 1515 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1516 "show generated host assembly code for each compiled TB" },
1517 { CPU_LOG_TB_IN_ASM, "in_asm",
1518 "show target assembly code for each compiled TB" },
5fafdf24 1519 { CPU_LOG_TB_OP, "op",
57fec1fe 1520 "show micro ops for each compiled TB" },
f193c797 1521 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1522 "show micro ops "
1523#ifdef TARGET_I386
1524 "before eflags optimization and "
f193c797 1525#endif
e01a1157 1526 "after liveness analysis" },
f193c797
FB
1527 { CPU_LOG_INT, "int",
1528 "show interrupts/exceptions in short format" },
1529 { CPU_LOG_EXEC, "exec",
1530 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1531 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1532 "show CPU state before block translation" },
f193c797
FB
1533#ifdef TARGET_I386
1534 { CPU_LOG_PCALL, "pcall",
1535 "show protected mode far calls/returns/exceptions" },
1536#endif
8e3a9fd2 1537#ifdef DEBUG_IOPORT
fd872598
FB
1538 { CPU_LOG_IOPORT, "ioport",
1539 "show all i/o ports accesses" },
8e3a9fd2 1540#endif
f193c797
FB
1541 { 0, NULL, NULL },
1542};
1543
1544static int cmp1(const char *s1, int n, const char *s2)
1545{
1546 if (strlen(s2) != n)
1547 return 0;
1548 return memcmp(s1, s2, n) == 0;
1549}
3b46e624 1550
f193c797
FB
1551/* takes a comma separated list of log masks. Return 0 if error. */
1552int cpu_str_to_log_mask(const char *str)
1553{
c7cd6a37 1554 const CPULogItem *item;
f193c797
FB
1555 int mask;
1556 const char *p, *p1;
1557
1558 p = str;
1559 mask = 0;
1560 for(;;) {
1561 p1 = strchr(p, ',');
1562 if (!p1)
1563 p1 = p + strlen(p);
8e3a9fd2
FB
1564 if(cmp1(p,p1-p,"all")) {
1565 for(item = cpu_log_items; item->mask != 0; item++) {
1566 mask |= item->mask;
1567 }
1568 } else {
f193c797
FB
1569 for(item = cpu_log_items; item->mask != 0; item++) {
1570 if (cmp1(p, p1 - p, item->name))
1571 goto found;
1572 }
1573 return 0;
8e3a9fd2 1574 }
f193c797
FB
1575 found:
1576 mask |= item->mask;
1577 if (*p1 != ',')
1578 break;
1579 p = p1 + 1;
1580 }
1581 return mask;
1582}
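/* Example: cpu_str_to_log_mask("in_asm,op") yields
 * CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, and the special name "all" turns on
 * every entry of cpu_log_items[]; a return value of 0 indicates an unknown
 * item name. */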
ea041c0e 1583
7501267e
FB
1584void cpu_abort(CPUState *env, const char *fmt, ...)
1585{
1586 va_list ap;
493ae1f0 1587 va_list ap2;
7501267e
FB
1588
1589 va_start(ap, fmt);
493ae1f0 1590 va_copy(ap2, ap);
7501267e
FB
1591 fprintf(stderr, "qemu: fatal: ");
1592 vfprintf(stderr, fmt, ap);
1593 fprintf(stderr, "\n");
1594#ifdef TARGET_I386
7fe48483
FB
1595 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1596#else
1597 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1598#endif
924edcae 1599 if (logfile) {
f9373291 1600 fprintf(logfile, "qemu: fatal: ");
493ae1f0 1601 vfprintf(logfile, fmt, ap2);
f9373291
JM
1602 fprintf(logfile, "\n");
1603#ifdef TARGET_I386
1604 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1605#else
1606 cpu_dump_state(env, logfile, fprintf, 0);
1607#endif
924edcae
AZ
1608 fflush(logfile);
1609 fclose(logfile);
1610 }
493ae1f0 1611 va_end(ap2);
f9373291 1612 va_end(ap);
7501267e
FB
1613 abort();
1614}
1615
c5be9f08
TS
1616CPUState *cpu_copy(CPUState *env)
1617{
01ba9816 1618 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1619 /* preserve chaining and index */
1620 CPUState *next_cpu = new_env->next_cpu;
1621 int cpu_index = new_env->cpu_index;
1622 memcpy(new_env, env, sizeof(CPUState));
1623 new_env->next_cpu = next_cpu;
1624 new_env->cpu_index = cpu_index;
1625 return new_env;
1626}
1627
0124311e
FB
1628#if !defined(CONFIG_USER_ONLY)
1629
5c751e99
EI
1630static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1631{
1632 unsigned int i;
1633
1634 /* Discard jump cache entries for any tb which might potentially
1635 overlap the flushed page. */
1636 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1637 memset (&env->tb_jmp_cache[i], 0,
1638 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1639
1640 i = tb_jmp_cache_hash_page(addr);
1641 memset (&env->tb_jmp_cache[i], 0,
1642 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1643}
1644
ee8b7021
FB
1645/* NOTE: if flush_global is true, also flush global entries (not
1646 implemented yet) */
1647void tlb_flush(CPUState *env, int flush_global)
33417e70 1648{
33417e70 1649 int i;
0124311e 1650
9fa3e853
FB
1651#if defined(DEBUG_TLB)
1652 printf("tlb_flush:\n");
1653#endif
0124311e
FB
1654 /* must reset current TB so that interrupts cannot modify the
1655 links while we are modifying them */
1656 env->current_tb = NULL;
1657
33417e70 1658 for(i = 0; i < CPU_TLB_SIZE; i++) {
84b7b8e7
FB
1659 env->tlb_table[0][i].addr_read = -1;
1660 env->tlb_table[0][i].addr_write = -1;
1661 env->tlb_table[0][i].addr_code = -1;
1662 env->tlb_table[1][i].addr_read = -1;
1663 env->tlb_table[1][i].addr_write = -1;
1664 env->tlb_table[1][i].addr_code = -1;
6fa4cea9
JM
1665#if (NB_MMU_MODES >= 3)
1666 env->tlb_table[2][i].addr_read = -1;
1667 env->tlb_table[2][i].addr_write = -1;
1668 env->tlb_table[2][i].addr_code = -1;
1669#if (NB_MMU_MODES == 4)
1670 env->tlb_table[3][i].addr_read = -1;
1671 env->tlb_table[3][i].addr_write = -1;
1672 env->tlb_table[3][i].addr_code = -1;
1673#endif
1674#endif
33417e70 1675 }
9fa3e853 1676
8a40a180 1677 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1678
0a962c02
FB
1679#ifdef USE_KQEMU
1680 if (env->kqemu_enabled) {
1681 kqemu_flush(env, flush_global);
1682 }
9fa3e853 1683#endif
e3db7226 1684 tlb_flush_count++;
33417e70
FB
1685}
1686
274da6b2 1687static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1688{
5fafdf24 1689 if (addr == (tlb_entry->addr_read &
84b7b8e7 1690 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1691 addr == (tlb_entry->addr_write &
84b7b8e7 1692 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1693 addr == (tlb_entry->addr_code &
84b7b8e7
FB
1694 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1695 tlb_entry->addr_read = -1;
1696 tlb_entry->addr_write = -1;
1697 tlb_entry->addr_code = -1;
1698 }
61382a50
FB
1699}
1700
2e12669a 1701void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1702{
8a40a180 1703 int i;
0124311e 1704
9fa3e853 1705#if defined(DEBUG_TLB)
108c49b8 1706 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1707#endif
0124311e
FB
1708 /* must reset current TB so that interrupts cannot modify the
1709 links while we are modifying them */
1710 env->current_tb = NULL;
61382a50
FB
1711
1712 addr &= TARGET_PAGE_MASK;
1713 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1714 tlb_flush_entry(&env->tlb_table[0][i], addr);
1715 tlb_flush_entry(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1716#if (NB_MMU_MODES >= 3)
1717 tlb_flush_entry(&env->tlb_table[2][i], addr);
1718#if (NB_MMU_MODES == 4)
1719 tlb_flush_entry(&env->tlb_table[3][i], addr);
1720#endif
1721#endif
0124311e 1722
5c751e99 1723 tlb_flush_jmp_cache(env, addr);
9fa3e853 1724
0a962c02
FB
1725#ifdef USE_KQEMU
1726 if (env->kqemu_enabled) {
1727 kqemu_flush_page(env, addr);
1728 }
1729#endif
9fa3e853
FB
1730}
1731
9fa3e853
FB
1732/* update the TLBs so that writes to code in the virtual page 'addr'
1733 can be detected */
6a00d601 1734static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1735{
5fafdf24 1736 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1737 ram_addr + TARGET_PAGE_SIZE,
1738 CODE_DIRTY_FLAG);
9fa3e853
FB
1739}
1740
9fa3e853 1741/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1742 tested for self modifying code */
5fafdf24 1743static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1744 target_ulong vaddr)
9fa3e853 1745{
3a7d929e 1746 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1747}
1748
5fafdf24 1749static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb 1750 unsigned long start, unsigned long length)
1751{
1752 unsigned long addr;
84b7b8e7
FB
1753 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1754 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1755 if ((addr - start) < length) {
0f459d16 1756 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1757 }
1758 }
1759}
1760
3a7d929e 1761void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1762 int dirty_flags)
1ccde1cb
FB
1763{
1764 CPUState *env;
4f2ac237 1765 unsigned long length, start1;
0a962c02
FB
1766 int i, mask, len;
1767 uint8_t *p;
1ccde1cb
FB
1768
1769 start &= TARGET_PAGE_MASK;
1770 end = TARGET_PAGE_ALIGN(end);
1771
1772 length = end - start;
1773 if (length == 0)
1774 return;
0a962c02 1775 len = length >> TARGET_PAGE_BITS;
3a7d929e 1776#ifdef USE_KQEMU
6a00d601
FB
1777 /* XXX: should not depend on cpu context */
1778 env = first_cpu;
3a7d929e 1779 if (env->kqemu_enabled) {
f23db169
FB
1780 ram_addr_t addr;
1781 addr = start;
1782 for(i = 0; i < len; i++) {
1783 kqemu_set_notdirty(env, addr);
1784 addr += TARGET_PAGE_SIZE;
1785 }
3a7d929e
FB
1786 }
1787#endif
f23db169
FB
1788 mask = ~dirty_flags;
1789 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1790 for(i = 0; i < len; i++)
1791 p[i] &= mask;
1792
1ccde1cb
FB
1793 /* we modify the TLB cache so that the dirty bit will be set again
1794 when accessing the range */
59817ccb 1795 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1796 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1797 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1798 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1799 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1800 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1801#if (NB_MMU_MODES >= 3)
1802 for(i = 0; i < CPU_TLB_SIZE; i++)
1803 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1804#if (NB_MMU_MODES == 4)
1805 for(i = 0; i < CPU_TLB_SIZE; i++)
1806 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1807#endif
1808#endif
6a00d601 1809 }
1ccde1cb
FB
1810}
1811
3a7d929e
FB
1812static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1813{
1814 ram_addr_t ram_addr;
1815
84b7b8e7 1816 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1817 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1818 tlb_entry->addend - (unsigned long)phys_ram_base;
1819 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 1820 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
1821 }
1822 }
1823}
1824
1825/* update the TLB according to the current state of the dirty bits */
1826void cpu_tlb_update_dirty(CPUState *env)
1827{
1828 int i;
1829 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1830 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1831 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1832 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1833#if (NB_MMU_MODES >= 3)
1834 for(i = 0; i < CPU_TLB_SIZE; i++)
1835 tlb_update_dirty(&env->tlb_table[2][i]);
1836#if (NB_MMU_MODES == 4)
1837 for(i = 0; i < CPU_TLB_SIZE; i++)
1838 tlb_update_dirty(&env->tlb_table[3][i]);
1839#endif
1840#endif
3a7d929e
FB
1841}
1842
0f459d16 1843static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 1844{
0f459d16
PB
1845 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1846 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
1847}
1848
0f459d16
PB
1849/* update the TLB corresponding to virtual page vaddr
1850 so that it is no longer dirty */
1851static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 1852{
1ccde1cb
FB
1853 int i;
1854
0f459d16 1855 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 1856 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
0f459d16
PB
1857 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1858 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
6fa4cea9 1859#if (NB_MMU_MODES >= 3)
0f459d16 1860 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
6fa4cea9 1861#if (NB_MMU_MODES == 4)
0f459d16 1862 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
6fa4cea9
JM
1863#endif
1864#endif
9fa3e853
FB
1865}
1866
59817ccb
FB
1867/* add a new TLB entry. At most one entry for a given virtual address
1868 is permitted. Return 0 if OK or 2 if the page could not be mapped
1869 (can only happen in non SOFTMMU mode for I/O pages or pages
1870 conflicting with the host address space). */
5fafdf24
TS
1871int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1872 target_phys_addr_t paddr, int prot,
6ebbf390 1873 int mmu_idx, int is_softmmu)
9fa3e853 1874{
92e873b9 1875 PhysPageDesc *p;
4f2ac237 1876 unsigned long pd;
9fa3e853 1877 unsigned int index;
4f2ac237 1878 target_ulong address;
0f459d16 1879 target_ulong code_address;
108c49b8 1880 target_phys_addr_t addend;
9fa3e853 1881 int ret;
84b7b8e7 1882 CPUTLBEntry *te;
6658ffb8 1883 int i;
0f459d16 1884 target_phys_addr_t iotlb;
9fa3e853 1885
92e873b9 1886 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1887 if (!p) {
1888 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1889 } else {
1890 pd = p->phys_offset;
9fa3e853
FB
1891 }
1892#if defined(DEBUG_TLB)
6ebbf390
JM
1893 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1894 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1895#endif
1896
1897 ret = 0;
0f459d16
PB
1898 address = vaddr;
1899 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1900 /* IO memory case (romd handled later) */
1901 address |= TLB_MMIO;
1902 }
1903 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1904 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1905 /* Normal RAM. */
1906 iotlb = pd & TARGET_PAGE_MASK;
1907 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1908 iotlb |= IO_MEM_NOTDIRTY;
1909 else
1910 iotlb |= IO_MEM_ROM;
1911 } else {
 1912 /* IO handlers are currently passed a physical address.
1913 It would be nice to pass an offset from the base address
1914 of that region. This would avoid having to special case RAM,
1915 and avoid full address decoding in every device.
1916 We can't use the high bits of pd for this because
1917 IO_MEM_ROMD uses these as a ram address. */
1918 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1919 }
1920
1921 code_address = address;
1922 /* Make accesses to pages with watchpoints go via the
1923 watchpoint trap routines. */
1924 for (i = 0; i < env->nb_watchpoints; i++) {
1925 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1926 iotlb = io_mem_watch + paddr;
1927 /* TODO: The memory case can be optimized by not trapping
1928 reads of pages with a write breakpoint. */
1929 address |= TLB_MMIO;
6658ffb8 1930 }
0f459d16 1931 }
d79acba4 1932
0f459d16
PB
1933 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1934 env->iotlb[mmu_idx][index] = iotlb - vaddr;
1935 te = &env->tlb_table[mmu_idx][index];
1936 te->addend = addend - vaddr;
1937 if (prot & PAGE_READ) {
1938 te->addr_read = address;
1939 } else {
1940 te->addr_read = -1;
1941 }
5c751e99 1942
0f459d16
PB
1943 if (prot & PAGE_EXEC) {
1944 te->addr_code = code_address;
1945 } else {
1946 te->addr_code = -1;
1947 }
1948 if (prot & PAGE_WRITE) {
1949 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1950 (pd & IO_MEM_ROMD)) {
1951 /* Write access calls the I/O callback. */
1952 te->addr_write = address | TLB_MMIO;
1953 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1954 !cpu_physical_memory_is_dirty(pd)) {
1955 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 1956 } else {
0f459d16 1957 te->addr_write = address;
9fa3e853 1958 }
0f459d16
PB
1959 } else {
1960 te->addr_write = -1;
9fa3e853 1961 }
9fa3e853
FB
1962 return ret;
1963}
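/* Usage sketch: a target's MMU fault handler resolves the guest virtual
   address to a physical address and protection bits, then installs the
   mapping with tlb_set_page_exec().  xyz_walk_page_table() is a
   hypothetical placeholder; real targets perform the page-table walk
   inline. */
#if 0
int cpu_xyz_handle_mmu_fault(CPUState *env, target_ulong address,
                             int is_write, int mmu_idx, int is_softmmu)
{
    target_phys_addr_t paddr;
    int prot;

    if (xyz_walk_page_table(env, address, is_write, &paddr, &prot) < 0)
        return 1; /* no mapping: let the target raise a guest page fault */
    return tlb_set_page_exec(env, address & TARGET_PAGE_MASK,
                             paddr & TARGET_PAGE_MASK, prot,
                             mmu_idx, is_softmmu);
}
#endif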
1964
0124311e
FB
1965#else
1966
ee8b7021 1967void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1968{
1969}
1970
2e12669a 1971void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1972{
1973}
1974
5fafdf24
TS
1975int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1976 target_phys_addr_t paddr, int prot,
6ebbf390 1977 int mmu_idx, int is_softmmu)
9fa3e853
FB
1978{
1979 return 0;
1980}
0124311e 1981
9fa3e853
FB
1982/* dump memory mappings */
1983void page_dump(FILE *f)
33417e70 1984{
9fa3e853
FB
1985 unsigned long start, end;
1986 int i, j, prot, prot1;
1987 PageDesc *p;
33417e70 1988
9fa3e853
FB
1989 fprintf(f, "%-8s %-8s %-8s %s\n",
1990 "start", "end", "size", "prot");
1991 start = -1;
1992 end = -1;
1993 prot = 0;
1994 for(i = 0; i <= L1_SIZE; i++) {
1995 if (i < L1_SIZE)
1996 p = l1_map[i];
1997 else
1998 p = NULL;
1999 for(j = 0;j < L2_SIZE; j++) {
2000 if (!p)
2001 prot1 = 0;
2002 else
2003 prot1 = p[j].flags;
2004 if (prot1 != prot) {
2005 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2006 if (start != -1) {
2007 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 2008 start, end, end - start,
9fa3e853
FB
2009 prot & PAGE_READ ? 'r' : '-',
2010 prot & PAGE_WRITE ? 'w' : '-',
2011 prot & PAGE_EXEC ? 'x' : '-');
2012 }
2013 if (prot1 != 0)
2014 start = end;
2015 else
2016 start = -1;
2017 prot = prot1;
2018 }
2019 if (!p)
2020 break;
2021 }
33417e70 2022 }
33417e70
FB
2023}
2024
53a5960a 2025int page_get_flags(target_ulong address)
33417e70 2026{
9fa3e853
FB
2027 PageDesc *p;
2028
2029 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2030 if (!p)
9fa3e853
FB
2031 return 0;
2032 return p->flags;
2033}
2034
2035/* modify the flags of a page and invalidate the code if
 2036 necessary. The flag PAGE_WRITE_ORG is set automatically
2037 depending on PAGE_WRITE */
53a5960a 2038void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
2039{
2040 PageDesc *p;
53a5960a 2041 target_ulong addr;
9fa3e853 2042
c8a706fe 2043 /* mmap_lock should already be held. */
9fa3e853
FB
2044 start = start & TARGET_PAGE_MASK;
2045 end = TARGET_PAGE_ALIGN(end);
2046 if (flags & PAGE_WRITE)
2047 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
2048 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2049 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
17e2377a
PB
2050 /* We may be called for host regions that are outside guest
2051 address space. */
2052 if (!p)
2053 return;
9fa3e853
FB
2054 /* if the write protection is set, then we invalidate the code
2055 inside */
5fafdf24 2056 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2057 (flags & PAGE_WRITE) &&
2058 p->first_tb) {
d720b93d 2059 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2060 }
2061 p->flags = flags;
2062 }
33417e70
FB
2063}
2064
3d97b40b
TS
2065int page_check_range(target_ulong start, target_ulong len, int flags)
2066{
2067 PageDesc *p;
2068 target_ulong end;
2069 target_ulong addr;
2070
 2071 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2072 start = start & TARGET_PAGE_MASK;
2073
2074 if( end < start )
2075 /* we've wrapped around */
2076 return -1;
2077 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2078 p = page_find(addr >> TARGET_PAGE_BITS);
2079 if( !p )
2080 return -1;
2081 if( !(p->flags & PAGE_VALID) )
2082 return -1;
2083
dae3270c 2084 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2085 return -1;
dae3270c
FB
2086 if (flags & PAGE_WRITE) {
2087 if (!(p->flags & PAGE_WRITE_ORG))
2088 return -1;
2089 /* unprotect the page if it was put read-only because it
2090 contains translated code */
2091 if (!(p->flags & PAGE_WRITE)) {
2092 if (!page_unprotect(addr, 0, NULL))
2093 return -1;
2094 }
2095 return 0;
2096 }
3d97b40b
TS
2097 }
2098 return 0;
2099}
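/* Usage sketch: user-mode callers (e.g. the syscall emulation layer)
   use the page flag API above to validate and adjust guest mappings.
   The two helpers below are hypothetical illustrations. */
#if 0
static int guest_buffer_writable(target_ulong addr, target_ulong len)
{
    /* fails if any page in the range is unmapped or not writable */
    return page_check_range(addr, len, PAGE_WRITE) == 0;
}

static void make_region_writable(target_ulong start, target_ulong end)
{
    /* mmap_lock must be held; making a previously read-only page
       writable invalidates any translated code it contains (see
       page_set_flags() above) */
    page_set_flags(start, end, PAGE_VALID | PAGE_READ | PAGE_WRITE);
}
#endif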
2100
9fa3e853
FB
2101/* called from signal handler: invalidate the code and unprotect the
 2102 page. Return TRUE if the fault was successfully handled. */
53a5960a 2103int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2104{
2105 unsigned int page_index, prot, pindex;
2106 PageDesc *p, *p1;
53a5960a 2107 target_ulong host_start, host_end, addr;
9fa3e853 2108
c8a706fe
PB
2109 /* Technically this isn't safe inside a signal handler. However we
2110 know this only ever happens in a synchronous SEGV handler, so in
2111 practice it seems to be ok. */
2112 mmap_lock();
2113
83fb7adf 2114 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2115 page_index = host_start >> TARGET_PAGE_BITS;
2116 p1 = page_find(page_index);
c8a706fe
PB
2117 if (!p1) {
2118 mmap_unlock();
9fa3e853 2119 return 0;
c8a706fe 2120 }
83fb7adf 2121 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2122 p = p1;
2123 prot = 0;
2124 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2125 prot |= p->flags;
2126 p++;
2127 }
2128 /* if the page was really writable, then we change its
2129 protection back to writable */
2130 if (prot & PAGE_WRITE_ORG) {
2131 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2132 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2133 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2134 (prot & PAGE_BITS) | PAGE_WRITE);
2135 p1[pindex].flags |= PAGE_WRITE;
2136 /* and since the content will be modified, we must invalidate
2137 the corresponding translated code. */
d720b93d 2138 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2139#ifdef DEBUG_TB_CHECK
2140 tb_invalidate_check(address);
2141#endif
c8a706fe 2142 mmap_unlock();
9fa3e853
FB
2143 return 1;
2144 }
2145 }
c8a706fe 2146 mmap_unlock();
9fa3e853
FB
2147 return 0;
2148}
2149
6a00d601
FB
2150static inline void tlb_set_dirty(CPUState *env,
2151 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2152{
2153}
9fa3e853
FB
2154#endif /* defined(CONFIG_USER_ONLY) */
2155
e2eef170 2156#if !defined(CONFIG_USER_ONLY)
db7b5426 2157static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a
AJ
2158 ram_addr_t memory);
2159static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2160 ram_addr_t orig_memory);
db7b5426
BS
2161#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2162 need_subpage) \
2163 do { \
2164 if (addr > start_addr) \
2165 start_addr2 = 0; \
2166 else { \
2167 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2168 if (start_addr2 > 0) \
2169 need_subpage = 1; \
2170 } \
2171 \
49e9fba2 2172 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2173 end_addr2 = TARGET_PAGE_SIZE - 1; \
2174 else { \
2175 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2176 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2177 need_subpage = 1; \
2178 } \
2179 } while (0)
2180
33417e70
FB
2181/* register physical memory. 'size' must be a multiple of the target
2182 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2183 io memory page */
5fafdf24 2184void cpu_register_physical_memory(target_phys_addr_t start_addr,
00f82b8a
AJ
2185 ram_addr_t size,
2186 ram_addr_t phys_offset)
33417e70 2187{
108c49b8 2188 target_phys_addr_t addr, end_addr;
92e873b9 2189 PhysPageDesc *p;
9d42037b 2190 CPUState *env;
00f82b8a 2191 ram_addr_t orig_size = size;
db7b5426 2192 void *subpage;
33417e70 2193
da260249
FB
2194#ifdef USE_KQEMU
2195 /* XXX: should not depend on cpu context */
2196 env = first_cpu;
2197 if (env->kqemu_enabled) {
2198 kqemu_set_phys_mem(start_addr, size, phys_offset);
2199 }
2200#endif
5fd386f6 2201 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2202 end_addr = start_addr + (target_phys_addr_t)size;
2203 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2204 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2205 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2206 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2207 target_phys_addr_t start_addr2, end_addr2;
2208 int need_subpage = 0;
2209
2210 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2211 need_subpage);
4254fab8 2212 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2213 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2214 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2215 &p->phys_offset, orig_memory);
2216 } else {
2217 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2218 >> IO_MEM_SHIFT];
2219 }
2220 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2221 } else {
2222 p->phys_offset = phys_offset;
2223 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2224 (phys_offset & IO_MEM_ROMD))
2225 phys_offset += TARGET_PAGE_SIZE;
2226 }
2227 } else {
2228 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2229 p->phys_offset = phys_offset;
2230 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2231 (phys_offset & IO_MEM_ROMD))
2232 phys_offset += TARGET_PAGE_SIZE;
2233 else {
2234 target_phys_addr_t start_addr2, end_addr2;
2235 int need_subpage = 0;
2236
2237 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2238 end_addr2, need_subpage);
2239
4254fab8 2240 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2241 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2242 &p->phys_offset, IO_MEM_UNASSIGNED);
2243 subpage_register(subpage, start_addr2, end_addr2,
2244 phys_offset);
2245 }
2246 }
2247 }
33417e70 2248 }
3b46e624 2249
9d42037b
FB
2250 /* since each CPU stores ram addresses in its TLB cache, we must
2251 reset the modified entries */
2252 /* XXX: slow ! */
2253 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2254 tlb_flush(env, 1);
2255 }
33417e70
FB
2256}
2257
ba863458 2258/* XXX: temporary until new memory mapping API */
00f82b8a 2259ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2260{
2261 PhysPageDesc *p;
2262
2263 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2264 if (!p)
2265 return IO_MEM_UNASSIGNED;
2266 return p->phys_offset;
2267}
2268
e9a1ab19 2269/* XXX: better than nothing */
00f82b8a 2270ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2271{
2272 ram_addr_t addr;
7fb4fdcf 2273 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
012a7045 2274 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
ed441467 2275 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2276 abort();
2277 }
2278 addr = phys_ram_alloc_offset;
2279 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2280 return addr;
2281}
2282
2283void qemu_ram_free(ram_addr_t addr)
2284{
2285}
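/* Usage sketch: a board model allocates guest RAM from the
   phys_ram_base pool and then maps it at guest physical address 0.
   'ram_size' is assumed to come from the machine configuration. */
#if 0
static void map_main_ram(ram_addr_t ram_size)
{
    ram_addr_t ram_offset;

    ram_offset = qemu_ram_alloc(ram_size);
    /* the low bits of phys_offset select the handler: IO_MEM_RAM means
       direct RAM access, anything else is dispatched through io_mem_* */
    cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);
}
#endif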
2286
a4193c8a 2287static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2288{
67d3b957 2289#ifdef DEBUG_UNASSIGNED
ab3d1727 2290 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316
BS
2291#endif
2292#ifdef TARGET_SPARC
6c36d3fa 2293 do_unassigned_access(addr, 0, 0, 0);
eb38c52c 2294#elif defined(TARGET_CRIS)
f1ccf904 2295 do_unassigned_access(addr, 0, 0, 0);
67d3b957 2296#endif
33417e70
FB
2297 return 0;
2298}
2299
a4193c8a 2300static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2301{
67d3b957 2302#ifdef DEBUG_UNASSIGNED
ab3d1727 2303 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2304#endif
b4f0a316 2305#ifdef TARGET_SPARC
6c36d3fa 2306 do_unassigned_access(addr, 1, 0, 0);
eb38c52c 2307#elif defined(TARGET_CRIS)
f1ccf904 2308 do_unassigned_access(addr, 1, 0, 0);
b4f0a316 2309#endif
33417e70
FB
2310}
2311
2312static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2313 unassigned_mem_readb,
2314 unassigned_mem_readb,
2315 unassigned_mem_readb,
2316};
2317
2318static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2319 unassigned_mem_writeb,
2320 unassigned_mem_writeb,
2321 unassigned_mem_writeb,
2322};
2323
0f459d16
PB
2324static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2325 uint32_t val)
9fa3e853 2326{
3a7d929e 2327 int dirty_flags;
3a7d929e
FB
2328 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2329 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2330#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2331 tb_invalidate_phys_page_fast(ram_addr, 1);
2332 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2333#endif
3a7d929e 2334 }
0f459d16 2335 stb_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2336#ifdef USE_KQEMU
2337 if (cpu_single_env->kqemu_enabled &&
2338 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2339 kqemu_modify_page(cpu_single_env, ram_addr);
2340#endif
f23db169
FB
2341 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2342 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2343 /* we remove the notdirty callback only if the code has been
2344 flushed */
2345 if (dirty_flags == 0xff)
2e70f6ef 2346 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2347}
2348
0f459d16
PB
2349static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2350 uint32_t val)
9fa3e853 2351{
3a7d929e 2352 int dirty_flags;
3a7d929e
FB
2353 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2354 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2355#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2356 tb_invalidate_phys_page_fast(ram_addr, 2);
2357 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2358#endif
3a7d929e 2359 }
0f459d16 2360 stw_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2361#ifdef USE_KQEMU
2362 if (cpu_single_env->kqemu_enabled &&
2363 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2364 kqemu_modify_page(cpu_single_env, ram_addr);
2365#endif
f23db169
FB
2366 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2367 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2368 /* we remove the notdirty callback only if the code has been
2369 flushed */
2370 if (dirty_flags == 0xff)
2e70f6ef 2371 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2372}
2373
0f459d16
PB
2374static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2375 uint32_t val)
9fa3e853 2376{
3a7d929e 2377 int dirty_flags;
3a7d929e
FB
2378 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2379 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2380#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2381 tb_invalidate_phys_page_fast(ram_addr, 4);
2382 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2383#endif
3a7d929e 2384 }
0f459d16 2385 stl_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2386#ifdef USE_KQEMU
2387 if (cpu_single_env->kqemu_enabled &&
2388 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2389 kqemu_modify_page(cpu_single_env, ram_addr);
2390#endif
f23db169
FB
2391 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2392 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2393 /* we remove the notdirty callback only if the code has been
2394 flushed */
2395 if (dirty_flags == 0xff)
2e70f6ef 2396 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2397}
2398
3a7d929e 2399static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2400 NULL, /* never used */
2401 NULL, /* never used */
2402 NULL, /* never used */
2403};
2404
1ccde1cb
FB
2405static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2406 notdirty_mem_writeb,
2407 notdirty_mem_writew,
2408 notdirty_mem_writel,
2409};
2410
0f459d16
PB
2411/* Generate a debug exception if a watchpoint has been hit. */
2412static void check_watchpoint(int offset, int flags)
2413{
2414 CPUState *env = cpu_single_env;
2415 target_ulong vaddr;
2416 int i;
2417
2e70f6ef 2418 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
0f459d16
PB
2419 for (i = 0; i < env->nb_watchpoints; i++) {
2420 if (vaddr == env->watchpoint[i].vaddr
2421 && (env->watchpoint[i].type & flags)) {
2422 env->watchpoint_hit = i + 1;
2423 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2424 break;
2425 }
2426 }
2427}
2428
6658ffb8
PB
2429/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2430 so these check for a hit then pass through to the normal out-of-line
2431 phys routines. */
2432static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2433{
0f459d16 2434 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2435 return ldub_phys(addr);
2436}
2437
2438static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2439{
0f459d16 2440 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2441 return lduw_phys(addr);
2442}
2443
2444static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2445{
0f459d16 2446 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2447 return ldl_phys(addr);
2448}
2449
6658ffb8
PB
2450static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2451 uint32_t val)
2452{
0f459d16 2453 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2454 stb_phys(addr, val);
2455}
2456
2457static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2458 uint32_t val)
2459{
0f459d16 2460 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2461 stw_phys(addr, val);
2462}
2463
2464static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2465 uint32_t val)
2466{
0f459d16 2467 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2468 stl_phys(addr, val);
2469}
2470
2471static CPUReadMemoryFunc *watch_mem_read[3] = {
2472 watch_mem_readb,
2473 watch_mem_readw,
2474 watch_mem_readl,
2475};
2476
2477static CPUWriteMemoryFunc *watch_mem_write[3] = {
2478 watch_mem_writeb,
2479 watch_mem_writew,
2480 watch_mem_writel,
2481};
6658ffb8 2482
db7b5426
BS
2483static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2484 unsigned int len)
2485{
db7b5426
BS
2486 uint32_t ret;
2487 unsigned int idx;
2488
2489 idx = SUBPAGE_IDX(addr - mmio->base);
2490#if defined(DEBUG_SUBPAGE)
2491 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2492 mmio, len, addr, idx);
2493#endif
3ee89922 2494 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
db7b5426
BS
2495
2496 return ret;
2497}
2498
2499static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2500 uint32_t value, unsigned int len)
2501{
db7b5426
BS
2502 unsigned int idx;
2503
2504 idx = SUBPAGE_IDX(addr - mmio->base);
2505#if defined(DEBUG_SUBPAGE)
2506 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2507 mmio, len, addr, idx, value);
2508#endif
3ee89922 2509 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
db7b5426
BS
2510}
2511
2512static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2513{
2514#if defined(DEBUG_SUBPAGE)
2515 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2516#endif
2517
2518 return subpage_readlen(opaque, addr, 0);
2519}
2520
2521static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2522 uint32_t value)
2523{
2524#if defined(DEBUG_SUBPAGE)
2525 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2526#endif
2527 subpage_writelen(opaque, addr, value, 0);
2528}
2529
2530static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2531{
2532#if defined(DEBUG_SUBPAGE)
2533 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2534#endif
2535
2536 return subpage_readlen(opaque, addr, 1);
2537}
2538
2539static void subpage_writew (void *opaque, target_phys_addr_t addr,
2540 uint32_t value)
2541{
2542#if defined(DEBUG_SUBPAGE)
2543 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2544#endif
2545 subpage_writelen(opaque, addr, value, 1);
2546}
2547
2548static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2549{
2550#if defined(DEBUG_SUBPAGE)
2551 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2552#endif
2553
2554 return subpage_readlen(opaque, addr, 2);
2555}
2556
2557static void subpage_writel (void *opaque,
2558 target_phys_addr_t addr, uint32_t value)
2559{
2560#if defined(DEBUG_SUBPAGE)
2561 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2562#endif
2563 subpage_writelen(opaque, addr, value, 2);
2564}
2565
2566static CPUReadMemoryFunc *subpage_read[] = {
2567 &subpage_readb,
2568 &subpage_readw,
2569 &subpage_readl,
2570};
2571
2572static CPUWriteMemoryFunc *subpage_write[] = {
2573 &subpage_writeb,
2574 &subpage_writew,
2575 &subpage_writel,
2576};
2577
2578static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a 2579 ram_addr_t memory)
db7b5426
BS
2580{
2581 int idx, eidx;
4254fab8 2582 unsigned int i;
db7b5426
BS
2583
2584 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2585 return -1;
2586 idx = SUBPAGE_IDX(start);
2587 eidx = SUBPAGE_IDX(end);
2588#if defined(DEBUG_SUBPAGE)
2589 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2590 mmio, start, end, idx, eidx, memory);
2591#endif
2592 memory >>= IO_MEM_SHIFT;
2593 for (; idx <= eidx; idx++) {
4254fab8 2594 for (i = 0; i < 4; i++) {
3ee89922
BS
2595 if (io_mem_read[memory][i]) {
2596 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2597 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2598 }
2599 if (io_mem_write[memory][i]) {
2600 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2601 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2602 }
4254fab8 2603 }
db7b5426
BS
2604 }
2605
2606 return 0;
2607}
2608
00f82b8a
AJ
2609static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2610 ram_addr_t orig_memory)
db7b5426
BS
2611{
2612 subpage_t *mmio;
2613 int subpage_memory;
2614
2615 mmio = qemu_mallocz(sizeof(subpage_t));
2616 if (mmio != NULL) {
2617 mmio->base = base;
2618 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2619#if defined(DEBUG_SUBPAGE)
2620 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2621 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2622#endif
2623 *phys = subpage_memory | IO_MEM_SUBPAGE;
2624 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2625 }
2626
2627 return mmio;
2628}
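/* Subpages let a single target page be split between several I/O
   regions: subpage_init() installs a dispatching io zone for the page
   and subpage_register() points the byte range [start, end] at the
   read/write handlers of 'memory'.  cpu_register_physical_memory()
   above creates these automatically whenever a registered region does
   not start or end on a page boundary. */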
2629
33417e70
FB
2630static void io_mem_init(void)
2631{
3a7d929e 2632 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2633 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2634 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2635 io_mem_nb = 5;
2636
0f459d16 2637 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
6658ffb8 2638 watch_mem_write, NULL);
1ccde1cb 2639 /* alloc dirty bits array */
0a962c02 2640 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2641 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2642}
2643
2644/* mem_read and mem_write are arrays of functions containing the
2645 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2646 2). Functions can be omitted with a NULL function pointer. The
2647 registered functions may be modified dynamically later.
 2648 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
2649 modified. If it is zero, a new io zone is allocated. The return
 2650 value can be used with cpu_register_physical_memory(). -1 is
 2651 returned on error. */
33417e70
FB
2652int cpu_register_io_memory(int io_index,
2653 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2654 CPUWriteMemoryFunc **mem_write,
2655 void *opaque)
33417e70 2656{
4254fab8 2657 int i, subwidth = 0;
33417e70
FB
2658
2659 if (io_index <= 0) {
b5ff1b31 2660 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2661 return -1;
2662 io_index = io_mem_nb++;
2663 } else {
2664 if (io_index >= IO_MEM_NB_ENTRIES)
2665 return -1;
2666 }
b5ff1b31 2667
33417e70 2668 for(i = 0;i < 3; i++) {
4254fab8
BS
2669 if (!mem_read[i] || !mem_write[i])
2670 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2671 io_mem_read[io_index][i] = mem_read[i];
2672 io_mem_write[io_index][i] = mem_write[i];
2673 }
a4193c8a 2674 io_mem_opaque[io_index] = opaque;
4254fab8 2675 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2676}
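/* Usage sketch: a device registers byte/word/long MMIO handlers and
   then maps the returned io index into the guest physical address
   space.  The mydev_* callbacks and MYDEV_BASE/MYDEV_SIZE are
   placeholders. */
#if 0
static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void mydev_map(void *opaque)
{
    int io_index;

    io_index = cpu_register_io_memory(0, mydev_read, mydev_write, opaque);
    cpu_register_physical_memory(MYDEV_BASE, MYDEV_SIZE, io_index);
}
#endif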
61382a50 2677
8926b517
FB
2678CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2679{
2680 return io_mem_write[io_index >> IO_MEM_SHIFT];
2681}
2682
2683CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2684{
2685 return io_mem_read[io_index >> IO_MEM_SHIFT];
2686}
2687
e2eef170
PB
2688#endif /* !defined(CONFIG_USER_ONLY) */
2689
13eb76e0
FB
2690/* physical memory access (slow version, mainly for debug) */
2691#if defined(CONFIG_USER_ONLY)
5fafdf24 2692void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2693 int len, int is_write)
2694{
2695 int l, flags;
2696 target_ulong page;
53a5960a 2697 void * p;
13eb76e0
FB
2698
2699 while (len > 0) {
2700 page = addr & TARGET_PAGE_MASK;
2701 l = (page + TARGET_PAGE_SIZE) - addr;
2702 if (l > len)
2703 l = len;
2704 flags = page_get_flags(page);
2705 if (!(flags & PAGE_VALID))
2706 return;
2707 if (is_write) {
2708 if (!(flags & PAGE_WRITE))
2709 return;
579a97f7 2710 /* XXX: this code should not depend on lock_user */
72fb7daa 2711 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2712 /* FIXME - should this return an error rather than just fail? */
2713 return;
72fb7daa
AJ
2714 memcpy(p, buf, l);
2715 unlock_user(p, addr, l);
13eb76e0
FB
2716 } else {
2717 if (!(flags & PAGE_READ))
2718 return;
579a97f7 2719 /* XXX: this code should not depend on lock_user */
72fb7daa 2720 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2721 /* FIXME - should this return an error rather than just fail? */
2722 return;
72fb7daa 2723 memcpy(buf, p, l);
5b257578 2724 unlock_user(p, addr, 0);
13eb76e0
FB
2725 }
2726 len -= l;
2727 buf += l;
2728 addr += l;
2729 }
2730}
8df1cd07 2731
13eb76e0 2732#else
5fafdf24 2733void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2734 int len, int is_write)
2735{
2736 int l, io_index;
2737 uint8_t *ptr;
2738 uint32_t val;
2e12669a
FB
2739 target_phys_addr_t page;
2740 unsigned long pd;
92e873b9 2741 PhysPageDesc *p;
3b46e624 2742
13eb76e0
FB
2743 while (len > 0) {
2744 page = addr & TARGET_PAGE_MASK;
2745 l = (page + TARGET_PAGE_SIZE) - addr;
2746 if (l > len)
2747 l = len;
92e873b9 2748 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2749 if (!p) {
2750 pd = IO_MEM_UNASSIGNED;
2751 } else {
2752 pd = p->phys_offset;
2753 }
3b46e624 2754
13eb76e0 2755 if (is_write) {
3a7d929e 2756 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2757 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2758 /* XXX: could force cpu_single_env to NULL to avoid
2759 potential bugs */
13eb76e0 2760 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2761 /* 32 bit write access */
c27004ec 2762 val = ldl_p(buf);
a4193c8a 2763 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2764 l = 4;
2765 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2766 /* 16 bit write access */
c27004ec 2767 val = lduw_p(buf);
a4193c8a 2768 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2769 l = 2;
2770 } else {
1c213d19 2771 /* 8 bit write access */
c27004ec 2772 val = ldub_p(buf);
a4193c8a 2773 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2774 l = 1;
2775 }
2776 } else {
b448f2f3
FB
2777 unsigned long addr1;
2778 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2779 /* RAM case */
b448f2f3 2780 ptr = phys_ram_base + addr1;
13eb76e0 2781 memcpy(ptr, buf, l);
3a7d929e
FB
2782 if (!cpu_physical_memory_is_dirty(addr1)) {
2783 /* invalidate code */
2784 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2785 /* set dirty bit */
5fafdf24 2786 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2787 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2788 }
13eb76e0
FB
2789 }
2790 } else {
5fafdf24 2791 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2792 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2793 /* I/O case */
2794 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2795 if (l >= 4 && ((addr & 3) == 0)) {
2796 /* 32 bit read access */
a4193c8a 2797 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2798 stl_p(buf, val);
13eb76e0
FB
2799 l = 4;
2800 } else if (l >= 2 && ((addr & 1) == 0)) {
2801 /* 16 bit read access */
a4193c8a 2802 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2803 stw_p(buf, val);
13eb76e0
FB
2804 l = 2;
2805 } else {
1c213d19 2806 /* 8 bit read access */
a4193c8a 2807 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2808 stb_p(buf, val);
13eb76e0
FB
2809 l = 1;
2810 }
2811 } else {
2812 /* RAM case */
5fafdf24 2813 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
2814 (addr & ~TARGET_PAGE_MASK);
2815 memcpy(buf, ptr, l);
2816 }
2817 }
2818 len -= l;
2819 buf += l;
2820 addr += l;
2821 }
2822}
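/* Usage sketch: device emulation normally goes through the
   cpu_physical_memory_read/write wrappers (defined in cpu-all.h)
   rather than calling cpu_physical_memory_rw() directly. */
#if 0
static void dma_copy_to_guest(target_phys_addr_t dst,
                              const uint8_t *src, int len)
{
    /* handles both RAM and MMIO, and keeps the dirty bitmap and
       translated code in sync for RAM pages */
    cpu_physical_memory_write(dst, src, len);
}
#endif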
8df1cd07 2823
d0ecd2aa 2824/* used for ROM loading : can write in RAM and ROM */
5fafdf24 2825void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
2826 const uint8_t *buf, int len)
2827{
2828 int l;
2829 uint8_t *ptr;
2830 target_phys_addr_t page;
2831 unsigned long pd;
2832 PhysPageDesc *p;
3b46e624 2833
d0ecd2aa
FB
2834 while (len > 0) {
2835 page = addr & TARGET_PAGE_MASK;
2836 l = (page + TARGET_PAGE_SIZE) - addr;
2837 if (l > len)
2838 l = len;
2839 p = phys_page_find(page >> TARGET_PAGE_BITS);
2840 if (!p) {
2841 pd = IO_MEM_UNASSIGNED;
2842 } else {
2843 pd = p->phys_offset;
2844 }
3b46e624 2845
d0ecd2aa 2846 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2847 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2848 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2849 /* do nothing */
2850 } else {
2851 unsigned long addr1;
2852 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2853 /* ROM/RAM case */
2854 ptr = phys_ram_base + addr1;
2855 memcpy(ptr, buf, l);
2856 }
2857 len -= l;
2858 buf += l;
2859 addr += l;
2860 }
2861}
2862
2863
8df1cd07
FB
2864/* warning: addr must be aligned */
2865uint32_t ldl_phys(target_phys_addr_t addr)
2866{
2867 int io_index;
2868 uint8_t *ptr;
2869 uint32_t val;
2870 unsigned long pd;
2871 PhysPageDesc *p;
2872
2873 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2874 if (!p) {
2875 pd = IO_MEM_UNASSIGNED;
2876 } else {
2877 pd = p->phys_offset;
2878 }
3b46e624 2879
5fafdf24 2880 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2881 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2882 /* I/O case */
2883 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2884 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2885 } else {
2886 /* RAM case */
5fafdf24 2887 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2888 (addr & ~TARGET_PAGE_MASK);
2889 val = ldl_p(ptr);
2890 }
2891 return val;
2892}
2893
84b7b8e7
FB
2894/* warning: addr must be aligned */
2895uint64_t ldq_phys(target_phys_addr_t addr)
2896{
2897 int io_index;
2898 uint8_t *ptr;
2899 uint64_t val;
2900 unsigned long pd;
2901 PhysPageDesc *p;
2902
2903 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2904 if (!p) {
2905 pd = IO_MEM_UNASSIGNED;
2906 } else {
2907 pd = p->phys_offset;
2908 }
3b46e624 2909
2a4188a3
FB
2910 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2911 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2912 /* I/O case */
2913 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2914#ifdef TARGET_WORDS_BIGENDIAN
2915 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2916 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2917#else
2918 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2919 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2920#endif
2921 } else {
2922 /* RAM case */
5fafdf24 2923 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
2924 (addr & ~TARGET_PAGE_MASK);
2925 val = ldq_p(ptr);
2926 }
2927 return val;
2928}
2929
aab33094
FB
2930/* XXX: optimize */
2931uint32_t ldub_phys(target_phys_addr_t addr)
2932{
2933 uint8_t val;
2934 cpu_physical_memory_read(addr, &val, 1);
2935 return val;
2936}
2937
2938/* XXX: optimize */
2939uint32_t lduw_phys(target_phys_addr_t addr)
2940{
2941 uint16_t val;
2942 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2943 return tswap16(val);
2944}
2945
8df1cd07
FB
2946/* warning: addr must be aligned. The ram page is not masked as dirty
2947 and the code inside is not invalidated. It is useful if the dirty
2948 bits are used to track modified PTEs */
2949void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2950{
2951 int io_index;
2952 uint8_t *ptr;
2953 unsigned long pd;
2954 PhysPageDesc *p;
2955
2956 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2957 if (!p) {
2958 pd = IO_MEM_UNASSIGNED;
2959 } else {
2960 pd = p->phys_offset;
2961 }
3b46e624 2962
3a7d929e 2963 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2964 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2965 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2966 } else {
5fafdf24 2967 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2968 (addr & ~TARGET_PAGE_MASK);
2969 stl_p(ptr, val);
2970 }
2971}
2972
bc98a7ef
JM
2973void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2974{
2975 int io_index;
2976 uint8_t *ptr;
2977 unsigned long pd;
2978 PhysPageDesc *p;
2979
2980 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2981 if (!p) {
2982 pd = IO_MEM_UNASSIGNED;
2983 } else {
2984 pd = p->phys_offset;
2985 }
3b46e624 2986
bc98a7ef
JM
2987 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2988 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2989#ifdef TARGET_WORDS_BIGENDIAN
2990 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2991 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2992#else
2993 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2994 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2995#endif
2996 } else {
5fafdf24 2997 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
2998 (addr & ~TARGET_PAGE_MASK);
2999 stq_p(ptr, val);
3000 }
3001}
3002
8df1cd07 3003/* warning: addr must be aligned */
8df1cd07
FB
3004void stl_phys(target_phys_addr_t addr, uint32_t val)
3005{
3006 int io_index;
3007 uint8_t *ptr;
3008 unsigned long pd;
3009 PhysPageDesc *p;
3010
3011 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3012 if (!p) {
3013 pd = IO_MEM_UNASSIGNED;
3014 } else {
3015 pd = p->phys_offset;
3016 }
3b46e624 3017
3a7d929e 3018 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
3019 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3020 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3021 } else {
3022 unsigned long addr1;
3023 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3024 /* RAM case */
3025 ptr = phys_ram_base + addr1;
3026 stl_p(ptr, val);
3a7d929e
FB
3027 if (!cpu_physical_memory_is_dirty(addr1)) {
3028 /* invalidate code */
3029 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3030 /* set dirty bit */
f23db169
FB
3031 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3032 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3033 }
8df1cd07
FB
3034 }
3035}
3036
aab33094
FB
3037/* XXX: optimize */
3038void stb_phys(target_phys_addr_t addr, uint32_t val)
3039{
3040 uint8_t v = val;
3041 cpu_physical_memory_write(addr, &v, 1);
3042}
3043
3044/* XXX: optimize */
3045void stw_phys(target_phys_addr_t addr, uint32_t val)
3046{
3047 uint16_t v = tswap16(val);
3048 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3049}
3050
3051/* XXX: optimize */
3052void stq_phys(target_phys_addr_t addr, uint64_t val)
3053{
3054 val = tswap64(val);
3055 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3056}
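/* Usage sketch: the fixed-size helpers above are convenient for
   walking in-guest-memory structures such as DMA descriptors.  The
   descriptor layout here is hypothetical. */
#if 0
static void read_dma_descriptor(target_phys_addr_t desc_addr,
                                uint32_t *buf_addr, uint32_t *buf_len)
{
    /* both loads go through ldl_phys(), so they work for RAM and MMIO
       and return the value in target byte order */
    *buf_addr = ldl_phys(desc_addr);
    *buf_len = ldl_phys(desc_addr + 4);
}
#endif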
3057
13eb76e0
FB
3058#endif
3059
3060/* virtual memory access for debug */
5fafdf24 3061int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 3062 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3063{
3064 int l;
9b3c35e0
JM
3065 target_phys_addr_t phys_addr;
3066 target_ulong page;
13eb76e0
FB
3067
3068 while (len > 0) {
3069 page = addr & TARGET_PAGE_MASK;
3070 phys_addr = cpu_get_phys_page_debug(env, page);
3071 /* if no physical page mapped, return an error */
3072 if (phys_addr == -1)
3073 return -1;
3074 l = (page + TARGET_PAGE_SIZE) - addr;
3075 if (l > len)
3076 l = len;
5fafdf24 3077 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 3078 buf, l, is_write);
13eb76e0
FB
3079 len -= l;
3080 buf += l;
3081 addr += l;
3082 }
3083 return 0;
3084}
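/* Usage sketch: the gdb stub and the monitor use this helper to read
   guest virtual memory without faulting the guest.  debug_read_guest_u32
   is an illustrative helper, not part of this file. */
#if 0
static int debug_read_guest_u32(CPUState *env, target_ulong vaddr,
                                uint32_t *value)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return -1; /* no physical page mapped for part of the range */
    *value = ldl_p(buf);
    return 0;
}
#endif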
3085
2e70f6ef
PB
3086/* in deterministic execution mode, instructions doing device I/Os
3087 must be at the end of the TB */
3088void cpu_io_recompile(CPUState *env, void *retaddr)
3089{
3090 TranslationBlock *tb;
3091 uint32_t n, cflags;
3092 target_ulong pc, cs_base;
3093 uint64_t flags;
3094
3095 tb = tb_find_pc((unsigned long)retaddr);
3096 if (!tb) {
3097 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3098 retaddr);
3099 }
3100 n = env->icount_decr.u16.low + tb->icount;
3101 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3102 /* Calculate how many instructions had been executed before the fault
bf20dc07 3103 occurred. */
2e70f6ef
PB
3104 n = n - env->icount_decr.u16.low;
3105 /* Generate a new TB ending on the I/O insn. */
3106 n++;
3107 /* On MIPS and SH, delay slot instructions can only be restarted if
3108 they were already the first instruction in the TB. If this is not
bf20dc07 3109 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
3110 branch. */
3111#if defined(TARGET_MIPS)
3112 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3113 env->active_tc.PC -= 4;
3114 env->icount_decr.u16.low++;
3115 env->hflags &= ~MIPS_HFLAG_BMASK;
3116 }
3117#elif defined(TARGET_SH4)
3118 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3119 && n > 1) {
3120 env->pc -= 2;
3121 env->icount_decr.u16.low++;
3122 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3123 }
3124#endif
3125 /* This should never happen. */
3126 if (n > CF_COUNT_MASK)
3127 cpu_abort(env, "TB too big during recompile");
3128
3129 cflags = n | CF_LAST_IO;
3130 pc = tb->pc;
3131 cs_base = tb->cs_base;
3132 flags = tb->flags;
3133 tb_phys_invalidate(tb, -1);
3134 /* FIXME: In theory this could raise an exception. In practice
3135 we have already translated the block once so it's probably ok. */
3136 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 3137 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
3138 the first in the TB) then we end up generating a whole new TB and
3139 repeating the fault, which is horribly inefficient.
3140 Better would be to execute just this insn uncached, or generate a
3141 second new TB. */
3142 cpu_resume_from_signal(env, NULL);
3143}
3144
e3db7226
FB
3145void dump_exec_info(FILE *f,
3146 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3147{
3148 int i, target_code_size, max_target_code_size;
3149 int direct_jmp_count, direct_jmp2_count, cross_page;
3150 TranslationBlock *tb;
3b46e624 3151
e3db7226
FB
3152 target_code_size = 0;
3153 max_target_code_size = 0;
3154 cross_page = 0;
3155 direct_jmp_count = 0;
3156 direct_jmp2_count = 0;
3157 for(i = 0; i < nb_tbs; i++) {
3158 tb = &tbs[i];
3159 target_code_size += tb->size;
3160 if (tb->size > max_target_code_size)
3161 max_target_code_size = tb->size;
3162 if (tb->page_addr[1] != -1)
3163 cross_page++;
3164 if (tb->tb_next_offset[0] != 0xffff) {
3165 direct_jmp_count++;
3166 if (tb->tb_next_offset[1] != 0xffff) {
3167 direct_jmp2_count++;
3168 }
3169 }
3170 }
3171 /* XXX: avoid using doubles ? */
57fec1fe 3172 cpu_fprintf(f, "Translation buffer state:\n");
26a5f13b
FB
3173 cpu_fprintf(f, "gen code size %ld/%ld\n",
3174 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3175 cpu_fprintf(f, "TB count %d/%d\n",
3176 nb_tbs, code_gen_max_blocks);
5fafdf24 3177 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
3178 nb_tbs ? target_code_size / nb_tbs : 0,
3179 max_target_code_size);
5fafdf24 3180 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
3181 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3182 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
3183 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3184 cross_page,
e3db7226
FB
3185 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3186 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3187 direct_jmp_count,
e3db7226
FB
3188 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3189 direct_jmp2_count,
3190 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3191 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
3192 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3193 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3194 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3195 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
3196}
3197
5fafdf24 3198#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3199
3200#define MMUSUFFIX _cmmu
3201#define GETPC() NULL
3202#define env cpu_single_env
b769d8fe 3203#define SOFTMMU_CODE_ACCESS
61382a50
FB
3204
3205#define SHIFT 0
3206#include "softmmu_template.h"
3207
3208#define SHIFT 1
3209#include "softmmu_template.h"
3210
3211#define SHIFT 2
3212#include "softmmu_template.h"
3213
3214#define SHIFT 3
3215#include "softmmu_template.h"
3216
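/* Each inclusion of softmmu_template.h above instantiates the _cmmu
   ("code access") slow-path load helpers for one access size
   (SHIFT 0..3 = 1, 2, 4 and 8 bytes).  They are used when fetching
   guest instructions through the softmmu TLB. */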
3217#undef env
3218
3219#endif