54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
67b915a5 20#include "config.h"
d5a8f07c 21#ifdef _WIN32
4fddf62a 22#define WIN32_LEAN_AND_MEAN
23#include <windows.h>
24#else
a98d49b1 25#include <sys/types.h>
26#include <sys/mman.h>
27#endif
28#include <stdlib.h>
29#include <stdio.h>
30#include <stdarg.h>
31#include <string.h>
32#include <errno.h>
33#include <unistd.h>
34#include <inttypes.h>
35
36#include "cpu.h"
37#include "exec-all.h"
ca10f867 38#include "qemu-common.h"
b67d9a52 39#include "tcg.h"
b3c7724c 40#include "hw/hw.h"
74576198 41#include "osdep.h"
7ba1e619 42#include "kvm.h"
43#if defined(CONFIG_USER_ONLY)
44#include <qemu.h>
45#endif
54936004 46
fd6ce8f6 47//#define DEBUG_TB_INVALIDATE
66e85a21 48//#define DEBUG_FLUSH
9fa3e853 49//#define DEBUG_TLB
67d3b957 50//#define DEBUG_UNASSIGNED
51
52/* make various TB consistency checks */
53//#define DEBUG_TB_CHECK
54//#define DEBUG_TLB_CHECK
fd6ce8f6 55
1196be37 56//#define DEBUG_IOPORT
db7b5426 57//#define DEBUG_SUBPAGE
1196be37 58
59#if !defined(CONFIG_USER_ONLY)
60/* TB consistency checks only implemented for usermode emulation. */
61#undef DEBUG_TB_CHECK
62#endif
63
64#define SMC_BITMAP_USE_THRESHOLD 10
65
66#define MMAP_AREA_START 0x00000000
67#define MMAP_AREA_END 0xa8000000
fd6ce8f6 68
69#if defined(TARGET_SPARC64)
70#define TARGET_PHYS_ADDR_SPACE_BITS 41
71#elif defined(TARGET_SPARC)
72#define TARGET_PHYS_ADDR_SPACE_BITS 36
73#elif defined(TARGET_ALPHA)
74#define TARGET_PHYS_ADDR_SPACE_BITS 42
75#define TARGET_VIRT_ADDR_SPACE_BITS 42
76#elif defined(TARGET_PPC64)
77#define TARGET_PHYS_ADDR_SPACE_BITS 42
78#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79#define TARGET_PHYS_ADDR_SPACE_BITS 42
80#elif defined(TARGET_I386) && !defined(USE_KQEMU)
81#define TARGET_PHYS_ADDR_SPACE_BITS 36
82#else
83/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84#define TARGET_PHYS_ADDR_SPACE_BITS 32
85#endif
86
bdaf78e0 87static TranslationBlock *tbs;
26a5f13b 88int code_gen_max_blocks;
9fa3e853 89TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
bdaf78e0 90static int nb_tbs;
91/* any access to the tbs or the page table must use this lock */
92spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 93
94#if defined(__arm__) || defined(__sparc_v9__)
95/* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
97 section close to code segment. */
98#define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
101#else
102#define code_gen_section \
103 __attribute__((aligned (32)))
104#endif
105
106uint8_t code_gen_prologue[1024] code_gen_section;
107static uint8_t *code_gen_buffer;
108static unsigned long code_gen_buffer_size;
26a5f13b 109/* threshold to flush the translated code buffer */
bdaf78e0 110static unsigned long code_gen_buffer_max_size;
111uint8_t *code_gen_ptr;
112
e2eef170 113#if !defined(CONFIG_USER_ONLY)
00f82b8a 114ram_addr_t phys_ram_size;
115int phys_ram_fd;
116uint8_t *phys_ram_base;
1ccde1cb 117uint8_t *phys_ram_dirty;
74576198 118static int in_migration;
e9a1ab19 119static ram_addr_t phys_ram_alloc_offset = 0;
e2eef170 120#endif
9fa3e853 121
122CPUState *first_cpu;
123/* current CPU in the current thread. It is only valid inside
124 cpu_exec() */
5fafdf24 125CPUState *cpu_single_env;
2e70f6ef 126/* 0 = Do not count executed instructions.
bf20dc07 127 1 = Precise instruction counting.
128 2 = Adaptive rate instruction counting. */
129int use_icount = 0;
130/* Current instruction counter. While executing translated code this may
131 include some instructions that have not yet been executed. */
132int64_t qemu_icount;
6a00d601 133
54936004 134typedef struct PageDesc {
92e873b9 135 /* list of TBs intersecting this ram page */
fd6ce8f6 136 TranslationBlock *first_tb;
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count;
140 uint8_t *code_bitmap;
141#if defined(CONFIG_USER_ONLY)
142 unsigned long flags;
143#endif
144} PageDesc;
145
92e873b9 146typedef struct PhysPageDesc {
0f459d16 147 /* offset in host memory of the page + io_index in the low bits */
00f82b8a 148 ram_addr_t phys_offset;
149} PhysPageDesc;
150
54936004 151#define L2_BITS 10
152#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
153/* XXX: this is a temporary hack for alpha target.
154 * In the future, this is to be replaced by a multi-level table
155 * to actually be able to handle the complete 64 bits address space.
156 */
157#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
158#else
03875444 159#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
bedb69ea 160#endif
161
162#define L1_SIZE (1 << L1_BITS)
163#define L2_SIZE (1 << L2_BITS)
164
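/* Illustrative note (not part of the original file): a guest page index is
   looked up in two levels using the constants above, roughly the way
   page_l1_map()/page_find() below do it:

       PageDesc **lp = &l1_map[index >> L2_BITS];                  (L1 slot)
       PageDesc *p = *lp ? *lp + (index & (L2_SIZE - 1)) : NULL;   (L2 entry)

   i.e. the upper L1_BITS of the page index select the L1 slot and the low
   L2_BITS select the entry inside that slot's PageDesc array. */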
165unsigned long qemu_real_host_page_size;
166unsigned long qemu_host_page_bits;
167unsigned long qemu_host_page_size;
168unsigned long qemu_host_page_mask;
54936004 169
92e873b9 170/* XXX: for system emulation, it could just be an array */
54936004 171static PageDesc *l1_map[L1_SIZE];
bdaf78e0 172static PhysPageDesc **l1_phys_map;
54936004 173
174#if !defined(CONFIG_USER_ONLY)
175static void io_mem_init(void);
176
33417e70 177/* io memory support */
178CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
179CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 180void *io_mem_opaque[IO_MEM_NB_ENTRIES];
33417e70 181static int io_mem_nb;
182static int io_mem_watch;
183#endif
33417e70 184
34865134 185/* log support */
d9b630fd 186static const char *logfilename = "/tmp/qemu.log";
187FILE *logfile;
188int loglevel;
e735b91c 189static int log_append = 0;
34865134 190
191/* statistics */
192static int tlb_flush_count;
193static int tb_flush_count;
194static int tb_phys_invalidate_count;
195
196#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
197typedef struct subpage_t {
198 target_phys_addr_t base;
199 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
200 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
201 void *opaque[TARGET_PAGE_SIZE][2][4];
202} subpage_t;
203
204#ifdef _WIN32
205static void map_exec(void *addr, long size)
206{
207 DWORD old_protect;
208 VirtualProtect(addr, size,
209 PAGE_EXECUTE_READWRITE, &old_protect);
210
211}
212#else
213static void map_exec(void *addr, long size)
214{
4369415f 215 unsigned long start, end, page_size;
7cb69cae 216
4369415f 217 page_size = getpagesize();
7cb69cae 218 start = (unsigned long)addr;
4369415f 219 start &= ~(page_size - 1);
220
221 end = (unsigned long)addr + size;
222 end += page_size - 1;
223 end &= ~(page_size - 1);
224
225 mprotect((void *)start, end - start,
226 PROT_READ | PROT_WRITE | PROT_EXEC);
227}
228#endif
229
b346ff46 230static void page_init(void)
54936004 231{
83fb7adf 232 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 233 TARGET_PAGE_SIZE */
234#ifdef _WIN32
235 {
236 SYSTEM_INFO system_info;
237
238 GetSystemInfo(&system_info);
239 qemu_real_host_page_size = system_info.dwPageSize;
240 }
241#else
242 qemu_real_host_page_size = getpagesize();
243#endif
244 if (qemu_host_page_size == 0)
245 qemu_host_page_size = qemu_real_host_page_size;
246 if (qemu_host_page_size < TARGET_PAGE_SIZE)
247 qemu_host_page_size = TARGET_PAGE_SIZE;
248 qemu_host_page_bits = 0;
249 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
250 qemu_host_page_bits++;
251 qemu_host_page_mask = ~(qemu_host_page_size - 1);
252 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
253 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
254
255#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
256 {
257 long long startaddr, endaddr;
258 FILE *f;
259 int n;
260
c8a706fe 261 mmap_lock();
0776590d 262 last_brk = (unsigned long)sbrk(0);
263 f = fopen("/proc/self/maps", "r");
264 if (f) {
265 do {
266 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
267 if (n == 2) {
268 startaddr = MIN(startaddr,
269 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
270 endaddr = MIN(endaddr,
271 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
b5fc909e 272 page_set_flags(startaddr & TARGET_PAGE_MASK,
273 TARGET_PAGE_ALIGN(endaddr),
274 PAGE_RESERVED);
275 }
276 } while (!feof(f));
277 fclose(f);
278 }
c8a706fe 279 mmap_unlock();
280 }
281#endif
282}
283
434929bf 284static inline PageDesc **page_l1_map(target_ulong index)
54936004 285{
286#if TARGET_LONG_BITS > 32
287 /* Host memory outside guest VM. For 32-bit targets we have already
288 excluded high addresses. */
d8173e0f 289 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
290 return NULL;
291#endif
292 return &l1_map[index >> L2_BITS];
293}
294
295static inline PageDesc *page_find_alloc(target_ulong index)
296{
297 PageDesc **lp, *p;
298 lp = page_l1_map(index);
299 if (!lp)
300 return NULL;
301
302 p = *lp;
303 if (!p) {
304 /* allocate if not found */
305#if defined(CONFIG_USER_ONLY)
306 unsigned long addr;
307 size_t len = sizeof(PageDesc) * L2_SIZE;
308 /* Don't use qemu_malloc because it may recurse. */
309 p = mmap(0, len, PROT_READ | PROT_WRITE,
310 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
54936004 311 *lp = p;
312 addr = h2g(p);
313 if (addr == (target_ulong)addr) {
314 page_set_flags(addr & TARGET_PAGE_MASK,
315 TARGET_PAGE_ALIGN(addr + len),
316 PAGE_RESERVED);
317 }
318#else
319 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
320 *lp = p;
321#endif
322 }
323 return p + (index & (L2_SIZE - 1));
324}
325
00f82b8a 326static inline PageDesc *page_find(target_ulong index)
54936004 327{
328 PageDesc **lp, *p;
329 lp = page_l1_map(index);
330 if (!lp)
331 return NULL;
54936004 332
434929bf 333 p = *lp;
334 if (!p)
335 return 0;
336 return p + (index & (L2_SIZE - 1));
337}
338
108c49b8 339static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 340{
108c49b8 341 void **lp, **p;
e3f4e2a4 342 PhysPageDesc *pd;
92e873b9 343
344 p = (void **)l1_phys_map;
345#if TARGET_PHYS_ADDR_SPACE_BITS > 32
346
347#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
348#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
349#endif
350 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
351 p = *lp;
352 if (!p) {
353 /* allocate if not found */
354 if (!alloc)
355 return NULL;
356 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
357 memset(p, 0, sizeof(void *) * L1_SIZE);
358 *lp = p;
359 }
360#endif
361 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
362 pd = *lp;
363 if (!pd) {
364 int i;
365 /* allocate if not found */
366 if (!alloc)
367 return NULL;
368 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
369 *lp = pd;
370 for (i = 0; i < L2_SIZE; i++)
371 pd[i].phys_offset = IO_MEM_UNASSIGNED;
92e873b9 372 }
e3f4e2a4 373 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
374}
375
108c49b8 376static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 377{
108c49b8 378 return phys_page_find_alloc(index, 0);
379}
380
9fa3e853 381#if !defined(CONFIG_USER_ONLY)
6a00d601 382static void tlb_protect_code(ram_addr_t ram_addr);
5fafdf24 383static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 384 target_ulong vaddr);
385#define mmap_lock() do { } while(0)
386#define mmap_unlock() do { } while(0)
9fa3e853 387#endif
fd6ce8f6 388
389#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
390
391#if defined(CONFIG_USER_ONLY)
392/* Currently it is not recommended to allocate big chunks of data in
 393 user mode. It will change when a dedicated libc is used */
394#define USE_STATIC_CODE_GEN_BUFFER
395#endif
396
397#ifdef USE_STATIC_CODE_GEN_BUFFER
398static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
399#endif
400
8fcd3692 401static void code_gen_alloc(unsigned long tb_size)
26a5f13b 402{
403#ifdef USE_STATIC_CODE_GEN_BUFFER
404 code_gen_buffer = static_code_gen_buffer;
405 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
406 map_exec(code_gen_buffer, code_gen_buffer_size);
407#else
408 code_gen_buffer_size = tb_size;
409 if (code_gen_buffer_size == 0) {
410#if defined(CONFIG_USER_ONLY)
411 /* in user mode, phys_ram_size is not meaningful */
412 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
413#else
26a5f13b 414 /* XXX: needs adjustments */
174a9a1f 415 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
4369415f 416#endif
417 }
418 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
419 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
420 /* The code gen buffer location may have constraints depending on
421 the host cpu and OS */
422#if defined(__linux__)
423 {
424 int flags;
425 void *start = NULL;
426
427 flags = MAP_PRIVATE | MAP_ANONYMOUS;
428#if defined(__x86_64__)
429 flags |= MAP_32BIT;
430 /* Cannot map more than that */
431 if (code_gen_buffer_size > (800 * 1024 * 1024))
432 code_gen_buffer_size = (800 * 1024 * 1024);
433#elif defined(__sparc_v9__)
434 // Map the buffer below 2G, so we can use direct calls and branches
435 flags |= MAP_FIXED;
436 start = (void *) 0x60000000UL;
437 if (code_gen_buffer_size > (512 * 1024 * 1024))
438 code_gen_buffer_size = (512 * 1024 * 1024);
26a5f13b 439#endif
440 code_gen_buffer = mmap(start, code_gen_buffer_size,
441 PROT_WRITE | PROT_READ | PROT_EXEC,
442 flags, -1, 0);
443 if (code_gen_buffer == MAP_FAILED) {
444 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
445 exit(1);
446 }
447 }
448#elif defined(__FreeBSD__)
449 {
450 int flags;
451 void *addr = NULL;
452 flags = MAP_PRIVATE | MAP_ANONYMOUS;
453#if defined(__x86_64__)
454 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
455 * 0x40000000 is free */
456 flags |= MAP_FIXED;
457 addr = (void *)0x40000000;
458 /* Cannot map more than that */
459 if (code_gen_buffer_size > (800 * 1024 * 1024))
460 code_gen_buffer_size = (800 * 1024 * 1024);
461#endif
462 code_gen_buffer = mmap(addr, code_gen_buffer_size,
463 PROT_WRITE | PROT_READ | PROT_EXEC,
464 flags, -1, 0);
465 if (code_gen_buffer == MAP_FAILED) {
466 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
467 exit(1);
468 }
469 }
470#else
471 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
472 if (!code_gen_buffer) {
473 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
474 exit(1);
475 }
476 map_exec(code_gen_buffer, code_gen_buffer_size);
477#endif
4369415f 478#endif /* !USE_STATIC_CODE_GEN_BUFFER */
479 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
480 code_gen_buffer_max_size = code_gen_buffer_size -
481 code_gen_max_block_size();
482 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
483 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
484}
485
486/* Must be called before using the QEMU cpus. 'tb_size' is the size
487 (in bytes) allocated to the translation buffer. Zero means default
488 size. */
489void cpu_exec_init_all(unsigned long tb_size)
490{
491 cpu_gen_init();
492 code_gen_alloc(tb_size);
493 code_gen_ptr = code_gen_buffer;
4369415f 494 page_init();
e2eef170 495#if !defined(CONFIG_USER_ONLY)
26a5f13b 496 io_mem_init();
e2eef170 497#endif
498}
499
500#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
501
502#define CPU_COMMON_SAVE_VERSION 1
503
504static void cpu_common_save(QEMUFile *f, void *opaque)
505{
506 CPUState *env = opaque;
507
508 qemu_put_be32s(f, &env->halted);
509 qemu_put_be32s(f, &env->interrupt_request);
510}
511
512static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
513{
514 CPUState *env = opaque;
515
516 if (version_id != CPU_COMMON_SAVE_VERSION)
517 return -EINVAL;
518
519 qemu_get_be32s(f, &env->halted);
75f482ae 520 qemu_get_be32s(f, &env->interrupt_request);
521 tlb_flush(env, 1);
522
523 return 0;
524}
525#endif
526
6a00d601 527void cpu_exec_init(CPUState *env)
fd6ce8f6 528{
529 CPUState **penv;
530 int cpu_index;
531
532 env->next_cpu = NULL;
533 penv = &first_cpu;
534 cpu_index = 0;
535 while (*penv != NULL) {
536 penv = (CPUState **)&(*penv)->next_cpu;
537 cpu_index++;
538 }
539 env->cpu_index = cpu_index;
6658ffb8 540 env->nb_watchpoints = 0;
6a00d601 541 *penv = env;
b3c7724c 542#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
543 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
544 cpu_common_save, cpu_common_load, env);
545 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
546 cpu_save, cpu_load, env);
547#endif
548}
549
550static inline void invalidate_page_bitmap(PageDesc *p)
551{
552 if (p->code_bitmap) {
59817ccb 553 qemu_free(p->code_bitmap);
554 p->code_bitmap = NULL;
555 }
556 p->code_write_count = 0;
557}
558
559/* set to NULL all the 'first_tb' fields in all PageDescs */
560static void page_flush_tb(void)
561{
562 int i, j;
563 PageDesc *p;
564
565 for(i = 0; i < L1_SIZE; i++) {
566 p = l1_map[i];
567 if (p) {
568 for(j = 0; j < L2_SIZE; j++) {
569 p->first_tb = NULL;
570 invalidate_page_bitmap(p);
571 p++;
572 }
573 }
574 }
575}
576
577/* flush all the translation blocks */
d4e8164f 578/* XXX: tb_flush is currently not thread safe */
6a00d601 579void tb_flush(CPUState *env1)
fd6ce8f6 580{
6a00d601 581 CPUState *env;
0124311e 582#if defined(DEBUG_FLUSH)
583 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
584 (unsigned long)(code_gen_ptr - code_gen_buffer),
585 nb_tbs, nb_tbs > 0 ?
586 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 587#endif
26a5f13b 588 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
589 cpu_abort(env1, "Internal error: code buffer overflow\n");
590
fd6ce8f6 591 nb_tbs = 0;
3b46e624 592
593 for(env = first_cpu; env != NULL; env = env->next_cpu) {
594 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
595 }
9fa3e853 596
8a8a608f 597 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 598 page_flush_tb();
9fa3e853 599
fd6ce8f6 600 code_gen_ptr = code_gen_buffer;
601 /* XXX: flush processor icache at this point if cache flush is
602 expensive */
e3db7226 603 tb_flush_count++;
604}
605
606#ifdef DEBUG_TB_CHECK
607
bc98a7ef 608static void tb_invalidate_check(target_ulong address)
609{
610 TranslationBlock *tb;
611 int i;
612 address &= TARGET_PAGE_MASK;
613 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
614 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
615 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
616 address >= tb->pc + tb->size)) {
617 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 618 address, (long)tb->pc, tb->size);
619 }
620 }
621 }
622}
623
624/* verify that all the pages have correct rights for code */
625static void tb_page_check(void)
626{
627 TranslationBlock *tb;
628 int i, flags1, flags2;
3b46e624 629
630 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
631 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
632 flags1 = page_get_flags(tb->pc);
633 flags2 = page_get_flags(tb->pc + tb->size - 1);
634 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
635 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 636 (long)tb->pc, tb->size, flags1, flags2);
637 }
638 }
639 }
640}
641
bdaf78e0 642static void tb_jmp_check(TranslationBlock *tb)
643{
644 TranslationBlock *tb1;
645 unsigned int n1;
646
647 /* suppress any remaining jumps to this TB */
648 tb1 = tb->jmp_first;
649 for(;;) {
650 n1 = (long)tb1 & 3;
651 tb1 = (TranslationBlock *)((long)tb1 & ~3);
652 if (n1 == 2)
653 break;
654 tb1 = tb1->jmp_next[n1];
655 }
656 /* check end of list */
657 if (tb1 != tb) {
658 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
659 }
660}
661
662#endif
663
664/* invalidate one TB */
665static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
666 int next_offset)
667{
668 TranslationBlock *tb1;
669 for(;;) {
670 tb1 = *ptb;
671 if (tb1 == tb) {
672 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
673 break;
674 }
675 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
676 }
677}
678
679static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
680{
681 TranslationBlock *tb1;
682 unsigned int n1;
683
684 for(;;) {
685 tb1 = *ptb;
686 n1 = (long)tb1 & 3;
687 tb1 = (TranslationBlock *)((long)tb1 & ~3);
688 if (tb1 == tb) {
689 *ptb = tb1->page_next[n1];
690 break;
691 }
692 ptb = &tb1->page_next[n1];
693 }
694}
695
696static inline void tb_jmp_remove(TranslationBlock *tb, int n)
697{
698 TranslationBlock *tb1, **ptb;
699 unsigned int n1;
700
701 ptb = &tb->jmp_next[n];
702 tb1 = *ptb;
703 if (tb1) {
704 /* find tb(n) in circular list */
705 for(;;) {
706 tb1 = *ptb;
707 n1 = (long)tb1 & 3;
708 tb1 = (TranslationBlock *)((long)tb1 & ~3);
709 if (n1 == n && tb1 == tb)
710 break;
711 if (n1 == 2) {
712 ptb = &tb1->jmp_first;
713 } else {
714 ptb = &tb1->jmp_next[n1];
715 }
716 }
717 /* now we can suppress tb(n) from the list */
718 *ptb = tb->jmp_next[n];
719
720 tb->jmp_next[n] = NULL;
721 }
722}
723
724/* reset the jump entry 'n' of a TB so that it is not chained to
725 another TB */
726static inline void tb_reset_jump(TranslationBlock *tb, int n)
727{
728 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
729}
730
2e70f6ef 731void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
fd6ce8f6 732{
6a00d601 733 CPUState *env;
8a40a180 734 PageDesc *p;
d4e8164f 735 unsigned int h, n1;
00f82b8a 736 target_phys_addr_t phys_pc;
8a40a180 737 TranslationBlock *tb1, *tb2;
3b46e624 738
739 /* remove the TB from the hash list */
740 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
741 h = tb_phys_hash_func(phys_pc);
5fafdf24 742 tb_remove(&tb_phys_hash[h], tb,
743 offsetof(TranslationBlock, phys_hash_next));
744
745 /* remove the TB from the page list */
746 if (tb->page_addr[0] != page_addr) {
747 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
748 tb_page_remove(&p->first_tb, tb);
749 invalidate_page_bitmap(p);
750 }
751 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
752 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
753 tb_page_remove(&p->first_tb, tb);
754 invalidate_page_bitmap(p);
755 }
756
36bdbe54 757 tb_invalidated_flag = 1;
59817ccb 758
fd6ce8f6 759 /* remove the TB from the hash list */
8a40a180 760 h = tb_jmp_cache_hash_func(tb->pc);
761 for(env = first_cpu; env != NULL; env = env->next_cpu) {
762 if (env->tb_jmp_cache[h] == tb)
763 env->tb_jmp_cache[h] = NULL;
764 }
765
766 /* suppress this TB from the two jump lists */
767 tb_jmp_remove(tb, 0);
768 tb_jmp_remove(tb, 1);
769
770 /* suppress any remaining jumps to this TB */
771 tb1 = tb->jmp_first;
772 for(;;) {
773 n1 = (long)tb1 & 3;
774 if (n1 == 2)
775 break;
776 tb1 = (TranslationBlock *)((long)tb1 & ~3);
777 tb2 = tb1->jmp_next[n1];
778 tb_reset_jump(tb1, n1);
779 tb1->jmp_next[n1] = NULL;
780 tb1 = tb2;
781 }
782 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 783
e3db7226 784 tb_phys_invalidate_count++;
785}
786
787static inline void set_bits(uint8_t *tab, int start, int len)
788{
789 int end, mask, end1;
790
791 end = start + len;
792 tab += start >> 3;
793 mask = 0xff << (start & 7);
794 if ((start & ~7) == (end & ~7)) {
795 if (start < end) {
796 mask &= ~(0xff << (end & 7));
797 *tab |= mask;
798 }
799 } else {
800 *tab++ |= mask;
801 start = (start + 8) & ~7;
802 end1 = end & ~7;
803 while (start < end1) {
804 *tab++ = 0xff;
805 start += 8;
806 }
807 if (start < end) {
808 mask = ~(0xff << (end & 7));
809 *tab |= mask;
810 }
811 }
812}
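/* Illustrative note (not part of the original file): set_bits() marks the
   half-open bit range [start, start + len) in a byte array, least
   significant bit first.  For example, set_bits(tab, 3, 7) sets bits 3..9,
   i.e. tab[0] |= 0xf8 and tab[1] |= 0x03.  build_page_bitmap() below calls
   it with one bit per byte of the guest page covered by a TB. */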
813
814static void build_page_bitmap(PageDesc *p)
815{
816 int n, tb_start, tb_end;
817 TranslationBlock *tb;
3b46e624 818
b2a7081a 819 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
820 if (!p->code_bitmap)
821 return;
822
823 tb = p->first_tb;
824 while (tb != NULL) {
825 n = (long)tb & 3;
826 tb = (TranslationBlock *)((long)tb & ~3);
827 /* NOTE: this is subtle as a TB may span two physical pages */
828 if (n == 0) {
829 /* NOTE: tb_end may be after the end of the page, but
830 it is not a problem */
831 tb_start = tb->pc & ~TARGET_PAGE_MASK;
832 tb_end = tb_start + tb->size;
833 if (tb_end > TARGET_PAGE_SIZE)
834 tb_end = TARGET_PAGE_SIZE;
835 } else {
836 tb_start = 0;
837 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
838 }
839 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
840 tb = tb->page_next[n];
841 }
842}
843
844TranslationBlock *tb_gen_code(CPUState *env,
845 target_ulong pc, target_ulong cs_base,
846 int flags, int cflags)
847{
848 TranslationBlock *tb;
849 uint8_t *tc_ptr;
850 target_ulong phys_pc, phys_page2, virt_page2;
851 int code_gen_size;
852
853 phys_pc = get_phys_addr_code(env, pc);
854 tb = tb_alloc(pc);
855 if (!tb) {
856 /* flush must be done */
857 tb_flush(env);
858 /* cannot fail at this point */
c27004ec 859 tb = tb_alloc(pc);
860 /* Don't forget to invalidate previous TB info. */
861 tb_invalidated_flag = 1;
862 }
863 tc_ptr = code_gen_ptr;
864 tb->tc_ptr = tc_ptr;
865 tb->cs_base = cs_base;
866 tb->flags = flags;
867 tb->cflags = cflags;
d07bde88 868 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 869 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 870
d720b93d 871 /* check next page if needed */
c27004ec 872 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 873 phys_page2 = -1;
c27004ec 874 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
875 phys_page2 = get_phys_addr_code(env, virt_page2);
876 }
877 tb_link_phys(tb, phys_pc, phys_page2);
2e70f6ef 878 return tb;
d720b93d 879}
3b46e624 880
881/* invalidate all TBs which intersect with the target physical page
882 starting in range [start;end[. NOTE: start and end must refer to
883 the same physical page. 'is_cpu_write_access' should be true if called
884 from a real cpu write access: the virtual CPU will exit the current
885 TB if code is modified inside this TB. */
00f82b8a 886void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
887 int is_cpu_write_access)
888{
6b917547 889 TranslationBlock *tb, *tb_next, *saved_tb;
d720b93d 890 CPUState *env = cpu_single_env;
9fa3e853 891 target_ulong tb_start, tb_end;
892 PageDesc *p;
893 int n;
894#ifdef TARGET_HAS_PRECISE_SMC
895 int current_tb_not_found = is_cpu_write_access;
896 TranslationBlock *current_tb = NULL;
897 int current_tb_modified = 0;
898 target_ulong current_pc = 0;
899 target_ulong current_cs_base = 0;
900 int current_flags = 0;
901#endif /* TARGET_HAS_PRECISE_SMC */
902
903 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 904 if (!p)
9fa3e853 905 return;
5fafdf24 906 if (!p->code_bitmap &&
907 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
908 is_cpu_write_access) {
909 /* build code bitmap */
910 build_page_bitmap(p);
911 }
912
913 /* we remove all the TBs in the range [start, end[ */
914 /* XXX: see if in some cases it could be faster to invalidate all the code */
915 tb = p->first_tb;
916 while (tb != NULL) {
917 n = (long)tb & 3;
918 tb = (TranslationBlock *)((long)tb & ~3);
919 tb_next = tb->page_next[n];
920 /* NOTE: this is subtle as a TB may span two physical pages */
921 if (n == 0) {
922 /* NOTE: tb_end may be after the end of the page, but
923 it is not a problem */
924 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
925 tb_end = tb_start + tb->size;
926 } else {
927 tb_start = tb->page_addr[1];
928 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
929 }
930 if (!(tb_end <= start || tb_start >= end)) {
931#ifdef TARGET_HAS_PRECISE_SMC
932 if (current_tb_not_found) {
933 current_tb_not_found = 0;
934 current_tb = NULL;
2e70f6ef 935 if (env->mem_io_pc) {
d720b93d 936 /* now we have a real cpu fault */
2e70f6ef 937 current_tb = tb_find_pc(env->mem_io_pc);
938 }
939 }
940 if (current_tb == tb &&
2e70f6ef 941 (current_tb->cflags & CF_COUNT_MASK) != 1) {
942 /* If we are modifying the current TB, we must stop
943 its execution. We could be more precise by checking
944 that the modification is after the current PC, but it
945 would require a specialized function to partially
946 restore the CPU state */
3b46e624 947
d720b93d 948 current_tb_modified = 1;
5fafdf24 949 cpu_restore_state(current_tb, env,
2e70f6ef 950 env->mem_io_pc, NULL);
951 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
952 &current_flags);
953 }
954#endif /* TARGET_HAS_PRECISE_SMC */
955 /* we need to do that to handle the case where a signal
956 occurs while doing tb_phys_invalidate() */
957 saved_tb = NULL;
958 if (env) {
959 saved_tb = env->current_tb;
960 env->current_tb = NULL;
961 }
9fa3e853 962 tb_phys_invalidate(tb, -1);
963 if (env) {
964 env->current_tb = saved_tb;
965 if (env->interrupt_request && env->current_tb)
966 cpu_interrupt(env, env->interrupt_request);
967 }
968 }
969 tb = tb_next;
970 }
971#if !defined(CONFIG_USER_ONLY)
972 /* if no code remaining, no need to continue to use slow writes */
973 if (!p->first_tb) {
974 invalidate_page_bitmap(p);
d720b93d 975 if (is_cpu_write_access) {
2e70f6ef 976 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
977 }
978 }
979#endif
980#ifdef TARGET_HAS_PRECISE_SMC
981 if (current_tb_modified) {
982 /* we generate a block containing just the instruction
983 modifying the memory. It will ensure that it cannot modify
984 itself */
ea1c1802 985 env->current_tb = NULL;
2e70f6ef 986 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 987 cpu_resume_from_signal(env, NULL);
9fa3e853 988 }
fd6ce8f6 989#endif
9fa3e853 990}
fd6ce8f6 991
9fa3e853 992/* len must be <= 8 and start must be a multiple of len */
00f82b8a 993static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
994{
995 PageDesc *p;
996 int offset, b;
59817ccb 997#if 0
998 if (1) {
999 if (loglevel) {
5fafdf24 1000 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
2e70f6ef 1001 cpu_single_env->mem_io_vaddr, len,
5fafdf24 1002 cpu_single_env->eip,
1003 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1004 }
1005 }
1006#endif
9fa3e853 1007 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1008 if (!p)
1009 return;
1010 if (p->code_bitmap) {
1011 offset = start & ~TARGET_PAGE_MASK;
1012 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1013 if (b & ((1 << len) - 1))
1014 goto do_invalidate;
1015 } else {
1016 do_invalidate:
d720b93d 1017 tb_invalidate_phys_page_range(start, start + len, 1);
1018 }
1019}
1020
9fa3e853 1021#if !defined(CONFIG_SOFTMMU)
00f82b8a 1022static void tb_invalidate_phys_page(target_phys_addr_t addr,
d720b93d 1023 unsigned long pc, void *puc)
9fa3e853 1024{
6b917547 1025 TranslationBlock *tb;
9fa3e853 1026 PageDesc *p;
6b917547 1027 int n;
d720b93d 1028#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1029 TranslationBlock *current_tb = NULL;
d720b93d 1030 CPUState *env = cpu_single_env;
1031 int current_tb_modified = 0;
1032 target_ulong current_pc = 0;
1033 target_ulong current_cs_base = 0;
1034 int current_flags = 0;
d720b93d 1035#endif
1036
1037 addr &= TARGET_PAGE_MASK;
1038 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1039 if (!p)
9fa3e853
FB
1040 return;
1041 tb = p->first_tb;
1042#ifdef TARGET_HAS_PRECISE_SMC
1043 if (tb && pc != 0) {
1044 current_tb = tb_find_pc(pc);
1045 }
1046#endif
1047 while (tb != NULL) {
1048 n = (long)tb & 3;
1049 tb = (TranslationBlock *)((long)tb & ~3);
1050#ifdef TARGET_HAS_PRECISE_SMC
1051 if (current_tb == tb &&
2e70f6ef 1052 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1053 /* If we are modifying the current TB, we must stop
1054 its execution. We could be more precise by checking
1055 that the modification is after the current PC, but it
1056 would require a specialized function to partially
1057 restore the CPU state */
3b46e624 1058
1059 current_tb_modified = 1;
1060 cpu_restore_state(current_tb, env, pc, puc);
1061 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1062 &current_flags);
1063 }
1064#endif /* TARGET_HAS_PRECISE_SMC */
1065 tb_phys_invalidate(tb, addr);
1066 tb = tb->page_next[n];
1067 }
fd6ce8f6 1068 p->first_tb = NULL;
1069#ifdef TARGET_HAS_PRECISE_SMC
1070 if (current_tb_modified) {
1071 /* we generate a block containing just the instruction
1072 modifying the memory. It will ensure that it cannot modify
1073 itself */
ea1c1802 1074 env->current_tb = NULL;
2e70f6ef 1075 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1076 cpu_resume_from_signal(env, puc);
1077 }
1078#endif
fd6ce8f6 1079}
9fa3e853 1080#endif
1081
1082/* add the tb in the target page and protect it if necessary */
5fafdf24 1083static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 1084 unsigned int n, target_ulong page_addr)
1085{
1086 PageDesc *p;
1087 TranslationBlock *last_first_tb;
1088
1089 tb->page_addr[n] = page_addr;
3a7d929e 1090 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1091 tb->page_next[n] = p->first_tb;
1092 last_first_tb = p->first_tb;
1093 p->first_tb = (TranslationBlock *)((long)tb | n);
1094 invalidate_page_bitmap(p);
fd6ce8f6 1095
107db443 1096#if defined(TARGET_HAS_SMC) || 1
d720b93d 1097
9fa3e853 1098#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1099 if (p->flags & PAGE_WRITE) {
1100 target_ulong addr;
1101 PageDesc *p2;
1102 int prot;
1103
1104 /* force the host page as non writable (writes will have a
1105 page fault + mprotect overhead) */
53a5960a 1106 page_addr &= qemu_host_page_mask;
fd6ce8f6 1107 prot = 0;
1108 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1109 addr += TARGET_PAGE_SIZE) {
1110
1111 p2 = page_find (addr >> TARGET_PAGE_BITS);
1112 if (!p2)
1113 continue;
1114 prot |= p2->flags;
1115 p2->flags &= ~PAGE_WRITE;
1116 page_get_flags(addr);
1117 }
5fafdf24 1118 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1119 (prot & PAGE_BITS) & ~PAGE_WRITE);
1120#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1121 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1122 page_addr);
fd6ce8f6 1123#endif
fd6ce8f6 1124 }
1125#else
1126 /* if some code is already present, then the pages are already
1127 protected. So we handle the case where only the first TB is
1128 allocated in a physical page */
1129 if (!last_first_tb) {
6a00d601 1130 tlb_protect_code(page_addr);
1131 }
1132#endif
1133
1134#endif /* TARGET_HAS_SMC */
1135}
1136
1137/* Allocate a new translation block. Flush the translation buffer if
1138 too many translation blocks or too much generated code. */
c27004ec 1139TranslationBlock *tb_alloc(target_ulong pc)
1140{
1141 TranslationBlock *tb;
fd6ce8f6 1142
1143 if (nb_tbs >= code_gen_max_blocks ||
1144 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
d4e8164f 1145 return NULL;
1146 tb = &tbs[nb_tbs++];
1147 tb->pc = pc;
b448f2f3 1148 tb->cflags = 0;
1149 return tb;
1150}
1151
1152void tb_free(TranslationBlock *tb)
1153{
bf20dc07 1154 /* In practice this is mostly used for single-use temporary TBs.
1155 Ignore the hard cases and just back up if this TB happens to
1156 be the last one generated. */
1157 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1158 code_gen_ptr = tb->tc_ptr;
1159 nb_tbs--;
1160 }
1161}
1162
1163/* add a new TB and link it to the physical page tables. phys_page2 is
1164 (-1) to indicate that only one page contains the TB. */
5fafdf24 1165void tb_link_phys(TranslationBlock *tb,
9fa3e853 1166 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 1167{
1168 unsigned int h;
1169 TranslationBlock **ptb;
1170
1171 /* Grab the mmap lock to stop another thread invalidating this TB
1172 before we are done. */
1173 mmap_lock();
1174 /* add in the physical hash table */
1175 h = tb_phys_hash_func(phys_pc);
1176 ptb = &tb_phys_hash[h];
1177 tb->phys_hash_next = *ptb;
1178 *ptb = tb;
1179
1180 /* add in the page list */
1181 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1182 if (phys_page2 != -1)
1183 tb_alloc_page(tb, 1, phys_page2);
1184 else
1185 tb->page_addr[1] = -1;
9fa3e853 1186
1187 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1188 tb->jmp_next[0] = NULL;
1189 tb->jmp_next[1] = NULL;
1190
1191 /* init original jump addresses */
1192 if (tb->tb_next_offset[0] != 0xffff)
1193 tb_reset_jump(tb, 0);
1194 if (tb->tb_next_offset[1] != 0xffff)
1195 tb_reset_jump(tb, 1);
1196
1197#ifdef DEBUG_TB_CHECK
1198 tb_page_check();
1199#endif
c8a706fe 1200 mmap_unlock();
1201}
1202
1203/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1204 tb[1].tc_ptr. Return NULL if not found */
1205TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1206{
1207 int m_min, m_max, m;
1208 unsigned long v;
1209 TranslationBlock *tb;
1210
1211 if (nb_tbs <= 0)
1212 return NULL;
1213 if (tc_ptr < (unsigned long)code_gen_buffer ||
1214 tc_ptr >= (unsigned long)code_gen_ptr)
1215 return NULL;
1216 /* binary search (cf Knuth) */
1217 m_min = 0;
1218 m_max = nb_tbs - 1;
1219 while (m_min <= m_max) {
1220 m = (m_min + m_max) >> 1;
1221 tb = &tbs[m];
1222 v = (unsigned long)tb->tc_ptr;
1223 if (v == tc_ptr)
1224 return tb;
1225 else if (tc_ptr < v) {
1226 m_max = m - 1;
1227 } else {
1228 m_min = m + 1;
1229 }
5fafdf24 1230 }
1231 return &tbs[m_max];
1232}
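/* Illustrative note (not part of the original file): the binary search above
   relies on tbs[] being filled in order of increasing tc_ptr (code_gen_ptr
   only grows between flushes), so when tc_ptr falls inside a block rather
   than on its first byte, &tbs[m_max] is the TB with the largest tc_ptr
   still <= tc_ptr, i.e. the block containing that host address. */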
7501267e 1233
1234static void tb_reset_jump_recursive(TranslationBlock *tb);
1235
1236static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1237{
1238 TranslationBlock *tb1, *tb_next, **ptb;
1239 unsigned int n1;
1240
1241 tb1 = tb->jmp_next[n];
1242 if (tb1 != NULL) {
1243 /* find head of list */
1244 for(;;) {
1245 n1 = (long)tb1 & 3;
1246 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1247 if (n1 == 2)
1248 break;
1249 tb1 = tb1->jmp_next[n1];
1250 }
1251 /* we are now sure now that tb jumps to tb1 */
1252 tb_next = tb1;
1253
1254 /* remove tb from the jmp_first list */
1255 ptb = &tb_next->jmp_first;
1256 for(;;) {
1257 tb1 = *ptb;
1258 n1 = (long)tb1 & 3;
1259 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1260 if (n1 == n && tb1 == tb)
1261 break;
1262 ptb = &tb1->jmp_next[n1];
1263 }
1264 *ptb = tb->jmp_next[n];
1265 tb->jmp_next[n] = NULL;
3b46e624 1266
1267 /* suppress the jump to next tb in generated code */
1268 tb_reset_jump(tb, n);
1269
0124311e 1270 /* suppress jumps in the tb on which we could have jumped */
1271 tb_reset_jump_recursive(tb_next);
1272 }
1273}
1274
1275static void tb_reset_jump_recursive(TranslationBlock *tb)
1276{
1277 tb_reset_jump_recursive2(tb, 0);
1278 tb_reset_jump_recursive2(tb, 1);
1279}
1280
1fddef4b 1281#if defined(TARGET_HAS_ICE)
1282static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1283{
1284 target_phys_addr_t addr;
1285 target_ulong pd;
1286 ram_addr_t ram_addr;
1287 PhysPageDesc *p;
d720b93d 1288
1289 addr = cpu_get_phys_page_debug(env, pc);
1290 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1291 if (!p) {
1292 pd = IO_MEM_UNASSIGNED;
1293 } else {
1294 pd = p->phys_offset;
1295 }
1296 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1297 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1298}
c27004ec 1299#endif
d720b93d 1300
6658ffb8 1301/* Add a watchpoint. */
0f459d16 1302int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1303{
1304 int i;
1305
1306 for (i = 0; i < env->nb_watchpoints; i++) {
1307 if (addr == env->watchpoint[i].vaddr)
1308 return 0;
1309 }
1310 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1311 return -1;
1312
1313 i = env->nb_watchpoints++;
1314 env->watchpoint[i].vaddr = addr;
0f459d16 1315 env->watchpoint[i].type = type;
1316 tlb_flush_page(env, addr);
1317 /* FIXME: This flush is needed because of the hack to make memory ops
1318 terminate the TB. It can be removed once the proper IO trap and
1319 re-execute bits are in. */
1320 tb_flush(env);
1321 return i;
1322}
1323
1324/* Remove a watchpoint. */
1325int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1326{
1327 int i;
1328
1329 for (i = 0; i < env->nb_watchpoints; i++) {
1330 if (addr == env->watchpoint[i].vaddr) {
1331 env->nb_watchpoints--;
1332 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1333 tlb_flush_page(env, addr);
1334 return 0;
1335 }
1336 }
1337 return -1;
1338}
1339
1340/* Remove all watchpoints. */
1341void cpu_watchpoint_remove_all(CPUState *env) {
1342 int i;
1343
1344 for (i = 0; i < env->nb_watchpoints; i++) {
1345 tlb_flush_page(env, env->watchpoint[i].vaddr);
1346 }
1347 env->nb_watchpoints = 0;
1348}
1349
1350/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1351 breakpoint is reached */
2e12669a 1352int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
4c3a88a2 1353{
1fddef4b 1354#if defined(TARGET_HAS_ICE)
4c3a88a2 1355 int i;
3b46e624 1356
1357 for(i = 0; i < env->nb_breakpoints; i++) {
1358 if (env->breakpoints[i] == pc)
1359 return 0;
1360 }
1361
1362 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1363 return -1;
1364 env->breakpoints[env->nb_breakpoints++] = pc;
3b46e624 1365
d720b93d 1366 breakpoint_invalidate(env, pc);
1367 return 0;
1368#else
1369 return -1;
1370#endif
1371}
1372
1373/* remove all breakpoints */
1374void cpu_breakpoint_remove_all(CPUState *env) {
1375#if defined(TARGET_HAS_ICE)
1376 int i;
1377 for(i = 0; i < env->nb_breakpoints; i++) {
1378 breakpoint_invalidate(env, env->breakpoints[i]);
1379 }
1380 env->nb_breakpoints = 0;
1381#endif
1382}
1383
4c3a88a2 1384/* remove a breakpoint */
2e12669a 1385int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
4c3a88a2 1386{
1fddef4b 1387#if defined(TARGET_HAS_ICE)
1388 int i;
1389 for(i = 0; i < env->nb_breakpoints; i++) {
1390 if (env->breakpoints[i] == pc)
1391 goto found;
1392 }
1393 return -1;
1394 found:
4c3a88a2 1395 env->nb_breakpoints--;
1396 if (i < env->nb_breakpoints)
1397 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1398
1399 breakpoint_invalidate(env, pc);
1400 return 0;
1401#else
1402 return -1;
1403#endif
1404}
1405
1406/* enable or disable single step mode. EXCP_DEBUG is returned by the
1407 CPU loop after each instruction */
1408void cpu_single_step(CPUState *env, int enabled)
1409{
1fddef4b 1410#if defined(TARGET_HAS_ICE)
1411 if (env->singlestep_enabled != enabled) {
1412 env->singlestep_enabled = enabled;
1413 /* must flush all the translated code to avoid inconsistencies */
9fa3e853 1414 /* XXX: only flush what is necessary */
0124311e 1415 tb_flush(env);
1416 }
1417#endif
1418}
1419
1420/* enable or disable low levels log */
1421void cpu_set_log(int log_flags)
1422{
1423 loglevel = log_flags;
1424 if (loglevel && !logfile) {
11fcfab4 1425 logfile = fopen(logfilename, log_append ? "a" : "w");
1426 if (!logfile) {
1427 perror(logfilename);
1428 _exit(1);
1429 }
1430#if !defined(CONFIG_SOFTMMU)
1431 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1432 {
b55266b5 1433 static char logfile_buf[4096];
1434 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1435 }
1436#else
34865134 1437 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1438#endif
1439 log_append = 1;
1440 }
1441 if (!loglevel && logfile) {
1442 fclose(logfile);
1443 logfile = NULL;
1444 }
1445}
1446
1447void cpu_set_log_filename(const char *filename)
1448{
1449 logfilename = strdup(filename);
1450 if (logfile) {
1451 fclose(logfile);
1452 logfile = NULL;
1453 }
1454 cpu_set_log(loglevel);
34865134 1455}
c33a346e 1456
0124311e 1457/* mask must never be zero, except for A20 change call */
68a79315 1458void cpu_interrupt(CPUState *env, int mask)
ea041c0e 1459{
d5975363 1460#if !defined(USE_NPTL)
ea041c0e 1461 TranslationBlock *tb;
15a51156 1462 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
d5975363 1463#endif
2e70f6ef 1464 int old_mask;
59817ccb 1465
2e70f6ef 1466 old_mask = env->interrupt_request;
d5975363 1467 /* FIXME: This is probably not threadsafe. A different thread could
bf20dc07 1468 be in the middle of a read-modify-write operation. */
68a79315 1469 env->interrupt_request |= mask;
1470#if defined(USE_NPTL)
1471 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1472 problem and hope the cpu will stop of its own accord. For userspace
1473 emulation this often isn't actually as bad as it sounds. Often
1474 signals are used primarily to interrupt blocking syscalls. */
1475#else
2e70f6ef 1476 if (use_icount) {
266910c4 1477 env->icount_decr.u16.high = 0xffff;
1478#ifndef CONFIG_USER_ONLY
1479 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1480 an async event happened and we need to process it. */
1481 if (!can_do_io(env)
1482 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1483 cpu_abort(env, "Raised interrupt while not in I/O function");
1484 }
1485#endif
1486 } else {
1487 tb = env->current_tb;
1488 /* if the cpu is currently executing code, we must unlink it and
1489 all the potentially executing TB */
1490 if (tb && !testandset(&interrupt_lock)) {
1491 env->current_tb = NULL;
1492 tb_reset_jump_recursive(tb);
1493 resetlock(&interrupt_lock);
1494 }
ea041c0e 1495 }
d5975363 1496#endif
1497}
1498
1499void cpu_reset_interrupt(CPUState *env, int mask)
1500{
1501 env->interrupt_request &= ~mask;
1502}
1503
c7cd6a37 1504const CPULogItem cpu_log_items[] = {
5fafdf24 1505 { CPU_LOG_TB_OUT_ASM, "out_asm",
1506 "show generated host assembly code for each compiled TB" },
1507 { CPU_LOG_TB_IN_ASM, "in_asm",
1508 "show target assembly code for each compiled TB" },
5fafdf24 1509 { CPU_LOG_TB_OP, "op",
57fec1fe 1510 "show micro ops for each compiled TB" },
f193c797 1511 { CPU_LOG_TB_OP_OPT, "op_opt",
1512 "show micro ops "
1513#ifdef TARGET_I386
1514 "before eflags optimization and "
f193c797 1515#endif
e01a1157 1516 "after liveness analysis" },
1517 { CPU_LOG_INT, "int",
1518 "show interrupts/exceptions in short format" },
1519 { CPU_LOG_EXEC, "exec",
1520 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1521 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1522 "show CPU state before block translation" },
1523#ifdef TARGET_I386
1524 { CPU_LOG_PCALL, "pcall",
1525 "show protected mode far calls/returns/exceptions" },
1526#endif
8e3a9fd2 1527#ifdef DEBUG_IOPORT
1528 { CPU_LOG_IOPORT, "ioport",
1529 "show all i/o ports accesses" },
8e3a9fd2 1530#endif
1531 { 0, NULL, NULL },
1532};
1533
1534static int cmp1(const char *s1, int n, const char *s2)
1535{
1536 if (strlen(s2) != n)
1537 return 0;
1538 return memcmp(s1, s2, n) == 0;
1539}
3b46e624 1540
1541/* takes a comma separated list of log masks. Return 0 if error. */
1542int cpu_str_to_log_mask(const char *str)
1543{
c7cd6a37 1544 const CPULogItem *item;
1545 int mask;
1546 const char *p, *p1;
1547
1548 p = str;
1549 mask = 0;
1550 for(;;) {
1551 p1 = strchr(p, ',');
1552 if (!p1)
1553 p1 = p + strlen(p);
1554 if(cmp1(p,p1-p,"all")) {
1555 for(item = cpu_log_items; item->mask != 0; item++) {
1556 mask |= item->mask;
1557 }
1558 } else {
1559 for(item = cpu_log_items; item->mask != 0; item++) {
1560 if (cmp1(p, p1 - p, item->name))
1561 goto found;
1562 }
1563 return 0;
8e3a9fd2 1564 }
1565 found:
1566 mask |= item->mask;
1567 if (*p1 != ',')
1568 break;
1569 p = p1 + 1;
1570 }
1571 return mask;
1572}
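/* Illustrative note (not part of the original file): this parses the
   comma-separated list of log item names given on the command line
   (QEMU's -d option), e.g. cpu_str_to_log_mask("in_asm,op") yields
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, and "all" sets every mask listed
   in cpu_log_items[]. */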
ea041c0e 1573
1574void cpu_abort(CPUState *env, const char *fmt, ...)
1575{
1576 va_list ap;
493ae1f0 1577 va_list ap2;
1578
1579 va_start(ap, fmt);
493ae1f0 1580 va_copy(ap2, ap);
1581 fprintf(stderr, "qemu: fatal: ");
1582 vfprintf(stderr, fmt, ap);
1583 fprintf(stderr, "\n");
1584#ifdef TARGET_I386
1585 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1586#else
1587 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1588#endif
924edcae 1589 if (logfile) {
f9373291 1590 fprintf(logfile, "qemu: fatal: ");
493ae1f0 1591 vfprintf(logfile, fmt, ap2);
1592 fprintf(logfile, "\n");
1593#ifdef TARGET_I386
1594 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1595#else
1596 cpu_dump_state(env, logfile, fprintf, 0);
1597#endif
1598 fflush(logfile);
1599 fclose(logfile);
1600 }
493ae1f0 1601 va_end(ap2);
f9373291 1602 va_end(ap);
1603 abort();
1604}
1605
1606CPUState *cpu_copy(CPUState *env)
1607{
01ba9816 1608 CPUState *new_env = cpu_init(env->cpu_model_str);
1609 /* preserve chaining and index */
1610 CPUState *next_cpu = new_env->next_cpu;
1611 int cpu_index = new_env->cpu_index;
1612 memcpy(new_env, env, sizeof(CPUState));
1613 new_env->next_cpu = next_cpu;
1614 new_env->cpu_index = cpu_index;
1615 return new_env;
1616}
1617
1618#if !defined(CONFIG_USER_ONLY)
1619
1620static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1621{
1622 unsigned int i;
1623
1624 /* Discard jump cache entries for any tb which might potentially
1625 overlap the flushed page. */
1626 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1627 memset (&env->tb_jmp_cache[i], 0,
1628 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1629
1630 i = tb_jmp_cache_hash_page(addr);
1631 memset (&env->tb_jmp_cache[i], 0,
1632 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1633}
1634
1635/* NOTE: if flush_global is true, also flush global entries (not
1636 implemented yet) */
1637void tlb_flush(CPUState *env, int flush_global)
33417e70 1638{
33417e70 1639 int i;
0124311e 1640
1641#if defined(DEBUG_TLB)
1642 printf("tlb_flush:\n");
1643#endif
1644 /* must reset current TB so that interrupts cannot modify the
1645 links while we are modifying them */
1646 env->current_tb = NULL;
1647
33417e70 1648 for(i = 0; i < CPU_TLB_SIZE; i++) {
1649 env->tlb_table[0][i].addr_read = -1;
1650 env->tlb_table[0][i].addr_write = -1;
1651 env->tlb_table[0][i].addr_code = -1;
1652 env->tlb_table[1][i].addr_read = -1;
1653 env->tlb_table[1][i].addr_write = -1;
1654 env->tlb_table[1][i].addr_code = -1;
1655#if (NB_MMU_MODES >= 3)
1656 env->tlb_table[2][i].addr_read = -1;
1657 env->tlb_table[2][i].addr_write = -1;
1658 env->tlb_table[2][i].addr_code = -1;
1659#if (NB_MMU_MODES == 4)
1660 env->tlb_table[3][i].addr_read = -1;
1661 env->tlb_table[3][i].addr_write = -1;
1662 env->tlb_table[3][i].addr_code = -1;
1663#endif
1664#endif
33417e70 1665 }
9fa3e853 1666
8a40a180 1667 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1668
1669#ifdef USE_KQEMU
1670 if (env->kqemu_enabled) {
1671 kqemu_flush(env, flush_global);
1672 }
9fa3e853 1673#endif
e3db7226 1674 tlb_flush_count++;
1675}
1676
274da6b2 1677static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1678{
5fafdf24 1679 if (addr == (tlb_entry->addr_read &
84b7b8e7 1680 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1681 addr == (tlb_entry->addr_write &
84b7b8e7 1682 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1683 addr == (tlb_entry->addr_code &
1684 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1685 tlb_entry->addr_read = -1;
1686 tlb_entry->addr_write = -1;
1687 tlb_entry->addr_code = -1;
1688 }
1689}
1690
2e12669a 1691void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1692{
8a40a180 1693 int i;
0124311e 1694
9fa3e853 1695#if defined(DEBUG_TLB)
108c49b8 1696 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1697#endif
1698 /* must reset current TB so that interrupts cannot modify the
1699 links while we are modifying them */
1700 env->current_tb = NULL;
1701
1702 addr &= TARGET_PAGE_MASK;
1703 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1704 tlb_flush_entry(&env->tlb_table[0][i], addr);
1705 tlb_flush_entry(&env->tlb_table[1][i], addr);
1706#if (NB_MMU_MODES >= 3)
1707 tlb_flush_entry(&env->tlb_table[2][i], addr);
1708#if (NB_MMU_MODES == 4)
1709 tlb_flush_entry(&env->tlb_table[3][i], addr);
1710#endif
1711#endif
0124311e 1712
5c751e99 1713 tlb_flush_jmp_cache(env, addr);
9fa3e853 1714
1715#ifdef USE_KQEMU
1716 if (env->kqemu_enabled) {
1717 kqemu_flush_page(env, addr);
1718 }
1719#endif
1720}
1721
1722/* update the TLBs so that writes to code in the virtual page 'addr'
1723 can be detected */
6a00d601 1724static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1725{
5fafdf24 1726 cpu_physical_memory_reset_dirty(ram_addr,
1727 ram_addr + TARGET_PAGE_SIZE,
1728 CODE_DIRTY_FLAG);
1729}
1730
9fa3e853 1731/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1732 tested for self modifying code */
5fafdf24 1733static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1734 target_ulong vaddr)
9fa3e853 1735{
3a7d929e 1736 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1737}
1738
5fafdf24 1739static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1740 unsigned long start, unsigned long length)
1741{
1742 unsigned long addr;
1743 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1744 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1745 if ((addr - start) < length) {
0f459d16 1746 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1747 }
1748 }
1749}
1750
3a7d929e 1751void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1752 int dirty_flags)
1753{
1754 CPUState *env;
4f2ac237 1755 unsigned long length, start1;
0a962c02
FB
1756 int i, mask, len;
1757 uint8_t *p;
1ccde1cb
FB
1758
1759 start &= TARGET_PAGE_MASK;
1760 end = TARGET_PAGE_ALIGN(end);
1761
1762 length = end - start;
1763 if (length == 0)
1764 return;
0a962c02 1765 len = length >> TARGET_PAGE_BITS;
3a7d929e 1766#ifdef USE_KQEMU
6a00d601
FB
1767 /* XXX: should not depend on cpu context */
1768 env = first_cpu;
3a7d929e 1769 if (env->kqemu_enabled) {
f23db169
FB
1770 ram_addr_t addr;
1771 addr = start;
1772 for(i = 0; i < len; i++) {
1773 kqemu_set_notdirty(env, addr);
1774 addr += TARGET_PAGE_SIZE;
1775 }
3a7d929e
FB
1776 }
1777#endif
f23db169
FB
1778 mask = ~dirty_flags;
1779 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1780 for(i = 0; i < len; i++)
1781 p[i] &= mask;
1782
1ccde1cb
FB
1783 /* we modify the TLB cache so that the dirty bit will be set again
1784 when accessing the range */
59817ccb 1785 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1786 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1787 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1788 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1789 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1790 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1791#if (NB_MMU_MODES >= 3)
1792 for(i = 0; i < CPU_TLB_SIZE; i++)
1793 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1794#if (NB_MMU_MODES == 4)
1795 for(i = 0; i < CPU_TLB_SIZE; i++)
1796 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1797#endif
1798#endif
6a00d601 1799 }
1ccde1cb
FB
1800}
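/* Illustrative sketch (not part of exec.c): the dirty "bitmap" is really
   one byte of flag bits per target page, so testing or clearing a flag
   for a single ram_addr_t is a shift and a mask, mirroring what
   cpu_physical_memory_reset_dirty does over a whole range above. */
static inline int sketch_page_dirty(ram_addr_t addr, int dirty_flag)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flag;
}

static inline void sketch_page_clear_dirty(ram_addr_t addr, int dirty_flags)
{
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] &= ~dirty_flags;
}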
1801
74576198
AL
1802int cpu_physical_memory_set_dirty_tracking(int enable)
1803{
1804 in_migration = enable;
1805 return 0;
1806}
1807
1808int cpu_physical_memory_get_dirty_tracking(void)
1809{
1810 return in_migration;
1811}
1812
3a7d929e
FB
1813static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1814{
1815 ram_addr_t ram_addr;
1816
84b7b8e7 1817 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1818 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1819 tlb_entry->addend - (unsigned long)phys_ram_base;
1820 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 1821 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
1822 }
1823 }
1824}
1825
1826/* update the TLB according to the current state of the dirty bits */
1827void cpu_tlb_update_dirty(CPUState *env)
1828{
1829 int i;
1830 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1831 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1832 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1833 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1834#if (NB_MMU_MODES >= 3)
1835 for(i = 0; i < CPU_TLB_SIZE; i++)
1836 tlb_update_dirty(&env->tlb_table[2][i]);
1837#if (NB_MMU_MODES == 4)
1838 for(i = 0; i < CPU_TLB_SIZE; i++)
1839 tlb_update_dirty(&env->tlb_table[3][i]);
1840#endif
1841#endif
3a7d929e
FB
1842}
1843
0f459d16 1844static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 1845{
0f459d16
PB
1846 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1847 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
1848}
1849
0f459d16
PB
1850/* update the TLB corresponding to virtual page vaddr
1851 so that it is no longer dirty */
1852static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 1853{
1ccde1cb
FB
1854 int i;
1855
0f459d16 1856 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 1857 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
0f459d16
PB
1858 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1859 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
6fa4cea9 1860#if (NB_MMU_MODES >= 3)
0f459d16 1861 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
6fa4cea9 1862#if (NB_MMU_MODES == 4)
0f459d16 1863 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
6fa4cea9
JM
1864#endif
1865#endif
9fa3e853
FB
1866}
1867
59817ccb
FB
1868/* add a new TLB entry. At most one entry for a given virtual address
1869 is permitted. Return 0 if OK or 2 if the page could not be mapped
1870 (can only happen in non SOFTMMU mode for I/O pages or pages
1871 conflicting with the host address space). */
5fafdf24
TS
1872int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1873 target_phys_addr_t paddr, int prot,
6ebbf390 1874 int mmu_idx, int is_softmmu)
9fa3e853 1875{
92e873b9 1876 PhysPageDesc *p;
4f2ac237 1877 unsigned long pd;
9fa3e853 1878 unsigned int index;
4f2ac237 1879 target_ulong address;
0f459d16 1880 target_ulong code_address;
108c49b8 1881 target_phys_addr_t addend;
9fa3e853 1882 int ret;
84b7b8e7 1883 CPUTLBEntry *te;
6658ffb8 1884 int i;
0f459d16 1885 target_phys_addr_t iotlb;
9fa3e853 1886
92e873b9 1887 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1888 if (!p) {
1889 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1890 } else {
1891 pd = p->phys_offset;
9fa3e853
FB
1892 }
1893#if defined(DEBUG_TLB)
6ebbf390
JM
1894 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1895 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1896#endif
1897
1898 ret = 0;
0f459d16
PB
1899 address = vaddr;
1900 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1901 /* IO memory case (romd handled later) */
1902 address |= TLB_MMIO;
1903 }
1904 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1905 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1906 /* Normal RAM. */
1907 iotlb = pd & TARGET_PAGE_MASK;
1908 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1909 iotlb |= IO_MEM_NOTDIRTY;
1910 else
1911 iotlb |= IO_MEM_ROM;
1912 } else {
 1913 /* IO handlers are currently passed a physical address.
1914 It would be nice to pass an offset from the base address
1915 of that region. This would avoid having to special case RAM,
1916 and avoid full address decoding in every device.
1917 We can't use the high bits of pd for this because
1918 IO_MEM_ROMD uses these as a ram address. */
1919 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1920 }
1921
1922 code_address = address;
1923 /* Make accesses to pages with watchpoints go via the
1924 watchpoint trap routines. */
1925 for (i = 0; i < env->nb_watchpoints; i++) {
1926 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1927 iotlb = io_mem_watch + paddr;
1928 /* TODO: The memory case can be optimized by not trapping
1929 reads of pages with a write breakpoint. */
1930 address |= TLB_MMIO;
6658ffb8 1931 }
0f459d16 1932 }
d79acba4 1933
0f459d16
PB
1934 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1935 env->iotlb[mmu_idx][index] = iotlb - vaddr;
1936 te = &env->tlb_table[mmu_idx][index];
1937 te->addend = addend - vaddr;
1938 if (prot & PAGE_READ) {
1939 te->addr_read = address;
1940 } else {
1941 te->addr_read = -1;
1942 }
5c751e99 1943
0f459d16
PB
1944 if (prot & PAGE_EXEC) {
1945 te->addr_code = code_address;
1946 } else {
1947 te->addr_code = -1;
1948 }
1949 if (prot & PAGE_WRITE) {
1950 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1951 (pd & IO_MEM_ROMD)) {
1952 /* Write access calls the I/O callback. */
1953 te->addr_write = address | TLB_MMIO;
1954 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1955 !cpu_physical_memory_is_dirty(pd)) {
1956 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 1957 } else {
0f459d16 1958 te->addr_write = address;
9fa3e853 1959 }
0f459d16
PB
1960 } else {
1961 te->addr_write = -1;
9fa3e853 1962 }
9fa3e853
FB
1963 return ret;
1964}
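/* Illustrative sketch (not part of exec.c): once tlb_set_page_exec has
   filled a CPUTLBEntry, a guest load can be served without walking the
   page tables again.  For a plain RAM page (no TLB_MMIO/TLB_NOTDIRTY
   flag bits in addr_read, vaddr page aligned) the fast path reduces to a
   tag compare and a pointer add via the stored addend; anything else
   falls back to the slow path (I/O callbacks, notdirty, watchpoints).
   This is a simplified probe, assuming the conditions above hold. */
static inline uint8_t *sketch_tlb_read_probe(CPUState *env, int mmu_idx,
                                             target_ulong addr)
{
    int slot = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *te = &env->tlb_table[mmu_idx][slot];

    if (te->addr_read == (addr & TARGET_PAGE_MASK)) {
        /* RAM hit: addend converts the guest virtual address to a host
           pointer into phys_ram_base. */
        return (uint8_t *)(unsigned long)(addr + te->addend);
    }
    return NULL; /* miss or special page: take the slow path */
}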
1965
0124311e
FB
1966#else
1967
ee8b7021 1968void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1969{
1970}
1971
2e12669a 1972void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1973{
1974}
1975
5fafdf24
TS
1976int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1977 target_phys_addr_t paddr, int prot,
6ebbf390 1978 int mmu_idx, int is_softmmu)
9fa3e853
FB
1979{
1980 return 0;
1981}
0124311e 1982
9fa3e853
FB
1983/* dump memory mappings */
1984void page_dump(FILE *f)
33417e70 1985{
9fa3e853
FB
1986 unsigned long start, end;
1987 int i, j, prot, prot1;
1988 PageDesc *p;
33417e70 1989
9fa3e853
FB
1990 fprintf(f, "%-8s %-8s %-8s %s\n",
1991 "start", "end", "size", "prot");
1992 start = -1;
1993 end = -1;
1994 prot = 0;
1995 for(i = 0; i <= L1_SIZE; i++) {
1996 if (i < L1_SIZE)
1997 p = l1_map[i];
1998 else
1999 p = NULL;
2000 for(j = 0;j < L2_SIZE; j++) {
2001 if (!p)
2002 prot1 = 0;
2003 else
2004 prot1 = p[j].flags;
2005 if (prot1 != prot) {
2006 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2007 if (start != -1) {
2008 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 2009 start, end, end - start,
9fa3e853
FB
2010 prot & PAGE_READ ? 'r' : '-',
2011 prot & PAGE_WRITE ? 'w' : '-',
2012 prot & PAGE_EXEC ? 'x' : '-');
2013 }
2014 if (prot1 != 0)
2015 start = end;
2016 else
2017 start = -1;
2018 prot = prot1;
2019 }
2020 if (!p)
2021 break;
2022 }
33417e70 2023 }
33417e70
FB
2024}
2025
53a5960a 2026int page_get_flags(target_ulong address)
33417e70 2027{
9fa3e853
FB
2028 PageDesc *p;
2029
2030 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2031 if (!p)
9fa3e853
FB
2032 return 0;
2033 return p->flags;
2034}
2035
2036/* modify the flags of a page and invalidate the code if
 2037 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2038 depending on PAGE_WRITE */
53a5960a 2039void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
2040{
2041 PageDesc *p;
53a5960a 2042 target_ulong addr;
9fa3e853 2043
c8a706fe 2044 /* mmap_lock should already be held. */
9fa3e853
FB
2045 start = start & TARGET_PAGE_MASK;
2046 end = TARGET_PAGE_ALIGN(end);
2047 if (flags & PAGE_WRITE)
2048 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
2049 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2050 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
17e2377a
PB
2051 /* We may be called for host regions that are outside guest
2052 address space. */
2053 if (!p)
2054 return;
9fa3e853
FB
2055 /* if the write protection is set, then we invalidate the code
2056 inside */
5fafdf24 2057 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2058 (flags & PAGE_WRITE) &&
2059 p->first_tb) {
d720b93d 2060 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2061 }
2062 p->flags = flags;
2063 }
33417e70
FB
2064}
2065
3d97b40b
TS
2066int page_check_range(target_ulong start, target_ulong len, int flags)
2067{
2068 PageDesc *p;
2069 target_ulong end;
2070 target_ulong addr;
2071
55f280c9
AZ
2072 if (start + len < start)
2073 /* we've wrapped around */
2074 return -1;
2075
3d97b40b
TS
 2076 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2077 start = start & TARGET_PAGE_MASK;
2078
3d97b40b
TS
2079 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2080 p = page_find(addr >> TARGET_PAGE_BITS);
2081 if( !p )
2082 return -1;
2083 if( !(p->flags & PAGE_VALID) )
2084 return -1;
2085
dae3270c 2086 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2087 return -1;
dae3270c
FB
2088 if (flags & PAGE_WRITE) {
2089 if (!(p->flags & PAGE_WRITE_ORG))
2090 return -1;
2091 /* unprotect the page if it was put read-only because it
2092 contains translated code */
2093 if (!(p->flags & PAGE_WRITE)) {
2094 if (!page_unprotect(addr, 0, NULL))
2095 return -1;
2096 }
2097 return 0;
2098 }
3d97b40b
TS
2099 }
2100 return 0;
2101}
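/* Illustrative sketch (not part of exec.c): in user-mode emulation the
   syscall layer validates guest buffers with page_check_range() before
   touching them; an access_ok()-style helper built on it would look
   roughly like this (helper name is hypothetical). */
static int sketch_access_ok(int is_write, target_ulong addr, target_ulong len)
{
    /* page_check_range() returns 0 when all pages are valid and allow
       the requested access, -1 otherwise. */
    return page_check_range(addr, len,
                            is_write ? PAGE_WRITE : PAGE_READ) == 0;
}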
2102
9fa3e853
FB
2103/* called from signal handler: invalidate the code and unprotect the
 2104 page. Return TRUE if the fault was successfully handled. */
53a5960a 2105int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2106{
2107 unsigned int page_index, prot, pindex;
2108 PageDesc *p, *p1;
53a5960a 2109 target_ulong host_start, host_end, addr;
9fa3e853 2110
c8a706fe
PB
2111 /* Technically this isn't safe inside a signal handler. However we
2112 know this only ever happens in a synchronous SEGV handler, so in
2113 practice it seems to be ok. */
2114 mmap_lock();
2115
83fb7adf 2116 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2117 page_index = host_start >> TARGET_PAGE_BITS;
2118 p1 = page_find(page_index);
c8a706fe
PB
2119 if (!p1) {
2120 mmap_unlock();
9fa3e853 2121 return 0;
c8a706fe 2122 }
83fb7adf 2123 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2124 p = p1;
2125 prot = 0;
2126 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2127 prot |= p->flags;
2128 p++;
2129 }
2130 /* if the page was really writable, then we change its
2131 protection back to writable */
2132 if (prot & PAGE_WRITE_ORG) {
2133 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2134 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2135 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2136 (prot & PAGE_BITS) | PAGE_WRITE);
2137 p1[pindex].flags |= PAGE_WRITE;
2138 /* and since the content will be modified, we must invalidate
2139 the corresponding translated code. */
d720b93d 2140 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2141#ifdef DEBUG_TB_CHECK
2142 tb_invalidate_check(address);
2143#endif
c8a706fe 2144 mmap_unlock();
9fa3e853
FB
2145 return 1;
2146 }
2147 }
c8a706fe 2148 mmap_unlock();
9fa3e853
FB
2149 return 0;
2150}
2151
6a00d601
FB
2152static inline void tlb_set_dirty(CPUState *env,
2153 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2154{
2155}
9fa3e853
FB
2156#endif /* defined(CONFIG_USER_ONLY) */
2157
e2eef170 2158#if !defined(CONFIG_USER_ONLY)
db7b5426 2159static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a
AJ
2160 ram_addr_t memory);
2161static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2162 ram_addr_t orig_memory);
db7b5426
BS
2163#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2164 need_subpage) \
2165 do { \
2166 if (addr > start_addr) \
2167 start_addr2 = 0; \
2168 else { \
2169 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2170 if (start_addr2 > 0) \
2171 need_subpage = 1; \
2172 } \
2173 \
49e9fba2 2174 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2175 end_addr2 = TARGET_PAGE_SIZE - 1; \
2176 else { \
2177 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2178 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2179 need_subpage = 1; \
2180 } \
2181 } while (0)
2182
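/* Illustrative worked example (not part of exec.c), assuming 4 KiB target
   pages: for a region registered 0x100 bytes into a page with
   orig_size = 0x200, the first loop iteration has addr == start_addr, so
   CHECK_SUBPAGE yields start_addr2 = 0x100, end_addr2 = 0x2ff and
   need_subpage = 1; cpu_register_physical_memory then routes only that
   byte range of the page through a subpage container instead of mapping
   the whole page. */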
33417e70
FB
2183/* register physical memory. 'size' must be a multiple of the target
2184 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2185 io memory page */
5fafdf24 2186void cpu_register_physical_memory(target_phys_addr_t start_addr,
00f82b8a
AJ
2187 ram_addr_t size,
2188 ram_addr_t phys_offset)
33417e70 2189{
108c49b8 2190 target_phys_addr_t addr, end_addr;
92e873b9 2191 PhysPageDesc *p;
9d42037b 2192 CPUState *env;
00f82b8a 2193 ram_addr_t orig_size = size;
db7b5426 2194 void *subpage;
33417e70 2195
da260249
FB
2196#ifdef USE_KQEMU
2197 /* XXX: should not depend on cpu context */
2198 env = first_cpu;
2199 if (env->kqemu_enabled) {
2200 kqemu_set_phys_mem(start_addr, size, phys_offset);
2201 }
2202#endif
7ba1e619
AL
2203 if (kvm_enabled())
2204 kvm_set_phys_mem(start_addr, size, phys_offset);
2205
5fd386f6 2206 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2207 end_addr = start_addr + (target_phys_addr_t)size;
2208 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2209 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2210 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2211 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2212 target_phys_addr_t start_addr2, end_addr2;
2213 int need_subpage = 0;
2214
2215 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2216 need_subpage);
4254fab8 2217 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2218 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2219 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2220 &p->phys_offset, orig_memory);
2221 } else {
2222 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2223 >> IO_MEM_SHIFT];
2224 }
2225 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2226 } else {
2227 p->phys_offset = phys_offset;
2228 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2229 (phys_offset & IO_MEM_ROMD))
2230 phys_offset += TARGET_PAGE_SIZE;
2231 }
2232 } else {
2233 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2234 p->phys_offset = phys_offset;
2235 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2236 (phys_offset & IO_MEM_ROMD))
2237 phys_offset += TARGET_PAGE_SIZE;
2238 else {
2239 target_phys_addr_t start_addr2, end_addr2;
2240 int need_subpage = 0;
2241
2242 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2243 end_addr2, need_subpage);
2244
4254fab8 2245 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2246 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2247 &p->phys_offset, IO_MEM_UNASSIGNED);
2248 subpage_register(subpage, start_addr2, end_addr2,
2249 phys_offset);
2250 }
2251 }
2252 }
33417e70 2253 }
3b46e624 2254
9d42037b
FB
2255 /* since each CPU stores ram addresses in its TLB cache, we must
2256 reset the modified entries */
2257 /* XXX: slow ! */
2258 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2259 tlb_flush(env, 1);
2260 }
33417e70
FB
2261}
2262
ba863458 2263/* XXX: temporary until new memory mapping API */
00f82b8a 2264ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2265{
2266 PhysPageDesc *p;
2267
2268 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2269 if (!p)
2270 return IO_MEM_UNASSIGNED;
2271 return p->phys_offset;
2272}
2273
e9a1ab19 2274/* XXX: better than nothing */
00f82b8a 2275ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2276{
2277 ram_addr_t addr;
7fb4fdcf 2278 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
012a7045 2279 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
ed441467 2280 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2281 abort();
2282 }
2283 addr = phys_ram_alloc_offset;
2284 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2285 return addr;
2286}
2287
2288void qemu_ram_free(ram_addr_t addr)
2289{
2290}
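/* Illustrative sketch (not part of exec.c): how board code of this era
   typically wires up guest-physical memory.  qemu_ram_alloc() returns an
   offset into phys_ram_base; cpu_register_physical_memory() then maps it
   at a guest physical address.  ROM pages carry IO_MEM_ROM so that guest
   stores are ignored.  The base addresses below are made up. */
static void sketch_map_memory(ram_addr_t ram_size, ram_addr_t rom_size)
{
    ram_addr_t ram_offset, rom_offset;

    ram_offset = qemu_ram_alloc(ram_size);
    cpu_register_physical_memory(0x00000000 /* hypothetical RAM base */,
                                 ram_size, ram_offset | IO_MEM_RAM);

    rom_offset = qemu_ram_alloc(rom_size);
    cpu_register_physical_memory(0xfff00000 /* hypothetical ROM base */,
                                 rom_size, rom_offset | IO_MEM_ROM);
}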
2291
a4193c8a 2292static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2293{
67d3b957 2294#ifdef DEBUG_UNASSIGNED
ab3d1727 2295 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 2296#endif
e18231a3
BS
2297#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2298 do_unassigned_access(addr, 0, 0, 0, 1);
2299#endif
2300 return 0;
2301}
2302
2303static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2304{
2305#ifdef DEBUG_UNASSIGNED
2306 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2307#endif
2308#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2309 do_unassigned_access(addr, 0, 0, 0, 2);
2310#endif
2311 return 0;
2312}
2313
2314static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2315{
2316#ifdef DEBUG_UNASSIGNED
2317 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2318#endif
2319#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2320 do_unassigned_access(addr, 0, 0, 0, 4);
67d3b957 2321#endif
33417e70
FB
2322 return 0;
2323}
2324
a4193c8a 2325static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2326{
67d3b957 2327#ifdef DEBUG_UNASSIGNED
ab3d1727 2328 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2329#endif
e18231a3
BS
2330#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2331 do_unassigned_access(addr, 1, 0, 0, 1);
2332#endif
2333}
2334
2335static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2336{
2337#ifdef DEBUG_UNASSIGNED
2338 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2339#endif
2340#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2341 do_unassigned_access(addr, 1, 0, 0, 2);
2342#endif
2343}
2344
2345static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2346{
2347#ifdef DEBUG_UNASSIGNED
2348 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2349#endif
2350#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2351 do_unassigned_access(addr, 1, 0, 0, 4);
b4f0a316 2352#endif
33417e70
FB
2353}
2354
2355static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2356 unassigned_mem_readb,
e18231a3
BS
2357 unassigned_mem_readw,
2358 unassigned_mem_readl,
33417e70
FB
2359};
2360
2361static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2362 unassigned_mem_writeb,
e18231a3
BS
2363 unassigned_mem_writew,
2364 unassigned_mem_writel,
33417e70
FB
2365};
2366
0f459d16
PB
2367static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2368 uint32_t val)
9fa3e853 2369{
3a7d929e 2370 int dirty_flags;
3a7d929e
FB
2371 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2372 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2373#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2374 tb_invalidate_phys_page_fast(ram_addr, 1);
2375 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2376#endif
3a7d929e 2377 }
0f459d16 2378 stb_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2379#ifdef USE_KQEMU
2380 if (cpu_single_env->kqemu_enabled &&
2381 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2382 kqemu_modify_page(cpu_single_env, ram_addr);
2383#endif
f23db169
FB
2384 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2385 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2386 /* we remove the notdirty callback only if the code has been
2387 flushed */
2388 if (dirty_flags == 0xff)
2e70f6ef 2389 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2390}
2391
0f459d16
PB
2392static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2393 uint32_t val)
9fa3e853 2394{
3a7d929e 2395 int dirty_flags;
3a7d929e
FB
2396 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2397 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2398#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2399 tb_invalidate_phys_page_fast(ram_addr, 2);
2400 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2401#endif
3a7d929e 2402 }
0f459d16 2403 stw_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2404#ifdef USE_KQEMU
2405 if (cpu_single_env->kqemu_enabled &&
2406 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2407 kqemu_modify_page(cpu_single_env, ram_addr);
2408#endif
f23db169
FB
2409 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2410 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2411 /* we remove the notdirty callback only if the code has been
2412 flushed */
2413 if (dirty_flags == 0xff)
2e70f6ef 2414 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2415}
2416
0f459d16
PB
2417static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2418 uint32_t val)
9fa3e853 2419{
3a7d929e 2420 int dirty_flags;
3a7d929e
FB
2421 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2422 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2423#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2424 tb_invalidate_phys_page_fast(ram_addr, 4);
2425 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2426#endif
3a7d929e 2427 }
0f459d16 2428 stl_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2429#ifdef USE_KQEMU
2430 if (cpu_single_env->kqemu_enabled &&
2431 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2432 kqemu_modify_page(cpu_single_env, ram_addr);
2433#endif
f23db169
FB
2434 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2435 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2436 /* we remove the notdirty callback only if the code has been
2437 flushed */
2438 if (dirty_flags == 0xff)
2e70f6ef 2439 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2440}
2441
3a7d929e 2442static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2443 NULL, /* never used */
2444 NULL, /* never used */
2445 NULL, /* never used */
2446};
2447
1ccde1cb
FB
2448static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2449 notdirty_mem_writeb,
2450 notdirty_mem_writew,
2451 notdirty_mem_writel,
2452};
2453
0f459d16
PB
2454/* Generate a debug exception if a watchpoint has been hit. */
2455static void check_watchpoint(int offset, int flags)
2456{
2457 CPUState *env = cpu_single_env;
2458 target_ulong vaddr;
2459 int i;
2460
2e70f6ef 2461 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
0f459d16
PB
2462 for (i = 0; i < env->nb_watchpoints; i++) {
2463 if (vaddr == env->watchpoint[i].vaddr
2464 && (env->watchpoint[i].type & flags)) {
2465 env->watchpoint_hit = i + 1;
2466 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2467 break;
2468 }
2469 }
2470}
2471
6658ffb8
PB
2472/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2473 so these check for a hit then pass through to the normal out-of-line
2474 phys routines. */
2475static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2476{
0f459d16 2477 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2478 return ldub_phys(addr);
2479}
2480
2481static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2482{
0f459d16 2483 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2484 return lduw_phys(addr);
2485}
2486
2487static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2488{
0f459d16 2489 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
6658ffb8
PB
2490 return ldl_phys(addr);
2491}
2492
6658ffb8
PB
2493static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2494 uint32_t val)
2495{
0f459d16 2496 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2497 stb_phys(addr, val);
2498}
2499
2500static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2501 uint32_t val)
2502{
0f459d16 2503 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2504 stw_phys(addr, val);
2505}
2506
2507static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2508 uint32_t val)
2509{
0f459d16 2510 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
6658ffb8
PB
2511 stl_phys(addr, val);
2512}
2513
2514static CPUReadMemoryFunc *watch_mem_read[3] = {
2515 watch_mem_readb,
2516 watch_mem_readw,
2517 watch_mem_readl,
2518};
2519
2520static CPUWriteMemoryFunc *watch_mem_write[3] = {
2521 watch_mem_writeb,
2522 watch_mem_writew,
2523 watch_mem_writel,
2524};
6658ffb8 2525
db7b5426
BS
2526static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2527 unsigned int len)
2528{
db7b5426
BS
2529 uint32_t ret;
2530 unsigned int idx;
2531
2532 idx = SUBPAGE_IDX(addr - mmio->base);
2533#if defined(DEBUG_SUBPAGE)
2534 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2535 mmio, len, addr, idx);
2536#endif
3ee89922 2537 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
db7b5426
BS
2538
2539 return ret;
2540}
2541
2542static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2543 uint32_t value, unsigned int len)
2544{
db7b5426
BS
2545 unsigned int idx;
2546
2547 idx = SUBPAGE_IDX(addr - mmio->base);
2548#if defined(DEBUG_SUBPAGE)
2549 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2550 mmio, len, addr, idx, value);
2551#endif
3ee89922 2552 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
db7b5426
BS
2553}
2554
2555static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2556{
2557#if defined(DEBUG_SUBPAGE)
2558 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2559#endif
2560
2561 return subpage_readlen(opaque, addr, 0);
2562}
2563
2564static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2565 uint32_t value)
2566{
2567#if defined(DEBUG_SUBPAGE)
2568 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2569#endif
2570 subpage_writelen(opaque, addr, value, 0);
2571}
2572
2573static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2574{
2575#if defined(DEBUG_SUBPAGE)
2576 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2577#endif
2578
2579 return subpage_readlen(opaque, addr, 1);
2580}
2581
2582static void subpage_writew (void *opaque, target_phys_addr_t addr,
2583 uint32_t value)
2584{
2585#if defined(DEBUG_SUBPAGE)
2586 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2587#endif
2588 subpage_writelen(opaque, addr, value, 1);
2589}
2590
2591static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2592{
2593#if defined(DEBUG_SUBPAGE)
2594 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2595#endif
2596
2597 return subpage_readlen(opaque, addr, 2);
2598}
2599
2600static void subpage_writel (void *opaque,
2601 target_phys_addr_t addr, uint32_t value)
2602{
2603#if defined(DEBUG_SUBPAGE)
2604 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2605#endif
2606 subpage_writelen(opaque, addr, value, 2);
2607}
2608
2609static CPUReadMemoryFunc *subpage_read[] = {
2610 &subpage_readb,
2611 &subpage_readw,
2612 &subpage_readl,
2613};
2614
2615static CPUWriteMemoryFunc *subpage_write[] = {
2616 &subpage_writeb,
2617 &subpage_writew,
2618 &subpage_writel,
2619};
2620
2621static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
00f82b8a 2622 ram_addr_t memory)
db7b5426
BS
2623{
2624 int idx, eidx;
4254fab8 2625 unsigned int i;
db7b5426
BS
2626
2627 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2628 return -1;
2629 idx = SUBPAGE_IDX(start);
2630 eidx = SUBPAGE_IDX(end);
2631#if defined(DEBUG_SUBPAGE)
2632 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2633 mmio, start, end, idx, eidx, memory);
2634#endif
2635 memory >>= IO_MEM_SHIFT;
2636 for (; idx <= eidx; idx++) {
4254fab8 2637 for (i = 0; i < 4; i++) {
3ee89922
BS
2638 if (io_mem_read[memory][i]) {
2639 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2640 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2641 }
2642 if (io_mem_write[memory][i]) {
2643 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2644 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2645 }
4254fab8 2646 }
db7b5426
BS
2647 }
2648
2649 return 0;
2650}
2651
00f82b8a
AJ
2652static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2653 ram_addr_t orig_memory)
db7b5426
BS
2654{
2655 subpage_t *mmio;
2656 int subpage_memory;
2657
2658 mmio = qemu_mallocz(sizeof(subpage_t));
2659 if (mmio != NULL) {
2660 mmio->base = base;
2661 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2662#if defined(DEBUG_SUBPAGE)
2663 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2664 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2665#endif
2666 *phys = subpage_memory | IO_MEM_SUBPAGE;
2667 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2668 }
2669
2670 return mmio;
2671}
2672
33417e70
FB
2673static void io_mem_init(void)
2674{
3a7d929e 2675 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2676 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2677 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2678 io_mem_nb = 5;
2679
0f459d16 2680 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
6658ffb8 2681 watch_mem_write, NULL);
1ccde1cb 2682 /* alloc dirty bits array */
0a962c02 2683 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2684 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2685}
2686
2687/* mem_read and mem_write are arrays of functions containing the
2688 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2689 2). Functions can be omitted with a NULL function pointer. The
2690 registered functions may be modified dynamically later.
2691 If io_index is non zero, the corresponding io zone is
4254fab8
BS
2692 modified. If it is zero, a new io zone is allocated. The return
2693 value can be used with cpu_register_physical_memory(). (-1) is
 2694 returned on error. */
33417e70
FB
2695int cpu_register_io_memory(int io_index,
2696 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2697 CPUWriteMemoryFunc **mem_write,
2698 void *opaque)
33417e70 2699{
4254fab8 2700 int i, subwidth = 0;
33417e70
FB
2701
2702 if (io_index <= 0) {
b5ff1b31 2703 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2704 return -1;
2705 io_index = io_mem_nb++;
2706 } else {
2707 if (io_index >= IO_MEM_NB_ENTRIES)
2708 return -1;
2709 }
b5ff1b31 2710
33417e70 2711 for(i = 0;i < 3; i++) {
4254fab8
BS
2712 if (!mem_read[i] || !mem_write[i])
2713 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2714 io_mem_read[io_index][i] = mem_read[i];
2715 io_mem_write[io_index][i] = mem_write[i];
2716 }
a4193c8a 2717 io_mem_opaque[io_index] = opaque;
4254fab8 2718 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2719}
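/* Illustrative sketch (not part of exec.c): registering an MMIO region
   with this API.  A device supplies byte/word/dword callbacks, receives
   an io memory value back, and hands it to cpu_register_physical_memory()
   so that guest accesses to the range call into the device.  The device
   callbacks, helper names and the base address are hypothetical; here the
   same handlers serve all three access widths for brevity. */
static uint32_t sketch_dev_read(void *opaque, target_phys_addr_t addr)
{
    /* a real device would decode 'addr' into one of its registers */
    return 0;
}

static void sketch_dev_write(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    /* a real device would update its state here */
}

static CPUReadMemoryFunc *sketch_dev_readfn[3] = {
    sketch_dev_read, sketch_dev_read, sketch_dev_read,
};

static CPUWriteMemoryFunc *sketch_dev_writefn[3] = {
    sketch_dev_write, sketch_dev_write, sketch_dev_write,
};

static void sketch_dev_map(void *dev_state)
{
    int io = cpu_register_io_memory(0, sketch_dev_readfn,
                                    sketch_dev_writefn, dev_state);
    cpu_register_physical_memory(0xe0000000 /* hypothetical MMIO base */,
                                 0x1000, io);
}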
61382a50 2720
8926b517
FB
2721CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2722{
2723 return io_mem_write[io_index >> IO_MEM_SHIFT];
2724}
2725
2726CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2727{
2728 return io_mem_read[io_index >> IO_MEM_SHIFT];
2729}
2730
e2eef170
PB
2731#endif /* !defined(CONFIG_USER_ONLY) */
2732
13eb76e0
FB
2733/* physical memory access (slow version, mainly for debug) */
2734#if defined(CONFIG_USER_ONLY)
5fafdf24 2735void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2736 int len, int is_write)
2737{
2738 int l, flags;
2739 target_ulong page;
53a5960a 2740 void * p;
13eb76e0
FB
2741
2742 while (len > 0) {
2743 page = addr & TARGET_PAGE_MASK;
2744 l = (page + TARGET_PAGE_SIZE) - addr;
2745 if (l > len)
2746 l = len;
2747 flags = page_get_flags(page);
2748 if (!(flags & PAGE_VALID))
2749 return;
2750 if (is_write) {
2751 if (!(flags & PAGE_WRITE))
2752 return;
579a97f7 2753 /* XXX: this code should not depend on lock_user */
72fb7daa 2754 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2755 /* FIXME - should this return an error rather than just fail? */
2756 return;
72fb7daa
AJ
2757 memcpy(p, buf, l);
2758 unlock_user(p, addr, l);
13eb76e0
FB
2759 } else {
2760 if (!(flags & PAGE_READ))
2761 return;
579a97f7 2762 /* XXX: this code should not depend on lock_user */
72fb7daa 2763 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2764 /* FIXME - should this return an error rather than just fail? */
2765 return;
72fb7daa 2766 memcpy(buf, p, l);
5b257578 2767 unlock_user(p, addr, 0);
13eb76e0
FB
2768 }
2769 len -= l;
2770 buf += l;
2771 addr += l;
2772 }
2773}
8df1cd07 2774
13eb76e0 2775#else
5fafdf24 2776void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2777 int len, int is_write)
2778{
2779 int l, io_index;
2780 uint8_t *ptr;
2781 uint32_t val;
2e12669a
FB
2782 target_phys_addr_t page;
2783 unsigned long pd;
92e873b9 2784 PhysPageDesc *p;
3b46e624 2785
13eb76e0
FB
2786 while (len > 0) {
2787 page = addr & TARGET_PAGE_MASK;
2788 l = (page + TARGET_PAGE_SIZE) - addr;
2789 if (l > len)
2790 l = len;
92e873b9 2791 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2792 if (!p) {
2793 pd = IO_MEM_UNASSIGNED;
2794 } else {
2795 pd = p->phys_offset;
2796 }
3b46e624 2797
13eb76e0 2798 if (is_write) {
3a7d929e 2799 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2800 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2801 /* XXX: could force cpu_single_env to NULL to avoid
2802 potential bugs */
13eb76e0 2803 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2804 /* 32 bit write access */
c27004ec 2805 val = ldl_p(buf);
a4193c8a 2806 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2807 l = 4;
2808 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2809 /* 16 bit write access */
c27004ec 2810 val = lduw_p(buf);
a4193c8a 2811 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2812 l = 2;
2813 } else {
1c213d19 2814 /* 8 bit write access */
c27004ec 2815 val = ldub_p(buf);
a4193c8a 2816 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2817 l = 1;
2818 }
2819 } else {
b448f2f3
FB
2820 unsigned long addr1;
2821 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2822 /* RAM case */
b448f2f3 2823 ptr = phys_ram_base + addr1;
13eb76e0 2824 memcpy(ptr, buf, l);
3a7d929e
FB
2825 if (!cpu_physical_memory_is_dirty(addr1)) {
2826 /* invalidate code */
2827 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2828 /* set dirty bit */
5fafdf24 2829 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2830 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2831 }
13eb76e0
FB
2832 }
2833 } else {
5fafdf24 2834 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2835 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2836 /* I/O case */
2837 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2838 if (l >= 4 && ((addr & 3) == 0)) {
2839 /* 32 bit read access */
a4193c8a 2840 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2841 stl_p(buf, val);
13eb76e0
FB
2842 l = 4;
2843 } else if (l >= 2 && ((addr & 1) == 0)) {
2844 /* 16 bit read access */
a4193c8a 2845 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2846 stw_p(buf, val);
13eb76e0
FB
2847 l = 2;
2848 } else {
1c213d19 2849 /* 8 bit read access */
a4193c8a 2850 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2851 stb_p(buf, val);
13eb76e0
FB
2852 l = 1;
2853 }
2854 } else {
2855 /* RAM case */
5fafdf24 2856 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
2857 (addr & ~TARGET_PAGE_MASK);
2858 memcpy(buf, ptr, l);
2859 }
2860 }
2861 len -= l;
2862 buf += l;
2863 addr += l;
2864 }
2865}
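/* Illustrative sketch (not part of exec.c): device and DMA emulation copy
   data to and from guest-physical memory through cpu_physical_memory_rw()
   (or the cpu_physical_memory_read/write wrappers used further below),
   letting this function handle the RAM-vs-MMIO split, unaligned lengths
   and code invalidation on writes.  Helper name is hypothetical. */
static void sketch_dma_to_guest(target_phys_addr_t guest_addr,
                                const uint8_t *data, int len)
{
    cpu_physical_memory_write(guest_addr, data, len);
}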
8df1cd07 2866
d0ecd2aa 2867/* used for ROM loading : can write in RAM and ROM */
5fafdf24 2868void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
2869 const uint8_t *buf, int len)
2870{
2871 int l;
2872 uint8_t *ptr;
2873 target_phys_addr_t page;
2874 unsigned long pd;
2875 PhysPageDesc *p;
3b46e624 2876
d0ecd2aa
FB
2877 while (len > 0) {
2878 page = addr & TARGET_PAGE_MASK;
2879 l = (page + TARGET_PAGE_SIZE) - addr;
2880 if (l > len)
2881 l = len;
2882 p = phys_page_find(page >> TARGET_PAGE_BITS);
2883 if (!p) {
2884 pd = IO_MEM_UNASSIGNED;
2885 } else {
2886 pd = p->phys_offset;
2887 }
3b46e624 2888
d0ecd2aa 2889 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2890 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2891 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2892 /* do nothing */
2893 } else {
2894 unsigned long addr1;
2895 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2896 /* ROM/RAM case */
2897 ptr = phys_ram_base + addr1;
2898 memcpy(ptr, buf, l);
2899 }
2900 len -= l;
2901 buf += l;
2902 addr += l;
2903 }
2904}
2905
2906
8df1cd07
FB
2907/* warning: addr must be aligned */
2908uint32_t ldl_phys(target_phys_addr_t addr)
2909{
2910 int io_index;
2911 uint8_t *ptr;
2912 uint32_t val;
2913 unsigned long pd;
2914 PhysPageDesc *p;
2915
2916 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2917 if (!p) {
2918 pd = IO_MEM_UNASSIGNED;
2919 } else {
2920 pd = p->phys_offset;
2921 }
3b46e624 2922
5fafdf24 2923 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2924 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2925 /* I/O case */
2926 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2927 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2928 } else {
2929 /* RAM case */
5fafdf24 2930 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
2931 (addr & ~TARGET_PAGE_MASK);
2932 val = ldl_p(ptr);
2933 }
2934 return val;
2935}
2936
84b7b8e7
FB
2937/* warning: addr must be aligned */
2938uint64_t ldq_phys(target_phys_addr_t addr)
2939{
2940 int io_index;
2941 uint8_t *ptr;
2942 uint64_t val;
2943 unsigned long pd;
2944 PhysPageDesc *p;
2945
2946 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2947 if (!p) {
2948 pd = IO_MEM_UNASSIGNED;
2949 } else {
2950 pd = p->phys_offset;
2951 }
3b46e624 2952
2a4188a3
FB
2953 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2954 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2955 /* I/O case */
2956 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2957#ifdef TARGET_WORDS_BIGENDIAN
2958 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2959 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2960#else
2961 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2962 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2963#endif
2964 } else {
2965 /* RAM case */
5fafdf24 2966 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
2967 (addr & ~TARGET_PAGE_MASK);
2968 val = ldq_p(ptr);
2969 }
2970 return val;
2971}
2972
aab33094
FB
2973/* XXX: optimize */
2974uint32_t ldub_phys(target_phys_addr_t addr)
2975{
2976 uint8_t val;
2977 cpu_physical_memory_read(addr, &val, 1);
2978 return val;
2979}
2980
2981/* XXX: optimize */
2982uint32_t lduw_phys(target_phys_addr_t addr)
2983{
2984 uint16_t val;
2985 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2986 return tswap16(val);
2987}
2988
8df1cd07
FB
2989/* warning: addr must be aligned. The ram page is not masked as dirty
2990 and the code inside is not invalidated. It is useful if the dirty
2991 bits are used to track modified PTEs */
2992void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2993{
2994 int io_index;
2995 uint8_t *ptr;
2996 unsigned long pd;
2997 PhysPageDesc *p;
2998
2999 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3000 if (!p) {
3001 pd = IO_MEM_UNASSIGNED;
3002 } else {
3003 pd = p->phys_offset;
3004 }
3b46e624 3005
3a7d929e 3006 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
3007 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3008 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3009 } else {
74576198
AL
3010 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3011 ptr = phys_ram_base + addr1;
8df1cd07 3012 stl_p(ptr, val);
74576198
AL
3013
3014 if (unlikely(in_migration)) {
3015 if (!cpu_physical_memory_is_dirty(addr1)) {
3016 /* invalidate code */
3017 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3018 /* set dirty bit */
3019 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3020 (0xff & ~CODE_DIRTY_FLAG);
3021 }
3022 }
8df1cd07
FB
3023 }
3024}
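/* Illustrative sketch (not part of exec.c): as the comment above notes,
   the _notdirty variant is meant for emulator-initiated writes such as
   setting accessed/dirty bits in guest page table entries, where the
   page must not be marked dirty and translated code on it need not be
   invalidated.  The helper name and the 0x20 bit (e.g. x86 PG_ACCESSED)
   are assumptions for illustration. */
static void sketch_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | 0x20);
}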
3025
bc98a7ef
JM
3026void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3027{
3028 int io_index;
3029 uint8_t *ptr;
3030 unsigned long pd;
3031 PhysPageDesc *p;
3032
3033 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3034 if (!p) {
3035 pd = IO_MEM_UNASSIGNED;
3036 } else {
3037 pd = p->phys_offset;
3038 }
3b46e624 3039
bc98a7ef
JM
3040 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3041 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3042#ifdef TARGET_WORDS_BIGENDIAN
3043 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3044 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3045#else
3046 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3047 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3048#endif
3049 } else {
5fafdf24 3050 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
3051 (addr & ~TARGET_PAGE_MASK);
3052 stq_p(ptr, val);
3053 }
3054}
3055
8df1cd07 3056/* warning: addr must be aligned */
8df1cd07
FB
3057void stl_phys(target_phys_addr_t addr, uint32_t val)
3058{
3059 int io_index;
3060 uint8_t *ptr;
3061 unsigned long pd;
3062 PhysPageDesc *p;
3063
3064 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3065 if (!p) {
3066 pd = IO_MEM_UNASSIGNED;
3067 } else {
3068 pd = p->phys_offset;
3069 }
3b46e624 3070
3a7d929e 3071 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
3072 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3073 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3074 } else {
3075 unsigned long addr1;
3076 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3077 /* RAM case */
3078 ptr = phys_ram_base + addr1;
3079 stl_p(ptr, val);
3a7d929e
FB
3080 if (!cpu_physical_memory_is_dirty(addr1)) {
3081 /* invalidate code */
3082 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3083 /* set dirty bit */
f23db169
FB
3084 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3085 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3086 }
8df1cd07
FB
3087 }
3088}
3089
aab33094
FB
3090/* XXX: optimize */
3091void stb_phys(target_phys_addr_t addr, uint32_t val)
3092{
3093 uint8_t v = val;
3094 cpu_physical_memory_write(addr, &v, 1);
3095}
3096
3097/* XXX: optimize */
3098void stw_phys(target_phys_addr_t addr, uint32_t val)
3099{
3100 uint16_t v = tswap16(val);
3101 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3102}
3103
3104/* XXX: optimize */
3105void stq_phys(target_phys_addr_t addr, uint64_t val)
3106{
3107 val = tswap64(val);
3108 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3109}
3110
13eb76e0
FB
3111#endif
3112
3113/* virtual memory access for debug */
5fafdf24 3114int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 3115 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3116{
3117 int l;
9b3c35e0
JM
3118 target_phys_addr_t phys_addr;
3119 target_ulong page;
13eb76e0
FB
3120
3121 while (len > 0) {
3122 page = addr & TARGET_PAGE_MASK;
3123 phys_addr = cpu_get_phys_page_debug(env, page);
3124 /* if no physical page mapped, return an error */
3125 if (phys_addr == -1)
3126 return -1;
3127 l = (page + TARGET_PAGE_SIZE) - addr;
3128 if (l > len)
3129 l = len;
5fafdf24 3130 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 3131 buf, l, is_write);
13eb76e0
FB
3132 len -= l;
3133 buf += l;
3134 addr += l;
3135 }
3136 return 0;
3137}
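/* Illustrative sketch (not part of exec.c): debugger support such as the
   gdb stub accesses guest memory by virtual address through
   cpu_memory_rw_debug(), which translates each page with
   cpu_get_phys_page_debug() and then defers to cpu_physical_memory_rw().
   The wrapper name is hypothetical. */
static int sketch_debug_read_mem(CPUState *env, target_ulong vaddr,
                                 uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* is_write */);
}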
3138
2e70f6ef
PB
3139/* in deterministic execution mode, instructions doing device I/Os
3140 must be at the end of the TB */
3141void cpu_io_recompile(CPUState *env, void *retaddr)
3142{
3143 TranslationBlock *tb;
3144 uint32_t n, cflags;
3145 target_ulong pc, cs_base;
3146 uint64_t flags;
3147
3148 tb = tb_find_pc((unsigned long)retaddr);
3149 if (!tb) {
3150 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3151 retaddr);
3152 }
3153 n = env->icount_decr.u16.low + tb->icount;
3154 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3155 /* Calculate how many instructions had been executed before the fault
bf20dc07 3156 occurred. */
2e70f6ef
PB
3157 n = n - env->icount_decr.u16.low;
3158 /* Generate a new TB ending on the I/O insn. */
3159 n++;
3160 /* On MIPS and SH, delay slot instructions can only be restarted if
3161 they were already the first instruction in the TB. If this is not
bf20dc07 3162 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
3163 branch. */
3164#if defined(TARGET_MIPS)
3165 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3166 env->active_tc.PC -= 4;
3167 env->icount_decr.u16.low++;
3168 env->hflags &= ~MIPS_HFLAG_BMASK;
3169 }
3170#elif defined(TARGET_SH4)
3171 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3172 && n > 1) {
3173 env->pc -= 2;
3174 env->icount_decr.u16.low++;
3175 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3176 }
3177#endif
3178 /* This should never happen. */
3179 if (n > CF_COUNT_MASK)
3180 cpu_abort(env, "TB too big during recompile");
3181
3182 cflags = n | CF_LAST_IO;
3183 pc = tb->pc;
3184 cs_base = tb->cs_base;
3185 flags = tb->flags;
3186 tb_phys_invalidate(tb, -1);
3187 /* FIXME: In theory this could raise an exception. In practice
3188 we have already translated the block once so it's probably ok. */
3189 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 3190 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
3191 the first in the TB) then we end up generating a whole new TB and
3192 repeating the fault, which is horribly inefficient.
3193 Better would be to execute just this insn uncached, or generate a
3194 second new TB. */
3195 cpu_resume_from_signal(env, NULL);
3196}
3197
e3db7226
FB
3198void dump_exec_info(FILE *f,
3199 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3200{
3201 int i, target_code_size, max_target_code_size;
3202 int direct_jmp_count, direct_jmp2_count, cross_page;
3203 TranslationBlock *tb;
3b46e624 3204
e3db7226
FB
3205 target_code_size = 0;
3206 max_target_code_size = 0;
3207 cross_page = 0;
3208 direct_jmp_count = 0;
3209 direct_jmp2_count = 0;
3210 for(i = 0; i < nb_tbs; i++) {
3211 tb = &tbs[i];
3212 target_code_size += tb->size;
3213 if (tb->size > max_target_code_size)
3214 max_target_code_size = tb->size;
3215 if (tb->page_addr[1] != -1)
3216 cross_page++;
3217 if (tb->tb_next_offset[0] != 0xffff) {
3218 direct_jmp_count++;
3219 if (tb->tb_next_offset[1] != 0xffff) {
3220 direct_jmp2_count++;
3221 }
3222 }
3223 }
3224 /* XXX: avoid using doubles ? */
57fec1fe 3225 cpu_fprintf(f, "Translation buffer state:\n");
26a5f13b
FB
3226 cpu_fprintf(f, "gen code size %ld/%ld\n",
3227 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3228 cpu_fprintf(f, "TB count %d/%d\n",
3229 nb_tbs, code_gen_max_blocks);
5fafdf24 3230 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
3231 nb_tbs ? target_code_size / nb_tbs : 0,
3232 max_target_code_size);
5fafdf24 3233 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
3234 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3235 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
3236 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3237 cross_page,
e3db7226
FB
3238 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3239 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3240 direct_jmp_count,
e3db7226
FB
3241 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3242 direct_jmp2_count,
3243 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3244 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
3245 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3246 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3247 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3248 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
3249}
3250
5fafdf24 3251#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3252
3253#define MMUSUFFIX _cmmu
3254#define GETPC() NULL
3255#define env cpu_single_env
b769d8fe 3256#define SOFTMMU_CODE_ACCESS
61382a50
FB
3257
3258#define SHIFT 0
3259#include "softmmu_template.h"
3260
3261#define SHIFT 1
3262#include "softmmu_template.h"
3263
3264#define SHIFT 2
3265#include "softmmu_template.h"
3266
3267#define SHIFT 3
3268#include "softmmu_template.h"
3269
3270#undef env
3271
3272#endif