/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

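/* Editor's note (illustrative sketch, not part of the build): with the
   typical values TARGET_PAGE_BITS = 12 and L2_BITS = 10, the 32-bit case
   gives L1_BITS = 32 - 10 - 12 = 10, i.e. 1024 L1 slots each covering
   1024 PageDesc entries.  A page index is then decomposed as:

       PageDesc **lp = &l1_map[index >> L2_BITS];      // top L1_BITS bits
       PageDesc *p = *lp + (index & (L2_SIZE - 1));    // low L2_BITS bits

   which is exactly the lookup performed by page_l1_map() and page_find()
   below. */
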
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

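/* Editor's note (illustrative sketch, not part of the build): for a
   physical address space wider than 32 bits an extra top level is
   compiled in above, so a physical page index is decoded in three steps:

       lvl0 = (index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1); // extra level
       lvl1 = (index >> L2_BITS) & (L1_SIZE - 1);
       slot = index & (L2_SIZE - 1);                          // PhysPageDesc

   With TARGET_PHYS_ADDR_SPACE_BITS <= 32 the first step is compiled out
   and l1_phys_map is indexed directly with lvl1/slot, as in the 'alloc'
   variant above. */
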
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc is used.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

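/* Editor's note (illustrative usage, not part of the build): a typical
   startup sequence runs this once before any CPU is created, roughly:

       cpu_exec_init_all(0);          // 0 selects the default sizing logic
       env = cpu_init(cpu_model);     // per-target init; cpu_model is
                                      // assumed here for illustration

   A non-zero tb_size overrides the translation buffer size computed in
   code_gen_alloc(). */
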
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env, 0);

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);
    cpu_synchronize_state(env, 1);

    return 0;
}
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

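/* Editor's note (illustrative sketch, not part of the build): the lists
   walked above keep a 2-bit tag in the low bits of each pointer, which is
   safe because TranslationBlock structures are at least 4-byte aligned:

       p->first_tb = (TranslationBlock *)((long)tb | n); // n: page slot 0/1
       n1 = (long)tb1 & 3;                               // recover the tag
       tb1 = (TranslationBlock *)((long)tb1 & ~3);       // recover the pointer

   Tag value 2 marks the list head in the circular jmp_first lists, which
   is why the walkers stop when n1 == 2. */
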
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-cpu jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

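/* Editor's note (worked example, not part of the build): set_bits(tab, 6, 4)
   marks bits [6, 10).  start and end fall in different bytes, so the else
   branch runs:

       *tab++ |= 0xff << 6;            // bits 6..7 of byte 0 -> 0xc0
       // start becomes 8 and end1 = 8, so no full 0xff bytes are written
       *tab |= ~(0xff << (10 & 7));    // bits 0..1 of byte 1 -> 0x03

   leaving exactly the four requested bits set. */
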
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end[.  NOTE: start and end must refer to
   the same physical page.  'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

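/* Editor's note (illustrative sketch, not part of the build): the binary
   search above relies on tbs[] being ordered by tc_ptr, which holds
   because tb_alloc() hands out blocks in code_gen_buffer order.  A typical
   caller maps a host PC taken from a fault back to its TB:

       TranslationBlock *tb = tb_find_pc((unsigned long)host_pc);
       if (tb)
           cpu_restore_state(tb, env, (unsigned long)host_pc, NULL);

   Returning &tbs[m_max] on a non-exact hit yields the block whose tc_ptr
   is the greatest one still below tc_ptr. */
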
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

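/* Editor's note (illustrative usage, not part of the build): watching a
   4-byte guest variable for writes could look like:

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp) < 0)
           fprintf(stderr, "watchpoint rejected\n");

   len must be a power of two (1, 2, 4 or 8) and addr must be aligned to
   it, or the sanity check above returns -EINVAL. */
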
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

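/* Editor's note (illustrative usage, not part of the build): a debugger
   front end registers breakpoints roughly as:

       CPUBreakpoint *bp;
       if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) < 0)
           return -1;   // insertion failed

   breakpoint_invalidate() above discards the translated code for the
   affected address, so the next execution of 'pc' is retranslated with
   the breakpoint check.  Targets built without TARGET_HAS_ICE return
   -ENOSYS instead. */
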
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
#endif
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

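/* Editor's note (worked example, not part of the build): parsing the
   comma-separated list given to the -d command line option:

       int mask = cpu_str_to_log_mask("in_asm,cpu");
       if (!mask)
           fprintf(stderr, "bad log mask\n");
       else
           cpu_set_log(mask);   // opens /tmp/qemu.log on first use

   "all" expands to the union of every entry in cpu_log_items[]. */
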
7501267e
FB
1655void cpu_abort(CPUState *env, const char *fmt, ...)
1656{
1657 va_list ap;
493ae1f0 1658 va_list ap2;
7501267e
FB
1659
1660 va_start(ap, fmt);
493ae1f0 1661 va_copy(ap2, ap);
7501267e
FB
1662 fprintf(stderr, "qemu: fatal: ");
1663 vfprintf(stderr, fmt, ap);
1664 fprintf(stderr, "\n");
1665#ifdef TARGET_I386
7fe48483
FB
1666 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1667#else
1668 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1669#endif
93fcfe39
AL
1670 if (qemu_log_enabled()) {
1671 qemu_log("qemu: fatal: ");
1672 qemu_log_vprintf(fmt, ap2);
1673 qemu_log("\n");
f9373291 1674#ifdef TARGET_I386
93fcfe39 1675 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1676#else
93fcfe39 1677 log_cpu_state(env, 0);
f9373291 1678#endif
31b1a7b4 1679 qemu_log_flush();
93fcfe39 1680 qemu_log_close();
924edcae 1681 }
493ae1f0 1682 va_end(ap2);
f9373291 1683 va_end(ap);
7501267e
FB
1684 abort();
1685}
1686
c5be9f08
TS
1687CPUState *cpu_copy(CPUState *env)
1688{
01ba9816 1689 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1690 CPUState *next_cpu = new_env->next_cpu;
1691 int cpu_index = new_env->cpu_index;
5a38f081
AL
1692#if defined(TARGET_HAS_ICE)
1693 CPUBreakpoint *bp;
1694 CPUWatchpoint *wp;
1695#endif
1696
c5be9f08 1697 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1698
1699 /* Preserve chaining and index. */
c5be9f08
TS
1700 new_env->next_cpu = next_cpu;
1701 new_env->cpu_index = cpu_index;
5a38f081
AL
1702
1703 /* Clone all break/watchpoints.
1704 Note: Once we support ptrace with hw-debug register access, make sure
1705 BP_CPU break/watchpoints are handled correctly on clone. */
1706 TAILQ_INIT(&env->breakpoints);
1707 TAILQ_INIT(&env->watchpoints);
1708#if defined(TARGET_HAS_ICE)
1709 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1710 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1711 }
1712 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1713 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1714 wp->flags, NULL);
1715 }
1716#endif
1717
c5be9f08
TS
1718 return new_env;
1719}
1720
0124311e
FB
1721#if !defined(CONFIG_USER_ONLY)
1722
5c751e99
EI
1723static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1724{
1725 unsigned int i;
1726
1727 /* Discard jump cache entries for any tb which might potentially
1728 overlap the flushed page. */
1729 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1730 memset (&env->tb_jmp_cache[i], 0,
1731 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1732
1733 i = tb_jmp_cache_hash_page(addr);
1734 memset (&env->tb_jmp_cache[i], 0,
1735 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1736}
1737
ee8b7021
FB
1738/* NOTE: if flush_global is true, also flush global entries (not
1739 implemented yet) */
1740void tlb_flush(CPUState *env, int flush_global)
33417e70 1741{
33417e70 1742 int i;
0124311e 1743
9fa3e853
FB
1744#if defined(DEBUG_TLB)
1745 printf("tlb_flush:\n");
1746#endif
0124311e
FB
1747 /* must reset current TB so that interrupts cannot modify the
1748 links while we are modifying them */
1749 env->current_tb = NULL;
1750
33417e70 1751 for(i = 0; i < CPU_TLB_SIZE; i++) {
84b7b8e7
FB
1752 env->tlb_table[0][i].addr_read = -1;
1753 env->tlb_table[0][i].addr_write = -1;
1754 env->tlb_table[0][i].addr_code = -1;
1755 env->tlb_table[1][i].addr_read = -1;
1756 env->tlb_table[1][i].addr_write = -1;
1757 env->tlb_table[1][i].addr_code = -1;
6fa4cea9
JM
1758#if (NB_MMU_MODES >= 3)
1759 env->tlb_table[2][i].addr_read = -1;
1760 env->tlb_table[2][i].addr_write = -1;
1761 env->tlb_table[2][i].addr_code = -1;
e37e6ee6
AJ
1762#endif
1763#if (NB_MMU_MODES >= 4)
6fa4cea9
JM
1764 env->tlb_table[3][i].addr_read = -1;
1765 env->tlb_table[3][i].addr_write = -1;
1766 env->tlb_table[3][i].addr_code = -1;
1767#endif
e37e6ee6
AJ
1768#if (NB_MMU_MODES >= 5)
1769 env->tlb_table[4][i].addr_read = -1;
1770 env->tlb_table[4][i].addr_write = -1;
1771 env->tlb_table[4][i].addr_code = -1;
6fa4cea9 1772#endif
e37e6ee6 1773
33417e70 1774 }
9fa3e853 1775
8a40a180 1776 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1777
640f42e4 1778#ifdef CONFIG_KQEMU
0a962c02
FB
1779 if (env->kqemu_enabled) {
1780 kqemu_flush(env, flush_global);
1781 }
9fa3e853 1782#endif
e3db7226 1783 tlb_flush_count++;
33417e70
FB
1784}
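/* Illustrative sketch, not part of the original file: the unrolled
 * per-mode resets above are equivalent to this loop over NB_MMU_MODES,
 * shown only to make the structure explicit:
 *
 *     int mmu_idx, i;
 *     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
 *         for (i = 0; i < CPU_TLB_SIZE; i++) {
 *             env->tlb_table[mmu_idx][i].addr_read = -1;
 *             env->tlb_table[mmu_idx][i].addr_write = -1;
 *             env->tlb_table[mmu_idx][i].addr_code = -1;
 *         }
 *     }
 */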
1785
274da6b2 1786static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1787{
5fafdf24 1788 if (addr == (tlb_entry->addr_read &
84b7b8e7 1789 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1790 addr == (tlb_entry->addr_write &
84b7b8e7 1791 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1792 addr == (tlb_entry->addr_code &
84b7b8e7
FB
1793 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1794 tlb_entry->addr_read = -1;
1795 tlb_entry->addr_write = -1;
1796 tlb_entry->addr_code = -1;
1797 }
61382a50
FB
1798}
1799
2e12669a 1800void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1801{
8a40a180 1802 int i;
0124311e 1803
9fa3e853 1804#if defined(DEBUG_TLB)
108c49b8 1805 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1806#endif
0124311e
FB
1807 /* must reset current TB so that interrupts cannot modify the
1808 links while we are modifying them */
1809 env->current_tb = NULL;
61382a50
FB
1810
1811 addr &= TARGET_PAGE_MASK;
1812 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1813 tlb_flush_entry(&env->tlb_table[0][i], addr);
1814 tlb_flush_entry(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1815#if (NB_MMU_MODES >= 3)
1816 tlb_flush_entry(&env->tlb_table[2][i], addr);
e37e6ee6
AJ
1817#endif
1818#if (NB_MMU_MODES >= 4)
6fa4cea9
JM
1819 tlb_flush_entry(&env->tlb_table[3][i], addr);
1820#endif
e37e6ee6
AJ
1821#if (NB_MMU_MODES >= 5)
1822 tlb_flush_entry(&env->tlb_table[4][i], addr);
6fa4cea9 1823#endif
0124311e 1824
5c751e99 1825 tlb_flush_jmp_cache(env, addr);
9fa3e853 1826
640f42e4 1827#ifdef CONFIG_KQEMU
0a962c02
FB
1828 if (env->kqemu_enabled) {
1829 kqemu_flush_page(env, addr);
1830 }
1831#endif
9fa3e853
FB
1832}
1833
9fa3e853
FB
1834/* update the TLBs so that writes to code in the RAM page 'ram_addr'
1835 can be detected */
6a00d601 1836static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1837{
5fafdf24 1838 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1839 ram_addr + TARGET_PAGE_SIZE,
1840 CODE_DIRTY_FLAG);
9fa3e853
FB
1841}
1842
9fa3e853 1843/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1844 tested for self modifying code */
5fafdf24 1845static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1846 target_ulong vaddr)
9fa3e853 1847{
3a7d929e 1848 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1849}
1850
5fafdf24 1851static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1852 unsigned long start, unsigned long length)
1853{
1854 unsigned long addr;
84b7b8e7
FB
1855 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1856 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1857 if ((addr - start) < length) {
0f459d16 1858 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1859 }
1860 }
1861}
1862
5579c7f3 1863/* Note: start and end must be within the same ram block. */
3a7d929e 1864void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1865 int dirty_flags)
1ccde1cb
FB
1866{
1867 CPUState *env;
4f2ac237 1868 unsigned long length, start1;
0a962c02
FB
1869 int i, mask, len;
1870 uint8_t *p;
1ccde1cb
FB
1871
1872 start &= TARGET_PAGE_MASK;
1873 end = TARGET_PAGE_ALIGN(end);
1874
1875 length = end - start;
1876 if (length == 0)
1877 return;
0a962c02 1878 len = length >> TARGET_PAGE_BITS;
640f42e4 1879#ifdef CONFIG_KQEMU
6a00d601
FB
1880 /* XXX: should not depend on cpu context */
1881 env = first_cpu;
3a7d929e 1882 if (env->kqemu_enabled) {
f23db169
FB
1883 ram_addr_t addr;
1884 addr = start;
1885 for(i = 0; i < len; i++) {
1886 kqemu_set_notdirty(env, addr);
1887 addr += TARGET_PAGE_SIZE;
1888 }
3a7d929e
FB
1889 }
1890#endif
f23db169
FB
1891 mask = ~dirty_flags;
1892 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1893 for(i = 0; i < len; i++)
1894 p[i] &= mask;
1895
1ccde1cb
FB
1896 /* we modify the TLB cache so that the dirty bit will be set again
1897 when accessing the range */
5579c7f3
PB
1898 start1 = (unsigned long)qemu_get_ram_ptr(start);
1899 /* Check that we don't span multiple blocks - this breaks the
1900 address comparisons below. */
1901 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1902 != (end - 1) - start) {
1903 abort();
1904 }
1905
6a00d601
FB
1906 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1907 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1908 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1909 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1910 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1911#if (NB_MMU_MODES >= 3)
1912 for(i = 0; i < CPU_TLB_SIZE; i++)
1913 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
e37e6ee6
AJ
1914#endif
1915#if (NB_MMU_MODES >= 4)
6fa4cea9
JM
1916 for(i = 0; i < CPU_TLB_SIZE; i++)
1917 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1918#endif
e37e6ee6
AJ
1919#if (NB_MMU_MODES >= 5)
1920 for(i = 0; i < CPU_TLB_SIZE; i++)
1921 tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
6fa4cea9 1922#endif
6a00d601 1923 }
1ccde1cb
FB
1924}
1925
74576198
AL
1926int cpu_physical_memory_set_dirty_tracking(int enable)
1927{
1928 in_migration = enable;
b0a46a33
JK
1929 if (kvm_enabled()) {
1930 return kvm_set_migration_log(enable);
1931 }
74576198
AL
1932 return 0;
1933}
1934
1935int cpu_physical_memory_get_dirty_tracking(void)
1936{
1937 return in_migration;
1938}
1939
151f7749
JK
1940int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1941 target_phys_addr_t end_addr)
2bec46dc 1942{
151f7749
JK
1943 int ret = 0;
1944
2bec46dc 1945 if (kvm_enabled())
151f7749
JK
1946 ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1947 return ret;
2bec46dc
AL
1948}
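/* Hedged usage sketch, not from the original file: a migration-style
 * dirty-logging cycle built from the helpers above, assuming
 * MIGRATION_DIRTY_FLAG and cpu_physical_memory_get_dirty() from
 * cpu-all.h:
 *
 *     cpu_physical_memory_set_dirty_tracking(1);
 *     // ... let the guest run for a while ...
 *     cpu_physical_sync_dirty_bitmap(start, start + size);
 *     for (addr = start; addr < start + size; addr += TARGET_PAGE_SIZE) {
 *         if (cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG)) {
 *             // ... resend this page ...
 *         }
 *     }
 *     cpu_physical_memory_reset_dirty(start, start + size,
 *                                     MIGRATION_DIRTY_FLAG);
 */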
1949
3a7d929e
FB
1950static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1951{
1952 ram_addr_t ram_addr;
5579c7f3 1953 void *p;
3a7d929e 1954
84b7b8e7 1955 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5579c7f3
PB
1956 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1957 + tlb_entry->addend);
1958 ram_addr = qemu_ram_addr_from_host(p);
3a7d929e 1959 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 1960 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
1961 }
1962 }
1963}
1964
1965/* update the TLB according to the current state of the dirty bits */
1966void cpu_tlb_update_dirty(CPUState *env)
1967{
1968 int i;
1969 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1970 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1971 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1972 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1973#if (NB_MMU_MODES >= 3)
1974 for(i = 0; i < CPU_TLB_SIZE; i++)
1975 tlb_update_dirty(&env->tlb_table[2][i]);
e37e6ee6
AJ
1976#endif
1977#if (NB_MMU_MODES >= 4)
6fa4cea9
JM
1978 for(i = 0; i < CPU_TLB_SIZE; i++)
1979 tlb_update_dirty(&env->tlb_table[3][i]);
1980#endif
e37e6ee6
AJ
1981#if (NB_MMU_MODES >= 5)
1982 for(i = 0; i < CPU_TLB_SIZE; i++)
1983 tlb_update_dirty(&env->tlb_table[4][i]);
6fa4cea9 1984#endif
3a7d929e
FB
1985}
1986
0f459d16 1987static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 1988{
0f459d16
PB
1989 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1990 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
1991}
1992
0f459d16
PB
1993/* update the TLB corresponding to virtual page vaddr
1994 so that it is no longer dirty */
1995static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 1996{
1ccde1cb
FB
1997 int i;
1998
0f459d16 1999 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2000 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
0f459d16
PB
2001 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
2002 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
6fa4cea9 2003#if (NB_MMU_MODES >= 3)
0f459d16 2004 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
e37e6ee6
AJ
2005#endif
2006#if (NB_MMU_MODES >= 4)
0f459d16 2007 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
6fa4cea9 2008#endif
e37e6ee6
AJ
2009#if (NB_MMU_MODES >= 5)
2010 tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
6fa4cea9 2011#endif
9fa3e853
FB
2012}
2013
59817ccb
FB
2014/* add a new TLB entry. At most one entry for a given virtual address
2015 is permitted. Return 0 if OK or 2 if the page could not be mapped
2016 (can only happen in non SOFTMMU mode for I/O pages or pages
2017 conflicting with the host address space). */
5fafdf24
TS
2018int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2019 target_phys_addr_t paddr, int prot,
6ebbf390 2020 int mmu_idx, int is_softmmu)
9fa3e853 2021{
92e873b9 2022 PhysPageDesc *p;
4f2ac237 2023 unsigned long pd;
9fa3e853 2024 unsigned int index;
4f2ac237 2025 target_ulong address;
0f459d16 2026 target_ulong code_address;
108c49b8 2027 target_phys_addr_t addend;
9fa3e853 2028 int ret;
84b7b8e7 2029 CPUTLBEntry *te;
a1d1bb31 2030 CPUWatchpoint *wp;
0f459d16 2031 target_phys_addr_t iotlb;
9fa3e853 2032
92e873b9 2033 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
2034 if (!p) {
2035 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
2036 } else {
2037 pd = p->phys_offset;
9fa3e853
FB
2038 }
2039#if defined(DEBUG_TLB)
6ebbf390
JM
2040 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2041 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
2042#endif
2043
2044 ret = 0;
0f459d16
PB
2045 address = vaddr;
2046 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2047 /* IO memory case (romd handled later) */
2048 address |= TLB_MMIO;
2049 }
5579c7f3 2050 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
0f459d16
PB
2051 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2052 /* Normal RAM. */
2053 iotlb = pd & TARGET_PAGE_MASK;
2054 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2055 iotlb |= IO_MEM_NOTDIRTY;
2056 else
2057 iotlb |= IO_MEM_ROM;
2058 } else {
ccbb4d44 2059 /* IO handlers are currently passed a physical address.
0f459d16
PB
2060 It would be nice to pass an offset from the base address
2061 of that region. This would avoid having to special case RAM,
2062 and avoid full address decoding in every device.
2063 We can't use the high bits of pd for this because
2064 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
2065 iotlb = (pd & ~TARGET_PAGE_MASK);
2066 if (p) {
8da3ff18
PB
2067 iotlb += p->region_offset;
2068 } else {
2069 iotlb += paddr;
2070 }
0f459d16
PB
2071 }
2072
2073 code_address = address;
2074 /* Make accesses to pages with watchpoints go via the
2075 watchpoint trap routines. */
c0ce998e 2076 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2077 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
0f459d16
PB
2078 iotlb = io_mem_watch + paddr;
2079 /* TODO: The memory case can be optimized by not trapping
2080 reads of pages with a write breakpoint. */
2081 address |= TLB_MMIO;
6658ffb8 2082 }
0f459d16 2083 }
d79acba4 2084
0f459d16
PB
2085 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2086 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2087 te = &env->tlb_table[mmu_idx][index];
2088 te->addend = addend - vaddr;
2089 if (prot & PAGE_READ) {
2090 te->addr_read = address;
2091 } else {
2092 te->addr_read = -1;
2093 }
5c751e99 2094
0f459d16
PB
2095 if (prot & PAGE_EXEC) {
2096 te->addr_code = code_address;
2097 } else {
2098 te->addr_code = -1;
2099 }
2100 if (prot & PAGE_WRITE) {
2101 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2102 (pd & IO_MEM_ROMD)) {
2103 /* Write access calls the I/O callback. */
2104 te->addr_write = address | TLB_MMIO;
2105 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2106 !cpu_physical_memory_is_dirty(pd)) {
2107 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2108 } else {
0f459d16 2109 te->addr_write = address;
9fa3e853 2110 }
0f459d16
PB
2111 } else {
2112 te->addr_write = -1;
9fa3e853 2113 }
9fa3e853
FB
2114 return ret;
2115}
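/* Illustrative sketch, not from the original file: a target's MMU fault
 * handler (tlb_fill() and friends) resolves vaddr -> paddr itself and
 * then installs the mapping with the function above; walk_page_tables()
 * is an assumed per-target helper:
 *
 *     if (walk_page_tables(env, vaddr, is_write, &paddr, &prot) == 0) {
 *         return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
 *                                  paddr & TARGET_PAGE_MASK,
 *                                  prot, mmu_idx, is_softmmu);
 *     }
 */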
2116
0124311e
FB
2117#else
2118
ee8b7021 2119void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2120{
2121}
2122
2e12669a 2123void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2124{
2125}
2126
5fafdf24
TS
2127int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2128 target_phys_addr_t paddr, int prot,
6ebbf390 2129 int mmu_idx, int is_softmmu)
9fa3e853
FB
2130{
2131 return 0;
2132}
0124311e 2133
9fa3e853
FB
2134/* dump memory mappings */
2135void page_dump(FILE *f)
33417e70 2136{
9fa3e853
FB
2137 unsigned long start, end;
2138 int i, j, prot, prot1;
2139 PageDesc *p;
33417e70 2140
9fa3e853
FB
2141 fprintf(f, "%-8s %-8s %-8s %s\n",
2142 "start", "end", "size", "prot");
2143 start = -1;
2144 end = -1;
2145 prot = 0;
2146 for(i = 0; i <= L1_SIZE; i++) {
2147 if (i < L1_SIZE)
2148 p = l1_map[i];
2149 else
2150 p = NULL;
2151 for(j = 0;j < L2_SIZE; j++) {
2152 if (!p)
2153 prot1 = 0;
2154 else
2155 prot1 = p[j].flags;
2156 if (prot1 != prot) {
2157 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2158 if (start != -1) {
2159 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 2160 start, end, end - start,
9fa3e853
FB
2161 prot & PAGE_READ ? 'r' : '-',
2162 prot & PAGE_WRITE ? 'w' : '-',
2163 prot & PAGE_EXEC ? 'x' : '-');
2164 }
2165 if (prot1 != 0)
2166 start = end;
2167 else
2168 start = -1;
2169 prot = prot1;
2170 }
2171 if (!p)
2172 break;
2173 }
33417e70 2174 }
33417e70
FB
2175}
2176
53a5960a 2177int page_get_flags(target_ulong address)
33417e70 2178{
9fa3e853
FB
2179 PageDesc *p;
2180
2181 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2182 if (!p)
9fa3e853
FB
2183 return 0;
2184 return p->flags;
2185}
2186
2187/* modify the flags of a page and invalidate the code if
ccbb4d44 2188 necessary. The flag PAGE_WRITE_ORG is set automatically
9fa3e853 2189 depending on PAGE_WRITE */
53a5960a 2190void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
2191{
2192 PageDesc *p;
53a5960a 2193 target_ulong addr;
9fa3e853 2194
c8a706fe 2195 /* mmap_lock should already be held. */
9fa3e853
FB
2196 start = start & TARGET_PAGE_MASK;
2197 end = TARGET_PAGE_ALIGN(end);
2198 if (flags & PAGE_WRITE)
2199 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
2200 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2201 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
17e2377a
PB
2202 /* We may be called for host regions that are outside guest
2203 address space. */
2204 if (!p)
2205 return;
9fa3e853
FB
2206 /* if the write protection is set, then we invalidate the code
2207 inside */
5fafdf24 2208 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2209 (flags & PAGE_WRITE) &&
2210 p->first_tb) {
d720b93d 2211 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2212 }
2213 p->flags = flags;
2214 }
33417e70
FB
2215}
2216
3d97b40b
TS
2217int page_check_range(target_ulong start, target_ulong len, int flags)
2218{
2219 PageDesc *p;
2220 target_ulong end;
2221 target_ulong addr;
2222
55f280c9
AZ
2223 if (start + len < start)
2224 /* we've wrapped around */
2225 return -1;
2226
3d97b40b
TS
2227 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2228 start = start & TARGET_PAGE_MASK;
2229
3d97b40b
TS
2230 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2231 p = page_find(addr >> TARGET_PAGE_BITS);
2232 if( !p )
2233 return -1;
2234 if( !(p->flags & PAGE_VALID) )
2235 return -1;
2236
dae3270c 2237 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2238 return -1;
dae3270c
FB
2239 if (flags & PAGE_WRITE) {
2240 if (!(p->flags & PAGE_WRITE_ORG))
2241 return -1;
2242 /* unprotect the page if it was put read-only because it
2243 contains translated code */
2244 if (!(p->flags & PAGE_WRITE)) {
2245 if (!page_unprotect(addr, 0, NULL))
2246 return -1;
2247 }
2248 return 0;
2249 }
3d97b40b
TS
2250 }
2251 return 0;
2252}
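/* Hedged usage sketch, not from the original file: linux-user code
 * validates a guest buffer this way before touching it (the access_ok()
 * helper and TARGET_EFAULT are assumed from qemu.h):
 *
 *     if (page_check_range(guest_addr, len, PAGE_READ) < 0)
 *         return -TARGET_EFAULT;
 */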
2253
9fa3e853 2254/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2255 page. Return TRUE if the fault was successfully handled. */
53a5960a 2256int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2257{
2258 unsigned int page_index, prot, pindex;
2259 PageDesc *p, *p1;
53a5960a 2260 target_ulong host_start, host_end, addr;
9fa3e853 2261
c8a706fe
PB
2262 /* Technically this isn't safe inside a signal handler. However we
2263 know this only ever happens in a synchronous SEGV handler, so in
2264 practice it seems to be ok. */
2265 mmap_lock();
2266
83fb7adf 2267 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2268 page_index = host_start >> TARGET_PAGE_BITS;
2269 p1 = page_find(page_index);
c8a706fe
PB
2270 if (!p1) {
2271 mmap_unlock();
9fa3e853 2272 return 0;
c8a706fe 2273 }
83fb7adf 2274 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2275 p = p1;
2276 prot = 0;
2277 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2278 prot |= p->flags;
2279 p++;
2280 }
2281 /* if the page was really writable, then we change its
2282 protection back to writable */
2283 if (prot & PAGE_WRITE_ORG) {
2284 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2285 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2286 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2287 (prot & PAGE_BITS) | PAGE_WRITE);
2288 p1[pindex].flags |= PAGE_WRITE;
2289 /* and since the content will be modified, we must invalidate
2290 the corresponding translated code. */
d720b93d 2291 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2292#ifdef DEBUG_TB_CHECK
2293 tb_invalidate_check(address);
2294#endif
c8a706fe 2295 mmap_unlock();
9fa3e853
FB
2296 return 1;
2297 }
2298 }
c8a706fe 2299 mmap_unlock();
9fa3e853
FB
2300 return 0;
2301}
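/* Illustrative note, not from the original file: the expected caller is
 * the user-mode host SEGV handler, roughly:
 *
 *     if (page_unprotect(h2g(fault_addr), pc, puc))
 *         return 1;   // write hit our page protection; retry the access
 *     // otherwise deliver a real SIGSEGV to the guest
 */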
2302
6a00d601
FB
2303static inline void tlb_set_dirty(CPUState *env,
2304 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2305{
2306}
9fa3e853
FB
2307#endif /* defined(CONFIG_USER_ONLY) */
2308
e2eef170 2309#if !defined(CONFIG_USER_ONLY)
8da3ff18 2310
db7b5426 2311static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2312 ram_addr_t memory, ram_addr_t region_offset);
00f82b8a 2313static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2314 ram_addr_t orig_memory, ram_addr_t region_offset);
db7b5426
BS
2315#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2316 need_subpage) \
2317 do { \
2318 if (addr > start_addr) \
2319 start_addr2 = 0; \
2320 else { \
2321 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2322 if (start_addr2 > 0) \
2323 need_subpage = 1; \
2324 } \
2325 \
49e9fba2 2326 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2327 end_addr2 = TARGET_PAGE_SIZE - 1; \
2328 else { \
2329 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2330 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2331 need_subpage = 1; \
2332 } \
2333 } while (0)
2334
33417e70
FB
2335/* register physical memory. 'size' must be a multiple of the target
2336 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2337 io memory page. The address used when calling the IO function is
2338 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2339 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2340 before calculating this offset. This should not be a problem unless
2341 the low bits of start_addr and region_offset differ. */
2342void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2343 ram_addr_t size,
2344 ram_addr_t phys_offset,
2345 ram_addr_t region_offset)
33417e70 2346{
108c49b8 2347 target_phys_addr_t addr, end_addr;
92e873b9 2348 PhysPageDesc *p;
9d42037b 2349 CPUState *env;
00f82b8a 2350 ram_addr_t orig_size = size;
db7b5426 2351 void *subpage;
33417e70 2352
640f42e4 2353#ifdef CONFIG_KQEMU
da260249
FB
2354 /* XXX: should not depend on cpu context */
2355 env = first_cpu;
2356 if (env->kqemu_enabled) {
2357 kqemu_set_phys_mem(start_addr, size, phys_offset);
2358 }
2359#endif
7ba1e619
AL
2360 if (kvm_enabled())
2361 kvm_set_phys_mem(start_addr, size, phys_offset);
2362
67c4d23c
PB
2363 if (phys_offset == IO_MEM_UNASSIGNED) {
2364 region_offset = start_addr;
2365 }
8da3ff18 2366 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2367 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2368 end_addr = start_addr + (target_phys_addr_t)size;
2369 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2370 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2371 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2372 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2373 target_phys_addr_t start_addr2, end_addr2;
2374 int need_subpage = 0;
2375
2376 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2377 need_subpage);
4254fab8 2378 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2379 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2380 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2381 &p->phys_offset, orig_memory,
2382 p->region_offset);
db7b5426
BS
2383 } else {
2384 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2385 >> IO_MEM_SHIFT];
2386 }
8da3ff18
PB
2387 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2388 region_offset);
2389 p->region_offset = 0;
db7b5426
BS
2390 } else {
2391 p->phys_offset = phys_offset;
2392 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2393 (phys_offset & IO_MEM_ROMD))
2394 phys_offset += TARGET_PAGE_SIZE;
2395 }
2396 } else {
2397 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2398 p->phys_offset = phys_offset;
8da3ff18 2399 p->region_offset = region_offset;
db7b5426 2400 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2401 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2402 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2403 } else {
db7b5426
BS
2404 target_phys_addr_t start_addr2, end_addr2;
2405 int need_subpage = 0;
2406
2407 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2408 end_addr2, need_subpage);
2409
4254fab8 2410 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426 2411 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2412 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2413 addr & TARGET_PAGE_MASK);
db7b5426 2414 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2415 phys_offset, region_offset);
2416 p->region_offset = 0;
db7b5426
BS
2417 }
2418 }
2419 }
8da3ff18 2420 region_offset += TARGET_PAGE_SIZE;
33417e70 2421 }
3b46e624 2422
9d42037b
FB
2423 /* since each CPU stores ram addresses in its TLB cache, we must
2424 reset the modified entries */
2425 /* XXX: slow ! */
2426 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2427 tlb_flush(env, 1);
2428 }
33417e70
FB
2429}
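/* Hedged usage sketch, not from the original file: a typical board init
 * allocates RAM with qemu_ram_alloc() (below) and maps both RAM and a
 * device's MMIO; cpu_register_physical_memory() is the region_offset == 0
 * wrapper assumed from cpu-common.h:
 *
 *     ram_addr_t ram = qemu_ram_alloc(ram_size);
 *     cpu_register_physical_memory(0, ram_size, ram | IO_MEM_RAM);
 *
 *     int io = cpu_register_io_memory(0, my_read, my_write, opaque);
 *     cpu_register_physical_memory(mmio_base, TARGET_PAGE_SIZE, io);
 */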
2430
ba863458 2431/* XXX: temporary until new memory mapping API */
00f82b8a 2432ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2433{
2434 PhysPageDesc *p;
2435
2436 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2437 if (!p)
2438 return IO_MEM_UNASSIGNED;
2439 return p->phys_offset;
2440}
2441
f65ed4c1
AL
2442void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2443{
2444 if (kvm_enabled())
2445 kvm_coalesce_mmio_region(addr, size);
2446}
2447
2448void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2449{
2450 if (kvm_enabled())
2451 kvm_uncoalesce_mmio_region(addr, size);
2452}
2453
640f42e4 2454#ifdef CONFIG_KQEMU
e9a1ab19 2455/* XXX: better than nothing */
94a6b54f 2456static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2457{
2458 ram_addr_t addr;
94a6b54f 2459 if ((last_ram_offset + size) > kqemu_phys_ram_size) {
012a7045 2460 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
94a6b54f 2461 (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
e9a1ab19
FB
2462 abort();
2463 }
94a6b54f
PB
2464 addr = last_ram_offset;
2465 last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
e9a1ab19
FB
2466 return addr;
2467}
94a6b54f
PB
2468#endif
2469
2470ram_addr_t qemu_ram_alloc(ram_addr_t size)
2471{
2472 RAMBlock *new_block;
2473
640f42e4 2474#ifdef CONFIG_KQEMU
94a6b54f
PB
2475 if (kqemu_phys_ram_base) {
2476 return kqemu_ram_alloc(size);
2477 }
2478#endif
2479
2480 size = TARGET_PAGE_ALIGN(size);
2481 new_block = qemu_malloc(sizeof(*new_block));
2482
2483 new_block->host = qemu_vmalloc(size);
2484 new_block->offset = last_ram_offset;
2485 new_block->length = size;
2486
2487 new_block->next = ram_blocks;
2488 ram_blocks = new_block;
2489
2490 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2491 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2492 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2493 0xff, size >> TARGET_PAGE_BITS);
2494
2495 last_ram_offset += size;
2496
6f0437e8
JK
2497 if (kvm_enabled())
2498 kvm_setup_guest_memory(new_block->host, size);
2499
94a6b54f
PB
2500 return new_block->offset;
2501}
e9a1ab19
FB
2502
2503void qemu_ram_free(ram_addr_t addr)
2504{
94a6b54f 2505 /* TODO: implement this. */
e9a1ab19
FB
2506}
2507
dc828ca1 2508/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
2509 With the exception of the softmmu code in this file, this should
2510 only be used for local memory (e.g. video ram) that the device owns,
2511 and knows it isn't going to access beyond the end of the block.
2512
2513 It should not be used for general purpose DMA.
2514 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2515 */
dc828ca1
PB
2516void *qemu_get_ram_ptr(ram_addr_t addr)
2517{
94a6b54f
PB
2518 RAMBlock *prev;
2519 RAMBlock **prevp;
2520 RAMBlock *block;
2521
640f42e4 2522#ifdef CONFIG_KQEMU
94a6b54f
PB
2523 if (kqemu_phys_ram_base) {
2524 return kqemu_phys_ram_base + addr;
2525 }
2526#endif
2527
2528 prev = NULL;
2529 prevp = &ram_blocks;
2530 block = ram_blocks;
2531 while (block && (block->offset > addr
2532 || block->offset + block->length <= addr)) {
2533 if (prev)
2534 prevp = &prev->next;
2535 prev = block;
2536 block = block->next;
2537 }
2538 if (!block) {
2539 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2540 abort();
2541 }
2542 /* Move this entry to the start of the list. */
2543 if (prev) {
2544 prev->next = block->next;
2545 block->next = *prevp;
2546 *prevp = block;
2547 }
2548 return block->host + (addr - block->offset);
dc828ca1
PB
2549}
2550
5579c7f3
PB
2551/* Some of the softmmu routines need to translate from a host pointer
2552 (typically a TLB entry) back to a ram offset. */
2553ram_addr_t qemu_ram_addr_from_host(void *ptr)
2554{
94a6b54f
PB
2555 RAMBlock *prev;
2556 RAMBlock **prevp;
2557 RAMBlock *block;
2558 uint8_t *host = ptr;
2559
640f42e4 2560#ifdef CONFIG_KQEMU
94a6b54f
PB
2561 if (kqemu_phys_ram_base) {
2562 return host - kqemu_phys_ram_base;
2563 }
2564#endif
2565
2566 prev = NULL;
2567 prevp = &ram_blocks;
2568 block = ram_blocks;
2569 while (block && (block->host > host
2570 || block->host + block->length <= host)) {
2571 if (prev)
2572 prevp = &prev->next;
2573 prev = block;
2574 block = block->next;
2575 }
2576 if (!block) {
2577 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2578 abort();
2579 }
2580 return block->offset + (host - block->host);
5579c7f3
PB
2581}
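/* Illustrative note, not from the original file: within a single RAM
 * block the two lookups above are inverses, a property the softmmu
 * dirty-tracking code relies on:
 *
 *     assert(qemu_ram_addr_from_host(qemu_get_ram_ptr(addr)) == addr);
 */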
2582
a4193c8a 2583static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2584{
67d3b957 2585#ifdef DEBUG_UNASSIGNED
ab3d1727 2586 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 2587#endif
0a6f8a6d 2588#if defined(TARGET_SPARC)
e18231a3
BS
2589 do_unassigned_access(addr, 0, 0, 0, 1);
2590#endif
2591 return 0;
2592}
2593
2594static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2595{
2596#ifdef DEBUG_UNASSIGNED
2597 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2598#endif
0a6f8a6d 2599#if defined(TARGET_SPARC)
e18231a3
BS
2600 do_unassigned_access(addr, 0, 0, 0, 2);
2601#endif
2602 return 0;
2603}
2604
2605static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2606{
2607#ifdef DEBUG_UNASSIGNED
2608 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2609#endif
0a6f8a6d 2610#if defined(TARGET_SPARC)
e18231a3 2611 do_unassigned_access(addr, 0, 0, 0, 4);
67d3b957 2612#endif
33417e70
FB
2613 return 0;
2614}
2615
a4193c8a 2616static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2617{
67d3b957 2618#ifdef DEBUG_UNASSIGNED
ab3d1727 2619 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2620#endif
0a6f8a6d 2621#if defined(TARGET_SPARC)
e18231a3
BS
2622 do_unassigned_access(addr, 1, 0, 0, 1);
2623#endif
2624}
2625
2626static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2627{
2628#ifdef DEBUG_UNASSIGNED
2629 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2630#endif
0a6f8a6d 2631#if defined(TARGET_SPARC)
e18231a3
BS
2632 do_unassigned_access(addr, 1, 0, 0, 2);
2633#endif
2634}
2635
2636static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2637{
2638#ifdef DEBUG_UNASSIGNED
2639 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2640#endif
0a6f8a6d 2641#if defined(TARGET_SPARC)
e18231a3 2642 do_unassigned_access(addr, 1, 0, 0, 4);
b4f0a316 2643#endif
33417e70
FB
2644}
2645
2646static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2647 unassigned_mem_readb,
e18231a3
BS
2648 unassigned_mem_readw,
2649 unassigned_mem_readl,
33417e70
FB
2650};
2651
2652static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2653 unassigned_mem_writeb,
e18231a3
BS
2654 unassigned_mem_writew,
2655 unassigned_mem_writel,
33417e70
FB
2656};
2657
0f459d16
PB
2658static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2659 uint32_t val)
9fa3e853 2660{
3a7d929e 2661 int dirty_flags;
3a7d929e
FB
2662 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2663 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2664#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2665 tb_invalidate_phys_page_fast(ram_addr, 1);
2666 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2667#endif
3a7d929e 2668 }
5579c7f3 2669 stb_p(qemu_get_ram_ptr(ram_addr), val);
640f42e4 2670#ifdef CONFIG_KQEMU
f32fc648
FB
2671 if (cpu_single_env->kqemu_enabled &&
2672 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2673 kqemu_modify_page(cpu_single_env, ram_addr);
2674#endif
f23db169
FB
2675 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2676 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2677 /* we remove the notdirty callback only if the code has been
2678 flushed */
2679 if (dirty_flags == 0xff)
2e70f6ef 2680 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2681}
2682
0f459d16
PB
2683static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2684 uint32_t val)
9fa3e853 2685{
3a7d929e 2686 int dirty_flags;
3a7d929e
FB
2687 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2688 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2689#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2690 tb_invalidate_phys_page_fast(ram_addr, 2);
2691 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2692#endif
3a7d929e 2693 }
5579c7f3 2694 stw_p(qemu_get_ram_ptr(ram_addr), val);
640f42e4 2695#ifdef CONFIG_KQEMU
f32fc648
FB
2696 if (cpu_single_env->kqemu_enabled &&
2697 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2698 kqemu_modify_page(cpu_single_env, ram_addr);
2699#endif
f23db169
FB
2700 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2701 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2702 /* we remove the notdirty callback only if the code has been
2703 flushed */
2704 if (dirty_flags == 0xff)
2e70f6ef 2705 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2706}
2707
0f459d16
PB
2708static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2709 uint32_t val)
9fa3e853 2710{
3a7d929e 2711 int dirty_flags;
3a7d929e
FB
2712 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2713 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2714#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2715 tb_invalidate_phys_page_fast(ram_addr, 4);
2716 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2717#endif
3a7d929e 2718 }
5579c7f3 2719 stl_p(qemu_get_ram_ptr(ram_addr), val);
640f42e4 2720#ifdef CONFIG_KQEMU
f32fc648
FB
2721 if (cpu_single_env->kqemu_enabled &&
2722 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2723 kqemu_modify_page(cpu_single_env, ram_addr);
2724#endif
f23db169
FB
2725 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2726 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2727 /* we remove the notdirty callback only if the code has been
2728 flushed */
2729 if (dirty_flags == 0xff)
2e70f6ef 2730 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2731}
2732
3a7d929e 2733static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2734 NULL, /* never used */
2735 NULL, /* never used */
2736 NULL, /* never used */
2737};
2738
1ccde1cb
FB
2739static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2740 notdirty_mem_writeb,
2741 notdirty_mem_writew,
2742 notdirty_mem_writel,
2743};
2744
0f459d16 2745/* Generate a debug exception if a watchpoint has been hit. */
b4051334 2746static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
2747{
2748 CPUState *env = cpu_single_env;
06d55cc1
AL
2749 target_ulong pc, cs_base;
2750 TranslationBlock *tb;
0f459d16 2751 target_ulong vaddr;
a1d1bb31 2752 CPUWatchpoint *wp;
06d55cc1 2753 int cpu_flags;
0f459d16 2754
06d55cc1
AL
2755 if (env->watchpoint_hit) {
2756 /* We re-entered the check after replacing the TB. Now raise
2757 * the debug interrupt so that it will trigger after the
2758 * current instruction. */
2759 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2760 return;
2761 }
2e70f6ef 2762 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
c0ce998e 2763 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
2764 if ((vaddr == (wp->vaddr & len_mask) ||
2765 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
2766 wp->flags |= BP_WATCHPOINT_HIT;
2767 if (!env->watchpoint_hit) {
2768 env->watchpoint_hit = wp;
2769 tb = tb_find_pc(env->mem_io_pc);
2770 if (!tb) {
2771 cpu_abort(env, "check_watchpoint: could not find TB for "
2772 "pc=%p", (void *)env->mem_io_pc);
2773 }
2774 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2775 tb_phys_invalidate(tb, -1);
2776 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2777 env->exception_index = EXCP_DEBUG;
2778 } else {
2779 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2780 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2781 }
2782 cpu_resume_from_signal(env, NULL);
06d55cc1 2783 }
6e140f28
AL
2784 } else {
2785 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
2786 }
2787 }
2788}
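/* Illustrative note, not from the original file: these handlers only fire
 * for pages that hold a watchpoint inserted through the debug API, e.g.:
 *
 *     cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, NULL);
 */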
2789
6658ffb8
PB
2790/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2791 so these check for a hit then pass through to the normal out-of-line
2792 phys routines. */
2793static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2794{
b4051334 2795 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
2796 return ldub_phys(addr);
2797}
2798
2799static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2800{
b4051334 2801 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
2802 return lduw_phys(addr);
2803}
2804
2805static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2806{
b4051334 2807 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
2808 return ldl_phys(addr);
2809}
2810
6658ffb8
PB
2811static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2812 uint32_t val)
2813{
b4051334 2814 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
2815 stb_phys(addr, val);
2816}
2817
2818static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2819 uint32_t val)
2820{
b4051334 2821 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
2822 stw_phys(addr, val);
2823}
2824
2825static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2826 uint32_t val)
2827{
b4051334 2828 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
2829 stl_phys(addr, val);
2830}
2831
2832static CPUReadMemoryFunc *watch_mem_read[3] = {
2833 watch_mem_readb,
2834 watch_mem_readw,
2835 watch_mem_readl,
2836};
2837
2838static CPUWriteMemoryFunc *watch_mem_write[3] = {
2839 watch_mem_writeb,
2840 watch_mem_writew,
2841 watch_mem_writel,
2842};
6658ffb8 2843
db7b5426
BS
2844static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2845 unsigned int len)
2846{
db7b5426
BS
2847 uint32_t ret;
2848 unsigned int idx;
2849
8da3ff18 2850 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2851#if defined(DEBUG_SUBPAGE)
2852 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2853 mmio, len, addr, idx);
2854#endif
8da3ff18
PB
2855 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2856 addr + mmio->region_offset[idx][0][len]);
db7b5426
BS
2857
2858 return ret;
2859}
2860
2861static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2862 uint32_t value, unsigned int len)
2863{
db7b5426
BS
2864 unsigned int idx;
2865
8da3ff18 2866 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2867#if defined(DEBUG_SUBPAGE)
2868 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2869 mmio, len, addr, idx, value);
2870#endif
8da3ff18
PB
2871 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2872 addr + mmio->region_offset[idx][1][len],
2873 value);
db7b5426
BS
2874}
2875
2876static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2877{
2878#if defined(DEBUG_SUBPAGE)
2879 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2880#endif
2881
2882 return subpage_readlen(opaque, addr, 0);
2883}
2884
2885static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2886 uint32_t value)
2887{
2888#if defined(DEBUG_SUBPAGE)
2889 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2890#endif
2891 subpage_writelen(opaque, addr, value, 0);
2892}
2893
2894static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2895{
2896#if defined(DEBUG_SUBPAGE)
2897 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2898#endif
2899
2900 return subpage_readlen(opaque, addr, 1);
2901}
2902
2903static void subpage_writew (void *opaque, target_phys_addr_t addr,
2904 uint32_t value)
2905{
2906#if defined(DEBUG_SUBPAGE)
2907 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2908#endif
2909 subpage_writelen(opaque, addr, value, 1);
2910}
2911
2912static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2913{
2914#if defined(DEBUG_SUBPAGE)
2915 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2916#endif
2917
2918 return subpage_readlen(opaque, addr, 2);
2919}
2920
2921static void subpage_writel (void *opaque,
2922 target_phys_addr_t addr, uint32_t value)
2923{
2924#if defined(DEBUG_SUBPAGE)
2925 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2926#endif
2927 subpage_writelen(opaque, addr, value, 2);
2928}
2929
2930static CPUReadMemoryFunc *subpage_read[] = {
2931 &subpage_readb,
2932 &subpage_readw,
2933 &subpage_readl,
2934};
2935
2936static CPUWriteMemoryFunc *subpage_write[] = {
2937 &subpage_writeb,
2938 &subpage_writew,
2939 &subpage_writel,
2940};
2941
2942static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2943 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
2944{
2945 int idx, eidx;
4254fab8 2946 unsigned int i;
db7b5426
BS
2947
2948 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2949 return -1;
2950 idx = SUBPAGE_IDX(start);
2951 eidx = SUBPAGE_IDX(end);
2952#if defined(DEBUG_SUBPAGE)
2953 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2954 mmio, start, end, idx, eidx, memory);
2955#endif
2956 memory >>= IO_MEM_SHIFT;
2957 for (; idx <= eidx; idx++) {
4254fab8 2958 for (i = 0; i < 4; i++) {
3ee89922
BS
2959 if (io_mem_read[memory][i]) {
2960 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2961 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
8da3ff18 2962 mmio->region_offset[idx][0][i] = region_offset;
3ee89922
BS
2963 }
2964 if (io_mem_write[memory][i]) {
2965 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2966 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
8da3ff18 2967 mmio->region_offset[idx][1][i] = region_offset;
3ee89922 2968 }
4254fab8 2969 }
db7b5426
BS
2970 }
2971
2972 return 0;
2973}
2974
00f82b8a 2975static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2976 ram_addr_t orig_memory, ram_addr_t region_offset)
db7b5426
BS
2977{
2978 subpage_t *mmio;
2979 int subpage_memory;
2980
2981 mmio = qemu_mallocz(sizeof(subpage_t));
1eec614b
AL
2982
2983 mmio->base = base;
2984 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
db7b5426 2985#if defined(DEBUG_SUBPAGE)
1eec614b
AL
2986 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2987 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 2988#endif
1eec614b
AL
2989 *phys = subpage_memory | IO_MEM_SUBPAGE;
2990 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
8da3ff18 2991 region_offset);
db7b5426
BS
2992
2993 return mmio;
2994}
2995
88715657
AL
2996static int get_free_io_mem_idx(void)
2997{
2998 int i;
2999
3000 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3001 if (!io_mem_used[i]) {
3002 io_mem_used[i] = 1;
3003 return i;
3004 }
3005
3006 return -1;
3007}
3008
33417e70
FB
3009static void io_mem_init(void)
3010{
88715657
AL
3011 int i;
3012
3a7d929e 3013 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 3014 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 3015 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
88715657
AL
3016 for (i=0; i<5; i++)
3017 io_mem_used[i] = 1;
1ccde1cb 3018
0f459d16 3019 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
6658ffb8 3020 watch_mem_write, NULL);
640f42e4 3021#ifdef CONFIG_KQEMU
94a6b54f
PB
3022 if (kqemu_phys_ram_base) {
3023 /* alloc dirty bits array */
3024 phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3025 memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3026 }
3027#endif
33417e70
FB
3028}
3029
3030/* mem_read and mem_write are arrays of functions containing the
3031 function to access byte (index 0), word (index 1) and dword (index
0b4e6e3e 3032 2). Functions can be omitted with a NULL function pointer.
3ee89922 3033 If io_index is non zero, the corresponding io zone is
4254fab8
BS
3034 modified. If it is zero, a new io zone is allocated. The return
3035 value can be used with cpu_register_physical_memory(). (-1) is
3036 returned on error. */
33417e70
FB
3037int cpu_register_io_memory(int io_index,
3038 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
3039 CPUWriteMemoryFunc **mem_write,
3040 void *opaque)
33417e70 3041{
4254fab8 3042 int i, subwidth = 0;
33417e70
FB
3043
3044 if (io_index <= 0) {
88715657
AL
3045 io_index = get_free_io_mem_idx();
3046 if (io_index == -1)
3047 return io_index;
33417e70
FB
3048 } else {
3049 if (io_index >= IO_MEM_NB_ENTRIES)
3050 return -1;
3051 }
b5ff1b31 3052
33417e70 3053 for(i = 0;i < 3; i++) {
4254fab8
BS
3054 if (!mem_read[i] || !mem_write[i])
3055 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
3056 io_mem_read[io_index][i] = mem_read[i];
3057 io_mem_write[io_index][i] = mem_write[i];
3058 }
a4193c8a 3059 io_mem_opaque[io_index] = opaque;
4254fab8 3060 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 3061}
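/* Hedged usage sketch, not from the original file: a device supplies one
 * read table and one write table and then maps the returned index; all
 * mydev_* names are illustrative:
 *
 *     static CPUReadMemoryFunc *mydev_read[3] = {
 *         mydev_readb, mydev_readw, mydev_readl,
 *     };
 *     static CPUWriteMemoryFunc *mydev_write[3] = {
 *         mydev_writeb, mydev_writew, mydev_writel,
 *     };
 *
 *     int io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
 *     cpu_register_physical_memory(base, 0x1000, io);
 */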
61382a50 3062
88715657
AL
3063void cpu_unregister_io_memory(int io_table_address)
3064{
3065 int i;
3066 int io_index = io_table_address >> IO_MEM_SHIFT;
3067
3068 for (i=0;i < 3; i++) {
3069 io_mem_read[io_index][i] = unassigned_mem_read[i];
3070 io_mem_write[io_index][i] = unassigned_mem_write[i];
3071 }
3072 io_mem_opaque[io_index] = NULL;
3073 io_mem_used[io_index] = 0;
3074}
3075
e2eef170
PB
3076#endif /* !defined(CONFIG_USER_ONLY) */
3077
13eb76e0
FB
3078/* physical memory access (slow version, mainly for debug) */
3079#if defined(CONFIG_USER_ONLY)
5fafdf24 3080void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3081 int len, int is_write)
3082{
3083 int l, flags;
3084 target_ulong page;
53a5960a 3085 void * p;
13eb76e0
FB
3086
3087 while (len > 0) {
3088 page = addr & TARGET_PAGE_MASK;
3089 l = (page + TARGET_PAGE_SIZE) - addr;
3090 if (l > len)
3091 l = len;
3092 flags = page_get_flags(page);
3093 if (!(flags & PAGE_VALID))
3094 return;
3095 if (is_write) {
3096 if (!(flags & PAGE_WRITE))
3097 return;
579a97f7 3098 /* XXX: this code should not depend on lock_user */
72fb7daa 3099 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
3100 /* FIXME - should this return an error rather than just fail? */
3101 return;
72fb7daa
AJ
3102 memcpy(p, buf, l);
3103 unlock_user(p, addr, l);
13eb76e0
FB
3104 } else {
3105 if (!(flags & PAGE_READ))
3106 return;
579a97f7 3107 /* XXX: this code should not depend on lock_user */
72fb7daa 3108 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
3109 /* FIXME - should this return an error rather than just fail? */
3110 return;
72fb7daa 3111 memcpy(buf, p, l);
5b257578 3112 unlock_user(p, addr, 0);
13eb76e0
FB
3113 }
3114 len -= l;
3115 buf += l;
3116 addr += l;
3117 }
3118}
8df1cd07 3119
13eb76e0 3120#else
5fafdf24 3121void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3122 int len, int is_write)
3123{
3124 int l, io_index;
3125 uint8_t *ptr;
3126 uint32_t val;
2e12669a
FB
3127 target_phys_addr_t page;
3128 unsigned long pd;
92e873b9 3129 PhysPageDesc *p;
3b46e624 3130
13eb76e0
FB
3131 while (len > 0) {
3132 page = addr & TARGET_PAGE_MASK;
3133 l = (page + TARGET_PAGE_SIZE) - addr;
3134 if (l > len)
3135 l = len;
92e873b9 3136 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
3137 if (!p) {
3138 pd = IO_MEM_UNASSIGNED;
3139 } else {
3140 pd = p->phys_offset;
3141 }
3b46e624 3142
13eb76e0 3143 if (is_write) {
3a7d929e 3144 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
6c2934db 3145 target_phys_addr_t addr1 = addr;
13eb76e0 3146 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3147 if (p)
6c2934db 3148 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
3149 /* XXX: could force cpu_single_env to NULL to avoid
3150 potential bugs */
6c2934db 3151 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3152 /* 32 bit write access */
c27004ec 3153 val = ldl_p(buf);
6c2934db 3154 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 3155 l = 4;
6c2934db 3156 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3157 /* 16 bit write access */
c27004ec 3158 val = lduw_p(buf);
6c2934db 3159 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3160 l = 2;
3161 } else {
1c213d19 3162 /* 8 bit write access */
c27004ec 3163 val = ldub_p(buf);
6c2934db 3164 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3165 l = 1;
3166 }
3167 } else {
b448f2f3
FB
3168 unsigned long addr1;
3169 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3170 /* RAM case */
5579c7f3 3171 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3172 memcpy(ptr, buf, l);
3a7d929e
FB
3173 if (!cpu_physical_memory_is_dirty(addr1)) {
3174 /* invalidate code */
3175 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3176 /* set dirty bit */
5fafdf24 3177 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 3178 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3179 }
13eb76e0
FB
3180 }
3181 } else {
5fafdf24 3182 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3183 !(pd & IO_MEM_ROMD)) {
6c2934db 3184 target_phys_addr_t addr1 = addr;
13eb76e0
FB
3185 /* I/O case */
3186 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3187 if (p)
6c2934db
AJ
3188 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3189 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3190 /* 32 bit read access */
6c2934db 3191 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 3192 stl_p(buf, val);
13eb76e0 3193 l = 4;
6c2934db 3194 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3195 /* 16 bit read access */
6c2934db 3196 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 3197 stw_p(buf, val);
13eb76e0
FB
3198 l = 2;
3199 } else {
1c213d19 3200 /* 8 bit read access */
6c2934db 3201 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 3202 stb_p(buf, val);
13eb76e0
FB
3203 l = 1;
3204 }
3205 } else {
3206 /* RAM case */
5579c7f3 3207 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
13eb76e0
FB
3208 (addr & ~TARGET_PAGE_MASK);
3209 memcpy(buf, ptr, l);
3210 }
3211 }
3212 len -= l;
3213 buf += l;
3214 addr += l;
3215 }
3216}
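/* Hedged usage sketch, not from the original file: devices normally call
 * the cpu_physical_memory_read()/write() wrappers assumed from
 * cpu-common.h, which are thin is_write = 0/1 shims over the function
 * above:
 *
 *     uint32_t v;
 *     cpu_physical_memory_read(desc_addr, (uint8_t *)&v, 4);
 *     v = le32_to_cpu(v);   // le32_to_cpu assumed from bswap.h
 */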
8df1cd07 3217
d0ecd2aa 3218/* used for ROM loading : can write in RAM and ROM */
5fafdf24 3219void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3220 const uint8_t *buf, int len)
3221{
3222 int l;
3223 uint8_t *ptr;
3224 target_phys_addr_t page;
3225 unsigned long pd;
3226 PhysPageDesc *p;
3b46e624 3227
d0ecd2aa
FB
3228 while (len > 0) {
3229 page = addr & TARGET_PAGE_MASK;
3230 l = (page + TARGET_PAGE_SIZE) - addr;
3231 if (l > len)
3232 l = len;
3233 p = phys_page_find(page >> TARGET_PAGE_BITS);
3234 if (!p) {
3235 pd = IO_MEM_UNASSIGNED;
3236 } else {
3237 pd = p->phys_offset;
3238 }
3b46e624 3239
d0ecd2aa 3240 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
3241 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3242 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
3243 /* do nothing */
3244 } else {
3245 unsigned long addr1;
3246 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3247 /* ROM/RAM case */
5579c7f3 3248 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa
FB
3249 memcpy(ptr, buf, l);
3250 }
3251 len -= l;
3252 buf += l;
3253 addr += l;
3254 }
3255}
3256
6d16c2f8
AL
3257typedef struct {
3258 void *buffer;
3259 target_phys_addr_t addr;
3260 target_phys_addr_t len;
3261} BounceBuffer;
3262
3263static BounceBuffer bounce;
3264
ba223c29
AL
3265typedef struct MapClient {
3266 void *opaque;
3267 void (*callback)(void *opaque);
3268 LIST_ENTRY(MapClient) link;
3269} MapClient;
3270
3271static LIST_HEAD(map_client_list, MapClient) map_client_list
3272 = LIST_HEAD_INITIALIZER(map_client_list);
3273
3274void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3275{
3276 MapClient *client = qemu_malloc(sizeof(*client));
3277
3278 client->opaque = opaque;
3279 client->callback = callback;
3280 LIST_INSERT_HEAD(&map_client_list, client, link);
3281 return client;
3282}
3283
3284void cpu_unregister_map_client(void *_client)
3285{
3286 MapClient *client = (MapClient *)_client;
3287
3288 LIST_REMOVE(client, link);
3289}
3290
3291static void cpu_notify_map_clients(void)
3292{
3293 MapClient *client;
3294
3295 while (!LIST_EMPTY(&map_client_list)) {
3296 client = LIST_FIRST(&map_client_list);
3297 client->callback(client->opaque);
3298 LIST_REMOVE(client, link);
3299 }
3300}
3301
6d16c2f8
AL
3302/* Map a physical memory region into a host virtual address.
3303 * May map a subset of the requested range, given by and returned in *plen.
3304 * May return NULL if resources needed to perform the mapping are exhausted.
3305 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3306 * Use cpu_register_map_client() to know when retrying the map operation is
3307 * likely to succeed.
6d16c2f8
AL
3308 */
3309void *cpu_physical_memory_map(target_phys_addr_t addr,
3310 target_phys_addr_t *plen,
3311 int is_write)
3312{
3313 target_phys_addr_t len = *plen;
3314 target_phys_addr_t done = 0;
3315 int l;
3316 uint8_t *ret = NULL;
3317 uint8_t *ptr;
3318 target_phys_addr_t page;
3319 unsigned long pd;
3320 PhysPageDesc *p;
3321 unsigned long addr1;
3322
3323 while (len > 0) {
3324 page = addr & TARGET_PAGE_MASK;
3325 l = (page + TARGET_PAGE_SIZE) - addr;
3326 if (l > len)
3327 l = len;
3328 p = phys_page_find(page >> TARGET_PAGE_BITS);
3329 if (!p) {
3330 pd = IO_MEM_UNASSIGNED;
3331 } else {
3332 pd = p->phys_offset;
3333 }
3334
3335 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3336 if (done || bounce.buffer) {
3337 break;
3338 }
3339 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3340 bounce.addr = addr;
3341 bounce.len = l;
3342 if (!is_write) {
3343 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3344 }
3345 ptr = bounce.buffer;
3346 } else {
3347 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 3348 ptr = qemu_get_ram_ptr(addr1);
6d16c2f8
AL
3349 }
3350 if (!done) {
3351 ret = ptr;
3352 } else if (ret + done != ptr) {
3353 break;
3354 }
3355
3356 len -= l;
3357 addr += l;
3358 done += l;
3359 }
3360 *plen = done;
3361 return ret;
3362}
3363
3364/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3365 * Will also mark the memory as dirty if is_write == 1. access_len gives
3366 * the amount of memory that was actually read or written by the caller.
3367 */
3368void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3369 int is_write, target_phys_addr_t access_len)
3370{
3371 if (buffer != bounce.buffer) {
3372 if (is_write) {
5579c7f3 3373 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
6d16c2f8
AL
3374 while (access_len) {
3375 unsigned l;
3376 l = TARGET_PAGE_SIZE;
3377 if (l > access_len)
3378 l = access_len;
3379 if (!cpu_physical_memory_is_dirty(addr1)) {
3380 /* invalidate code */
3381 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3382 /* set dirty bit */
3383 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3384 (0xff & ~CODE_DIRTY_FLAG);
3385 }
3386 addr1 += l;
3387 access_len -= l;
3388 }
3389 }
3390 return;
3391 }
3392 if (is_write) {
3393 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3394 }
3395 qemu_free(bounce.buffer);
3396 bounce.buffer = NULL;
ba223c29 3397 cpu_notify_map_clients();
6d16c2f8 3398}
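/* Hedged usage sketch, not from the original file: the intended zero-copy
 * DMA pattern, falling back on cpu_register_map_client() while the single
 * bounce buffer is busy; retry_dma() is an assumed callback that restarts
 * the transfer:
 *
 *     target_phys_addr_t plen = len;
 *     uint8_t *buf = cpu_physical_memory_map(addr, &plen, is_write);
 *     if (!buf) {
 *         cpu_register_map_client(s, retry_dma);
 *         return;
 *     }
 *     // ... device reads or writes buf[0..plen) ...
 *     cpu_physical_memory_unmap(buf, plen, is_write, plen);
 */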
d0ecd2aa 3399
8df1cd07
FB
3400/* warning: addr must be aligned */
3401uint32_t ldl_phys(target_phys_addr_t addr)
3402{
3403 int io_index;
3404 uint8_t *ptr;
3405 uint32_t val;
3406 unsigned long pd;
3407 PhysPageDesc *p;
3408
3409 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3410 if (!p) {
3411 pd = IO_MEM_UNASSIGNED;
3412 } else {
3413 pd = p->phys_offset;
3414 }
3b46e624 3415
5fafdf24 3416 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3417 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
3418 /* I/O case */
3419 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3420 if (p)
3421 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3422 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3423 } else {
3424 /* RAM case */
5579c7f3 3425 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
8df1cd07
FB
3426 (addr & ~TARGET_PAGE_MASK);
3427 val = ldl_p(ptr);
3428 }
3429 return val;
3430}
3431
84b7b8e7
FB
3432/* warning: addr must be aligned */
3433uint64_t ldq_phys(target_phys_addr_t addr)
3434{
3435 int io_index;
3436 uint8_t *ptr;
3437 uint64_t val;
3438 unsigned long pd;
3439 PhysPageDesc *p;
3440
3441 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3442 if (!p) {
3443 pd = IO_MEM_UNASSIGNED;
3444 } else {
3445 pd = p->phys_offset;
3446 }
3b46e624 3447
2a4188a3
FB
3448 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3449 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
3450 /* I/O case */
3451 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3452 if (p)
3453 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
84b7b8e7
FB
3454#ifdef TARGET_WORDS_BIGENDIAN
3455 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3456 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3457#else
3458 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3459 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3460#endif
3461 } else {
3462 /* RAM case */
5579c7f3 3463 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3464 (addr & ~TARGET_PAGE_MASK);
3465 val = ldq_p(ptr);
3466 }
3467 return val;
3468}
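/* Editor's note -- a worked instance of the split 64-bit MMIO read above.
 * Fetching 0x1122334455667788 from a device on a big-endian target issues
 * two 32-bit reads:
 *
 *     hi = read4(addr)     -> 0x11223344
 *     lo = read4(addr + 4) -> 0x55667788
 *     val = (hi << 32) | lo
 *
 * whereas a little-endian target reads the low word at 'addr' first. */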
3469
3470/* XXX: optimize */
3471uint32_t ldub_phys(target_phys_addr_t addr)
3472{
3473 uint8_t val;
3474 cpu_physical_memory_read(addr, &val, 1);
3475 return val;
3476}
3477
3478/* XXX: optimize */
3479uint32_t lduw_phys(target_phys_addr_t addr)
3480{
3481 uint16_t val;
3482 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3483 return tswap16(val);
3484}
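/* Editor's note: tswap16() swaps only when host and target byte order
 * differ, so the two bytes read above are interpreted in the guest's
 * endianness.  E.g. the bytes {0x12, 0x34} at 'addr' yield 0x1234 on a
 * big-endian target and 0x3412 on a little-endian one, regardless of the
 * host. */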
3485
3486 /* warning: addr must be aligned. The RAM page is not marked as dirty
3487 and the code inside is not invalidated. It is useful if the dirty
3488 bits are used to track modified PTEs */
3489void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3490{
3491 int io_index;
3492 uint8_t *ptr;
3493 unsigned long pd;
3494 PhysPageDesc *p;
3495
3496 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3497 if (!p) {
3498 pd = IO_MEM_UNASSIGNED;
3499 } else {
3500 pd = p->phys_offset;
3501 }
3b46e624 3502
3a7d929e 3503 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3504 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3505 if (p)
3506 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3507 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3508 } else {
74576198 3509 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 3510 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3511 stl_p(ptr, val);
3512
3513 if (unlikely(in_migration)) {
3514 if (!cpu_physical_memory_is_dirty(addr1)) {
3515 /* invalidate code */
3516 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3517 /* set dirty bit */
3518 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3519 (0xff & ~CODE_DIRTY_FLAG);
3520 }
3521 }
3522 }
3523}
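/* Editor's sketch (hypothetical, not in the original source): the typical
 * client of the _notdirty variant is page-table emulation.  Updating a
 * guest PTE's accessed/dirty bits must not set the page's own dirty flags,
 * or the emulated-hardware update would be indistinguishable from a guest
 * write.  ACCESSED_BIT and the helper name are invented for illustration: */
#define ACCESSED_BIT 0x20 /* hypothetical PTE flag */

static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | ACCESSED_BIT);
}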
3524
3525void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3526{
3527 int io_index;
3528 uint8_t *ptr;
3529 unsigned long pd;
3530 PhysPageDesc *p;
3531
3532 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3533 if (!p) {
3534 pd = IO_MEM_UNASSIGNED;
3535 } else {
3536 pd = p->phys_offset;
3537 }
3b46e624 3538
3539 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3540 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3541 if (p)
3542 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3543#ifdef TARGET_WORDS_BIGENDIAN
3544 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3545 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3546#else
3547 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3548 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3549#endif
3550 } else {
5579c7f3 3551 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3552 (addr & ~TARGET_PAGE_MASK);
3553 stq_p(ptr, val);
3554 }
3555}
3556
8df1cd07 3557/* warning: addr must be aligned */
3558void stl_phys(target_phys_addr_t addr, uint32_t val)
3559{
3560 int io_index;
3561 uint8_t *ptr;
3562 unsigned long pd;
3563 PhysPageDesc *p;
3564
3565 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3566 if (!p) {
3567 pd = IO_MEM_UNASSIGNED;
3568 } else {
3569 pd = p->phys_offset;
3570 }
3b46e624 3571
3a7d929e 3572 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3573 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3574 if (p)
3575 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3576 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3577 } else {
3578 unsigned long addr1;
3579 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3580 /* RAM case */
5579c7f3 3581 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3582 stl_p(ptr, val);
3583 if (!cpu_physical_memory_is_dirty(addr1)) {
3584 /* invalidate code */
3585 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3586 /* set dirty bit */
3587 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3588 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3589 }
3590 }
3591}
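/* Editor's note: phys_ram_dirty[] keeps one byte of dirty flags per page.
 * The "0xff & ~CODE_DIRTY_FLAG" above raises every flag (VGA, migration,
 * ...) except CODE_DIRTY_FLAG, which is managed separately by the
 * translated-code bookkeeping elsewhere in this file: it records whether
 * the page is known to contain no translated code. */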
3592
3593/* XXX: optimize */
3594void stb_phys(target_phys_addr_t addr, uint32_t val)
3595{
3596 uint8_t v = val;
3597 cpu_physical_memory_write(addr, &v, 1);
3598}
3599
3600/* XXX: optimize */
3601void stw_phys(target_phys_addr_t addr, uint32_t val)
3602{
3603 uint16_t v = tswap16(val);
3604 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3605}
3606
3607/* XXX: optimize */
3608void stq_phys(target_phys_addr_t addr, uint64_t val)
3609{
3610 val = tswap64(val);
3611 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3612}
3613
3614#endif
3615
5e2972fd 3616/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 3617int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 3618 uint8_t *buf, int len, int is_write)
3619{
3620 int l;
3621 target_phys_addr_t phys_addr;
3622 target_ulong page;
3623
3624 while (len > 0) {
3625 page = addr & TARGET_PAGE_MASK;
3626 phys_addr = cpu_get_phys_page_debug(env, page);
3627 /* if no physical page mapped, return an error */
3628 if (phys_addr == -1)
3629 return -1;
3630 l = (page + TARGET_PAGE_SIZE) - addr;
3631 if (l > len)
3632 l = len;
3633 phys_addr += (addr & ~TARGET_PAGE_MASK);
3634#if !defined(CONFIG_USER_ONLY)
3635 if (is_write)
3636 cpu_physical_memory_write_rom(phys_addr, buf, l);
3637 else
3638#endif
3639 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3640 len -= l;
3641 buf += l;
3642 addr += l;
3643 }
3644 return 0;
3645}
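/* Editor's sketch: this is the primitive the gdb stub uses for guest
 * memory access; a hypothetical caller reading guest memory by virtual
 * address looks like: */
static int example_read_guest(CPUState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    /* translation is done page by page via cpu_get_phys_page_debug() */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}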
3646
3647 /* In deterministic execution mode, instructions doing device I/O
3648 must be at the end of the TB. */
3649void cpu_io_recompile(CPUState *env, void *retaddr)
3650{
3651 TranslationBlock *tb;
3652 uint32_t n, cflags;
3653 target_ulong pc, cs_base;
3654 uint64_t flags;
3655
3656 tb = tb_find_pc((unsigned long)retaddr);
3657 if (!tb) {
3658 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3659 retaddr);
3660 }
3661 n = env->icount_decr.u16.low + tb->icount;
3662 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3663 /* Calculate how many instructions had been executed before the fault
bf20dc07 3664 occurred. */
3665 n = n - env->icount_decr.u16.low;
3666 /* Generate a new TB ending on the I/O insn. */
3667 n++;
3668 /* On MIPS and SH, delay slot instructions can only be restarted if
3669 they were already the first instruction in the TB. If this is not
bf20dc07 3670 the first instruction in a TB then re-execute the preceding
3671 branch. */
3672#if defined(TARGET_MIPS)
3673 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3674 env->active_tc.PC -= 4;
3675 env->icount_decr.u16.low++;
3676 env->hflags &= ~MIPS_HFLAG_BMASK;
3677 }
3678#elif defined(TARGET_SH4)
3679 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3680 && n > 1) {
3681 env->pc -= 2;
3682 env->icount_decr.u16.low++;
3683 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3684 }
3685#endif
3686 /* This should never happen. */
3687 if (n > CF_COUNT_MASK)
3688 cpu_abort(env, "TB too big during recompile");
3689
3690 cflags = n | CF_LAST_IO;
3691 pc = tb->pc;
3692 cs_base = tb->cs_base;
3693 flags = tb->flags;
3694 tb_phys_invalidate(tb, -1);
3695 /* FIXME: In theory this could raise an exception. In practice
3696 we have already translated the block once so it's probably ok. */
3697 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 3698 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3699 the first in the TB) then we end up generating a whole new TB and
3700 repeating the fault, which is horribly inefficient.
3701 Better would be to execute just this insn uncached, or generate a
3702 second new TB. */
3703 cpu_resume_from_signal(env, NULL);
3704}
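/* Editor's note -- a worked instance of the icount arithmetic above.  Say
 * tb->icount == 10 and the I/O insn is the 7th in the block.  Entering the
 * TB subtracted 10 from icount_decr.u16.low, and cpu_restore_state()
 * credits back the instructions that never completed, so:
 *
 *     n  = low + 10      (budget before the TB started)
 *     n -= low'          -> 6 insns fully executed before the fault
 *     n++                -> 7: the regenerated TB, built with
 *                           cflags = 7 | CF_LAST_IO, ends exactly on the
 *                           I/O instruction. */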
3705
3706void dump_exec_info(FILE *f,
3707 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3708{
3709 int i, target_code_size, max_target_code_size;
3710 int direct_jmp_count, direct_jmp2_count, cross_page;
3711 TranslationBlock *tb;
3b46e624 3712
3713 target_code_size = 0;
3714 max_target_code_size = 0;
3715 cross_page = 0;
3716 direct_jmp_count = 0;
3717 direct_jmp2_count = 0;
3718 for(i = 0; i < nb_tbs; i++) {
3719 tb = &tbs[i];
3720 target_code_size += tb->size;
3721 if (tb->size > max_target_code_size)
3722 max_target_code_size = tb->size;
3723 if (tb->page_addr[1] != -1)
3724 cross_page++;
3725 if (tb->tb_next_offset[0] != 0xffff) {
3726 direct_jmp_count++;
3727 if (tb->tb_next_offset[1] != 0xffff) {
3728 direct_jmp2_count++;
3729 }
3730 }
3731 }
3732 /* XXX: avoid using doubles? */
57fec1fe 3733 cpu_fprintf(f, "Translation buffer state:\n");
3734 cpu_fprintf(f, "gen code size %ld/%ld\n",
3735 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3736 cpu_fprintf(f, "TB count %d/%d\n",
3737 nb_tbs, code_gen_max_blocks);
5fafdf24 3738 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3739 nb_tbs ? target_code_size / nb_tbs : 0,
3740 max_target_code_size);
5fafdf24 3741 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3742 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3743 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3744 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3745 cross_page,
3746 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3747 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3748 direct_jmp_count,
3749 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3750 direct_jmp2_count,
3751 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3752 cpu_fprintf(f, "\nStatistics:\n");
3753 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3754 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3755 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3756 tcg_dump_info(f, cpu_fprintf);
3757}
3758
5fafdf24 3759#if !defined(CONFIG_USER_ONLY)
3760
3761#define MMUSUFFIX _cmmu
3762#define GETPC() NULL
3763#define env cpu_single_env
b769d8fe 3764#define SOFTMMU_CODE_ACCESS
3765
3766#define SHIFT 0
3767#include "softmmu_template.h"
3768
3769#define SHIFT 1
3770#include "softmmu_template.h"
3771
3772#define SHIFT 2
3773#include "softmmu_template.h"
3774
3775#define SHIFT 3
3776#include "softmmu_template.h"
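/* Editor's note: each inclusion above instantiates the template at one
 * access width (SHIFT 0..3 -> b/w/l/q).  With MMUSUFFIX set to _cmmu and
 * SOFTMMU_CODE_ACCESS defined, the generated helpers are the code-fetch
 * variants used by the translators to read guest instructions through the
 * software TLB -- roughly, for SHIFT == 2 (a sketch, not the exact
 * expansion):
 *
 *     uint32_t __ldl_cmmu(target_ulong addr, int mmu_idx);
 *
 * The '#define env cpu_single_env' above makes these helpers operate on
 * the currently executing CPU without taking an env parameter. */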
3777
3778#undef env
3779
3780#endif