/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

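/* Two-level lookup of the PageDesc for a target page index: the high bits
   select an l1_map slot, the low L2_BITS index into a lazily allocated
   array of L2_SIZE PageDesc entries. */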
static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

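/* Look up (and optionally allocate) the PhysPageDesc for a physical page
   index. With 'alloc' set, missing intermediate tables and leaf arrays are
   created; fresh leaves start out as IO_MEM_UNASSIGNED. */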
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

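/* Allocate the buffer that will hold generated host code and make it
   executable. Some hosts constrain where the buffer may live so that
   direct calls/branches from generated code can reach it. */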
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env, 0);

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);
    cpu_synchronize_state(env, 1);

    return 0;
}
#endif

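/* Return the CPUState whose cpu_index matches 'cpu', or NULL if there is
   no such CPU. */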
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

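/* Register a freshly created CPU: assign it the next cpu_index, append it
   to the global CPU list and, on targets that define CPU_SAVE_VERSION in
   system emulation, register its savevm handlers. */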
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

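/* Remove a TB completely: unlink it from the physical hash table, from the
   per-page TB lists and the per-CPU jump caches, and unchain any TBs that
   still jump to it. */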
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

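/* Set bits [start, start + len) in the byte array 'tab' treated as a
   bitmap. */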
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

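/* Translate the code starting at 'pc' (with the given cs_base/flags/cflags)
   into a new TB and register it. If no TB can be allocated, the whole
   translation cache is flushed and the allocation is retried. */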
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

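/* Unchain the TB the CPU is currently executing (if any) so that execution
   returns to the main loop instead of following direct TB-to-TB jumps. */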
static void cpu_unlink_tb(CPUState *env)
{
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
#endif
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index.  */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#ifdef CONFIG_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);

#ifdef CONFIG_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

5fafdf24 1846static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1847 unsigned long start, unsigned long length)
1848{
1849 unsigned long addr;
84b7b8e7
FB
1850 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1851 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1852 if ((addr - start) < length) {
0f459d16 1853 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1854 }
1855 }
1856}
1857
5579c7f3 1858/* Note: start and end must be within the same ram block. */
3a7d929e 1859void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1860 int dirty_flags)
1ccde1cb
FB
1861{
1862 CPUState *env;
4f2ac237 1863 unsigned long length, start1;
0a962c02
FB
1864 int i, mask, len;
1865 uint8_t *p;
1ccde1cb
FB
1866
1867 start &= TARGET_PAGE_MASK;
1868 end = TARGET_PAGE_ALIGN(end);
1869
1870 length = end - start;
1871 if (length == 0)
1872 return;
0a962c02 1873 len = length >> TARGET_PAGE_BITS;
640f42e4 1874#ifdef CONFIG_KQEMU
6a00d601
FB
1875 /* XXX: should not depend on cpu context */
1876 env = first_cpu;
3a7d929e 1877 if (env->kqemu_enabled) {
f23db169
FB
1878 ram_addr_t addr;
1879 addr = start;
1880 for(i = 0; i < len; i++) {
1881 kqemu_set_notdirty(env, addr);
1882 addr += TARGET_PAGE_SIZE;
1883 }
3a7d929e
FB
1884 }
1885#endif
f23db169
FB
1886 mask = ~dirty_flags;
1887 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1888 for(i = 0; i < len; i++)
1889 p[i] &= mask;
1890
1ccde1cb
FB
1891 /* we modify the TLB cache so that the dirty bit will be set again
1892 when accessing the range */
5579c7f3
PB
1893 start1 = (unsigned long)qemu_get_ram_ptr(start);
1894     /* Check that we don't span multiple blocks - this breaks the
1895 address comparisons below. */
1896 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1897 != (end - 1) - start) {
1898 abort();
1899 }
1900
6a00d601 1901 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
1902 int mmu_idx;
1903 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1904 for(i = 0; i < CPU_TLB_SIZE; i++)
1905 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1906 start1, length);
1907 }
6a00d601 1908 }
1ccde1cb
FB
1909}
1910
74576198
AL
1911int cpu_physical_memory_set_dirty_tracking(int enable)
1912{
1913 in_migration = enable;
b0a46a33
JK
1914 if (kvm_enabled()) {
1915 return kvm_set_migration_log(enable);
1916 }
74576198
AL
1917 return 0;
1918}
1919
1920int cpu_physical_memory_get_dirty_tracking(void)
1921{
1922 return in_migration;
1923}
1924
151f7749
JK
1925int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1926 target_phys_addr_t end_addr)
2bec46dc 1927{
151f7749
JK
1928 int ret = 0;
1929
2bec46dc 1930 if (kvm_enabled())
151f7749
JK
1931 ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1932 return ret;
2bec46dc
AL
1933}
1934
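/* Usage sketch (illustrative): RAM live migration typically brackets the
   transfer with these hooks, refreshing the bitmap from KVM on each pass
   and clearing bits as pages are sent, roughly:

       cpu_physical_memory_set_dirty_tracking(1);
       ...
       cpu_physical_sync_dirty_bitmap(start, end);
       // send pages reported dirty, then clear them with
       // cpu_physical_memory_reset_dirty(page, page + TARGET_PAGE_SIZE, flags);
       ...
       cpu_physical_memory_set_dirty_tracking(0);
*/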
3a7d929e
FB
1935static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1936{
1937 ram_addr_t ram_addr;
5579c7f3 1938 void *p;
3a7d929e 1939
84b7b8e7 1940 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5579c7f3
PB
1941 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1942 + tlb_entry->addend);
1943 ram_addr = qemu_ram_addr_from_host(p);
3a7d929e 1944 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 1945 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
1946 }
1947 }
1948}
1949
1950/* update the TLB according to the current state of the dirty bits */
1951void cpu_tlb_update_dirty(CPUState *env)
1952{
1953 int i;
cfde4bd9
IY
1954 int mmu_idx;
1955 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1956 for(i = 0; i < CPU_TLB_SIZE; i++)
1957 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1958 }
3a7d929e
FB
1959}
1960
0f459d16 1961static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 1962{
0f459d16
PB
1963 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1964 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
1965}
1966
0f459d16
PB
1967/* update the TLB corresponding to virtual page vaddr
1968 so that it is no longer dirty */
1969static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 1970{
1ccde1cb 1971 int i;
cfde4bd9 1972 int mmu_idx;
1ccde1cb 1973
0f459d16 1974 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 1975 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
1976 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1977 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
1978}
1979
59817ccb
FB
1980/* add a new TLB entry. At most one entry for a given virtual address
1981 is permitted. Return 0 if OK or 2 if the page could not be mapped
1982    (can only happen in non-SOFTMMU mode for I/O pages or pages
1983 conflicting with the host address space). */
5fafdf24
TS
1984int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1985 target_phys_addr_t paddr, int prot,
6ebbf390 1986 int mmu_idx, int is_softmmu)
9fa3e853 1987{
92e873b9 1988 PhysPageDesc *p;
4f2ac237 1989 unsigned long pd;
9fa3e853 1990 unsigned int index;
4f2ac237 1991 target_ulong address;
0f459d16 1992 target_ulong code_address;
108c49b8 1993 target_phys_addr_t addend;
9fa3e853 1994 int ret;
84b7b8e7 1995 CPUTLBEntry *te;
a1d1bb31 1996 CPUWatchpoint *wp;
0f459d16 1997 target_phys_addr_t iotlb;
9fa3e853 1998
92e873b9 1999 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
2000 if (!p) {
2001 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
2002 } else {
2003 pd = p->phys_offset;
9fa3e853
FB
2004 }
2005#if defined(DEBUG_TLB)
6ebbf390
JM
2006 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2007 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
2008#endif
2009
2010 ret = 0;
0f459d16
PB
2011 address = vaddr;
2012 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2013 /* IO memory case (romd handled later) */
2014 address |= TLB_MMIO;
2015 }
5579c7f3 2016 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
0f459d16
PB
2017 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2018 /* Normal RAM. */
2019 iotlb = pd & TARGET_PAGE_MASK;
2020 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2021 iotlb |= IO_MEM_NOTDIRTY;
2022 else
2023 iotlb |= IO_MEM_ROM;
2024 } else {
ccbb4d44 2025 /* IO handlers are currently passed a physical address.
0f459d16
PB
2026 It would be nice to pass an offset from the base address
2027 of that region. This would avoid having to special case RAM,
2028 and avoid full address decoding in every device.
2029 We can't use the high bits of pd for this because
2030 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
2031 iotlb = (pd & ~TARGET_PAGE_MASK);
2032 if (p) {
8da3ff18
PB
2033 iotlb += p->region_offset;
2034 } else {
2035 iotlb += paddr;
2036 }
0f459d16
PB
2037 }
2038
2039 code_address = address;
2040 /* Make accesses to pages with watchpoints go via the
2041 watchpoint trap routines. */
c0ce998e 2042 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2043 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
0f459d16
PB
2044 iotlb = io_mem_watch + paddr;
2045 /* TODO: The memory case can be optimized by not trapping
2046 reads of pages with a write breakpoint. */
2047 address |= TLB_MMIO;
6658ffb8 2048 }
0f459d16 2049 }
d79acba4 2050
0f459d16
PB
2051 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2052 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2053 te = &env->tlb_table[mmu_idx][index];
2054 te->addend = addend - vaddr;
2055 if (prot & PAGE_READ) {
2056 te->addr_read = address;
2057 } else {
2058 te->addr_read = -1;
2059 }
5c751e99 2060
0f459d16
PB
2061 if (prot & PAGE_EXEC) {
2062 te->addr_code = code_address;
2063 } else {
2064 te->addr_code = -1;
2065 }
2066 if (prot & PAGE_WRITE) {
2067 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2068 (pd & IO_MEM_ROMD)) {
2069 /* Write access calls the I/O callback. */
2070 te->addr_write = address | TLB_MMIO;
2071 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2072 !cpu_physical_memory_is_dirty(pd)) {
2073 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2074 } else {
0f459d16 2075 te->addr_write = address;
9fa3e853 2076 }
0f459d16
PB
2077 } else {
2078 te->addr_write = -1;
9fa3e853 2079 }
9fa3e853
FB
2080 return ret;
2081}
2082
0124311e
FB
2083#else
2084
ee8b7021 2085void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2086{
2087}
2088
2e12669a 2089void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2090{
2091}
2092
5fafdf24
TS
2093int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2094 target_phys_addr_t paddr, int prot,
6ebbf390 2095 int mmu_idx, int is_softmmu)
9fa3e853
FB
2096{
2097 return 0;
2098}
0124311e 2099
edf8e2af
MW
2100/*
2101 * Walks guest process memory "regions" one by one
2102 * and calls callback function 'fn' for each region.
2103 */
2104int walk_memory_regions(void *priv,
2105 int (*fn)(void *, unsigned long, unsigned long, unsigned long))
33417e70 2106{
9fa3e853 2107 unsigned long start, end;
edf8e2af 2108 PageDesc *p = NULL;
9fa3e853 2109 int i, j, prot, prot1;
edf8e2af 2110 int rc = 0;
33417e70 2111
edf8e2af 2112 start = end = -1;
9fa3e853 2113 prot = 0;
edf8e2af
MW
2114
2115 for (i = 0; i <= L1_SIZE; i++) {
2116 p = (i < L1_SIZE) ? l1_map[i] : NULL;
2117 for (j = 0; j < L2_SIZE; j++) {
2118 prot1 = (p == NULL) ? 0 : p[j].flags;
2119 /*
2120 * "region" is one continuous chunk of memory
2121          * that has the same protection flags set.
2122 */
9fa3e853
FB
2123 if (prot1 != prot) {
2124 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2125 if (start != -1) {
edf8e2af
MW
2126 rc = (*fn)(priv, start, end, prot);
2127 /* callback can stop iteration by returning != 0 */
2128 if (rc != 0)
2129 return (rc);
9fa3e853
FB
2130 }
2131 if (prot1 != 0)
2132 start = end;
2133 else
2134 start = -1;
2135 prot = prot1;
2136 }
edf8e2af 2137 if (p == NULL)
9fa3e853
FB
2138 break;
2139 }
33417e70 2140 }
edf8e2af
MW
2141 return (rc);
2142}
2143
2144static int dump_region(void *priv, unsigned long start,
2145 unsigned long end, unsigned long prot)
2146{
2147 FILE *f = (FILE *)priv;
2148
2149 (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2150 start, end, end - start,
2151 ((prot & PAGE_READ) ? 'r' : '-'),
2152 ((prot & PAGE_WRITE) ? 'w' : '-'),
2153 ((prot & PAGE_EXEC) ? 'x' : '-'));
2154
2155 return (0);
2156}
2157
2158/* dump memory mappings */
2159void page_dump(FILE *f)
2160{
2161 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2162 "start", "end", "size", "prot");
2163 walk_memory_regions(f, dump_region);
33417e70
FB
2164}
2165
53a5960a 2166int page_get_flags(target_ulong address)
33417e70 2167{
9fa3e853
FB
2168 PageDesc *p;
2169
2170 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2171 if (!p)
9fa3e853
FB
2172 return 0;
2173 return p->flags;
2174}
2175
2176/* modify the flags of a page and invalidate the code if
ccbb4d44 2177   necessary. The flag PAGE_WRITE_ORG is set automatically
9fa3e853 2178 depending on PAGE_WRITE */
53a5960a 2179void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
2180{
2181 PageDesc *p;
53a5960a 2182 target_ulong addr;
9fa3e853 2183
c8a706fe 2184 /* mmap_lock should already be held. */
9fa3e853
FB
2185 start = start & TARGET_PAGE_MASK;
2186 end = TARGET_PAGE_ALIGN(end);
2187 if (flags & PAGE_WRITE)
2188 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
2189 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2190 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
17e2377a
PB
2191 /* We may be called for host regions that are outside guest
2192 address space. */
2193 if (!p)
2194 return;
9fa3e853
FB
2195 /* if the write protection is set, then we invalidate the code
2196 inside */
5fafdf24 2197 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2198 (flags & PAGE_WRITE) &&
2199 p->first_tb) {
d720b93d 2200 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2201 }
2202 p->flags = flags;
2203 }
33417e70
FB
2204}
2205
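/* Usage sketch (illustrative): the user-mode mmap emulation records a new
   guest mapping roughly like this, so that later page_get_flags() /
   page_check_range() queries and the self-modifying-code tracking see
   consistent per-page flags:

       page_set_flags(start, start + len, prot | PAGE_VALID);
*/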
3d97b40b
TS
2206int page_check_range(target_ulong start, target_ulong len, int flags)
2207{
2208 PageDesc *p;
2209 target_ulong end;
2210 target_ulong addr;
2211
55f280c9
AZ
2212 if (start + len < start)
2213 /* we've wrapped around */
2214 return -1;
2215
3d97b40b
TS
2216     end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2217 start = start & TARGET_PAGE_MASK;
2218
3d97b40b
TS
2219 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2220 p = page_find(addr >> TARGET_PAGE_BITS);
2221 if( !p )
2222 return -1;
2223 if( !(p->flags & PAGE_VALID) )
2224 return -1;
2225
dae3270c 2226 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2227 return -1;
dae3270c
FB
2228 if (flags & PAGE_WRITE) {
2229 if (!(p->flags & PAGE_WRITE_ORG))
2230 return -1;
2231 /* unprotect the page if it was put read-only because it
2232 contains translated code */
2233 if (!(p->flags & PAGE_WRITE)) {
2234 if (!page_unprotect(addr, 0, NULL))
2235 return -1;
2236 }
2237 return 0;
2238 }
3d97b40b
TS
2239 }
2240 return 0;
2241}
2242
9fa3e853 2243/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2244 page. Return TRUE if the fault was successfully handled. */
53a5960a 2245int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2246{
2247 unsigned int page_index, prot, pindex;
2248 PageDesc *p, *p1;
53a5960a 2249 target_ulong host_start, host_end, addr;
9fa3e853 2250
c8a706fe
PB
2251 /* Technically this isn't safe inside a signal handler. However we
2252 know this only ever happens in a synchronous SEGV handler, so in
2253 practice it seems to be ok. */
2254 mmap_lock();
2255
83fb7adf 2256 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2257 page_index = host_start >> TARGET_PAGE_BITS;
2258 p1 = page_find(page_index);
c8a706fe
PB
2259 if (!p1) {
2260 mmap_unlock();
9fa3e853 2261 return 0;
c8a706fe 2262 }
83fb7adf 2263 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2264 p = p1;
2265 prot = 0;
2266 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2267 prot |= p->flags;
2268 p++;
2269 }
2270 /* if the page was really writable, then we change its
2271 protection back to writable */
2272 if (prot & PAGE_WRITE_ORG) {
2273 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2274 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2275 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2276 (prot & PAGE_BITS) | PAGE_WRITE);
2277 p1[pindex].flags |= PAGE_WRITE;
2278 /* and since the content will be modified, we must invalidate
2279 the corresponding translated code. */
d720b93d 2280 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2281#ifdef DEBUG_TB_CHECK
2282 tb_invalidate_check(address);
2283#endif
c8a706fe 2284 mmap_unlock();
9fa3e853
FB
2285 return 1;
2286 }
2287 }
c8a706fe 2288 mmap_unlock();
9fa3e853
FB
2289 return 0;
2290}
2291
6a00d601
FB
2292static inline void tlb_set_dirty(CPUState *env,
2293 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2294{
2295}
9fa3e853
FB
2296#endif /* defined(CONFIG_USER_ONLY) */
2297
e2eef170 2298#if !defined(CONFIG_USER_ONLY)
8da3ff18 2299
db7b5426 2300static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2301 ram_addr_t memory, ram_addr_t region_offset);
00f82b8a 2302static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2303 ram_addr_t orig_memory, ram_addr_t region_offset);
db7b5426
BS
2304#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2305 need_subpage) \
2306 do { \
2307 if (addr > start_addr) \
2308 start_addr2 = 0; \
2309 else { \
2310 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2311 if (start_addr2 > 0) \
2312 need_subpage = 1; \
2313 } \
2314 \
49e9fba2 2315 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2316 end_addr2 = TARGET_PAGE_SIZE - 1; \
2317 else { \
2318 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2319 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2320 need_subpage = 1; \
2321 } \
2322 } while (0)
2323
33417e70
FB
2324/* register physical memory. 'size' must be a multiple of the target
2325 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2326 io memory page. The address used when calling the IO function is
2327 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2328 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2329 before calculating this offset. This should not be a problem unless
2330 the low bits of start_addr and region_offset differ. */
2331void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2332 ram_addr_t size,
2333 ram_addr_t phys_offset,
2334 ram_addr_t region_offset)
33417e70 2335{
108c49b8 2336 target_phys_addr_t addr, end_addr;
92e873b9 2337 PhysPageDesc *p;
9d42037b 2338 CPUState *env;
00f82b8a 2339 ram_addr_t orig_size = size;
db7b5426 2340 void *subpage;
33417e70 2341
640f42e4 2342#ifdef CONFIG_KQEMU
da260249
FB
2343 /* XXX: should not depend on cpu context */
2344 env = first_cpu;
2345 if (env->kqemu_enabled) {
2346 kqemu_set_phys_mem(start_addr, size, phys_offset);
2347 }
2348#endif
7ba1e619
AL
2349 if (kvm_enabled())
2350 kvm_set_phys_mem(start_addr, size, phys_offset);
2351
67c4d23c
PB
2352 if (phys_offset == IO_MEM_UNASSIGNED) {
2353 region_offset = start_addr;
2354 }
8da3ff18 2355 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2356 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2357 end_addr = start_addr + (target_phys_addr_t)size;
2358 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2359 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2360 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2361 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2362 target_phys_addr_t start_addr2, end_addr2;
2363 int need_subpage = 0;
2364
2365 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2366 need_subpage);
4254fab8 2367 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2368 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2369 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2370 &p->phys_offset, orig_memory,
2371 p->region_offset);
db7b5426
BS
2372 } else {
2373 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2374 >> IO_MEM_SHIFT];
2375 }
8da3ff18
PB
2376 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2377 region_offset);
2378 p->region_offset = 0;
db7b5426
BS
2379 } else {
2380 p->phys_offset = phys_offset;
2381 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2382 (phys_offset & IO_MEM_ROMD))
2383 phys_offset += TARGET_PAGE_SIZE;
2384 }
2385 } else {
2386 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2387 p->phys_offset = phys_offset;
8da3ff18 2388 p->region_offset = region_offset;
db7b5426 2389 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2390 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2391 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2392 } else {
db7b5426
BS
2393 target_phys_addr_t start_addr2, end_addr2;
2394 int need_subpage = 0;
2395
2396 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2397 end_addr2, need_subpage);
2398
4254fab8 2399 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426 2400 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2401 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2402 addr & TARGET_PAGE_MASK);
db7b5426 2403 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2404 phys_offset, region_offset);
2405 p->region_offset = 0;
db7b5426
BS
2406 }
2407 }
2408 }
8da3ff18 2409 region_offset += TARGET_PAGE_SIZE;
33417e70 2410 }
3b46e624 2411
9d42037b
FB
2412 /* since each CPU stores ram addresses in its TLB cache, we must
2413 reset the modified entries */
2414 /* XXX: slow ! */
2415 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2416 tlb_flush(env, 1);
2417 }
33417e70
FB
2418}
2419
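/* Usage sketch (illustrative): a board model allocates guest RAM and maps it
   at a guest physical address roughly like this (IO_MEM_RAM marks the range
   as plain RAM in the phys_offset encoding):

       ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory_offset(0, ram_size,
                                           ram_offset | IO_MEM_RAM, 0);
*/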
ba863458 2420/* XXX: temporary until new memory mapping API */
00f82b8a 2421ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2422{
2423 PhysPageDesc *p;
2424
2425 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2426 if (!p)
2427 return IO_MEM_UNASSIGNED;
2428 return p->phys_offset;
2429}
2430
f65ed4c1
AL
2431void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2432{
2433 if (kvm_enabled())
2434 kvm_coalesce_mmio_region(addr, size);
2435}
2436
2437void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2438{
2439 if (kvm_enabled())
2440 kvm_uncoalesce_mmio_region(addr, size);
2441}
2442
640f42e4 2443#ifdef CONFIG_KQEMU
e9a1ab19 2444/* XXX: better than nothing */
94a6b54f 2445static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2446{
2447 ram_addr_t addr;
94a6b54f 2448 if ((last_ram_offset + size) > kqemu_phys_ram_size) {
012a7045 2449 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
94a6b54f 2450 (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
e9a1ab19
FB
2451 abort();
2452 }
94a6b54f
PB
2453 addr = last_ram_offset;
2454 last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
e9a1ab19
FB
2455 return addr;
2456}
94a6b54f
PB
2457#endif
2458
2459ram_addr_t qemu_ram_alloc(ram_addr_t size)
2460{
2461 RAMBlock *new_block;
2462
640f42e4 2463#ifdef CONFIG_KQEMU
94a6b54f
PB
2464 if (kqemu_phys_ram_base) {
2465 return kqemu_ram_alloc(size);
2466 }
2467#endif
2468
2469 size = TARGET_PAGE_ALIGN(size);
2470 new_block = qemu_malloc(sizeof(*new_block));
2471
2472 new_block->host = qemu_vmalloc(size);
2473 new_block->offset = last_ram_offset;
2474 new_block->length = size;
2475
2476 new_block->next = ram_blocks;
2477 ram_blocks = new_block;
2478
2479 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2480 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2481 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2482 0xff, size >> TARGET_PAGE_BITS);
2483
2484 last_ram_offset += size;
2485
6f0437e8
JK
2486 if (kvm_enabled())
2487 kvm_setup_guest_memory(new_block->host, size);
2488
94a6b54f
PB
2489 return new_block->offset;
2490}
e9a1ab19
FB
2491
2492void qemu_ram_free(ram_addr_t addr)
2493{
94a6b54f 2494 /* TODO: implement this. */
e9a1ab19
FB
2495}
2496
dc828ca1 2497/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
2498 With the exception of the softmmu code in this file, this should
2499 only be used for local memory (e.g. video ram) that the device owns,
2500 and knows it isn't going to access beyond the end of the block.
2501
2502 It should not be used for general purpose DMA.
2503 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2504 */
dc828ca1
PB
2505void *qemu_get_ram_ptr(ram_addr_t addr)
2506{
94a6b54f
PB
2507 RAMBlock *prev;
2508 RAMBlock **prevp;
2509 RAMBlock *block;
2510
640f42e4 2511#ifdef CONFIG_KQEMU
94a6b54f
PB
2512 if (kqemu_phys_ram_base) {
2513 return kqemu_phys_ram_base + addr;
2514 }
2515#endif
2516
2517 prev = NULL;
2518 prevp = &ram_blocks;
2519 block = ram_blocks;
2520 while (block && (block->offset > addr
2521 || block->offset + block->length <= addr)) {
2522 if (prev)
2523 prevp = &prev->next;
2524 prev = block;
2525 block = block->next;
2526 }
2527 if (!block) {
2528 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2529 abort();
2530 }
2531     /* Move this entry to the start of the list.  */
2532 if (prev) {
2533 prev->next = block->next;
2534 block->next = *prevp;
2535 *prevp = block;
2536 }
2537 return block->host + (addr - block->offset);
dc828ca1
PB
2538}
2539
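/* Usage sketch (illustrative, field names are hypothetical): a display device
   that owns its VRAM block may cache a direct host pointer,

       s->vram_ptr = qemu_get_ram_ptr(s->vram_offset);

   whereas guest-initiated DMA should go through cpu_physical_memory_rw() or
   cpu_physical_memory_map(), which handle MMIO and dirty tracking. */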
5579c7f3
PB
2540/* Some of the softmmu routines need to translate from a host pointer
2541 (typically a TLB entry) back to a ram offset. */
2542ram_addr_t qemu_ram_addr_from_host(void *ptr)
2543{
94a6b54f
PB
2544 RAMBlock *prev;
2545 RAMBlock **prevp;
2546 RAMBlock *block;
2547 uint8_t *host = ptr;
2548
640f42e4 2549#ifdef CONFIG_KQEMU
94a6b54f
PB
2550 if (kqemu_phys_ram_base) {
2551 return host - kqemu_phys_ram_base;
2552 }
2553#endif
2554
2555 prev = NULL;
2556 prevp = &ram_blocks;
2557 block = ram_blocks;
2558 while (block && (block->host > host
2559 || block->host + block->length <= host)) {
2560 if (prev)
2561 prevp = &prev->next;
2562 prev = block;
2563 block = block->next;
2564 }
2565 if (!block) {
2566 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2567 abort();
2568 }
2569 return block->offset + (host - block->host);
5579c7f3
PB
2570}
2571
a4193c8a 2572static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2573{
67d3b957 2574#ifdef DEBUG_UNASSIGNED
ab3d1727 2575 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 2576#endif
0a6f8a6d 2577#if defined(TARGET_SPARC)
e18231a3
BS
2578 do_unassigned_access(addr, 0, 0, 0, 1);
2579#endif
2580 return 0;
2581}
2582
2583static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2584{
2585#ifdef DEBUG_UNASSIGNED
2586 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2587#endif
0a6f8a6d 2588#if defined(TARGET_SPARC)
e18231a3
BS
2589 do_unassigned_access(addr, 0, 0, 0, 2);
2590#endif
2591 return 0;
2592}
2593
2594static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2595{
2596#ifdef DEBUG_UNASSIGNED
2597 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2598#endif
0a6f8a6d 2599#if defined(TARGET_SPARC)
e18231a3 2600 do_unassigned_access(addr, 0, 0, 0, 4);
67d3b957 2601#endif
33417e70
FB
2602 return 0;
2603}
2604
a4193c8a 2605static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2606{
67d3b957 2607#ifdef DEBUG_UNASSIGNED
ab3d1727 2608 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2609#endif
0a6f8a6d 2610#if defined(TARGET_SPARC)
e18231a3
BS
2611 do_unassigned_access(addr, 1, 0, 0, 1);
2612#endif
2613}
2614
2615static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2616{
2617#ifdef DEBUG_UNASSIGNED
2618 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2619#endif
0a6f8a6d 2620#if defined(TARGET_SPARC)
e18231a3
BS
2621 do_unassigned_access(addr, 1, 0, 0, 2);
2622#endif
2623}
2624
2625static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2626{
2627#ifdef DEBUG_UNASSIGNED
2628 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2629#endif
0a6f8a6d 2630#if defined(TARGET_SPARC)
e18231a3 2631 do_unassigned_access(addr, 1, 0, 0, 4);
b4f0a316 2632#endif
33417e70
FB
2633}
2634
2635static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2636 unassigned_mem_readb,
e18231a3
BS
2637 unassigned_mem_readw,
2638 unassigned_mem_readl,
33417e70
FB
2639};
2640
2641static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2642 unassigned_mem_writeb,
e18231a3
BS
2643 unassigned_mem_writew,
2644 unassigned_mem_writel,
33417e70
FB
2645};
2646
0f459d16
PB
2647static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2648 uint32_t val)
9fa3e853 2649{
3a7d929e 2650 int dirty_flags;
3a7d929e
FB
2651 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2652 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2653#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2654 tb_invalidate_phys_page_fast(ram_addr, 1);
2655 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2656#endif
3a7d929e 2657 }
5579c7f3 2658 stb_p(qemu_get_ram_ptr(ram_addr), val);
640f42e4 2659#ifdef CONFIG_KQEMU
f32fc648
FB
2660 if (cpu_single_env->kqemu_enabled &&
2661 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2662 kqemu_modify_page(cpu_single_env, ram_addr);
2663#endif
f23db169
FB
2664 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2665 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2666 /* we remove the notdirty callback only if the code has been
2667 flushed */
2668 if (dirty_flags == 0xff)
2e70f6ef 2669 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2670}
2671
0f459d16
PB
2672static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2673 uint32_t val)
9fa3e853 2674{
3a7d929e 2675 int dirty_flags;
3a7d929e
FB
2676 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2677 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2678#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2679 tb_invalidate_phys_page_fast(ram_addr, 2);
2680 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2681#endif
3a7d929e 2682 }
5579c7f3 2683 stw_p(qemu_get_ram_ptr(ram_addr), val);
640f42e4 2684#ifdef CONFIG_KQEMU
f32fc648
FB
2685 if (cpu_single_env->kqemu_enabled &&
2686 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2687 kqemu_modify_page(cpu_single_env, ram_addr);
2688#endif
f23db169
FB
2689 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2690 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2691 /* we remove the notdirty callback only if the code has been
2692 flushed */
2693 if (dirty_flags == 0xff)
2e70f6ef 2694 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2695}
2696
0f459d16
PB
2697static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2698 uint32_t val)
9fa3e853 2699{
3a7d929e 2700 int dirty_flags;
3a7d929e
FB
2701 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2702 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2703#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2704 tb_invalidate_phys_page_fast(ram_addr, 4);
2705 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2706#endif
3a7d929e 2707 }
5579c7f3 2708 stl_p(qemu_get_ram_ptr(ram_addr), val);
640f42e4 2709#ifdef CONFIG_KQEMU
f32fc648
FB
2710 if (cpu_single_env->kqemu_enabled &&
2711 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2712 kqemu_modify_page(cpu_single_env, ram_addr);
2713#endif
f23db169
FB
2714 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2715 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2716 /* we remove the notdirty callback only if the code has been
2717 flushed */
2718 if (dirty_flags == 0xff)
2e70f6ef 2719 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2720}
2721
3a7d929e 2722static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2723 NULL, /* never used */
2724 NULL, /* never used */
2725 NULL, /* never used */
2726};
2727
1ccde1cb
FB
2728static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2729 notdirty_mem_writeb,
2730 notdirty_mem_writew,
2731 notdirty_mem_writel,
2732};
2733
0f459d16 2734/* Generate a debug exception if a watchpoint has been hit. */
b4051334 2735static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
2736{
2737 CPUState *env = cpu_single_env;
06d55cc1
AL
2738 target_ulong pc, cs_base;
2739 TranslationBlock *tb;
0f459d16 2740 target_ulong vaddr;
a1d1bb31 2741 CPUWatchpoint *wp;
06d55cc1 2742 int cpu_flags;
0f459d16 2743
06d55cc1
AL
2744 if (env->watchpoint_hit) {
2745 /* We re-entered the check after replacing the TB. Now raise
2746          * the debug interrupt so that it will trigger after the
2747 * current instruction. */
2748 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2749 return;
2750 }
2e70f6ef 2751 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
c0ce998e 2752 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
2753 if ((vaddr == (wp->vaddr & len_mask) ||
2754 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
2755 wp->flags |= BP_WATCHPOINT_HIT;
2756 if (!env->watchpoint_hit) {
2757 env->watchpoint_hit = wp;
2758 tb = tb_find_pc(env->mem_io_pc);
2759 if (!tb) {
2760 cpu_abort(env, "check_watchpoint: could not find TB for "
2761 "pc=%p", (void *)env->mem_io_pc);
2762 }
2763 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2764 tb_phys_invalidate(tb, -1);
2765 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2766 env->exception_index = EXCP_DEBUG;
2767 } else {
2768 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2769 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2770 }
2771 cpu_resume_from_signal(env, NULL);
06d55cc1 2772 }
6e140f28
AL
2773 } else {
2774 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
2775 }
2776 }
2777}
2778
6658ffb8
PB
2779/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2780 so these check for a hit then pass through to the normal out-of-line
2781 phys routines. */
2782static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2783{
b4051334 2784 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
2785 return ldub_phys(addr);
2786}
2787
2788static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2789{
b4051334 2790 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
2791 return lduw_phys(addr);
2792}
2793
2794static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2795{
b4051334 2796 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
2797 return ldl_phys(addr);
2798}
2799
6658ffb8
PB
2800static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2801 uint32_t val)
2802{
b4051334 2803 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
2804 stb_phys(addr, val);
2805}
2806
2807static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2808 uint32_t val)
2809{
b4051334 2810 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
2811 stw_phys(addr, val);
2812}
2813
2814static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2815 uint32_t val)
2816{
b4051334 2817 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
2818 stl_phys(addr, val);
2819}
2820
2821static CPUReadMemoryFunc *watch_mem_read[3] = {
2822 watch_mem_readb,
2823 watch_mem_readw,
2824 watch_mem_readl,
2825};
2826
2827static CPUWriteMemoryFunc *watch_mem_write[3] = {
2828 watch_mem_writeb,
2829 watch_mem_writew,
2830 watch_mem_writel,
2831};
6658ffb8 2832
db7b5426
BS
2833static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2834 unsigned int len)
2835{
db7b5426
BS
2836 uint32_t ret;
2837 unsigned int idx;
2838
8da3ff18 2839 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2840#if defined(DEBUG_SUBPAGE)
2841 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2842 mmio, len, addr, idx);
2843#endif
8da3ff18
PB
2844 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2845 addr + mmio->region_offset[idx][0][len]);
db7b5426
BS
2846
2847 return ret;
2848}
2849
2850static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2851 uint32_t value, unsigned int len)
2852{
db7b5426
BS
2853 unsigned int idx;
2854
8da3ff18 2855 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2856#if defined(DEBUG_SUBPAGE)
2857 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2858 mmio, len, addr, idx, value);
2859#endif
8da3ff18
PB
2860 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2861 addr + mmio->region_offset[idx][1][len],
2862 value);
db7b5426
BS
2863}
2864
2865static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2866{
2867#if defined(DEBUG_SUBPAGE)
2868 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2869#endif
2870
2871 return subpage_readlen(opaque, addr, 0);
2872}
2873
2874static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2875 uint32_t value)
2876{
2877#if defined(DEBUG_SUBPAGE)
2878 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2879#endif
2880 subpage_writelen(opaque, addr, value, 0);
2881}
2882
2883static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2884{
2885#if defined(DEBUG_SUBPAGE)
2886 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2887#endif
2888
2889 return subpage_readlen(opaque, addr, 1);
2890}
2891
2892static void subpage_writew (void *opaque, target_phys_addr_t addr,
2893 uint32_t value)
2894{
2895#if defined(DEBUG_SUBPAGE)
2896 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2897#endif
2898 subpage_writelen(opaque, addr, value, 1);
2899}
2900
2901static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2902{
2903#if defined(DEBUG_SUBPAGE)
2904 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2905#endif
2906
2907 return subpage_readlen(opaque, addr, 2);
2908}
2909
2910static void subpage_writel (void *opaque,
2911 target_phys_addr_t addr, uint32_t value)
2912{
2913#if defined(DEBUG_SUBPAGE)
2914 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2915#endif
2916 subpage_writelen(opaque, addr, value, 2);
2917}
2918
2919static CPUReadMemoryFunc *subpage_read[] = {
2920 &subpage_readb,
2921 &subpage_readw,
2922 &subpage_readl,
2923};
2924
2925static CPUWriteMemoryFunc *subpage_write[] = {
2926 &subpage_writeb,
2927 &subpage_writew,
2928 &subpage_writel,
2929};
2930
2931static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2932 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
2933{
2934 int idx, eidx;
4254fab8 2935 unsigned int i;
db7b5426
BS
2936
2937 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2938 return -1;
2939 idx = SUBPAGE_IDX(start);
2940 eidx = SUBPAGE_IDX(end);
2941#if defined(DEBUG_SUBPAGE)
2942 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2943 mmio, start, end, idx, eidx, memory);
2944#endif
2945 memory >>= IO_MEM_SHIFT;
2946 for (; idx <= eidx; idx++) {
4254fab8 2947 for (i = 0; i < 4; i++) {
3ee89922
BS
2948 if (io_mem_read[memory][i]) {
2949 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2950 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
8da3ff18 2951 mmio->region_offset[idx][0][i] = region_offset;
3ee89922
BS
2952 }
2953 if (io_mem_write[memory][i]) {
2954 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2955 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
8da3ff18 2956 mmio->region_offset[idx][1][i] = region_offset;
3ee89922 2957 }
4254fab8 2958 }
db7b5426
BS
2959 }
2960
2961 return 0;
2962}
2963
00f82b8a 2964static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2965 ram_addr_t orig_memory, ram_addr_t region_offset)
db7b5426
BS
2966{
2967 subpage_t *mmio;
2968 int subpage_memory;
2969
2970 mmio = qemu_mallocz(sizeof(subpage_t));
1eec614b
AL
2971
2972 mmio->base = base;
1eed09cb 2973 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
db7b5426 2974#if defined(DEBUG_SUBPAGE)
1eec614b
AL
2975 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2976 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 2977#endif
1eec614b
AL
2978 *phys = subpage_memory | IO_MEM_SUBPAGE;
2979 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
8da3ff18 2980 region_offset);
db7b5426
BS
2981
2982 return mmio;
2983}
2984
88715657
AL
2985static int get_free_io_mem_idx(void)
2986{
2987 int i;
2988
2989 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2990 if (!io_mem_used[i]) {
2991 io_mem_used[i] = 1;
2992 return i;
2993 }
2994
2995 return -1;
2996}
2997
33417e70
FB
2998/* mem_read and mem_write are arrays of functions containing the
2999 function to access byte (index 0), word (index 1) and dword (index
0b4e6e3e 3000 2). Functions can be omitted with a NULL function pointer.
3ee89922 3001 If io_index is non zero, the corresponding io zone is
4254fab8
BS
3002 modified. If it is zero, a new io zone is allocated. The return
3003 value can be used with cpu_register_physical_memory(). (-1) is
3004 returned if error. */
1eed09cb
AK
3005static int cpu_register_io_memory_fixed(int io_index,
3006 CPUReadMemoryFunc **mem_read,
3007 CPUWriteMemoryFunc **mem_write,
3008 void *opaque)
33417e70 3009{
4254fab8 3010 int i, subwidth = 0;
33417e70
FB
3011
3012 if (io_index <= 0) {
88715657
AL
3013 io_index = get_free_io_mem_idx();
3014 if (io_index == -1)
3015 return io_index;
33417e70 3016 } else {
1eed09cb 3017 io_index >>= IO_MEM_SHIFT;
33417e70
FB
3018 if (io_index >= IO_MEM_NB_ENTRIES)
3019 return -1;
3020 }
b5ff1b31 3021
33417e70 3022 for(i = 0;i < 3; i++) {
4254fab8
BS
3023 if (!mem_read[i] || !mem_write[i])
3024 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
3025 io_mem_read[io_index][i] = mem_read[i];
3026 io_mem_write[io_index][i] = mem_write[i];
3027 }
a4193c8a 3028 io_mem_opaque[io_index] = opaque;
4254fab8 3029 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 3030}
61382a50 3031
1eed09cb
AK
3032int cpu_register_io_memory(CPUReadMemoryFunc **mem_read,
3033 CPUWriteMemoryFunc **mem_write,
3034 void *opaque)
3035{
3036 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3037}
3038
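/* Usage sketch (illustrative, callback names are hypothetical): a device
   registers its MMIO callbacks and then maps the returned io zone into the
   physical address space; a NULL entry for a width makes the region a
   "subwidth" one (IO_MEM_SUBWIDTH), as handled above:

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int io = cpu_register_io_memory(mydev_read, mydev_write, s);
       cpu_register_physical_memory(base, 0x1000, io);
*/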
88715657
AL
3039void cpu_unregister_io_memory(int io_table_address)
3040{
3041 int i;
3042 int io_index = io_table_address >> IO_MEM_SHIFT;
3043
3044 for (i=0;i < 3; i++) {
3045 io_mem_read[io_index][i] = unassigned_mem_read[i];
3046 io_mem_write[io_index][i] = unassigned_mem_write[i];
3047 }
3048 io_mem_opaque[io_index] = NULL;
3049 io_mem_used[io_index] = 0;
3050}
3051
e9179ce1
AK
3052static void io_mem_init(void)
3053{
3054 int i;
3055
3056 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3057 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3058 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3059 for (i=0; i<5; i++)
3060 io_mem_used[i] = 1;
3061
3062 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3063 watch_mem_write, NULL);
3064#ifdef CONFIG_KQEMU
3065 if (kqemu_phys_ram_base) {
3066 /* alloc dirty bits array */
3067 phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3068 memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3069 }
3070#endif
3071}
3072
e2eef170
PB
3073#endif /* !defined(CONFIG_USER_ONLY) */
3074
13eb76e0
FB
3075/* physical memory access (slow version, mainly for debug) */
3076#if defined(CONFIG_USER_ONLY)
5fafdf24 3077void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3078 int len, int is_write)
3079{
3080 int l, flags;
3081 target_ulong page;
53a5960a 3082 void * p;
13eb76e0
FB
3083
3084 while (len > 0) {
3085 page = addr & TARGET_PAGE_MASK;
3086 l = (page + TARGET_PAGE_SIZE) - addr;
3087 if (l > len)
3088 l = len;
3089 flags = page_get_flags(page);
3090 if (!(flags & PAGE_VALID))
3091 return;
3092 if (is_write) {
3093 if (!(flags & PAGE_WRITE))
3094 return;
579a97f7 3095 /* XXX: this code should not depend on lock_user */
72fb7daa 3096 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
3097 /* FIXME - should this return an error rather than just fail? */
3098 return;
72fb7daa
AJ
3099 memcpy(p, buf, l);
3100 unlock_user(p, addr, l);
13eb76e0
FB
3101 } else {
3102 if (!(flags & PAGE_READ))
3103 return;
579a97f7 3104 /* XXX: this code should not depend on lock_user */
72fb7daa 3105 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
3106 /* FIXME - should this return an error rather than just fail? */
3107 return;
72fb7daa 3108 memcpy(buf, p, l);
5b257578 3109 unlock_user(p, addr, 0);
13eb76e0
FB
3110 }
3111 len -= l;
3112 buf += l;
3113 addr += l;
3114 }
3115}
8df1cd07 3116
13eb76e0 3117#else
5fafdf24 3118void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3119 int len, int is_write)
3120{
3121 int l, io_index;
3122 uint8_t *ptr;
3123 uint32_t val;
2e12669a
FB
3124 target_phys_addr_t page;
3125 unsigned long pd;
92e873b9 3126 PhysPageDesc *p;
3b46e624 3127
13eb76e0
FB
3128 while (len > 0) {
3129 page = addr & TARGET_PAGE_MASK;
3130 l = (page + TARGET_PAGE_SIZE) - addr;
3131 if (l > len)
3132 l = len;
92e873b9 3133 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
3134 if (!p) {
3135 pd = IO_MEM_UNASSIGNED;
3136 } else {
3137 pd = p->phys_offset;
3138 }
3b46e624 3139
13eb76e0 3140 if (is_write) {
3a7d929e 3141 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
6c2934db 3142 target_phys_addr_t addr1 = addr;
13eb76e0 3143 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3144 if (p)
6c2934db 3145 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
3146 /* XXX: could force cpu_single_env to NULL to avoid
3147 potential bugs */
6c2934db 3148 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3149 /* 32 bit write access */
c27004ec 3150 val = ldl_p(buf);
6c2934db 3151 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 3152 l = 4;
6c2934db 3153 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3154 /* 16 bit write access */
c27004ec 3155 val = lduw_p(buf);
6c2934db 3156 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3157 l = 2;
3158 } else {
1c213d19 3159 /* 8 bit write access */
c27004ec 3160 val = ldub_p(buf);
6c2934db 3161 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3162 l = 1;
3163 }
3164 } else {
b448f2f3
FB
3165 unsigned long addr1;
3166 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3167 /* RAM case */
5579c7f3 3168 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3169 memcpy(ptr, buf, l);
3a7d929e
FB
3170 if (!cpu_physical_memory_is_dirty(addr1)) {
3171 /* invalidate code */
3172 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3173 /* set dirty bit */
5fafdf24 3174 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 3175 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3176 }
13eb76e0
FB
3177 }
3178 } else {
5fafdf24 3179 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3180 !(pd & IO_MEM_ROMD)) {
6c2934db 3181 target_phys_addr_t addr1 = addr;
13eb76e0
FB
3182 /* I/O case */
3183 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3184 if (p)
6c2934db
AJ
3185 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3186 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3187 /* 32 bit read access */
6c2934db 3188 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 3189 stl_p(buf, val);
13eb76e0 3190 l = 4;
6c2934db 3191 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3192 /* 16 bit read access */
6c2934db 3193 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 3194 stw_p(buf, val);
13eb76e0
FB
3195 l = 2;
3196 } else {
1c213d19 3197 /* 8 bit read access */
6c2934db 3198 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 3199 stb_p(buf, val);
13eb76e0
FB
3200 l = 1;
3201 }
3202 } else {
3203 /* RAM case */
5579c7f3 3204 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
13eb76e0
FB
3205 (addr & ~TARGET_PAGE_MASK);
3206 memcpy(buf, ptr, l);
3207 }
3208 }
3209 len -= l;
3210 buf += l;
3211 addr += l;
3212 }
3213}
8df1cd07 3214
d0ecd2aa 3215/* used for ROM loading : can write in RAM and ROM */
5fafdf24 3216void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3217 const uint8_t *buf, int len)
3218{
3219 int l;
3220 uint8_t *ptr;
3221 target_phys_addr_t page;
3222 unsigned long pd;
3223 PhysPageDesc *p;
3b46e624 3224
d0ecd2aa
FB
3225 while (len > 0) {
3226 page = addr & TARGET_PAGE_MASK;
3227 l = (page + TARGET_PAGE_SIZE) - addr;
3228 if (l > len)
3229 l = len;
3230 p = phys_page_find(page >> TARGET_PAGE_BITS);
3231 if (!p) {
3232 pd = IO_MEM_UNASSIGNED;
3233 } else {
3234 pd = p->phys_offset;
3235 }
3b46e624 3236
d0ecd2aa 3237 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
3238 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3239 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
3240 /* do nothing */
3241 } else {
3242 unsigned long addr1;
3243 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3244 /* ROM/RAM case */
5579c7f3 3245 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa
FB
3246 memcpy(ptr, buf, l);
3247 }
3248 len -= l;
3249 buf += l;
3250 addr += l;
3251 }
3252}
3253
6d16c2f8
AL
3254typedef struct {
3255 void *buffer;
3256 target_phys_addr_t addr;
3257 target_phys_addr_t len;
3258} BounceBuffer;
3259
3260static BounceBuffer bounce;
3261
ba223c29
AL
3262typedef struct MapClient {
3263 void *opaque;
3264 void (*callback)(void *opaque);
3265 LIST_ENTRY(MapClient) link;
3266} MapClient;
3267
3268static LIST_HEAD(map_client_list, MapClient) map_client_list
3269 = LIST_HEAD_INITIALIZER(map_client_list);
3270
3271void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3272{
3273 MapClient *client = qemu_malloc(sizeof(*client));
3274
3275 client->opaque = opaque;
3276 client->callback = callback;
3277 LIST_INSERT_HEAD(&map_client_list, client, link);
3278 return client;
3279}
3280
3281void cpu_unregister_map_client(void *_client)
3282{
3283 MapClient *client = (MapClient *)_client;
3284
3285 LIST_REMOVE(client, link);
34d5e948 3286 qemu_free(client);
ba223c29
AL
3287}
3288
3289static void cpu_notify_map_clients(void)
3290{
3291 MapClient *client;
3292
3293 while (!LIST_EMPTY(&map_client_list)) {
3294 client = LIST_FIRST(&map_client_list);
3295 client->callback(client->opaque);
34d5e948 3296 cpu_unregister_map_client(client);
ba223c29
AL
3297 }
3298}
3299
6d16c2f8
AL
3300/* Map a physical memory region into a host virtual address.
3301 * May map a subset of the requested range, given by and returned in *plen.
3302 * May return NULL if resources needed to perform the mapping are exhausted.
3303 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3304 * Use cpu_register_map_client() to know when retrying the map operation is
3305 * likely to succeed.
6d16c2f8
AL
3306 */
3307void *cpu_physical_memory_map(target_phys_addr_t addr,
3308 target_phys_addr_t *plen,
3309 int is_write)
3310{
3311 target_phys_addr_t len = *plen;
3312 target_phys_addr_t done = 0;
3313 int l;
3314 uint8_t *ret = NULL;
3315 uint8_t *ptr;
3316 target_phys_addr_t page;
3317 unsigned long pd;
3318 PhysPageDesc *p;
3319 unsigned long addr1;
3320
3321 while (len > 0) {
3322 page = addr & TARGET_PAGE_MASK;
3323 l = (page + TARGET_PAGE_SIZE) - addr;
3324 if (l > len)
3325 l = len;
3326 p = phys_page_find(page >> TARGET_PAGE_BITS);
3327 if (!p) {
3328 pd = IO_MEM_UNASSIGNED;
3329 } else {
3330 pd = p->phys_offset;
3331 }
3332
3333 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3334 if (done || bounce.buffer) {
3335 break;
3336 }
3337 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3338 bounce.addr = addr;
3339 bounce.len = l;
3340 if (!is_write) {
3341 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3342 }
3343 ptr = bounce.buffer;
3344 } else {
3345 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 3346 ptr = qemu_get_ram_ptr(addr1);
6d16c2f8
AL
3347 }
3348 if (!done) {
3349 ret = ptr;
3350 } else if (ret + done != ptr) {
3351 break;
3352 }
3353
3354 len -= l;
3355 addr += l;
3356 done += l;
3357 }
3358 *plen = done;
3359 return ret;
3360}
3361
3362/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3363 * Will also mark the memory as dirty if is_write == 1. access_len gives
3364 * the amount of memory that was actually read or written by the caller.
3365 */
3366void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3367 int is_write, target_phys_addr_t access_len)
3368{
3369 if (buffer != bounce.buffer) {
3370 if (is_write) {
5579c7f3 3371 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
6d16c2f8
AL
3372 while (access_len) {
3373 unsigned l;
3374 l = TARGET_PAGE_SIZE;
3375 if (l > access_len)
3376 l = access_len;
3377 if (!cpu_physical_memory_is_dirty(addr1)) {
3378 /* invalidate code */
3379 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3380 /* set dirty bit */
3381 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3382 (0xff & ~CODE_DIRTY_FLAG);
3383 }
3384 addr1 += l;
3385 access_len -= l;
3386 }
3387 }
3388 return;
3389 }
3390 if (is_write) {
3391 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3392 }
3393 qemu_free(bounce.buffer);
3394 bounce.buffer = NULL;
ba223c29 3395 cpu_notify_map_clients();
6d16c2f8 3396}
d0ecd2aa 3397
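/* Usage sketch (illustrative, mydev_retry_dma is hypothetical): a DMA-capable
   device maps guest memory, falls back to the map-client callback when the
   single bounce buffer is busy, and unmaps with the length actually
   transferred:

       target_phys_addr_t plen = len;
       uint8_t *buf = cpu_physical_memory_map(addr, &plen, is_write);
       if (!buf) {
           cpu_register_map_client(s, mydev_retry_dma);
           return;
       }
       // ... transfer at most plen bytes through buf ...
       cpu_physical_memory_unmap(buf, plen, is_write, plen);
*/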
8df1cd07
FB
3398/* warning: addr must be aligned */
3399uint32_t ldl_phys(target_phys_addr_t addr)
3400{
3401 int io_index;
3402 uint8_t *ptr;
3403 uint32_t val;
3404 unsigned long pd;
3405 PhysPageDesc *p;
3406
3407 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3408 if (!p) {
3409 pd = IO_MEM_UNASSIGNED;
3410 } else {
3411 pd = p->phys_offset;
3412 }
3b46e624 3413
5fafdf24 3414 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3415 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
3416 /* I/O case */
3417 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3418 if (p)
3419 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3420 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3421 } else {
3422 /* RAM case */
5579c7f3 3423 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
8df1cd07
FB
3424 (addr & ~TARGET_PAGE_MASK);
3425 val = ldl_p(ptr);
3426 }
3427 return val;
3428}
3429
84b7b8e7
FB
3430/* warning: addr must be aligned */
3431uint64_t ldq_phys(target_phys_addr_t addr)
3432{
3433 int io_index;
3434 uint8_t *ptr;
3435 uint64_t val;
3436 unsigned long pd;
3437 PhysPageDesc *p;
3438
3439 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3440 if (!p) {
3441 pd = IO_MEM_UNASSIGNED;
3442 } else {
3443 pd = p->phys_offset;
3444 }
3b46e624 3445
2a4188a3
FB
3446 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3447 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
3448 /* I/O case */
3449 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3450 if (p)
3451 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
84b7b8e7
FB
3452#ifdef TARGET_WORDS_BIGENDIAN
3453 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3454 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3455#else
3456 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3457 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3458#endif
3459 } else {
3460 /* RAM case */
5579c7f3 3461 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
3462 (addr & ~TARGET_PAGE_MASK);
3463 val = ldq_p(ptr);
3464 }
3465 return val;
3466}
3467
aab33094
FB
3468/* XXX: optimize */
3469uint32_t ldub_phys(target_phys_addr_t addr)
3470{
3471 uint8_t val;
3472 cpu_physical_memory_read(addr, &val, 1);
3473 return val;
3474}
3475
3476/* XXX: optimize */
3477uint32_t lduw_phys(target_phys_addr_t addr)
3478{
3479 uint16_t val;
3480 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3481 return tswap16(val);
3482}
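/* [Editor's illustrative sketch, not part of exec.c] One way the
   "XXX: optimize" note above could be addressed: mirror the ldl_phys() fast
   path but dispatch through the 16-bit accessors (io_mem_read[...][1] for
   I/O and lduw_p() for RAM) instead of going through
   cpu_physical_memory_read(). */
static uint32_t lduw_phys_sketch(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* I/O case: use the 16-bit read handler */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        return io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
    }
    /* RAM case: read straight from the host mapping */
    ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
    return lduw_p(ptr);
}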
3483
8df1cd07
FB
3484/* warning: addr must be aligned. The ram page is not marked as dirty
3485 and the code inside is not invalidated. It is useful if the dirty
3486 bits are used to track modified PTEs */
3487void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3488{
3489 int io_index;
3490 uint8_t *ptr;
3491 unsigned long pd;
3492 PhysPageDesc *p;
3493
3494 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3495 if (!p) {
3496 pd = IO_MEM_UNASSIGNED;
3497 } else {
3498 pd = p->phys_offset;
3499 }
3b46e624 3500
3a7d929e 3501 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3502 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3503 if (p)
3504 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3505 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3506 } else {
74576198 3507 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 3508 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3509 stl_p(ptr, val);
74576198
AL
3510
3511 if (unlikely(in_migration)) {
3512 if (!cpu_physical_memory_is_dirty(addr1)) {
3513 /* invalidate code */
3514 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3515 /* set dirty bit */
3516 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3517 (0xff & ~CODE_DIRTY_FLAG);
3518 }
3519 }
8df1cd07
FB
3520 }
3521}
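/* [Editor's illustrative sketch, not part of exec.c] The intended user of
   stl_phys_notdirty() is a target MMU walker that updates accessed/dirty
   flags in a guest page-table entry without marking the RAM page dirty or
   invalidating translated code, as described in the warning above.  The PTE
   layout (bit 5 as the "accessed" flag) and the helper name are
   hypothetical. */
static void example_mark_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & (1 << 5))) {
        stl_phys_notdirty(pte_addr, pte | (1 << 5));
    }
}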
3522
bc98a7ef
JM
3523void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3524{
3525 int io_index;
3526 uint8_t *ptr;
3527 unsigned long pd;
3528 PhysPageDesc *p;
3529
3530 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3531 if (!p) {
3532 pd = IO_MEM_UNASSIGNED;
3533 } else {
3534 pd = p->phys_offset;
3535 }
3b46e624 3536
bc98a7ef
JM
3537 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3538 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3539 if (p)
3540 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bc98a7ef
JM
3541#ifdef TARGET_WORDS_BIGENDIAN
3542 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3543 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3544#else
3545 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3546 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3547#endif
3548 } else {
5579c7f3 3549 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
3550 (addr & ~TARGET_PAGE_MASK);
3551 stq_p(ptr, val);
3552 }
3553}
3554
8df1cd07 3555/* warning: addr must be aligned */
8df1cd07
FB
3556void stl_phys(target_phys_addr_t addr, uint32_t val)
3557{
3558 int io_index;
3559 uint8_t *ptr;
3560 unsigned long pd;
3561 PhysPageDesc *p;
3562
3563 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3564 if (!p) {
3565 pd = IO_MEM_UNASSIGNED;
3566 } else {
3567 pd = p->phys_offset;
3568 }
3b46e624 3569
3a7d929e 3570 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3571 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3572 if (p)
3573 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3574 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3575 } else {
3576 unsigned long addr1;
3577 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3578 /* RAM case */
5579c7f3 3579 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3580 stl_p(ptr, val);
3a7d929e
FB
3581 if (!cpu_physical_memory_is_dirty(addr1)) {
3582 /* invalidate code */
3583 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3584 /* set dirty bit */
f23db169
FB
3585 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3586 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3587 }
8df1cd07
FB
3588 }
3589}
3590
aab33094
FB
3591/* XXX: optimize */
3592void stb_phys(target_phys_addr_t addr, uint32_t val)
3593{
3594 uint8_t v = val;
3595 cpu_physical_memory_write(addr, &v, 1);
3596}
3597
3598/* XXX: optimize */
3599void stw_phys(target_phys_addr_t addr, uint32_t val)
3600{
3601 uint16_t v = tswap16(val);
3602 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3603}
3604
3605/* XXX: optimize */
3606void stq_phys(target_phys_addr_t addr, uint64_t val)
3607{
3608 val = tswap64(val);
3609 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3610}
3611
13eb76e0
FB
3612#endif
3613
5e2972fd 3614/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 3615int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 3616 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3617{
3618 int l;
9b3c35e0
JM
3619 target_phys_addr_t phys_addr;
3620 target_ulong page;
13eb76e0
FB
3621
3622 while (len > 0) {
3623 page = addr & TARGET_PAGE_MASK;
3624 phys_addr = cpu_get_phys_page_debug(env, page);
3625 /* if no physical page mapped, return an error */
3626 if (phys_addr == -1)
3627 return -1;
3628 l = (page + TARGET_PAGE_SIZE) - addr;
3629 if (l > len)
3630 l = len;
5e2972fd
AL
3631 phys_addr += (addr & ~TARGET_PAGE_MASK);
3632#if !defined(CONFIG_USER_ONLY)
3633 if (is_write)
3634 cpu_physical_memory_write_rom(phys_addr, buf, l);
3635 else
3636#endif
3637 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
3638 len -= l;
3639 buf += l;
3640 addr += l;
3641 }
3642 return 0;
3643}
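/* [Editor's illustrative sketch, not part of exec.c] Debug front ends such
   as a gdb stub access guest virtual memory through cpu_memory_rw_debug();
   the helper above walks the request page by page, translating each page
   with cpu_get_phys_page_debug().  The wrapper name is hypothetical. */
static int example_debug_read(CPUState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    /* is_write == 0 requests a read; -1 means a page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}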
3644
2e70f6ef
PB
3645/* in deterministic execution mode, instructions performing device I/O
3646 must be at the end of the TB */
3647void cpu_io_recompile(CPUState *env, void *retaddr)
3648{
3649 TranslationBlock *tb;
3650 uint32_t n, cflags;
3651 target_ulong pc, cs_base;
3652 uint64_t flags;
3653
3654 tb = tb_find_pc((unsigned long)retaddr);
3655 if (!tb) {
3656 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3657 retaddr);
3658 }
3659 n = env->icount_decr.u16.low + tb->icount;
3660 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3661 /* Calculate how many instructions had been executed before the fault
bf20dc07 3662 occurred. */
2e70f6ef
PB
3663 n = n - env->icount_decr.u16.low;
3664 /* Generate a new TB ending on the I/O insn. */
3665 n++;
3666 /* On MIPS and SH, delay slot instructions can only be restarted if
3667 they were already the first instruction in the TB. If this is not
bf20dc07 3668 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
3669 branch. */
3670#if defined(TARGET_MIPS)
3671 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3672 env->active_tc.PC -= 4;
3673 env->icount_decr.u16.low++;
3674 env->hflags &= ~MIPS_HFLAG_BMASK;
3675 }
3676#elif defined(TARGET_SH4)
3677 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3678 && n > 1) {
3679 env->pc -= 2;
3680 env->icount_decr.u16.low++;
3681 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3682 }
3683#endif
3684 /* This should never happen. */
3685 if (n > CF_COUNT_MASK)
3686 cpu_abort(env, "TB too big during recompile");
3687
3688 cflags = n | CF_LAST_IO;
3689 pc = tb->pc;
3690 cs_base = tb->cs_base;
3691 flags = tb->flags;
3692 tb_phys_invalidate(tb, -1);
3693 /* FIXME: In theory this could raise an exception. In practice
3694 we have already translated the block once so it's probably ok. */
3695 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 3696 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
3697 the first in the TB) then we end up generating a whole new TB and
3698 repeating the fault, which is horribly inefficient.
3699 Better would be to execute just this insn uncached, or generate a
3700 second new TB. */
3701 cpu_resume_from_signal(env, NULL);
3702}
3703
e3db7226
FB
3704void dump_exec_info(FILE *f,
3705 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3706{
3707 int i, target_code_size, max_target_code_size;
3708 int direct_jmp_count, direct_jmp2_count, cross_page;
3709 TranslationBlock *tb;
3b46e624 3710
e3db7226
FB
3711 target_code_size = 0;
3712 max_target_code_size = 0;
3713 cross_page = 0;
3714 direct_jmp_count = 0;
3715 direct_jmp2_count = 0;
3716 for(i = 0; i < nb_tbs; i++) {
3717 tb = &tbs[i];
3718 target_code_size += tb->size;
3719 if (tb->size > max_target_code_size)
3720 max_target_code_size = tb->size;
3721 if (tb->page_addr[1] != -1)
3722 cross_page++;
3723 if (tb->tb_next_offset[0] != 0xffff) {
3724 direct_jmp_count++;
3725 if (tb->tb_next_offset[1] != 0xffff) {
3726 direct_jmp2_count++;
3727 }
3728 }
3729 }
3730 /* XXX: avoid using doubles ? */
57fec1fe 3731 cpu_fprintf(f, "Translation buffer state:\n");
26a5f13b
FB
3732 cpu_fprintf(f, "gen code size %ld/%ld\n",
3733 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3734 cpu_fprintf(f, "TB count %d/%d\n",
3735 nb_tbs, code_gen_max_blocks);
5fafdf24 3736 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
3737 nb_tbs ? target_code_size / nb_tbs : 0,
3738 max_target_code_size);
5fafdf24 3739 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
3740 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3741 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
3742 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3743 cross_page,
e3db7226
FB
3744 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3745 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3746 direct_jmp_count,
e3db7226
FB
3747 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3748 direct_jmp2_count,
3749 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3750 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
3751 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3752 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3753 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3754 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
3755}
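/* [Editor's illustrative sketch, not part of exec.c] dump_exec_info() only
   needs an fprintf-compatible callback, so the simplest caller can send the
   JIT statistics to stdout; the monitor's "info jit" command passes its own
   callback in the same way. */
static void example_dump_jit_stats(void)
{
    dump_exec_info(stdout, fprintf);
}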
3756
5fafdf24 3757#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3758
3759#define MMUSUFFIX _cmmu
3760#define GETPC() NULL
3761#define env cpu_single_env
b769d8fe 3762#define SOFTMMU_CODE_ACCESS
61382a50
FB
3763
3764#define SHIFT 0
3765#include "softmmu_template.h"
3766
3767#define SHIFT 1
3768#include "softmmu_template.h"
3769
3770#define SHIFT 2
3771#include "softmmu_template.h"
3772
3773#define SHIFT 3
3774#include "softmmu_template.h"
3775
3776#undef env
3777
3778#endif