git.proxmox.com: qemu.git / exec.c (blame view)
Blamed commit: "Set limits for memory size to avoid overlap with devices"
54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
54936004
FB
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
67b915a5 20#include "config.h"
d5a8f07c
FB
21#ifdef _WIN32
22#include <windows.h>
23#else
a98d49b1 24#include <sys/types.h>
d5a8f07c
FB
25#include <sys/mman.h>
26#endif
54936004
FB
27#include <stdlib.h>
28#include <stdio.h>
29#include <stdarg.h>
30#include <string.h>
31#include <errno.h>
32#include <unistd.h>
33#include <inttypes.h>
34
6180a181
FB
35#include "cpu.h"
36#include "exec-all.h"
53a5960a
PB
37#if defined(CONFIG_USER_ONLY)
38#include <qemu.h>
39#endif
54936004 40
fd6ce8f6 41//#define DEBUG_TB_INVALIDATE
66e85a21 42//#define DEBUG_FLUSH
9fa3e853 43//#define DEBUG_TLB
67d3b957 44//#define DEBUG_UNASSIGNED
fd6ce8f6
FB
45
46/* make various TB consistency checks */
47//#define DEBUG_TB_CHECK
98857888 48//#define DEBUG_TLB_CHECK
fd6ce8f6 49
1196be37
TS
50//#define DEBUG_IOPORT
51
99773bd4
PB
52#if !defined(CONFIG_USER_ONLY)
53/* TB consistency checks only implemented for usermode emulation. */
54#undef DEBUG_TB_CHECK
55#endif
56
fd6ce8f6
FB
57/* threshold to flush the translated code buffer */
58#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
59
9fa3e853
FB
60#define SMC_BITMAP_USE_THRESHOLD 10
61
62#define MMAP_AREA_START 0x00000000
63#define MMAP_AREA_END 0xa8000000
fd6ce8f6 64
108c49b8
FB
65#if defined(TARGET_SPARC64)
66#define TARGET_PHYS_ADDR_SPACE_BITS 41
bedb69ea
JM
67#elif defined(TARGET_ALPHA)
68#define TARGET_PHYS_ADDR_SPACE_BITS 42
69#define TARGET_VIRT_ADDR_SPACE_BITS 42
108c49b8
FB
70#elif defined(TARGET_PPC64)
71#define TARGET_PHYS_ADDR_SPACE_BITS 42
72#else
73/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
74#define TARGET_PHYS_ADDR_SPACE_BITS 32
75#endif
76
fd6ce8f6 77TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
9fa3e853 78TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
fd6ce8f6 79int nb_tbs;
eb51d102
FB
80/* any access to the tbs or the page table must use this lock */
81spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 82
b8076a74 83uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
fd6ce8f6
FB
84uint8_t *code_gen_ptr;
85
9fa3e853
FB
86int phys_ram_size;
87int phys_ram_fd;
88uint8_t *phys_ram_base;
1ccde1cb 89uint8_t *phys_ram_dirty;
e9a1ab19 90static ram_addr_t phys_ram_alloc_offset = 0;
9fa3e853 91
6a00d601
FB
92CPUState *first_cpu;
93/* current CPU in the current thread. It is only valid inside
94 cpu_exec() */
95CPUState *cpu_single_env;
96
54936004 97typedef struct PageDesc {
92e873b9 98 /* list of TBs intersecting this ram page */
fd6ce8f6 99 TranslationBlock *first_tb;
9fa3e853
FB
100 /* in order to optimize self modifying code, we count the number
101 of lookups we do to a given page to use a bitmap */
102 unsigned int code_write_count;
103 uint8_t *code_bitmap;
104#if defined(CONFIG_USER_ONLY)
105 unsigned long flags;
106#endif
54936004
FB
107} PageDesc;
108
92e873b9
FB
109typedef struct PhysPageDesc {
110 /* offset in host memory of the page + io_index in the low 12 bits */
e04f40b5 111 uint32_t phys_offset;
92e873b9
FB
112} PhysPageDesc;
113
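/* Clarifying note (added, not in the original exec.c): phys_offset packs two
   things, as the comment above says.  For RAM/ROM pages the page-aligned part
   is the offset of the page inside the phys_ram_base area, and the low bits
   select an io_mem handler slot (IO_MEM_RAM, IO_MEM_ROM, IO_MEM_UNASSIGNED, a
   device index, ...).  A sketch of how callers such as tlb_set_page_exec()
   below split it:

       pd = p->phys_offset;
       if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM)
           ... treat as I/O, dispatch through io_mem_read/io_mem_write ...
       else
           host_page = phys_ram_base + (pd & TARGET_PAGE_MASK);
*/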
54936004 114#define L2_BITS 10
bedb69ea
JM
115#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
116/* XXX: this is a temporary hack for alpha target.
117 * In the future, this is to be replaced by a multi-level table
118 * to actually be able to handle the complete 64 bits address space.
119 */
120#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
121#else
54936004 122#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
bedb69ea 123#endif
54936004
FB
124
125#define L1_SIZE (1 << L1_BITS)
126#define L2_SIZE (1 << L2_BITS)
127
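/* Clarifying note (added, not in the original exec.c): the maps below are
   two-level tables indexed by page number.  A page index
   (address >> TARGET_PAGE_BITS) is split exactly as page_find() and
   phys_page_find_alloc() do it:

       l1_index = index >> L2_BITS;         (selects an L2 table)
       l2_index = index & (L2_SIZE - 1);    (selects an entry in that table)

   For example, with 4 KB target pages (TARGET_PAGE_BITS = 12) and
   L2_BITS = 10, a 32-bit address decomposes into 10 L1 bits, 10 L2 bits
   and 12 page-offset bits. */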
33417e70 128static void io_mem_init(void);
fd6ce8f6 129
83fb7adf
FB
130unsigned long qemu_real_host_page_size;
131unsigned long qemu_host_page_bits;
132unsigned long qemu_host_page_size;
133unsigned long qemu_host_page_mask;
54936004 134
92e873b9 135/* XXX: for system emulation, it could just be an array */
54936004 136static PageDesc *l1_map[L1_SIZE];
0a962c02 137PhysPageDesc **l1_phys_map;
54936004 138
33417e70 139/* io memory support */
33417e70
FB
140CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
141CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 142void *io_mem_opaque[IO_MEM_NB_ENTRIES];
33417e70 143static int io_mem_nb;
6658ffb8
PB
144#if defined(CONFIG_SOFTMMU)
145static int io_mem_watch;
146#endif
33417e70 147
34865134
FB
148/* log support */
149char *logfilename = "/tmp/qemu.log";
150FILE *logfile;
151int loglevel;
152
e3db7226
FB
153/* statistics */
154static int tlb_flush_count;
155static int tb_flush_count;
156static int tb_phys_invalidate_count;
157
b346ff46 158static void page_init(void)
54936004 159{
83fb7adf 160 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 161 TARGET_PAGE_SIZE */
67b915a5 162#ifdef _WIN32
d5a8f07c
FB
163 {
164 SYSTEM_INFO system_info;
165 DWORD old_protect;
166
167 GetSystemInfo(&system_info);
168 qemu_real_host_page_size = system_info.dwPageSize;
169
170 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
171 PAGE_EXECUTE_READWRITE, &old_protect);
172 }
67b915a5 173#else
83fb7adf 174 qemu_real_host_page_size = getpagesize();
d5a8f07c
FB
175 {
176 unsigned long start, end;
177
178 start = (unsigned long)code_gen_buffer;
179 start &= ~(qemu_real_host_page_size - 1);
180
181 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
182 end += qemu_real_host_page_size - 1;
183 end &= ~(qemu_real_host_page_size - 1);
184
185 mprotect((void *)start, end - start,
186 PROT_READ | PROT_WRITE | PROT_EXEC);
187 }
67b915a5 188#endif
d5a8f07c 189
83fb7adf
FB
190 if (qemu_host_page_size == 0)
191 qemu_host_page_size = qemu_real_host_page_size;
192 if (qemu_host_page_size < TARGET_PAGE_SIZE)
193 qemu_host_page_size = TARGET_PAGE_SIZE;
194 qemu_host_page_bits = 0;
195 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
196 qemu_host_page_bits++;
197 qemu_host_page_mask = ~(qemu_host_page_size - 1);
108c49b8
FB
198 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
199 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
54936004
FB
200}
201
fd6ce8f6 202static inline PageDesc *page_find_alloc(unsigned int index)
54936004 203{
54936004
FB
204 PageDesc **lp, *p;
205
54936004
FB
206 lp = &l1_map[index >> L2_BITS];
207 p = *lp;
208 if (!p) {
209 /* allocate if not found */
59817ccb 210 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
fd6ce8f6 211 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
54936004
FB
212 *lp = p;
213 }
214 return p + (index & (L2_SIZE - 1));
215}
216
fd6ce8f6 217static inline PageDesc *page_find(unsigned int index)
54936004 218{
54936004
FB
219 PageDesc *p;
220
54936004
FB
221 p = l1_map[index >> L2_BITS];
222 if (!p)
223 return 0;
fd6ce8f6
FB
224 return p + (index & (L2_SIZE - 1));
225}
226
108c49b8 227static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 228{
108c49b8 229 void **lp, **p;
e3f4e2a4 230 PhysPageDesc *pd;
92e873b9 231
108c49b8
FB
232 p = (void **)l1_phys_map;
233#if TARGET_PHYS_ADDR_SPACE_BITS > 32
234
235#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
236#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
237#endif
238 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
92e873b9
FB
239 p = *lp;
240 if (!p) {
241 /* allocate if not found */
108c49b8
FB
242 if (!alloc)
243 return NULL;
244 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
245 memset(p, 0, sizeof(void *) * L1_SIZE);
246 *lp = p;
247 }
248#endif
249 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
e3f4e2a4
PB
250 pd = *lp;
251 if (!pd) {
252 int i;
108c49b8
FB
253 /* allocate if not found */
254 if (!alloc)
255 return NULL;
e3f4e2a4
PB
256 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
257 *lp = pd;
258 for (i = 0; i < L2_SIZE; i++)
259 pd[i].phys_offset = IO_MEM_UNASSIGNED;
92e873b9 260 }
e3f4e2a4 261 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
92e873b9
FB
262}
263
108c49b8 264static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 265{
108c49b8 266 return phys_page_find_alloc(index, 0);
92e873b9
FB
267}
268
9fa3e853 269#if !defined(CONFIG_USER_ONLY)
6a00d601 270static void tlb_protect_code(ram_addr_t ram_addr);
3a7d929e
FB
271static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
272 target_ulong vaddr);
9fa3e853 273#endif
fd6ce8f6 274
6a00d601 275void cpu_exec_init(CPUState *env)
fd6ce8f6 276{
6a00d601
FB
277 CPUState **penv;
278 int cpu_index;
279
fd6ce8f6
FB
280 if (!code_gen_ptr) {
281 code_gen_ptr = code_gen_buffer;
b346ff46 282 page_init();
33417e70 283 io_mem_init();
fd6ce8f6 284 }
6a00d601
FB
285 env->next_cpu = NULL;
286 penv = &first_cpu;
287 cpu_index = 0;
288 while (*penv != NULL) {
289 penv = (CPUState **)&(*penv)->next_cpu;
290 cpu_index++;
291 }
292 env->cpu_index = cpu_index;
6658ffb8 293 env->nb_watchpoints = 0;
6a00d601 294 *penv = env;
fd6ce8f6
FB
295}
296
9fa3e853
FB
297static inline void invalidate_page_bitmap(PageDesc *p)
298{
299 if (p->code_bitmap) {
59817ccb 300 qemu_free(p->code_bitmap);
9fa3e853
FB
301 p->code_bitmap = NULL;
302 }
303 p->code_write_count = 0;
304}
305
fd6ce8f6
FB
306/* set to NULL all the 'first_tb' fields in all PageDescs */
307static void page_flush_tb(void)
308{
309 int i, j;
310 PageDesc *p;
311
312 for(i = 0; i < L1_SIZE; i++) {
313 p = l1_map[i];
314 if (p) {
9fa3e853
FB
315 for(j = 0; j < L2_SIZE; j++) {
316 p->first_tb = NULL;
317 invalidate_page_bitmap(p);
318 p++;
319 }
fd6ce8f6
FB
320 }
321 }
322}
323
324/* flush all the translation blocks */
d4e8164f 325/* XXX: tb_flush is currently not thread safe */
6a00d601 326void tb_flush(CPUState *env1)
fd6ce8f6 327{
6a00d601 328 CPUState *env;
0124311e 329#if defined(DEBUG_FLUSH)
fd6ce8f6
FB
330 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
331 code_gen_ptr - code_gen_buffer,
332 nb_tbs,
0124311e 333 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
fd6ce8f6
FB
334#endif
335 nb_tbs = 0;
6a00d601
FB
336
337 for(env = first_cpu; env != NULL; env = env->next_cpu) {
338 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
339 }
9fa3e853 340
8a8a608f 341 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 342 page_flush_tb();
9fa3e853 343
fd6ce8f6 344 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
345 /* XXX: flush processor icache at this point if cache flush is
346 expensive */
e3db7226 347 tb_flush_count++;
fd6ce8f6
FB
348}
349
350#ifdef DEBUG_TB_CHECK
351
bc98a7ef 352static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
353{
354 TranslationBlock *tb;
355 int i;
356 address &= TARGET_PAGE_MASK;
99773bd4
PB
357 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
358 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
359 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
360 address >= tb->pc + tb->size)) {
361 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 362 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
363 }
364 }
365 }
366}
367
368/* verify that all the pages have correct rights for code */
369static void tb_page_check(void)
370{
371 TranslationBlock *tb;
372 int i, flags1, flags2;
373
99773bd4
PB
374 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
375 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
376 flags1 = page_get_flags(tb->pc);
377 flags2 = page_get_flags(tb->pc + tb->size - 1);
378 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
379 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 380 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
381 }
382 }
383 }
384}
385
d4e8164f
FB
386void tb_jmp_check(TranslationBlock *tb)
387{
388 TranslationBlock *tb1;
389 unsigned int n1;
390
391 /* suppress any remaining jumps to this TB */
392 tb1 = tb->jmp_first;
393 for(;;) {
394 n1 = (long)tb1 & 3;
395 tb1 = (TranslationBlock *)((long)tb1 & ~3);
396 if (n1 == 2)
397 break;
398 tb1 = tb1->jmp_next[n1];
399 }
400 /* check end of list */
401 if (tb1 != tb) {
402 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
403 }
404}
405
fd6ce8f6
FB
406#endif
407
408/* invalidate one TB */
409static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
410 int next_offset)
411{
412 TranslationBlock *tb1;
413 for(;;) {
414 tb1 = *ptb;
415 if (tb1 == tb) {
416 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
417 break;
418 }
419 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
420 }
421}
422
9fa3e853
FB
423static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
424{
425 TranslationBlock *tb1;
426 unsigned int n1;
427
428 for(;;) {
429 tb1 = *ptb;
430 n1 = (long)tb1 & 3;
431 tb1 = (TranslationBlock *)((long)tb1 & ~3);
432 if (tb1 == tb) {
433 *ptb = tb1->page_next[n1];
434 break;
435 }
436 ptb = &tb1->page_next[n1];
437 }
438}
439
d4e8164f
FB
440static inline void tb_jmp_remove(TranslationBlock *tb, int n)
441{
442 TranslationBlock *tb1, **ptb;
443 unsigned int n1;
444
445 ptb = &tb->jmp_next[n];
446 tb1 = *ptb;
447 if (tb1) {
448 /* find tb(n) in circular list */
449 for(;;) {
450 tb1 = *ptb;
451 n1 = (long)tb1 & 3;
452 tb1 = (TranslationBlock *)((long)tb1 & ~3);
453 if (n1 == n && tb1 == tb)
454 break;
455 if (n1 == 2) {
456 ptb = &tb1->jmp_first;
457 } else {
458 ptb = &tb1->jmp_next[n1];
459 }
460 }
461 /* now we can suppress tb(n) from the list */
462 *ptb = tb->jmp_next[n];
463
464 tb->jmp_next[n] = NULL;
465 }
466}
467
468/* reset the jump entry 'n' of a TB so that it is not chained to
469 another TB */
470static inline void tb_reset_jump(TranslationBlock *tb, int n)
471{
472 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
473}
474
8a40a180 475static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
fd6ce8f6 476{
6a00d601 477 CPUState *env;
8a40a180 478 PageDesc *p;
d4e8164f 479 unsigned int h, n1;
8a40a180
FB
480 target_ulong phys_pc;
481 TranslationBlock *tb1, *tb2;
d4e8164f 482
8a40a180
FB
483 /* remove the TB from the hash list */
484 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
485 h = tb_phys_hash_func(phys_pc);
486 tb_remove(&tb_phys_hash[h], tb,
487 offsetof(TranslationBlock, phys_hash_next));
488
489 /* remove the TB from the page list */
490 if (tb->page_addr[0] != page_addr) {
491 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
492 tb_page_remove(&p->first_tb, tb);
493 invalidate_page_bitmap(p);
494 }
495 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
496 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
497 tb_page_remove(&p->first_tb, tb);
498 invalidate_page_bitmap(p);
499 }
500
36bdbe54 501 tb_invalidated_flag = 1;
59817ccb 502
fd6ce8f6 503 /* remove the TB from the hash list */
8a40a180 504 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
505 for(env = first_cpu; env != NULL; env = env->next_cpu) {
506 if (env->tb_jmp_cache[h] == tb)
507 env->tb_jmp_cache[h] = NULL;
508 }
d4e8164f
FB
509
510 /* suppress this TB from the two jump lists */
511 tb_jmp_remove(tb, 0);
512 tb_jmp_remove(tb, 1);
513
514 /* suppress any remaining jumps to this TB */
515 tb1 = tb->jmp_first;
516 for(;;) {
517 n1 = (long)tb1 & 3;
518 if (n1 == 2)
519 break;
520 tb1 = (TranslationBlock *)((long)tb1 & ~3);
521 tb2 = tb1->jmp_next[n1];
522 tb_reset_jump(tb1, n1);
523 tb1->jmp_next[n1] = NULL;
524 tb1 = tb2;
525 }
526 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 527
e3db7226 528 tb_phys_invalidate_count++;
9fa3e853
FB
529}
530
531static inline void set_bits(uint8_t *tab, int start, int len)
532{
533 int end, mask, end1;
534
535 end = start + len;
536 tab += start >> 3;
537 mask = 0xff << (start & 7);
538 if ((start & ~7) == (end & ~7)) {
539 if (start < end) {
540 mask &= ~(0xff << (end & 7));
541 *tab |= mask;
542 }
543 } else {
544 *tab++ |= mask;
545 start = (start + 8) & ~7;
546 end1 = end & ~7;
547 while (start < end1) {
548 *tab++ = 0xff;
549 start += 8;
550 }
551 if (start < end) {
552 mask = ~(0xff << (end & 7));
553 *tab |= mask;
554 }
555 }
556}
557
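/* Worked example (added, not in the original exec.c): set_bits() sets the bit
   range [start, start + len) in a byte array, least-significant bit first
   within each byte.  For instance set_bits(tab, 3, 7) covers bits 3..9, i.e.
   it is equivalent to:

       tab[0] |= 0xf8;    (bits 3..7 of the first byte)
       tab[1] |= 0x03;    (bits 0..1 of the second byte)
*/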
558static void build_page_bitmap(PageDesc *p)
559{
560 int n, tb_start, tb_end;
561 TranslationBlock *tb;
562
59817ccb 563 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
564 if (!p->code_bitmap)
565 return;
566 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
567
568 tb = p->first_tb;
569 while (tb != NULL) {
570 n = (long)tb & 3;
571 tb = (TranslationBlock *)((long)tb & ~3);
572 /* NOTE: this is subtle as a TB may span two physical pages */
573 if (n == 0) {
574 /* NOTE: tb_end may be after the end of the page, but
575 it is not a problem */
576 tb_start = tb->pc & ~TARGET_PAGE_MASK;
577 tb_end = tb_start + tb->size;
578 if (tb_end > TARGET_PAGE_SIZE)
579 tb_end = TARGET_PAGE_SIZE;
580 } else {
581 tb_start = 0;
582 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
583 }
584 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
585 tb = tb->page_next[n];
586 }
587}
588
d720b93d
FB
589#ifdef TARGET_HAS_PRECISE_SMC
590
591static void tb_gen_code(CPUState *env,
592 target_ulong pc, target_ulong cs_base, int flags,
593 int cflags)
594{
595 TranslationBlock *tb;
596 uint8_t *tc_ptr;
597 target_ulong phys_pc, phys_page2, virt_page2;
598 int code_gen_size;
599
c27004ec
FB
600 phys_pc = get_phys_addr_code(env, pc);
601 tb = tb_alloc(pc);
d720b93d
FB
602 if (!tb) {
603 /* flush must be done */
604 tb_flush(env);
605 /* cannot fail at this point */
c27004ec 606 tb = tb_alloc(pc);
d720b93d
FB
607 }
608 tc_ptr = code_gen_ptr;
609 tb->tc_ptr = tc_ptr;
610 tb->cs_base = cs_base;
611 tb->flags = flags;
612 tb->cflags = cflags;
613 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
614 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
615
616 /* check next page if needed */
c27004ec 617 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 618 phys_page2 = -1;
c27004ec 619 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
620 phys_page2 = get_phys_addr_code(env, virt_page2);
621 }
622 tb_link_phys(tb, phys_pc, phys_page2);
623}
624#endif
625
9fa3e853
FB
626/* invalidate all TBs which intersect with the target physical page
627 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
628 the same physical page. 'is_cpu_write_access' should be true if called
629 from a real cpu write access: the virtual CPU will exit the current
630 TB if code is modified inside this TB. */
631void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
632 int is_cpu_write_access)
633{
634 int n, current_tb_modified, current_tb_not_found, current_flags;
d720b93d 635 CPUState *env = cpu_single_env;
9fa3e853 636 PageDesc *p;
ea1c1802 637 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
9fa3e853 638 target_ulong tb_start, tb_end;
d720b93d 639 target_ulong current_pc, current_cs_base;
9fa3e853
FB
640
641 p = page_find(start >> TARGET_PAGE_BITS);
642 if (!p)
643 return;
644 if (!p->code_bitmap &&
d720b93d
FB
645 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
646 is_cpu_write_access) {
9fa3e853
FB
647 /* build code bitmap */
648 build_page_bitmap(p);
649 }
650
651 /* we remove all the TBs in the range [start, end[ */
652 /* XXX: see if in some cases it could be faster to invalidate all the code */
d720b93d
FB
653 current_tb_not_found = is_cpu_write_access;
654 current_tb_modified = 0;
655 current_tb = NULL; /* avoid warning */
656 current_pc = 0; /* avoid warning */
657 current_cs_base = 0; /* avoid warning */
658 current_flags = 0; /* avoid warning */
9fa3e853
FB
659 tb = p->first_tb;
660 while (tb != NULL) {
661 n = (long)tb & 3;
662 tb = (TranslationBlock *)((long)tb & ~3);
663 tb_next = tb->page_next[n];
664 /* NOTE: this is subtle as a TB may span two physical pages */
665 if (n == 0) {
666 /* NOTE: tb_end may be after the end of the page, but
667 it is not a problem */
668 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
669 tb_end = tb_start + tb->size;
670 } else {
671 tb_start = tb->page_addr[1];
672 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
673 }
674 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
675#ifdef TARGET_HAS_PRECISE_SMC
676 if (current_tb_not_found) {
677 current_tb_not_found = 0;
678 current_tb = NULL;
679 if (env->mem_write_pc) {
680 /* now we have a real cpu fault */
681 current_tb = tb_find_pc(env->mem_write_pc);
682 }
683 }
684 if (current_tb == tb &&
685 !(current_tb->cflags & CF_SINGLE_INSN)) {
686 /* If we are modifying the current TB, we must stop
687 its execution. We could be more precise by checking
688 that the modification is after the current PC, but it
689 would require a specialized function to partially
690 restore the CPU state */
691
692 current_tb_modified = 1;
693 cpu_restore_state(current_tb, env,
694 env->mem_write_pc, NULL);
695#if defined(TARGET_I386)
696 current_flags = env->hflags;
697 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
698 current_cs_base = (target_ulong)env->segs[R_CS].base;
699 current_pc = current_cs_base + env->eip;
700#else
701#error unsupported CPU
702#endif
703 }
704#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
705 /* we need to do that to handle the case where a signal
706 occurs while doing tb_phys_invalidate() */
707 saved_tb = NULL;
708 if (env) {
709 saved_tb = env->current_tb;
710 env->current_tb = NULL;
711 }
9fa3e853 712 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
713 if (env) {
714 env->current_tb = saved_tb;
715 if (env->interrupt_request && env->current_tb)
716 cpu_interrupt(env, env->interrupt_request);
717 }
9fa3e853
FB
718 }
719 tb = tb_next;
720 }
721#if !defined(CONFIG_USER_ONLY)
722 /* if no code remaining, no need to continue to use slow writes */
723 if (!p->first_tb) {
724 invalidate_page_bitmap(p);
d720b93d
FB
725 if (is_cpu_write_access) {
726 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
727 }
728 }
729#endif
730#ifdef TARGET_HAS_PRECISE_SMC
731 if (current_tb_modified) {
732 /* we generate a block containing just the instruction
733 modifying the memory. It will ensure that it cannot modify
734 itself */
ea1c1802 735 env->current_tb = NULL;
d720b93d
FB
736 tb_gen_code(env, current_pc, current_cs_base, current_flags,
737 CF_SINGLE_INSN);
738 cpu_resume_from_signal(env, NULL);
9fa3e853 739 }
fd6ce8f6 740#endif
9fa3e853 741}
fd6ce8f6 742
9fa3e853 743/* len must be <= 8 and start must be a multiple of len */
d720b93d 744static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
9fa3e853
FB
745{
746 PageDesc *p;
747 int offset, b;
59817ccb 748#if 0
a4193c8a
FB
749 if (1) {
750 if (loglevel) {
751 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
752 cpu_single_env->mem_write_vaddr, len,
753 cpu_single_env->eip,
754 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
755 }
59817ccb
FB
756 }
757#endif
9fa3e853
FB
758 p = page_find(start >> TARGET_PAGE_BITS);
759 if (!p)
760 return;
761 if (p->code_bitmap) {
762 offset = start & ~TARGET_PAGE_MASK;
763 b = p->code_bitmap[offset >> 3] >> (offset & 7);
764 if (b & ((1 << len) - 1))
765 goto do_invalidate;
766 } else {
767 do_invalidate:
d720b93d 768 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
769 }
770}
771
9fa3e853 772#if !defined(CONFIG_SOFTMMU)
d720b93d
FB
773static void tb_invalidate_phys_page(target_ulong addr,
774 unsigned long pc, void *puc)
9fa3e853 775{
d720b93d
FB
776 int n, current_flags, current_tb_modified;
777 target_ulong current_pc, current_cs_base;
9fa3e853 778 PageDesc *p;
d720b93d
FB
779 TranslationBlock *tb, *current_tb;
780#ifdef TARGET_HAS_PRECISE_SMC
781 CPUState *env = cpu_single_env;
782#endif
9fa3e853
FB
783
784 addr &= TARGET_PAGE_MASK;
785 p = page_find(addr >> TARGET_PAGE_BITS);
786 if (!p)
787 return;
788 tb = p->first_tb;
d720b93d
FB
789 current_tb_modified = 0;
790 current_tb = NULL;
791 current_pc = 0; /* avoid warning */
792 current_cs_base = 0; /* avoid warning */
793 current_flags = 0; /* avoid warning */
794#ifdef TARGET_HAS_PRECISE_SMC
795 if (tb && pc != 0) {
796 current_tb = tb_find_pc(pc);
797 }
798#endif
9fa3e853
FB
799 while (tb != NULL) {
800 n = (long)tb & 3;
801 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
802#ifdef TARGET_HAS_PRECISE_SMC
803 if (current_tb == tb &&
804 !(current_tb->cflags & CF_SINGLE_INSN)) {
805 /* If we are modifying the current TB, we must stop
806 its execution. We could be more precise by checking
807 that the modification is after the current PC, but it
808 would require a specialized function to partially
809 restore the CPU state */
810
811 current_tb_modified = 1;
812 cpu_restore_state(current_tb, env, pc, puc);
813#if defined(TARGET_I386)
814 current_flags = env->hflags;
815 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
816 current_cs_base = (target_ulong)env->segs[R_CS].base;
817 current_pc = current_cs_base + env->eip;
818#else
819#error unsupported CPU
820#endif
821 }
822#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
823 tb_phys_invalidate(tb, addr);
824 tb = tb->page_next[n];
825 }
fd6ce8f6 826 p->first_tb = NULL;
d720b93d
FB
827#ifdef TARGET_HAS_PRECISE_SMC
828 if (current_tb_modified) {
829 /* we generate a block containing just the instruction
830 modifying the memory. It will ensure that it cannot modify
831 itself */
ea1c1802 832 env->current_tb = NULL;
d720b93d
FB
833 tb_gen_code(env, current_pc, current_cs_base, current_flags,
834 CF_SINGLE_INSN);
835 cpu_resume_from_signal(env, puc);
836 }
837#endif
fd6ce8f6 838}
9fa3e853 839#endif
fd6ce8f6
FB
840
841/* add the tb in the target page and protect it if necessary */
9fa3e853 842static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 843 unsigned int n, target_ulong page_addr)
fd6ce8f6
FB
844{
845 PageDesc *p;
9fa3e853
FB
846 TranslationBlock *last_first_tb;
847
848 tb->page_addr[n] = page_addr;
3a7d929e 849 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
9fa3e853
FB
850 tb->page_next[n] = p->first_tb;
851 last_first_tb = p->first_tb;
852 p->first_tb = (TranslationBlock *)((long)tb | n);
853 invalidate_page_bitmap(p);
fd6ce8f6 854
107db443 855#if defined(TARGET_HAS_SMC) || 1
d720b93d 856
9fa3e853 857#if defined(CONFIG_USER_ONLY)
fd6ce8f6 858 if (p->flags & PAGE_WRITE) {
53a5960a
PB
859 target_ulong addr;
860 PageDesc *p2;
9fa3e853
FB
861 int prot;
862
fd6ce8f6
FB
863 /* force the host page as non writable (writes will have a
864 page fault + mprotect overhead) */
53a5960a 865 page_addr &= qemu_host_page_mask;
fd6ce8f6 866 prot = 0;
53a5960a
PB
867 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
868 addr += TARGET_PAGE_SIZE) {
869
870 p2 = page_find (addr >> TARGET_PAGE_BITS);
871 if (!p2)
872 continue;
873 prot |= p2->flags;
874 p2->flags &= ~PAGE_WRITE;
875 page_get_flags(addr);
876 }
877 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
878 (prot & PAGE_BITS) & ~PAGE_WRITE);
879#ifdef DEBUG_TB_INVALIDATE
880 printf("protecting code page: 0x%08lx\n",
53a5960a 881 page_addr);
fd6ce8f6 882#endif
fd6ce8f6 883 }
9fa3e853
FB
884#else
885 /* if some code is already present, then the pages are already
886 protected. So we handle the case where only the first TB is
887 allocated in a physical page */
888 if (!last_first_tb) {
6a00d601 889 tlb_protect_code(page_addr);
9fa3e853
FB
890 }
891#endif
d720b93d
FB
892
893#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
894}
895
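/* Clarifying note (added, not in the original exec.c): tb_alloc_page() uses
   two write-protection schemes.  In user-mode emulation the host page is
   mprotect()ed read-only, so a guest write faults and is handled by
   page_unprotect() further down.  In system emulation, tlb_protect_code()
   clears CODE_DIRTY_FLAG for the page, which makes later TLB fills map it
   with IO_MEM_NOTDIRTY; writes then take the slow path where the translated
   code can be invalidated (cf. tb_invalidate_phys_page_fast() above). */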
896/* Allocate a new translation block. Flush the translation buffer if
897 too many translation blocks or too much generated code. */
c27004ec 898TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
899{
900 TranslationBlock *tb;
fd6ce8f6
FB
901
902 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
903 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
d4e8164f 904 return NULL;
fd6ce8f6
FB
905 tb = &tbs[nb_tbs++];
906 tb->pc = pc;
b448f2f3 907 tb->cflags = 0;
d4e8164f
FB
908 return tb;
909}
910
9fa3e853
FB
911/* add a new TB and link it to the physical page tables. phys_page2 is
912 (-1) to indicate that only one page contains the TB. */
913void tb_link_phys(TranslationBlock *tb,
914 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 915{
9fa3e853
FB
916 unsigned int h;
917 TranslationBlock **ptb;
918
919 /* add in the physical hash table */
920 h = tb_phys_hash_func(phys_pc);
921 ptb = &tb_phys_hash[h];
922 tb->phys_hash_next = *ptb;
923 *ptb = tb;
fd6ce8f6
FB
924
925 /* add in the page list */
9fa3e853
FB
926 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
927 if (phys_page2 != -1)
928 tb_alloc_page(tb, 1, phys_page2);
929 else
930 tb->page_addr[1] = -1;
9fa3e853 931
d4e8164f
FB
932 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
933 tb->jmp_next[0] = NULL;
934 tb->jmp_next[1] = NULL;
b448f2f3
FB
935#ifdef USE_CODE_COPY
936 tb->cflags &= ~CF_FP_USED;
937 if (tb->cflags & CF_TB_FP_USED)
938 tb->cflags |= CF_FP_USED;
939#endif
d4e8164f
FB
940
941 /* init original jump addresses */
942 if (tb->tb_next_offset[0] != 0xffff)
943 tb_reset_jump(tb, 0);
944 if (tb->tb_next_offset[1] != 0xffff)
945 tb_reset_jump(tb, 1);
8a40a180
FB
946
947#ifdef DEBUG_TB_CHECK
948 tb_page_check();
949#endif
fd6ce8f6
FB
950}
951
9fa3e853
FB
952/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
953 tb[1].tc_ptr. Return NULL if not found */
954TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 955{
9fa3e853
FB
956 int m_min, m_max, m;
957 unsigned long v;
958 TranslationBlock *tb;
a513fe19
FB
959
960 if (nb_tbs <= 0)
961 return NULL;
962 if (tc_ptr < (unsigned long)code_gen_buffer ||
963 tc_ptr >= (unsigned long)code_gen_ptr)
964 return NULL;
965 /* binary search (cf Knuth) */
966 m_min = 0;
967 m_max = nb_tbs - 1;
968 while (m_min <= m_max) {
969 m = (m_min + m_max) >> 1;
970 tb = &tbs[m];
971 v = (unsigned long)tb->tc_ptr;
972 if (v == tc_ptr)
973 return tb;
974 else if (tc_ptr < v) {
975 m_max = m - 1;
976 } else {
977 m_min = m + 1;
978 }
979 }
980 return &tbs[m_max];
981}
7501267e 982
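/* Clarifying note (added, not in the original exec.c): the binary search in
   tb_find_pc() relies on tbs[] being filled in allocation order while
   code_gen_ptr only grows between flushes, so tc_ptr values are sorted by
   index.  When tc_ptr points into the middle of a block, tbs[m_max] is the
   last block starting at or before tc_ptr, i.e. the block that contains it. */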
ea041c0e
FB
983static void tb_reset_jump_recursive(TranslationBlock *tb);
984
985static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
986{
987 TranslationBlock *tb1, *tb_next, **ptb;
988 unsigned int n1;
989
990 tb1 = tb->jmp_next[n];
991 if (tb1 != NULL) {
992 /* find head of list */
993 for(;;) {
994 n1 = (long)tb1 & 3;
995 tb1 = (TranslationBlock *)((long)tb1 & ~3);
996 if (n1 == 2)
997 break;
998 tb1 = tb1->jmp_next[n1];
999 }
1000 /* we are now sure that tb jumps to tb1 */
1001 tb_next = tb1;
1002
1003 /* remove tb from the jmp_first list */
1004 ptb = &tb_next->jmp_first;
1005 for(;;) {
1006 tb1 = *ptb;
1007 n1 = (long)tb1 & 3;
1008 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1009 if (n1 == n && tb1 == tb)
1010 break;
1011 ptb = &tb1->jmp_next[n1];
1012 }
1013 *ptb = tb->jmp_next[n];
1014 tb->jmp_next[n] = NULL;
1015
1016 /* suppress the jump to next tb in generated code */
1017 tb_reset_jump(tb, n);
1018
0124311e 1019 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1020 tb_reset_jump_recursive(tb_next);
1021 }
1022}
1023
1024static void tb_reset_jump_recursive(TranslationBlock *tb)
1025{
1026 tb_reset_jump_recursive2(tb, 0);
1027 tb_reset_jump_recursive2(tb, 1);
1028}
1029
1fddef4b 1030#if defined(TARGET_HAS_ICE)
d720b93d
FB
1031static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1032{
9b3c35e0
JM
1033 target_phys_addr_t addr;
1034 target_ulong pd;
c2f07f81
PB
1035 ram_addr_t ram_addr;
1036 PhysPageDesc *p;
d720b93d 1037
c2f07f81
PB
1038 addr = cpu_get_phys_page_debug(env, pc);
1039 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1040 if (!p) {
1041 pd = IO_MEM_UNASSIGNED;
1042 } else {
1043 pd = p->phys_offset;
1044 }
1045 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1046 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1047}
c27004ec 1048#endif
d720b93d 1049
6658ffb8
PB
1050/* Add a watchpoint. */
1051int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1052{
1053 int i;
1054
1055 for (i = 0; i < env->nb_watchpoints; i++) {
1056 if (addr == env->watchpoint[i].vaddr)
1057 return 0;
1058 }
1059 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1060 return -1;
1061
1062 i = env->nb_watchpoints++;
1063 env->watchpoint[i].vaddr = addr;
1064 tlb_flush_page(env, addr);
1065 /* FIXME: This flush is needed because of the hack to make memory ops
1066 terminate the TB. It can be removed once the proper IO trap and
1067 re-execute bits are in. */
1068 tb_flush(env);
1069 return i;
1070}
1071
1072/* Remove a watchpoint. */
1073int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1074{
1075 int i;
1076
1077 for (i = 0; i < env->nb_watchpoints; i++) {
1078 if (addr == env->watchpoint[i].vaddr) {
1079 env->nb_watchpoints--;
1080 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1081 tlb_flush_page(env, addr);
1082 return 0;
1083 }
1084 }
1085 return -1;
1086}
1087
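/* Illustrative usage sketch (added, not in the original exec.c; "watch_addr"
   is a placeholder variable): a debugger front end would pair the two calls
   above like this.  The index returned by cpu_watchpoint_insert() is
   informational, removal is by address. */
#if 0
    if (cpu_watchpoint_insert(env, watch_addr) < 0)
        fprintf(stderr, "too many watchpoints\n");
    /* ... run the guest ... */
    cpu_watchpoint_remove(env, watch_addr);
#endif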
c33a346e
FB
1088/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1089 breakpoint is reached */
2e12669a 1090int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
4c3a88a2 1091{
1fddef4b 1092#if defined(TARGET_HAS_ICE)
4c3a88a2 1093 int i;
d720b93d 1094
4c3a88a2
FB
1095 for(i = 0; i < env->nb_breakpoints; i++) {
1096 if (env->breakpoints[i] == pc)
1097 return 0;
1098 }
1099
1100 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1101 return -1;
1102 env->breakpoints[env->nb_breakpoints++] = pc;
d720b93d
FB
1103
1104 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1105 return 0;
1106#else
1107 return -1;
1108#endif
1109}
1110
1111/* remove a breakpoint */
2e12669a 1112int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
4c3a88a2 1113{
1fddef4b 1114#if defined(TARGET_HAS_ICE)
4c3a88a2
FB
1115 int i;
1116 for(i = 0; i < env->nb_breakpoints; i++) {
1117 if (env->breakpoints[i] == pc)
1118 goto found;
1119 }
1120 return -1;
1121 found:
4c3a88a2 1122 env->nb_breakpoints--;
1fddef4b
FB
1123 if (i < env->nb_breakpoints)
1124 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
d720b93d
FB
1125
1126 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1127 return 0;
1128#else
1129 return -1;
1130#endif
1131}
1132
c33a346e
FB
1133/* enable or disable single step mode. EXCP_DEBUG is returned by the
1134 CPU loop after each instruction */
1135void cpu_single_step(CPUState *env, int enabled)
1136{
1fddef4b 1137#if defined(TARGET_HAS_ICE)
c33a346e
FB
1138 if (env->singlestep_enabled != enabled) {
1139 env->singlestep_enabled = enabled;
1140 /* must flush all the translated code to avoid inconsistencies */
9fa3e853 1141 /* XXX: only flush what is necessary */
0124311e 1142 tb_flush(env);
c33a346e
FB
1143 }
1144#endif
1145}
1146
34865134
FB
1147/* enable or disable low level logging */
1148void cpu_set_log(int log_flags)
1149{
1150 loglevel = log_flags;
1151 if (loglevel && !logfile) {
1152 logfile = fopen(logfilename, "w");
1153 if (!logfile) {
1154 perror(logfilename);
1155 _exit(1);
1156 }
9fa3e853
FB
1157#if !defined(CONFIG_SOFTMMU)
1158 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1159 {
1160 static uint8_t logfile_buf[4096];
1161 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1162 }
1163#else
34865134 1164 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1165#endif
34865134
FB
1166 }
1167}
1168
1169void cpu_set_log_filename(const char *filename)
1170{
1171 logfilename = strdup(filename);
1172}
c33a346e 1173
0124311e 1174/* mask must never be zero, except for A20 change call */
68a79315 1175void cpu_interrupt(CPUState *env, int mask)
ea041c0e
FB
1176{
1177 TranslationBlock *tb;
ee8b7021 1178 static int interrupt_lock;
59817ccb 1179
68a79315 1180 env->interrupt_request |= mask;
ea041c0e
FB
1181 /* if the cpu is currently executing code, we must unlink it and
1182 all the potentially executing TB */
1183 tb = env->current_tb;
ee8b7021
FB
1184 if (tb && !testandset(&interrupt_lock)) {
1185 env->current_tb = NULL;
ea041c0e 1186 tb_reset_jump_recursive(tb);
ee8b7021 1187 interrupt_lock = 0;
ea041c0e
FB
1188 }
1189}
1190
b54ad049
FB
1191void cpu_reset_interrupt(CPUState *env, int mask)
1192{
1193 env->interrupt_request &= ~mask;
1194}
1195
f193c797
FB
1196CPULogItem cpu_log_items[] = {
1197 { CPU_LOG_TB_OUT_ASM, "out_asm",
1198 "show generated host assembly code for each compiled TB" },
1199 { CPU_LOG_TB_IN_ASM, "in_asm",
1200 "show target assembly code for each compiled TB" },
1201 { CPU_LOG_TB_OP, "op",
1202 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1203#ifdef TARGET_I386
1204 { CPU_LOG_TB_OP_OPT, "op_opt",
1205 "show micro ops after optimization for each compiled TB" },
1206#endif
1207 { CPU_LOG_INT, "int",
1208 "show interrupts/exceptions in short format" },
1209 { CPU_LOG_EXEC, "exec",
1210 "show trace before each executed TB (lots of logs)" },
9fddaa0c
FB
1211 { CPU_LOG_TB_CPU, "cpu",
1212 "show CPU state before block translation" },
f193c797
FB
1213#ifdef TARGET_I386
1214 { CPU_LOG_PCALL, "pcall",
1215 "show protected mode far calls/returns/exceptions" },
1216#endif
8e3a9fd2 1217#ifdef DEBUG_IOPORT
fd872598
FB
1218 { CPU_LOG_IOPORT, "ioport",
1219 "show all i/o ports accesses" },
8e3a9fd2 1220#endif
f193c797
FB
1221 { 0, NULL, NULL },
1222};
1223
1224static int cmp1(const char *s1, int n, const char *s2)
1225{
1226 if (strlen(s2) != n)
1227 return 0;
1228 return memcmp(s1, s2, n) == 0;
1229}
1230
1231/* takes a comma separated list of log masks. Return 0 if error. */
1232int cpu_str_to_log_mask(const char *str)
1233{
1234 CPULogItem *item;
1235 int mask;
1236 const char *p, *p1;
1237
1238 p = str;
1239 mask = 0;
1240 for(;;) {
1241 p1 = strchr(p, ',');
1242 if (!p1)
1243 p1 = p + strlen(p);
8e3a9fd2
FB
1244 if(cmp1(p,p1-p,"all")) {
1245 for(item = cpu_log_items; item->mask != 0; item++) {
1246 mask |= item->mask;
1247 }
1248 } else {
f193c797
FB
1249 for(item = cpu_log_items; item->mask != 0; item++) {
1250 if (cmp1(p, p1 - p, item->name))
1251 goto found;
1252 }
1253 return 0;
8e3a9fd2 1254 }
f193c797
FB
1255 found:
1256 mask |= item->mask;
1257 if (*p1 != ',')
1258 break;
1259 p = p1 + 1;
1260 }
1261 return mask;
1262}
ea041c0e 1263
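/* Illustrative usage (added, not in the original exec.c): the accepted names
   are the ones listed in cpu_log_items[] above, plus "all"; any unknown name
   makes cpu_str_to_log_mask() return 0. */
#if 0
    int mask = cpu_str_to_log_mask("in_asm,op");
    if (mask == 0)
        fprintf(stderr, "unknown log item\n");
    else
        cpu_set_log(mask);
#endif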
7501267e
FB
1264void cpu_abort(CPUState *env, const char *fmt, ...)
1265{
1266 va_list ap;
1267
1268 va_start(ap, fmt);
1269 fprintf(stderr, "qemu: fatal: ");
1270 vfprintf(stderr, fmt, ap);
1271 fprintf(stderr, "\n");
1272#ifdef TARGET_I386
7fe48483
FB
1273 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1274#else
1275 cpu_dump_state(env, stderr, fprintf, 0);
7501267e
FB
1276#endif
1277 va_end(ap);
1278 abort();
1279}
1280
c5be9f08
TS
1281CPUState *cpu_copy(CPUState *env)
1282{
1283 CPUState *new_env = cpu_init();
1284 /* preserve chaining and index */
1285 CPUState *next_cpu = new_env->next_cpu;
1286 int cpu_index = new_env->cpu_index;
1287 memcpy(new_env, env, sizeof(CPUState));
1288 new_env->next_cpu = next_cpu;
1289 new_env->cpu_index = cpu_index;
1290 return new_env;
1291}
1292
0124311e
FB
1293#if !defined(CONFIG_USER_ONLY)
1294
ee8b7021
FB
1295/* NOTE: if flush_global is true, also flush global entries (not
1296 implemented yet) */
1297void tlb_flush(CPUState *env, int flush_global)
33417e70 1298{
33417e70 1299 int i;
0124311e 1300
9fa3e853
FB
1301#if defined(DEBUG_TLB)
1302 printf("tlb_flush:\n");
1303#endif
0124311e
FB
1304 /* must reset current TB so that interrupts cannot modify the
1305 links while we are modifying them */
1306 env->current_tb = NULL;
1307
33417e70 1308 for(i = 0; i < CPU_TLB_SIZE; i++) {
84b7b8e7
FB
1309 env->tlb_table[0][i].addr_read = -1;
1310 env->tlb_table[0][i].addr_write = -1;
1311 env->tlb_table[0][i].addr_code = -1;
1312 env->tlb_table[1][i].addr_read = -1;
1313 env->tlb_table[1][i].addr_write = -1;
1314 env->tlb_table[1][i].addr_code = -1;
6fa4cea9
JM
1315#if (NB_MMU_MODES >= 3)
1316 env->tlb_table[2][i].addr_read = -1;
1317 env->tlb_table[2][i].addr_write = -1;
1318 env->tlb_table[2][i].addr_code = -1;
1319#if (NB_MMU_MODES == 4)
1320 env->tlb_table[3][i].addr_read = -1;
1321 env->tlb_table[3][i].addr_write = -1;
1322 env->tlb_table[3][i].addr_code = -1;
1323#endif
1324#endif
33417e70 1325 }
9fa3e853 1326
8a40a180 1327 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853
FB
1328
1329#if !defined(CONFIG_SOFTMMU)
1330 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
0a962c02
FB
1331#endif
1332#ifdef USE_KQEMU
1333 if (env->kqemu_enabled) {
1334 kqemu_flush(env, flush_global);
1335 }
9fa3e853 1336#endif
e3db7226 1337 tlb_flush_count++;
33417e70
FB
1338}
1339
274da6b2 1340static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1341{
84b7b8e7
FB
1342 if (addr == (tlb_entry->addr_read &
1343 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1344 addr == (tlb_entry->addr_write &
1345 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1346 addr == (tlb_entry->addr_code &
1347 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1348 tlb_entry->addr_read = -1;
1349 tlb_entry->addr_write = -1;
1350 tlb_entry->addr_code = -1;
1351 }
61382a50
FB
1352}
1353
2e12669a 1354void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1355{
8a40a180 1356 int i;
9fa3e853 1357 TranslationBlock *tb;
0124311e 1358
9fa3e853 1359#if defined(DEBUG_TLB)
108c49b8 1360 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1361#endif
0124311e
FB
1362 /* must reset current TB so that interrupts cannot modify the
1363 links while we are modifying them */
1364 env->current_tb = NULL;
61382a50
FB
1365
1366 addr &= TARGET_PAGE_MASK;
1367 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1368 tlb_flush_entry(&env->tlb_table[0][i], addr);
1369 tlb_flush_entry(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1370#if (NB_MMU_MODES >= 3)
1371 tlb_flush_entry(&env->tlb_table[2][i], addr);
1372#if (NB_MMU_MODES == 4)
1373 tlb_flush_entry(&env->tlb_table[3][i], addr);
1374#endif
1375#endif
0124311e 1376
b362e5e0
PB
1377 /* Discard jump cache entries for any tb which might potentially
1378 overlap the flushed page. */
1379 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1380 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1381
1382 i = tb_jmp_cache_hash_page(addr);
1383 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
9fa3e853 1384
0124311e 1385#if !defined(CONFIG_SOFTMMU)
9fa3e853 1386 if (addr < MMAP_AREA_END)
0124311e 1387 munmap((void *)addr, TARGET_PAGE_SIZE);
61382a50 1388#endif
0a962c02
FB
1389#ifdef USE_KQEMU
1390 if (env->kqemu_enabled) {
1391 kqemu_flush_page(env, addr);
1392 }
1393#endif
9fa3e853
FB
1394}
1395
9fa3e853
FB
1396/* update the TLBs so that writes to code in the virtual page 'addr'
1397 can be detected */
6a00d601 1398static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1399{
6a00d601
FB
1400 cpu_physical_memory_reset_dirty(ram_addr,
1401 ram_addr + TARGET_PAGE_SIZE,
1402 CODE_DIRTY_FLAG);
9fa3e853
FB
1403}
1404
9fa3e853 1405/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e
FB
1406 tested for self modifying code */
1407static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1408 target_ulong vaddr)
9fa3e853 1409{
3a7d929e 1410 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1411}
1412
1413static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1414 unsigned long start, unsigned long length)
1415{
1416 unsigned long addr;
84b7b8e7
FB
1417 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1418 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1419 if ((addr - start) < length) {
84b7b8e7 1420 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1ccde1cb
FB
1421 }
1422 }
1423}
1424
3a7d929e 1425void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1426 int dirty_flags)
1ccde1cb
FB
1427{
1428 CPUState *env;
4f2ac237 1429 unsigned long length, start1;
0a962c02
FB
1430 int i, mask, len;
1431 uint8_t *p;
1ccde1cb
FB
1432
1433 start &= TARGET_PAGE_MASK;
1434 end = TARGET_PAGE_ALIGN(end);
1435
1436 length = end - start;
1437 if (length == 0)
1438 return;
0a962c02 1439 len = length >> TARGET_PAGE_BITS;
3a7d929e 1440#ifdef USE_KQEMU
6a00d601
FB
1441 /* XXX: should not depend on cpu context */
1442 env = first_cpu;
3a7d929e 1443 if (env->kqemu_enabled) {
f23db169
FB
1444 ram_addr_t addr;
1445 addr = start;
1446 for(i = 0; i < len; i++) {
1447 kqemu_set_notdirty(env, addr);
1448 addr += TARGET_PAGE_SIZE;
1449 }
3a7d929e
FB
1450 }
1451#endif
f23db169
FB
1452 mask = ~dirty_flags;
1453 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1454 for(i = 0; i < len; i++)
1455 p[i] &= mask;
1456
1ccde1cb
FB
1457 /* we modify the TLB cache so that the dirty bit will be set again
1458 when accessing the range */
59817ccb 1459 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1460 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1461 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1462 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1463 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1464 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1465#if (NB_MMU_MODES >= 3)
1466 for(i = 0; i < CPU_TLB_SIZE; i++)
1467 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1468#if (NB_MMU_MODES == 4)
1469 for(i = 0; i < CPU_TLB_SIZE; i++)
1470 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1471#endif
1472#endif
6a00d601 1473 }
59817ccb
FB
1474
1475#if !defined(CONFIG_SOFTMMU)
1476 /* XXX: this is expensive */
1477 {
1478 VirtPageDesc *p;
1479 int j;
1480 target_ulong addr;
1481
1482 for(i = 0; i < L1_SIZE; i++) {
1483 p = l1_virt_map[i];
1484 if (p) {
1485 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1486 for(j = 0; j < L2_SIZE; j++) {
1487 if (p->valid_tag == virt_valid_tag &&
1488 p->phys_addr >= start && p->phys_addr < end &&
1489 (p->prot & PROT_WRITE)) {
1490 if (addr < MMAP_AREA_END) {
1491 mprotect((void *)addr, TARGET_PAGE_SIZE,
1492 p->prot & ~PROT_WRITE);
1493 }
1494 }
1495 addr += TARGET_PAGE_SIZE;
1496 p++;
1497 }
1498 }
1499 }
1500 }
1501#endif
1ccde1cb
FB
1502}
1503
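/* Clarifying note (added, not in the original exec.c): phys_ram_dirty holds
   one byte per target page, and each bit of that byte is an independent
   dirty flag (e.g. CODE_DIRTY_FLAG).  cpu_physical_memory_reset_dirty()
   clears the requested flag bits over a range and then rewrites every CPU's
   TLB entries so that the next write to those pages goes through the slow
   IO_MEM_NOTDIRTY path, where the dirty bits are set again. */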
3a7d929e
FB
1504static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1505{
1506 ram_addr_t ram_addr;
1507
84b7b8e7
FB
1508 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1509 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1510 tlb_entry->addend - (unsigned long)phys_ram_base;
1511 if (!cpu_physical_memory_is_dirty(ram_addr)) {
84b7b8e7 1512 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
3a7d929e
FB
1513 }
1514 }
1515}
1516
1517/* update the TLB according to the current state of the dirty bits */
1518void cpu_tlb_update_dirty(CPUState *env)
1519{
1520 int i;
1521 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1522 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1523 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1524 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1525#if (NB_MMU_MODES >= 3)
1526 for(i = 0; i < CPU_TLB_SIZE; i++)
1527 tlb_update_dirty(&env->tlb_table[2][i]);
1528#if (NB_MMU_MODES == 4)
1529 for(i = 0; i < CPU_TLB_SIZE; i++)
1530 tlb_update_dirty(&env->tlb_table[3][i]);
1531#endif
1532#endif
3a7d929e
FB
1533}
1534
1ccde1cb 1535static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
108c49b8 1536 unsigned long start)
1ccde1cb
FB
1537{
1538 unsigned long addr;
84b7b8e7
FB
1539 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1540 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1541 if (addr == start) {
84b7b8e7 1542 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1ccde1cb
FB
1543 }
1544 }
1545}
1546
1547/* update the TLB corresponding to virtual page vaddr and phys addr
1548 addr so that it is no longer dirty */
6a00d601
FB
1549static inline void tlb_set_dirty(CPUState *env,
1550 unsigned long addr, target_ulong vaddr)
1ccde1cb 1551{
1ccde1cb
FB
1552 int i;
1553
1ccde1cb
FB
1554 addr &= TARGET_PAGE_MASK;
1555 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1556 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1557 tlb_set_dirty1(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1558#if (NB_MMU_MODES >= 3)
1559 tlb_set_dirty1(&env->tlb_table[2][i], addr);
1560#if (NB_MMU_MODES == 4)
1561 tlb_set_dirty1(&env->tlb_table[3][i], addr);
1562#endif
1563#endif
9fa3e853
FB
1564}
1565
59817ccb
FB
1566/* add a new TLB entry. At most one entry for a given virtual address
1567 is permitted. Return 0 if OK or 2 if the page could not be mapped
1568 (can only happen in non SOFTMMU mode for I/O pages or pages
1569 conflicting with the host address space). */
84b7b8e7
FB
1570int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1571 target_phys_addr_t paddr, int prot,
1572 int is_user, int is_softmmu)
9fa3e853 1573{
92e873b9 1574 PhysPageDesc *p;
4f2ac237 1575 unsigned long pd;
9fa3e853 1576 unsigned int index;
4f2ac237 1577 target_ulong address;
108c49b8 1578 target_phys_addr_t addend;
9fa3e853 1579 int ret;
84b7b8e7 1580 CPUTLBEntry *te;
6658ffb8 1581 int i;
9fa3e853 1582
92e873b9 1583 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1584 if (!p) {
1585 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1586 } else {
1587 pd = p->phys_offset;
9fa3e853
FB
1588 }
1589#if defined(DEBUG_TLB)
3a7d929e 1590 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
84b7b8e7 1591 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
9fa3e853
FB
1592#endif
1593
1594 ret = 0;
1595#if !defined(CONFIG_SOFTMMU)
1596 if (is_softmmu)
1597#endif
1598 {
2a4188a3 1599 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
9fa3e853
FB
1600 /* IO memory case */
1601 address = vaddr | pd;
1602 addend = paddr;
1603 } else {
1604 /* standard memory */
1605 address = vaddr;
1606 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1607 }
6658ffb8
PB
1608
1609 /* Make accesses to pages with watchpoints go via the
1610 watchpoint trap routines. */
1611 for (i = 0; i < env->nb_watchpoints; i++) {
1612 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1613 if (address & ~TARGET_PAGE_MASK) {
1614 env->watchpoint[i].is_ram = 0;
1615 address = vaddr | io_mem_watch;
1616 } else {
1617 env->watchpoint[i].is_ram = 1;
1618 /* TODO: Figure out how to make read watchpoints coexist
1619 with code. */
1620 pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1621 }
1622 }
1623 }
9fa3e853 1624
90f18422 1625 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
9fa3e853 1626 addend -= vaddr;
84b7b8e7
FB
1627 te = &env->tlb_table[is_user][index];
1628 te->addend = addend;
67b915a5 1629 if (prot & PAGE_READ) {
84b7b8e7
FB
1630 te->addr_read = address;
1631 } else {
1632 te->addr_read = -1;
1633 }
1634 if (prot & PAGE_EXEC) {
1635 te->addr_code = address;
9fa3e853 1636 } else {
84b7b8e7 1637 te->addr_code = -1;
9fa3e853 1638 }
67b915a5 1639 if (prot & PAGE_WRITE) {
856074ec
FB
1640 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1641 (pd & IO_MEM_ROMD)) {
1642 /* write access calls the I/O callback */
1643 te->addr_write = vaddr |
1644 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
3a7d929e 1645 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1ccde1cb 1646 !cpu_physical_memory_is_dirty(pd)) {
84b7b8e7 1647 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
9fa3e853 1648 } else {
84b7b8e7 1649 te->addr_write = address;
9fa3e853
FB
1650 }
1651 } else {
84b7b8e7 1652 te->addr_write = -1;
9fa3e853
FB
1653 }
1654 }
1655#if !defined(CONFIG_SOFTMMU)
1656 else {
1657 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1658 /* IO access: no mapping is done as it will be handled by the
1659 soft MMU */
1660 if (!(env->hflags & HF_SOFTMMU_MASK))
1661 ret = 2;
1662 } else {
1663 void *map_addr;
59817ccb
FB
1664
1665 if (vaddr >= MMAP_AREA_END) {
1666 ret = 2;
1667 } else {
1668 if (prot & PROT_WRITE) {
1669 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
d720b93d 1670#if defined(TARGET_HAS_SMC) || 1
59817ccb 1671 first_tb ||
d720b93d 1672#endif
59817ccb
FB
1673 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1674 !cpu_physical_memory_is_dirty(pd))) {
1675 /* ROM: we do as if code was inside */
1676 /* if code is present, we only map as read only and save the
1677 original mapping */
1678 VirtPageDesc *vp;
1679
90f18422 1680 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
59817ccb
FB
1681 vp->phys_addr = pd;
1682 vp->prot = prot;
1683 vp->valid_tag = virt_valid_tag;
1684 prot &= ~PAGE_WRITE;
1685 }
1686 }
1687 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1688 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1689 if (map_addr == MAP_FAILED) {
1690 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1691 paddr, vaddr);
9fa3e853 1692 }
9fa3e853
FB
1693 }
1694 }
1695 }
1696#endif
1697 return ret;
1698}
1699
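/* Clarifying note (added, not in the original exec.c): in the softmmu case
   above, a TLB entry keeps the guest virtual page in addr_read/addr_write/
   addr_code with flags in the low bits (an io_mem index for MMIO,
   IO_MEM_NOTDIRTY for clean RAM), and keeps addend = host_address - vaddr,
   so a RAM access resolves to guest_vaddr + addend without any table walk. */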
1700/* called from signal handler: invalidate the code and unprotect the
1701 page. Return TRUE if the fault was successfully handled. */
53a5960a 1702int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
9fa3e853
FB
1703{
1704#if !defined(CONFIG_SOFTMMU)
1705 VirtPageDesc *vp;
1706
1707#if defined(DEBUG_TLB)
1708 printf("page_unprotect: addr=0x%08x\n", addr);
1709#endif
1710 addr &= TARGET_PAGE_MASK;
59817ccb
FB
1711
1712 /* if it is not mapped, no need to worry here */
1713 if (addr >= MMAP_AREA_END)
1714 return 0;
9fa3e853
FB
1715 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1716 if (!vp)
1717 return 0;
1718 /* NOTE: in this case, validate_tag is _not_ tested as it
1719 validates only the code TLB */
1720 if (vp->valid_tag != virt_valid_tag)
1721 return 0;
1722 if (!(vp->prot & PAGE_WRITE))
1723 return 0;
1724#if defined(DEBUG_TLB)
1725 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1726 addr, vp->phys_addr, vp->prot);
1727#endif
59817ccb
FB
1728 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1729 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1730 (unsigned long)addr, vp->prot);
d720b93d 1731 /* set the dirty bit */
0a962c02 1732 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
d720b93d
FB
1733 /* flush the code inside */
1734 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
9fa3e853
FB
1735 return 1;
1736#else
1737 return 0;
1738#endif
33417e70
FB
1739}
1740
0124311e
FB
1741#else
1742
ee8b7021 1743void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1744{
1745}
1746
2e12669a 1747void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1748{
1749}
1750
84b7b8e7
FB
1751int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1752 target_phys_addr_t paddr, int prot,
1753 int is_user, int is_softmmu)
9fa3e853
FB
1754{
1755 return 0;
1756}
0124311e 1757
9fa3e853
FB
1758/* dump memory mappings */
1759void page_dump(FILE *f)
33417e70 1760{
9fa3e853
FB
1761 unsigned long start, end;
1762 int i, j, prot, prot1;
1763 PageDesc *p;
33417e70 1764
9fa3e853
FB
1765 fprintf(f, "%-8s %-8s %-8s %s\n",
1766 "start", "end", "size", "prot");
1767 start = -1;
1768 end = -1;
1769 prot = 0;
1770 for(i = 0; i <= L1_SIZE; i++) {
1771 if (i < L1_SIZE)
1772 p = l1_map[i];
1773 else
1774 p = NULL;
1775 for(j = 0;j < L2_SIZE; j++) {
1776 if (!p)
1777 prot1 = 0;
1778 else
1779 prot1 = p[j].flags;
1780 if (prot1 != prot) {
1781 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1782 if (start != -1) {
1783 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1784 start, end, end - start,
1785 prot & PAGE_READ ? 'r' : '-',
1786 prot & PAGE_WRITE ? 'w' : '-',
1787 prot & PAGE_EXEC ? 'x' : '-');
1788 }
1789 if (prot1 != 0)
1790 start = end;
1791 else
1792 start = -1;
1793 prot = prot1;
1794 }
1795 if (!p)
1796 break;
1797 }
33417e70 1798 }
33417e70
FB
1799}
1800
53a5960a 1801int page_get_flags(target_ulong address)
33417e70 1802{
9fa3e853
FB
1803 PageDesc *p;
1804
1805 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1806 if (!p)
9fa3e853
FB
1807 return 0;
1808 return p->flags;
1809}
1810
1811/* modify the flags of a page and invalidate the code if
1812 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1813 depending on PAGE_WRITE */
53a5960a 1814void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1815{
1816 PageDesc *p;
53a5960a 1817 target_ulong addr;
9fa3e853
FB
1818
1819 start = start & TARGET_PAGE_MASK;
1820 end = TARGET_PAGE_ALIGN(end);
1821 if (flags & PAGE_WRITE)
1822 flags |= PAGE_WRITE_ORG;
1823 spin_lock(&tb_lock);
1824 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1825 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1826 /* if the page is being switched from write-protected to writable,
1827 invalidate any translated code it contains */
1828 if (!(p->flags & PAGE_WRITE) &&
1829 (flags & PAGE_WRITE) &&
1830 p->first_tb) {
d720b93d 1831 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1832 }
1833 p->flags = flags;
1834 }
1835 spin_unlock(&tb_lock);
33417e70
FB
1836}
1837
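A minimal sketch of a typical caller of page_set_flags() (a user-mode loader recording a freshly mapped guest region); the start/size arguments and the example_* helper name are hypothetical, not part of this file.

/* hypothetical helper: record a mapped, writable guest region;
   PAGE_WRITE_ORG is derived automatically from PAGE_WRITE */
static void example_mark_guest_mapping(target_ulong start, target_ulong size)
{
    page_set_flags(start, start + size,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
}
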
9fa3e853
FB
1838/* called from signal handler: invalidate the code and unprotect the
1839 page. Return TRUE if the fault was successfully handled. */
53a5960a 1840int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
1841{
1842 unsigned int page_index, prot, pindex;
1843 PageDesc *p, *p1;
53a5960a 1844 target_ulong host_start, host_end, addr;
9fa3e853 1845
83fb7adf 1846 host_start = address & qemu_host_page_mask;
9fa3e853
FB
1847 page_index = host_start >> TARGET_PAGE_BITS;
1848 p1 = page_find(page_index);
1849 if (!p1)
1850 return 0;
83fb7adf 1851 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
1852 p = p1;
1853 prot = 0;
1854 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1855 prot |= p->flags;
1856 p++;
1857 }
1858 /* if the page was really writable, then we change its
1859 protection back to writable */
1860 if (prot & PAGE_WRITE_ORG) {
1861 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1862 if (!(p1[pindex].flags & PAGE_WRITE)) {
53a5960a 1863 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
1864 (prot & PAGE_BITS) | PAGE_WRITE);
1865 p1[pindex].flags |= PAGE_WRITE;
1866 /* and since the content will be modified, we must invalidate
1867 the corresponding translated code. */
d720b93d 1868 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
1869#ifdef DEBUG_TB_CHECK
1870 tb_invalidate_check(address);
1871#endif
1872 return 1;
1873 }
1874 }
1875 return 0;
1876}
1877
1878/* call this function when system calls directly modify a memory area */
53a5960a
PB
1879/* ??? This should be redundant now that we have lock_user. */
1880void page_unprotect_range(target_ulong data, target_ulong data_size)
9fa3e853 1881{
53a5960a 1882 target_ulong start, end, addr;
9fa3e853 1883
53a5960a 1884 start = data;
9fa3e853
FB
1885 end = start + data_size;
1886 start &= TARGET_PAGE_MASK;
1887 end = TARGET_PAGE_ALIGN(end);
1888 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
d720b93d 1889 page_unprotect(addr, 0, NULL);
9fa3e853
FB
1890 }
1891}
1892
6a00d601
FB
1893static inline void tlb_set_dirty(CPUState *env,
1894 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
1895{
1896}
9fa3e853
FB
1897#endif /* defined(CONFIG_USER_ONLY) */
1898
33417e70
FB
1899/* register physical memory. 'size' must be a multiple of the target
1900 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1901 io memory page */
2e12669a
FB
1902void cpu_register_physical_memory(target_phys_addr_t start_addr,
1903 unsigned long size,
1904 unsigned long phys_offset)
33417e70 1905{
108c49b8 1906 target_phys_addr_t addr, end_addr;
92e873b9 1907 PhysPageDesc *p;
9d42037b 1908 CPUState *env;
33417e70 1909
5fd386f6 1910 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
33417e70 1911 end_addr = start_addr + size;
5fd386f6 1912 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
108c49b8 1913 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
9fa3e853 1914 p->phys_offset = phys_offset;
2a4188a3
FB
1915 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1916 (phys_offset & IO_MEM_ROMD))
33417e70
FB
1917 phys_offset += TARGET_PAGE_SIZE;
1918 }
9d42037b
FB
1919
1920 /* since each CPU stores ram addresses in its TLB cache, we must
1921 reset the modified entries */
1922 /* XXX: slow ! */
1923 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1924 tlb_flush(env, 1);
1925 }
33417e70
FB
1926}
1927
ba863458
FB
1928/* XXX: temporary until new memory mapping API */
1929uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1930{
1931 PhysPageDesc *p;
1932
1933 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1934 if (!p)
1935 return IO_MEM_UNASSIGNED;
1936 return p->phys_offset;
1937}
1938
e9a1ab19
FB
1939/* XXX: better than nothing */
1940ram_addr_t qemu_ram_alloc(unsigned int size)
1941{
1942 ram_addr_t addr;
1943 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
1944 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
1945 size, phys_ram_size);
1946 abort();
1947 }
1948 addr = phys_ram_alloc_offset;
1949 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
1950 return addr;
1951}
1952
1953void qemu_ram_free(ram_addr_t addr)
1954{
1955}
1956
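As a point of reference, a minimal sketch of how a machine model of this era could combine qemu_ram_alloc() with cpu_register_physical_memory(); the 16MB size, the base address 0 and the example_* helper name are arbitrary illustrative values, not part of this file.

/* hypothetical helper: allocate 16MB of guest RAM from phys_ram_base and
   map it at guest physical address 0 (IO_MEM_RAM marks plain RAM pages) */
static void example_register_main_ram(void)
{
    ram_addr_t ram_offset;

    ram_offset = qemu_ram_alloc(16 * 1024 * 1024);
    cpu_register_physical_memory(0x00000000, 16 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);
}
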
a4193c8a 1957static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 1958{
67d3b957
PB
1959#ifdef DEBUG_UNASSIGNED
1960 printf("Unassigned mem read 0x%08x\n", (int)addr);
b4f0a316
BS
1961#endif
1962#ifdef TARGET_SPARC
1963 // Not enabled yet because of bugs in gdbstub etc.
1964 //raise_exception(TT_DATA_ACCESS);
67d3b957 1965#endif
33417e70
FB
1966 return 0;
1967}
1968
a4193c8a 1969static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 1970{
67d3b957
PB
1971#ifdef DEBUG_UNASSIGNED
1972 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
1973#endif
b4f0a316
BS
1974#ifdef TARGET_SPARC
1975 // Not enabled yet because of bugs in gdbstub etc.
1976 //raise_exception(TT_DATA_ACCESS);
1977#endif
33417e70
FB
1978}
1979
1980static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1981 unassigned_mem_readb,
1982 unassigned_mem_readb,
1983 unassigned_mem_readb,
1984};
1985
1986static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1987 unassigned_mem_writeb,
1988 unassigned_mem_writeb,
1989 unassigned_mem_writeb,
1990};
1991
3a7d929e 1992static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1993{
3a7d929e
FB
1994 unsigned long ram_addr;
1995 int dirty_flags;
1996 ram_addr = addr - (unsigned long)phys_ram_base;
1997 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1998 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1999#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2000 tb_invalidate_phys_page_fast(ram_addr, 1);
2001 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2002#endif
3a7d929e 2003 }
c27004ec 2004 stb_p((uint8_t *)(long)addr, val);
f32fc648
FB
2005#ifdef USE_KQEMU
2006 if (cpu_single_env->kqemu_enabled &&
2007 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2008 kqemu_modify_page(cpu_single_env, ram_addr);
2009#endif
f23db169
FB
2010 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2011 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2012 /* we remove the notdirty callback only if the code has been
2013 flushed */
2014 if (dirty_flags == 0xff)
6a00d601 2015 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2016}
2017
3a7d929e 2018static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2019{
3a7d929e
FB
2020 unsigned long ram_addr;
2021 int dirty_flags;
2022 ram_addr = addr - (unsigned long)phys_ram_base;
2023 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2024 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2025#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2026 tb_invalidate_phys_page_fast(ram_addr, 2);
2027 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2028#endif
3a7d929e 2029 }
c27004ec 2030 stw_p((uint8_t *)(long)addr, val);
f32fc648
FB
2031#ifdef USE_KQEMU
2032 if (cpu_single_env->kqemu_enabled &&
2033 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2034 kqemu_modify_page(cpu_single_env, ram_addr);
2035#endif
f23db169
FB
2036 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2037 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2038 /* we remove the notdirty callback only if the code has been
2039 flushed */
2040 if (dirty_flags == 0xff)
6a00d601 2041 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2042}
2043
3a7d929e 2044static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2045{
3a7d929e
FB
2046 unsigned long ram_addr;
2047 int dirty_flags;
2048 ram_addr = addr - (unsigned long)phys_ram_base;
2049 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2050 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2051#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2052 tb_invalidate_phys_page_fast(ram_addr, 4);
2053 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2054#endif
3a7d929e 2055 }
c27004ec 2056 stl_p((uint8_t *)(long)addr, val);
f32fc648
FB
2057#ifdef USE_KQEMU
2058 if (cpu_single_env->kqemu_enabled &&
2059 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2060 kqemu_modify_page(cpu_single_env, ram_addr);
2061#endif
f23db169
FB
2062 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2063 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2064 /* we remove the notdirty callback only if the code has been
2065 flushed */
2066 if (dirty_flags == 0xff)
6a00d601 2067 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2068}
2069
3a7d929e 2070static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2071 NULL, /* never used */
2072 NULL, /* never used */
2073 NULL, /* never used */
2074};
2075
1ccde1cb
FB
2076static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2077 notdirty_mem_writeb,
2078 notdirty_mem_writew,
2079 notdirty_mem_writel,
2080};
2081
6658ffb8
PB
2082#if defined(CONFIG_SOFTMMU)
2083/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2084 so these check for a hit then pass through to the normal out-of-line
2085 phys routines. */
2086static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2087{
2088 return ldub_phys(addr);
2089}
2090
2091static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2092{
2093 return lduw_phys(addr);
2094}
2095
2096static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2097{
2098 return ldl_phys(addr);
2099}
2100
2101/* Generate a debug exception if a watchpoint has been hit.
2102 Returns the real physical address of the access. addr will be a host
2103 address in the is_ram case. */
2104static target_ulong check_watchpoint(target_phys_addr_t addr)
2105{
2106 CPUState *env = cpu_single_env;
2107 target_ulong watch;
2108 target_ulong retaddr;
2109 int i;
2110
2111 retaddr = addr;
2112 for (i = 0; i < env->nb_watchpoints; i++) {
2113 watch = env->watchpoint[i].vaddr;
2114 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2115 if (env->watchpoint[i].is_ram)
2116 retaddr = addr - (unsigned long)phys_ram_base;
2117 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2118 cpu_single_env->watchpoint_hit = i + 1;
2119 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2120 break;
2121 }
2122 }
2123 }
2124 return retaddr;
2125}
2126
2127static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2128 uint32_t val)
2129{
2130 addr = check_watchpoint(addr);
2131 stb_phys(addr, val);
2132}
2133
2134static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2135 uint32_t val)
2136{
2137 addr = check_watchpoint(addr);
2138 stw_phys(addr, val);
2139}
2140
2141static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2142 uint32_t val)
2143{
2144 addr = check_watchpoint(addr);
2145 stl_phys(addr, val);
2146}
2147
2148static CPUReadMemoryFunc *watch_mem_read[3] = {
2149 watch_mem_readb,
2150 watch_mem_readw,
2151 watch_mem_readl,
2152};
2153
2154static CPUWriteMemoryFunc *watch_mem_write[3] = {
2155 watch_mem_writeb,
2156 watch_mem_writew,
2157 watch_mem_writel,
2158};
2159#endif
2160
33417e70
FB
2161static void io_mem_init(void)
2162{
3a7d929e 2163 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2164 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2165 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2166 io_mem_nb = 5;
2167
6658ffb8
PB
2168#if defined(CONFIG_SOFTMMU)
2169 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2170 watch_mem_write, NULL);
2171#endif
1ccde1cb 2172 /* alloc dirty bits array */
0a962c02 2173 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2174 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2175}
2176
2177/* mem_read and mem_write are arrays of functions containing the
2178 function to access byte (index 0), word (index 1) and dword (index
2179 2). All functions must be supplied. If io_index is positive, the
2180 corresponding io zone is modified. If it is zero or negative, a new
2181 io zone is allocated. The return value can be used with
2182 cpu_register_physical_memory(). (-1) is returned on error. */
2183int cpu_register_io_memory(int io_index,
2184 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2185 CPUWriteMemoryFunc **mem_write,
2186 void *opaque)
33417e70
FB
2187{
2188 int i;
2189
2190 if (io_index <= 0) {
b5ff1b31 2191 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2192 return -1;
2193 io_index = io_mem_nb++;
2194 } else {
2195 if (io_index >= IO_MEM_NB_ENTRIES)
2196 return -1;
2197 }
b5ff1b31 2198
33417e70
FB
2199 for(i = 0;i < 3; i++) {
2200 io_mem_read[io_index][i] = mem_read[i];
2201 io_mem_write[io_index][i] = mem_write[i];
2202 }
a4193c8a 2203 io_mem_opaque[io_index] = opaque;
33417e70
FB
2204 return io_index << IO_MEM_SHIFT;
2205}
61382a50 2206
8926b517
FB
2207CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2208{
2209 return io_mem_write[io_index >> IO_MEM_SHIFT];
2210}
2211
2212CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2213{
2214 return io_mem_read[io_index >> IO_MEM_SHIFT];
2215}
2216
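For illustration, a sketch of the registration pattern described by the comment above cpu_register_io_memory(); the example_mmio_* names, the 0x10000000 base and the one-page size are hypothetical stand-ins for a real device model.

static uint32_t example_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    /* a real device would decode addr and return the register value */
    return 0;
}

static void example_mmio_writel(void *opaque, target_phys_addr_t addr,
                                uint32_t val)
{
    /* a real device would latch val into its register file */
}

static CPUReadMemoryFunc *example_mmio_read[3] = {
    example_mmio_readl, example_mmio_readl, example_mmio_readl,
};

static CPUWriteMemoryFunc *example_mmio_write[3] = {
    example_mmio_writel, example_mmio_writel, example_mmio_writel,
};

static void example_register_mmio(void)
{
    int io_index;

    /* io_index 0 requests a fresh slot; the result already carries the
       IO_MEM_SHIFT encoding expected by cpu_register_physical_memory() */
    io_index = cpu_register_io_memory(0, example_mmio_read,
                                      example_mmio_write, NULL);
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, io_index);
}
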
13eb76e0
FB
2217/* physical memory access (slow version, mainly for debug) */
2218#if defined(CONFIG_USER_ONLY)
2e12669a 2219void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2220 int len, int is_write)
2221{
2222 int l, flags;
2223 target_ulong page;
53a5960a 2224 void * p;
13eb76e0
FB
2225
2226 while (len > 0) {
2227 page = addr & TARGET_PAGE_MASK;
2228 l = (page + TARGET_PAGE_SIZE) - addr;
2229 if (l > len)
2230 l = len;
2231 flags = page_get_flags(page);
2232 if (!(flags & PAGE_VALID))
2233 return;
2234 if (is_write) {
2235 if (!(flags & PAGE_WRITE))
2236 return;
53a5960a
PB
2237 p = lock_user(addr, l, 0);
2238 memcpy(p, buf, l);
2239 unlock_user(p, addr, l);
13eb76e0
FB
2240 } else {
2241 if (!(flags & PAGE_READ))
2242 return;
53a5960a
PB
2243 p = lock_user(addr, l, 1);
2244 memcpy(buf, p, l);
2245 unlock_user(p, addr, 0);
13eb76e0
FB
2246 }
2247 len -= l;
2248 buf += l;
2249 addr += l;
2250 }
2251}
8df1cd07 2252
13eb76e0 2253#else
2e12669a 2254void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2255 int len, int is_write)
2256{
2257 int l, io_index;
2258 uint8_t *ptr;
2259 uint32_t val;
2e12669a
FB
2260 target_phys_addr_t page;
2261 unsigned long pd;
92e873b9 2262 PhysPageDesc *p;
13eb76e0
FB
2263
2264 while (len > 0) {
2265 page = addr & TARGET_PAGE_MASK;
2266 l = (page + TARGET_PAGE_SIZE) - addr;
2267 if (l > len)
2268 l = len;
92e873b9 2269 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2270 if (!p) {
2271 pd = IO_MEM_UNASSIGNED;
2272 } else {
2273 pd = p->phys_offset;
2274 }
2275
2276 if (is_write) {
3a7d929e 2277 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2278 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2279 /* XXX: could force cpu_single_env to NULL to avoid
2280 potential bugs */
13eb76e0 2281 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2282 /* 32 bit write access */
c27004ec 2283 val = ldl_p(buf);
a4193c8a 2284 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2285 l = 4;
2286 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2287 /* 16 bit write access */
c27004ec 2288 val = lduw_p(buf);
a4193c8a 2289 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2290 l = 2;
2291 } else {
1c213d19 2292 /* 8 bit write access */
c27004ec 2293 val = ldub_p(buf);
a4193c8a 2294 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2295 l = 1;
2296 }
2297 } else {
b448f2f3
FB
2298 unsigned long addr1;
2299 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2300 /* RAM case */
b448f2f3 2301 ptr = phys_ram_base + addr1;
13eb76e0 2302 memcpy(ptr, buf, l);
3a7d929e
FB
2303 if (!cpu_physical_memory_is_dirty(addr1)) {
2304 /* invalidate code */
2305 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2306 /* set dirty bit */
f23db169
FB
2307 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2308 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2309 }
13eb76e0
FB
2310 }
2311 } else {
2a4188a3
FB
2312 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2313 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2314 /* I/O case */
2315 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2316 if (l >= 4 && ((addr & 3) == 0)) {
2317 /* 32 bit read access */
a4193c8a 2318 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2319 stl_p(buf, val);
13eb76e0
FB
2320 l = 4;
2321 } else if (l >= 2 && ((addr & 1) == 0)) {
2322 /* 16 bit read access */
a4193c8a 2323 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2324 stw_p(buf, val);
13eb76e0
FB
2325 l = 2;
2326 } else {
1c213d19 2327 /* 8 bit read access */
a4193c8a 2328 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2329 stb_p(buf, val);
13eb76e0
FB
2330 l = 1;
2331 }
2332 } else {
2333 /* RAM case */
2334 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2335 (addr & ~TARGET_PAGE_MASK);
2336 memcpy(buf, ptr, l);
2337 }
2338 }
2339 len -= l;
2340 buf += l;
2341 addr += l;
2342 }
2343}
8df1cd07 2344
d0ecd2aa
FB
2345/* used for ROM loading: can write to RAM and ROM */
2346void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2347 const uint8_t *buf, int len)
2348{
2349 int l;
2350 uint8_t *ptr;
2351 target_phys_addr_t page;
2352 unsigned long pd;
2353 PhysPageDesc *p;
2354
2355 while (len > 0) {
2356 page = addr & TARGET_PAGE_MASK;
2357 l = (page + TARGET_PAGE_SIZE) - addr;
2358 if (l > len)
2359 l = len;
2360 p = phys_page_find(page >> TARGET_PAGE_BITS);
2361 if (!p) {
2362 pd = IO_MEM_UNASSIGNED;
2363 } else {
2364 pd = p->phys_offset;
2365 }
2366
2367 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2368 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2369 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2370 /* do nothing */
2371 } else {
2372 unsigned long addr1;
2373 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2374 /* ROM/RAM case */
2375 ptr = phys_ram_base + addr1;
2376 memcpy(ptr, buf, l);
2377 }
2378 len -= l;
2379 buf += l;
2380 addr += l;
2381 }
2382}
2383
2384
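A sketch of how a firmware loader might use the helper above; the rom_paddr, image and image_size arguments and the example_* name are hypothetical.

/* hypothetical helper: unlike cpu_physical_memory_rw(), this path also
   stores into pages registered as IO_MEM_ROM */
static void example_load_rom_image(target_phys_addr_t rom_paddr,
                                   const uint8_t *image, int image_size)
{
    cpu_physical_memory_write_rom(rom_paddr, image, image_size);
}
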
8df1cd07
FB
2385/* warning: addr must be aligned */
2386uint32_t ldl_phys(target_phys_addr_t addr)
2387{
2388 int io_index;
2389 uint8_t *ptr;
2390 uint32_t val;
2391 unsigned long pd;
2392 PhysPageDesc *p;
2393
2394 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2395 if (!p) {
2396 pd = IO_MEM_UNASSIGNED;
2397 } else {
2398 pd = p->phys_offset;
2399 }
2400
2a4188a3
FB
2401 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2402 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2403 /* I/O case */
2404 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2405 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2406 } else {
2407 /* RAM case */
2408 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2409 (addr & ~TARGET_PAGE_MASK);
2410 val = ldl_p(ptr);
2411 }
2412 return val;
2413}
2414
84b7b8e7
FB
2415/* warning: addr must be aligned */
2416uint64_t ldq_phys(target_phys_addr_t addr)
2417{
2418 int io_index;
2419 uint8_t *ptr;
2420 uint64_t val;
2421 unsigned long pd;
2422 PhysPageDesc *p;
2423
2424 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2425 if (!p) {
2426 pd = IO_MEM_UNASSIGNED;
2427 } else {
2428 pd = p->phys_offset;
2429 }
2430
2a4188a3
FB
2431 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2432 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2433 /* I/O case */
2434 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2435#ifdef TARGET_WORDS_BIGENDIAN
2436 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2437 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2438#else
2439 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2440 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2441#endif
2442 } else {
2443 /* RAM case */
2444 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2445 (addr & ~TARGET_PAGE_MASK);
2446 val = ldq_p(ptr);
2447 }
2448 return val;
2449}
2450
aab33094
FB
2451/* XXX: optimize */
2452uint32_t ldub_phys(target_phys_addr_t addr)
2453{
2454 uint8_t val;
2455 cpu_physical_memory_read(addr, &val, 1);
2456 return val;
2457}
2458
2459/* XXX: optimize */
2460uint32_t lduw_phys(target_phys_addr_t addr)
2461{
2462 uint16_t val;
2463 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2464 return tswap16(val);
2465}
2466
8df1cd07
FB
2467/* warning: addr must be aligned. The ram page is not marked as dirty
2468 and the code inside is not invalidated. It is useful if the dirty
2469 bits are used to track modified PTEs */
2470void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2471{
2472 int io_index;
2473 uint8_t *ptr;
2474 unsigned long pd;
2475 PhysPageDesc *p;
2476
2477 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2478 if (!p) {
2479 pd = IO_MEM_UNASSIGNED;
2480 } else {
2481 pd = p->phys_offset;
2482 }
2483
3a7d929e 2484 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2485 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2486 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2487 } else {
2488 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2489 (addr & ~TARGET_PAGE_MASK);
2490 stl_p(ptr, val);
2491 }
2492}
2493
bc98a7ef
JM
2494void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2495{
2496 int io_index;
2497 uint8_t *ptr;
2498 unsigned long pd;
2499 PhysPageDesc *p;
2500
2501 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2502 if (!p) {
2503 pd = IO_MEM_UNASSIGNED;
2504 } else {
2505 pd = p->phys_offset;
2506 }
2507
2508 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2509 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2510#ifdef TARGET_WORDS_BIGENDIAN
2511 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2512 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2513#else
2514 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2515 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2516#endif
2517 } else {
2518 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2519 (addr & ~TARGET_PAGE_MASK);
2520 stq_p(ptr, val);
2521 }
2522}
2523
8df1cd07 2524/* warning: addr must be aligned */
8df1cd07
FB
2525void stl_phys(target_phys_addr_t addr, uint32_t val)
2526{
2527 int io_index;
2528 uint8_t *ptr;
2529 unsigned long pd;
2530 PhysPageDesc *p;
2531
2532 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2533 if (!p) {
2534 pd = IO_MEM_UNASSIGNED;
2535 } else {
2536 pd = p->phys_offset;
2537 }
2538
3a7d929e 2539 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2540 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2541 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2542 } else {
2543 unsigned long addr1;
2544 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2545 /* RAM case */
2546 ptr = phys_ram_base + addr1;
2547 stl_p(ptr, val);
3a7d929e
FB
2548 if (!cpu_physical_memory_is_dirty(addr1)) {
2549 /* invalidate code */
2550 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2551 /* set dirty bit */
f23db169
FB
2552 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2553 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2554 }
8df1cd07
FB
2555 }
2556}
2557
aab33094
FB
2558/* XXX: optimize */
2559void stb_phys(target_phys_addr_t addr, uint32_t val)
2560{
2561 uint8_t v = val;
2562 cpu_physical_memory_write(addr, &v, 1);
2563}
2564
2565/* XXX: optimize */
2566void stw_phys(target_phys_addr_t addr, uint32_t val)
2567{
2568 uint16_t v = tswap16(val);
2569 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2570}
2571
2572/* XXX: optimize */
2573void stq_phys(target_phys_addr_t addr, uint64_t val)
2574{
2575 val = tswap64(val);
2576 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2577}
2578
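As an illustration of the physical-memory accessors above, a sketch of a device model reading and completing a small in-RAM descriptor; the three-word layout, the desc_paddr argument and the example_* name are hypothetical.

/* hypothetical descriptor: word 0 = guest buffer address, word 1 = length,
   word 2 = completion flag written back by the device */
static void example_process_descriptor(target_phys_addr_t desc_paddr)
{
    uint8_t tmp[64];
    uint32_t buf_addr, buf_len;

    buf_addr = ldl_phys(desc_paddr);
    buf_len = ldl_phys(desc_paddr + 4);
    if (buf_len > sizeof(tmp))
        buf_len = sizeof(tmp);
    cpu_physical_memory_read(buf_addr, tmp, buf_len);
    stl_phys(desc_paddr + 8, 1);
}
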
13eb76e0
FB
2579#endif
2580
2581/* virtual memory access for debug */
b448f2f3
FB
2582int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2583 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2584{
2585 int l;
9b3c35e0
JM
2586 target_phys_addr_t phys_addr;
2587 target_ulong page;
13eb76e0
FB
2588
2589 while (len > 0) {
2590 page = addr & TARGET_PAGE_MASK;
2591 phys_addr = cpu_get_phys_page_debug(env, page);
2592 /* if no physical page mapped, return an error */
2593 if (phys_addr == -1)
2594 return -1;
2595 l = (page + TARGET_PAGE_SIZE) - addr;
2596 if (l > len)
2597 l = len;
b448f2f3
FB
2598 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2599 buf, l, is_write);
13eb76e0
FB
2600 len -= l;
2601 buf += l;
2602 addr += l;
2603 }
2604 return 0;
2605}
2606
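A sketch of the kind of debugger-style caller cpu_memory_rw_debug() is meant for; the example_* helper name and the fixed 16-byte window are hypothetical.

/* hypothetical helper: hex-dump 16 bytes of guest virtual memory, going
   through the per-CPU page tables rather than raw host pointers */
static void example_dump_guest_bytes(CPUState *env, target_ulong vaddr)
{
    uint8_t buf[16];
    int i;

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        printf("no mapping at 0x%08x\n", (int)vaddr);
        return;
    }
    for (i = 0; i < (int)sizeof(buf); i++)
        printf("%02x ", buf[i]);
    printf("\n");
}
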
e3db7226
FB
2607void dump_exec_info(FILE *f,
2608 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2609{
2610 int i, target_code_size, max_target_code_size;
2611 int direct_jmp_count, direct_jmp2_count, cross_page;
2612 TranslationBlock *tb;
2613
2614 target_code_size = 0;
2615 max_target_code_size = 0;
2616 cross_page = 0;
2617 direct_jmp_count = 0;
2618 direct_jmp2_count = 0;
2619 for(i = 0; i < nb_tbs; i++) {
2620 tb = &tbs[i];
2621 target_code_size += tb->size;
2622 if (tb->size > max_target_code_size)
2623 max_target_code_size = tb->size;
2624 if (tb->page_addr[1] != -1)
2625 cross_page++;
2626 if (tb->tb_next_offset[0] != 0xffff) {
2627 direct_jmp_count++;
2628 if (tb->tb_next_offset[1] != 0xffff) {
2629 direct_jmp2_count++;
2630 }
2631 }
2632 }
2633 /* XXX: avoid using doubles ? */
2634 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2635 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2636 nb_tbs ? target_code_size / nb_tbs : 0,
2637 max_target_code_size);
2638 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2639 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2640 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2641 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2642 cross_page,
2643 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2644 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2645 direct_jmp_count,
2646 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2647 direct_jmp2_count,
2648 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2649 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2650 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2651 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2652}
2653
61382a50
FB
2654#if !defined(CONFIG_USER_ONLY)
2655
2656#define MMUSUFFIX _cmmu
2657#define GETPC() NULL
2658#define env cpu_single_env
b769d8fe 2659#define SOFTMMU_CODE_ACCESS
61382a50
FB
2660
2661#define SHIFT 0
2662#include "softmmu_template.h"
2663
2664#define SHIFT 1
2665#include "softmmu_template.h"
2666
2667#define SHIFT 2
2668#include "softmmu_template.h"
2669
2670#define SHIFT 3
2671#include "softmmu_template.h"
2672
2673#undef env
2674
2675#endif