/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

54TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
55TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
9fa3e853 56TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
fd6ce8f6 57int nb_tbs;
eb51d102
FB
58/* any access to the tbs or the page table must use this lock */
59spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6
FB
60
61uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
62uint8_t *code_gen_ptr;
63
9fa3e853
FB
64int phys_ram_size;
65int phys_ram_fd;
66uint8_t *phys_ram_base;
1ccde1cb 67uint8_t *phys_ram_dirty;
9fa3e853 68
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
} PhysPageDesc;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

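/* NOTE: the maps below are two-level: an address is first shifted
   right by TARGET_PAGE_BITS to get its page index, then the top
   L1_BITS of that index select an L1 slot and the low L2_BITS select
   the entry inside the L2 array. For example, with 4 KB target pages
   (TARGET_PAGE_BITS = 12) and L2_BITS = 10, address 0x12345678 has
   page index 0x12345, hence L1 index 0x48 and L2 index 0x345. L2
   arrays are allocated on first use, so sparse address spaces stay
   cheap. */
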
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc *l1_phys_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
{
    PhysPageDesc **lp, *p;

    lp = &l1_phys_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(unsigned int index)
{
    PhysPageDesc *p;

    p = l1_phys_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, target_ulong addr);
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);

static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif
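
/* NOTE: virt_page_flush() invalidates every VirtPageDesc in O(1) by
   incrementing 'virt_valid_tag': an entry only counts as valid when
   its 'valid_tag' matches the current tag. Only when the tag wraps
   around to zero must the whole map be walked to clear stale tags. */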

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    memset(tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof(void *));
    virt_page_flush();

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}
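
/* NOTE: the whole translation cache is discarded at once rather than
   evicting individual TBs; retranslation is comparatively cheap, and
   a full reset presumably avoids the bookkeeping a finer-grained
   eviction policy would need. */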

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

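/* The TB lists chained through 'page_next' and 'jmp_next' store a
   2-bit index in the low bits of each pointer: 0 or 1 says which of
   the TB's two links to follow next, and 2 marks the head of the
   circular jump list (the owning TB itself, as set in tb_link()).
   Hence the recurring '& 3' / '& ~3' unmasking in the code below. */
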
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

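/* Example: set_bits(tab, 5, 6) marks bits 5..10: bits 5-7 of tab[0]
   through the first mask, then bits 0-2 of tab[1] through the final
   mask. */
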
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
    tb = tb_alloc((unsigned long)pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc((unsigned long)pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   in the range [start, end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

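/* NOTE: the binary search relies on tbs[] being ordered by tc_ptr,
   which holds because TBs are allocated sequentially and
   code_gen_ptr only grows until the next tb_flush(). */
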
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logs */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

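/* NOTE: chained TBs branch directly to each other without returning
   to the main execution loop, so a pending interrupt would not be
   noticed until the chain ended. Resetting the jumps of the current
   TB (and, recursively, of the TBs it links to) forces a return to
   the loop, where interrupt_request is examined. */
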
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

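/* e.g. cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, and "all" selects the mask of
   every entry in cpu_log_items[]. */
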
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    memset(tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof(void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */

    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}

static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}

static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       unsigned long phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address;
    unsigned long addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    first_tb = NULL;
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        PageDesc *p1;
        pd = p->phys_offset;
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
            /* NOTE: we also allocate the page at this stage */
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
            first_tb = p1->first_tb;
        }
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else
                /* XXX: the PowerPC code seems not ready to handle
                   self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

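/* NOTE: 'addend -= vaddr' stores host_page_address - guest_vaddr in
   the TLB entry, so the fast path can form the host address with a
   single add: host_addr = guest_vaddr + tlb_entry.addend. */
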
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

33417e70
FB
1823/* register physical memory. 'size' must be a multiple of the target
1824 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1825 io memory page */
2e12669a
FB
1826void cpu_register_physical_memory(target_phys_addr_t start_addr,
1827 unsigned long size,
1828 unsigned long phys_offset)
33417e70
FB
1829{
1830 unsigned long addr, end_addr;
92e873b9 1831 PhysPageDesc *p;
33417e70 1832
5fd386f6 1833 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
33417e70 1834 end_addr = start_addr + size;
5fd386f6 1835 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
92e873b9 1836 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
9fa3e853
FB
1837 p->phys_offset = phys_offset;
1838 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
33417e70
FB
1839 phys_offset += TARGET_PAGE_SIZE;
1840 }
1841}
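
/* Example (hypothetical): a board init function would typically map
   all of guest RAM at guest physical address 0. A phys_offset with no
   I/O bits set means plain RAM backed by phys_ram_base at that
   offset. A sketch, not taken from any particular machine model. */
#if 0
static void example_register_ram(void)
{
    /* map phys_ram_size bytes of RAM starting at guest physical 0 */
    cpu_register_physical_memory(0, phys_ram_size, 0);
}
#endif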

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

/* self modifying code support in soft mmu mode: a write to a page
   containing code is routed to these functions */

static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};
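
/* Example (hypothetical): how a 16 bit guest store to a page tagged
   IO_MEM_CODE in the TLB reaches the handlers above. In reality the
   softmmu store helpers perform this dispatch; index 1 selects the
   word-sized callback. A simplified sketch of the table lookup only. */
#if 0
static void example_smc_store_w(target_phys_addr_t addr, uint32_t val)
{
    int io_index = (IO_MEM_CODE >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
}
#endif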

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stb_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stw_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stl_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        /* allocating a new slot: check the table occupancy, not the
           (non-positive) index we were handed */
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
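
/* Example (hypothetical): how a device model plugs its MMIO callbacks
   into the tables above and maps them into the guest physical address
   space. All of the mydev_* names and the 0xf1000000 base address are
   invented for illustration. */
#if 0
static uint32_t mydev_readb(void *opaque, target_phys_addr_t addr) { return 0; }
static void mydev_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) { }

static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readb, mydev_readb,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writeb, mydev_writeb,
};

static void example_init_mydev(void *dev_state)
{
    int io;

    /* io_index 0 asks for a fresh slot; the return value already has
       the index shifted into the phys_offset I/O bits */
    io = cpu_register_io_memory(0, mydev_read, mydev_write, dev_state);
    cpu_register_physical_memory(0xf1000000, 0x1000, io);
}
#endif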

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* copy only the bytes that fit in this page; using 'len'
               here would overrun the page being processed */
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit (indexed by the ram offset, consistent
                   with the invalidation above) */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 1;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
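
/* Example (hypothetical): a DMA-capable device model can use
   cpu_physical_memory_rw() to copy data into guest physical memory
   without caring whether the target is RAM or MMIO; code invalidation
   and dirty bits are handled for it. A sketch with invented names. */
#if 0
static void example_dma_write(target_phys_addr_t dma_addr,
                              const uint8_t *data, int size)
{
    cpu_physical_memory_rw(dma_addr, (uint8_t *)data, size, 1);
}
#endif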

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
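
/* Example (hypothetical): a debugger stub would read guest virtual
   memory through cpu_memory_rw_debug(), which translates page by page
   and fails cleanly on unmapped addresses. A sketch only. */
#if 0
static int example_gdb_read_mem(CPUState *env, target_ulong vaddr,
                                uint8_t *out, int len)
{
    if (cpu_memory_rw_debug(env, vaddr, out, len, 0) < 0)
        return -1; /* some page in the range was not mapped */
    return 0;
}
#endif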

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif
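
/* The repeated inclusion above is a C "template" idiom: the header is
   written against a SHIFT macro and, roughly, expands along the lines
   of

       #define DATA_SIZE (1 << SHIFT)
       ... one load/store helper specialized for DATA_SIZE ...
       #undef SHIFT

   so including it with SHIFT 0..3 stamps out byte, word, long and
   quad variants, here with MMUSUFFIX _cmmu for code accesses. (The
   exact contents of softmmu_template.h are sketched from context.) */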