/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#if !defined(CONFIG_SOFTMMU)
#include <sys/mman.h>
#endif

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of code write accesses to a given page; past a threshold we
       switch to a bitmap of the code present in the page */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
} PhysPageDesc;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
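/* Illustrative note (not from the original source): l1_map and friends
   below form a two level page table over target addresses. Assuming a
   32-bit target with TARGET_PAGE_BITS == 12 (so L1_BITS == 10), a page
   index decomposes as:

       index    = addr >> TARGET_PAGE_BITS;     20-bit page number
       l1_index = index >> L2_BITS;             upper 10 bits
       l2_index = index & (L2_SIZE - 1);        lower 10 bits

   page_find_alloc() below indexes the tables exactly this way. */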
static void io_mem_init(void);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc *l1_phys_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

static void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    real_host_page_size = 4096;
#else
    real_host_page_size = getpagesize();
#endif
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
{
    PhysPageDesc **lp, *p;

    lp = &l1_phys_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(unsigned int index)
{
    PhysPageDesc *p;

    p = l1_phys_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, target_ulong addr);
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);

static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
    int i;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    virt_page_flush();

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++)
        tb_phys_hash[i] = NULL;
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
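/* Clarifying note: the lists manipulated above and below store a small tag
   in the two low bits of each TranslationBlock pointer. In the per-page
   lists ('first_tb'/'page_next'), the tag is the index (0 or 1) of the
   physical page the link belongs to, since a TB may span two pages. In the
   jump lists ('jmp_first'/'jmp_next'), tags 0 and 1 select the jump slot
   and tag 2 marks the end of the circular list. */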
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash; in that
           case it is simply not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
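/* Worked example (illustrative): set_bits(tab, 6, 4) marks bits 6..9,
   i.e. it ORs 0xc0 into tab[0] (bits 6 and 7) and 0x03 into tab[1]
   (bits 8 and 9). */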
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
    tb = tb_alloc((unsigned long)pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc((unsigned long)pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
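/* Worked example (illustrative): for a 4-byte store at page offset 0x24,
   offset >> 3 == 4 and offset & 7 == 4, so the test above extracts bits
   4..7 of code_bitmap[4]; if any of them is set, translated code overlaps
   the store and the slow tb_invalidate_phys_page_range() path is taken. */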
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#ifdef TARGET_HAS_SMC

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
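/* NOTE: tb_find_pc() is what makes the precise SMC handling above
   possible: given a host PC inside the generated code (for instance
   env->mem_write_pc captured on a fault), the returned TB can be passed
   to cpu_restore_state() to rebuild the guest CPU state at that point. */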
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))
                goto found;
        }
        return 0;
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
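/* Example (illustrative): cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, while an unknown name anywhere in
   the list (e.g. "in_asm,foo") makes the whole call return 0. */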
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TBs at this
       virtual address */

    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}

static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       unsigned long phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address;
    unsigned long addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    first_tb = NULL;
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        PageDesc *p1;
        pd = p->phys_offset;
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
            /* NOTE: we also allocate the page at this stage */
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
            first_tb = p1->first_tb;
        }
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else
                /* XXX: the PowerPC code seems not ready to handle
                   self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, valid_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
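/* Sample output (illustrative values), as produced by the format strings
   above:

       start    end      size     prot
       08048000-08056000 0000e000 rwx
       40000000-40001000 00001000 r-x
*/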
int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    unsigned long addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
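/* Example usage (illustrative): map all of guest RAM at physical address 0,
   then a 64KB ROM area; 'rom_offset' is an assumed offset previously carved
   out of the phys_ram_base area:

       cpu_register_physical_memory(0x00000000, phys_ram_size, IO_MEM_RAM);
       cpu_register_physical_memory(0x000f0000, 0x10000,
                                    rom_offset | IO_MEM_ROM);
*/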
a4193c8a 1811static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70
FB
1812{
1813 return 0;
1814}
1815
a4193c8a 1816static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70
FB
1817{
1818}
1819
1820static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1821 unassigned_mem_readb,
1822 unassigned_mem_readb,
1823 unassigned_mem_readb,
1824};
1825
1826static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1827 unassigned_mem_writeb,
1828 unassigned_mem_writeb,
1829 unassigned_mem_writeb,
1830};
1831
/* self modifying code support in soft mmu mode: writing to a page
   containing code comes to these functions */

static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

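/* Illustrative sketch (assumption, not from this file): when the TLB
   maps a page as IO_MEM_CODE, a guest store is redirected through the
   handlers above, so the TB cache for that page is invalidated before
   the raw store lands and the page is marked dirty. The dispatch below
   mirrors what the softmmu store helpers do. */
#if 0
static void example_store_byte_to_code_page(unsigned long host_addr, uint8_t val)
{
    int io_index = IO_MEM_CODE >> IO_MEM_SHIFT;
    /* index 0 selects the byte handler; 'host_addr' points into
       phys_ram_base, as the real softmmu path arranges */
    io_mem_write[io_index][0](io_mem_opaque[io_index], host_addr, val);
}
#endif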
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stb_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stw_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stl_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array (one byte per target page) */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is nonzero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        /* allocate a new io zone; fail if the table is full */
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}

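/* Illustrative sketch (assumption, not from this file): registering a
   device's mmio callbacks and mapping them into the physical address
   space. The device state type, handlers and base address are made up
   for the example. */
#if 0
static uint32_t example_dev_readb(void *opaque, target_phys_addr_t addr)
{
    ExampleDevState *s = opaque;
    return s->regs[addr & 0xff];
}

static void example_dev_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    ExampleDevState *s = opaque;
    s->regs[addr & 0xff] = val;
}

static CPUReadMemoryFunc *example_dev_read[3] = {
    example_dev_readb, example_dev_readb, example_dev_readb,
};

static CPUWriteMemoryFunc *example_dev_write[3] = {
    example_dev_writeb, example_dev_writeb, example_dev_writeb,
};

void example_dev_init(ExampleDevState *s, target_phys_addr_t base)
{
    /* io_index == 0: allocate a new io zone for this device */
    int io_index = cpu_register_io_memory(0, example_dev_read,
                                          example_dev_write, s);
    /* the returned value goes directly into the phys_offset slot */
    cpu_register_physical_memory(base, 0x1000, io_index);
}
#endif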
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 1;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif

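/* Illustrative usage sketch (assumption, not from this file): loading
   a boot image into guest RAM through the slow path. The function and
   parameter names are made up for the example. */
#if 0
void example_load_image(const uint8_t *image, int image_size,
                        target_phys_addr_t load_addr)
{
    /* is_write = 1: copy from 'image' into guest physical memory,
       invalidating any translated code in the target range */
    cpu_physical_memory_rw(load_addr, (uint8_t *)image, image_size, 1);
}
#endif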
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

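/* Illustrative sketch (assumption, not from this file): this is the
   kind of call a debugger stub makes to read guest virtual memory,
   letting cpu_get_phys_page_debug() walk the guest page tables under
   the hood. */
#if 0
int example_gdb_read_memory(CPUState *env, target_ulong vaddr,
                            uint8_t *buf, int len)
{
    /* is_write = 0: read 'len' bytes at guest virtual address 'vaddr' */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif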
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif
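/* Illustrative note (assumption about the template mechanics): each
   inclusion of softmmu_template.h instantiates load/store helpers for
   one access size, selected by SHIFT (0 = byte, 1 = word, 2 = long,
   3 = quad), with MMUSUFFIX spliced into the generated names. The
   SHIFT 0 expansion here would declare helpers roughly like: */
#if 0
uint32_t __ldb_cmmu(unsigned long addr, int is_user);
void __stb_cmmu(unsigned long addr, uint32_t val, int is_user);
#endif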