54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
54936004
FB
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
67b915a5 20#include "config.h"
54936004
FB
21#include <stdlib.h>
22#include <stdio.h>
23#include <stdarg.h>
24#include <string.h>
25#include <errno.h>
26#include <unistd.h>
27#include <inttypes.h>
67b915a5 28#if !defined(CONFIG_SOFTMMU)
fd6ce8f6 29#include <sys/mman.h>
67b915a5 30#endif
54936004 31
6180a181
FB
32#include "cpu.h"
33#include "exec-all.h"
54936004 34
fd6ce8f6 35//#define DEBUG_TB_INVALIDATE
66e85a21 36//#define DEBUG_FLUSH
9fa3e853 37//#define DEBUG_TLB
fd6ce8f6
FB
38
39/* make various TB consistency checks */
40//#define DEBUG_TB_CHECK
98857888 41//#define DEBUG_TLB_CHECK
fd6ce8f6
FB
42
43/* threshold to flush the translated code buffer */
44#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
45
9fa3e853
FB
46#define SMC_BITMAP_USE_THRESHOLD 10
47
48#define MMAP_AREA_START 0x00000000
49#define MMAP_AREA_END 0xa8000000
fd6ce8f6
FB
50
51TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
52TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
9fa3e853 53TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
fd6ce8f6 54int nb_tbs;
eb51d102
FB
55/* any access to the tbs or the page table must use this lock */
56spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6
FB
57
58uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
59uint8_t *code_gen_ptr;
60
9fa3e853
FB
61int phys_ram_size;
62int phys_ram_fd;
63uint8_t *phys_ram_base;
1ccde1cb 64uint8_t *phys_ram_dirty;
9fa3e853 65
54936004 66typedef struct PageDesc {
92e873b9 67 /* list of TBs intersecting this ram page */
fd6ce8f6 68 TranslationBlock *first_tb;
9fa3e853
FB
69 /* in order to optimize self modifying code, we count the number
70 of write invalidations on a given page and use a bitmap past a threshold */
71 unsigned int code_write_count;
72 uint8_t *code_bitmap;
73#if defined(CONFIG_USER_ONLY)
74 unsigned long flags;
75#endif
54936004
FB
76} PageDesc;
77
92e873b9
FB
78typedef struct PhysPageDesc {
79 /* offset in host memory of the page + io_index in the low 12 bits */
80 unsigned long phys_offset;
81} PhysPageDesc;
82
9fa3e853
FB
83typedef struct VirtPageDesc {
84 /* physical address of code page. It is valid only if 'valid_tag'
85 matches 'virt_valid_tag' */
86 target_ulong phys_addr;
87 unsigned int valid_tag;
88#if !defined(CONFIG_SOFTMMU)
89 /* original page access rights. It is valid only if 'valid_tag'
90 matches 'virt_valid_tag' */
91 unsigned int prot;
92#endif
93} VirtPageDesc;
94
54936004
FB
95#define L2_BITS 10
96#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
97
98#define L1_SIZE (1 << L1_BITS)
99#define L2_SIZE (1 << L2_BITS)
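/* Note: with L2_BITS = 10 and the common TARGET_PAGE_BITS = 12, L1_BITS is
   also 10, so the two-level tables span the whole 32-bit target address
   space: L1_SIZE * L2_SIZE * TARGET_PAGE_SIZE = 2^32 bytes (TARGET_PAGE_BITS
   is target dependent, so the split shifts accordingly). */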
100
33417e70 101static void io_mem_init(void);
fd6ce8f6 102
54936004
FB
103unsigned long real_host_page_size;
104unsigned long host_page_bits;
105unsigned long host_page_size;
106unsigned long host_page_mask;
107
92e873b9 108/* XXX: for system emulation, it could just be an array */
54936004 109static PageDesc *l1_map[L1_SIZE];
92e873b9 110static PhysPageDesc *l1_phys_map[L1_SIZE];
54936004 111
9fa3e853
FB
112#if !defined(CONFIG_USER_ONLY)
113static VirtPageDesc *l1_virt_map[L1_SIZE];
114static unsigned int virt_valid_tag;
115#endif
116
33417e70 117/* io memory support */
33417e70
FB
118CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
119CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
120static int io_mem_nb;
121
34865134
FB
122/* log support */
123char *logfilename = "/tmp/qemu.log";
124FILE *logfile;
125int loglevel;
126
b346ff46 127static void page_init(void)
54936004
FB
128{
129 /* NOTE: we can always suppose that host_page_size >=
130 TARGET_PAGE_SIZE */
67b915a5
FB
131#ifdef _WIN32
132 real_host_page_size = 4096;
133#else
54936004 134 real_host_page_size = getpagesize();
67b915a5 135#endif
54936004
FB
136 if (host_page_size == 0)
137 host_page_size = real_host_page_size;
138 if (host_page_size < TARGET_PAGE_SIZE)
139 host_page_size = TARGET_PAGE_SIZE;
140 host_page_bits = 0;
141 while ((1 << host_page_bits) < host_page_size)
142 host_page_bits++;
143 host_page_mask = ~(host_page_size - 1);
9fa3e853
FB
144#if !defined(CONFIG_USER_ONLY)
145 virt_valid_tag = 1;
146#endif
54936004
FB
147}
148
fd6ce8f6 149static inline PageDesc *page_find_alloc(unsigned int index)
54936004 150{
54936004
FB
151 PageDesc **lp, *p;
152
54936004
FB
153 lp = &l1_map[index >> L2_BITS];
154 p = *lp;
155 if (!p) {
156 /* allocate if not found */
59817ccb 157 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
fd6ce8f6 158 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
54936004
FB
159 *lp = p;
160 }
161 return p + (index & (L2_SIZE - 1));
162}
163
fd6ce8f6 164static inline PageDesc *page_find(unsigned int index)
54936004 165{
54936004
FB
166 PageDesc *p;
167
54936004
FB
168 p = l1_map[index >> L2_BITS];
169 if (!p)
170 return 0;
fd6ce8f6
FB
171 return p + (index & (L2_SIZE - 1));
172}
173
92e873b9
FB
174static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
175{
176 PhysPageDesc **lp, *p;
177
178 lp = &l1_phys_map[index >> L2_BITS];
179 p = *lp;
180 if (!p) {
181 /* allocate if not found */
182 p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
183 memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
184 *lp = p;
185 }
186 return p + (index & (L2_SIZE - 1));
187}
188
189static inline PhysPageDesc *phys_page_find(unsigned int index)
190{
191 PhysPageDesc *p;
192
193 p = l1_phys_map[index >> L2_BITS];
194 if (!p)
195 return 0;
196 return p + (index & (L2_SIZE - 1));
197}
198
9fa3e853 199#if !defined(CONFIG_USER_ONLY)
4f2ac237
FB
200static void tlb_protect_code(CPUState *env, target_ulong addr);
201static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);
9fa3e853
FB
202
203static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
fd6ce8f6 204{
9fa3e853 205 VirtPageDesc **lp, *p;
fd6ce8f6 206
9fa3e853
FB
207 lp = &l1_virt_map[index >> L2_BITS];
208 p = *lp;
209 if (!p) {
210 /* allocate if not found */
59817ccb 211 p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
9fa3e853
FB
212 memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
213 *lp = p;
214 }
215 return p + (index & (L2_SIZE - 1));
216}
217
218static inline VirtPageDesc *virt_page_find(unsigned int index)
219{
220 VirtPageDesc *p;
221
222 p = l1_virt_map[index >> L2_BITS];
fd6ce8f6
FB
223 if (!p)
224 return 0;
9fa3e853 225 return p + (index & (L2_SIZE - 1));
54936004
FB
226}
227
9fa3e853 228static void virt_page_flush(void)
54936004 229{
9fa3e853
FB
230 int i, j;
231 VirtPageDesc *p;
232
233 virt_valid_tag++;
234
235 if (virt_valid_tag == 0) {
236 virt_valid_tag = 1;
237 for(i = 0; i < L1_SIZE; i++) {
238 p = l1_virt_map[i];
239 if (p) {
240 for(j = 0; j < L2_SIZE; j++)
241 p[j].valid_tag = 0;
242 }
fd6ce8f6 243 }
54936004
FB
244 }
245}
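/* Note on the tag scheme above: bumping virt_valid_tag invalidates every
   VirtPageDesc at once, since entries are only trusted when their valid_tag
   matches; the explicit clearing loop only runs on the rare wrap-around of
   the counter back to 0. */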
9fa3e853
FB
246#else
247static void virt_page_flush(void)
248{
249}
250#endif
fd6ce8f6 251
b346ff46 252void cpu_exec_init(void)
fd6ce8f6
FB
253{
254 if (!code_gen_ptr) {
255 code_gen_ptr = code_gen_buffer;
b346ff46 256 page_init();
33417e70 257 io_mem_init();
fd6ce8f6
FB
258 }
259}
260
9fa3e853
FB
261static inline void invalidate_page_bitmap(PageDesc *p)
262{
263 if (p->code_bitmap) {
59817ccb 264 qemu_free(p->code_bitmap);
9fa3e853
FB
265 p->code_bitmap = NULL;
266 }
267 p->code_write_count = 0;
268}
269
fd6ce8f6
FB
270/* set to NULL all the 'first_tb' fields in all PageDescs */
271static void page_flush_tb(void)
272{
273 int i, j;
274 PageDesc *p;
275
276 for(i = 0; i < L1_SIZE; i++) {
277 p = l1_map[i];
278 if (p) {
9fa3e853
FB
279 for(j = 0; j < L2_SIZE; j++) {
280 p->first_tb = NULL;
281 invalidate_page_bitmap(p);
282 p++;
283 }
fd6ce8f6
FB
284 }
285 }
286}
287
288/* flush all the translation blocks */
d4e8164f 289/* XXX: tb_flush is currently not thread safe */
0124311e 290void tb_flush(CPUState *env)
fd6ce8f6
FB
291{
292 int i;
0124311e 293#if defined(DEBUG_FLUSH)
fd6ce8f6
FB
294 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
295 code_gen_ptr - code_gen_buffer,
296 nb_tbs,
0124311e 297 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
fd6ce8f6
FB
298#endif
299 nb_tbs = 0;
300 for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
301 tb_hash[i] = NULL;
9fa3e853
FB
302 virt_page_flush();
303
304 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++)
305 tb_phys_hash[i] = NULL;
fd6ce8f6 306 page_flush_tb();
9fa3e853 307
fd6ce8f6 308 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
309 /* XXX: flush processor icache at this point if cache flush is
310 expensive */
fd6ce8f6
FB
311}
312
313#ifdef DEBUG_TB_CHECK
314
315static void tb_invalidate_check(unsigned long address)
316{
317 TranslationBlock *tb;
318 int i;
319 address &= TARGET_PAGE_MASK;
320 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
321 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
322 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
323 address >= tb->pc + tb->size)) {
324 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
325 address, tb->pc, tb->size);
326 }
327 }
328 }
329}
330
331/* verify that all the pages have correct rights for code */
332static void tb_page_check(void)
333{
334 TranslationBlock *tb;
335 int i, flags1, flags2;
336
337 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
338 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
339 flags1 = page_get_flags(tb->pc);
340 flags2 = page_get_flags(tb->pc + tb->size - 1);
341 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
342 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
343 tb->pc, tb->size, flags1, flags2);
344 }
345 }
346 }
347}
348
d4e8164f
FB
349void tb_jmp_check(TranslationBlock *tb)
350{
351 TranslationBlock *tb1;
352 unsigned int n1;
353
354 /* suppress any remaining jumps to this TB */
355 tb1 = tb->jmp_first;
356 for(;;) {
357 n1 = (long)tb1 & 3;
358 tb1 = (TranslationBlock *)((long)tb1 & ~3);
359 if (n1 == 2)
360 break;
361 tb1 = tb1->jmp_next[n1];
362 }
363 /* check end of list */
364 if (tb1 != tb) {
365 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
366 }
367}
368
fd6ce8f6
FB
369#endif
370
371/* invalidate one TB */
372static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
373 int next_offset)
374{
375 TranslationBlock *tb1;
376 for(;;) {
377 tb1 = *ptb;
378 if (tb1 == tb) {
379 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
380 break;
381 }
382 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
383 }
384}
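/* tb_remove() is a generic unlink for the intrusive singly-linked lists:
   'next_offset' is the offsetof() of the relevant next pointer inside
   TranslationBlock, so the same helper can walk either hash chain. */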
385
9fa3e853
FB
386static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
387{
388 TranslationBlock *tb1;
389 unsigned int n1;
390
391 for(;;) {
392 tb1 = *ptb;
393 n1 = (long)tb1 & 3;
394 tb1 = (TranslationBlock *)((long)tb1 & ~3);
395 if (tb1 == tb) {
396 *ptb = tb1->page_next[n1];
397 break;
398 }
399 ptb = &tb1->page_next[n1];
400 }
401}
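/* The page_next[] links are tagged pointers: the low 2 bits of each link
   record which of the TB's (at most two) pages the link belongs to, hence
   the masking with ~3 before the pointer is dereferenced. */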
402
d4e8164f
FB
403static inline void tb_jmp_remove(TranslationBlock *tb, int n)
404{
405 TranslationBlock *tb1, **ptb;
406 unsigned int n1;
407
408 ptb = &tb->jmp_next[n];
409 tb1 = *ptb;
410 if (tb1) {
411 /* find tb(n) in circular list */
412 for(;;) {
413 tb1 = *ptb;
414 n1 = (long)tb1 & 3;
415 tb1 = (TranslationBlock *)((long)tb1 & ~3);
416 if (n1 == n && tb1 == tb)
417 break;
418 if (n1 == 2) {
419 ptb = &tb1->jmp_first;
420 } else {
421 ptb = &tb1->jmp_next[n1];
422 }
423 }
424 /* now we can suppress tb(n) from the list */
425 *ptb = tb->jmp_next[n];
426
427 tb->jmp_next[n] = NULL;
428 }
429}
430
431/* reset the jump entry 'n' of a TB so that it is not chained to
432 another TB */
433static inline void tb_reset_jump(TranslationBlock *tb, int n)
434{
435 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
436}
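/* Interpretation: the patched jump is pointed back at the code that follows
   it inside the TB (tc_ptr + tb_next_offset[n]), so execution falls through
   to the exit stub and returns to the CPU loop instead of chaining into
   another TB. */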
437
9fa3e853 438static inline void tb_invalidate(TranslationBlock *tb)
fd6ce8f6 439{
d4e8164f 440 unsigned int h, n1;
9fa3e853 441 TranslationBlock *tb1, *tb2, **ptb;
d4e8164f 442
36bdbe54 443 tb_invalidated_flag = 1;
59817ccb 444
fd6ce8f6
FB
445 /* remove the TB from the hash list */
446 h = tb_hash_func(tb->pc);
9fa3e853
FB
447 ptb = &tb_hash[h];
448 for(;;) {
449 tb1 = *ptb;
450 /* NOTE: the TB is not necessarily linked in the hash. It
451 indicates that it is not currently used */
452 if (tb1 == NULL)
453 return;
454 if (tb1 == tb) {
455 *ptb = tb1->hash_next;
456 break;
457 }
458 ptb = &tb1->hash_next;
fd6ce8f6 459 }
d4e8164f
FB
460
461 /* suppress this TB from the two jump lists */
462 tb_jmp_remove(tb, 0);
463 tb_jmp_remove(tb, 1);
464
465 /* suppress any remaining jumps to this TB */
466 tb1 = tb->jmp_first;
467 for(;;) {
468 n1 = (long)tb1 & 3;
469 if (n1 == 2)
470 break;
471 tb1 = (TranslationBlock *)((long)tb1 & ~3);
472 tb2 = tb1->jmp_next[n1];
473 tb_reset_jump(tb1, n1);
474 tb1->jmp_next[n1] = NULL;
475 tb1 = tb2;
476 }
477 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
fd6ce8f6
FB
478}
479
9fa3e853 480static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
fd6ce8f6 481{
fd6ce8f6 482 PageDesc *p;
9fa3e853
FB
483 unsigned int h;
484 target_ulong phys_pc;
485
486 /* remove the TB from the hash list */
487 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
488 h = tb_phys_hash_func(phys_pc);
489 tb_remove(&tb_phys_hash[h], tb,
490 offsetof(TranslationBlock, phys_hash_next));
491
492 /* remove the TB from the page list */
493 if (tb->page_addr[0] != page_addr) {
494 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
495 tb_page_remove(&p->first_tb, tb);
496 invalidate_page_bitmap(p);
497 }
498 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
499 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
500 tb_page_remove(&p->first_tb, tb);
501 invalidate_page_bitmap(p);
502 }
503
504 tb_invalidate(tb);
505}
506
507static inline void set_bits(uint8_t *tab, int start, int len)
508{
509 int end, mask, end1;
510
511 end = start + len;
512 tab += start >> 3;
513 mask = 0xff << (start & 7);
514 if ((start & ~7) == (end & ~7)) {
515 if (start < end) {
516 mask &= ~(0xff << (end & 7));
517 *tab |= mask;
518 }
519 } else {
520 *tab++ |= mask;
521 start = (start + 8) & ~7;
522 end1 = end & ~7;
523 while (start < end1) {
524 *tab++ = 0xff;
525 start += 8;
526 }
527 if (start < end) {
528 mask = ~(0xff << (end & 7));
529 *tab |= mask;
530 }
531 }
532}
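/* Worked example: set_bits(tab, 3, 7) marks bits 3..9, i.e. it ORs 0xf8 into
   tab[0] and 0x03 into tab[1]; the first branch only triggers when start and
   end fall within the same byte. */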
533
534static void build_page_bitmap(PageDesc *p)
535{
536 int n, tb_start, tb_end;
537 TranslationBlock *tb;
538
59817ccb 539 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
540 if (!p->code_bitmap)
541 return;
542 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
543
544 tb = p->first_tb;
545 while (tb != NULL) {
546 n = (long)tb & 3;
547 tb = (TranslationBlock *)((long)tb & ~3);
548 /* NOTE: this is subtle as a TB may span two physical pages */
549 if (n == 0) {
550 /* NOTE: tb_end may be after the end of the page, but
551 it is not a problem */
552 tb_start = tb->pc & ~TARGET_PAGE_MASK;
553 tb_end = tb_start + tb->size;
554 if (tb_end > TARGET_PAGE_SIZE)
555 tb_end = TARGET_PAGE_SIZE;
556 } else {
557 tb_start = 0;
558 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
559 }
560 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
561 tb = tb->page_next[n];
562 }
563}
564
d720b93d
FB
565#ifdef TARGET_HAS_PRECISE_SMC
566
567static void tb_gen_code(CPUState *env,
568 target_ulong pc, target_ulong cs_base, int flags,
569 int cflags)
570{
571 TranslationBlock *tb;
572 uint8_t *tc_ptr;
573 target_ulong phys_pc, phys_page2, virt_page2;
574 int code_gen_size;
575
576 phys_pc = get_phys_addr_code(env, (unsigned long)pc);
577 tb = tb_alloc((unsigned long)pc);
578 if (!tb) {
579 /* flush must be done */
580 tb_flush(env);
581 /* cannot fail at this point */
582 tb = tb_alloc((unsigned long)pc);
583 }
584 tc_ptr = code_gen_ptr;
585 tb->tc_ptr = tc_ptr;
586 tb->cs_base = cs_base;
587 tb->flags = flags;
588 tb->cflags = cflags;
589 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
590 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
591
592 /* check next page if needed */
593 virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
594 phys_page2 = -1;
595 if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
596 phys_page2 = get_phys_addr_code(env, virt_page2);
597 }
598 tb_link_phys(tb, phys_pc, phys_page2);
599}
600#endif
601
9fa3e853
FB
602/* invalidate all TBs which intersect with the target physical page
603 starting in range [start;end[. NOTE: start and end must refer to
604 the same physical page. 'is_cpu_write_access' should be true if called
605 from a real cpu write access: the virtual CPU will exit the current
606 TB if code is modified inside this TB. */
607void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
608 int is_cpu_write_access)
609{
610 int n, current_tb_modified, current_tb_not_found, current_flags;
611#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
612 CPUState *env = cpu_single_env;
613#endif
9fa3e853 614 PageDesc *p;
d720b93d 615 TranslationBlock *tb, *tb_next, *current_tb;
9fa3e853 616 target_ulong tb_start, tb_end;
d720b93d 617 target_ulong current_pc, current_cs_base;
9fa3e853
FB
618
619 p = page_find(start >> TARGET_PAGE_BITS);
620 if (!p)
621 return;
622 if (!p->code_bitmap &&
623 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
624 is_cpu_write_access) {
9fa3e853
FB
625 /* build code bitmap */
626 build_page_bitmap(p);
627 }
628
629 /* we remove all the TBs in the range [start, end[ */
630 /* XXX: see if in some cases it could be faster to invalidate all the code */
d720b93d
FB
631 current_tb_not_found = is_cpu_write_access;
632 current_tb_modified = 0;
633 current_tb = NULL; /* avoid warning */
634 current_pc = 0; /* avoid warning */
635 current_cs_base = 0; /* avoid warning */
636 current_flags = 0; /* avoid warning */
9fa3e853
FB
637 tb = p->first_tb;
638 while (tb != NULL) {
639 n = (long)tb & 3;
640 tb = (TranslationBlock *)((long)tb & ~3);
641 tb_next = tb->page_next[n];
642 /* NOTE: this is subtle as a TB may span two physical pages */
643 if (n == 0) {
644 /* NOTE: tb_end may be after the end of the page, but
645 it is not a problem */
646 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
647 tb_end = tb_start + tb->size;
648 } else {
649 tb_start = tb->page_addr[1];
650 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
651 }
652 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
653#ifdef TARGET_HAS_PRECISE_SMC
654 if (current_tb_not_found) {
655 current_tb_not_found = 0;
656 current_tb = NULL;
657 if (env->mem_write_pc) {
658 /* now we have a real cpu fault */
659 current_tb = tb_find_pc(env->mem_write_pc);
660 }
661 }
662 if (current_tb == tb &&
663 !(current_tb->cflags & CF_SINGLE_INSN)) {
664 /* If we are modifying the current TB, we must stop
665 its execution. We could be more precise by checking
666 that the modification is after the current PC, but it
667 would require a specialized function to partially
668 restore the CPU state */
669
670 current_tb_modified = 1;
671 cpu_restore_state(current_tb, env,
672 env->mem_write_pc, NULL);
673#if defined(TARGET_I386)
674 current_flags = env->hflags;
675 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
676 current_cs_base = (target_ulong)env->segs[R_CS].base;
677 current_pc = current_cs_base + env->eip;
678#else
679#error unsupported CPU
680#endif
681 }
682#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
683 tb_phys_invalidate(tb, -1);
684 }
685 tb = tb_next;
686 }
687#if !defined(CONFIG_USER_ONLY)
688 /* if no code remaining, no need to continue to use slow writes */
689 if (!p->first_tb) {
690 invalidate_page_bitmap(p);
d720b93d
FB
691 if (is_cpu_write_access) {
692 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
693 }
694 }
695#endif
696#ifdef TARGET_HAS_PRECISE_SMC
697 if (current_tb_modified) {
698 /* we generate a block containing just the instruction
699 modifying the memory. It will ensure that it cannot modify
700 itself */
701 tb_gen_code(env, current_pc, current_cs_base, current_flags,
702 CF_SINGLE_INSN);
703 cpu_resume_from_signal(env, NULL);
9fa3e853 704 }
fd6ce8f6 705#endif
9fa3e853 706}
fd6ce8f6 707
9fa3e853 708/* len must be <= 8 and start must be a multiple of len */
d720b93d 709static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
9fa3e853
FB
710{
711 PageDesc *p;
712 int offset, b;
59817ccb
FB
713#if 0
714 if (cpu_single_env->cr[0] & CR0_PE_MASK) {
715 printf("modifying code at 0x%x size=%d EIP=%x\n",
716 (vaddr & TARGET_PAGE_MASK) | (start & ~TARGET_PAGE_MASK), len,
717 cpu_single_env->eip);
718 }
719#endif
9fa3e853
FB
720 p = page_find(start >> TARGET_PAGE_BITS);
721 if (!p)
722 return;
723 if (p->code_bitmap) {
724 offset = start & ~TARGET_PAGE_MASK;
725 b = p->code_bitmap[offset >> 3] >> (offset & 7);
726 if (b & ((1 << len) - 1))
727 goto do_invalidate;
728 } else {
729 do_invalidate:
d720b93d 730 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
731 }
732}
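/* The 'len <= 8, start multiple of len' contract lets the bitmap probe above
   read a single byte: the len bits of interest can never straddle a byte
   boundary, so one shift and mask tells whether translated code is hit. */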
733
9fa3e853 734#if !defined(CONFIG_SOFTMMU)
d720b93d
FB
735static void tb_invalidate_phys_page(target_ulong addr,
736 unsigned long pc, void *puc)
9fa3e853 737{
d720b93d
FB
738 int n, current_flags, current_tb_modified;
739 target_ulong current_pc, current_cs_base;
9fa3e853 740 PageDesc *p;
d720b93d
FB
741 TranslationBlock *tb, *current_tb;
742#ifdef TARGET_HAS_PRECISE_SMC
743 CPUState *env = cpu_single_env;
744#endif
9fa3e853
FB
745
746 addr &= TARGET_PAGE_MASK;
747 p = page_find(addr >> TARGET_PAGE_BITS);
748 if (!p)
749 return;
750 tb = p->first_tb;
d720b93d
FB
751 current_tb_modified = 0;
752 current_tb = NULL;
753 current_pc = 0; /* avoid warning */
754 current_cs_base = 0; /* avoid warning */
755 current_flags = 0; /* avoid warning */
756#ifdef TARGET_HAS_PRECISE_SMC
757 if (tb && pc != 0) {
758 current_tb = tb_find_pc(pc);
759 }
760#endif
9fa3e853
FB
761 while (tb != NULL) {
762 n = (long)tb & 3;
763 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
764#ifdef TARGET_HAS_PRECISE_SMC
765 if (current_tb == tb &&
766 !(current_tb->cflags & CF_SINGLE_INSN)) {
767 /* If we are modifying the current TB, we must stop
768 its execution. We could be more precise by checking
769 that the modification is after the current PC, but it
770 would require a specialized function to partially
771 restore the CPU state */
772
773 current_tb_modified = 1;
774 cpu_restore_state(current_tb, env, pc, puc);
775#if defined(TARGET_I386)
776 current_flags = env->hflags;
777 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
778 current_cs_base = (target_ulong)env->segs[R_CS].base;
779 current_pc = current_cs_base + env->eip;
780#else
781#error unsupported CPU
782#endif
783 }
784#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
785 tb_phys_invalidate(tb, addr);
786 tb = tb->page_next[n];
787 }
fd6ce8f6 788 p->first_tb = NULL;
d720b93d
FB
789#ifdef TARGET_HAS_PRECISE_SMC
790 if (current_tb_modified) {
791 /* we generate a block containing just the instruction
792 modifying the memory. It will ensure that it cannot modify
793 itself */
794 tb_gen_code(env, current_pc, current_cs_base, current_flags,
795 CF_SINGLE_INSN);
796 cpu_resume_from_signal(env, puc);
797 }
798#endif
fd6ce8f6 799}
9fa3e853 800#endif
fd6ce8f6
FB
801
802/* add the tb in the target page and protect it if necessary */
9fa3e853
FB
803static inline void tb_alloc_page(TranslationBlock *tb,
804 unsigned int n, unsigned int page_addr)
fd6ce8f6
FB
805{
806 PageDesc *p;
9fa3e853
FB
807 TranslationBlock *last_first_tb;
808
809 tb->page_addr[n] = page_addr;
810 p = page_find(page_addr >> TARGET_PAGE_BITS);
811 tb->page_next[n] = p->first_tb;
812 last_first_tb = p->first_tb;
813 p->first_tb = (TranslationBlock *)((long)tb | n);
814 invalidate_page_bitmap(p);
fd6ce8f6 815
d720b93d
FB
816#ifdef TARGET_HAS_SMC
817
9fa3e853 818#if defined(CONFIG_USER_ONLY)
fd6ce8f6 819 if (p->flags & PAGE_WRITE) {
9fa3e853
FB
820 unsigned long host_start, host_end, addr;
821 int prot;
822
fd6ce8f6
FB
823 /* force the host page as non writable (writes will have a
824 page fault + mprotect overhead) */
fd6ce8f6
FB
825 host_start = page_addr & host_page_mask;
826 host_end = host_start + host_page_size;
827 prot = 0;
828 for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
829 prot |= page_get_flags(addr);
830 mprotect((void *)host_start, host_page_size,
831 (prot & PAGE_BITS) & ~PAGE_WRITE);
832#ifdef DEBUG_TB_INVALIDATE
833 printf("protecting code page: 0x%08lx\n",
834 host_start);
835#endif
836 p->flags &= ~PAGE_WRITE;
fd6ce8f6 837 }
9fa3e853
FB
838#else
839 /* if some code is already present, then the pages are already
840 protected. So we handle the case where only the first TB is
841 allocated in a physical page */
842 if (!last_first_tb) {
843 target_ulong virt_addr;
844
845 virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
846 tlb_protect_code(cpu_single_env, virt_addr);
847 }
848#endif
d720b93d
FB
849
850#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
851}
852
853/* Allocate a new translation block. Flush the translation buffer if
854 too many translation blocks or too much generated code. */
d4e8164f 855TranslationBlock *tb_alloc(unsigned long pc)
fd6ce8f6
FB
856{
857 TranslationBlock *tb;
fd6ce8f6
FB
858
859 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
860 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
d4e8164f 861 return NULL;
fd6ce8f6
FB
862 tb = &tbs[nb_tbs++];
863 tb->pc = pc;
b448f2f3 864 tb->cflags = 0;
d4e8164f
FB
865 return tb;
866}
867
9fa3e853
FB
868/* add a new TB and link it to the physical page tables. phys_page2 is
869 (-1) to indicate that only one page contains the TB. */
870void tb_link_phys(TranslationBlock *tb,
871 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 872{
9fa3e853
FB
873 unsigned int h;
874 TranslationBlock **ptb;
875
876 /* add in the physical hash table */
877 h = tb_phys_hash_func(phys_pc);
878 ptb = &tb_phys_hash[h];
879 tb->phys_hash_next = *ptb;
880 *ptb = tb;
fd6ce8f6
FB
881
882 /* add in the page list */
9fa3e853
FB
883 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
884 if (phys_page2 != -1)
885 tb_alloc_page(tb, 1, phys_page2);
886 else
887 tb->page_addr[1] = -1;
61382a50
FB
888#ifdef DEBUG_TB_CHECK
889 tb_page_check();
890#endif
9fa3e853
FB
891}
892
893/* link the tb with the other TBs */
894void tb_link(TranslationBlock *tb)
895{
896#if !defined(CONFIG_USER_ONLY)
897 {
898 VirtPageDesc *vp;
899 target_ulong addr;
900
901 /* save the code memory mappings (needed to invalidate the code) */
902 addr = tb->pc & TARGET_PAGE_MASK;
903 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
98857888
FB
904#ifdef DEBUG_TLB_CHECK
905 if (vp->valid_tag == virt_valid_tag &&
906 vp->phys_addr != tb->page_addr[0]) {
907 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
908 addr, tb->page_addr[0], vp->phys_addr);
909 }
910#endif
9fa3e853 911 vp->phys_addr = tb->page_addr[0];
59817ccb
FB
912 if (vp->valid_tag != virt_valid_tag) {
913 vp->valid_tag = virt_valid_tag;
914#if !defined(CONFIG_SOFTMMU)
915 vp->prot = 0;
916#endif
917 }
9fa3e853
FB
918
919 if (tb->page_addr[1] != -1) {
920 addr += TARGET_PAGE_SIZE;
921 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
98857888
FB
922#ifdef DEBUG_TLB_CHECK
923 if (vp->valid_tag == virt_valid_tag &&
924 vp->phys_addr != tb->page_addr[1]) {
925 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
926 addr, tb->page_addr[1], vp->phys_addr);
927 }
928#endif
9fa3e853 929 vp->phys_addr = tb->page_addr[1];
59817ccb
FB
930 if (vp->valid_tag != virt_valid_tag) {
931 vp->valid_tag = virt_valid_tag;
932#if !defined(CONFIG_SOFTMMU)
933 vp->prot = 0;
934#endif
935 }
9fa3e853
FB
936 }
937 }
938#endif
939
d4e8164f
FB
940 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
941 tb->jmp_next[0] = NULL;
942 tb->jmp_next[1] = NULL;
b448f2f3
FB
943#ifdef USE_CODE_COPY
944 tb->cflags &= ~CF_FP_USED;
945 if (tb->cflags & CF_TB_FP_USED)
946 tb->cflags |= CF_FP_USED;
947#endif
d4e8164f
FB
948
949 /* init original jump addresses */
950 if (tb->tb_next_offset[0] != 0xffff)
951 tb_reset_jump(tb, 0);
952 if (tb->tb_next_offset[1] != 0xffff)
953 tb_reset_jump(tb, 1);
fd6ce8f6
FB
954}
955
9fa3e853
FB
956/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
957 tb[1].tc_ptr. Return NULL if not found */
958TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 959{
9fa3e853
FB
960 int m_min, m_max, m;
961 unsigned long v;
962 TranslationBlock *tb;
a513fe19
FB
963
964 if (nb_tbs <= 0)
965 return NULL;
966 if (tc_ptr < (unsigned long)code_gen_buffer ||
967 tc_ptr >= (unsigned long)code_gen_ptr)
968 return NULL;
969 /* binary search (cf Knuth) */
970 m_min = 0;
971 m_max = nb_tbs - 1;
972 while (m_min <= m_max) {
973 m = (m_min + m_max) >> 1;
974 tb = &tbs[m];
975 v = (unsigned long)tb->tc_ptr;
976 if (v == tc_ptr)
977 return tb;
978 else if (tc_ptr < v) {
979 m_max = m - 1;
980 } else {
981 m_min = m + 1;
982 }
983 }
984 return &tbs[m_max];
985}
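/* The binary search relies on tbs[] being filled in allocation order while
   tc_ptr values are carved sequentially out of code_gen_buffer, so tc_ptr is
   monotonically increasing with the index (until the next tb_flush). */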
7501267e 986
ea041c0e
FB
987static void tb_reset_jump_recursive(TranslationBlock *tb);
988
989static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
990{
991 TranslationBlock *tb1, *tb_next, **ptb;
992 unsigned int n1;
993
994 tb1 = tb->jmp_next[n];
995 if (tb1 != NULL) {
996 /* find head of list */
997 for(;;) {
998 n1 = (long)tb1 & 3;
999 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1000 if (n1 == 2)
1001 break;
1002 tb1 = tb1->jmp_next[n1];
1003 }
1004 /* we are now sure that tb jumps to tb1 */
1005 tb_next = tb1;
1006
1007 /* remove tb from the jmp_first list */
1008 ptb = &tb_next->jmp_first;
1009 for(;;) {
1010 tb1 = *ptb;
1011 n1 = (long)tb1 & 3;
1012 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1013 if (n1 == n && tb1 == tb)
1014 break;
1015 ptb = &tb1->jmp_next[n1];
1016 }
1017 *ptb = tb->jmp_next[n];
1018 tb->jmp_next[n] = NULL;
1019
1020 /* suppress the jump to next tb in generated code */
1021 tb_reset_jump(tb, n);
1022
0124311e 1023 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1024 tb_reset_jump_recursive(tb_next);
1025 }
1026}
1027
1028static void tb_reset_jump_recursive(TranslationBlock *tb)
1029{
1030 tb_reset_jump_recursive2(tb, 0);
1031 tb_reset_jump_recursive2(tb, 1);
1032}
1033
d720b93d
FB
1034static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1035{
1036 target_ulong phys_addr;
1037
1038 phys_addr = cpu_get_phys_page_debug(env, pc);
1039 tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
1040}
1041
c33a346e
FB
1042/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1043 breakpoint is reached */
2e12669a 1044int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
4c3a88a2 1045{
a541f297 1046#if defined(TARGET_I386) || defined(TARGET_PPC)
4c3a88a2 1047 int i;
d720b93d 1048
4c3a88a2
FB
1049 for(i = 0; i < env->nb_breakpoints; i++) {
1050 if (env->breakpoints[i] == pc)
1051 return 0;
1052 }
1053
1054 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1055 return -1;
1056 env->breakpoints[env->nb_breakpoints++] = pc;
d720b93d
FB
1057
1058 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1059 return 0;
1060#else
1061 return -1;
1062#endif
1063}
1064
1065/* remove a breakpoint */
2e12669a 1066int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
4c3a88a2 1067{
a541f297 1068#if defined(TARGET_I386) || defined(TARGET_PPC)
4c3a88a2
FB
1069 int i;
1070 for(i = 0; i < env->nb_breakpoints; i++) {
1071 if (env->breakpoints[i] == pc)
1072 goto found;
1073 }
1074 return -1;
1075 found:
1076 memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
1077 (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
1078 env->nb_breakpoints--;
d720b93d
FB
1079
1080 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1081 return 0;
1082#else
1083 return -1;
1084#endif
1085}
1086
c33a346e
FB
1087/* enable or disable single step mode. EXCP_DEBUG is returned by the
1088 CPU loop after each instruction */
1089void cpu_single_step(CPUState *env, int enabled)
1090{
a541f297 1091#if defined(TARGET_I386) || defined(TARGET_PPC)
c33a346e
FB
1092 if (env->singlestep_enabled != enabled) {
1093 env->singlestep_enabled = enabled;
1094 /* must flush all the translated code to avoid inconsistencies */
9fa3e853 1095 /* XXX: only flush what is necessary */
0124311e 1096 tb_flush(env);
c33a346e
FB
1097 }
1098#endif
1099}
1100
34865134
FB
1101/* enable or disable low levels log */
1102void cpu_set_log(int log_flags)
1103{
1104 loglevel = log_flags;
1105 if (loglevel && !logfile) {
1106 logfile = fopen(logfilename, "w");
1107 if (!logfile) {
1108 perror(logfilename);
1109 _exit(1);
1110 }
9fa3e853
FB
1111#if !defined(CONFIG_SOFTMMU)
1112 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1113 {
1114 static uint8_t logfile_buf[4096];
1115 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1116 }
1117#else
34865134 1118 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1119#endif
34865134
FB
1120 }
1121}
1122
1123void cpu_set_log_filename(const char *filename)
1124{
1125 logfilename = strdup(filename);
1126}
c33a346e 1127
0124311e 1128/* mask must never be zero, except for A20 change call */
68a79315 1129void cpu_interrupt(CPUState *env, int mask)
ea041c0e
FB
1130{
1131 TranslationBlock *tb;
ee8b7021 1132 static int interrupt_lock;
59817ccb 1133
68a79315 1134 env->interrupt_request |= mask;
ea041c0e
FB
1135 /* if the cpu is currently executing code, we must unlink it and
1136 all the potentially executing TB */
1137 tb = env->current_tb;
ee8b7021
FB
1138 if (tb && !testandset(&interrupt_lock)) {
1139 env->current_tb = NULL;
ea041c0e 1140 tb_reset_jump_recursive(tb);
ee8b7021 1141 interrupt_lock = 0;
ea041c0e
FB
1142 }
1143}
1144
b54ad049
FB
1145void cpu_reset_interrupt(CPUState *env, int mask)
1146{
1147 env->interrupt_request &= ~mask;
1148}
1149
f193c797
FB
1150CPULogItem cpu_log_items[] = {
1151 { CPU_LOG_TB_OUT_ASM, "out_asm",
1152 "show generated host assembly code for each compiled TB" },
1153 { CPU_LOG_TB_IN_ASM, "in_asm",
1154 "show target assembly code for each compiled TB" },
1155 { CPU_LOG_TB_OP, "op",
1156 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1157#ifdef TARGET_I386
1158 { CPU_LOG_TB_OP_OPT, "op_opt",
1159 "show micro ops after optimization for each compiled TB" },
1160#endif
1161 { CPU_LOG_INT, "int",
1162 "show interrupts/exceptions in short format" },
1163 { CPU_LOG_EXEC, "exec",
1164 "show trace before each executed TB (lots of logs)" },
9fddaa0c
FB
1165 { CPU_LOG_TB_CPU, "cpu",
1166 "show CPU state before bloc translation" },
f193c797
FB
1167#ifdef TARGET_I386
1168 { CPU_LOG_PCALL, "pcall",
1169 "show protected mode far calls/returns/exceptions" },
1170#endif
fd872598
FB
1171 { CPU_LOG_IOPORT, "ioport",
1172 "show all i/o ports accesses" },
f193c797
FB
1173 { 0, NULL, NULL },
1174};
1175
1176static int cmp1(const char *s1, int n, const char *s2)
1177{
1178 if (strlen(s2) != n)
1179 return 0;
1180 return memcmp(s1, s2, n) == 0;
1181}
1182
1183/* takes a comma separated list of log masks. Return 0 if error. */
1184int cpu_str_to_log_mask(const char *str)
1185{
1186 CPULogItem *item;
1187 int mask;
1188 const char *p, *p1;
1189
1190 p = str;
1191 mask = 0;
1192 for(;;) {
1193 p1 = strchr(p, ',');
1194 if (!p1)
1195 p1 = p + strlen(p);
1196 for(item = cpu_log_items; item->mask != 0; item++) {
1197 if (cmp1(p, p1 - p, item->name))
1198 goto found;
1199 }
1200 return 0;
1201 found:
1202 mask |= item->mask;
1203 if (*p1 != ',')
1204 break;
1205 p = p1 + 1;
1206 }
1207 return mask;
1208}
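/* Usage sketch (hypothetical caller): cpu_str_to_log_mask("in_asm,op")
   returns CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, while any unknown name in the
   comma separated list makes the whole call return 0. */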
ea041c0e 1209
7501267e
FB
1210void cpu_abort(CPUState *env, const char *fmt, ...)
1211{
1212 va_list ap;
1213
1214 va_start(ap, fmt);
1215 fprintf(stderr, "qemu: fatal: ");
1216 vfprintf(stderr, fmt, ap);
1217 fprintf(stderr, "\n");
1218#ifdef TARGET_I386
1219 cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
1220#endif
1221 va_end(ap);
1222 abort();
1223}
1224
0124311e
FB
1225#if !defined(CONFIG_USER_ONLY)
1226
ee8b7021
FB
1227/* NOTE: if flush_global is true, also flush global entries (not
1228 implemented yet) */
1229void tlb_flush(CPUState *env, int flush_global)
33417e70 1230{
33417e70 1231 int i;
0124311e 1232
9fa3e853
FB
1233#if defined(DEBUG_TLB)
1234 printf("tlb_flush:\n");
1235#endif
0124311e
FB
1236 /* must reset current TB so that interrupts cannot modify the
1237 links while we are modifying them */
1238 env->current_tb = NULL;
1239
33417e70
FB
1240 for(i = 0; i < CPU_TLB_SIZE; i++) {
1241 env->tlb_read[0][i].address = -1;
1242 env->tlb_write[0][i].address = -1;
1243 env->tlb_read[1][i].address = -1;
1244 env->tlb_write[1][i].address = -1;
1245 }
9fa3e853
FB
1246
1247 virt_page_flush();
1248 for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
1249 tb_hash[i] = NULL;
1250
1251#if !defined(CONFIG_SOFTMMU)
1252 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1253#endif
33417e70
FB
1254}
1255
274da6b2 1256static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50
FB
1257{
1258 if (addr == (tlb_entry->address &
1259 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
1260 tlb_entry->address = -1;
1261}
1262
2e12669a 1263void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1264{
9fa3e853
FB
1265 int i, n;
1266 VirtPageDesc *vp;
1267 PageDesc *p;
1268 TranslationBlock *tb;
0124311e 1269
9fa3e853
FB
1270#if defined(DEBUG_TLB)
1271 printf("tlb_flush_page: 0x%08x\n", addr);
1272#endif
0124311e
FB
1273 /* must reset current TB so that interrupts cannot modify the
1274 links while we are modifying them */
1275 env->current_tb = NULL;
61382a50
FB
1276
1277 addr &= TARGET_PAGE_MASK;
1278 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1279 tlb_flush_entry(&env->tlb_read[0][i], addr);
1280 tlb_flush_entry(&env->tlb_write[0][i], addr);
1281 tlb_flush_entry(&env->tlb_read[1][i], addr);
1282 tlb_flush_entry(&env->tlb_write[1][i], addr);
0124311e 1283
9fa3e853
FB
1284 /* remove from the virtual pc hash table all the TB at this
1285 virtual address */
1286
1287 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1288 if (vp && vp->valid_tag == virt_valid_tag) {
1289 p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
1290 if (p) {
1291 /* we remove all the links to the TBs in this virtual page */
1292 tb = p->first_tb;
1293 while (tb != NULL) {
1294 n = (long)tb & 3;
1295 tb = (TranslationBlock *)((long)tb & ~3);
1296 if ((tb->pc & TARGET_PAGE_MASK) == addr ||
1297 ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
1298 tb_invalidate(tb);
1299 }
1300 tb = tb->page_next[n];
1301 }
1302 }
98857888 1303 vp->valid_tag = 0;
9fa3e853
FB
1304 }
1305
0124311e 1306#if !defined(CONFIG_SOFTMMU)
9fa3e853 1307 if (addr < MMAP_AREA_END)
0124311e 1308 munmap((void *)addr, TARGET_PAGE_SIZE);
61382a50 1309#endif
9fa3e853
FB
1310}
1311
4f2ac237 1312static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
9fa3e853
FB
1313{
1314 if (addr == (tlb_entry->address &
1315 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1316 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
1317 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
1ccde1cb 1318 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
9fa3e853
FB
1319 }
1320}
1321
1322/* update the TLBs so that writes to code in the virtual page 'addr'
1323 can be detected */
4f2ac237 1324static void tlb_protect_code(CPUState *env, target_ulong addr)
9fa3e853
FB
1325{
1326 int i;
1327
1328 addr &= TARGET_PAGE_MASK;
1329 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1330 tlb_protect_code1(&env->tlb_write[0][i], addr);
1331 tlb_protect_code1(&env->tlb_write[1][i], addr);
1332#if !defined(CONFIG_SOFTMMU)
1333 /* NOTE: as we generated the code for this page, it is already at
1334 least readable */
1335 if (addr < MMAP_AREA_END)
1336 mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
1337#endif
1338}
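/* Sketch of the mechanism: the write TLB entries for 'addr' are redirected to
   the IO_MEM_CODE handler, so a store into a page holding translated code
   traps into code_mem_write*() and the affected TBs can be invalidated before
   the write is performed. */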
1339
9fa3e853 1340static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
4f2ac237 1341 unsigned long phys_addr)
9fa3e853
FB
1342{
1343 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
1344 ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
1ccde1cb 1345 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
9fa3e853
FB
1346 }
1347}
1348
1349/* update the TLB so that writes in physical page 'phys_addr' are no longer
1350 tested for self modifying code */
4f2ac237 1351static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
9fa3e853
FB
1352{
1353 int i;
1354
1355 phys_addr &= TARGET_PAGE_MASK;
1ccde1cb
FB
1356 phys_addr += (long)phys_ram_base;
1357 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1358 tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
1359 tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
1360}
1361
1362static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1363 unsigned long start, unsigned long length)
1364{
1365 unsigned long addr;
1366 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1367 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1368 if ((addr - start) < length) {
1369 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1370 }
1371 }
1372}
1373
1374void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
1375{
1376 CPUState *env;
4f2ac237 1377 unsigned long length, start1;
1ccde1cb
FB
1378 int i;
1379
1380 start &= TARGET_PAGE_MASK;
1381 end = TARGET_PAGE_ALIGN(end);
1382
1383 length = end - start;
1384 if (length == 0)
1385 return;
1386 memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);
1387
1388 env = cpu_single_env;
1389 /* we modify the TLB cache so that the dirty bit will be set again
1390 when accessing the range */
59817ccb 1391 start1 = start + (unsigned long)phys_ram_base;
9fa3e853 1392 for(i = 0; i < CPU_TLB_SIZE; i++)
59817ccb 1393 tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
9fa3e853 1394 for(i = 0; i < CPU_TLB_SIZE; i++)
59817ccb
FB
1395 tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
1396
1397#if !defined(CONFIG_SOFTMMU)
1398 /* XXX: this is expensive */
1399 {
1400 VirtPageDesc *p;
1401 int j;
1402 target_ulong addr;
1403
1404 for(i = 0; i < L1_SIZE; i++) {
1405 p = l1_virt_map[i];
1406 if (p) {
1407 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1408 for(j = 0; j < L2_SIZE; j++) {
1409 if (p->valid_tag == virt_valid_tag &&
1410 p->phys_addr >= start && p->phys_addr < end &&
1411 (p->prot & PROT_WRITE)) {
1412 if (addr < MMAP_AREA_END) {
1413 mprotect((void *)addr, TARGET_PAGE_SIZE,
1414 p->prot & ~PROT_WRITE);
1415 }
1416 }
1417 addr += TARGET_PAGE_SIZE;
1418 p++;
1419 }
1420 }
1421 }
1422 }
1423#endif
1ccde1cb
FB
1424}
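/* After the reset, matching RAM write entries are downgraded to
   IO_MEM_NOTDIRTY, so the next store to such a page goes through
   notdirty_mem_write*(), which performs the store and re-sets the dirty byte
   via tlb_set_dirty(). */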
1425
1426static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1427 unsigned long start)
1428{
1429 unsigned long addr;
1430 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1431 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1432 if (addr == start) {
1433 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
1434 }
1435 }
1436}
1437
1438/* update the TLB corresponding to virtual page vaddr and phys addr
1439 addr so that it is no longer dirty */
1440static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1441{
1442 CPUState *env = cpu_single_env;
1443 int i;
1444
1445 phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;
1446
1447 addr &= TARGET_PAGE_MASK;
1448 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1449 tlb_set_dirty1(&env->tlb_write[0][i], addr);
1450 tlb_set_dirty1(&env->tlb_write[1][i], addr);
9fa3e853
FB
1451}
1452
59817ccb
FB
1453/* add a new TLB entry. At most one entry for a given virtual address
1454 is permitted. Return 0 if OK or 2 if the page could not be mapped
1455 (can only happen in non SOFTMMU mode for I/O pages or pages
1456 conflicting with the host address space). */
2e12669a
FB
1457int tlb_set_page(CPUState *env, target_ulong vaddr,
1458 target_phys_addr_t paddr, int prot,
1459 int is_user, int is_softmmu)
1460{
92e873b9 1461 PhysPageDesc *p;
4f2ac237 1462 unsigned long pd;
9fa3e853
FB
1463 TranslationBlock *first_tb;
1464 unsigned int index;
4f2ac237
FB
1465 target_ulong address;
1466 unsigned long addend;
9fa3e853
FB
1467 int ret;
1468
92e873b9
FB
1469 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1470 first_tb = NULL;
9fa3e853
FB
1471 if (!p) {
1472 pd = IO_MEM_UNASSIGNED;
9fa3e853 1473 } else {
92e873b9 1474 PageDesc *p1;
9fa3e853 1475 pd = p->phys_offset;
92e873b9
FB
1476 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1477 /* NOTE: we also allocate the page at this stage */
1478 p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
1479 first_tb = p1->first_tb;
1480 }
9fa3e853
FB
1481 }
1482#if defined(DEBUG_TLB)
1483 printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
1484 vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
1485#endif
1486
1487 ret = 0;
1488#if !defined(CONFIG_SOFTMMU)
1489 if (is_softmmu)
1490#endif
1491 {
1492 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1493 /* IO memory case */
1494 address = vaddr | pd;
1495 addend = paddr;
1496 } else {
1497 /* standard memory */
1498 address = vaddr;
1499 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1500 }
1501
1502 index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
1503 addend -= vaddr;
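        /* from here on, addend is simply (host address - guest virtual
           address) for this page, so the fast path can form the host pointer
           as vaddr + addend with a single addition */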
67b915a5 1504 if (prot & PAGE_READ) {
9fa3e853
FB
1505 env->tlb_read[is_user][index].address = address;
1506 env->tlb_read[is_user][index].addend = addend;
1507 } else {
1508 env->tlb_read[is_user][index].address = -1;
1509 env->tlb_read[is_user][index].addend = -1;
1510 }
67b915a5 1511 if (prot & PAGE_WRITE) {
9fa3e853
FB
1512 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1513 /* ROM: access is ignored (same as unassigned) */
1514 env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
1ccde1cb 1515 env->tlb_write[is_user][index].addend = addend;
d720b93d
FB
1516 } else
1517 /* XXX: the PowerPC code seems not ready to handle
1518 self modifying code with DCBI */
1519#if defined(TARGET_HAS_SMC) || 1
1520 if (first_tb) {
9fa3e853
FB
1521 /* if code is present, we use a specific memory
1522 handler. It works only for physical memory access */
1523 env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
1ccde1cb 1524 env->tlb_write[is_user][index].addend = addend;
d720b93d
FB
1525 } else
1526#endif
1527 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1528 !cpu_physical_memory_is_dirty(pd)) {
1529 env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
1530 env->tlb_write[is_user][index].addend = addend;
9fa3e853
FB
1531 } else {
1532 env->tlb_write[is_user][index].address = address;
1533 env->tlb_write[is_user][index].addend = addend;
1534 }
1535 } else {
1536 env->tlb_write[is_user][index].address = -1;
1537 env->tlb_write[is_user][index].addend = -1;
1538 }
1539 }
1540#if !defined(CONFIG_SOFTMMU)
1541 else {
1542 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1543 /* IO access: no mapping is done as it will be handled by the
1544 soft MMU */
1545 if (!(env->hflags & HF_SOFTMMU_MASK))
1546 ret = 2;
1547 } else {
1548 void *map_addr;
59817ccb
FB
1549
1550 if (vaddr >= MMAP_AREA_END) {
1551 ret = 2;
1552 } else {
1553 if (prot & PROT_WRITE) {
1554 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
d720b93d 1555#if defined(TARGET_HAS_SMC) || 1
59817ccb 1556 first_tb ||
d720b93d 1557#endif
1558 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1559 !cpu_physical_memory_is_dirty(pd))) {
1560 /* ROM: we do as if code was inside */
1561 /* if code is present, we only map as read only and save the
1562 original mapping */
1563 VirtPageDesc *vp;
1564
1565 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
1566 vp->phys_addr = pd;
1567 vp->prot = prot;
1568 vp->valid_tag = virt_valid_tag;
1569 prot &= ~PAGE_WRITE;
1570 }
1571 }
1572 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1573 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1574 if (map_addr == MAP_FAILED) {
1575 cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1576 paddr, vaddr);
9fa3e853 1577 }
9fa3e853
FB
1578 }
1579 }
1580 }
1581#endif
1582 return ret;
1583}
1584
1585/* called from signal handler: invalidate the code and unprotect the
1586 page. Return TRUE if the fault was successfully handled. */
d720b93d 1587int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
9fa3e853
FB
1588{
1589#if !defined(CONFIG_SOFTMMU)
1590 VirtPageDesc *vp;
1591
1592#if defined(DEBUG_TLB)
1593 printf("page_unprotect: addr=0x%08x\n", addr);
1594#endif
1595 addr &= TARGET_PAGE_MASK;
59817ccb
FB
1596
1597 /* if it is not mapped, no need to worry here */
1598 if (addr >= MMAP_AREA_END)
1599 return 0;
9fa3e853
FB
1600 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1601 if (!vp)
1602 return 0;
1603 /* NOTE: in this case, validate_tag is _not_ tested as it
1604 validates only the code TLB */
1605 if (vp->valid_tag != virt_valid_tag)
1606 return 0;
1607 if (!(vp->prot & PAGE_WRITE))
1608 return 0;
1609#if defined(DEBUG_TLB)
1610 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1611 addr, vp->phys_addr, vp->prot);
1612#endif
59817ccb
FB
1613 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1614 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1615 (unsigned long)addr, vp->prot);
d720b93d
FB
1616 /* set the dirty bit */
1617 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
1618 /* flush the code inside */
1619 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
9fa3e853
FB
1620 return 1;
1621#else
1622 return 0;
1623#endif
33417e70
FB
1624}
1625
0124311e
FB
1626#else
1627
ee8b7021 1628void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1629{
1630}
1631
2e12669a 1632void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1633{
1634}
1635
2e12669a
FB
1636int tlb_set_page(CPUState *env, target_ulong vaddr,
1637 target_phys_addr_t paddr, int prot,
9fa3e853
FB
1638 int is_user, int is_softmmu)
1639{
1640 return 0;
1641}
0124311e 1642
9fa3e853
FB
1643/* dump memory mappings */
1644void page_dump(FILE *f)
33417e70 1645{
9fa3e853
FB
1646 unsigned long start, end;
1647 int i, j, prot, prot1;
1648 PageDesc *p;
33417e70 1649
9fa3e853
FB
1650 fprintf(f, "%-8s %-8s %-8s %s\n",
1651 "start", "end", "size", "prot");
1652 start = -1;
1653 end = -1;
1654 prot = 0;
1655 for(i = 0; i <= L1_SIZE; i++) {
1656 if (i < L1_SIZE)
1657 p = l1_map[i];
1658 else
1659 p = NULL;
1660 for(j = 0;j < L2_SIZE; j++) {
1661 if (!p)
1662 prot1 = 0;
1663 else
1664 prot1 = p[j].flags;
1665 if (prot1 != prot) {
1666 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1667 if (start != -1) {
1668 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1669 start, end, end - start,
1670 prot & PAGE_READ ? 'r' : '-',
1671 prot & PAGE_WRITE ? 'w' : '-',
1672 prot & PAGE_EXEC ? 'x' : '-');
1673 }
1674 if (prot1 != 0)
1675 start = end;
1676 else
1677 start = -1;
1678 prot = prot1;
1679 }
1680 if (!p)
1681 break;
1682 }
33417e70 1683 }
33417e70
FB
1684}
1685
9fa3e853 1686int page_get_flags(unsigned long address)
33417e70 1687{
9fa3e853
FB
1688 PageDesc *p;
1689
1690 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1691 if (!p)
9fa3e853
FB
1692 return 0;
1693 return p->flags;
1694}
1695
1696/* modify the flags of a page and invalidate the code if
1697 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1698 depending on PAGE_WRITE */
1699void page_set_flags(unsigned long start, unsigned long end, int flags)
1700{
1701 PageDesc *p;
1702 unsigned long addr;
1703
1704 start = start & TARGET_PAGE_MASK;
1705 end = TARGET_PAGE_ALIGN(end);
1706 if (flags & PAGE_WRITE)
1707 flags |= PAGE_WRITE_ORG;
1708 spin_lock(&tb_lock);
1709 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1710 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1711 /* if the write protection is set, then we invalidate the code
1712 inside */
1713 if (!(p->flags & PAGE_WRITE) &&
1714 (flags & PAGE_WRITE) &&
1715 p->first_tb) {
d720b93d 1716 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1717 }
1718 p->flags = flags;
1719 }
1720 spin_unlock(&tb_lock);
33417e70
FB
1721}
1722
9fa3e853
FB
1723/* called from signal handler: invalidate the code and unprotect the
1724 page. Return TRUE if the fault was successfully handled. */
d720b93d 1725int page_unprotect(unsigned long address, unsigned long pc, void *puc)
9fa3e853
FB
1726{
1727 unsigned int page_index, prot, pindex;
1728 PageDesc *p, *p1;
1729 unsigned long host_start, host_end, addr;
1730
1731 host_start = address & host_page_mask;
1732 page_index = host_start >> TARGET_PAGE_BITS;
1733 p1 = page_find(page_index);
1734 if (!p1)
1735 return 0;
1736 host_end = host_start + host_page_size;
1737 p = p1;
1738 prot = 0;
1739 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1740 prot |= p->flags;
1741 p++;
1742 }
1743 /* if the page was really writable, then we change its
1744 protection back to writable */
1745 if (prot & PAGE_WRITE_ORG) {
1746 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1747 if (!(p1[pindex].flags & PAGE_WRITE)) {
1748 mprotect((void *)host_start, host_page_size,
1749 (prot & PAGE_BITS) | PAGE_WRITE);
1750 p1[pindex].flags |= PAGE_WRITE;
1751 /* and since the content will be modified, we must invalidate
1752 the corresponding translated code. */
d720b93d 1753 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
1754#ifdef DEBUG_TB_CHECK
1755 tb_invalidate_check(address);
1756#endif
1757 return 1;
1758 }
1759 }
1760 return 0;
1761}
1762
1763/* call this function when system calls directly modify a memory area */
1764void page_unprotect_range(uint8_t *data, unsigned long data_size)
1765{
1766 unsigned long start, end, addr;
1767
1768 start = (unsigned long)data;
1769 end = start + data_size;
1770 start &= TARGET_PAGE_MASK;
1771 end = TARGET_PAGE_ALIGN(end);
1772 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
d720b93d 1773 page_unprotect(addr, 0, NULL);
9fa3e853
FB
1774 }
1775}
1776
1ccde1cb
FB
1777static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1778{
1779}
9fa3e853
FB
1780#endif /* defined(CONFIG_USER_ONLY) */
1781
33417e70
FB
1782/* register physical memory. 'size' must be a multiple of the target
1783 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1784 io memory page */
2e12669a
FB
1785void cpu_register_physical_memory(target_phys_addr_t start_addr,
1786 unsigned long size,
1787 unsigned long phys_offset)
33417e70
FB
1788{
1789 unsigned long addr, end_addr;
92e873b9 1790 PhysPageDesc *p;
33417e70 1791
5fd386f6 1792 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
33417e70 1793 end_addr = start_addr + size;
5fd386f6 1794 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
92e873b9 1795 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
9fa3e853
FB
1796 p->phys_offset = phys_offset;
1797 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
33417e70
FB
1798 phys_offset += TARGET_PAGE_SIZE;
1799 }
1800}
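/* Hedged usage sketch (illustrative values, not taken from this file): plain
   RAM at guest physical address 0 would be registered as
   cpu_register_physical_memory(0, ram_size, 0 | IO_MEM_RAM), while an MMIO
   region passes the value returned by cpu_register_io_memory() as
   phys_offset, since the low bits of phys_offset select the io handler. */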
1801
2e12669a 1802static uint32_t unassigned_mem_readb(target_phys_addr_t addr)
33417e70
FB
1803{
1804 return 0;
1805}
1806
2e12669a 1807static void unassigned_mem_writeb(target_phys_addr_t addr, uint32_t val)
33417e70
FB
1808{
1809}
1810
1811static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1812 unassigned_mem_readb,
1813 unassigned_mem_readb,
1814 unassigned_mem_readb,
1815};
1816
1817static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1818 unassigned_mem_writeb,
1819 unassigned_mem_writeb,
1820 unassigned_mem_writeb,
1821};
1822
9fa3e853
FB
1823/* self modifying code support in soft mmu mode : writing to a page
1824 containing code comes to these functions */
1825
2e12669a 1826static void code_mem_writeb(target_phys_addr_t addr, uint32_t val)
9fa3e853 1827{
1ccde1cb
FB
1828 unsigned long phys_addr;
1829
274da6b2 1830 phys_addr = addr - (unsigned long)phys_ram_base;
9fa3e853 1831#if !defined(CONFIG_USER_ONLY)
d720b93d 1832 tb_invalidate_phys_page_fast(phys_addr, 1);
9fa3e853 1833#endif
1ccde1cb
FB
1834 stb_raw((uint8_t *)addr, val);
1835 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
9fa3e853
FB
1836}
1837
2e12669a 1838static void code_mem_writew(target_phys_addr_t addr, uint32_t val)
9fa3e853 1839{
1ccde1cb
FB
1840 unsigned long phys_addr;
1841
274da6b2 1842 phys_addr = addr - (unsigned long)phys_ram_base;
9fa3e853 1843#if !defined(CONFIG_USER_ONLY)
d720b93d 1844 tb_invalidate_phys_page_fast(phys_addr, 2);
9fa3e853 1845#endif
1ccde1cb
FB
1846 stw_raw((uint8_t *)addr, val);
1847 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
9fa3e853
FB
1848}
1849
2e12669a 1850static void code_mem_writel(target_phys_addr_t addr, uint32_t val)
9fa3e853 1851{
1ccde1cb
FB
1852 unsigned long phys_addr;
1853
274da6b2 1854 phys_addr = addr - (unsigned long)phys_ram_base;
9fa3e853 1855#if !defined(CONFIG_USER_ONLY)
d720b93d 1856 tb_invalidate_phys_page_fast(phys_addr, 4);
9fa3e853 1857#endif
1ccde1cb
FB
1858 stl_raw((uint8_t *)addr, val);
1859 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
9fa3e853
FB
1860}
1861
1862static CPUReadMemoryFunc *code_mem_read[3] = {
1863 NULL, /* never used */
1864 NULL, /* never used */
1865 NULL, /* never used */
1866};
1867
1868static CPUWriteMemoryFunc *code_mem_write[3] = {
1869 code_mem_writeb,
1870 code_mem_writew,
1871 code_mem_writel,
1872};
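/* Illustrative sketch (not from the original source): once a RAM page is
   tagged IO_MEM_CODE, a 32 bit guest store is dispatched to the handler
   above roughly like this (the same pattern is visible in
   cpu_physical_memory_rw() below); 'pd', 'host_addr' and 'val' stand in
   for what the softmmu slow path computes:

   io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
   io_mem_write[io_index][2](host_addr, val);
       // -> code_mem_writel(): invalidate the TBs on the page, perform
       //    the raw store, then mark the page dirty
*/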
33417e70 1873
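/* write handlers used for RAM pages whose dirty bit is not yet set: the
   store is performed directly on host memory and tlb_set_dirty() updates
   the TLB entry for the current write address so that further writes can
   take the fast RAM path */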
4f2ac237 1874static void notdirty_mem_writeb(target_phys_addr_t addr, uint32_t val)
1ccde1cb
FB
1875{
1876 stb_raw((uint8_t *)addr, val);
d720b93d 1877 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1ccde1cb
FB
1878}
1879
4f2ac237 1880static void notdirty_mem_writew(target_phys_addr_t addr, uint32_t val)
1ccde1cb
FB
1881{
1882 stw_raw((uint8_t *)addr, val);
d720b93d 1883 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1ccde1cb
FB
1884}
1885
4f2ac237 1886static void notdirty_mem_writel(target_phys_addr_t addr, uint32_t val)
1ccde1cb
FB
1887{
1888 stl_raw((uint8_t *)addr, val);
d720b93d 1889 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1ccde1cb
FB
1890}
1891
1892static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1893 notdirty_mem_writeb,
1894 notdirty_mem_writew,
1895 notdirty_mem_writel,
1896};
1897
33417e70
FB
1898static void io_mem_init(void)
1899{
9fa3e853
FB
1900 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write);
1901 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write);
1902 cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write);
1ccde1cb
FB
1903 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write);
1904 io_mem_nb = 5;
1905
1906 /* alloc dirty bits array */
59817ccb 1907 phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
1908}
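/* note: phys_ram_dirty allocated above holds one byte per target page of
   physical RAM and is indexed by (ram offset >> TARGET_PAGE_BITS), as in
   the code_mem_write* handlers */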
1909
1910/* mem_read and mem_write are arrays of functions containing the
1911 functions used to access a byte (index 0), word (index 1) and dword
1912 (index 2). All functions must be supplied. If io_index is non-zero, the
1913 corresponding io zone is modified. If it is zero, a new io zone is
1914 allocated. The return value can be used with
1915 cpu_register_physical_memory(). (-1) is returned on error. */
1916int cpu_register_io_memory(int io_index,
1917 CPUReadMemoryFunc **mem_read,
1918 CPUWriteMemoryFunc **mem_write)
1919{
1920 int i;
1921
1922 if (io_index <= 0) {
1923 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
1924 return -1;
1925 io_index = io_mem_nb++;
1926 } else {
1927 if (io_index >= IO_MEM_NB_ENTRIES)
1928 return -1;
1929 }
1930
1931 for(i = 0;i < 3; i++) {
1932 io_mem_read[io_index][i] = mem_read[i];
1933 io_mem_write[io_index][i] = mem_write[i];
1934 }
1935 return io_index << IO_MEM_SHIFT;
1936}
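/* Illustrative sketch (not part of the original source): a device model
   could register its MMIO callbacks and map them like this; the 'mydev_*'
   handlers and MYDEV_BASE are hypothetical:

   static CPUReadMemoryFunc *mydev_read[3] = {
       mydev_readb, mydev_readw, mydev_readl,
   };
   static CPUWriteMemoryFunc *mydev_write[3] = {
       mydev_writeb, mydev_writew, mydev_writel,
   };

   int io;
   io = cpu_register_io_memory(0, mydev_read, mydev_write);
   cpu_register_physical_memory(MYDEV_BASE, 0x1000, io);
*/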
61382a50 1937
13eb76e0
FB
1938/* physical memory access (slow version, mainly for debug) */
1939#if defined(CONFIG_USER_ONLY)
2e12669a 1940void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
1941 int len, int is_write)
1942{
1943 int l, flags;
1944 target_ulong page;
1945
1946 while (len > 0) {
1947 page = addr & TARGET_PAGE_MASK;
1948 l = (page + TARGET_PAGE_SIZE) - addr;
1949 if (l > len)
1950 l = len;
1951 flags = page_get_flags(page);
1952 if (!(flags & PAGE_VALID))
1953 return;
1954 if (is_write) {
1955 if (!(flags & PAGE_WRITE))
1956 return;
1957 memcpy((uint8_t *)addr, buf, l);
1958 } else {
1959 if (!(flags & PAGE_READ))
1960 return;
1961 memcpy(buf, (uint8_t *)addr, l);
1962 }
1963 len -= l;
1964 buf += l;
1965 addr += l;
1966 }
1967}
1968#else
2e12669a 1969void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
1970 int len, int is_write)
1971{
1972 int l, io_index;
1973 uint8_t *ptr;
1974 uint32_t val;
2e12669a
FB
1975 target_phys_addr_t page;
1976 unsigned long pd;
92e873b9 1977 PhysPageDesc *p;
13eb76e0
FB
1978
1979 while (len > 0) {
1980 page = addr & TARGET_PAGE_MASK;
1981 l = (page + TARGET_PAGE_SIZE) - addr;
1982 if (l > len)
1983 l = len;
92e873b9 1984 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
1985 if (!p) {
1986 pd = IO_MEM_UNASSIGNED;
1987 } else {
1988 pd = p->phys_offset;
1989 }
1990
1991 if (is_write) {
1992 if ((pd & ~TARGET_PAGE_MASK) != 0) {
1993 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
1994 if (l >= 4 && ((addr & 3) == 0)) {
1995 /* 32 bit write access */
1996 val = ldl_raw(buf);
d720b93d 1997 io_mem_write[io_index][2](addr, val);
13eb76e0
FB
1998 l = 4;
1999 } else if (l >= 2 && ((addr & 1) == 0)) {
2000 /* 16 bit write access */
2001 val = lduw_raw(buf);
d720b93d 2002 io_mem_write[io_index][1](addr, val);
13eb76e0
FB
2003 l = 2;
2004 } else {
2005 /* 8 bit access */
2006 val = ldub_raw(buf);
d720b93d 2007 io_mem_write[io_index][0](addr, val);
13eb76e0
FB
2008 l = 1;
2009 }
2010 } else {
b448f2f3
FB
2011 unsigned long addr1;
2012 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2013 /* RAM case */
b448f2f3 2014 ptr = phys_ram_base + addr1;
13eb76e0 2015 memcpy(ptr, buf, l);
b448f2f3
FB
2016 /* invalidate code */
2017 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2018 /* set dirty bit */
2019 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 1;
13eb76e0
FB
2020 }
2021 } else {
2022 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2023 (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
2024 /* I/O case */
2025 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2026 if (l >= 4 && ((addr & 3) == 0)) {
2027 /* 32 bit read access */
2028 val = io_mem_read[io_index][2](addr);
2029 stl_raw(buf, val);
2030 l = 4;
2031 } else if (l >= 2 && ((addr & 1) == 0)) {
2032 /* 16 bit read access */
2033 val = io_mem_read[io_index][1](addr);
2034 stw_raw(buf, val);
2035 l = 2;
2036 } else {
2037 /* 8 bit access */
2038 val = io_mem_read[io_index][0](addr);
2039 stb_raw(buf, val);
2040 l = 1;
2041 }
2042 } else {
2043 /* RAM case */
2044 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2045 (addr & ~TARGET_PAGE_MASK);
2046 memcpy(buf, ptr, l);
2047 }
2048 }
2049 len -= l;
2050 buf += l;
2051 addr += l;
2052 }
2053}
2054#endif
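/* Illustrative sketch (not from the original source): loading a boot image
   into guest RAM could use the slow path like this; 'kernel_buf',
   'kernel_size' and KERNEL_LOAD_ADDR are hypothetical:

   cpu_physical_memory_rw(KERNEL_LOAD_ADDR, kernel_buf, kernel_size, 1);

   The write variant also invalidates any translated code overlapping the
   range and sets the corresponding dirty bits, as shown above. */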
2055
2056/* virtual memory access for debug */
b448f2f3
FB
2057int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2058 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2059{
2060 int l;
2061 target_ulong page, phys_addr;
2062
2063 while (len > 0) {
2064 page = addr & TARGET_PAGE_MASK;
2065 phys_addr = cpu_get_phys_page_debug(env, page);
2066 /* if no physical page mapped, return an error */
2067 if (phys_addr == -1)
2068 return -1;
2069 l = (page + TARGET_PAGE_SIZE) - addr;
2070 if (l > len)
2071 l = len;
b448f2f3
FB
2072 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2073 buf, l, is_write);
13eb76e0
FB
2074 len -= l;
2075 buf += l;
2076 addr += l;
2077 }
2078 return 0;
2079}
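/* Illustrative sketch (not from the original source): a debugger stub
   could read guest virtual memory through the per-page translation above;
   'env', 'gdb_buf' and 'vaddr' are hypothetical:

   if (cpu_memory_rw_debug(env, vaddr, gdb_buf, 4, 0) < 0) {
       // no physical page is mapped at this virtual address
   }
*/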
2080
61382a50
FB
2081#if !defined(CONFIG_USER_ONLY)
2082
2083#define MMUSUFFIX _cmmu
2084#define GETPC() NULL
2085#define env cpu_single_env
2086
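/* each inclusion of softmmu_template.h with a different SHIFT value
   instantiates the load/store helpers for accesses of (1 << SHIFT) bytes;
   with MMUSUFFIX set to _cmmu these are the variants used for code
   accesses, with GETPC() forced to NULL */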
2087#define SHIFT 0
2088#include "softmmu_template.h"
2089
2090#define SHIFT 1
2091#include "softmmu_template.h"
2092
2093#define SHIFT 2
2094#include "softmmu_template.h"
2095
2096#define SHIFT 3
2097#include "softmmu_template.h"
2098
2099#undef env
2100
2101#endif