/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/mman.h>

#include "config.h"
#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END 0xa8000000

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

typedef struct PageDesc {
    /* offset in memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
    /* list of TBs intersecting this physical page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

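/* Illustrative note (not from the original source): the page table above is
   a two-level radix tree over the target page index. Assuming
   TARGET_PAGE_BITS == 12 (so L1_BITS == 10), a 32-bit address such as
   0x12345678 has page index 0x12345; its L1 index is 0x12345 >> L2_BITS
   = 0x48, its L2 index is 0x12345 & (L2_SIZE - 1) = 0x345, and the low
   12 bits (0x678) are the offset inside the page. page_find_alloc() below
   walks exactly these two levels. */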
static void io_mem_init(void);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;

static PageDesc *l1_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

static void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
    real_host_page_size = getpagesize();
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}
137
fd6ce8f6 138static inline PageDesc *page_find_alloc(unsigned int index)
54936004 139{
54936004
FB
140 PageDesc **lp, *p;
141
54936004
FB
142 lp = &l1_map[index >> L2_BITS];
143 p = *lp;
144 if (!p) {
145 /* allocate if not found */
59817ccb 146 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
fd6ce8f6 147 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
54936004
FB
148 *lp = p;
149 }
150 return p + (index & (L2_SIZE - 1));
151}
152
fd6ce8f6 153static inline PageDesc *page_find(unsigned int index)
54936004 154{
54936004
FB
155 PageDesc *p;
156
54936004
FB
157 p = l1_map[index >> L2_BITS];
158 if (!p)
159 return 0;
fd6ce8f6
FB
160 return p + (index & (L2_SIZE - 1));
161}
162
9fa3e853
FB
163#if !defined(CONFIG_USER_ONLY)
164static void tlb_protect_code(CPUState *env, uint32_t addr);
165static void tlb_unprotect_code(CPUState *env, uint32_t addr);
1ccde1cb 166static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr, target_ulong vaddr);
9fa3e853
FB
167
168static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
fd6ce8f6 169{
9fa3e853 170 VirtPageDesc **lp, *p;
fd6ce8f6 171
9fa3e853
FB
172 lp = &l1_virt_map[index >> L2_BITS];
173 p = *lp;
174 if (!p) {
175 /* allocate if not found */
59817ccb 176 p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
9fa3e853
FB
177 memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
178 *lp = p;
179 }
180 return p + (index & (L2_SIZE - 1));
181}
182
183static inline VirtPageDesc *virt_page_find(unsigned int index)
184{
185 VirtPageDesc *p;
186
187 p = l1_virt_map[index >> L2_BITS];
fd6ce8f6
FB
188 if (!p)
189 return 0;
9fa3e853 190 return p + (index & (L2_SIZE - 1));
54936004
FB
191}
192
9fa3e853 193static void virt_page_flush(void)
54936004 194{
9fa3e853
FB
195 int i, j;
196 VirtPageDesc *p;
197
198 virt_valid_tag++;
199
200 if (virt_valid_tag == 0) {
201 virt_valid_tag = 1;
202 for(i = 0; i < L1_SIZE; i++) {
203 p = l1_virt_map[i];
204 if (p) {
205 for(j = 0; j < L2_SIZE; j++)
206 p[j].valid_tag = 0;
207 }
fd6ce8f6 208 }
54936004
FB
209 }
210}
9fa3e853
FB
211#else
212static void virt_page_flush(void)
213{
214}
215#endif
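/* Illustrative note (not from the original source): virt_valid_tag acts as a
   generation counter. Flushing the virtual page table is normally just
   "virt_valid_tag++": an entry is considered live only when
   vp->valid_tag == virt_valid_tag, so stale entries become invalid without
   touching them. Only when the counter wraps to 0 does virt_page_flush()
   walk the whole table and clear every tag explicitly. */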
fd6ce8f6 216
b346ff46 217void cpu_exec_init(void)
fd6ce8f6
FB
218{
219 if (!code_gen_ptr) {
220 code_gen_ptr = code_gen_buffer;
b346ff46 221 page_init();
33417e70 222 io_mem_init();
fd6ce8f6
FB
223 }
224}
225
9fa3e853
FB
226static inline void invalidate_page_bitmap(PageDesc *p)
227{
228 if (p->code_bitmap) {
59817ccb 229 qemu_free(p->code_bitmap);
9fa3e853
FB
230 p->code_bitmap = NULL;
231 }
232 p->code_write_count = 0;
233}
234
fd6ce8f6
FB
235/* set to NULL all the 'first_tb' fields in all PageDescs */
236static void page_flush_tb(void)
237{
238 int i, j;
239 PageDesc *p;
240
241 for(i = 0; i < L1_SIZE; i++) {
242 p = l1_map[i];
243 if (p) {
9fa3e853
FB
244 for(j = 0; j < L2_SIZE; j++) {
245 p->first_tb = NULL;
246 invalidate_page_bitmap(p);
247 p++;
248 }
fd6ce8f6
FB
249 }
250 }
251}
252
253/* flush all the translation blocks */
d4e8164f 254/* XXX: tb_flush is currently not thread safe */
0124311e 255void tb_flush(CPUState *env)
fd6ce8f6
FB
256{
257 int i;
0124311e 258#if defined(DEBUG_FLUSH)
fd6ce8f6
FB
259 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
260 code_gen_ptr - code_gen_buffer,
261 nb_tbs,
0124311e 262 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
fd6ce8f6
FB
263#endif
264 nb_tbs = 0;
265 for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
266 tb_hash[i] = NULL;
9fa3e853
FB
267 virt_page_flush();
268
269 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++)
270 tb_phys_hash[i] = NULL;
fd6ce8f6 271 page_flush_tb();
9fa3e853 272
fd6ce8f6 273 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
274 /* XXX: flush processor icache at this point if cache flush is
275 expensive */
fd6ce8f6
FB
276}
277
278#ifdef DEBUG_TB_CHECK
279
280static void tb_invalidate_check(unsigned long address)
281{
282 TranslationBlock *tb;
283 int i;
284 address &= TARGET_PAGE_MASK;
285 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
286 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
287 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
288 address >= tb->pc + tb->size)) {
289 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
290 address, tb->pc, tb->size);
291 }
292 }
293 }
294}
295
296/* verify that all the pages have correct rights for code */
297static void tb_page_check(void)
298{
299 TranslationBlock *tb;
300 int i, flags1, flags2;
301
302 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
303 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
304 flags1 = page_get_flags(tb->pc);
305 flags2 = page_get_flags(tb->pc + tb->size - 1);
306 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
307 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
308 tb->pc, tb->size, flags1, flags2);
309 }
310 }
311 }
312}
313
d4e8164f
FB
314void tb_jmp_check(TranslationBlock *tb)
315{
316 TranslationBlock *tb1;
317 unsigned int n1;
318
319 /* suppress any remaining jumps to this TB */
320 tb1 = tb->jmp_first;
321 for(;;) {
322 n1 = (long)tb1 & 3;
323 tb1 = (TranslationBlock *)((long)tb1 & ~3);
324 if (n1 == 2)
325 break;
326 tb1 = tb1->jmp_next[n1];
327 }
328 /* check end of list */
329 if (tb1 != tb) {
330 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
331 }
332}
333
fd6ce8f6
FB
334#endif
335
336/* invalidate one TB */
337static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
338 int next_offset)
339{
340 TranslationBlock *tb1;
341 for(;;) {
342 tb1 = *ptb;
343 if (tb1 == tb) {
344 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
345 break;
346 }
347 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
348 }
349}
350
9fa3e853
FB
351static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
352{
353 TranslationBlock *tb1;
354 unsigned int n1;
355
356 for(;;) {
357 tb1 = *ptb;
358 n1 = (long)tb1 & 3;
359 tb1 = (TranslationBlock *)((long)tb1 & ~3);
360 if (tb1 == tb) {
361 *ptb = tb1->page_next[n1];
362 break;
363 }
364 ptb = &tb1->page_next[n1];
365 }
366}
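/* Illustrative note (not from the original source): the page lists above
   store tagged pointers. TranslationBlock structures are at least 4-byte
   aligned, so the low 2 bits of each list pointer are free and encode which
   of the TB's two page slots the link belongs to (0 or 1); in the jump
   lists the value 2 marks the list head. Hence the recurring pattern:

       n1  = (long)tb1 & 3;                          extract the tag
       tb1 = (TranslationBlock *)((long)tb1 & ~3);   recover the pointer
*/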
367
d4e8164f
FB
368static inline void tb_jmp_remove(TranslationBlock *tb, int n)
369{
370 TranslationBlock *tb1, **ptb;
371 unsigned int n1;
372
373 ptb = &tb->jmp_next[n];
374 tb1 = *ptb;
375 if (tb1) {
376 /* find tb(n) in circular list */
377 for(;;) {
378 tb1 = *ptb;
379 n1 = (long)tb1 & 3;
380 tb1 = (TranslationBlock *)((long)tb1 & ~3);
381 if (n1 == n && tb1 == tb)
382 break;
383 if (n1 == 2) {
384 ptb = &tb1->jmp_first;
385 } else {
386 ptb = &tb1->jmp_next[n1];
387 }
388 }
389 /* now we can suppress tb(n) from the list */
390 *ptb = tb->jmp_next[n];
391
392 tb->jmp_next[n] = NULL;
393 }
394}
395
396/* reset the jump entry 'n' of a TB so that it is not chained to
397 another TB */
398static inline void tb_reset_jump(TranslationBlock *tb, int n)
399{
400 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
401}
402
9fa3e853 403static inline void tb_invalidate(TranslationBlock *tb)
fd6ce8f6 404{
d4e8164f 405 unsigned int h, n1;
9fa3e853 406 TranslationBlock *tb1, *tb2, **ptb;
d4e8164f 407
36bdbe54 408 tb_invalidated_flag = 1;
59817ccb 409
fd6ce8f6
FB
410 /* remove the TB from the hash list */
411 h = tb_hash_func(tb->pc);
9fa3e853
FB
412 ptb = &tb_hash[h];
413 for(;;) {
414 tb1 = *ptb;
415 /* NOTE: the TB is not necessarily linked in the hash. It
416 indicates that it is not currently used */
417 if (tb1 == NULL)
418 return;
419 if (tb1 == tb) {
420 *ptb = tb1->hash_next;
421 break;
422 }
423 ptb = &tb1->hash_next;
fd6ce8f6 424 }
d4e8164f
FB
425
426 /* suppress this TB from the two jump lists */
427 tb_jmp_remove(tb, 0);
428 tb_jmp_remove(tb, 1);
429
430 /* suppress any remaining jumps to this TB */
431 tb1 = tb->jmp_first;
432 for(;;) {
433 n1 = (long)tb1 & 3;
434 if (n1 == 2)
435 break;
436 tb1 = (TranslationBlock *)((long)tb1 & ~3);
437 tb2 = tb1->jmp_next[n1];
438 tb_reset_jump(tb1, n1);
439 tb1->jmp_next[n1] = NULL;
440 tb1 = tb2;
441 }
442 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
fd6ce8f6
FB
443}
444
9fa3e853 445static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
fd6ce8f6 446{
fd6ce8f6 447 PageDesc *p;
9fa3e853
FB
448 unsigned int h;
449 target_ulong phys_pc;
450
451 /* remove the TB from the hash list */
452 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
453 h = tb_phys_hash_func(phys_pc);
454 tb_remove(&tb_phys_hash[h], tb,
455 offsetof(TranslationBlock, phys_hash_next));
456
457 /* remove the TB from the page list */
458 if (tb->page_addr[0] != page_addr) {
459 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
460 tb_page_remove(&p->first_tb, tb);
461 invalidate_page_bitmap(p);
462 }
463 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
464 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
465 tb_page_remove(&p->first_tb, tb);
466 invalidate_page_bitmap(p);
467 }
468
469 tb_invalidate(tb);
470}
471
472static inline void set_bits(uint8_t *tab, int start, int len)
473{
474 int end, mask, end1;
475
476 end = start + len;
477 tab += start >> 3;
478 mask = 0xff << (start & 7);
479 if ((start & ~7) == (end & ~7)) {
480 if (start < end) {
481 mask &= ~(0xff << (end & 7));
482 *tab |= mask;
483 }
484 } else {
485 *tab++ |= mask;
486 start = (start + 8) & ~7;
487 end1 = end & ~7;
488 while (start < end1) {
489 *tab++ = 0xff;
490 start += 8;
491 }
492 if (start < end) {
493 mask = ~(0xff << (end & 7));
494 *tab |= mask;
495 }
496 }
497}
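/* Worked example (illustrative, not from the original source):
   set_bits(tab, 3, 7) marks bits 3..9. start=3 and end=10 fall in different
   bytes, so the first byte gets mask 0xff << 3 = 0xf8 (bits 3..7), no full
   middle byte is written, and the second byte gets ~(0xff << 2) = 0x03
   (bits 8..9):

       uint8_t tab[2] = { 0, 0 };
       set_bits(tab, 3, 7);   // tab[0] == 0xf8, tab[1] == 0x03
*/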
498
499static void build_page_bitmap(PageDesc *p)
500{
501 int n, tb_start, tb_end;
502 TranslationBlock *tb;
503
59817ccb 504 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
505 if (!p->code_bitmap)
506 return;
507 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
508
509 tb = p->first_tb;
510 while (tb != NULL) {
511 n = (long)tb & 3;
512 tb = (TranslationBlock *)((long)tb & ~3);
513 /* NOTE: this is subtle as a TB may span two physical pages */
514 if (n == 0) {
515 /* NOTE: tb_end may be after the end of the page, but
516 it is not a problem */
517 tb_start = tb->pc & ~TARGET_PAGE_MASK;
518 tb_end = tb_start + tb->size;
519 if (tb_end > TARGET_PAGE_SIZE)
520 tb_end = TARGET_PAGE_SIZE;
521 } else {
522 tb_start = 0;
523 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
524 }
525 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
526 tb = tb->page_next[n];
527 }
528}
529
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'vaddr' is a virtual address referencing
   the physical page of code. It is only used as a hint if there is no
   code left. */
535static void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
536 target_ulong vaddr)
9fa3e853
FB
537{
538 int n;
539 PageDesc *p;
540 TranslationBlock *tb, *tb_next;
541 target_ulong tb_start, tb_end;
542
543 p = page_find(start >> TARGET_PAGE_BITS);
544 if (!p)
545 return;
546 if (!p->code_bitmap &&
547 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
548 /* build code bitmap */
549 build_page_bitmap(p);
550 }
551
552 /* we remove all the TBs in the range [start, end[ */
553 /* XXX: see if in some cases it could be faster to invalidate all the code */
554 tb = p->first_tb;
555 while (tb != NULL) {
556 n = (long)tb & 3;
557 tb = (TranslationBlock *)((long)tb & ~3);
558 tb_next = tb->page_next[n];
559 /* NOTE: this is subtle as a TB may span two physical pages */
560 if (n == 0) {
561 /* NOTE: tb_end may be after the end of the page, but
562 it is not a problem */
563 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
564 tb_end = tb_start + tb->size;
565 } else {
566 tb_start = tb->page_addr[1];
567 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
568 }
569 if (!(tb_end <= start || tb_start >= end)) {
570 tb_phys_invalidate(tb, -1);
571 }
572 tb = tb_next;
573 }
574#if !defined(CONFIG_USER_ONLY)
575 /* if no code remaining, no need to continue to use slow writes */
576 if (!p->first_tb) {
577 invalidate_page_bitmap(p);
1ccde1cb 578 tlb_unprotect_code_phys(cpu_single_env, start, vaddr);
9fa3e853 579 }
fd6ce8f6 580#endif
9fa3e853 581}
fd6ce8f6 582
9fa3e853 583/* len must be <= 8 and start must be a multiple of len */
1ccde1cb 584static inline void tb_invalidate_phys_page_fast(target_ulong start, int len, target_ulong vaddr)
9fa3e853
FB
585{
586 PageDesc *p;
587 int offset, b;
59817ccb
FB
588#if 0
589 if (cpu_single_env->cr[0] & CR0_PE_MASK) {
590 printf("modifying code at 0x%x size=%d EIP=%x\n",
591 (vaddr & TARGET_PAGE_MASK) | (start & ~TARGET_PAGE_MASK), len,
592 cpu_single_env->eip);
593 }
594#endif
9fa3e853
FB
595 p = page_find(start >> TARGET_PAGE_BITS);
596 if (!p)
597 return;
598 if (p->code_bitmap) {
599 offset = start & ~TARGET_PAGE_MASK;
600 b = p->code_bitmap[offset >> 3] >> (offset & 7);
601 if (b & ((1 << len) - 1))
602 goto do_invalidate;
603 } else {
604 do_invalidate:
1ccde1cb 605 tb_invalidate_phys_page_range(start, start + len, vaddr);
9fa3e853
FB
606 }
607}
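/* Illustrative note (not from the original source): once a page has seen
   SMC_BITMAP_USE_THRESHOLD code writes, build_page_bitmap() records which
   bytes of the page are covered by translated code. A small aligned write
   of 'len' bytes can then be checked against the bitmap, e.g. a 4-byte
   store at page offset 0x124 tests bits 0x124..0x127 via

       b = p->code_bitmap[0x124 >> 3] >> (0x124 & 7);
       if (b & ((1 << 4) - 1))
           ...invalidate the overlapping TBs...

   and is ignored otherwise, avoiding a full range invalidation. */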
608
609/* invalidate all TBs which intersect with the target virtual page
610 starting in range [start;end[. This function is usually used when
611 the target processor flushes its I-cache. NOTE: start and end must
612 refer to the same physical page */
613void tb_invalidate_page_range(target_ulong start, target_ulong end)
614{
615 int n;
616 PageDesc *p;
617 TranslationBlock *tb, *tb_next;
618 target_ulong pc;
619 target_ulong phys_start;
620
621#if !defined(CONFIG_USER_ONLY)
622 {
623 VirtPageDesc *vp;
624 vp = virt_page_find(start >> TARGET_PAGE_BITS);
625 if (!vp)
626 return;
627 if (vp->valid_tag != virt_valid_tag)
628 return;
629 phys_start = vp->phys_addr + (start & ~TARGET_PAGE_MASK);
630 }
631#else
632 phys_start = start;
633#endif
634 p = page_find(phys_start >> TARGET_PAGE_BITS);
635 if (!p)
fd6ce8f6 636 return;
9fa3e853
FB
637 /* we remove all the TBs in the range [start, end[ */
638 /* XXX: see if in some cases it could be faster to invalidate all the code */
fd6ce8f6 639 tb = p->first_tb;
fd6ce8f6 640 while (tb != NULL) {
9fa3e853
FB
641 n = (long)tb & 3;
642 tb = (TranslationBlock *)((long)tb & ~3);
643 tb_next = tb->page_next[n];
644 pc = tb->pc;
645 if (!((pc + tb->size) <= start || pc >= end)) {
646 tb_phys_invalidate(tb, -1);
647 }
fd6ce8f6
FB
648 tb = tb_next;
649 }
9fa3e853
FB
650#if !defined(CONFIG_USER_ONLY)
651 /* if no code remaining, no need to continue to use slow writes */
652 if (!p->first_tb)
653 tlb_unprotect_code(cpu_single_env, start);
654#endif
655}
656
657#if !defined(CONFIG_SOFTMMU)
658static void tb_invalidate_phys_page(target_ulong addr)
659{
660 int n;
661 PageDesc *p;
662 TranslationBlock *tb;
663
664 addr &= TARGET_PAGE_MASK;
665 p = page_find(addr >> TARGET_PAGE_BITS);
666 if (!p)
667 return;
668 tb = p->first_tb;
669 while (tb != NULL) {
670 n = (long)tb & 3;
671 tb = (TranslationBlock *)((long)tb & ~3);
672 tb_phys_invalidate(tb, addr);
673 tb = tb->page_next[n];
674 }
fd6ce8f6
FB
675 p->first_tb = NULL;
676}
9fa3e853 677#endif
fd6ce8f6
FB
678
679/* add the tb in the target page and protect it if necessary */
9fa3e853
FB
680static inline void tb_alloc_page(TranslationBlock *tb,
681 unsigned int n, unsigned int page_addr)
fd6ce8f6
FB
682{
683 PageDesc *p;
9fa3e853
FB
684 TranslationBlock *last_first_tb;
685
686 tb->page_addr[n] = page_addr;
687 p = page_find(page_addr >> TARGET_PAGE_BITS);
688 tb->page_next[n] = p->first_tb;
689 last_first_tb = p->first_tb;
690 p->first_tb = (TranslationBlock *)((long)tb | n);
691 invalidate_page_bitmap(p);
fd6ce8f6 692
9fa3e853 693#if defined(CONFIG_USER_ONLY)
fd6ce8f6 694 if (p->flags & PAGE_WRITE) {
9fa3e853
FB
695 unsigned long host_start, host_end, addr;
696 int prot;
697
fd6ce8f6
FB
698 /* force the host page as non writable (writes will have a
699 page fault + mprotect overhead) */
fd6ce8f6
FB
700 host_start = page_addr & host_page_mask;
701 host_end = host_start + host_page_size;
702 prot = 0;
703 for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
704 prot |= page_get_flags(addr);
705 mprotect((void *)host_start, host_page_size,
706 (prot & PAGE_BITS) & ~PAGE_WRITE);
707#ifdef DEBUG_TB_INVALIDATE
708 printf("protecting code page: 0x%08lx\n",
709 host_start);
710#endif
711 p->flags &= ~PAGE_WRITE;
fd6ce8f6 712 }
9fa3e853
FB
713#else
714 /* if some code is already present, then the pages are already
715 protected. So we handle the case where only the first TB is
716 allocated in a physical page */
717 if (!last_first_tb) {
718 target_ulong virt_addr;
719
720 virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
721 tlb_protect_code(cpu_single_env, virt_addr);
722 }
723#endif
fd6ce8f6
FB
724}
725
726/* Allocate a new translation block. Flush the translation buffer if
727 too many translation blocks or too much generated code. */
d4e8164f 728TranslationBlock *tb_alloc(unsigned long pc)
fd6ce8f6
FB
729{
730 TranslationBlock *tb;
fd6ce8f6
FB
731
732 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
733 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
d4e8164f 734 return NULL;
fd6ce8f6
FB
735 tb = &tbs[nb_tbs++];
736 tb->pc = pc;
b448f2f3 737 tb->cflags = 0;
d4e8164f
FB
738 return tb;
739}
740
9fa3e853
FB
741/* add a new TB and link it to the physical page tables. phys_page2 is
742 (-1) to indicate that only one page contains the TB. */
743void tb_link_phys(TranslationBlock *tb,
744 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 745{
9fa3e853
FB
746 unsigned int h;
747 TranslationBlock **ptb;
748
749 /* add in the physical hash table */
750 h = tb_phys_hash_func(phys_pc);
751 ptb = &tb_phys_hash[h];
752 tb->phys_hash_next = *ptb;
753 *ptb = tb;
fd6ce8f6
FB
754
755 /* add in the page list */
9fa3e853
FB
756 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
757 if (phys_page2 != -1)
758 tb_alloc_page(tb, 1, phys_page2);
759 else
760 tb->page_addr[1] = -1;
61382a50
FB
761#ifdef DEBUG_TB_CHECK
762 tb_page_check();
763#endif
9fa3e853
FB
764}
765
766/* link the tb with the other TBs */
767void tb_link(TranslationBlock *tb)
768{
769#if !defined(CONFIG_USER_ONLY)
770 {
771 VirtPageDesc *vp;
772 target_ulong addr;
773
774 /* save the code memory mappings (needed to invalidate the code) */
775 addr = tb->pc & TARGET_PAGE_MASK;
776 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
98857888
FB
777#ifdef DEBUG_TLB_CHECK
778 if (vp->valid_tag == virt_valid_tag &&
779 vp->phys_addr != tb->page_addr[0]) {
780 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
781 addr, tb->page_addr[0], vp->phys_addr);
782 }
783#endif
9fa3e853 784 vp->phys_addr = tb->page_addr[0];
59817ccb
FB
785 if (vp->valid_tag != virt_valid_tag) {
786 vp->valid_tag = virt_valid_tag;
787#if !defined(CONFIG_SOFTMMU)
788 vp->prot = 0;
789#endif
790 }
9fa3e853
FB
791
792 if (tb->page_addr[1] != -1) {
793 addr += TARGET_PAGE_SIZE;
794 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
98857888
FB
795#ifdef DEBUG_TLB_CHECK
796 if (vp->valid_tag == virt_valid_tag &&
797 vp->phys_addr != tb->page_addr[1]) {
798 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
799 addr, tb->page_addr[1], vp->phys_addr);
800 }
801#endif
9fa3e853 802 vp->phys_addr = tb->page_addr[1];
59817ccb
FB
803 if (vp->valid_tag != virt_valid_tag) {
804 vp->valid_tag = virt_valid_tag;
805#if !defined(CONFIG_SOFTMMU)
806 vp->prot = 0;
807#endif
808 }
9fa3e853
FB
809 }
810 }
811#endif
812
d4e8164f
FB
813 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
814 tb->jmp_next[0] = NULL;
815 tb->jmp_next[1] = NULL;
b448f2f3
FB
816#ifdef USE_CODE_COPY
817 tb->cflags &= ~CF_FP_USED;
818 if (tb->cflags & CF_TB_FP_USED)
819 tb->cflags |= CF_FP_USED;
820#endif
d4e8164f
FB
821
822 /* init original jump addresses */
823 if (tb->tb_next_offset[0] != 0xffff)
824 tb_reset_jump(tb, 0);
825 if (tb->tb_next_offset[1] != 0xffff)
826 tb_reset_jump(tb, 1);
fd6ce8f6
FB
827}
828
9fa3e853
FB
829/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
830 tb[1].tc_ptr. Return NULL if not found */
831TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 832{
9fa3e853
FB
833 int m_min, m_max, m;
834 unsigned long v;
835 TranslationBlock *tb;
a513fe19
FB
836
837 if (nb_tbs <= 0)
838 return NULL;
839 if (tc_ptr < (unsigned long)code_gen_buffer ||
840 tc_ptr >= (unsigned long)code_gen_ptr)
841 return NULL;
842 /* binary search (cf Knuth) */
843 m_min = 0;
844 m_max = nb_tbs - 1;
845 while (m_min <= m_max) {
846 m = (m_min + m_max) >> 1;
847 tb = &tbs[m];
848 v = (unsigned long)tb->tc_ptr;
849 if (v == tc_ptr)
850 return tb;
851 else if (tc_ptr < v) {
852 m_max = m - 1;
853 } else {
854 m_min = m + 1;
855 }
856 }
857 return &tbs[m_max];
858}
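/* Usage note (illustrative, not from the original source): tb_find_pc()
   maps a host code address back to the TranslationBlock that generated it,
   e.g. from a signal handler that caught a fault inside generated code:

       TranslationBlock *tb = tb_find_pc((unsigned long)fault_host_pc);
       if (tb) { ...recover guest CPU state for that TB... }

   'fault_host_pc' is hypothetical. The binary search relies on tbs[] being
   allocated in ascending tc_ptr order out of code_gen_buffer, so it returns
   the last TB whose tc_ptr is <= the given address. */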
7501267e 859
ea041c0e
FB
860static void tb_reset_jump_recursive(TranslationBlock *tb);
861
862static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
863{
864 TranslationBlock *tb1, *tb_next, **ptb;
865 unsigned int n1;
866
867 tb1 = tb->jmp_next[n];
868 if (tb1 != NULL) {
869 /* find head of list */
870 for(;;) {
871 n1 = (long)tb1 & 3;
872 tb1 = (TranslationBlock *)((long)tb1 & ~3);
873 if (n1 == 2)
874 break;
875 tb1 = tb1->jmp_next[n1];
876 }
        /* we are now sure that tb jumps to tb1 */
878 tb_next = tb1;
879
880 /* remove tb from the jmp_first list */
881 ptb = &tb_next->jmp_first;
882 for(;;) {
883 tb1 = *ptb;
884 n1 = (long)tb1 & 3;
885 tb1 = (TranslationBlock *)((long)tb1 & ~3);
886 if (n1 == n && tb1 == tb)
887 break;
888 ptb = &tb1->jmp_next[n1];
889 }
890 *ptb = tb->jmp_next[n];
891 tb->jmp_next[n] = NULL;
892
893 /* suppress the jump to next tb in generated code */
894 tb_reset_jump(tb, n);
895
0124311e 896 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
897 tb_reset_jump_recursive(tb_next);
898 }
899}
900
901static void tb_reset_jump_recursive(TranslationBlock *tb)
902{
903 tb_reset_jump_recursive2(tb, 0);
904 tb_reset_jump_recursive2(tb, 1);
905}
906
c33a346e
FB
907/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
908 breakpoint is reached */
4c3a88a2
FB
909int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
910{
911#if defined(TARGET_I386)
912 int i;
913
914 for(i = 0; i < env->nb_breakpoints; i++) {
915 if (env->breakpoints[i] == pc)
916 return 0;
917 }
918
919 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
920 return -1;
921 env->breakpoints[env->nb_breakpoints++] = pc;
9fa3e853 922 tb_invalidate_page_range(pc, pc + 1);
4c3a88a2
FB
923 return 0;
924#else
925 return -1;
926#endif
927}
928
929/* remove a breakpoint */
930int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
931{
932#if defined(TARGET_I386)
933 int i;
934 for(i = 0; i < env->nb_breakpoints; i++) {
935 if (env->breakpoints[i] == pc)
936 goto found;
937 }
938 return -1;
939 found:
940 memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
941 (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
942 env->nb_breakpoints--;
9fa3e853 943 tb_invalidate_page_range(pc, pc + 1);
4c3a88a2
FB
944 return 0;
945#else
946 return -1;
947#endif
948}
949
c33a346e
FB
950/* enable or disable single step mode. EXCP_DEBUG is returned by the
951 CPU loop after each instruction */
952void cpu_single_step(CPUState *env, int enabled)
953{
954#if defined(TARGET_I386)
955 if (env->singlestep_enabled != enabled) {
956 env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
9fa3e853 958 /* XXX: only flush what is necessary */
0124311e 959 tb_flush(env);
c33a346e
FB
960 }
961#endif
962}
963
34865134
FB
/* enable or disable low level logging */
965void cpu_set_log(int log_flags)
966{
967 loglevel = log_flags;
968 if (loglevel && !logfile) {
969 logfile = fopen(logfilename, "w");
970 if (!logfile) {
971 perror(logfilename);
972 _exit(1);
973 }
9fa3e853
FB
974#if !defined(CONFIG_SOFTMMU)
975 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
976 {
977 static uint8_t logfile_buf[4096];
978 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
979 }
980#else
34865134 981 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 982#endif
34865134
FB
983 }
984}
985
986void cpu_set_log_filename(const char *filename)
987{
988 logfilename = strdup(filename);
989}
c33a346e 990
0124311e 991/* mask must never be zero, except for A20 change call */
68a79315 992void cpu_interrupt(CPUState *env, int mask)
ea041c0e
FB
993{
994 TranslationBlock *tb;
ee8b7021 995 static int interrupt_lock;
59817ccb 996
68a79315 997 env->interrupt_request |= mask;
ea041c0e
FB
998 /* if the cpu is currently executing code, we must unlink it and
999 all the potentially executing TB */
1000 tb = env->current_tb;
ee8b7021
FB
1001 if (tb && !testandset(&interrupt_lock)) {
1002 env->current_tb = NULL;
ea041c0e 1003 tb_reset_jump_recursive(tb);
ee8b7021 1004 interrupt_lock = 0;
ea041c0e
FB
1005 }
1006}
1007
f193c797
FB
1008CPULogItem cpu_log_items[] = {
1009 { CPU_LOG_TB_OUT_ASM, "out_asm",
1010 "show generated host assembly code for each compiled TB" },
1011 { CPU_LOG_TB_IN_ASM, "in_asm",
1012 "show target assembly code for each compiled TB" },
1013 { CPU_LOG_TB_OP, "op",
1014 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1015#ifdef TARGET_I386
1016 { CPU_LOG_TB_OP_OPT, "op_opt",
1017 "show micro ops after optimization for each compiled TB" },
1018#endif
1019 { CPU_LOG_INT, "int",
1020 "show interrupts/exceptions in short format" },
1021 { CPU_LOG_EXEC, "exec",
1022 "show trace before each executed TB (lots of logs)" },
1023#ifdef TARGET_I386
1024 { CPU_LOG_PCALL, "pcall",
1025 "show protected mode far calls/returns/exceptions" },
1026#endif
1027 { 0, NULL, NULL },
1028};
1029
1030static int cmp1(const char *s1, int n, const char *s2)
1031{
1032 if (strlen(s2) != n)
1033 return 0;
1034 return memcmp(s1, s2, n) == 0;
1035}
1036
1037/* takes a comma separated list of log masks. Return 0 if error. */
1038int cpu_str_to_log_mask(const char *str)
1039{
1040 CPULogItem *item;
1041 int mask;
1042 const char *p, *p1;
1043
1044 p = str;
1045 mask = 0;
1046 for(;;) {
1047 p1 = strchr(p, ',');
1048 if (!p1)
1049 p1 = p + strlen(p);
1050 for(item = cpu_log_items; item->mask != 0; item++) {
1051 if (cmp1(p, p1 - p, item->name))
1052 goto found;
1053 }
1054 return 0;
1055 found:
1056 mask |= item->mask;
1057 if (*p1 != ',')
1058 break;
1059 p = p1 + 1;
1060 }
1061 return mask;
1062}
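/* Usage example (illustrative, not from the original source): parsing a
   comma separated log specification and enabling the corresponding logs:

       int mask = cpu_str_to_log_mask("in_asm,op");
       if (!mask)
           ...unknown log item...
       else
           cpu_set_log(mask);   // mask == CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP
*/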
ea041c0e 1063
7501267e
FB
1064void cpu_abort(CPUState *env, const char *fmt, ...)
1065{
1066 va_list ap;
1067
1068 va_start(ap, fmt);
1069 fprintf(stderr, "qemu: fatal: ");
1070 vfprintf(stderr, fmt, ap);
1071 fprintf(stderr, "\n");
1072#ifdef TARGET_I386
1073 cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
1074#endif
1075 va_end(ap);
1076 abort();
1077}
1078
0124311e
FB
1079#if !defined(CONFIG_USER_ONLY)
1080
ee8b7021
FB
1081/* NOTE: if flush_global is true, also flush global entries (not
1082 implemented yet) */
1083void tlb_flush(CPUState *env, int flush_global)
33417e70 1084{
33417e70 1085 int i;
0124311e 1086
9fa3e853
FB
1087#if defined(DEBUG_TLB)
1088 printf("tlb_flush:\n");
1089#endif
0124311e
FB
1090 /* must reset current TB so that interrupts cannot modify the
1091 links while we are modifying them */
1092 env->current_tb = NULL;
1093
33417e70
FB
1094 for(i = 0; i < CPU_TLB_SIZE; i++) {
1095 env->tlb_read[0][i].address = -1;
1096 env->tlb_write[0][i].address = -1;
1097 env->tlb_read[1][i].address = -1;
1098 env->tlb_write[1][i].address = -1;
1099 }
9fa3e853
FB
1100
1101 virt_page_flush();
1102 for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
1103 tb_hash[i] = NULL;
1104
1105#if !defined(CONFIG_SOFTMMU)
1106 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1107#endif
33417e70
FB
1108}
1109
61382a50
FB
1110static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
1111{
1112 if (addr == (tlb_entry->address &
1113 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
1114 tlb_entry->address = -1;
1115}
1116
33417e70
FB
1117void tlb_flush_page(CPUState *env, uint32_t addr)
1118{
9fa3e853
FB
1119 int i, n;
1120 VirtPageDesc *vp;
1121 PageDesc *p;
1122 TranslationBlock *tb;
0124311e 1123
9fa3e853
FB
1124#if defined(DEBUG_TLB)
1125 printf("tlb_flush_page: 0x%08x\n", addr);
1126#endif
0124311e
FB
1127 /* must reset current TB so that interrupts cannot modify the
1128 links while we are modifying them */
1129 env->current_tb = NULL;
61382a50
FB
1130
1131 addr &= TARGET_PAGE_MASK;
1132 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1133 tlb_flush_entry(&env->tlb_read[0][i], addr);
1134 tlb_flush_entry(&env->tlb_write[0][i], addr);
1135 tlb_flush_entry(&env->tlb_read[1][i], addr);
1136 tlb_flush_entry(&env->tlb_write[1][i], addr);
0124311e 1137
9fa3e853
FB
1138 /* remove from the virtual pc hash table all the TB at this
1139 virtual address */
1140
1141 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1142 if (vp && vp->valid_tag == virt_valid_tag) {
1143 p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
1144 if (p) {
1145 /* we remove all the links to the TBs in this virtual page */
1146 tb = p->first_tb;
1147 while (tb != NULL) {
1148 n = (long)tb & 3;
1149 tb = (TranslationBlock *)((long)tb & ~3);
1150 if ((tb->pc & TARGET_PAGE_MASK) == addr ||
1151 ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
1152 tb_invalidate(tb);
1153 }
1154 tb = tb->page_next[n];
1155 }
1156 }
98857888 1157 vp->valid_tag = 0;
9fa3e853
FB
1158 }
1159
0124311e 1160#if !defined(CONFIG_SOFTMMU)
9fa3e853 1161 if (addr < MMAP_AREA_END)
0124311e 1162 munmap((void *)addr, TARGET_PAGE_SIZE);
61382a50 1163#endif
9fa3e853
FB
1164}
1165
1166static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
1167{
1168 if (addr == (tlb_entry->address &
1169 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
98857888
FB
1170 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
1171 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
1ccde1cb 1172 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
9fa3e853
FB
1173 }
1174}
1175
1176/* update the TLBs so that writes to code in the virtual page 'addr'
1177 can be detected */
1178static void tlb_protect_code(CPUState *env, uint32_t addr)
1179{
1180 int i;
1181
1182 addr &= TARGET_PAGE_MASK;
1183 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1184 tlb_protect_code1(&env->tlb_write[0][i], addr);
1185 tlb_protect_code1(&env->tlb_write[1][i], addr);
1186#if !defined(CONFIG_SOFTMMU)
1187 /* NOTE: as we generated the code for this page, it is already at
1188 least readable */
1189 if (addr < MMAP_AREA_END)
1190 mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
1191#endif
1192}
1193
1194static inline void tlb_unprotect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
1195{
1196 if (addr == (tlb_entry->address &
1197 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1198 (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE) {
1ccde1cb 1199 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
0124311e 1200 }
61382a50
FB
1201}
1202
/* update the TLB so that writes in virtual page 'addr' are no longer
   tested for self-modifying code */
1205static void tlb_unprotect_code(CPUState *env, uint32_t addr)
61382a50 1206{
33417e70
FB
1207 int i;
1208
61382a50 1209 addr &= TARGET_PAGE_MASK;
33417e70 1210 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
9fa3e853
FB
1211 tlb_unprotect_code1(&env->tlb_write[0][i], addr);
1212 tlb_unprotect_code1(&env->tlb_write[1][i], addr);
1213}
1214
1215static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
1216 uint32_t phys_addr)
1217{
1218 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
1219 ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
1ccde1cb 1220 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
9fa3e853
FB
1221 }
1222}
1223
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self-modifying code */
1ccde1cb 1226static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr, target_ulong vaddr)
9fa3e853
FB
1227{
1228 int i;
1229
1230 phys_addr &= TARGET_PAGE_MASK;
1ccde1cb
FB
1231 phys_addr += (long)phys_ram_base;
1232 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1233 tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
1234 tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
1235}
1236
1237static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1238 unsigned long start, unsigned long length)
1239{
1240 unsigned long addr;
1241 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1242 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1243 if ((addr - start) < length) {
1244 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1245 }
1246 }
1247}
1248
1249void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
1250{
1251 CPUState *env;
59817ccb 1252 target_ulong length, start1;
1ccde1cb
FB
1253 int i;
1254
1255 start &= TARGET_PAGE_MASK;
1256 end = TARGET_PAGE_ALIGN(end);
1257
1258 length = end - start;
1259 if (length == 0)
1260 return;
1261 memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);
1262
1263 env = cpu_single_env;
1264 /* we modify the TLB cache so that the dirty bit will be set again
1265 when accessing the range */
59817ccb 1266 start1 = start + (unsigned long)phys_ram_base;
9fa3e853 1267 for(i = 0; i < CPU_TLB_SIZE; i++)
59817ccb 1268 tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
9fa3e853 1269 for(i = 0; i < CPU_TLB_SIZE; i++)
59817ccb
FB
1270 tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
1271
1272#if !defined(CONFIG_SOFTMMU)
1273 /* XXX: this is expensive */
1274 {
1275 VirtPageDesc *p;
1276 int j;
1277 target_ulong addr;
1278
1279 for(i = 0; i < L1_SIZE; i++) {
1280 p = l1_virt_map[i];
1281 if (p) {
1282 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1283 for(j = 0; j < L2_SIZE; j++) {
1284 if (p->valid_tag == virt_valid_tag &&
1285 p->phys_addr >= start && p->phys_addr < end &&
1286 (p->prot & PROT_WRITE)) {
1287 if (addr < MMAP_AREA_END) {
1288 mprotect((void *)addr, TARGET_PAGE_SIZE,
1289 p->prot & ~PROT_WRITE);
1290 }
1291 }
1292 addr += TARGET_PAGE_SIZE;
1293 p++;
1294 }
1295 }
1296 }
1297 }
1298#endif
1ccde1cb
FB
1299}
1300
1301static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1302 unsigned long start)
1303{
1304 unsigned long addr;
1305 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1306 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1307 if (addr == start) {
1308 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
1309 }
1310 }
1311}
1312
1313/* update the TLB corresponding to virtual page vaddr and phys addr
1314 addr so that it is no longer dirty */
1315static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1316{
1317 CPUState *env = cpu_single_env;
1318 int i;
1319
1320 phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;
1321
1322 addr &= TARGET_PAGE_MASK;
1323 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1324 tlb_set_dirty1(&env->tlb_write[0][i], addr);
1325 tlb_set_dirty1(&env->tlb_write[1][i], addr);
9fa3e853
FB
1326}
1327
59817ccb
FB
1328/* add a new TLB entry. At most one entry for a given virtual address
1329 is permitted. Return 0 if OK or 2 if the page could not be mapped
1330 (can only happen in non SOFTMMU mode for I/O pages or pages
1331 conflicting with the host address space). */
9fa3e853
FB
1332int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
1333 int is_user, int is_softmmu)
1334{
1335 PageDesc *p;
1336 target_ulong pd;
1337 TranslationBlock *first_tb;
1338 unsigned int index;
1339 target_ulong address, addend;
1340 int ret;
1341
1342 p = page_find(paddr >> TARGET_PAGE_BITS);
1343 if (!p) {
1344 pd = IO_MEM_UNASSIGNED;
1345 first_tb = NULL;
1346 } else {
1347 pd = p->phys_offset;
1348 first_tb = p->first_tb;
1349 }
1350#if defined(DEBUG_TLB)
1351 printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
1352 vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
1353#endif
1354
1355 ret = 0;
1356#if !defined(CONFIG_SOFTMMU)
1357 if (is_softmmu)
1358#endif
1359 {
1360 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1361 /* IO memory case */
1362 address = vaddr | pd;
1363 addend = paddr;
1364 } else {
1365 /* standard memory */
1366 address = vaddr;
1367 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1368 }
1369
1370 index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
1371 addend -= vaddr;
1372 if (prot & PROT_READ) {
1373 env->tlb_read[is_user][index].address = address;
1374 env->tlb_read[is_user][index].addend = addend;
1375 } else {
1376 env->tlb_read[is_user][index].address = -1;
1377 env->tlb_read[is_user][index].addend = -1;
1378 }
1379 if (prot & PROT_WRITE) {
1380 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1381 /* ROM: access is ignored (same as unassigned) */
1382 env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
1ccde1cb 1383 env->tlb_write[is_user][index].addend = addend;
9fa3e853
FB
1384 } else if (first_tb) {
1385 /* if code is present, we use a specific memory
1386 handler. It works only for physical memory access */
1387 env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
1ccde1cb
FB
1388 env->tlb_write[is_user][index].addend = addend;
1389 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1390 !cpu_physical_memory_is_dirty(pd)) {
1391 env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
1392 env->tlb_write[is_user][index].addend = addend;
9fa3e853
FB
1393 } else {
1394 env->tlb_write[is_user][index].address = address;
1395 env->tlb_write[is_user][index].addend = addend;
1396 }
1397 } else {
1398 env->tlb_write[is_user][index].address = -1;
1399 env->tlb_write[is_user][index].addend = -1;
1400 }
1401 }
1402#if !defined(CONFIG_SOFTMMU)
1403 else {
1404 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1405 /* IO access: no mapping is done as it will be handled by the
1406 soft MMU */
1407 if (!(env->hflags & HF_SOFTMMU_MASK))
1408 ret = 2;
1409 } else {
1410 void *map_addr;
59817ccb
FB
1411
1412 if (vaddr >= MMAP_AREA_END) {
1413 ret = 2;
1414 } else {
1415 if (prot & PROT_WRITE) {
1416 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1417 first_tb ||
1418 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1419 !cpu_physical_memory_is_dirty(pd))) {
1420 /* ROM: we do as if code was inside */
1421 /* if code is present, we only map as read only and save the
1422 original mapping */
1423 VirtPageDesc *vp;
1424
1425 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
1426 vp->phys_addr = pd;
1427 vp->prot = prot;
1428 vp->valid_tag = virt_valid_tag;
1429 prot &= ~PAGE_WRITE;
1430 }
1431 }
1432 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1433 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1434 if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1436 paddr, vaddr);
9fa3e853 1437 }
9fa3e853
FB
1438 }
1439 }
1440 }
1441#endif
1442 return ret;
1443}
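/* Illustrative note (not from the original source): for normal RAM the TLB
   entry stores 'addend' = host_address_of_page - vaddr, so the fast path in
   the softmmu templates can compute the host pointer with one addition:

       host_ptr = vaddr + env->tlb_read[is_user][index].addend;

   For I/O pages the low bits of 'address' carry the io_index instead, which
   routes the access through io_mem_read/io_mem_write. */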
1444
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
1447int page_unprotect(unsigned long addr)
1448{
1449#if !defined(CONFIG_SOFTMMU)
1450 VirtPageDesc *vp;
1451
1452#if defined(DEBUG_TLB)
1453 printf("page_unprotect: addr=0x%08x\n", addr);
1454#endif
1455 addr &= TARGET_PAGE_MASK;
59817ccb
FB
1456
1457 /* if it is not mapped, no need to worry here */
1458 if (addr >= MMAP_AREA_END)
1459 return 0;
9fa3e853
FB
1460 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1461 if (!vp)
1462 return 0;
    /* NOTE: in this case, valid_tag is _not_ tested as it
       validates only the code TLB */
1465 if (vp->valid_tag != virt_valid_tag)
1466 return 0;
1467 if (!(vp->prot & PAGE_WRITE))
1468 return 0;
1469#if defined(DEBUG_TLB)
1470 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1471 addr, vp->phys_addr, vp->prot);
1472#endif
59817ccb
FB
1473 /* set the dirty bit */
1474 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
1475 /* flush the code inside */
9fa3e853 1476 tb_invalidate_phys_page(vp->phys_addr);
59817ccb
FB
1477 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1478 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1479 (unsigned long)addr, vp->prot);
9fa3e853
FB
1480 return 1;
1481#else
1482 return 0;
1483#endif
33417e70
FB
1484}
1485
0124311e
FB
1486#else
1487
ee8b7021 1488void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1489{
1490}
1491
1492void tlb_flush_page(CPUState *env, uint32_t addr)
1493{
1494}
1495
1496void tlb_flush_page_write(CPUState *env, uint32_t addr)
1497{
1498}
1499
9fa3e853
FB
1500int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
1501 int is_user, int is_softmmu)
1502{
1503 return 0;
1504}
0124311e 1505
9fa3e853
FB
1506/* dump memory mappings */
1507void page_dump(FILE *f)
33417e70 1508{
9fa3e853
FB
1509 unsigned long start, end;
1510 int i, j, prot, prot1;
1511 PageDesc *p;
33417e70 1512
9fa3e853
FB
1513 fprintf(f, "%-8s %-8s %-8s %s\n",
1514 "start", "end", "size", "prot");
1515 start = -1;
1516 end = -1;
1517 prot = 0;
1518 for(i = 0; i <= L1_SIZE; i++) {
1519 if (i < L1_SIZE)
1520 p = l1_map[i];
1521 else
1522 p = NULL;
1523 for(j = 0;j < L2_SIZE; j++) {
1524 if (!p)
1525 prot1 = 0;
1526 else
1527 prot1 = p[j].flags;
1528 if (prot1 != prot) {
1529 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1530 if (start != -1) {
1531 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1532 start, end, end - start,
1533 prot & PAGE_READ ? 'r' : '-',
1534 prot & PAGE_WRITE ? 'w' : '-',
1535 prot & PAGE_EXEC ? 'x' : '-');
1536 }
1537 if (prot1 != 0)
1538 start = end;
1539 else
1540 start = -1;
1541 prot = prot1;
1542 }
1543 if (!p)
1544 break;
1545 }
33417e70 1546 }
33417e70
FB
1547}
1548
9fa3e853 1549int page_get_flags(unsigned long address)
33417e70 1550{
9fa3e853
FB
1551 PageDesc *p;
1552
1553 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1554 if (!p)
9fa3e853
FB
1555 return 0;
1556 return p->flags;
1557}
1558
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
1562void page_set_flags(unsigned long start, unsigned long end, int flags)
1563{
1564 PageDesc *p;
1565 unsigned long addr;
1566
1567 start = start & TARGET_PAGE_MASK;
1568 end = TARGET_PAGE_ALIGN(end);
1569 if (flags & PAGE_WRITE)
1570 flags |= PAGE_WRITE_ORG;
1571 spin_lock(&tb_lock);
1572 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1573 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1574 /* if the write protection is set, then we invalidate the code
1575 inside */
1576 if (!(p->flags & PAGE_WRITE) &&
1577 (flags & PAGE_WRITE) &&
1578 p->first_tb) {
1579 tb_invalidate_phys_page(addr);
1580 }
1581 p->flags = flags;
1582 }
1583 spin_unlock(&tb_lock);
33417e70
FB
1584}
1585
9fa3e853
FB
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
1588int page_unprotect(unsigned long address)
1589{
1590 unsigned int page_index, prot, pindex;
1591 PageDesc *p, *p1;
1592 unsigned long host_start, host_end, addr;
1593
1594 host_start = address & host_page_mask;
1595 page_index = host_start >> TARGET_PAGE_BITS;
1596 p1 = page_find(page_index);
1597 if (!p1)
1598 return 0;
1599 host_end = host_start + host_page_size;
1600 p = p1;
1601 prot = 0;
1602 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1603 prot |= p->flags;
1604 p++;
1605 }
1606 /* if the page was really writable, then we change its
1607 protection back to writable */
1608 if (prot & PAGE_WRITE_ORG) {
1609 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1610 if (!(p1[pindex].flags & PAGE_WRITE)) {
1611 mprotect((void *)host_start, host_page_size,
1612 (prot & PAGE_BITS) | PAGE_WRITE);
1613 p1[pindex].flags |= PAGE_WRITE;
1614 /* and since the content will be modified, we must invalidate
1615 the corresponding translated code. */
1616 tb_invalidate_phys_page(address);
1617#ifdef DEBUG_TB_CHECK
1618 tb_invalidate_check(address);
1619#endif
1620 return 1;
1621 }
1622 }
1623 return 0;
1624}
1625
1626/* call this function when system calls directly modify a memory area */
1627void page_unprotect_range(uint8_t *data, unsigned long data_size)
1628{
1629 unsigned long start, end, addr;
1630
1631 start = (unsigned long)data;
1632 end = start + data_size;
1633 start &= TARGET_PAGE_MASK;
1634 end = TARGET_PAGE_ALIGN(end);
1635 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1636 page_unprotect(addr);
1637 }
1638}
1639
1ccde1cb
FB
1640static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1641{
1642}
1643
9fa3e853
FB
1644#endif /* defined(CONFIG_USER_ONLY) */
1645
33417e70
FB
1646/* register physical memory. 'size' must be a multiple of the target
1647 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1648 io memory page */
1649void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
1650 long phys_offset)
1651{
1652 unsigned long addr, end_addr;
9fa3e853 1653 PageDesc *p;
33417e70
FB
1654
1655 end_addr = start_addr + size;
1656 for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
9fa3e853
FB
1657 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1658 p->phys_offset = phys_offset;
1659 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
33417e70
FB
1660 phys_offset += TARGET_PAGE_SIZE;
1661 }
1662}
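/* Usage example (illustrative, not from the original source): a machine
   model would typically register its RAM and a ROM region like this:

       cpu_register_physical_memory(0x00000000, ram_size, 0 | IO_MEM_RAM);
       cpu_register_physical_memory(0x000f0000, 0x10000,
                                    rom_offset | IO_MEM_ROM);

   'ram_size' and 'rom_offset' are hypothetical; the offset part is the
   location of the backing storage inside phys_ram_base. */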
1663
1664static uint32_t unassigned_mem_readb(uint32_t addr)
1665{
1666 return 0;
1667}
1668
1ccde1cb 1669static void unassigned_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
33417e70
FB
1670{
1671}
1672
1673static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1674 unassigned_mem_readb,
1675 unassigned_mem_readb,
1676 unassigned_mem_readb,
1677};
1678
1679static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1680 unassigned_mem_writeb,
1681 unassigned_mem_writeb,
1682 unassigned_mem_writeb,
1683};
1684
9fa3e853
FB
1685/* self modifying code support in soft mmu mode : writing to a page
1686 containing code comes to these functions */
1687
1ccde1cb 1688static void code_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
9fa3e853 1689{
1ccde1cb
FB
1690 unsigned long phys_addr;
1691
1692 phys_addr = addr - (long)phys_ram_base;
9fa3e853 1693#if !defined(CONFIG_USER_ONLY)
1ccde1cb 1694 tb_invalidate_phys_page_fast(phys_addr, 1, vaddr);
9fa3e853 1695#endif
1ccde1cb
FB
1696 stb_raw((uint8_t *)addr, val);
1697 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
9fa3e853
FB
1698}
1699
1ccde1cb 1700static void code_mem_writew(uint32_t addr, uint32_t val, uint32_t vaddr)
9fa3e853 1701{
1ccde1cb
FB
1702 unsigned long phys_addr;
1703
1704 phys_addr = addr - (long)phys_ram_base;
9fa3e853 1705#if !defined(CONFIG_USER_ONLY)
1ccde1cb 1706 tb_invalidate_phys_page_fast(phys_addr, 2, vaddr);
9fa3e853 1707#endif
1ccde1cb
FB
1708 stw_raw((uint8_t *)addr, val);
1709 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
9fa3e853
FB
1710}
1711
1ccde1cb 1712static void code_mem_writel(uint32_t addr, uint32_t val, uint32_t vaddr)
9fa3e853 1713{
1ccde1cb
FB
1714 unsigned long phys_addr;
1715
1716 phys_addr = addr - (long)phys_ram_base;
9fa3e853 1717#if !defined(CONFIG_USER_ONLY)
1ccde1cb 1718 tb_invalidate_phys_page_fast(phys_addr, 4, vaddr);
9fa3e853 1719#endif
1ccde1cb
FB
1720 stl_raw((uint8_t *)addr, val);
1721 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
9fa3e853
FB
1722}
1723
1724static CPUReadMemoryFunc *code_mem_read[3] = {
1725 NULL, /* never used */
1726 NULL, /* never used */
1727 NULL, /* never used */
1728};
1729
1730static CPUWriteMemoryFunc *code_mem_write[3] = {
1731 code_mem_writeb,
1732 code_mem_writew,
1733 code_mem_writel,
1734};
33417e70 1735
1ccde1cb
FB
1736static void notdirty_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
1737{
1738 stb_raw((uint8_t *)addr, val);
1739 tlb_set_dirty(addr, vaddr);
1740}
1741
1742static void notdirty_mem_writew(uint32_t addr, uint32_t val, uint32_t vaddr)
1743{
1744 stw_raw((uint8_t *)addr, val);
1745 tlb_set_dirty(addr, vaddr);
1746}
1747
1748static void notdirty_mem_writel(uint32_t addr, uint32_t val, uint32_t vaddr)
1749{
1750 stl_raw((uint8_t *)addr, val);
1751 tlb_set_dirty(addr, vaddr);
1752}
1753
1754static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1755 notdirty_mem_writeb,
1756 notdirty_mem_writew,
1757 notdirty_mem_writel,
1758};
1759
33417e70
FB
1760static void io_mem_init(void)
1761{
9fa3e853
FB
1762 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write);
1763 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write);
1764 cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write);
1ccde1cb
FB
1765 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write);
1766 io_mem_nb = 5;
1767
1768 /* alloc dirty bits array */
59817ccb 1769 phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
1770}
1771
1772/* mem_read and mem_write are arrays of functions containing the
1773 function to access byte (index 0), word (index 1) and dword (index
1774 2). All functions must be supplied. If io_index is non zero, the
1775 corresponding io zone is modified. If it is zero, a new io zone is
1776 allocated. The return value can be used with
1777 cpu_register_physical_memory(). (-1) is returned if error. */
1778int cpu_register_io_memory(int io_index,
1779 CPUReadMemoryFunc **mem_read,
1780 CPUWriteMemoryFunc **mem_write)
1781{
1782 int i;
1783
1784 if (io_index <= 0) {
1785 if (io_index >= IO_MEM_NB_ENTRIES)
1786 return -1;
1787 io_index = io_mem_nb++;
1788 } else {
1789 if (io_index >= IO_MEM_NB_ENTRIES)
1790 return -1;
1791 }
1792
1793 for(i = 0;i < 3; i++) {
1794 io_mem_read[io_index][i] = mem_read[i];
1795 io_mem_write[io_index][i] = mem_write[i];
1796 }
1797 return io_index << IO_MEM_SHIFT;
1798}
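/* Usage example (illustrative, not from the original source): a device
   supplies one read and one write handler per access size, registers them,
   and then maps the returned value into the physical address space:

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int io = cpu_register_io_memory(0, mydev_read, mydev_write);
       cpu_register_physical_memory(0xfe000000, 0x1000, io);

   'mydev_*' and the addresses are hypothetical. */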
61382a50 1799
13eb76e0
FB
1800/* physical memory access (slow version, mainly for debug) */
1801#if defined(CONFIG_USER_ONLY)
b448f2f3 1802void cpu_physical_memory_rw(target_ulong addr, uint8_t *buf,
13eb76e0
FB
1803 int len, int is_write)
1804{
1805 int l, flags;
1806 target_ulong page;
1807
1808 while (len > 0) {
1809 page = addr & TARGET_PAGE_MASK;
1810 l = (page + TARGET_PAGE_SIZE) - addr;
1811 if (l > len)
1812 l = len;
1813 flags = page_get_flags(page);
1814 if (!(flags & PAGE_VALID))
1815 return;
1816 if (is_write) {
1817 if (!(flags & PAGE_WRITE))
1818 return;
            memcpy((uint8_t *)addr, buf, l);
1820 } else {
1821 if (!(flags & PAGE_READ))
1822 return;
            memcpy(buf, (uint8_t *)addr, l);
1824 }
1825 len -= l;
1826 buf += l;
1827 addr += l;
1828 }
1829}
1830#else
b448f2f3 1831void cpu_physical_memory_rw(target_ulong addr, uint8_t *buf,
13eb76e0
FB
1832 int len, int is_write)
1833{
1834 int l, io_index;
1835 uint8_t *ptr;
1836 uint32_t val;
1837 target_ulong page, pd;
1838 PageDesc *p;
1839
1840 while (len > 0) {
1841 page = addr & TARGET_PAGE_MASK;
1842 l = (page + TARGET_PAGE_SIZE) - addr;
1843 if (l > len)
1844 l = len;
1845 p = page_find(page >> TARGET_PAGE_BITS);
1846 if (!p) {
1847 pd = IO_MEM_UNASSIGNED;
1848 } else {
1849 pd = p->phys_offset;
1850 }
1851
1852 if (is_write) {
1853 if ((pd & ~TARGET_PAGE_MASK) != 0) {
1854 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
1855 if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
1857 val = ldl_raw(buf);
1ccde1cb 1858 io_mem_write[io_index][2](addr, val, 0);
13eb76e0
FB
1859 l = 4;
1860 } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
1862 val = lduw_raw(buf);
1ccde1cb 1863 io_mem_write[io_index][1](addr, val, 0);
13eb76e0
FB
1864 l = 2;
1865 } else {
1866 /* 8 bit access */
1867 val = ldub_raw(buf);
1ccde1cb 1868 io_mem_write[io_index][0](addr, val, 0);
13eb76e0
FB
1869 l = 1;
1870 }
1871 } else {
b448f2f3
FB
1872 unsigned long addr1;
1873 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 1874 /* RAM case */
b448f2f3 1875 ptr = phys_ram_base + addr1;
13eb76e0 1876 memcpy(ptr, buf, l);
b448f2f3
FB
1877 /* invalidate code */
1878 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
1879 /* set dirty bit */
1880 phys_ram_dirty[page >> TARGET_PAGE_BITS] = 1;
13eb76e0
FB
1881 }
1882 } else {
1883 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
1884 (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
1885 /* I/O case */
1886 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
1887 if (l >= 4 && ((addr & 3) == 0)) {
1888 /* 32 bit read access */
1889 val = io_mem_read[io_index][2](addr);
1890 stl_raw(buf, val);
1891 l = 4;
1892 } else if (l >= 2 && ((addr & 1) == 0)) {
1893 /* 16 bit read access */
1894 val = io_mem_read[io_index][1](addr);
1895 stw_raw(buf, val);
1896 l = 2;
1897 } else {
1898 /* 8 bit access */
1899 val = io_mem_read[io_index][0](addr);
1900 stb_raw(buf, val);
1901 l = 1;
1902 }
1903 } else {
1904 /* RAM case */
1905 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
1906 (addr & ~TARGET_PAGE_MASK);
1907 memcpy(buf, ptr, l);
1908 }
1909 }
1910 len -= l;
1911 buf += l;
1912 addr += l;
1913 }
1914}
1915#endif
1916
1917/* virtual memory access for debug */
b448f2f3
FB
1918int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
1919 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1920{
1921 int l;
1922 target_ulong page, phys_addr;
1923
1924 while (len > 0) {
1925 page = addr & TARGET_PAGE_MASK;
1926 phys_addr = cpu_get_phys_page_debug(env, page);
1927 /* if no physical page mapped, return an error */
1928 if (phys_addr == -1)
1929 return -1;
1930 l = (page + TARGET_PAGE_SIZE) - addr;
1931 if (l > len)
1932 l = len;
b448f2f3
FB
1933 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
1934 buf, l, is_write);
13eb76e0
FB
1935 len -= l;
1936 buf += l;
1937 addr += l;
1938 }
1939 return 0;
1940}
1941
61382a50
FB
1942#if !defined(CONFIG_USER_ONLY)
1943
1944#define MMUSUFFIX _cmmu
1945#define GETPC() NULL
1946#define env cpu_single_env
1947
1948#define SHIFT 0
1949#include "softmmu_template.h"
1950
1951#define SHIFT 1
1952#include "softmmu_template.h"
1953
1954#define SHIFT 2
1955#include "softmmu_template.h"
1956
1957#define SHIFT 3
1958#include "softmmu_template.h"
1959
1960#undef env
1961
1962#endif