/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#if !defined(CONFIG_SOFTMMU)
#include <sys/mman.h>
#endif

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000
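
/* note: this is the fixed host virtual window that the non-SOFTMMU
   code below mmap()s guest physical pages into and munmap()s on
   tlb_flush(); the end address is an assumption about the (32-bit)
   host address space layout */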

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

typedef struct PageDesc {
    /* offset in memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
    /* list of TBs intersecting this physical page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of code write invalidations of a given page before switching to
       a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
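
/* example of the phys_offset encoding (as used below by
   cpu_register_physical_memory() and tlb_set_page()): for ordinary RAM
   the low bits are IO_MEM_RAM (0), so phys_offset is simply the offset
   of the page inside phys_ram_base; for a device page the low bits
   hold io_index << IO_MEM_SHIFT as returned by
   cpu_register_io_memory() */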

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
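
/* worked example of the two-level page table (assuming
   TARGET_PAGE_BITS == 12, so L1_BITS == 10): for a target address
   0x12345678, index = 0x12345678 >> 12 = 0x12345; l1_map[index >> 10]
   (= l1_map[0x48]) points to an L2 array of 1024 PageDesc entries, and
   entry index & 0x3ff (= 0x345) describes the page */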

static void io_mem_init(void);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;

static PageDesc *l1_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

static void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    real_host_page_size = 4096;
#else
    real_host_page_size = getpagesize();
#endif
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, uint32_t addr);
static void tlb_unprotect_code(CPUState *env, uint32_t addr);
static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr, target_ulong vaddr);

static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
    int i;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    virt_page_flush();

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++)
        tb_phys_hash[i] = NULL;
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

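/* TB lists use tagged pointers: the low 2 bits of each pointer stored
   in page_next[]/jmp_next[] encode which slot (0 or 1) of the
   pointed-to TB the list continues through, and the value 2 marks the
   end of a jump list (jmp_first points back to the owning TB | 2) */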
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
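
/* e.g. set_bits(tab, 3, 7) marks bits 3..9: it ORs 0xf8 into tab[0]
   (bits 3-7) and 0x03 into tab[1] (bits 8-9) */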
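/* build a bitmap of the page with one bit per byte; a set bit means
   the byte is covered by at least one TB */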
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'vaddr' is a virtual address referencing
   the physical page of code. It is only used as a hint if there is no
   code left. */
static void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                          target_ulong vaddr)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb, *tb_next;
    target_ulong tb_start, tb_end;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code_phys(cpu_single_env, start, vaddr);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len, target_ulong vaddr)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (cpu_single_env->cr[0] & CR0_PE_MASK) {
        printf("modifying code at 0x%x size=%d EIP=%x\n",
               (vaddr & TARGET_PAGE_MASK) | (start & ~TARGET_PAGE_MASK), len,
               cpu_single_env->eip);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, vaddr);
    }
}

/* invalidate all TBs which intersect with the target virtual page
   starting in range [start;end[. This function is usually used when
   the target processor flushes its I-cache. NOTE: start and end must
   refer to the same physical page */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb, *tb_next;
    target_ulong pc;
    target_ulong phys_start;

#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        vp = virt_page_find(start >> TARGET_PAGE_BITS);
        if (!vp)
            return;
        if (vp->valid_tag != virt_valid_tag)
            return;
        phys_start = vp->phys_addr + (start & ~TARGET_PAGE_MASK);
    }
#else
    phys_start = start;
#endif
    p = page_find(phys_start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        pc = tb->pc;
        if (!((pc + tb->size) <= start || pc >= end)) {
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb)
        tlb_unprotect_code(cpu_single_env, start);
#endif
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb;

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
}
#endif

/* add the TB to the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
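    /* no exact match: tbs[m_max] is now the last TB whose tc_ptr is
       <= the searched tc_ptr, i.e. the one containing it (TBs are
       allocated sequentially from code_gen_buffer, so tc_ptr values
       are increasing) */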
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;
    tb_invalidate_page_range(pc, pc + 1);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;
    tb_invalidate_page_range(pc, pc + 1);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logs */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))
                goto found;
        }
        return 0;
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, uint32_t addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */

    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}

static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, uint32_t addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}

static inline void tlb_unprotect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in virtual page 'addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code(CPUState *env, uint32_t addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code1(&env->tlb_write[0][i], addr);
    tlb_unprotect_code1(&env->tlb_write[1][i], addr);
}

static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       uint32_t phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no
   longer tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

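/* dirty page tracking: phys_ram_dirty keeps one byte per target page;
   clearing it here goes together with downgrading the matching TLB
   write entries to IO_MEM_NOTDIRTY, so the next write takes the slow
   path and re-marks the page dirty (see tlb_set_dirty below) */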
void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    CPUState *env;
    target_ulong length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* mark the physical page 'addr' (seen through virtual page vaddr)
   dirty and switch the corresponding TLB write entries back to fast
   RAM accesses */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}

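/* CPUTLBEntry format used below: 'address' holds the target virtual
   page plus low-bit flags (an IO_MEM_* selector or TLB_INVALID_MASK),
   and 'addend' is chosen so that host_ptr = guest_vaddr + addend;
   e.g. for RAM, addend = phys_ram_base + (pd & TARGET_PAGE_MASK) - vaddr */
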
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PageDesc *p;
    target_ulong pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address, addend;
    int ret;

    p = page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
        first_tb = NULL;
    } else {
        pd = p->phys_offset;
        first_tb = p->first_tb;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                        first_tb ||
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr);
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, uint32_t addr)
{
}

void tlb_flush_page_write(CPUState *env, uint32_t addr)
{
}

int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the page was write protected and is becoming writable,
           we invalidate the code inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr);
    }
}

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}

#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
                                  long phys_offset)
{
    unsigned long addr, end_addr;
    PageDesc *p;

    end_addr = start_addr + size;
    for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}

static uint32_t unassigned_mem_readb(uint32_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

/* self modifying code support in soft mmu mode: writes to a page
   containing code come to these functions */

static void code_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    unsigned long phys_addr;

    phys_addr = addr - (long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1, vaddr);
#endif
    stb_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    unsigned long phys_addr;

    phys_addr = addr - (long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2, vaddr);
#endif
    stw_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    unsigned long phys_addr;

    phys_addr = addr - (long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4, vaddr);
#endif
    stl_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

static void notdirty_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    stb_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, vaddr);
}

static void notdirty_mem_writew(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    stw_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, vaddr);
}

static void notdirty_mem_writel(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    stl_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is positive, the
   corresponding io zone is modified. If it is <= 0, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write)
{
    int i;

    if (io_index <= 0) {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    return io_index << IO_MEM_SHIFT;
}
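
/* hypothetical registration sketch: a device supplies three read and
   three write callbacks (byte/word/long), then maps the returned value:
       static CPUReadMemoryFunc *my_read[3] = { rb, rw, rl };
       static CPUWriteMemoryFunc *my_write[3] = { wb, ww, wl };
       int io = cpu_register_io_memory(0, my_read, my_write);
       cpu_register_physical_memory(0xe0000000, 0x1000, io);
   (names and addresses here are illustrative only) */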

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_ulong addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
1836#else
b448f2f3 1837void cpu_physical_memory_rw(target_ulong addr, uint8_t *buf,
13eb76e0
FB
1838 int len, int is_write)
1839{
1840 int l, io_index;
1841 uint8_t *ptr;
1842 uint32_t val;
1843 target_ulong page, pd;
1844 PageDesc *p;
1845
1846 while (len > 0) {
1847 page = addr & TARGET_PAGE_MASK;
1848 l = (page + TARGET_PAGE_SIZE) - addr;
1849 if (l > len)
1850 l = len;
1851 p = page_find(page >> TARGET_PAGE_BITS);
1852 if (!p) {
1853 pd = IO_MEM_UNASSIGNED;
1854 } else {
1855 pd = p->phys_offset;
1856 }
1857
1858 if (is_write) {
1859 if ((pd & ~TARGET_PAGE_MASK) != 0) {
1860 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
1861 if (l >= 4 && ((addr & 3) == 0)) {
1862 /* 32 bit read access */
1863 val = ldl_raw(buf);
1ccde1cb 1864 io_mem_write[io_index][2](addr, val, 0);
13eb76e0
FB
1865 l = 4;
1866 } else if (l >= 2 && ((addr & 1) == 0)) {
1867 /* 16 bit read access */
1868 val = lduw_raw(buf);
1ccde1cb 1869 io_mem_write[io_index][1](addr, val, 0);
13eb76e0
FB
1870 l = 2;
1871 } else {
1872 /* 8 bit access */
1873 val = ldub_raw(buf);
1ccde1cb 1874 io_mem_write[io_index][0](addr, val, 0);
13eb76e0
FB
1875 l = 1;
1876 }
1877 } else {
b448f2f3
FB
1878 unsigned long addr1;
1879 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 1880 /* RAM case */
b448f2f3 1881 ptr = phys_ram_base + addr1;
13eb76e0 1882 memcpy(ptr, buf, l);
b448f2f3
FB
1883 /* invalidate code */
1884 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
1885 /* set dirty bit */
1886 phys_ram_dirty[page >> TARGET_PAGE_BITS] = 1;
13eb76e0
FB
1887 }
1888 } else {
1889 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
1890 (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
1891 /* I/O case */
1892 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
1893 if (l >= 4 && ((addr & 3) == 0)) {
1894 /* 32 bit read access */
1895 val = io_mem_read[io_index][2](addr);
1896 stl_raw(buf, val);
1897 l = 4;
1898 } else if (l >= 2 && ((addr & 1) == 0)) {
1899 /* 16 bit read access */
1900 val = io_mem_read[io_index][1](addr);
1901 stw_raw(buf, val);
1902 l = 2;
1903 } else {
1904 /* 8 bit access */
1905 val = io_mem_read[io_index][0](addr);
1906 stb_raw(buf, val);
1907 l = 1;
1908 }
1909 } else {
1910 /* RAM case */
1911 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
1912 (addr & ~TARGET_PAGE_MASK);
1913 memcpy(buf, ptr, l);
1914 }
1915 }
1916 len -= l;
1917 buf += l;
1918 addr += l;
1919 }
1920}
1921#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env

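/* instantiate the softmmu access helpers once per access size:
   SHIFT n selects a (1 << n)-byte access, i.e. 1, 2, 4 and 8 bytes;
   the _cmmu suffix builds the variants used for code fetches */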
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif